diff --git a/.dir-locals.el b/.dir-locals.el index 2fdca14f5dd73..0728ce905ddfe 100644 --- a/.dir-locals.el +++ b/.dir-locals.el @@ -83,6 +83,6 @@ )) (c-basic-offset . 4) (c-comment-only-line-offset . (0 . 0)) - (fill-column . 140) - (fci-rule-column . 140) + (fill-column . 100) + (fci-rule-column . 100) (compile-command . "gradle compileTestJava")))) diff --git a/.github/PULL_REQUEST_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE.md index 92b35e97baa05..6a4531f1bdefa 100644 --- a/.github/PULL_REQUEST_TEMPLATE.md +++ b/.github/PULL_REQUEST_TEMPLATE.md @@ -11,3 +11,4 @@ attention. - If submitting code, have you built your formula locally prior to submission with `gradle check`? - If submitting code, is your pull request against master? Unless there is a good reason otherwise, we prefer pull requests against master and will backport as needed. - If submitting code, have you checked that your submission is for an [OS that we support](https://www.elastic.co/support/matrix#show_os)? +- If you are submitting this code for a class then read our [policy](https://github.com/elastic/elasticsearch/blob/master/CONTRIBUTING.md#contributing-as-part-of-a-class) for that. diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 5885bf9def7eb..f9c69fbf5d6ee 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -88,8 +88,8 @@ Contributing to the Elasticsearch codebase **Repository:** [https://github.com/elastic/elasticsearch](https://github.com/elastic/elasticsearch) Make sure you have [Gradle](http://gradle.org) installed, as -Elasticsearch uses it as its build system. Gradle must be version 2.13 _exactly_ in -order to build successfully. +Elasticsearch uses it as its build system. Gradle must be at least +version 3.3 in order to build successfully. Eclipse users can automatically configure their IDE: `gradle eclipse` then `File: Import: Existing Projects into Workspace`. Select the @@ -117,7 +117,7 @@ For Eclipse, go to `Preferences->Java->Installed JREs` and add `-ea` to Please follow these formatting guidelines: * Java indent is 4 spaces -* Line width is 140 characters +* Line width is 100 characters * The rest is left to Java coding standards * Disable “auto-format on save” to prevent unnecessary format changes. This makes reviews much harder as it generates unnecessary formatting changes. If your IDE supports formatting only modified chunks that is fine to do. * Wildcard imports (`import foo.bar.baz.*`) are forbidden and will cause the build to fail. Please attempt to tame your IDE so it doesn't make them and please send a PR against this document with instructions for your IDE if it doesn't contain them. @@ -139,3 +139,32 @@ Before submitting your changes, run the test suite to make sure that nothing is ```sh gradle check ``` + +Contributing as part of a class +------------------------------- +In general Elasticsearch is happy to accept contributions that were created as +part of a class but strongly advise against making the contribution as part of +the class. So if you have code you wrote for a class feel free to submit it. + +Please, please, please do not assign contributing to Elasticsearch as part of a +class. If you really want to assign writing code for Elasticsearch as an +assignment then the code contributions should be made to your private clone and +opening PRs against the primary Elasticsearch clone must be optional, fully +voluntary, not for a grade, and without any deadlines. 
+ +Because: + +* While the code review process is likely very educational, it can take wildly +varying amounts of time depending on who is available, where the change is, and +how deep the change is. There is no way to predict how long it will take unless +we rush. +* We do not rush reviews without a very, very good reason. Class deadlines +aren't a good enough reason for us to rush reviews. +* We deeply discourage opening a PR you don't intend to work through the entire +code review process because it wastes our time. +* We don't have the capacity to absorb an entire class full of new contributors, +especially when they are unlikely to become long time contributors. + +Finally, we require that you run `gradle check` before submitting a +non-documentation contribution. This is mentioned above, but it is worth +repeating in this section because it has come up in this context. diff --git a/README.textile b/README.textile index 5bc5b7f25053a..9c2b2c5d91e2c 100644 --- a/README.textile +++ b/README.textile @@ -200,7 +200,7 @@ We have just covered a very small portion of what Elasticsearch is all about. Fo h3. Building from Source -Elasticsearch uses "Gradle":https://gradle.org for its build system. You'll need to have version 2.13 of Gradle installed. +Elasticsearch uses "Gradle":https://gradle.org for its build system. You'll need to have at least version 3.3 of Gradle installed. In order to create a distribution, simply run the @gradle assemble@ command in the cloned directory. diff --git a/build.gradle b/build.gradle index fd97470ec6c0a..09748ea1e8afb 100644 --- a/build.gradle +++ b/build.gradle @@ -18,15 +18,17 @@ */ import java.nio.file.Path +import java.util.regex.Matcher import org.eclipse.jgit.lib.Repository import org.eclipse.jgit.lib.RepositoryBuilder import org.gradle.plugins.ide.eclipse.model.SourceFolder import org.apache.tools.ant.taskdefs.condition.Os +import org.elasticsearch.gradle.VersionProperties // common maven publishing configuration subprojects { group = 'org.elasticsearch' - version = org.elasticsearch.gradle.VersionProperties.elasticsearch + version = VersionProperties.elasticsearch description = "Elasticsearch subproject ${project.path}" } @@ -59,12 +61,26 @@ configure(subprojects.findAll { it.projectDir.toPath().startsWith(rootPath) }) { } } +int prevMajor = Integer.parseInt(VersionProperties.elasticsearch.split('\\.')[0]) - 1 +String prevSnapshot = VersionProperties.elasticsearch.contains('alpha') ? '-SNAPSHOT' : '' +File versionFile = file('core/src/main/java/org/elasticsearch/Version.java') +List versionLines = versionFile.readLines('UTF-8') +int prevMinor = 0 +for (String line : versionLines) { + Matcher match = line =~ /\W+public static final Version V_${prevMajor}_(\d+)_.*/ + if (match.matches()) { + prevMinor = Math.max(Integer.parseInt(match.group(1)), prevMinor) + } +} + +// injecting groovy property variables into all projects allprojects { - // injecting groovy property variables into all projects project.ext { // for ide hacks... 
isEclipse = System.getProperty("eclipse.launcher") != null || gradle.startParameter.taskNames.contains('eclipse') || gradle.startParameter.taskNames.contains('cleanEclipse') isIdea = System.getProperty("idea.active") != null || gradle.startParameter.taskNames.contains('idea') || gradle.startParameter.taskNames.contains('cleanIdea') + // for backcompat testing + bwcVersion = "${prevMajor}.${prevMinor}.0${prevSnapshot}" } } @@ -112,6 +128,7 @@ subprojects { "org.elasticsearch.client:transport:${version}": ':client:transport', "org.elasticsearch.test:framework:${version}": ':test:framework', "org.elasticsearch.distribution.integ-test-zip:elasticsearch:${version}": ':distribution:integ-test-zip', + "org.elasticsearch.distribution.zip:elasticsearch:${bwcVersion}": ':distribution:bwc-zip', "org.elasticsearch.distribution.zip:elasticsearch:${version}": ':distribution:zip', "org.elasticsearch.distribution.tar:elasticsearch:${version}": ':distribution:tar', "org.elasticsearch.distribution.rpm:elasticsearch:${version}": ':distribution:rpm', @@ -123,10 +140,12 @@ subprojects { "org.elasticsearch.plugin:lang-mustache-client:${version}": ':modules:lang-mustache', "org.elasticsearch.plugin:percolator-client:${version}": ':modules:percolator', ] - configurations.all { - resolutionStrategy.dependencySubstitution { DependencySubstitutions subs -> - projectSubstitutions.each { k,v -> - subs.substitute(subs.module(k)).with(subs.project(v)) + project.afterEvaluate { + configurations.all { + resolutionStrategy.dependencySubstitution { DependencySubstitutions subs -> + projectSubstitutions.each { k,v -> + subs.substitute(subs.module(k)).with(subs.project(v)) + } } } } diff --git a/buildSrc/build.gradle b/buildSrc/build.gradle index fcb504b83cec7..6536c77e58726 100644 --- a/buildSrc/build.gradle +++ b/buildSrc/build.gradle @@ -23,8 +23,8 @@ apply plugin: 'groovy' group = 'org.elasticsearch.gradle' -if (GradleVersion.current() < GradleVersion.version('2.13')) { - throw new GradleException('Gradle 2.13+ is required to build elasticsearch') +if (GradleVersion.current() < GradleVersion.version('3.3')) { + throw new GradleException('Gradle 3.3+ is required to build elasticsearch') } if (JavaVersion.current() < JavaVersion.VERSION_1_8) { @@ -96,23 +96,12 @@ dependencies { compile 'org.apache.rat:apache-rat:0.11' } -// Gradle version-specific options (allows build to run with Gradle 2.13 as well as 2.14+/3.+) -if (GradleVersion.current() == GradleVersion.version("2.13")) { - // ProgressLogger(-Factory) classes are part of the public Gradle API - sourceSets.main.groovy.srcDir 'src/main/gradle-2.13-groovy' +// Gradle 2.14+ removed ProgressLogger(-Factory) classes from the public APIs +// Use logging dependency instead - dependencies { - compile 'ru.vyarus:gradle-animalsniffer-plugin:1.0.1' // last version compatible with Gradle 2.13 - } -} else { - // Gradle 2.14+ removed ProgressLogger(-Factory) classes from the public APIs - // Use logging dependency instead - sourceSets.main.groovy.srcDir 'src/main/gradle-2.14-groovy' - - dependencies { - compileOnly "org.gradle:gradle-logging:${GradleVersion.current().getVersion()}" - compile 'ru.vyarus:gradle-animalsniffer-plugin:1.2.0' // Gradle 2.14 requires a version > 1.0.1 - } +dependencies { + compileOnly "org.gradle:gradle-logging:${GradleVersion.current().getVersion()}" + compile 'ru.vyarus:gradle-animalsniffer-plugin:1.2.0' // Gradle 2.14 requires a version > 1.0.1 } /***************************************************************************** diff --git 
a/buildSrc/src/main/gradle-2.13-groovy/org/elasticsearch/gradle/ProgressLogger.groovy b/buildSrc/src/main/gradle-2.13-groovy/org/elasticsearch/gradle/ProgressLogger.groovy deleted file mode 100644 index 5c02e255a1a8a..0000000000000 --- a/buildSrc/src/main/gradle-2.13-groovy/org/elasticsearch/gradle/ProgressLogger.groovy +++ /dev/null @@ -1,31 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -package org.elasticsearch.gradle - -/** - * Wraps a ProgressLogger so that code in src/main/groovy does not need to - * define imports on Gradle 2.13/2.14+ ProgressLoggers - */ -class ProgressLogger { - @Delegate org.gradle.logging.ProgressLogger progressLogger - - ProgressLogger(org.gradle.logging.ProgressLogger progressLogger) { - this.progressLogger = progressLogger - } -} diff --git a/buildSrc/src/main/gradle-2.13-groovy/org/elasticsearch/gradle/ProgressLoggerFactoryInjection.groovy b/buildSrc/src/main/gradle-2.13-groovy/org/elasticsearch/gradle/ProgressLoggerFactoryInjection.groovy deleted file mode 100644 index 290c4d581d635..0000000000000 --- a/buildSrc/src/main/gradle-2.13-groovy/org/elasticsearch/gradle/ProgressLoggerFactoryInjection.groovy +++ /dev/null @@ -1,35 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -package org.elasticsearch.gradle - -import org.gradle.logging.ProgressLoggerFactory - -import javax.inject.Inject - -/** - * Allows to inject a ProgressLoggerFactory to tasks in src/main/groovy - * without requiring the corresponding import of ProgressLoggerFactory, - * making it compatible with both Gradle 2.13 and 2.14+. 
- */ -trait ProgressLoggerFactoryInjection { - @Inject - ProgressLoggerFactory getProgressLoggerFactory() { - throw new UnsupportedOperationException() - } -} diff --git a/buildSrc/src/main/gradle-2.14-groovy/org/elasticsearch/gradle/ProgressLogger.groovy b/buildSrc/src/main/gradle-2.14-groovy/org/elasticsearch/gradle/ProgressLogger.groovy deleted file mode 100644 index 2c9fab78b4378..0000000000000 --- a/buildSrc/src/main/gradle-2.14-groovy/org/elasticsearch/gradle/ProgressLogger.groovy +++ /dev/null @@ -1,31 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -package org.elasticsearch.gradle - -/** - * Wraps a ProgressLogger so that code in src/main/groovy does not need to - * define imports on Gradle 2.13/2.14+ ProgressLoggers - */ -class ProgressLogger { - @Delegate org.gradle.internal.logging.progress.ProgressLogger progressLogger - - ProgressLogger(org.gradle.internal.logging.progress.ProgressLogger progressLogger) { - this.progressLogger = progressLogger - } -} diff --git a/buildSrc/src/main/gradle-2.14-groovy/org/elasticsearch/gradle/ProgressLoggerFactoryInjection.groovy b/buildSrc/src/main/gradle-2.14-groovy/org/elasticsearch/gradle/ProgressLoggerFactoryInjection.groovy deleted file mode 100644 index 8891d65611a6b..0000000000000 --- a/buildSrc/src/main/gradle-2.14-groovy/org/elasticsearch/gradle/ProgressLoggerFactoryInjection.groovy +++ /dev/null @@ -1,35 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -package org.elasticsearch.gradle - -import org.gradle.internal.logging.progress.ProgressLoggerFactory - -import javax.inject.Inject - -/** - * Allows to inject a ProgressLoggerFactory to tasks in src/main/groovy - * without requiring the corresponding import of ProgressLoggerFactory, - * making it compatible with both Gradle 2.13 and 2.14+. 
- */ -trait ProgressLoggerFactoryInjection { - @Inject - ProgressLoggerFactory getProgressLoggerFactory() { - throw new UnsupportedOperationException() - } -} diff --git a/buildSrc/src/main/groovy/com/carrotsearch/gradle/junit4/RandomizedTestingTask.groovy b/buildSrc/src/main/groovy/com/carrotsearch/gradle/junit4/RandomizedTestingTask.groovy index 8d93301e0c70e..e24c226837d26 100644 --- a/buildSrc/src/main/groovy/com/carrotsearch/gradle/junit4/RandomizedTestingTask.groovy +++ b/buildSrc/src/main/groovy/com/carrotsearch/gradle/junit4/RandomizedTestingTask.groovy @@ -8,7 +8,6 @@ import org.apache.tools.ant.BuildException import org.apache.tools.ant.DefaultLogger import org.apache.tools.ant.RuntimeConfigurable import org.apache.tools.ant.UnknownElement -import org.elasticsearch.gradle.ProgressLoggerFactoryInjection import org.gradle.api.DefaultTask import org.gradle.api.file.FileCollection import org.gradle.api.file.FileTreeElement @@ -20,9 +19,12 @@ import org.gradle.api.tasks.Optional import org.gradle.api.tasks.TaskAction import org.gradle.api.tasks.util.PatternFilterable import org.gradle.api.tasks.util.PatternSet +import org.gradle.internal.logging.progress.ProgressLoggerFactory import org.gradle.util.ConfigureUtil -class RandomizedTestingTask extends DefaultTask implements ProgressLoggerFactoryInjection { +import javax.inject.Inject + +class RandomizedTestingTask extends DefaultTask { // TODO: change to "executable" to match gradle test params? @Optional @@ -92,6 +94,11 @@ class RandomizedTestingTask extends DefaultTask implements ProgressLoggerFactory listenersConfig.listeners.add(new TestReportLogger(logger: logger, config: testLoggingConfig)) } + @Inject + ProgressLoggerFactory getProgressLoggerFactory() { + throw new UnsupportedOperationException() + } + void jvmArgs(Iterable arguments) { jvmArgs.addAll(arguments) } diff --git a/buildSrc/src/main/groovy/com/carrotsearch/gradle/junit4/TestProgressLogger.groovy b/buildSrc/src/main/groovy/com/carrotsearch/gradle/junit4/TestProgressLogger.groovy index a9786935c5651..da25afa938916 100644 --- a/buildSrc/src/main/groovy/com/carrotsearch/gradle/junit4/TestProgressLogger.groovy +++ b/buildSrc/src/main/groovy/com/carrotsearch/gradle/junit4/TestProgressLogger.groovy @@ -25,7 +25,8 @@ import com.carrotsearch.ant.tasks.junit4.events.aggregated.AggregatedStartEvent import com.carrotsearch.ant.tasks.junit4.events.aggregated.AggregatedSuiteResultEvent import com.carrotsearch.ant.tasks.junit4.events.aggregated.AggregatedTestResultEvent import com.carrotsearch.ant.tasks.junit4.listeners.AggregatedEventListener -import org.elasticsearch.gradle.ProgressLogger +import org.gradle.internal.logging.progress.ProgressLogger +import org.gradle.internal.logging.progress.ProgressLoggerFactory import static com.carrotsearch.ant.tasks.junit4.FormattingUtils.formatDurationInSeconds import static com.carrotsearch.ant.tasks.junit4.events.aggregated.TestStatus.ERROR @@ -51,6 +52,8 @@ import static java.lang.Math.max * quick. */ class TestProgressLogger implements AggregatedEventListener { + /** Factory to build a progress logger when testing starts */ + ProgressLoggerFactory factory ProgressLogger progressLogger int totalSuites int totalSlaves @@ -74,17 +77,14 @@ class TestProgressLogger implements AggregatedEventListener { /** Have we finished a whole suite yet? */ volatile boolean suiteFinished = false /* Note that we probably overuse volatile here but it isn't hurting us and - lets us move things around without worying about breaking things. 
*/ - - TestProgressLogger(Map args) { - progressLogger = new ProgressLogger(args.factory.newOperation(TestProgressLogger)) - progressLogger.setDescription('Randomized test runner') - } + lets us move things around without worrying about breaking things. */ @Subscribe void onStart(AggregatedStartEvent e) throws IOException { totalSuites = e.suiteCount totalSlaves = e.slaveCount + progressLogger = factory.newOperation(TestProgressLogger) + progressLogger.setDescription('Randomized test runner') progressLogger.started() progressLogger.progress( "Starting JUnit4 for ${totalSuites} suites on ${totalSlaves} jvms") diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy index 011ac94cf2eb1..dd9d1781ccd9f 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy @@ -122,7 +122,7 @@ class BuildPlugin implements Plugin { } // enforce gradle version - GradleVersion minGradle = GradleVersion.version('2.13') + GradleVersion minGradle = GradleVersion.version('3.3') if (GradleVersion.current() < minGradle) { throw new GradleException("${minGradle} or above is required to build elasticsearch") } @@ -328,46 +328,15 @@ class BuildPlugin implements Plugin { return } - // check each dependency for any transitive deps + // fix deps incorrectly marked as runtime back to compile time deps + // see https://discuss.gradle.org/t/maven-publish-plugin-generated-pom-making-dependency-scope-runtime/7494/4 for (Node depNode : depsNodes.get(0).children()) { - String groupId = depNode.get('groupId').get(0).text() - String artifactId = depNode.get('artifactId').get(0).text() - String version = depNode.get('version').get(0).text() - - // fix deps incorrectly marked as runtime back to compile time deps - // see https://discuss.gradle.org/t/maven-publish-plugin-generated-pom-making-dependency-scope-runtime/7494/4 boolean isCompileDep = project.configurations.compile.allDependencies.find { dep -> dep.name == depNode.artifactId.text() } if (depNode.scope.text() == 'runtime' && isCompileDep) { depNode.scope*.value = 'compile' } - - // collect the transitive deps now that we know what this dependency is - String depConfig = transitiveDepConfigName(groupId, artifactId, version) - Configuration configuration = project.configurations.findByName(depConfig) - if (configuration == null) { - continue // we did not make this dep non-transitive - } - Set artifacts = configuration.resolvedConfiguration.resolvedArtifacts - if (artifacts.size() <= 1) { - // this dep has no transitive deps (or the only artifact is itself) - continue - } - - // we now know we have something to exclude, so add exclusions for all artifacts except the main one - Node exclusions = depNode.appendNode('exclusions') - for (ResolvedArtifact artifact : artifacts) { - ModuleVersionIdentifier moduleVersionIdentifier = artifact.moduleVersion.id; - String depGroupId = moduleVersionIdentifier.group - String depArtifactId = moduleVersionIdentifier.name - // add exclusions for all artifacts except the main one - if (depGroupId != groupId || depArtifactId != artifactId) { - Node exclusion = exclusions.appendNode('exclusion') - exclusion.appendNode('groupId', depGroupId) - exclusion.appendNode('artifactId', depArtifactId) - } - } } } } diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/plugin/PluginPropertiesExtension.groovy 
b/buildSrc/src/main/groovy/org/elasticsearch/gradle/plugin/PluginPropertiesExtension.groovy index 353b8127545bf..1251be265da9a 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/plugin/PluginPropertiesExtension.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/plugin/PluginPropertiesExtension.groovy @@ -39,6 +39,9 @@ class PluginPropertiesExtension { @Input String classname + @Input + boolean hasNativeController = false + /** Indicates whether the plugin jar should be made available for the transport client. */ @Input boolean hasClientJar = false diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/plugin/PluginPropertiesTask.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/plugin/PluginPropertiesTask.groovy index 94bc0ba3e750b..91efe247a016b 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/plugin/PluginPropertiesTask.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/plugin/PluginPropertiesTask.groovy @@ -79,7 +79,8 @@ class PluginPropertiesTask extends Copy { 'version': stringSnap(extension.version), 'elasticsearchVersion': stringSnap(VersionProperties.elasticsearch), 'javaVersion': project.targetCompatibility as String, - 'classname': extension.classname + 'classname': extension.classname, + 'hasNativeController': extension.hasNativeController ] } } diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/ClusterConfiguration.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/ClusterConfiguration.groovy index 8d65f8c0d60fe..4a88473525983 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/ClusterConfiguration.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/ClusterConfiguration.groovy @@ -125,6 +125,8 @@ class ClusterConfiguration { Map settings = new HashMap<>() + Map keystoreSettings = new HashMap<>() + // map from destination path, to source file Map extraConfigFiles = new HashMap<>() @@ -144,6 +146,11 @@ class ClusterConfiguration { settings.put(name, value) } + @Input + void keystoreSetting(String name, String value) { + keystoreSettings.put(name, value) + } + @Input void plugin(String path) { Project pluginProject = project.project(path) diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/ClusterFormationTasks.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/ClusterFormationTasks.groovy index c3dff77dfd496..e8061b02f3d08 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/ClusterFormationTasks.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/ClusterFormationTasks.groovy @@ -38,6 +38,7 @@ import org.gradle.api.tasks.Copy import org.gradle.api.tasks.Delete import org.gradle.api.tasks.Exec +import java.nio.charset.StandardCharsets import java.nio.file.Paths import java.util.concurrent.TimeUnit @@ -157,10 +158,14 @@ class ClusterFormationTasks { node.cwd.mkdirs() } } + setup = configureCheckPreviousTask(taskName(prefix, node, 'checkPrevious'), project, setup, node) setup = configureStopTask(taskName(prefix, node, 'stopPrevious'), project, setup, node) setup = configureExtractTask(taskName(prefix, node, 'extract'), project, setup, node, configuration) setup = configureWriteConfigTask(taskName(prefix, node, 'configure'), project, setup, node, seedNode) + setup = configureCreateKeystoreTask(taskName(prefix, node, 'createKeystore'), project, setup, node) + setup = configureAddKeystoreSettingTasks(prefix, project, setup, node) + if (node.config.plugins.isEmpty() == false) { if (node.nodeVersion == 
VersionProperties.elasticsearch) { setup = configureCopyPluginsTask(taskName(prefix, node, 'copyPlugins'), project, setup, node) @@ -303,6 +308,33 @@ class ClusterFormationTasks { } } + /** Adds a task to create keystore */ + static Task configureCreateKeystoreTask(String name, Project project, Task setup, NodeInfo node) { + if (node.config.keystoreSettings.isEmpty()) { + return setup + } else { + File esKeystoreUtil = Paths.get(node.homeDir.toString(), "bin/" + "elasticsearch-keystore").toFile() + return configureExecTask(name, project, setup, node, esKeystoreUtil, 'create') + } + } + + /** Adds tasks to add settings to the keystore */ + static Task configureAddKeystoreSettingTasks(String parent, Project project, Task setup, NodeInfo node) { + Map kvs = node.config.keystoreSettings + File esKeystoreUtil = Paths.get(node.homeDir.toString(), "bin/" + "elasticsearch-keystore").toFile() + Task parentTask = setup + for (Map.Entry entry in kvs) { + String key = entry.getKey() + String name = taskName(parent, node, 'addToKeystore#' + key) + Task t = configureExecTask(name, project, parentTask, node, esKeystoreUtil, 'add', key, '-x') + t.doFirst { + standardInput = new ByteArrayInputStream(entry.getValue().getBytes(StandardCharsets.UTF_8)) + } + parentTask = t + } + return parentTask + } + static Task configureExtraConfigFilesTask(String name, Project project, Task setup, NodeInfo node) { if (node.config.extraConfigFiles.isEmpty()) { return setup diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/RestIntegTestTask.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/RestIntegTestTask.groovy index 075e8129e6fa8..98ee91e37a8ab 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/RestIntegTestTask.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/RestIntegTestTask.groovy @@ -22,10 +22,15 @@ import com.carrotsearch.gradle.junit4.RandomizedTestingTask import org.elasticsearch.gradle.BuildPlugin import org.gradle.api.DefaultTask import org.gradle.api.Task +import org.gradle.api.execution.TaskExecutionAdapter import org.gradle.api.internal.tasks.options.Option import org.gradle.api.plugins.JavaBasePlugin import org.gradle.api.tasks.Input -import org.gradle.util.ConfigureUtil +import org.gradle.api.tasks.TaskState + +import java.nio.charset.StandardCharsets +import java.nio.file.Files +import java.util.stream.Stream /** * A wrapper task around setting up a cluster and running rest tests. @@ -71,6 +76,24 @@ public class RestIntegTestTask extends DefaultTask { // both as separate sysprops runner.systemProperty('tests.cluster', "${-> nodes[0].transportUri()}") + // dump errors and warnings from cluster log on failure + TaskExecutionAdapter logDumpListener = new TaskExecutionAdapter() { + @Override + void afterExecute(Task task, TaskState state) { + if (state.failure != null) { + for (NodeInfo nodeInfo : nodes) { + printLogExcerpt(nodeInfo) + } + } + } + } + runner.doFirst { + project.gradle.addListener(logDumpListener) + } + runner.doLast { + project.gradle.removeListener(logDumpListener) + } + // copy the rest spec/tests into the test resources RestSpecHack.configureDependencies(project) project.afterEvaluate { @@ -126,4 +149,42 @@ public class RestIntegTestTask extends DefaultTask { public Task mustRunAfter(Object... tasks) { clusterInit.mustRunAfter(tasks) } + + /** Print out an excerpt of the log from the given node. 
*/ + protected static void printLogExcerpt(NodeInfo nodeInfo) { + File logFile = new File(nodeInfo.homeDir, "logs/${nodeInfo.clusterName}.log") + println("\nCluster ${nodeInfo.clusterName} - node ${nodeInfo.nodeNum} log excerpt:") + println("(full log at ${logFile})") + println('-----------------------------------------') + Stream stream = Files.lines(logFile.toPath(), StandardCharsets.UTF_8) + try { + boolean inStartup = true + boolean inExcerpt = false + int linesSkipped = 0 + for (String line : stream) { + if (line.startsWith("[")) { + inExcerpt = false // clear with the next log message + } + if (line =~ /(\[WARN\])|(\[ERROR\])/) { + inExcerpt = true // show warnings and errors + } + if (inStartup || inExcerpt) { + if (linesSkipped != 0) { + println("... SKIPPED ${linesSkipped} LINES ...") + } + println(line) + linesSkipped = 0 + } else { + ++linesSkipped + } + if (line =~ /recovered \[\d+\] indices into cluster_state/) { + inStartup = false + } + } + } finally { + stream.close() + } + println('=========================================') + + } } diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/vagrant/TapLoggerOutputStream.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/vagrant/TapLoggerOutputStream.groovy index 85fd433bc771f..e15759a1fe588 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/vagrant/TapLoggerOutputStream.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/vagrant/TapLoggerOutputStream.groovy @@ -19,10 +19,9 @@ package org.elasticsearch.gradle.vagrant import com.carrotsearch.gradle.junit4.LoggingOutputStream -import groovy.transform.PackageScope -import org.elasticsearch.gradle.ProgressLogger import org.gradle.api.GradleScriptException import org.gradle.api.logging.Logger +import org.gradle.internal.logging.progress.ProgressLogger import java.util.regex.Matcher @@ -48,7 +47,7 @@ public class TapLoggerOutputStream extends LoggingOutputStream { TapLoggerOutputStream(Map args) { logger = args.logger - progressLogger = new ProgressLogger(args.factory.newOperation(VagrantLoggerOutputStream)) + progressLogger = args.factory.newOperation(VagrantLoggerOutputStream) progressLogger.setDescription("TAP output for `${args.command}`") } diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/vagrant/VagrantCommandTask.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/vagrant/VagrantCommandTask.groovy index cd4d4bf87a568..abc6af9e09d97 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/vagrant/VagrantCommandTask.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/vagrant/VagrantCommandTask.groovy @@ -19,15 +19,17 @@ package org.elasticsearch.gradle.vagrant import org.apache.commons.io.output.TeeOutputStream -import org.elasticsearch.gradle.ProgressLoggerFactoryInjection import org.elasticsearch.gradle.LoggedExec import org.gradle.api.tasks.Input +import org.gradle.internal.logging.progress.ProgressLoggerFactory + +import javax.inject.Inject /** * Runs a vagrant command. Pretty much like Exec task but with a nicer output * formatter and defaults to `vagrant` as first part of commandLine. 
*/ -public class VagrantCommandTask extends LoggedExec implements ProgressLoggerFactoryInjection { +public class VagrantCommandTask extends LoggedExec { @Input String boxName @@ -47,6 +49,11 @@ public class VagrantCommandTask extends LoggedExec implements ProgressLoggerFact } } + @Inject + ProgressLoggerFactory getProgressLoggerFactory() { + throw new UnsupportedOperationException() + } + protected OutputStream createLoggerOutputStream() { return new VagrantLoggerOutputStream( command: commandLine.join(' '), diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/vagrant/VagrantLoggerOutputStream.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/vagrant/VagrantLoggerOutputStream.groovy index de6c5a36db945..e899c0171298b 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/vagrant/VagrantLoggerOutputStream.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/vagrant/VagrantLoggerOutputStream.groovy @@ -19,7 +19,7 @@ package org.elasticsearch.gradle.vagrant import com.carrotsearch.gradle.junit4.LoggingOutputStream -import org.elasticsearch.gradle.ProgressLogger +import org.gradle.internal.logging.progress.ProgressLogger /** * Adapts an OutputStream being written to by vagrant into a ProcessLogger. It @@ -53,7 +53,7 @@ public class VagrantLoggerOutputStream extends LoggingOutputStream { private String heading = '' VagrantLoggerOutputStream(Map args) { - progressLogger = new ProgressLogger(args.factory.newOperation(VagrantLoggerOutputStream)) + progressLogger = args.factory.newOperation(VagrantLoggerOutputStream) progressLogger.setDescription("Vagrant output for `$args.command`") squashedPrefix = args.squashedPrefix } diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/vagrant/VagrantTestPlugin.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/vagrant/VagrantTestPlugin.groovy index f5974d7c0f8e1..66574f5f289e8 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/vagrant/VagrantTestPlugin.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/vagrant/VagrantTestPlugin.groovy @@ -217,7 +217,7 @@ class VagrantTestPlugin implements Plugin { // Now we iterate over dependencies of the bats configuration. When a project dependency is found, // we bring back its own archives, test files or test utils. 
project.afterEvaluate { - project.configurations.bats.dependencies.findAll {it.configuration == BATS }.each { d -> + project.configurations.bats.dependencies.findAll {it.targetConfiguration == BATS }.each { d -> if (d instanceof DefaultProjectDependency) { DefaultProjectDependency externalBatsDependency = (DefaultProjectDependency) d Project externalBatsProject = externalBatsDependency.dependencyProject diff --git a/buildSrc/src/main/resources/checkstyle_suppressions.xml b/buildSrc/src/main/resources/checkstyle_suppressions.xml index 659d31186f126..c95ad03f9ac60 100644 --- a/buildSrc/src/main/resources/checkstyle_suppressions.xml +++ b/buildSrc/src/main/resources/checkstyle_suppressions.xml @@ -491,12 +491,10 @@ - - @@ -607,7 +605,6 @@ - @@ -1567,18 +1564,15 @@ - - - @@ -2310,7 +2304,6 @@ - @@ -2391,7 +2384,6 @@ - @@ -3012,7 +3004,6 @@ - @@ -3845,7 +3836,6 @@ - @@ -3945,7 +3935,6 @@ - @@ -3954,13 +3943,9 @@ - - - - diff --git a/buildSrc/src/main/resources/forbidden/es-all-signatures.txt b/buildSrc/src/main/resources/forbidden/es-all-signatures.txt index a4666de4fa1b4..64ae6784f15f2 100644 --- a/buildSrc/src/main/resources/forbidden/es-all-signatures.txt +++ b/buildSrc/src/main/resources/forbidden/es-all-signatures.txt @@ -44,4 +44,13 @@ java.net.URLConnection#getInputStream() java.net.Socket#connect(java.net.SocketAddress) java.net.Socket#connect(java.net.SocketAddress, int) java.nio.channels.SocketChannel#open(java.net.SocketAddress) -java.nio.channels.SocketChannel#connect(java.net.SocketAddress) \ No newline at end of file +java.nio.channels.SocketChannel#connect(java.net.SocketAddress) + +# This method is misleading, and uses lenient boolean parsing under the hood. If you intend to parse +# a system property as a boolean, use +# org.elasticsearch.common.Booleans#parseBoolean(java.lang.String) on the result of +# java.lang.SystemProperty#getProperty(java.lang.String) instead. If you were not intending to parse +# a system property as a boolean, but instead parse a string to a boolean, use +# org.elasticsearch.common.Booleans#parseBoolean(java.lang.String) directly on the string. +@defaultMessage use org.elasticsearch.common.Booleans#parseBoolean(java.lang.String) +java.lang.Boolean#getBoolean(java.lang.String) diff --git a/buildSrc/src/main/resources/plugin-descriptor.properties b/buildSrc/src/main/resources/plugin-descriptor.properties index ebde46d326ba9..67c6ee39968cd 100644 --- a/buildSrc/src/main/resources/plugin-descriptor.properties +++ b/buildSrc/src/main/resources/plugin-descriptor.properties @@ -30,11 +30,15 @@ name=${name} # 'classname': the name of the class to load, fully-qualified. 
classname=${classname} # -# 'java.version' version of java the code is built against +# 'java.version': version of java the code is built against # use the system property java.specification.version # version string must be a sequence of nonnegative decimal integers # separated by "."'s and may have leading zeros java.version=${javaVersion} # -# 'elasticsearch.version' version of elasticsearch compiled against +# 'elasticsearch.version': version of elasticsearch compiled against elasticsearch.version=${elasticsearchVersion} +### optional elements for plugins: +# +# 'has.native.controller': whether or not the plugin has a native controller +has.native.controller=${hasNativeController} diff --git a/buildSrc/version.properties b/buildSrc/version.properties index 77157d2f3c85d..cea96db283d15 100644 --- a/buildSrc/version.properties +++ b/buildSrc/version.properties @@ -1,6 +1,6 @@ # When updating elasticsearch, please update 'rest' version in core/src/main/resources/org/elasticsearch/bootstrap/test-framework.policy elasticsearch = 6.0.0-alpha1 -lucene = 6.5.0-snapshot-d00c5ca +lucene = 6.5.0 # optional dependencies spatial4j = 0.6 @@ -10,7 +10,7 @@ snakeyaml = 1.15 # When updating log4j, please update also docs/java-api/index.asciidoc log4j = 2.7 slf4j = 1.6.2 -jna = 4.2.2 +jna = 4.4.0 # test dependencies randomizedrunner = 2.5.0 diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/DeleteDocumentationIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/DeleteDocumentationIT.java index ddb96cdc00ea9..00c19019f47e7 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/DeleteDocumentationIT.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/DeleteDocumentationIT.java @@ -56,35 +56,35 @@ public class DeleteDocumentationIT extends ESRestHighLevelClientTestCase { public void testDelete() throws IOException { RestHighLevelClient client = highLevelClient(); - // tag::delete-request[] + // tag::delete-request DeleteRequest request = new DeleteRequest( "index", // <1> "type", // <2> "id"); // <3> - // end::delete-request[] + // end::delete-request - // tag::delete-request-props[] + // tag::delete-request-props request.timeout(TimeValue.timeValueSeconds(1)); // <1> request.timeout("1s"); // <2> request.setRefreshPolicy(WriteRequest.RefreshPolicy.WAIT_UNTIL); // <3> request.setRefreshPolicy("wait_for"); // <4> request.version(2); // <5> request.versionType(VersionType.EXTERNAL); // <6> - // end::delete-request-props[] + // end::delete-request-props - // tag::delete-execute[] + // tag::delete-execute DeleteResponse response = client.delete(request); - // end::delete-execute[] + // end::delete-execute try { - // tag::delete-notfound[] + // tag::delete-notfound if (response.getResult().equals(DocWriteResponse.Result.NOT_FOUND)) { throw new Exception("Can't find document to be removed"); // <1> } - // end::delete-notfound[] + // end::delete-notfound } catch (Exception ignored) { } - // tag::delete-execute-async[] + // tag::delete-execute-async client.deleteAsync(request, new ActionListener() { @Override public void onResponse(DeleteResponse deleteResponse) { @@ -96,9 +96,9 @@ public void onFailure(Exception e) { // <2> } }); - // end::delete-execute-async[] + // end::delete-execute-async - // tag::delete-conflict[] + // tag::delete-conflict try { client.delete(request); } catch (ElasticsearchException exception) { @@ -106,7 +106,7 @@ public void onFailure(Exception e) { 
// <1> } } - // end::delete-conflict[] + // end::delete-conflict } } diff --git a/core/licenses/jna-4.2.2.jar.sha1 b/core/licenses/jna-4.2.2.jar.sha1 deleted file mode 100644 index 8b1acbbe5d7ab..0000000000000 --- a/core/licenses/jna-4.2.2.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -5012450aee579c3118ff09461d5ce210e0cdc2a9 \ No newline at end of file diff --git a/core/licenses/jna-4.4.0.jar.sha1 b/core/licenses/jna-4.4.0.jar.sha1 new file mode 100644 index 0000000000000..9655b2c92e8f8 --- /dev/null +++ b/core/licenses/jna-4.4.0.jar.sha1 @@ -0,0 +1 @@ +cb208278274bf12ebdb56c61bd7407e6f774d65a \ No newline at end of file diff --git a/core/licenses/lucene-analyzers-common-6.5.0-snapshot-d00c5ca.jar.sha1 b/core/licenses/lucene-analyzers-common-6.5.0-snapshot-d00c5ca.jar.sha1 deleted file mode 100644 index 320a300a765d6..0000000000000 --- a/core/licenses/lucene-analyzers-common-6.5.0-snapshot-d00c5ca.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -9ad2a7bd252cbdb76ac121287e670d75f4db2cd3 \ No newline at end of file diff --git a/core/licenses/lucene-analyzers-common-6.5.0.jar.sha1 b/core/licenses/lucene-analyzers-common-6.5.0.jar.sha1 new file mode 100644 index 0000000000000..77d21a23774e3 --- /dev/null +++ b/core/licenses/lucene-analyzers-common-6.5.0.jar.sha1 @@ -0,0 +1 @@ +3989779b05ecd0ace6affe19223b1c27156604f1 \ No newline at end of file diff --git a/core/licenses/lucene-backward-codecs-6.5.0-snapshot-d00c5ca.jar.sha1 b/core/licenses/lucene-backward-codecs-6.5.0-snapshot-d00c5ca.jar.sha1 deleted file mode 100644 index c313a86e27168..0000000000000 --- a/core/licenses/lucene-backward-codecs-6.5.0-snapshot-d00c5ca.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -c6a940eff8a87df40262b752ed7b135e448b7873 \ No newline at end of file diff --git a/core/licenses/lucene-backward-codecs-6.5.0.jar.sha1 b/core/licenses/lucene-backward-codecs-6.5.0.jar.sha1 new file mode 100644 index 0000000000000..9eaff80ac08e0 --- /dev/null +++ b/core/licenses/lucene-backward-codecs-6.5.0.jar.sha1 @@ -0,0 +1 @@ +6a8660e7133f357ef40d9cac26316ccd9937a2eb \ No newline at end of file diff --git a/core/licenses/lucene-core-6.5.0-snapshot-d00c5ca.jar.sha1 b/core/licenses/lucene-core-6.5.0-snapshot-d00c5ca.jar.sha1 deleted file mode 100644 index 8e2ce3f31ac66..0000000000000 --- a/core/licenses/lucene-core-6.5.0-snapshot-d00c5ca.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -6ef5ad88141760c00ea041da1535f3ffc364d67d \ No newline at end of file diff --git a/core/licenses/lucene-core-6.5.0.jar.sha1 b/core/licenses/lucene-core-6.5.0.jar.sha1 new file mode 100644 index 0000000000000..7af91ec15bf1e --- /dev/null +++ b/core/licenses/lucene-core-6.5.0.jar.sha1 @@ -0,0 +1 @@ +ff176c9bde4228b43827849f5d2ff2e2717e3297 \ No newline at end of file diff --git a/core/licenses/lucene-grouping-6.5.0-snapshot-d00c5ca.jar.sha1 b/core/licenses/lucene-grouping-6.5.0-snapshot-d00c5ca.jar.sha1 deleted file mode 100644 index b5e793e4d3995..0000000000000 --- a/core/licenses/lucene-grouping-6.5.0-snapshot-d00c5ca.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -f15775571fb5762dfc92e00c3909cb8db8ff1d53 \ No newline at end of file diff --git a/core/licenses/lucene-grouping-6.5.0.jar.sha1 b/core/licenses/lucene-grouping-6.5.0.jar.sha1 new file mode 100644 index 0000000000000..08ccc2cd08690 --- /dev/null +++ b/core/licenses/lucene-grouping-6.5.0.jar.sha1 @@ -0,0 +1 @@ +10d2e5b36f460527ac9b948be0ec3077bde5b0ca \ No newline at end of file diff --git a/core/licenses/lucene-highlighter-6.5.0-snapshot-d00c5ca.jar.sha1 b/core/licenses/lucene-highlighter-6.5.0-snapshot-d00c5ca.jar.sha1 deleted file mode 
100644 index c90084cc1cb03..0000000000000 --- a/core/licenses/lucene-highlighter-6.5.0-snapshot-d00c5ca.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -051d793aa64257beead4ccc7432eb5df81d17f23 \ No newline at end of file diff --git a/core/licenses/lucene-highlighter-6.5.0.jar.sha1 b/core/licenses/lucene-highlighter-6.5.0.jar.sha1 new file mode 100644 index 0000000000000..a8069723f1640 --- /dev/null +++ b/core/licenses/lucene-highlighter-6.5.0.jar.sha1 @@ -0,0 +1 @@ +0019bb6a631ea0123e8e553b0510fa81c9d3c3eb \ No newline at end of file diff --git a/core/licenses/lucene-join-6.5.0-snapshot-d00c5ca.jar.sha1 b/core/licenses/lucene-join-6.5.0-snapshot-d00c5ca.jar.sha1 deleted file mode 100644 index b6dfc376fbbfc..0000000000000 --- a/core/licenses/lucene-join-6.5.0-snapshot-d00c5ca.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -5bc4cba55670c14ea812ff5de65edad4c312fdf6 \ No newline at end of file diff --git a/core/licenses/lucene-join-6.5.0.jar.sha1 b/core/licenses/lucene-join-6.5.0.jar.sha1 new file mode 100644 index 0000000000000..cbad6199d76b5 --- /dev/null +++ b/core/licenses/lucene-join-6.5.0.jar.sha1 @@ -0,0 +1 @@ +dad85baba266793b9ceb80a9b08c4ee9838e09df \ No newline at end of file diff --git a/core/licenses/lucene-memory-6.5.0-snapshot-d00c5ca.jar.sha1 b/core/licenses/lucene-memory-6.5.0-snapshot-d00c5ca.jar.sha1 deleted file mode 100644 index cfc1d044ca766..0000000000000 --- a/core/licenses/lucene-memory-6.5.0-snapshot-d00c5ca.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -68cf08bcd8414a57493debf3a6a509d78a9abb56 \ No newline at end of file diff --git a/core/licenses/lucene-memory-6.5.0.jar.sha1 b/core/licenses/lucene-memory-6.5.0.jar.sha1 new file mode 100644 index 0000000000000..5f22c0d9cfe3b --- /dev/null +++ b/core/licenses/lucene-memory-6.5.0.jar.sha1 @@ -0,0 +1 @@ +938f9f7efe8a403fd57c99aedd75d040d9caa896 \ No newline at end of file diff --git a/core/licenses/lucene-misc-6.5.0-snapshot-d00c5ca.jar.sha1 b/core/licenses/lucene-misc-6.5.0-snapshot-d00c5ca.jar.sha1 deleted file mode 100644 index 938b26b5a4d6b..0000000000000 --- a/core/licenses/lucene-misc-6.5.0-snapshot-d00c5ca.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -f5d90756dbeda1218d723b7bea0799c88d621adb \ No newline at end of file diff --git a/core/licenses/lucene-misc-6.5.0.jar.sha1 b/core/licenses/lucene-misc-6.5.0.jar.sha1 new file mode 100644 index 0000000000000..2b405d7f16aa9 --- /dev/null +++ b/core/licenses/lucene-misc-6.5.0.jar.sha1 @@ -0,0 +1 @@ +afdff39ecb30f6e2c6f056a5bdfcb13d928a25af \ No newline at end of file diff --git a/core/licenses/lucene-queries-6.5.0-snapshot-d00c5ca.jar.sha1 b/core/licenses/lucene-queries-6.5.0-snapshot-d00c5ca.jar.sha1 deleted file mode 100644 index 31dcaaaaabc6e..0000000000000 --- a/core/licenses/lucene-queries-6.5.0-snapshot-d00c5ca.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -9298e7d1ed96e7beb63d7ccdce1a4502eb0fe484 \ No newline at end of file diff --git a/core/licenses/lucene-queries-6.5.0.jar.sha1 b/core/licenses/lucene-queries-6.5.0.jar.sha1 new file mode 100644 index 0000000000000..9a046ce204f08 --- /dev/null +++ b/core/licenses/lucene-queries-6.5.0.jar.sha1 @@ -0,0 +1 @@ +8e3971a008070712d57b59cf1f7b44c0d9d3df25 \ No newline at end of file diff --git a/core/licenses/lucene-queryparser-6.5.0-snapshot-d00c5ca.jar.sha1 b/core/licenses/lucene-queryparser-6.5.0-snapshot-d00c5ca.jar.sha1 deleted file mode 100644 index 346d897a4cf5c..0000000000000 --- a/core/licenses/lucene-queryparser-6.5.0-snapshot-d00c5ca.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -918de18963607af69dff38e4773c0bde89c73ae3 \ No newline at end of file diff --git 
a/core/licenses/lucene-queryparser-6.5.0.jar.sha1 b/core/licenses/lucene-queryparser-6.5.0.jar.sha1 new file mode 100644 index 0000000000000..3136885ab92ca --- /dev/null +++ b/core/licenses/lucene-queryparser-6.5.0.jar.sha1 @@ -0,0 +1 @@ +225b904edf91ccdffffa398e1924ebadd5677c09 \ No newline at end of file diff --git a/core/licenses/lucene-sandbox-6.5.0-snapshot-d00c5ca.jar.sha1 b/core/licenses/lucene-sandbox-6.5.0-snapshot-d00c5ca.jar.sha1 deleted file mode 100644 index d7e3a49e9eeb5..0000000000000 --- a/core/licenses/lucene-sandbox-6.5.0-snapshot-d00c5ca.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -a311a7d9f3e9a8fbf3a367a4e2731f9d4579732b \ No newline at end of file diff --git a/core/licenses/lucene-sandbox-6.5.0.jar.sha1 b/core/licenses/lucene-sandbox-6.5.0.jar.sha1 new file mode 100644 index 0000000000000..e3787e336dfbf --- /dev/null +++ b/core/licenses/lucene-sandbox-6.5.0.jar.sha1 @@ -0,0 +1 @@ +5c994fc5dc4f37133a861571211303d81c5d51ff \ No newline at end of file diff --git a/core/licenses/lucene-spatial-6.5.0-snapshot-d00c5ca.jar.sha1 b/core/licenses/lucene-spatial-6.5.0-snapshot-d00c5ca.jar.sha1 deleted file mode 100644 index 21062261226a5..0000000000000 --- a/core/licenses/lucene-spatial-6.5.0-snapshot-d00c5ca.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -693bc4cb0e2e4465e0173c67ed0818071c4b460b \ No newline at end of file diff --git a/core/licenses/lucene-spatial-6.5.0.jar.sha1 b/core/licenses/lucene-spatial-6.5.0.jar.sha1 new file mode 100644 index 0000000000000..cbadbfc42d75f --- /dev/null +++ b/core/licenses/lucene-spatial-6.5.0.jar.sha1 @@ -0,0 +1 @@ +553b7b13bef994f14076a85557df03cad67322e9 \ No newline at end of file diff --git a/core/licenses/lucene-spatial-extras-6.5.0-snapshot-d00c5ca.jar.sha1 b/core/licenses/lucene-spatial-extras-6.5.0-snapshot-d00c5ca.jar.sha1 deleted file mode 100644 index 8eae8b0675d9a..0000000000000 --- a/core/licenses/lucene-spatial-extras-6.5.0-snapshot-d00c5ca.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -0326f31e63c76d476c23488c7354265cf915350f \ No newline at end of file diff --git a/core/licenses/lucene-spatial-extras-6.5.0.jar.sha1 b/core/licenses/lucene-spatial-extras-6.5.0.jar.sha1 new file mode 100644 index 0000000000000..f2ad71855f292 --- /dev/null +++ b/core/licenses/lucene-spatial-extras-6.5.0.jar.sha1 @@ -0,0 +1 @@ +73deae791d861820974600705ba06e9f801cbe56 \ No newline at end of file diff --git a/core/licenses/lucene-spatial3d-6.5.0-snapshot-d00c5ca.jar.sha1 b/core/licenses/lucene-spatial3d-6.5.0-snapshot-d00c5ca.jar.sha1 deleted file mode 100644 index 8ce95f97f1154..0000000000000 --- a/core/licenses/lucene-spatial3d-6.5.0-snapshot-d00c5ca.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -69a3a86e9d045f872408793ea411d49e0c577268 \ No newline at end of file diff --git a/core/licenses/lucene-spatial3d-6.5.0.jar.sha1 b/core/licenses/lucene-spatial3d-6.5.0.jar.sha1 new file mode 100644 index 0000000000000..8fca9ac1ebc68 --- /dev/null +++ b/core/licenses/lucene-spatial3d-6.5.0.jar.sha1 @@ -0,0 +1 @@ +c2aad69500dac79338ef45f570cab47bec3d2724 \ No newline at end of file diff --git a/core/licenses/lucene-suggest-6.5.0-snapshot-d00c5ca.jar.sha1 b/core/licenses/lucene-suggest-6.5.0-snapshot-d00c5ca.jar.sha1 deleted file mode 100644 index 2941229bbe00c..0000000000000 --- a/core/licenses/lucene-suggest-6.5.0-snapshot-d00c5ca.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -fabc05ca175150171cf60370877276b933716bcd \ No newline at end of file diff --git a/core/licenses/lucene-suggest-6.5.0.jar.sha1 b/core/licenses/lucene-suggest-6.5.0.jar.sha1 new file mode 100644 index 
0000000000000..62764fbbc3270 --- /dev/null +++ b/core/licenses/lucene-suggest-6.5.0.jar.sha1 @@ -0,0 +1 @@ +acf211f2bf901dfc8155a46c5a42c5650edf74ef \ No newline at end of file diff --git a/core/src/main/java/org/apache/lucene/index/OneMergeHelper.java b/core/src/main/java/org/apache/lucene/index/OneMergeHelper.java index 99ef7f4dd7fef..f8b8c6178225b 100644 --- a/core/src/main/java/org/apache/lucene/index/OneMergeHelper.java +++ b/core/src/main/java/org/apache/lucene/index/OneMergeHelper.java @@ -19,6 +19,8 @@ package org.apache.lucene.index; +import java.io.IOException; + /** * Allows pkg private access */ @@ -27,4 +29,33 @@ private OneMergeHelper() {} public static String getSegmentName(MergePolicy.OneMerge merge) { return merge.info != null ? merge.info.info.name : "_na_"; } + + /** + * The current MB per second rate limit for this merge. + **/ + public static double getMbPerSec(Thread thread, MergePolicy.OneMerge merge) { + if (thread instanceof ConcurrentMergeScheduler.MergeThread) { + return ((ConcurrentMergeScheduler.MergeThread) thread).rateLimiter.getMBPerSec(); + } + assert false: "this is not merge thread"; + return Double.POSITIVE_INFINITY; + } + + /** + * Returns total bytes written by this merge. + **/ + public static long getTotalBytesWritten(Thread thread, + MergePolicy.OneMerge merge) throws IOException { + /** + * TODO: The number of bytes written during the merge should be accessible in OneMerge. + */ + if (thread instanceof ConcurrentMergeScheduler.MergeThread) { + return ((ConcurrentMergeScheduler.MergeThread) thread).rateLimiter + .getTotalBytesWritten(); + } + assert false: "this is not merge thread"; + return merge.totalBytesSize(); + } + + } diff --git a/core/src/main/java/org/apache/lucene/search/grouping/CollapseTopFieldDocs.java b/core/src/main/java/org/apache/lucene/search/grouping/CollapseTopFieldDocs.java index 169a89edbcf63..b4d3c82343957 100644 --- a/core/src/main/java/org/apache/lucene/search/grouping/CollapseTopFieldDocs.java +++ b/core/src/main/java/org/apache/lucene/search/grouping/CollapseTopFieldDocs.java @@ -26,7 +26,6 @@ import org.apache.lucene.search.TopFieldDocs; import org.apache.lucene.util.PriorityQueue; -import java.io.IOException; import java.util.ArrayList; import java.util.HashSet; import java.util.List; @@ -35,7 +34,7 @@ /** * Represents hits returned by {@link CollapsingTopDocsCollector#getTopDocs()}. 
*/ -public class CollapseTopFieldDocs extends TopFieldDocs { +public final class CollapseTopFieldDocs extends TopFieldDocs { /** The field used for collapsing **/ public final String field; /** The collapse value for each top doc */ @@ -49,22 +48,59 @@ public CollapseTopFieldDocs(String field, int totalHits, ScoreDoc[] scoreDocs, } // Refers to one hit: - private static class ShardRef { + private static final class ShardRef { // Which shard (index into shardHits[]): final int shardIndex; + // True if we should use the incoming ScoreDoc.shardIndex for sort order + final boolean useScoreDocIndex; + // Which hit within the shard: int hitIndex; - ShardRef(int shardIndex) { + ShardRef(int shardIndex, boolean useScoreDocIndex) { this.shardIndex = shardIndex; + this.useScoreDocIndex = useScoreDocIndex; } @Override public String toString() { return "ShardRef(shardIndex=" + shardIndex + " hitIndex=" + hitIndex + ")"; } - }; + + int getShardIndex(ScoreDoc scoreDoc) { + if (useScoreDocIndex) { + if (scoreDoc.shardIndex == -1) { + throw new IllegalArgumentException("setShardIndex is false but TopDocs[" + + shardIndex + "].scoreDocs[" + hitIndex + "] is not set"); + } + return scoreDoc.shardIndex; + } else { + // NOTE: we don't assert that shardIndex is -1 here, because caller could in fact have set it but asked us to ignore it now + return shardIndex; + } + } + } + + /** + * if we need to tie-break since score / sort value are the same we first compare shard index (lower shard wins) + * and then iff shard index is the same we use the hit index. + */ + static boolean tieBreakLessThan(ShardRef first, ScoreDoc firstDoc, ShardRef second, ScoreDoc secondDoc) { + final int firstShardIndex = first.getShardIndex(firstDoc); + final int secondShardIndex = second.getShardIndex(secondDoc); + // Tie break: earlier shard wins + if (firstShardIndex < secondShardIndex) { + return true; + } else if (firstShardIndex > secondShardIndex) { + return false; + } else { + // Tie break in same shard: resolve however the + // shard had resolved it: + assert first.hitIndex != second.hitIndex; + return first.hitIndex < second.hitIndex; + } + } private static class MergeSortQueue extends PriorityQueue { // These are really FieldDoc instances: @@ -72,7 +108,7 @@ private static class MergeSortQueue extends PriorityQueue { final FieldComparator[] comparators; final int[] reverseMul; - MergeSortQueue(Sort sort, CollapseTopFieldDocs[] shardHits) throws IOException { + MergeSortQueue(Sort sort, CollapseTopFieldDocs[] shardHits) { super(shardHits.length); this.shardHits = new ScoreDoc[shardHits.length][]; for (int shardIDX = 0; shardIDX < shardHits.length; shardIDX++) { @@ -115,18 +151,7 @@ public boolean lessThan(ShardRef first, ShardRef second) { return cmp < 0; } } - - // Tie break: earlier shard wins - if (first.shardIndex < second.shardIndex) { - return true; - } else if (first.shardIndex > second.shardIndex) { - return false; - } else { - // Tie break in same shard: resolve however the - // shard had resolved it: - assert first.hitIndex != second.hitIndex; - return first.hitIndex < second.hitIndex; - } + return tieBreakLessThan(first, firstFD, second, secondFD); } } @@ -135,7 +160,7 @@ public boolean lessThan(ShardRef first, ShardRef second) { * the provided CollapseTopDocs, sorting by score. Each {@link CollapseTopFieldDocs} instance must be sorted. 
**/ public static CollapseTopFieldDocs merge(Sort sort, int start, int size, - CollapseTopFieldDocs[] shardHits) throws IOException { + CollapseTopFieldDocs[] shardHits, boolean setShardIndex) { String collapseField = shardHits[0].field; for (int i = 1; i < shardHits.length; i++) { if (collapseField.equals(shardHits[i].field) == false) { @@ -155,7 +180,7 @@ public static CollapseTopFieldDocs merge(Sort sort, int start, int size, totalHitCount += shard.totalHits; if (shard.scoreDocs != null && shard.scoreDocs.length > 0) { availHitCount += shard.scoreDocs.length; - queue.add(new ShardRef(shardIDX)); + queue.add(new ShardRef(shardIDX, setShardIndex == false)); maxScore = Math.max(maxScore, shard.getMaxScore()); } } @@ -192,7 +217,9 @@ public static CollapseTopFieldDocs merge(Sort sort, int start, int size, continue; } seen.add(collapseValue); - hit.shardIndex = ref.shardIndex; + if (setShardIndex) { + hit.shardIndex = ref.shardIndex; + } if (hitUpto >= start) { hitList.add(hit); collapseList.add(collapseValue); diff --git a/core/src/main/java/org/elasticsearch/Version.java b/core/src/main/java/org/elasticsearch/Version.java index 23b889ea592a7..735043a3a28fd 100644 --- a/core/src/main/java/org/elasticsearch/Version.java +++ b/core/src/main/java/org/elasticsearch/Version.java @@ -116,6 +116,8 @@ public class Version implements Comparable { public static final Version V_5_2_3_UNRELEASED = new Version(V_5_2_3_ID_UNRELEASED, org.apache.lucene.util.Version.LUCENE_6_4_1); public static final int V_5_3_0_ID_UNRELEASED = 5030099; public static final Version V_5_3_0_UNRELEASED = new Version(V_5_3_0_ID_UNRELEASED, org.apache.lucene.util.Version.LUCENE_6_4_1); + public static final int V_5_3_1_ID_UNRELEASED = 5030199; + public static final Version V_5_3_1_UNRELEASED = new Version(V_5_3_1_ID_UNRELEASED, org.apache.lucene.util.Version.LUCENE_6_4_1); public static final int V_5_4_0_ID_UNRELEASED = 5040099; public static final Version V_5_4_0_UNRELEASED = new Version(V_5_4_0_ID_UNRELEASED, org.apache.lucene.util.Version.LUCENE_6_5_0); public static final int V_6_0_0_alpha1_ID_UNRELEASED = 6000001; @@ -138,6 +140,10 @@ public static Version fromId(int id) { switch (id) { case V_6_0_0_alpha1_ID_UNRELEASED: return V_6_0_0_alpha1_UNRELEASED; + case V_5_4_0_ID_UNRELEASED: + return V_5_4_0_UNRELEASED; + case V_5_3_1_ID_UNRELEASED: + return V_5_3_1_UNRELEASED; case V_5_3_0_ID_UNRELEASED: return V_5_3_0_UNRELEASED; case V_5_2_3_ID_UNRELEASED: diff --git a/core/src/main/java/org/elasticsearch/action/ActionListener.java b/core/src/main/java/org/elasticsearch/action/ActionListener.java index f9fafa9f95a2e..e0d91a9036437 100644 --- a/core/src/main/java/org/elasticsearch/action/ActionListener.java +++ b/core/src/main/java/org/elasticsearch/action/ActionListener.java @@ -24,6 +24,7 @@ import java.util.ArrayList; import java.util.List; +import java.util.concurrent.atomic.AtomicBoolean; import java.util.function.Consumer; /** diff --git a/core/src/main/java/org/elasticsearch/action/ActionModule.java b/core/src/main/java/org/elasticsearch/action/ActionModule.java index 94db7d2030851..c1d0541d4ce10 100644 --- a/core/src/main/java/org/elasticsearch/action/ActionModule.java +++ b/core/src/main/java/org/elasticsearch/action/ActionModule.java @@ -149,6 +149,9 @@ import org.elasticsearch.action.delete.TransportDeleteAction; import org.elasticsearch.action.explain.ExplainAction; import org.elasticsearch.action.explain.TransportExplainAction; +import org.elasticsearch.action.fieldcaps.FieldCapabilitiesAction; +import 
org.elasticsearch.action.fieldcaps.TransportFieldCapabilitiesAction; +import org.elasticsearch.action.fieldcaps.TransportFieldCapabilitiesIndexAction; import org.elasticsearch.action.fieldstats.FieldStatsAction; import org.elasticsearch.action.fieldstats.TransportFieldStatsAction; import org.elasticsearch.action.get.GetAction; @@ -205,6 +208,7 @@ import org.elasticsearch.plugins.ActionPlugin.ActionHandler; import org.elasticsearch.rest.RestController; import org.elasticsearch.rest.RestHandler; +import org.elasticsearch.rest.action.RestFieldCapabilitiesAction; import org.elasticsearch.rest.action.RestFieldStatsAction; import org.elasticsearch.rest.action.RestMainAction; import org.elasticsearch.rest.action.admin.cluster.RestCancelTasksAction; @@ -479,6 +483,8 @@ public void reg actions.register(DeleteStoredScriptAction.INSTANCE, TransportDeleteStoredScriptAction.class); actions.register(FieldStatsAction.INSTANCE, TransportFieldStatsAction.class); + actions.register(FieldCapabilitiesAction.INSTANCE, TransportFieldCapabilitiesAction.class, + TransportFieldCapabilitiesIndexAction.class); actions.register(PutPipelineAction.INSTANCE, PutPipelineTransportAction.class); actions.register(GetPipelineAction.INSTANCE, GetPipelineTransportAction.class); @@ -587,6 +593,7 @@ public void initRestHandlers(Supplier nodesInCluster) { registerHandler.accept(new RestDeleteStoredScriptAction(settings, restController)); registerHandler.accept(new RestFieldStatsAction(settings, restController)); + registerHandler.accept(new RestFieldCapabilitiesAction(settings, restController)); // Tasks API registerHandler.accept(new RestListTasksAction(settings, restController, nodesInCluster)); diff --git a/core/src/main/java/org/elasticsearch/action/NotifyOnceListener.java b/core/src/main/java/org/elasticsearch/action/NotifyOnceListener.java new file mode 100644 index 0000000000000..1b717dcc6c05a --- /dev/null +++ b/core/src/main/java/org/elasticsearch/action/NotifyOnceListener.java @@ -0,0 +1,50 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.action; + +import java.util.concurrent.atomic.AtomicBoolean; + +/** + * A listener that ensures that only one of onResponse or onFailure is called, and that the + * method that is called is invoked only once. Subclasses should implement notification logic with + * innerOnResponse and innerOnFailure.
+ */ +public abstract class NotifyOnceListener implements ActionListener { + + private final AtomicBoolean hasBeenCalled = new AtomicBoolean(false); + + protected abstract void innerOnResponse(Response response); + + protected abstract void innerOnFailure(Exception e); + + @Override + public final void onResponse(Response response) { + if (hasBeenCalled.compareAndSet(false, true)) { + innerOnResponse(response); + } + } + + @Override + public final void onFailure(Exception e) { + if (hasBeenCalled.compareAndSet(false, true)) { + innerOnFailure(e); + } + } +} diff --git a/core/src/main/java/org/elasticsearch/action/admin/cluster/stats/ClusterStatsNodes.java b/core/src/main/java/org/elasticsearch/action/admin/cluster/stats/ClusterStatsNodes.java index 0d545ddfa70ed..27276b27dd99f 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/cluster/stats/ClusterStatsNodes.java +++ b/core/src/main/java/org/elasticsearch/action/admin/cluster/stats/ClusterStatsNodes.java @@ -25,6 +25,7 @@ import org.elasticsearch.action.admin.cluster.node.info.NodeInfo; import org.elasticsearch.action.admin.cluster.node.stats.NodeStats; import org.elasticsearch.cluster.node.DiscoveryNode; +import org.elasticsearch.common.Strings; import org.elasticsearch.common.network.NetworkModule; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.transport.TransportAddress; @@ -73,7 +74,8 @@ public class ClusterStatsNodes implements ToXContent { this.plugins.addAll(nodeResponse.nodeInfo().getPlugins().getPluginInfos()); // now do the stats that should be deduped by hardware (implemented by ip deduping) - TransportAddress publishAddress = nodeResponse.nodeInfo().getTransport().address().publishAddress(); + TransportAddress publishAddress = + nodeResponse.nodeInfo().getTransport().address().publishAddress(); final InetAddress inetAddress = publishAddress.address().getAddress(); if (!seenAddresses.add(inetAddress)) { continue; @@ -209,7 +211,8 @@ static final class Fields { } @Override - public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + public XContentBuilder toXContent(XContentBuilder builder, Params params) + throws IOException { builder.field(Fields.TOTAL, total); for (Map.Entry entry : roles.entrySet()) { builder.field(entry.getKey(), entry.getValue()); @@ -280,7 +283,8 @@ static final class Fields { } @Override - public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + public XContentBuilder toXContent(XContentBuilder builder, Params params) + throws IOException { builder.field(Fields.AVAILABLE_PROCESSORS, availableProcessors); builder.field(Fields.ALLOCATED_PROCESSORS, allocatedProcessors); builder.startArray(Fields.NAMES); @@ -326,7 +330,8 @@ private ProcessStats(List nodeStatsList) { // fd can be -1 if not supported on platform totalOpenFileDescriptors += fd; } - // we still do min max calc on -1, so we'll have an indication of it not being supported on one of the nodes. + // we still do min max calc on -1, so we'll have an indication + // of it not being supported on one of the nodes. 
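To illustrate the NotifyOnceListener contract introduced above, here is a minimal, hypothetical usage sketch. It assumes the class is generic over its response type (the type parameters appear to have been stripped by formatting above), and the CountDownLatch is purely illustrative; none of this is part of the patch itself.

```java
import java.util.concurrent.CountDownLatch;

import org.elasticsearch.action.NotifyOnceListener;

public class NotifyOnceExample {
    public static void main(String[] args) throws InterruptedException {
        final CountDownLatch latch = new CountDownLatch(1);
        // Subclasses only implement innerOnResponse/innerOnFailure; the base class
        // guarantees that at most one of them runs, and only once.
        NotifyOnceListener<String> once = new NotifyOnceListener<String>() {
            @Override
            protected void innerOnResponse(String response) {
                latch.countDown();
            }

            @Override
            protected void innerOnFailure(Exception e) {
                latch.countDown();
            }
        };
        once.onResponse("first notification wins");
        // Dropped silently: the AtomicBoolean guard was already flipped by onResponse.
        once.onFailure(new RuntimeException("late failure"));
        latch.await();
    }
}
```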
minOpenFileDescriptors = Math.min(minOpenFileDescriptors, fd); maxOpenFileDescriptors = Math.max(maxOpenFileDescriptors, fd); } @@ -375,7 +380,8 @@ static final class Fields { } @Override - public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + public XContentBuilder toXContent(XContentBuilder builder, Params params) + throws IOException { builder.startObject(Fields.CPU).field(Fields.PERCENT, cpuPercent).endObject(); if (count > 0) { builder.startObject(Fields.OPEN_FILE_DESCRIPTORS); @@ -479,7 +485,8 @@ static final class Fields { } @Override - public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + public XContentBuilder toXContent(XContentBuilder builder, Params params) + throws IOException { builder.timeValueField(Fields.MAX_UPTIME_IN_MILLIS, Fields.MAX_UPTIME, maxUptime); builder.startArray(Fields.VERSIONS); for (ObjectIntCursor v : versions) { @@ -540,17 +547,25 @@ static class NetworkTypes implements ToXContent { private final Map transportTypes; private final Map httpTypes; - private NetworkTypes(final List nodeInfos) { + NetworkTypes(final List nodeInfos) { final Map transportTypes = new HashMap<>(); final Map httpTypes = new HashMap<>(); for (final NodeInfo nodeInfo : nodeInfos) { final Settings settings = nodeInfo.getSettings(); final String transportType = - settings.get(NetworkModule.TRANSPORT_TYPE_KEY, NetworkModule.TRANSPORT_DEFAULT_TYPE_SETTING.get(settings)); + settings.get(NetworkModule.TRANSPORT_TYPE_KEY, + NetworkModule.TRANSPORT_DEFAULT_TYPE_SETTING.get(settings)); final String httpType = - settings.get(NetworkModule.HTTP_TYPE_KEY, NetworkModule.HTTP_DEFAULT_TYPE_SETTING.get(settings)); - transportTypes.computeIfAbsent(transportType, k -> new AtomicInteger()).incrementAndGet(); - httpTypes.computeIfAbsent(httpType, k -> new AtomicInteger()).incrementAndGet(); + settings.get(NetworkModule.HTTP_TYPE_KEY, + NetworkModule.HTTP_DEFAULT_TYPE_SETTING.get(settings)); + if (Strings.hasText(transportType)) { + transportTypes.computeIfAbsent(transportType, + k -> new AtomicInteger()).incrementAndGet(); + } + if (Strings.hasText(httpType)) { + httpTypes.computeIfAbsent(httpType, + k -> new AtomicInteger()).incrementAndGet(); + } } this.transportTypes = Collections.unmodifiableMap(transportTypes); this.httpTypes = Collections.unmodifiableMap(httpTypes); diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/validate/query/QueryExplanation.java b/core/src/main/java/org/elasticsearch/action/admin/indices/validate/query/QueryExplanation.java index 6da503ef8281c..f03bb49fdaeae 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/indices/validate/query/QueryExplanation.java +++ b/core/src/main/java/org/elasticsearch/action/admin/indices/validate/query/QueryExplanation.java @@ -19,6 +19,7 @@ package org.elasticsearch.action.admin.indices.validate.query; +import org.elasticsearch.Version; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Streamable; @@ -27,20 +28,26 @@ public class QueryExplanation implements Streamable { + public static final int RANDOM_SHARD = -1; + private String index; - + + private int shard = RANDOM_SHARD; + private boolean valid; - + private String explanation; - + private String error; QueryExplanation() { - + } - - public QueryExplanation(String index, boolean valid, String explanation, String error) { + + public QueryExplanation(String index, int shard, 
boolean valid, String explanation, + String error) { this.index = index; + this.shard = shard; this.valid = valid; this.explanation = explanation; this.error = error; @@ -50,6 +57,10 @@ public String getIndex() { return this.index; } + public int getShard() { + return this.shard; + } + public boolean isValid() { return this.valid; } @@ -65,6 +76,11 @@ public String getExplanation() { @Override public void readFrom(StreamInput in) throws IOException { index = in.readString(); + if (in.getVersion().onOrAfter(Version.V_5_4_0_UNRELEASED)) { + shard = in.readInt(); + } else { + shard = RANDOM_SHARD; + } valid = in.readBoolean(); explanation = in.readOptionalString(); error = in.readOptionalString(); @@ -73,6 +89,9 @@ public void readFrom(StreamInput in) throws IOException { @Override public void writeTo(StreamOutput out) throws IOException { out.writeString(index); + if (out.getVersion().onOrAfter(Version.V_5_4_0_UNRELEASED)) { + out.writeInt(shard); + } out.writeBoolean(valid); out.writeOptionalString(explanation); out.writeOptionalString(error); diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/validate/query/TransportValidateQueryAction.java b/core/src/main/java/org/elasticsearch/action/admin/indices/validate/query/TransportValidateQueryAction.java index b80b721149cd9..3a13915b3aaea 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/indices/validate/query/TransportValidateQueryAction.java +++ b/core/src/main/java/org/elasticsearch/action/admin/indices/validate/query/TransportValidateQueryAction.java @@ -89,8 +89,14 @@ protected ShardValidateQueryResponse newShardResponse() { @Override protected GroupShardsIterator shards(ClusterState clusterState, ValidateQueryRequest request, String[] concreteIndices) { - // Hard-code routing to limit request to a single shard, but still, randomize it... - Map> routingMap = indexNameExpressionResolver.resolveSearchRouting(clusterState, Integer.toString(Randomness.get().nextInt(1000)), request.indices()); + final String routing; + if (request.allShards()) { + routing = null; + } else { + // Random routing to limit request to a single shard + routing = Integer.toString(Randomness.get().nextInt(1000)); + } + Map> routingMap = indexNameExpressionResolver.resolveSearchRouting(clusterState, routing, request.indices()); return clusterService.operationRouting().searchShards(clusterState, concreteIndices, routingMap, "_local"); } @@ -124,12 +130,13 @@ protected ValidateQueryResponse newResponse(ValidateQueryRequest request, Atomic } else { ShardValidateQueryResponse validateQueryResponse = (ShardValidateQueryResponse) shardResponse; valid = valid && validateQueryResponse.isValid(); - if (request.explain() || request.rewrite()) { + if (request.explain() || request.rewrite() || request.allShards()) { if (queryExplanations == null) { queryExplanations = new ArrayList<>(); } queryExplanations.add(new QueryExplanation( validateQueryResponse.getIndex(), + request.allShards() ? 
validateQueryResponse.getShardId().getId() : QueryExplanation.RANDOM_SHARD, validateQueryResponse.isValid(), validateQueryResponse.getExplanation(), validateQueryResponse.getError() diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/validate/query/ValidateQueryRequest.java b/core/src/main/java/org/elasticsearch/action/admin/indices/validate/query/ValidateQueryRequest.java index 41ef37ad621f1..18ccf1ede7d5c 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/indices/validate/query/ValidateQueryRequest.java +++ b/core/src/main/java/org/elasticsearch/action/admin/indices/validate/query/ValidateQueryRequest.java @@ -19,6 +19,7 @@ package org.elasticsearch.action.admin.indices.validate.query; +import org.elasticsearch.Version; import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.ValidateActions; import org.elasticsearch.action.support.IndicesOptions; @@ -43,6 +44,7 @@ public class ValidateQueryRequest extends BroadcastRequest private boolean explain; private boolean rewrite; + private boolean allShards; private String[] types = Strings.EMPTY_ARRAY; @@ -125,6 +127,20 @@ public boolean rewrite() { return rewrite; } + /** + * Indicates whether the query should be validated on all shards instead of one random shard + */ + public void allShards(boolean allShards) { + this.allShards = allShards; + } + + /** + * Indicates whether the query should be validated on all shards instead of one random shard + */ + public boolean allShards() { + return allShards; + } + @Override public void readFrom(StreamInput in) throws IOException { super.readFrom(in); @@ -138,6 +154,9 @@ public void readFrom(StreamInput in) throws IOException { } explain = in.readBoolean(); rewrite = in.readBoolean(); + if (in.getVersion().onOrAfter(Version.V_5_4_0_UNRELEASED)) { + allShards = in.readBoolean(); + } } @Override @@ -150,11 +169,14 @@ public void writeTo(StreamOutput out) throws IOException { } out.writeBoolean(explain); out.writeBoolean(rewrite); + if (out.getVersion().onOrAfter(Version.V_5_4_0_UNRELEASED)) { + out.writeBoolean(allShards); + } } @Override public String toString() { return "[" + Arrays.toString(indices) + "]" + Arrays.toString(types) + ", query[" + query + "], explain:" + explain + - ", rewrite:" + rewrite; + ", rewrite:" + rewrite + ", all_shards:" + allShards; } } diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/validate/query/ValidateQueryRequestBuilder.java b/core/src/main/java/org/elasticsearch/action/admin/indices/validate/query/ValidateQueryRequestBuilder.java index 8e377968980c6..bd8067e05cb9f 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/indices/validate/query/ValidateQueryRequestBuilder.java +++ b/core/src/main/java/org/elasticsearch/action/admin/indices/validate/query/ValidateQueryRequestBuilder.java @@ -64,4 +64,12 @@ public ValidateQueryRequestBuilder setRewrite(boolean rewrite) { request.rewrite(rewrite); return this; } + + /** + * Indicates whether the query should be validated on all shards + */ + public ValidateQueryRequestBuilder setAllShards(boolean rewrite) { + request.allShards(rewrite); + return this; + } } diff --git a/core/src/main/java/org/elasticsearch/action/bulk/MappingUpdatePerformer.java b/core/src/main/java/org/elasticsearch/action/bulk/MappingUpdatePerformer.java index 27d636d3d93df..e33c54d6dd569 100644 --- a/core/src/main/java/org/elasticsearch/action/bulk/MappingUpdatePerformer.java +++ 
b/core/src/main/java/org/elasticsearch/action/bulk/MappingUpdatePerformer.java @@ -20,48 +20,28 @@ package org.elasticsearch.action.bulk; import org.elasticsearch.action.index.IndexRequest; -import org.elasticsearch.common.Nullable; import org.elasticsearch.index.engine.Engine; import org.elasticsearch.index.shard.IndexShard; +import org.elasticsearch.index.shard.ShardId; import java.util.Objects; public interface MappingUpdatePerformer { + /** - * Determine if any mappings need to be updated, and update them on the - * master node if necessary. Returnes a failed {@code Engine.IndexResult} - * in the event updating the mappings fails or null if successful. - * Throws a {@code ReplicationOperation.RetryOnPrimaryException} if the - * operation needs to be retried on the primary due to the mappings not - * being present yet, or a different exception if updating the mappings - * on the master failed. + * Determine if any mappings need to be updated, and update them on the master node if + * necessary. Returnes a failure Exception in the event updating the mappings fails or null if + * successful. */ - @Nullable - MappingUpdateResult updateMappingsIfNeeded(IndexShard primary, IndexRequest request) throws Exception; + void updateMappingsIfNeeded(Engine.Index operation, + ShardId shardId, + String type) throws Exception; /** - * Class encapsulating the resulting of potentially updating the mapping + * Throws a {@code ReplicationOperation.RetryOnPrimaryException} if the operation needs to be + * retried on the primary due to the mappings not being present yet, or a different exception if + * updating the mappings on the master failed. */ - class MappingUpdateResult { - @Nullable - public final Engine.Index operation; - @Nullable - public final Exception failure; - - MappingUpdateResult(Exception failure) { - Objects.requireNonNull(failure, "failure cannot be null"); - this.failure = failure; - this.operation = null; - } - - MappingUpdateResult(Engine.Index operation) { - Objects.requireNonNull(operation, "operation cannot be null"); - this.operation = operation; - this.failure = null; - } + void verifyMappings(Engine.Index operation, ShardId shardId) throws Exception; - public boolean isFailed() { - return failure != null; - } - } } diff --git a/core/src/main/java/org/elasticsearch/action/bulk/TransportShardBulkAction.java b/core/src/main/java/org/elasticsearch/action/bulk/TransportShardBulkAction.java index d1bba8d2d4ab5..6a286c5a75860 100644 --- a/core/src/main/java/org/elasticsearch/action/bulk/TransportShardBulkAction.java +++ b/core/src/main/java/org/elasticsearch/action/bulk/TransportShardBulkAction.java @@ -65,6 +65,9 @@ import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportRequestOptions; import org.elasticsearch.transport.TransportService; +import org.elasticsearch.index.translog.Translog.Location; +import org.elasticsearch.action.bulk.BulkItemResultHolder; +import org.elasticsearch.action.bulk.BulkItemResponse; import java.io.IOException; import java.util.Map; @@ -154,10 +157,23 @@ private static BulkItemResultHolder executeDeleteRequest(final DeleteRequest del } } + static Translog.Location calculateTranslogLocation(final Translog.Location originalLocation, + final BulkItemResultHolder bulkItemResult) { + final Engine.Result operationResult = bulkItemResult.operationResult; + if (operationResult != null && operationResult.hasFailure() == false) { + return locationToSync(originalLocation, operationResult.getTranslogLocation()); + } else { + return 
originalLocation; + } + } + // Visible for unit testing - static Translog.Location updateReplicaRequest(BulkItemResultHolder bulkItemResult, + /** + * Creates a BulkItemResponse for the primary operation and returns it. If no bulk response is + * needed (because one already exists and the operation failed), then return null. + */ + static BulkItemResponse createPrimaryResponse(BulkItemResultHolder bulkItemResult, final DocWriteRequest.OpType opType, - final Translog.Location originalLocation, BulkShardRequest request) { final Engine.Result operationResult = bulkItemResult.operationResult; final DocWriteResponse response = bulkItemResult.response; @@ -165,16 +181,13 @@ static Translog.Location updateReplicaRequest(BulkItemResultHolder bulkItemResul if (operationResult == null) { // in case of noop update operation assert response.getResult() == DocWriteResponse.Result.NOOP : "only noop updates can have a null operation"; - replicaRequest.setPrimaryResponse(new BulkItemResponse(replicaRequest.id(), opType, response)); - return originalLocation; + return new BulkItemResponse(replicaRequest.id(), opType, response); } else if (operationResult.hasFailure() == false) { BulkItemResponse primaryResponse = new BulkItemResponse(replicaRequest.id(), opType, response); - replicaRequest.setPrimaryResponse(primaryResponse); // set a blank ShardInfo so we can safely send it to the replicas. We won't use it in the real response though. primaryResponse.getResponse().setShardInfo(new ShardInfo()); - // The operation was successful, advance the translog - return locationToSync(originalLocation, operationResult.getTranslogLocation()); + return primaryResponse; } else { DocWriteRequest docWriteRequest = replicaRequest.request(); @@ -187,19 +200,19 @@ static Translog.Location updateReplicaRequest(BulkItemResultHolder bulkItemResul request.shardId(), docWriteRequest.opType().getLowercase(), request), failure); } - // if it's a conflict failure, and we already executed the request on a primary (and we execute it // again, due to primary relocation and only processing up to N bulk items when the shard gets closed) // then just use the response we got from the failed execution if (replicaRequest.getPrimaryResponse() == null || isConflictException(failure) == false) { - replicaRequest.setPrimaryResponse( - new BulkItemResponse(replicaRequest.id(), docWriteRequest.opType(), - // Make sure to use request.indox() here, if you - // use docWriteRequest.index() it will use the - // concrete index instead of an alias if used! - new BulkItemResponse.Failure(request.index(), docWriteRequest.type(), docWriteRequest.id(), failure))); + return new BulkItemResponse(replicaRequest.id(), docWriteRequest.opType(), + // Make sure to use request.index() here, if you + // use docWriteRequest.index() it will use the + // concrete index instead of an alias if used! 
+ new BulkItemResponse.Failure(request.index(), docWriteRequest.type(), docWriteRequest.id(), failure)); + } else { + assert replicaRequest.getPrimaryResponse() != null : "replica request must have a primary response"; + return null; } - return originalLocation; } } @@ -233,11 +246,14 @@ static Translog.Location executeBulkItemRequest(IndexMetaData metaData, IndexSha // update the bulk item request because update request execution can mutate the bulk item request request.items()[requestIndex] = replicaRequest; - // Modify the replica request, if needed, and return a new translog location - location = updateReplicaRequest(responseHolder, opType, location, request); + // Retrieve the primary response, and update the replica request with the primary's response + BulkItemResponse primaryResponse = createPrimaryResponse(responseHolder, opType, request); + if (primaryResponse != null) { + replicaRequest.setPrimaryResponse(primaryResponse); + } - assert replicaRequest.getPrimaryResponse() != null : "replica request must have a primary response"; - return location; + // Update the translog with the new location, if needed + return calculateTranslogLocation(location, responseHolder); } private static boolean isConflictException(final Exception e) { @@ -396,14 +412,16 @@ public WriteReplicaResult shardOperationOnReplica(BulkShardReq return new WriteReplicaResult<>(request, location, null, replica, logger); } - private static Translog.Location locationToSync(Translog.Location current, Translog.Location next) { - /* here we are moving forward in the translog with each operation. Under the hood - * this might cross translog files which is ok since from the user perspective - * the translog is like a tape where only the highest location needs to be fsynced - * in order to sync all previous locations even though they are not in the same file. - * When the translog rolls over files the previous file is fsynced on after closing if needed.*/ + private static Translog.Location locationToSync(Translog.Location current, + Translog.Location next) { + /* here we are moving forward in the translog with each operation. Under the hood this might + * cross translog files which is ok since from the user perspective the translog is like a + * tape where only the highest location needs to be fsynced in order to sync all previous + * locations even though they are not in the same file. When the translog rolls over files + * the previous file is fsynced on after closing if needed.*/ assert next != null : "next operation can't be null"; - assert current == null || current.compareTo(next) < 0 : "translog locations are not increasing"; + assert current == null || current.compareTo(next) < 0 : + "translog locations are not increasing"; return next; } @@ -411,45 +429,82 @@ private static Translog.Location locationToSync(Translog.Location current, Trans * Execute the given {@link IndexRequest} on a replica shard, throwing a * {@link RetryOnReplicaException} if the operation needs to be re-tried. 
*/ - public static Engine.IndexResult executeIndexRequestOnReplica(DocWriteResponse primaryResponse, IndexRequest request, IndexShard replica) throws IOException { - final ShardId shardId = replica.shardId(); - SourceToParse sourceToParse = - SourceToParse.source(SourceToParse.Origin.REPLICA, shardId.getIndexName(), request.type(), request.id(), request.source(), - request.getContentType()).routing(request.routing()).parent(request.parent()); + public static Engine.IndexResult executeIndexRequestOnReplica( + DocWriteResponse primaryResponse, + IndexRequest request, + IndexShard replica) throws IOException { final Engine.Index operation; - final long version = primaryResponse.getVersion(); - final VersionType versionType = request.versionType().versionTypeForReplicationAndRecovery(); - assert versionType.validateVersionForWrites(version); - final long seqNo = primaryResponse.getSeqNo(); try { - operation = replica.prepareIndexOnReplica(sourceToParse, seqNo, version, versionType, request.getAutoGeneratedTimestamp(), request.isRetry()); + operation = prepareIndexOperationOnReplica(primaryResponse, request, replica); } catch (MapperParsingException e) { - return new Engine.IndexResult(e, version, seqNo); + return new Engine.IndexResult(e, primaryResponse.getVersion(), + primaryResponse.getSeqNo()); } + Mapping update = operation.parsedDoc().dynamicMappingsUpdate(); if (update != null) { - throw new RetryOnReplicaException(shardId, "Mappings are not available on the replica yet, triggered update: " + update); + final ShardId shardId = replica.shardId(); + throw new RetryOnReplicaException(shardId, + "Mappings are not available on the replica yet, triggered update: " + update); } return replica.index(operation); } + /** Utility method to prepare an index operation on replica shards */ + static Engine.Index prepareIndexOperationOnReplica( + DocWriteResponse primaryResponse, + IndexRequest request, + IndexShard replica) { + + final ShardId shardId = replica.shardId(); + final long version = primaryResponse.getVersion(); + final long seqNo = primaryResponse.getSeqNo(); + final SourceToParse sourceToParse = + SourceToParse.source(SourceToParse.Origin.REPLICA, shardId.getIndexName(), + request.type(), request.id(), request.source(), request.getContentType()) + .routing(request.routing()).parent(request.parent()); + final VersionType versionType = request.versionType().versionTypeForReplicationAndRecovery(); + assert versionType.validateVersionForWrites(version); + + return replica.prepareIndexOnReplica(sourceToParse, seqNo, version, versionType, + request.getAutoGeneratedTimestamp(), request.isRetry()); + } + /** Utility method to prepare an index operation on primary shards */ static Engine.Index prepareIndexOperationOnPrimary(IndexRequest request, IndexShard primary) { - SourceToParse sourceToParse = - SourceToParse.source(SourceToParse.Origin.PRIMARY, request.index(), request.type(), request.id(), request.source(), - request.getContentType()).routing(request.routing()).parent(request.parent()); - return primary.prepareIndexOnPrimary(sourceToParse, request.version(), request.versionType(), request.getAutoGeneratedTimestamp(), request.isRetry()); + final SourceToParse sourceToParse = + SourceToParse.source(SourceToParse.Origin.PRIMARY, request.index(), request.type(), + request.id(), request.source(), request.getContentType()) + .routing(request.routing()).parent(request.parent()); + return primary.prepareIndexOnPrimary(sourceToParse, request.version(), request.versionType(), + 
request.getAutoGeneratedTimestamp(), request.isRetry()); } /** Executes index operation on primary shard after updates mapping if dynamic mappings are found */ public static Engine.IndexResult executeIndexRequestOnPrimary(IndexRequest request, IndexShard primary, MappingUpdatePerformer mappingUpdater) throws Exception { - MappingUpdatePerformer.MappingUpdateResult result = mappingUpdater.updateMappingsIfNeeded(primary, request); - if (result.isFailed()) { - return new Engine.IndexResult(result.failure, request.version()); + // Update the mappings if parsing the documents includes new dynamic updates + try { + final Engine.Index preUpdateOperation = prepareIndexOperationOnPrimary(request, primary); + mappingUpdater.updateMappingsIfNeeded(preUpdateOperation, primary.shardId(), request.type()); + } catch (MapperParsingException | IllegalArgumentException failure) { + return new Engine.IndexResult(failure, request.version()); + } + + // Verify that there are no more mappings that need to be applied. If there are failures, a + // ReplicationOperation.RetryOnPrimaryException is thrown. + final Engine.Index operation; + try { + operation = prepareIndexOperationOnPrimary(request, primary); + mappingUpdater.verifyMappings(operation, primary.shardId()); + } catch (MapperParsingException | IllegalStateException e) { + // there was an error in parsing the document that was not because + // of pending mapping updates, so return a failure for the result + return new Engine.IndexResult(e, request.version()); } - return primary.index(result.operation); + + return primary.index(operation); } private static Engine.DeleteResult executeDeleteRequestOnPrimary(DeleteRequest request, IndexShard primary) throws IOException { @@ -468,36 +523,22 @@ private static Engine.DeleteResult executeDeleteRequestOnReplica(DocWriteRespons class ConcreteMappingUpdatePerformer implements MappingUpdatePerformer { - @Nullable - public MappingUpdateResult updateMappingsIfNeeded(IndexShard primary, IndexRequest request) throws Exception { - Engine.Index operation; - try { - operation = prepareIndexOperationOnPrimary(request, primary); - } catch (MapperParsingException | IllegalArgumentException e) { - return new MappingUpdateResult(e); - } + public void updateMappingsIfNeeded(final Engine.Index operation, final ShardId shardId, + final String type) throws Exception { final Mapping update = operation.parsedDoc().dynamicMappingsUpdate(); - final ShardId shardId = primary.shardId(); if (update != null) { - // can throw timeout exception when updating mappings or ISE for attempting to update default mappings - // which are bubbled up - try { - mappingUpdatedAction.updateMappingOnMaster(shardId.getIndex(), request.type(), update); - } catch (IllegalArgumentException e) { - // throws IAE on conflicts merging dynamic mappings - return new MappingUpdateResult(e); - } - try { - operation = prepareIndexOperationOnPrimary(request, primary); - } catch (MapperParsingException | IllegalArgumentException e) { - return new MappingUpdateResult(e); - } - if (operation.parsedDoc().dynamicMappingsUpdate() != null) { - throw new ReplicationOperation.RetryOnPrimaryException(shardId, - "Dynamic mappings are not available on the node that holds the primary yet"); - } + // can throw timeout exception when updating mappings or ISE for attempting to + // update default mappings which are bubbled up + mappingUpdatedAction.updateMappingOnMaster(shardId.getIndex(), type, update); + } + } + + public void verifyMappings(final Engine.Index operation, + final 
ShardId shardId) throws Exception { + if (operation.parsedDoc().dynamicMappingsUpdate() != null) { + throw new ReplicationOperation.RetryOnPrimaryException(shardId, + "Dynamic mappings are not available on the node that holds the primary yet"); } - return new MappingUpdateResult(operation); } } } diff --git a/core/src/main/java/org/elasticsearch/action/bulk/byscroll/AbstractAsyncBulkByScrollAction.java b/core/src/main/java/org/elasticsearch/action/bulk/byscroll/AbstractAsyncBulkByScrollAction.java index 834321f1798ec..72d39c038ed01 100644 --- a/core/src/main/java/org/elasticsearch/action/bulk/byscroll/AbstractAsyncBulkByScrollAction.java +++ b/core/src/main/java/org/elasticsearch/action/bulk/byscroll/AbstractAsyncBulkByScrollAction.java @@ -465,14 +465,18 @@ protected void finishHim(Exception failure) { * @param searchFailures any search failures accumulated during the request * @param timedOut have any of the sub-requests timed out? */ - protected void finishHim(Exception failure, List indexingFailures, List searchFailures, boolean timedOut) { - scrollSource.close(); - if (failure == null) { - listener.onResponse( - buildResponse(timeValueNanos(System.nanoTime() - startTime.get()), indexingFailures, searchFailures, timedOut)); - } else { - listener.onFailure(failure); - } + protected void finishHim(Exception failure, List indexingFailures, + List searchFailures, boolean timedOut) { + scrollSource.close(() -> { + if (failure == null) { + BulkByScrollResponse response = buildResponse( + timeValueNanos(System.nanoTime() - startTime.get()), + indexingFailures, searchFailures, timedOut); + listener.onResponse(response); + } else { + listener.onFailure(failure); + } + }); } /** diff --git a/core/src/main/java/org/elasticsearch/action/bulk/byscroll/ClientScrollableHitSource.java b/core/src/main/java/org/elasticsearch/action/bulk/byscroll/ClientScrollableHitSource.java index 9fc02e29e62c9..3bacc187ebb6d 100644 --- a/core/src/main/java/org/elasticsearch/action/bulk/byscroll/ClientScrollableHitSource.java +++ b/core/src/main/java/org/elasticsearch/action/bulk/byscroll/ClientScrollableHitSource.java @@ -113,8 +113,8 @@ public void onFailure(Exception e) { } @Override - protected void cleanup() { - // Nothing to do + protected void cleanup(Runnable onCompletion) { + onCompletion.run(); } /** diff --git a/core/src/main/java/org/elasticsearch/action/bulk/byscroll/DeleteByQueryRequest.java b/core/src/main/java/org/elasticsearch/action/bulk/byscroll/DeleteByQueryRequest.java index f625152d407b1..2644d0d94967f 100644 --- a/core/src/main/java/org/elasticsearch/action/bulk/byscroll/DeleteByQueryRequest.java +++ b/core/src/main/java/org/elasticsearch/action/bulk/byscroll/DeleteByQueryRequest.java @@ -74,6 +74,8 @@ public ActionRequestValidationException validate() { } if (getSearchRequest() == null || getSearchRequest().source() == null) { e = addValidationError("source is missing", e); + } else if (getSearchRequest().source().query() == null) { + e = addValidationError("query is missing", e); } return e; } diff --git a/core/src/main/java/org/elasticsearch/action/bulk/byscroll/ParentBulkByScrollTask.java b/core/src/main/java/org/elasticsearch/action/bulk/byscroll/ParentBulkByScrollTask.java index dfaf03f411547..a37dc61897c0c 100644 --- a/core/src/main/java/org/elasticsearch/action/bulk/byscroll/ParentBulkByScrollTask.java +++ b/core/src/main/java/org/elasticsearch/action/bulk/byscroll/ParentBulkByScrollTask.java @@ -40,7 +40,7 @@ public class ParentBulkByScrollTask extends BulkByScrollTask { * Holds the 
responses as they come back. This uses {@link Tuple} as an "Either" style holder where only the response or the exception * is set. */ - private final AtomicArray> results; + private final AtomicArray results; private final AtomicInteger counter; public ParentBulkByScrollTask(long id, String type, String action, String description, TaskId parentTaskId, int slices) { @@ -82,13 +82,11 @@ public TaskInfo getInfoGivenSliceInfo(String localNodeId, List sliceIn } private void addResultsToList(List sliceStatuses) { - for (AtomicArray.Entry> t : results.asList()) { - if (t.value != null) { - if (t.value.v1() != null) { - sliceStatuses.set(t.index, new StatusOrException(t.value.v1().getStatus())); - } else { - sliceStatuses.set(t.index, new StatusOrException(t.value.v2())); - } + for (Result t : results.asList()) { + if (t.response != null) { + sliceStatuses.set(t.sliceId, new StatusOrException(t.response.getStatus())); + } else { + sliceStatuses.set(t.sliceId, new StatusOrException(t.failure)); } } } @@ -97,7 +95,7 @@ private void addResultsToList(List sliceStatuses) { * Record a response from a slice and respond to the listener if the request is finished. */ public void onSliceResponse(ActionListener listener, int sliceId, BulkByScrollResponse response) { - results.setOnce(sliceId, new Tuple<>(response, null)); + results.setOnce(sliceId, new Result(sliceId, response)); /* If the request isn't finished we could automatically rethrottle the sub-requests here but we would only want to do that if we * were fairly sure they had a while left to go. */ recordSliceCompletionAndRespondIfAllDone(listener); @@ -107,7 +105,7 @@ public void onSliceResponse(ActionListener listener, int s * Record a failure from a slice and respond to the listener if the request is finished. */ void onSliceFailure(ActionListener listener, int sliceId, Exception e) { - results.setOnce(sliceId, new Tuple<>(null, e)); + results.setOnce(sliceId, new Result(sliceId, e)); recordSliceCompletionAndRespondIfAllDone(listener); // TODO cancel when a slice fails? 
} @@ -118,17 +116,17 @@ private void recordSliceCompletionAndRespondIfAllDone(ActionListener responses = new ArrayList<>(results.length()); Exception exception = null; - for (AtomicArray.Entry> t : results.asList()) { - if (t.value.v1() == null) { - assert t.value.v2() != null : "exception shouldn't be null if value is null"; + for (Result t : results.asList()) { + if (t.response == null) { + assert t.failure != null : "exception shouldn't be null if value is null"; if (exception == null) { - exception = t.value.v2(); + exception = t.failure; } else { - exception.addSuppressed(t.value.v2()); + exception.addSuppressed(t.failure); } } else { - assert t.value.v2() == null : "exception should be null if response is not null"; - responses.add(t.value.v1()); + assert t.failure == null : "exception should be null if response is not null"; + responses.add(t.response); } } if (exception == null) { @@ -138,4 +136,21 @@ private void recordSliceCompletionAndRespondIfAllDone(ActionListener scrollId = new AtomicReference<>(); protected final Logger logger; @@ -82,25 +82,31 @@ public final void startNextScroll(TimeValue extraKeepAlive, Consumer o } protected abstract void doStartNextScroll(String scrollId, TimeValue extraKeepAlive, Consumer onResponse); - @Override - public final void close() { + public final void close(Runnable onCompletion) { String scrollId = this.scrollId.get(); if (Strings.hasLength(scrollId)) { - clearScroll(scrollId, this::cleanup); + clearScroll(scrollId, () -> cleanup(onCompletion)); } else { - cleanup(); + cleanup(onCompletion); } } + /** * Called to clear a scroll id. + * * @param scrollId the id to clear - * @param onCompletion implementers must call this after completing the clear whether they are successful or not + * @param onCompletion implementers must call this after completing the clear whether they are + * successful or not */ protected abstract void clearScroll(String scrollId, Runnable onCompletion); /** - * Called after the process has been totally finished to clean up any resources the process needed like remote connections. + * Called after the process has been totally finished to clean up any resources the process + * needed like remote connections. + * + * @param onCompletion implementers must call this after completing the cleanup whether they are + * successful or not */ - protected abstract void cleanup(); + protected abstract void cleanup(Runnable onCompletion); /** * Set the id of the last scroll. Used for debugging. diff --git a/core/src/main/java/org/elasticsearch/action/fieldcaps/FieldCapabilities.java b/core/src/main/java/org/elasticsearch/action/fieldcaps/FieldCapabilities.java new file mode 100644 index 0000000000000..ef7513f38abc2 --- /dev/null +++ b/core/src/main/java/org/elasticsearch/action/fieldcaps/FieldCapabilities.java @@ -0,0 +1,282 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. 
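The close(Runnable)/cleanup(Runnable) contract added to ScrollableHitSource above requires implementers to invoke the completion callback whether or not releasing resources succeeds. Below is a stand-alone sketch of that pattern; it is not the actual remote implementation, and the Closeable field is only illustrative.

```java
import java.io.Closeable;
import java.io.IOException;

final class ExampleHitSourceCleanup {
    private final Closeable remoteConnection; // hypothetical resource held by a hit source

    ExampleHitSourceCleanup(Closeable remoteConnection) {
        this.remoteConnection = remoteConnection;
    }

    void cleanup(Runnable onCompletion) {
        try {
            remoteConnection.close();
        } catch (IOException e) {
            // a failed release must not swallow the completion notification
        } finally {
            onCompletion.run(); // always called, success or failure
        }
    }
}
```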
See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.action.fieldcaps; + +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.common.xcontent.ToXContent; +import org.elasticsearch.common.xcontent.XContentBuilder; + +import java.io.IOException; +import java.util.Collections; +import java.util.Arrays; +import java.util.List; +import java.util.ArrayList; +import java.util.Comparator; + +/** + * Describes the capabilities of a field optionally merged across multiple indices. + */ +public class FieldCapabilities implements Writeable, ToXContent { + private final String name; + private final String type; + private final boolean isSearchable; + private final boolean isAggregatable; + + private final String[] indices; + private final String[] nonSearchableIndices; + private final String[] nonAggregatableIndices; + + /** + * Constructor + * @param name The name of the field. + * @param type The type associated with the field. + * @param isSearchable Whether this field is indexed for search. + * @param isAggregatable Whether this field can be aggregated on. + */ + FieldCapabilities(String name, String type, boolean isSearchable, boolean isAggregatable) { + this(name, type, isSearchable, isAggregatable, null, null, null); + } + + /** + * Constructor + * @param name The name of the field + * @param type The type associated with the field. + * @param isSearchable Whether this field is indexed for search. + * @param isAggregatable Whether this field can be aggregated on. + * @param indices The list of indices where this field name is defined as {@code type}, + * or null if all indices have the same {@code type} for the field. + * @param nonSearchableIndices The list of indices where this field is not searchable, + * or null if the field is searchable in all indices. + * @param nonAggregatableIndices The list of indices where this field is not aggregatable, + * or null if the field is aggregatable in all indices. 
+ */ + FieldCapabilities(String name, String type, + boolean isSearchable, boolean isAggregatable, + String[] indices, + String[] nonSearchableIndices, + String[] nonAggregatableIndices) { + this.name = name; + this.type = type; + this.isSearchable = isSearchable; + this.isAggregatable = isAggregatable; + this.indices = indices; + this.nonSearchableIndices = nonSearchableIndices; + this.nonAggregatableIndices = nonAggregatableIndices; + } + + FieldCapabilities(StreamInput in) throws IOException { + this.name = in.readString(); + this.type = in.readString(); + this.isSearchable = in.readBoolean(); + this.isAggregatable = in.readBoolean(); + this.indices = in.readOptionalStringArray(); + this.nonSearchableIndices = in.readOptionalStringArray(); + this.nonAggregatableIndices = in.readOptionalStringArray(); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeString(name); + out.writeString(type); + out.writeBoolean(isSearchable); + out.writeBoolean(isAggregatable); + out.writeOptionalStringArray(indices); + out.writeOptionalStringArray(nonSearchableIndices); + out.writeOptionalStringArray(nonAggregatableIndices); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + builder.field("type", type); + builder.field("searchable", isSearchable); + builder.field("aggregatable", isAggregatable); + if (indices != null) { + builder.field("indices", indices); + } + if (nonSearchableIndices != null) { + builder.field("non_searchable_indices", nonSearchableIndices); + } + if (nonAggregatableIndices != null) { + builder.field("non_aggregatable_indices", nonAggregatableIndices); + } + builder.endObject(); + return builder; + } + + /** + * The name of the field. + */ + public String getName() { + return name; + } + + /** + * Whether this field can be aggregated on all indices. + */ + public boolean isAggregatable() { + return isAggregatable; + } + + /** + * Whether this field is indexed for search on all indices. + */ + public boolean isSearchable() { + return isSearchable; + } + + /** + * The type of the field. + */ + public String getType() { + return type; + } + + /** + * The list of indices where this field name is defined as {@code type}, + * or null if all indices have the same {@code type} for the field. + */ + public String[] indices() { + return indices; + } + + /** + * The list of indices where this field is not searchable, + * or null if the field is searchable in all indices. + */ + public String[] nonSearchableIndices() { + return nonSearchableIndices; + } + + /** + * The list of indices where this field is not aggregatable, + * or null if the field is aggregatable in all indices.
+ */ + public String[] nonAggregatableIndices() { + return nonAggregatableIndices; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + + FieldCapabilities that = (FieldCapabilities) o; + + if (isSearchable != that.isSearchable) return false; + if (isAggregatable != that.isAggregatable) return false; + if (!name.equals(that.name)) return false; + if (!type.equals(that.type)) return false; + if (!Arrays.equals(indices, that.indices)) return false; + if (!Arrays.equals(nonSearchableIndices, that.nonSearchableIndices)) return false; + return Arrays.equals(nonAggregatableIndices, that.nonAggregatableIndices); + } + + @Override + public int hashCode() { + int result = name.hashCode(); + result = 31 * result + type.hashCode(); + result = 31 * result + (isSearchable ? 1 : 0); + result = 31 * result + (isAggregatable ? 1 : 0); + result = 31 * result + Arrays.hashCode(indices); + result = 31 * result + Arrays.hashCode(nonSearchableIndices); + result = 31 * result + Arrays.hashCode(nonAggregatableIndices); + return result; + } + + static class Builder { + private String name; + private String type; + private boolean isSearchable; + private boolean isAggregatable; + private List indiceList; + + Builder(String name, String type) { + this.name = name; + this.type = type; + this.isSearchable = true; + this.isAggregatable = true; + this.indiceList = new ArrayList<>(); + } + + void add(String index, boolean search, boolean agg) { + IndexCaps indexCaps = new IndexCaps(index, search, agg); + indiceList.add(indexCaps); + this.isSearchable &= search; + this.isAggregatable &= agg; + } + + FieldCapabilities build(boolean withIndices) { + final String[] indices; + /* Eclipse can't deal with o -> o.name, maybe because of + * https://bugs.eclipse.org/bugs/show_bug.cgi?id=511750 */ + Collections.sort(indiceList, Comparator.comparing((IndexCaps o) -> o.name)); + if (withIndices) { + indices = indiceList.stream() + .map(caps -> caps.name) + .toArray(String[]::new); + } else { + indices = null; + } + + final String[] nonSearchableIndices; + if (isSearchable == false && + indiceList.stream().anyMatch((caps) -> caps.isSearchable)) { + // Iff this field is searchable in some indices AND non-searchable in others + // we record the list of non-searchable indices + nonSearchableIndices = indiceList.stream() + .filter((caps) -> caps.isSearchable == false) + .map(caps -> caps.name) + .toArray(String[]::new); + } else { + nonSearchableIndices = null; + } + + final String[] nonAggregatableIndices; + if (isAggregatable == false && + indiceList.stream().anyMatch((caps) -> caps.isAggregatable)) { + // Iff this field is aggregatable in some indices AND non-searchable in others + // we keep the list of non-aggregatable indices + nonAggregatableIndices = indiceList.stream() + .filter((caps) -> caps.isAggregatable == false) + .map(caps -> caps.name) + .toArray(String[]::new); + } else { + nonAggregatableIndices = null; + } + return new FieldCapabilities(name, type, isSearchable, isAggregatable, + indices, nonSearchableIndices, nonAggregatableIndices); + } + } + + private static class IndexCaps { + final String name; + final boolean isSearchable; + final boolean isAggregatable; + + IndexCaps(String name, boolean isSearchable, boolean isAggregatable) { + this.name = name; + this.isSearchable = isSearchable; + this.isAggregatable = isAggregatable; + } + } +} diff --git 
a/core/src/main/java/org/elasticsearch/action/fieldcaps/FieldCapabilitiesAction.java b/core/src/main/java/org/elasticsearch/action/fieldcaps/FieldCapabilitiesAction.java new file mode 100644 index 0000000000000..93d67f3fc3cc4 --- /dev/null +++ b/core/src/main/java/org/elasticsearch/action/fieldcaps/FieldCapabilitiesAction.java @@ -0,0 +1,44 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.action.fieldcaps; + +import org.elasticsearch.action.Action; +import org.elasticsearch.client.ElasticsearchClient; + +public class FieldCapabilitiesAction extends Action { + + public static final FieldCapabilitiesAction INSTANCE = new FieldCapabilitiesAction(); + public static final String NAME = "indices:data/read/field_caps"; + + private FieldCapabilitiesAction() { + super(NAME); + } + + @Override + public FieldCapabilitiesResponse newResponse() { + return new FieldCapabilitiesResponse(); + } + + @Override + public FieldCapabilitiesRequestBuilder newRequestBuilder(ElasticsearchClient client) { + return new FieldCapabilitiesRequestBuilder(client, this); + } +} diff --git a/core/src/main/java/org/elasticsearch/action/fieldcaps/FieldCapabilitiesIndexRequest.java b/core/src/main/java/org/elasticsearch/action/fieldcaps/FieldCapabilitiesIndexRequest.java new file mode 100644 index 0000000000000..460a21ae866aa --- /dev/null +++ b/core/src/main/java/org/elasticsearch/action/fieldcaps/FieldCapabilitiesIndexRequest.java @@ -0,0 +1,65 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
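As a concrete illustration of the FieldCapabilities.Builder defined above (it and the add/build methods are package-private, so this fragment assumes it runs from the same package, for example a test), consider merging two hypothetical indices where the field is aggregatable in only one of them. The index names are made up; the expected results in the comments follow the Builder logic shown above.

```java
// Fragment assumed to run inside org.elasticsearch.action.fieldcaps (package-private API).
FieldCapabilities.Builder builder = new FieldCapabilities.Builder("title", "text");
builder.add("logs-1", true, true);   // searchable and aggregatable in logs-1
builder.add("logs-2", true, false);  // searchable but not aggregatable in logs-2

FieldCapabilities merged = builder.build(true);
// merged.isSearchable()           -> true   (searchable in every index)
// merged.isAggregatable()         -> false  (not aggregatable everywhere)
// merged.indices()                -> {"logs-1", "logs-2"}
// merged.nonSearchableIndices()   -> null   (searchable in all indices)
// merged.nonAggregatableIndices() -> {"logs-2"}
```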
+ */ + +package org.elasticsearch.action.fieldcaps; + +import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.action.support.single.shard.SingleShardRequest; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; + +import java.io.IOException; + +public class FieldCapabilitiesIndexRequest + extends SingleShardRequest { + + private String[] fields; + + // For serialization + FieldCapabilitiesIndexRequest() {} + + FieldCapabilitiesIndexRequest(String[] fields, String index) { + super(index); + if (fields == null || fields.length == 0) { + throw new IllegalArgumentException("specified fields can't be null or empty"); + } + this.fields = fields; + } + + public String[] fields() { + return fields; + } + + @Override + public void readFrom(StreamInput in) throws IOException { + super.readFrom(in); + fields = in.readStringArray(); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + out.writeStringArray(fields); + } + + @Override + public ActionRequestValidationException validate() { + return null; + } +} diff --git a/core/src/main/java/org/elasticsearch/action/fieldcaps/FieldCapabilitiesIndexResponse.java b/core/src/main/java/org/elasticsearch/action/fieldcaps/FieldCapabilitiesIndexResponse.java new file mode 100644 index 0000000000000..de520ee6274f6 --- /dev/null +++ b/core/src/main/java/org/elasticsearch/action/fieldcaps/FieldCapabilitiesIndexResponse.java @@ -0,0 +1,97 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.action.fieldcaps; + +import org.elasticsearch.action.ActionResponse; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; + +import java.io.IOException; +import java.util.Map; + +/** + * Response for {@link FieldCapabilitiesIndexRequest} requests. 
+ */
+public class FieldCapabilitiesIndexResponse extends ActionResponse {
+    private String indexName;
+    private Map<String, FieldCapabilities> responseMap;
+
+    FieldCapabilitiesIndexResponse(String indexName, Map<String, FieldCapabilities> responseMap) {
+        this.indexName = indexName;
+        this.responseMap = responseMap;
+    }
+
+    FieldCapabilitiesIndexResponse() {
+    }
+
+    /**
+     * Get the index name
+     */
+    public String getIndexName() {
+        return indexName;
+    }
+
+    /**
+     * Get the field capabilities map
+     */
+    public Map<String, FieldCapabilities> get() {
+        return responseMap;
+    }
+
+    /**
+     * Get the field capabilities for the provided {@code field}
+     */
+    public FieldCapabilities getField(String field) {
+        return responseMap.get(field);
+    }
+
+    @Override
+    public void readFrom(StreamInput in) throws IOException {
+        super.readFrom(in);
+        this.indexName = in.readString();
+        this.responseMap =
+            in.readMap(StreamInput::readString, FieldCapabilities::new);
+    }
+
+    @Override
+    public void writeTo(StreamOutput out) throws IOException {
+        super.writeTo(out);
+        out.writeString(indexName);
+        out.writeMap(responseMap,
+            StreamOutput::writeString, (valueOut, fc) -> fc.writeTo(valueOut));
+    }
+
+    @Override
+    public boolean equals(Object o) {
+        if (this == o) return true;
+        if (o == null || getClass() != o.getClass()) return false;
+
+        FieldCapabilitiesIndexResponse that = (FieldCapabilitiesIndexResponse) o;
+
+        return responseMap.equals(that.responseMap);
+    }
+
+    @Override
+    public int hashCode() {
+        return responseMap.hashCode();
+    }
+}
diff --git a/core/src/main/java/org/elasticsearch/action/fieldcaps/FieldCapabilitiesRequest.java b/core/src/main/java/org/elasticsearch/action/fieldcaps/FieldCapabilitiesRequest.java
new file mode 100644
index 0000000000000..7eab911216282
--- /dev/null
+++ b/core/src/main/java/org/elasticsearch/action/fieldcaps/FieldCapabilitiesRequest.java
@@ -0,0 +1,144 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */ + +package org.elasticsearch.action.fieldcaps; + +import org.elasticsearch.action.ActionRequest; +import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.action.IndicesRequest; +import org.elasticsearch.action.ValidateActions; +import org.elasticsearch.action.support.IndicesOptions; +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.xcontent.ObjectParser; +import org.elasticsearch.common.xcontent.XContentParser; + +import java.io.IOException; +import java.util.Arrays; +import java.util.HashSet; +import java.util.Set; + +import static org.elasticsearch.common.xcontent.ObjectParser.fromList; + +public class FieldCapabilitiesRequest extends ActionRequest + implements IndicesRequest.Replaceable { + public static final ParseField FIELDS_FIELD = new ParseField("fields"); + public static final String NAME = "field_caps_request"; + private String[] indices = Strings.EMPTY_ARRAY; + private IndicesOptions indicesOptions = IndicesOptions.strictExpandOpen(); + private String[] fields = Strings.EMPTY_ARRAY; + + private static ObjectParser PARSER = + new ObjectParser<>(NAME, FieldCapabilitiesRequest::new); + + static { + PARSER.declareStringArray(fromList(String.class, FieldCapabilitiesRequest::fields), + FIELDS_FIELD); + } + + public FieldCapabilitiesRequest() {} + + @Override + public void readFrom(StreamInput in) throws IOException { + super.readFrom(in); + fields = in.readStringArray(); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + out.writeStringArray(fields); + } + + public static FieldCapabilitiesRequest parseFields(XContentParser parser) throws IOException { + return PARSER.parse(parser, null); + } + + /** + * The list of field names to retrieve + */ + public FieldCapabilitiesRequest fields(String... fields) { + if (fields == null || fields.length == 0) { + throw new IllegalArgumentException("specified fields can't be null or empty"); + } + Set fieldSet = new HashSet<>(Arrays.asList(fields)); + this.fields = fieldSet.toArray(new String[0]); + return this; + } + + public String[] fields() { + return fields; + } + + /** + * + * The list of indices to lookup + */ + public FieldCapabilitiesRequest indices(String... 
indices) { + this.indices = indices; + return this; + } + + public FieldCapabilitiesRequest indicesOptions(IndicesOptions indicesOptions) { + this.indicesOptions = indicesOptions; + return this; + } + + @Override + public String[] indices() { + return indices; + } + + @Override + public IndicesOptions indicesOptions() { + return indicesOptions; + } + + @Override + public ActionRequestValidationException validate() { + ActionRequestValidationException validationException = null; + if (fields == null || fields.length == 0) { + validationException = + ValidateActions.addValidationError("no fields specified", validationException); + } + return validationException; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + + FieldCapabilitiesRequest that = (FieldCapabilitiesRequest) o; + + if (!Arrays.equals(indices, that.indices)) return false; + if (!indicesOptions.equals(that.indicesOptions)) return false; + return Arrays.equals(fields, that.fields); + } + + @Override + public int hashCode() { + int result = Arrays.hashCode(indices); + result = 31 * result + indicesOptions.hashCode(); + result = 31 * result + Arrays.hashCode(fields); + return result; + } +} diff --git a/core/src/main/java/org/elasticsearch/search/query/QuerySearchResultProvider.java b/core/src/main/java/org/elasticsearch/action/fieldcaps/FieldCapabilitiesRequestBuilder.java similarity index 51% rename from core/src/main/java/org/elasticsearch/search/query/QuerySearchResultProvider.java rename to core/src/main/java/org/elasticsearch/action/fieldcaps/FieldCapabilitiesRequestBuilder.java index 852a97e5248af..742d5b3ee3297 100644 --- a/core/src/main/java/org/elasticsearch/search/query/QuerySearchResultProvider.java +++ b/core/src/main/java/org/elasticsearch/action/fieldcaps/FieldCapabilitiesRequestBuilder.java @@ -17,25 +17,25 @@ * under the License. */ -package org.elasticsearch.search.query; +package org.elasticsearch.action.fieldcaps; -import org.elasticsearch.search.SearchPhaseResult; -import org.elasticsearch.search.fetch.FetchSearchResult; -import org.elasticsearch.transport.TransportResponse; +import org.elasticsearch.action.ActionRequestBuilder; +import org.elasticsearch.client.ElasticsearchClient; -public abstract class QuerySearchResultProvider extends TransportResponse implements SearchPhaseResult { - - /** - * Returns the query result iff it's included in this response otherwise null - */ - public QuerySearchResult queryResult() { - return null; +public class FieldCapabilitiesRequestBuilder extends + ActionRequestBuilder { + public FieldCapabilitiesRequestBuilder(ElasticsearchClient client, + FieldCapabilitiesAction action, + String... indices) { + super(client, action, new FieldCapabilitiesRequest().indices(indices)); } /** - * Returns the fetch result iff it's included in this response otherwise null + * The list of field names to retrieve. */ - public FetchSearchResult fetchResult() { - return null; + public FieldCapabilitiesRequestBuilder setFields(String... 
fields) { + request().fields(fields); + return this; } } diff --git a/core/src/main/java/org/elasticsearch/action/fieldcaps/FieldCapabilitiesResponse.java b/core/src/main/java/org/elasticsearch/action/fieldcaps/FieldCapabilitiesResponse.java new file mode 100644 index 0000000000000..9ff2cf3850b1f --- /dev/null +++ b/core/src/main/java/org/elasticsearch/action/fieldcaps/FieldCapabilitiesResponse.java @@ -0,0 +1,106 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.action.fieldcaps; + +import org.elasticsearch.action.ActionResponse; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.xcontent.ToXContent; +import org.elasticsearch.common.xcontent.XContentBuilder; + +import java.io.IOException; +import java.util.Collections; +import java.util.Map; + +/** + * Response for {@link FieldCapabilitiesRequest} requests. + */ +public class FieldCapabilitiesResponse extends ActionResponse implements ToXContent { + private Map> responseMap; + + FieldCapabilitiesResponse(Map> responseMap) { + this.responseMap = responseMap; + } + + /** + * Used for serialization + */ + FieldCapabilitiesResponse() { + this.responseMap = Collections.emptyMap(); + } + + /** + * Get the field capabilities map. + */ + public Map> get() { + return responseMap; + } + + /** + * + * Get the field capabilities per type for the provided {@code field}. 
+ */ + public Map getField(String field) { + return responseMap.get(field); + } + + @Override + public void readFrom(StreamInput in) throws IOException { + super.readFrom(in); + this.responseMap = + in.readMap(StreamInput::readString, FieldCapabilitiesResponse::readField); + } + + private static Map readField(StreamInput in) throws IOException { + return in.readMap(StreamInput::readString, FieldCapabilities::new); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + out.writeMap(responseMap, StreamOutput::writeString, FieldCapabilitiesResponse::writeField); + } + + private static void writeField(StreamOutput out, + Map map) throws IOException { + out.writeMap(map, StreamOutput::writeString, (valueOut, fc) -> fc.writeTo(valueOut)); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.field("fields", responseMap); + return builder; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + + FieldCapabilitiesResponse that = (FieldCapabilitiesResponse) o; + + return responseMap.equals(that.responseMap); + } + + @Override + public int hashCode() { + return responseMap.hashCode(); + } +} diff --git a/core/src/main/java/org/elasticsearch/action/fieldcaps/TransportFieldCapabilitiesAction.java b/core/src/main/java/org/elasticsearch/action/fieldcaps/TransportFieldCapabilitiesAction.java new file mode 100644 index 0000000000000..a7f268eaf5d8d --- /dev/null +++ b/core/src/main/java/org/elasticsearch/action/fieldcaps/TransportFieldCapabilitiesAction.java @@ -0,0 +1,134 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */
+
+package org.elasticsearch.action.fieldcaps;
+
+import org.elasticsearch.action.ActionListener;
+import org.elasticsearch.action.support.ActionFilters;
+import org.elasticsearch.action.support.HandledTransportAction;
+import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
+import org.elasticsearch.cluster.service.ClusterService;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.threadpool.ThreadPool;
+import org.elasticsearch.transport.TransportService;
+
+import java.util.HashMap;
+import java.util.Map;
+import java.util.concurrent.atomic.AtomicInteger;
+import java.util.concurrent.atomic.AtomicReferenceArray;
+
+public class TransportFieldCapabilitiesAction
+        extends HandledTransportAction<FieldCapabilitiesRequest, FieldCapabilitiesResponse> {
+    private final ClusterService clusterService;
+    private final TransportFieldCapabilitiesIndexAction shardAction;
+
+    @Inject
+    public TransportFieldCapabilitiesAction(Settings settings, TransportService transportService,
+                                            ClusterService clusterService, ThreadPool threadPool,
+                                            TransportFieldCapabilitiesIndexAction shardAction,
+                                            ActionFilters actionFilters,
+                                            IndexNameExpressionResolver
+                                                indexNameExpressionResolver) {
+        super(settings, FieldCapabilitiesAction.NAME, threadPool, transportService,
+            actionFilters, indexNameExpressionResolver, FieldCapabilitiesRequest::new);
+        this.clusterService = clusterService;
+        this.shardAction = shardAction;
+    }
+
+    @Override
+    protected void doExecute(FieldCapabilitiesRequest request,
+                             final ActionListener<FieldCapabilitiesResponse> listener) {
+        ClusterState clusterState = clusterService.state();
+        String[] concreteIndices =
+            indexNameExpressionResolver.concreteIndexNames(clusterState, request);
+        final AtomicInteger indexCounter = new AtomicInteger();
+        final AtomicInteger completionCounter = new AtomicInteger(concreteIndices.length);
+        final AtomicReferenceArray<Object> indexResponses =
+            new AtomicReferenceArray<>(concreteIndices.length);
+        if (concreteIndices.length == 0) {
+            listener.onResponse(new FieldCapabilitiesResponse());
+        } else {
+            for (String index : concreteIndices) {
+                FieldCapabilitiesIndexRequest indexRequest =
+                    new FieldCapabilitiesIndexRequest(request.fields(), index);
+                shardAction.execute(indexRequest,
+                    new ActionListener<FieldCapabilitiesIndexResponse>() {
+                        @Override
+                        public void onResponse(FieldCapabilitiesIndexResponse result) {
+                            indexResponses.set(indexCounter.getAndIncrement(), result);
+                            if (completionCounter.decrementAndGet() == 0) {
+                                listener.onResponse(merge(indexResponses));
+                            }
+                        }
+
+                        @Override
+                        public void onFailure(Exception e) {
+                            indexResponses.set(indexCounter.getAndIncrement(), e);
+                            if (completionCounter.decrementAndGet() == 0) {
+                                listener.onResponse(merge(indexResponses));
+                            }
+                        }
+                    });
+            }
+        }
+    }
+
+    private FieldCapabilitiesResponse merge(AtomicReferenceArray<Object> indexResponses) {
+        Map<String, Map<String, FieldCapabilities.Builder>> responseMapBuilder = new HashMap<>();
+        for (int i = 0; i < indexResponses.length(); i++) {
+            Object element = indexResponses.get(i);
+            if (element instanceof FieldCapabilitiesIndexResponse == false) {
+                assert element instanceof Exception;
+                continue;
+            }
+            FieldCapabilitiesIndexResponse response = (FieldCapabilitiesIndexResponse) element;
+            for (String field : response.get().keySet()) {
+                Map<String, FieldCapabilities.Builder> typeMap = responseMapBuilder.get(field);
+                if (typeMap == null) {
+                    typeMap = new HashMap<>();
+                    responseMapBuilder.put(field, typeMap);
+                }
+                FieldCapabilities fieldCap = response.getField(field);
+                FieldCapabilities.Builder builder = typeMap.get(fieldCap.getType());
+                if (builder == null) {
+                    builder = new FieldCapabilities.Builder(field, fieldCap.getType());
+                    typeMap.put(fieldCap.getType(), builder);
+                }
+                builder.add(response.getIndexName(),
+                    fieldCap.isSearchable(), fieldCap.isAggregatable());
+            }
+        }
+
+        Map<String, Map<String, FieldCapabilities>> responseMap = new HashMap<>();
+        for (Map.Entry<String, Map<String, FieldCapabilities.Builder>> entry :
+                responseMapBuilder.entrySet()) {
+            Map<String, FieldCapabilities> typeMap = new HashMap<>();
+            boolean multiTypes = entry.getValue().size() > 1;
+            for (Map.Entry<String, FieldCapabilities.Builder> fieldEntry :
+                    entry.getValue().entrySet()) {
+                typeMap.put(fieldEntry.getKey(), fieldEntry.getValue().build(multiTypes));
+            }
+            responseMap.put(entry.getKey(), typeMap);
+        }
+
+        return new FieldCapabilitiesResponse(responseMap);
+    }
+}
diff --git a/core/src/main/java/org/elasticsearch/action/fieldcaps/TransportFieldCapabilitiesIndexAction.java b/core/src/main/java/org/elasticsearch/action/fieldcaps/TransportFieldCapabilitiesIndexAction.java
new file mode 100644
index 0000000000000..5bab727686015
--- /dev/null
+++ b/core/src/main/java/org/elasticsearch/action/fieldcaps/TransportFieldCapabilitiesIndexAction.java
@@ -0,0 +1,121 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */ + +package org.elasticsearch.action.fieldcaps; + +import org.elasticsearch.action.support.ActionFilters; +import org.elasticsearch.action.support.single.shard.TransportSingleShardAction; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.block.ClusterBlockException; +import org.elasticsearch.cluster.block.ClusterBlockLevel; +import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; +import org.elasticsearch.cluster.routing.ShardsIterator; +import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.index.mapper.MappedFieldType; +import org.elasticsearch.index.mapper.MapperService; +import org.elasticsearch.index.shard.ShardId; +import org.elasticsearch.indices.IndicesService; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.TransportService; + +import java.util.HashMap; +import java.util.HashSet; +import java.util.Map; +import java.util.Set; + +public class TransportFieldCapabilitiesIndexAction + extends TransportSingleShardAction { + + private static final String ACTION_NAME = FieldCapabilitiesAction.NAME + "[index]"; + + protected final ClusterService clusterService; + private final IndicesService indicesService; + + @Inject + public TransportFieldCapabilitiesIndexAction(Settings settings, + ClusterService clusterService, + TransportService transportService, + IndicesService indicesService, + ThreadPool threadPool, + ActionFilters actionFilters, + IndexNameExpressionResolver + indexNameExpressionResolver) { + super(settings, + ACTION_NAME, + threadPool, + clusterService, + transportService, + actionFilters, + indexNameExpressionResolver, + FieldCapabilitiesIndexRequest::new, + ThreadPool.Names.MANAGEMENT); + this.clusterService = clusterService; + this.indicesService = indicesService; + } + + @Override + protected boolean resolveIndex(FieldCapabilitiesIndexRequest request) { + //internal action, index already resolved + return false; + } + + @Override + protected ShardsIterator shards(ClusterState state, InternalRequest request) { + // Will balance requests between shards + // Resolve patterns and deduplicate + return state.routingTable().index(request.concreteIndex()).randomAllActiveShardsIt(); + } + + @Override + protected FieldCapabilitiesIndexResponse shardOperation( + final FieldCapabilitiesIndexRequest request, + ShardId shardId) { + MapperService mapperService = + indicesService.indexServiceSafe(shardId.getIndex()).mapperService(); + Set fieldNames = new HashSet<>(); + for (String field : request.fields()) { + fieldNames.addAll(mapperService.simpleMatchToIndexNames(field)); + } + Map responseMap = new HashMap<>(); + for (String field : fieldNames) { + MappedFieldType ft = mapperService.fullName(field); + FieldCapabilities fieldCap = new FieldCapabilities(field, + ft.typeName(), + ft.isSearchable(), + ft.isAggregatable()); + responseMap.put(field, fieldCap); + } + return new FieldCapabilitiesIndexResponse(shardId.getIndexName(), responseMap); + } + + @Override + protected FieldCapabilitiesIndexResponse newResponse() { + return new FieldCapabilitiesIndexResponse(); + } + + @Override + protected ClusterBlockException checkRequestBlock(ClusterState state, + InternalRequest request) { + return state.blocks().indexBlockedException(ClusterBlockLevel.METADATA_READ, + request.concreteIndex()); + } +} diff --git a/core/src/main/java/org/elasticsearch/action/get/MultiGetRequest.java 
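// Illustrative usage sketch, not part of the patch above: how a transport-level caller could
// exercise the new field capabilities action through the request builder it introduces. The
// `client` instance, index pattern, field names, and the "date" type key are assumptions for
// this example; the ActionModule registration that makes the action callable is outside this
// section.
FieldCapabilitiesResponse fieldCaps =
    new FieldCapabilitiesRequestBuilder(client, FieldCapabilitiesAction.INSTANCE, "logs-*")
        .setFields("timestamp", "message")
        .get();
// One entry per field, keyed by mapped type name, merged across all matching indices.
Map<String, Map<String, FieldCapabilities>> capsByField = fieldCaps.get();
FieldCapabilities timestampCaps = capsByField.get("timestamp").get("date");
boolean searchable = timestampCaps.isSearchable();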
b/core/src/main/java/org/elasticsearch/action/get/MultiGetRequest.java index d4627391b1169..20a619cec2c70 100644 --- a/core/src/main/java/org/elasticsearch/action/get/MultiGetRequest.java +++ b/core/src/main/java/org/elasticsearch/action/get/MultiGetRequest.java @@ -44,6 +44,7 @@ import java.util.Collections; import java.util.Iterator; import java.util.List; +import java.util.Locale; public class MultiGetRequest extends ActionRequest implements Iterable, CompositeIndicesRequest, RealtimeRequest { @@ -319,6 +320,14 @@ public MultiGetRequest add(@Nullable String defaultIndex, @Nullable String defau boolean allowExplicitIndex) throws IOException { XContentParser.Token token; String currentFieldName = null; + if ((token = parser.nextToken()) != XContentParser.Token.START_OBJECT) { + final String message = String.format( + Locale.ROOT, + "unexpected token [%s], expected [%s]", + token, + XContentParser.Token.START_OBJECT); + throw new ParsingException(parser.getTokenLocation(), message); + } while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { if (token == XContentParser.Token.FIELD_NAME) { currentFieldName = parser.currentName(); @@ -327,7 +336,22 @@ public MultiGetRequest add(@Nullable String defaultIndex, @Nullable String defau parseDocuments(parser, this.items, defaultIndex, defaultType, defaultFields, defaultFetchSource, defaultRouting, allowExplicitIndex); } else if ("ids".equals(currentFieldName)) { parseIds(parser, this.items, defaultIndex, defaultType, defaultFields, defaultFetchSource, defaultRouting); + } else { + final String message = String.format( + Locale.ROOT, + "unknown key [%s] for a %s, expected [docs] or [ids]", + currentFieldName, + token); + throw new ParsingException(parser.getTokenLocation(), message); } + } else { + final String message = String.format( + Locale.ROOT, + "unexpected token [%s], expected [%s] or [%s]", + token, + XContentParser.Token.FIELD_NAME, + XContentParser.Token.START_ARRAY); + throw new ParsingException(parser.getTokenLocation(), message); } } return this; diff --git a/core/src/main/java/org/elasticsearch/action/search/AbstractSearchAsyncAction.java b/core/src/main/java/org/elasticsearch/action/search/AbstractSearchAsyncAction.java index bf95b7517c6ed..c2137803411f3 100644 --- a/core/src/main/java/org/elasticsearch/action/search/AbstractSearchAsyncAction.java +++ b/core/src/main/java/org/elasticsearch/action/search/AbstractSearchAsyncAction.java @@ -43,6 +43,7 @@ import java.util.List; import java.util.Map; import java.util.concurrent.Executor; +import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicInteger; import java.util.function.Function; import java.util.stream.Collectors; @@ -67,17 +68,17 @@ abstract class AbstractSearchAsyncAction exten private final SetOnce> shardFailures = new SetOnce<>(); private final Object shardFailuresMutex = new Object(); private final AtomicInteger successfulOps = new AtomicInteger(); - private final long startTime; + private final TransportSearchAction.SearchTimeProvider timeProvider; protected AbstractSearchAsyncAction(String name, Logger logger, SearchTransportService searchTransportService, Function nodeIdToConnection, Map aliasFilter, Map concreteIndexBoosts, Executor executor, SearchRequest request, - ActionListener listener, GroupShardsIterator shardsIts, long startTime, + ActionListener listener, GroupShardsIterator shardsIts, TransportSearchAction.SearchTimeProvider timeProvider, long clusterStateVersion, SearchTask task, SearchPhaseResults resultConsumer) { 
super(name, request, shardsIts, logger); - this.startTime = startTime; + this.timeProvider = timeProvider; this.logger = logger; this.searchTransportService = searchTransportService; this.executor = executor; @@ -94,10 +95,9 @@ protected AbstractSearchAsyncAction(String name, Logger logger, SearchTransportS /** * Builds how long it took to execute the search. */ - private long buildTookInMillis() { - // protect ourselves against time going backwards - // negative values don't make sense and we want to be able to serialize that thing as a vLong - return Math.max(1, System.currentTimeMillis() - startTime); + long buildTookInMillis() { + return TimeUnit.NANOSECONDS.toMillis( + timeProvider.getRelativeCurrentNanos() - timeProvider.getRelativeStartNanos()); } /** @@ -122,7 +122,8 @@ public final void executeNextPhase(SearchPhase currentPhase, SearchPhase nextPha if (successfulOps.get() == 0) { // we have 0 successful results that means we shortcut stuff and return a failure if (logger.isDebugEnabled()) { final ShardOperationFailedException[] shardSearchFailures = ExceptionsHelper.groupBy(buildShardFailures()); - Throwable cause = ElasticsearchException.guessRootCauses(shardSearchFailures[0].getCause())[0]; + Throwable cause = shardSearchFailures.length == 0 ? null : + ElasticsearchException.guessRootCauses(shardSearchFailures[0].getCause())[0]; logger.debug((Supplier) () -> new ParameterizedMessage("All shards failed for phase: [{}]", getName()), cause); } @@ -130,7 +131,7 @@ public final void executeNextPhase(SearchPhase currentPhase, SearchPhase nextPha } else { if (logger.isTraceEnabled()) { final String resultsFrom = results.getSuccessfulResults() - .map(r -> r.shardTarget().toString()).collect(Collectors.joining(",")); + .map(r -> r.getSearchShardTarget().toString()).collect(Collectors.joining(",")); logger.trace("[{}] Moving to next phase: [{}], based on results from: {} (cluster state version: {})", currentPhase.getName(), nextPhase.getName(), resultsFrom, clusterStateVersion); } @@ -158,10 +159,10 @@ private ShardSearchFailure[] buildShardFailures() { if (shardFailures == null) { return ShardSearchFailure.EMPTY_ARRAY; } - List> entries = shardFailures.asList(); + List entries = shardFailures.asList(); ShardSearchFailure[] failures = new ShardSearchFailure[entries.size()]; for (int i = 0; i < failures.length; i++) { - failures[i] = entries.get(i).value; + failures[i] = entries.get(i); } return failures; } @@ -208,8 +209,8 @@ public final void onShardFailure(final int shardIndex, @Nullable SearchShardTarg private void raisePhaseFailure(SearchPhaseExecutionException exception) { results.getSuccessfulResults().forEach((entry) -> { try { - Transport.Connection connection = nodeIdToConnection.apply(entry.shardTarget().getNodeId()); - sendReleaseSearchContext(entry.id(), connection); + Transport.Connection connection = nodeIdToConnection.apply(entry.getSearchShardTarget().getNodeId()); + sendReleaseSearchContext(entry.getRequestId(), connection); } catch (Exception inner) { inner.addSuppressed(exception); logger.trace("failed to release context", inner); @@ -219,18 +220,18 @@ private void raisePhaseFailure(SearchPhaseExecutionException exception) { } @Override - public final void onShardSuccess(int shardIndex, Result result) { + public final void onShardSuccess(Result result) { successfulOps.incrementAndGet(); - results.consumeResult(shardIndex, result); + results.consumeResult(result); if (logger.isTraceEnabled()) { - logger.trace("got first-phase result from {}", result != null ? 
result.shardTarget() : null); + logger.trace("got first-phase result from {}", result != null ? result.getSearchShardTarget() : null); } // clean a previous error on this shard group (note, this code will be serialized on the same shardIndex value level // so its ok concurrency wise to miss potentially the shard failures being created because of another failure // in the #addShardFailure, because by definition, it will happen on *another* shardIndex AtomicArray shardFailures = this.shardFailures.get(); if (shardFailures != null) { - shardFailures.set(shardIndex, null); + shardFailures.set(result.getShardIndex(), null); } } @@ -300,7 +301,7 @@ public final ShardSearchTransportRequest buildShardSearchRequest(ShardIterator s assert filter != null; float indexBoost = concreteIndexBoosts.getOrDefault(shard.index().getUUID(), DEFAULT_INDEX_BOOST); return new ShardSearchTransportRequest(request, shardIt.shardId(), getNumShards(), - filter, indexBoost, startTime); + filter, indexBoost, timeProvider.getAbsoluteStartMillis()); } /** diff --git a/core/src/main/java/org/elasticsearch/action/search/CountedCollector.java b/core/src/main/java/org/elasticsearch/action/search/CountedCollector.java index 65f2d2d280ba1..2dd255aa14c69 100644 --- a/core/src/main/java/org/elasticsearch/action/search/CountedCollector.java +++ b/core/src/main/java/org/elasticsearch/action/search/CountedCollector.java @@ -23,18 +23,20 @@ import org.elasticsearch.search.SearchPhaseResult; import org.elasticsearch.search.SearchShardTarget; +import java.util.function.Consumer; + /** * This is a simple base class to simplify fan out to shards and collect their results. Each results passed to - * {@link #onResult(int, SearchPhaseResult, SearchShardTarget)} will be set to the provided result array + * {@link #onResult(SearchPhaseResult)} will be set to the provided result array * where the given index is used to set the result on the array. 
*/ final class CountedCollector { - private final ResultConsumer resultConsumer; + private final Consumer resultConsumer; private final CountDown counter; private final Runnable onFinish; private final SearchPhaseContext context; - CountedCollector(ResultConsumer resultConsumer, int expectedOps, Runnable onFinish, SearchPhaseContext context) { + CountedCollector(Consumer resultConsumer, int expectedOps, Runnable onFinish, SearchPhaseContext context) { this.resultConsumer = resultConsumer; this.counter = new CountDown(expectedOps); this.onFinish = onFinish; @@ -55,10 +57,9 @@ void countDown() { /** * Sets the result to the given array index and then runs {@link #countDown()} */ - void onResult(int index, R result, SearchShardTarget target) { + void onResult(R result) { try { - result.shardTarget(target); - resultConsumer.consume(index, result); + resultConsumer.accept(result); } finally { countDown(); } @@ -75,12 +76,4 @@ void onFailure(final int shardIndex, @Nullable SearchShardTarget shardTarget, Ex countDown(); } } - - /** - * A functional interface to plug in shard result consumers to this collector - */ - @FunctionalInterface - public interface ResultConsumer { - void consume(int shardIndex, R result); - } } diff --git a/core/src/main/java/org/elasticsearch/action/search/DfsQueryPhase.java b/core/src/main/java/org/elasticsearch/action/search/DfsQueryPhase.java index 0ac3c69b8ebc7..353baf117502a 100644 --- a/core/src/main/java/org/elasticsearch/action/search/DfsQueryPhase.java +++ b/core/src/main/java/org/elasticsearch/action/search/DfsQueryPhase.java @@ -20,16 +20,17 @@ import org.apache.logging.log4j.message.ParameterizedMessage; import org.apache.logging.log4j.util.Supplier; -import org.elasticsearch.action.ActionListener; import org.elasticsearch.common.util.concurrent.AtomicArray; +import org.elasticsearch.search.SearchPhaseResult; import org.elasticsearch.search.SearchShardTarget; import org.elasticsearch.search.dfs.AggregatedDfs; import org.elasticsearch.search.dfs.DfsSearchResult; import org.elasticsearch.search.query.QuerySearchRequest; -import org.elasticsearch.search.query.QuerySearchResultProvider; +import org.elasticsearch.search.query.QuerySearchResult; import org.elasticsearch.transport.Transport; import java.io.IOException; +import java.util.List; import java.util.function.Function; /** @@ -40,16 +41,16 @@ * @see CountedCollector#onFailure(int, SearchShardTarget, Exception) */ final class DfsQueryPhase extends SearchPhase { - private final InitialSearchPhase.SearchPhaseResults queryResult; + private final InitialSearchPhase.SearchPhaseResults queryResult; private final SearchPhaseController searchPhaseController; private final AtomicArray dfsSearchResults; - private final Function, SearchPhase> nextPhaseFactory; + private final Function, SearchPhase> nextPhaseFactory; private final SearchPhaseContext context; private final SearchTransportService searchTransportService; DfsQueryPhase(AtomicArray dfsSearchResults, SearchPhaseController searchPhaseController, - Function, SearchPhase> nextPhaseFactory, + Function, SearchPhase> nextPhaseFactory, SearchPhaseContext context) { super("dfs_query"); this.queryResult = searchPhaseController.newSearchPhaseResults(context.getRequest(), context.getNumShards()); @@ -64,22 +65,26 @@ final class DfsQueryPhase extends SearchPhase { public void run() throws IOException { // TODO we can potentially also consume the actual per shard results from the initial phase here in the aggregateDfs // to free up memory early - final AggregatedDfs dfs = 
searchPhaseController.aggregateDfs(dfsSearchResults); - final CountedCollector counter = new CountedCollector<>(queryResult::consumeResult, - dfsSearchResults.asList().size(), - () -> { - context.executeNextPhase(this, nextPhaseFactory.apply(queryResult)); - }, context); - for (final AtomicArray.Entry entry : dfsSearchResults.asList()) { - DfsSearchResult dfsResult = entry.value; - final int shardIndex = entry.index; - final SearchShardTarget searchShardTarget = dfsResult.shardTarget(); + final List resultList = dfsSearchResults.asList(); + final AggregatedDfs dfs = searchPhaseController.aggregateDfs(resultList); + final CountedCollector counter = new CountedCollector<>(queryResult::consumeResult, + resultList.size(), + () -> context.executeNextPhase(this, nextPhaseFactory.apply(queryResult)), context); + for (final DfsSearchResult dfsResult : resultList) { + final SearchShardTarget searchShardTarget = dfsResult.getSearchShardTarget(); Transport.Connection connection = context.getConnection(searchShardTarget.getNodeId()); - QuerySearchRequest querySearchRequest = new QuerySearchRequest(context.getRequest(), dfsResult.id(), dfs); + QuerySearchRequest querySearchRequest = new QuerySearchRequest(context.getRequest(), dfsResult.getRequestId(), dfs); + final int shardIndex = dfsResult.getShardIndex(); searchTransportService.sendExecuteQuery(connection, querySearchRequest, context.getTask(), - ActionListener.wrap( - result -> counter.onResult(shardIndex, result, searchShardTarget), - exception -> { + new SearchActionListener(searchShardTarget, shardIndex) { + + @Override + protected void innerOnResponse(QuerySearchResult response) { + counter.onResult(response); + } + + @Override + public void onFailure(Exception exception) { try { if (context.getLogger().isDebugEnabled()) { context.getLogger().debug((Supplier) () -> new ParameterizedMessage("[{}] Failed to execute query phase", @@ -92,7 +97,8 @@ public void run() throws IOException { // release it again to be in the safe side context.sendReleaseSearchContext(querySearchRequest.id(), connection); } - })); + } + }); } } } diff --git a/core/src/main/java/org/elasticsearch/action/search/FetchSearchPhase.java b/core/src/main/java/org/elasticsearch/action/search/FetchSearchPhase.java index 20d91770675f7..428053c357bd4 100644 --- a/core/src/main/java/org/elasticsearch/action/search/FetchSearchPhase.java +++ b/core/src/main/java/org/elasticsearch/action/search/FetchSearchPhase.java @@ -23,15 +23,14 @@ import org.apache.logging.log4j.message.ParameterizedMessage; import org.apache.logging.log4j.util.Supplier; import org.apache.lucene.search.ScoreDoc; -import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.ActionRunnable; import org.elasticsearch.common.util.concurrent.AtomicArray; +import org.elasticsearch.search.SearchPhaseResult; import org.elasticsearch.search.SearchShardTarget; import org.elasticsearch.search.fetch.FetchSearchResult; import org.elasticsearch.search.fetch.ShardFetchSearchRequest; import org.elasticsearch.search.internal.InternalSearchResponse; import org.elasticsearch.search.query.QuerySearchResult; -import org.elasticsearch.search.query.QuerySearchResultProvider; import org.elasticsearch.transport.Transport; import java.io.IOException; @@ -45,13 +44,13 @@ final class FetchSearchPhase extends SearchPhase { private final AtomicArray fetchResults; private final SearchPhaseController searchPhaseController; - private final AtomicArray queryResults; + private final AtomicArray queryResults; private final Function 
nextPhaseFactory; private final SearchPhaseContext context; private final Logger logger; - private final InitialSearchPhase.SearchPhaseResults resultConsumer; + private final InitialSearchPhase.SearchPhaseResults resultConsumer; - FetchSearchPhase(InitialSearchPhase.SearchPhaseResults resultConsumer, + FetchSearchPhase(InitialSearchPhase.SearchPhaseResults resultConsumer, SearchPhaseController searchPhaseController, SearchPhaseContext context) { this(resultConsumer, searchPhaseController, context, @@ -59,7 +58,7 @@ final class FetchSearchPhase extends SearchPhase { (finalResponse) -> sendResponsePhase(finalResponse, context))); } - FetchSearchPhase(InitialSearchPhase.SearchPhaseResults resultConsumer, + FetchSearchPhase(InitialSearchPhase.SearchPhaseResults resultConsumer, SearchPhaseController searchPhaseController, SearchPhaseContext context, Function nextPhaseFactory) { super("fetch"); @@ -98,35 +97,35 @@ public void onFailure(Exception e) { private void innerRun() throws IOException { final int numShards = context.getNumShards(); final boolean isScrollSearch = context.getRequest().scroll() != null; - ScoreDoc[] sortedShardDocs = searchPhaseController.sortDocs(isScrollSearch, queryResults); + List phaseResults = queryResults.asList(); + ScoreDoc[] sortedShardDocs = searchPhaseController.sortDocs(isScrollSearch, phaseResults, context.getNumShards()); String scrollId = isScrollSearch ? TransportSearchHelper.buildScrollId(queryResults) : null; - List> queryResultsAsList = queryResults.asList(); final SearchPhaseController.ReducedQueryPhase reducedQueryPhase = resultConsumer.reduce(); final boolean queryAndFetchOptimization = queryResults.length() == 1; final Runnable finishPhase = () -> moveToNextPhase(searchPhaseController, sortedShardDocs, scrollId, reducedQueryPhase, queryAndFetchOptimization ? queryResults : fetchResults); if (queryAndFetchOptimization) { - assert queryResults.get(0) == null || queryResults.get(0).fetchResult() != null; + assert phaseResults.isEmpty() || phaseResults.get(0).fetchResult() != null; // query AND fetch optimization finishPhase.run(); } else { final IntArrayList[] docIdsToLoad = searchPhaseController.fillDocIdsToLoad(numShards, sortedShardDocs); if (sortedShardDocs.length == 0) { // no docs to fetch -- sidestep everything and return - queryResultsAsList.stream() - .map(e -> e.value.queryResult()) + phaseResults.stream() + .map(e -> e.queryResult()) .forEach(this::releaseIrrelevantSearchContext); // we have to release contexts here to free up resources finishPhase.run(); } else { final ScoreDoc[] lastEmittedDocPerShard = isScrollSearch ? 
searchPhaseController.getLastEmittedDocPerShard(reducedQueryPhase, sortedShardDocs, numShards) : null; - final CountedCollector counter = new CountedCollector<>(fetchResults::set, + final CountedCollector counter = new CountedCollector<>(r -> fetchResults.set(r.getShardIndex(), r), docIdsToLoad.length, // we count down every shard in the result no matter if we got any results or not finishPhase, context); for (int i = 0; i < docIdsToLoad.length; i++) { IntArrayList entry = docIdsToLoad[i]; - QuerySearchResultProvider queryResult = queryResults.get(i); + SearchPhaseResult queryResult = queryResults.get(i); if (entry == null) { // no results for this shard ID if (queryResult != null) { // if we got some hits from this shard we have to release the context there @@ -137,10 +136,10 @@ private void innerRun() throws IOException { // in any case we count down this result since we don't talk to this shard anymore counter.countDown(); } else { - Transport.Connection connection = context.getConnection(queryResult.shardTarget().getNodeId()); - ShardFetchSearchRequest fetchSearchRequest = createFetchRequest(queryResult.queryResult().id(), i, entry, + Transport.Connection connection = context.getConnection(queryResult.getSearchShardTarget().getNodeId()); + ShardFetchSearchRequest fetchSearchRequest = createFetchRequest(queryResult.queryResult().getRequestId(), i, entry, lastEmittedDocPerShard); - executeFetch(i, queryResult.shardTarget(), counter, fetchSearchRequest, queryResult.queryResult(), + executeFetch(i, queryResult.getSearchShardTarget(), counter, fetchSearchRequest, queryResult.queryResult(), connection); } } @@ -159,10 +158,10 @@ private void executeFetch(final int shardIndex, final SearchShardTarget shardTar final ShardFetchSearchRequest fetchSearchRequest, final QuerySearchResult querySearchResult, final Transport.Connection connection) { context.getSearchTransport().sendExecuteFetch(connection, fetchSearchRequest, context.getTask(), - new ActionListener() { + new SearchActionListener(shardTarget, shardIndex) { @Override - public void onResponse(FetchSearchResult result) { - counter.onResult(shardIndex, result, shardTarget); + public void innerOnResponse(FetchSearchResult result) { + counter.onResult(result); } @Override @@ -191,8 +190,8 @@ private void releaseIrrelevantSearchContext(QuerySearchResult queryResult) { // and if it has at lease one hit that didn't make it to the global topDocs if (context.getRequest().scroll() == null && queryResult.hasHits()) { try { - Transport.Connection connection = context.getConnection(queryResult.shardTarget().getNodeId()); - context.sendReleaseSearchContext(queryResult.id(), connection); + Transport.Connection connection = context.getConnection(queryResult.getSearchShardTarget().getNodeId()); + context.sendReleaseSearchContext(queryResult.getRequestId(), connection); } catch (Exception e) { context.getLogger().trace("failed to release context", e); } @@ -201,9 +200,9 @@ private void releaseIrrelevantSearchContext(QuerySearchResult queryResult) { private void moveToNextPhase(SearchPhaseController searchPhaseController, ScoreDoc[] sortedDocs, String scrollId, SearchPhaseController.ReducedQueryPhase reducedQueryPhase, - AtomicArray fetchResultsArr) { + AtomicArray fetchResultsArr) { final InternalSearchResponse internalResponse = searchPhaseController.merge(context.getRequest().scroll() != null, - sortedDocs, reducedQueryPhase, fetchResultsArr); + sortedDocs, reducedQueryPhase, fetchResultsArr.asList(), fetchResultsArr::get); 
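// Note on the reshaped merge(...) call above: the AtomicArray of fetch results is handed over
// twice, once as the list of non-null results and once, via a method reference, as a positional
// lookup by shard index. A minimal sketch of that adapter under this patch's signatures
// (`shardIndex` is a hypothetical value; IntFunction comes from java.util.function):
IntFunction<SearchPhaseResult> resultsLookup = fetchResultsArr::get;
SearchPhaseResult resultForShard = resultsLookup.apply(shardIndex);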
context.executeNextPhase(this, nextPhaseFactory.apply(context.buildSearchResponse(internalResponse, scrollId))); } diff --git a/core/src/main/java/org/elasticsearch/action/search/InitialSearchPhase.java b/core/src/main/java/org/elasticsearch/action/search/InitialSearchPhase.java index f21e9d228d69f..be91cebe50117 100644 --- a/core/src/main/java/org/elasticsearch/action/search/InitialSearchPhase.java +++ b/core/src/main/java/org/elasticsearch/action/search/InitialSearchPhase.java @@ -21,7 +21,6 @@ import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; import org.apache.logging.log4j.util.Supplier; -import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.NoShardAvailableActionException; import org.elasticsearch.action.support.TransportActions; import org.elasticsearch.cluster.routing.GroupShardsIterator; @@ -144,10 +143,11 @@ private void performPhaseOnShard(final int shardIndex, final ShardIterator shard onShardFailure(shardIndex, null, null, shardIt, new NoShardAvailableActionException(shardIt.shardId())); } else { try { - executePhaseOnShard(shardIt, shard, new ActionListener() { + executePhaseOnShard(shardIt, shard, new SearchActionListener(new SearchShardTarget(shard.currentNodeId(), + shardIt.shardId()), shardIndex) { @Override - public void onResponse(FirstResult result) { - onShardResult(shardIndex, shard.currentNodeId(), result, shardIt); + public void innerOnResponse(FirstResult result) { + onShardResult(result, shardIt); } @Override @@ -164,9 +164,10 @@ public void onFailure(Exception t) { } } - private void onShardResult(int shardIndex, String nodeId, FirstResult result, ShardIterator shardIt) { - result.shardTarget(new SearchShardTarget(nodeId, shardIt.shardId())); - onShardSuccess(shardIndex, result); + private void onShardResult(FirstResult result, ShardIterator shardIt) { + assert result.getShardIndex() != -1 : "shard index is not set"; + assert result.getSearchShardTarget() != null : "search shard target must not be null"; + onShardSuccess(result); // we need to increment successful ops first before we compare the exit condition otherwise if we // are fast we could concurrently update totalOps but then preempt one of the threads which can // cause the successor to read a wrong value from successfulOps if second phase is very fast ie. count etc. @@ -185,7 +186,7 @@ private void onShardResult(int shardIndex, String nodeId, FirstResult result, Sh /** * Executed once all shard results have been received and processed * @see #onShardFailure(int, SearchShardTarget, Exception) - * @see #onShardSuccess(int, SearchPhaseResult) + * @see #onShardSuccess(SearchPhaseResult) */ abstract void onPhaseDone(); // as a tribute to @kimchy aka. finishHim() @@ -201,12 +202,10 @@ private void onShardResult(int shardIndex, String nodeId, FirstResult result, Sh /** * Executed once for every successful shard level request. - * @param shardIndex the internal index for this shard. Each shard has an index / ordinal assigned that is used to reference - * it's results * @param result the result returned form the shard * */ - abstract void onShardSuccess(int shardIndex, FirstResult result); + abstract void onShardSuccess(FirstResult result); /** * Sends the request to the actual shard. 
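// Sketch of the listener wiring this hunk introduces, with assumed local names (nodeId, shardId,
// shardIndex, counter): SearchActionListener stamps the shard index and SearchShardTarget onto
// each SearchPhaseResult before the phase consumes it, so results can be stored by
// result.getShardIndex() instead of threading the index through every callback.
SearchShardTarget target = new SearchShardTarget(nodeId, shardId);
SearchActionListener<QuerySearchResult> perShardListener =
    new SearchActionListener<QuerySearchResult>(target, shardIndex) {
        @Override
        protected void innerOnResponse(QuerySearchResult response) {
            // shard index and search shard target are already set on the response here
            counter.onResult(response);
        }

        @Override
        public void onFailure(Exception e) {
            counter.onFailure(shardIndex, target, e);
        }
    };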
@@ -214,7 +213,7 @@ private void onShardResult(int shardIndex, String nodeId, FirstResult result, Sh * @param shard the shard routing to send the request for * @param listener the listener to notify on response */ - protected abstract void executePhaseOnShard(ShardIterator shardIt, ShardRouting shard, ActionListener listener); + protected abstract void executePhaseOnShard(ShardIterator shardIt, ShardRouting shard, SearchActionListener listener); /** * This class acts as a basic result collection that can be extended to do on-the-fly reduction or result processing @@ -237,17 +236,16 @@ final int getNumShards() { * A stream of all non-null (successful) shard results */ final Stream getSuccessfulResults() { - return results.asList().stream().map(e -> e.value); + return results.asList().stream(); } /** * Consumes a single shard result - * @param shardIndex the shards index, this is a 0-based id that is used to establish a 1 to 1 mapping to the searched shards * @param result the shards result */ - void consumeResult(int shardIndex, Result result) { - assert results.get(shardIndex) == null : "shardIndex: " + shardIndex + " is already set"; - results.set(shardIndex, result); + void consumeResult(Result result) { + assert results.get(result.getShardIndex()) == null : "shardIndex: " + result.getShardIndex() + " is already set"; + results.set(result.getShardIndex(), result); } /** diff --git a/core/src/main/java/org/elasticsearch/action/search/RemoteClusterConnection.java b/core/src/main/java/org/elasticsearch/action/search/RemoteClusterConnection.java index 73bc4f2ee7ed3..aea1aab7d3e36 100644 --- a/core/src/main/java/org/elasticsearch/action/search/RemoteClusterConnection.java +++ b/core/src/main/java/org/elasticsearch/action/search/RemoteClusterConnection.java @@ -35,6 +35,7 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.CancellableThreads; import org.elasticsearch.common.util.concurrent.AbstractRunnable; +import org.elasticsearch.common.util.concurrent.ThreadContext; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.ConnectTransportException; import org.elasticsearch.transport.ConnectionProfile; @@ -59,7 +60,6 @@ import java.util.concurrent.ArrayBlockingQueue; import java.util.concurrent.BlockingQueue; import java.util.concurrent.ConcurrentHashMap; -import java.util.concurrent.CopyOnWriteArrayList; import java.util.concurrent.ExecutorService; import java.util.concurrent.RejectedExecutionException; import java.util.concurrent.Semaphore; @@ -373,10 +373,19 @@ void collectRemoteNodes(Iterator seedNodes, // here we pass on the connection since we can only close it once the sendRequest returns otherwise // due to the async nature (it will return before it's actually sent) this can cause the request to fail // due to an already closed connection. 
- transportService.sendRequest(connection, - ClusterStateAction.NAME, request, TransportRequestOptions.EMPTY, + ThreadPool threadPool = transportService.getThreadPool(); + ThreadContext threadContext = threadPool.getThreadContext(); + TransportService.ContextRestoreResponseHandler responseHandler = new TransportService + .ContextRestoreResponseHandler<>(threadContext.newRestorableContext(false), new SniffClusterStateResponseHandler(transportService, connection, listener, seedNodes, cancellableThreads)); + try (ThreadContext.StoredContext ignore = threadContext.stashContext()) { + // we stash any context here since this is an internal execution and should not leak any + // existing context information. + threadContext.markAsSystemContext(); + transportService.sendRequest(connection, ClusterStateAction.NAME, request, TransportRequestOptions.EMPTY, + responseHandler); + } success = true; } finally { if (success == false) { @@ -445,6 +454,7 @@ public ClusterStateResponse newInstance() { @Override public void handleResponse(ClusterStateResponse response) { + assert transportService.getThreadPool().getThreadContext().isSystemContext() == false : "context is a system context"; try { try (Closeable theConnection = connection) { // the connection is unused - see comment in #collectRemoteNodes // we have to close this connection before we notify listeners - this is mainly needed for test correctness @@ -483,6 +493,7 @@ public void handleResponse(ClusterStateResponse response) { @Override public void handleException(TransportException exp) { + assert transportService.getThreadPool().getThreadContext().isSystemContext() == false : "context is a system context"; logger.warn((Supplier) () -> new ParameterizedMessage("fetching nodes from external cluster {} failed", clusterAlias), exp); @@ -505,4 +516,9 @@ boolean assertNoRunningConnections() { // for testing only assert connectHandler.running.availablePermits() == 1; return true; } + + boolean isNodeConnected(final DiscoveryNode node) { + return connectedNodes.contains(node); + } + } diff --git a/core/src/main/java/org/elasticsearch/action/search/RemoteClusterService.java b/core/src/main/java/org/elasticsearch/action/search/RemoteClusterService.java index d9e6862c6976d..089ce57a1146b 100644 --- a/core/src/main/java/org/elasticsearch/action/search/RemoteClusterService.java +++ b/core/src/main/java/org/elasticsearch/action/search/RemoteClusterService.java @@ -26,8 +26,10 @@ import org.elasticsearch.action.admin.cluster.shards.ClusterSearchShardsResponse; import org.elasticsearch.action.support.PlainActionFuture; import org.elasticsearch.cluster.node.DiscoveryNode; +import org.elasticsearch.cluster.node.DiscoveryNodes; import org.elasticsearch.cluster.routing.PlainShardIterator; import org.elasticsearch.cluster.routing.ShardIterator; +import org.elasticsearch.common.Booleans; import org.elasticsearch.common.Strings; import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.settings.Setting; @@ -136,7 +138,7 @@ private synchronized void updateRemoteClusters(Map> // nodes can be tagged with node.attr.remote_gateway: true to allow a node to be a gateway node for // cross cluster search String attribute = REMOTE_NODE_ATTRIBUTE.get(settings); - nodePredicate = nodePredicate.and((node) -> Boolean.getBoolean(node.getAttributes().getOrDefault(attribute, "false"))); + nodePredicate = nodePredicate.and((node) -> Booleans.parseBoolean(node.getAttributes().getOrDefault(attribute, "false"))); } 
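// Why this hunk swaps Boolean.getBoolean for Booleans.parseBoolean: Boolean.getBoolean(s) does
// not parse its argument, it looks up a JVM system property named s and checks whether that
// property equals "true", so a node attribute value of "true" would almost always evaluate to
// false. Booleans.parseBoolean interprets the attribute string itself. A minimal illustration
// (the attribute value here is a hypothetical example):
String gatewayAttr = "true";
boolean viaSystemProperty = Boolean.getBoolean(gatewayAttr);  // false unless -Dtrue=true was set
boolean viaParsing = Booleans.parseBoolean(gatewayAttr);      // true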
remoteClusters.putAll(this.remoteClusters); for (Map.Entry> entry : seeds.entrySet()) { @@ -185,6 +187,10 @@ boolean isCrossClusterSearchEnabled() { return remoteClusters.isEmpty() == false; } + boolean isRemoteNodeConnected(final String remoteCluster, final DiscoveryNode node) { + return remoteClusters.get(remoteCluster).isNodeConnected(node); + } + /** * Groups indices per cluster by splitting remote cluster-alias, index-name pairs on {@link #REMOTE_CLUSTER_INDEX_SEPARATOR}. All * indices per cluster are collected as a list in the returned map keyed by the cluster alias. Local indices are grouped under @@ -326,13 +332,20 @@ private Transport.Connection getConnection(DiscoveryNode node, String cluster) { } void updateRemoteCluster(String clusterAlias, List addresses) { - updateRemoteClusters(Collections.singletonMap(clusterAlias, addresses.stream().map(address -> { - TransportAddress transportAddress = new TransportAddress(address); - return new DiscoveryNode(clusterAlias + "#" + transportAddress.toString(), - transportAddress, - Version.CURRENT.minimumCompatibilityVersion()); - }).collect(Collectors.toList())), - ActionListener.wrap((x) -> {}, (x) -> {}) ); + updateRemoteCluster(clusterAlias, addresses, ActionListener.wrap((x) -> {}, (x) -> {})); + } + + void updateRemoteCluster( + final String clusterAlias, + final List addresses, + final ActionListener connectionListener) { + final List nodes = addresses.stream().map(address -> { + final TransportAddress transportAddress = new TransportAddress(address); + final String id = clusterAlias + "#" + transportAddress.toString(); + final Version version = Version.CURRENT.minimumCompatibilityVersion(); + return new DiscoveryNode(id, transportAddress, version); + }).collect(Collectors.toList()); + updateRemoteClusters(Collections.singletonMap(clusterAlias, nodes), connectionListener); } static Map> buildRemoteClustersSeeds(Settings settings) { diff --git a/core/src/main/java/org/elasticsearch/action/search/SearchActionListener.java b/core/src/main/java/org/elasticsearch/action/search/SearchActionListener.java new file mode 100644 index 0000000000000..709d1e5e23711 --- /dev/null +++ b/core/src/main/java/org/elasticsearch/action/search/SearchActionListener.java @@ -0,0 +1,53 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.action.search; + +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.search.SearchPhaseResult; +import org.elasticsearch.search.SearchShardTarget; + +/** + * An base action listener that ensures shard target and shard index is set on all responses + * received by this listener. 
+ */ +abstract class SearchActionListener implements ActionListener { + private final int requestIndex; + private final SearchShardTarget searchShardTarget; + + protected SearchActionListener(SearchShardTarget searchShardTarget, + int shardIndex) { + assert shardIndex >= 0 : "shard index must be positive"; + this.searchShardTarget = searchShardTarget; + this.requestIndex = shardIndex; + } + + @Override + public final void onResponse(T response) { + response.setShardIndex(requestIndex); + setSearchShardTarget(response); + innerOnResponse(response); + } + + protected void setSearchShardTarget(T response) { // some impls need to override this + response.setSearchShardTarget(searchShardTarget); + } + + protected abstract void innerOnResponse(T response); + +} diff --git a/core/src/main/java/org/elasticsearch/action/search/SearchDfsQueryThenFetchAsyncAction.java b/core/src/main/java/org/elasticsearch/action/search/SearchDfsQueryThenFetchAsyncAction.java index d846c42dbea5d..7151c8712ed7e 100644 --- a/core/src/main/java/org/elasticsearch/action/search/SearchDfsQueryThenFetchAsyncAction.java +++ b/core/src/main/java/org/elasticsearch/action/search/SearchDfsQueryThenFetchAsyncAction.java @@ -33,28 +33,59 @@ import java.util.function.Function; final class SearchDfsQueryThenFetchAsyncAction extends AbstractSearchAsyncAction { + private final SearchPhaseController searchPhaseController; - SearchDfsQueryThenFetchAsyncAction(Logger logger, SearchTransportService searchTransportService, - Function nodeIdToConnection, - Map aliasFilter, Map concreteIndexBoosts, - SearchPhaseController searchPhaseController, Executor executor, SearchRequest request, - ActionListener listener, GroupShardsIterator shardsIts, long startTime, - long clusterStateVersion, SearchTask task) { - super("dfs", logger, searchTransportService, nodeIdToConnection, aliasFilter, concreteIndexBoosts, executor, - request, listener, shardsIts, startTime, clusterStateVersion, task, new SearchPhaseResults<>(shardsIts.size())); + SearchDfsQueryThenFetchAsyncAction( + final Logger logger, + final SearchTransportService searchTransportService, + final Function nodeIdToConnection, + final Map aliasFilter, + final Map concreteIndexBoosts, + final SearchPhaseController searchPhaseController, + final Executor executor, + final SearchRequest request, + final ActionListener listener, + final GroupShardsIterator shardsIts, + final TransportSearchAction.SearchTimeProvider timeProvider, + final long clusterStateVersion, + final SearchTask task) { + super( + "dfs", + logger, + searchTransportService, + nodeIdToConnection, + aliasFilter, + concreteIndexBoosts, + executor, + request, + listener, + shardsIts, + timeProvider, + clusterStateVersion, + task, + new SearchPhaseResults<>(shardsIts.size())); this.searchPhaseController = searchPhaseController; } @Override - protected void executePhaseOnShard(ShardIterator shardIt, ShardRouting shard, ActionListener listener) { + protected void executePhaseOnShard( + final ShardIterator shardIt, + final ShardRouting shard, + final SearchActionListener listener) { getSearchTransport().sendExecuteDfs(getConnection(shard.currentNodeId()), buildShardSearchRequest(shardIt, shard) , getTask(), listener); } @Override - protected SearchPhase getNextPhase(SearchPhaseResults results, SearchPhaseContext context) { - return new DfsQueryPhase(results.results, searchPhaseController, - (queryResults) -> new FetchSearchPhase(queryResults, searchPhaseController, context), context); + protected SearchPhase getNextPhase( + final 
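SearchActionListener above is a small template-method wrapper: onResponse is final, stamps the shard index and shard target onto every response, and only then delegates to the concrete listener, so no subclass can forget the bookkeeping. (The assertion permits index 0, so "non-negative" would describe the requirement more precisely than "positive".) A framework-free sketch of the same pattern, using a hypothetical Listener interface rather than the Elasticsearch ActionListener:

    interface Listener<T> {
        void onResponse(T response);
        void onFailure(Exception e);
    }

    abstract class TaggingListener<T> implements Listener<T> {
        private final int shardIndex;

        protected TaggingListener(int shardIndex) {
            assert shardIndex >= 0 : "shard index must be non-negative";
            this.shardIndex = shardIndex;
        }

        @Override
        public final void onResponse(T response) {
            tag(response, shardIndex);     // always runs before the subclass sees the response
            innerOnResponse(response);
        }

        protected abstract void tag(T response, int shardIndex); // e.g. response.setShardIndex(shardIndex)
        protected abstract void innerOnResponse(T response);
    }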
SearchPhaseResults results, final SearchPhaseContext context) { + return new DfsQueryPhase( + results.results, + searchPhaseController, + (queryResults) -> + new FetchSearchPhase(queryResults, searchPhaseController, context), + context); } + } diff --git a/core/src/main/java/org/elasticsearch/action/search/SearchPhaseContext.java b/core/src/main/java/org/elasticsearch/action/search/SearchPhaseContext.java index 1a21eb3cc3468..26c5403f4abba 100644 --- a/core/src/main/java/org/elasticsearch/action/search/SearchPhaseContext.java +++ b/core/src/main/java/org/elasticsearch/action/search/SearchPhaseContext.java @@ -93,8 +93,8 @@ interface SearchPhaseContext extends ActionListener, Executor { /** * Releases a search context with the given context ID on the node the given connection is connected to. - * @see org.elasticsearch.search.query.QuerySearchResult#id() - * @see org.elasticsearch.search.fetch.FetchSearchResult#id() + * @see org.elasticsearch.search.query.QuerySearchResult#getRequestId() + * @see org.elasticsearch.search.fetch.FetchSearchResult#getRequestId() * */ default void sendReleaseSearchContext(long contextId, Transport.Connection connection) { diff --git a/core/src/main/java/org/elasticsearch/action/search/SearchPhaseController.java b/core/src/main/java/org/elasticsearch/action/search/SearchPhaseController.java index cec44d9e9e58f..810530b5507e9 100644 --- a/core/src/main/java/org/elasticsearch/action/search/SearchPhaseController.java +++ b/core/src/main/java/org/elasticsearch/action/search/SearchPhaseController.java @@ -36,10 +36,10 @@ import org.elasticsearch.common.lucene.Lucene; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.BigArrays; -import org.elasticsearch.common.util.concurrent.AtomicArray; import org.elasticsearch.script.ScriptService; import org.elasticsearch.search.SearchHit; import org.elasticsearch.search.SearchHits; +import org.elasticsearch.search.SearchPhaseResult; import org.elasticsearch.search.aggregations.InternalAggregation; import org.elasticsearch.search.aggregations.InternalAggregation.ReduceContext; import org.elasticsearch.search.aggregations.InternalAggregations; @@ -52,7 +52,6 @@ import org.elasticsearch.search.profile.ProfileShardResult; import org.elasticsearch.search.profile.SearchProfileShardResults; import org.elasticsearch.search.query.QuerySearchResult; -import org.elasticsearch.search.query.QuerySearchResultProvider; import org.elasticsearch.search.suggest.Suggest; import org.elasticsearch.search.suggest.Suggest.Suggestion; import org.elasticsearch.search.suggest.Suggest.Suggestion.Entry; @@ -61,14 +60,16 @@ import java.io.IOException; import java.util.ArrayList; import java.util.Arrays; +import java.util.Collection; import java.util.Collections; import java.util.HashMap; import java.util.List; import java.util.Map; +import java.util.function.IntFunction; import java.util.stream.Collectors; import java.util.stream.StreamSupport; -public class SearchPhaseController extends AbstractComponent { +public final class SearchPhaseController extends AbstractComponent { private static final ScoreDoc[] EMPTY_DOCS = new ScoreDoc[0]; @@ -81,13 +82,13 @@ public SearchPhaseController(Settings settings, BigArrays bigArrays, ScriptServi this.scriptService = scriptService; } - public AggregatedDfs aggregateDfs(AtomicArray results) { + public AggregatedDfs aggregateDfs(Collection results) { ObjectObjectHashMap termStatistics = HppcMaps.newNoNullKeysMap(); ObjectObjectHashMap fieldStatistics = HppcMaps.newNoNullKeysMap(); 
long aggMaxDoc = 0; - for (AtomicArray.Entry lEntry : results.asList()) { - final Term[] terms = lEntry.value.terms(); - final TermStatistics[] stats = lEntry.value.termStatistics(); + for (DfsSearchResult lEntry : results) { + final Term[] terms = lEntry.terms(); + final TermStatistics[] stats = lEntry.termStatistics(); assert terms.length == stats.length; for (int i = 0; i < terms.length; i++) { assert terms[i] != null; @@ -105,9 +106,9 @@ public AggregatedDfs aggregateDfs(AtomicArray results) { } - assert !lEntry.value.fieldStatistics().containsKey(null); - final Object[] keys = lEntry.value.fieldStatistics().keys; - final Object[] values = lEntry.value.fieldStatistics().values; + assert !lEntry.fieldStatistics().containsKey(null); + final Object[] keys = lEntry.fieldStatistics().keys; + final Object[] values = lEntry.fieldStatistics().values; for (int i = 0; i < keys.length; i++) { if (keys[i] != null) { String key = (String) keys[i]; @@ -127,7 +128,7 @@ public AggregatedDfs aggregateDfs(AtomicArray results) { } } } - aggMaxDoc += lEntry.value.maxDoc(); + aggMaxDoc += lEntry.maxDoc(); } return new AggregatedDfs(termStatistics, fieldStatistics, aggMaxDoc); } @@ -146,10 +147,9 @@ private static long optionalSum(long left, long right) { * * @param ignoreFrom Whether to ignore the from and sort all hits in each shard result. * Enabled only for scroll search, because that only retrieves hits of length 'size' in the query phase. - * @param resultsArr Shard result holder + * @param results Shard result holder */ - public ScoreDoc[] sortDocs(boolean ignoreFrom, AtomicArray resultsArr) throws IOException { - List> results = resultsArr.asList(); + public ScoreDoc[] sortDocs(boolean ignoreFrom, Collection results, int numShards) throws IOException { if (results.isEmpty()) { return EMPTY_DOCS; } @@ -159,25 +159,25 @@ public ScoreDoc[] sortDocs(boolean ignoreFrom, AtomicArray entry : results) { - if (entry.value.queryResult().hasHits()) { + for (SearchPhaseResult entry : results) { + if (entry.queryResult().hasHits()) { if (hasResult) { // we already have one, can't really optimize canOptimize = false; break; } canOptimize = true; hasResult = true; - resultToOptimize = entry.value.queryResult(); - shardIndex = entry.index; + resultToOptimize = entry.queryResult(); + shardIndex = resultToOptimize.getShardIndex(); } } - result = canOptimize ? resultToOptimize : results.get(0).value.queryResult(); + result = canOptimize ? 
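aggregateDfs above folds per-shard term and field statistics into cluster-wide totals so the DFS_QUERY_THEN_FETCH query phase can score against global frequencies. A toy version of that accumulation using plain maps instead of Lucene TermStatistics and HPPC maps (names are illustrative only):

    import java.util.*;

    class DfsAggregationSketch {
        // Sums per-shard document frequencies into globalDocFreq and returns the summed maxDoc.
        static long aggregate(List<Map<String, Long>> perShardDocFreq,
                              List<Long> perShardMaxDoc,
                              Map<String, Long> globalDocFreq) {
            long aggMaxDoc = 0;
            for (int shard = 0; shard < perShardDocFreq.size(); shard++) {
                for (Map.Entry<String, Long> term : perShardDocFreq.get(shard).entrySet()) {
                    globalDocFreq.merge(term.getKey(), term.getValue(), Long::sum);
                }
                aggMaxDoc += perShardMaxDoc.get(shard);
            }
            return aggMaxDoc;
        }
    }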
resultToOptimize : results.stream().findFirst().get().queryResult(); assert result != null; } if (canOptimize) { @@ -228,22 +228,21 @@ public ScoreDoc[] sortDocs(boolean ignoreFrom, AtomicArray>> groupedCompletionSuggestions = new HashMap<>(); // group suggestions and assign shard index - for (AtomicArray.Entry sortedResult : results) { - Suggest shardSuggest = sortedResult.value.queryResult().suggest(); + for (SearchPhaseResult sortedResult : results) { + Suggest shardSuggest = sortedResult.queryResult().suggest(); if (shardSuggest != null) { for (CompletionSuggestion suggestion : shardSuggest.filter(CompletionSuggestion.class)) { - suggestion.setShardIndex(sortedResult.index); + suggestion.setShardIndex(sortedResult.getShardIndex()); List> suggestions = groupedCompletionSuggestions.computeIfAbsent(suggestion.getName(), s -> new ArrayList<>()); suggestions.add(suggestion); @@ -286,17 +285,16 @@ public ScoreDoc[] sortDocs(boolean ignoreFrom, AtomicArray void fillTopDocs(T[] shardTopDocs, - List> results, - T empytTopDocs) { + Collection results, T empytTopDocs) { if (results.size() != shardTopDocs.length) { // TopDocs#merge can't deal with null shard TopDocs Arrays.fill(shardTopDocs, empytTopDocs); } - for (AtomicArray.Entry resultProvider : results) { - final T topDocs = (T) resultProvider.value.queryResult().topDocs(); + for (SearchPhaseResult resultProvider : results) { + final T topDocs = (T) resultProvider.queryResult().topDocs(); assert topDocs != null : "top docs must not be null in a valid result"; // the 'index' field is the position in the resultsArr atomic array - shardTopDocs[resultProvider.index] = topDocs; + shardTopDocs[resultProvider.getShardIndex()] = topDocs; } } public ScoreDoc[] getLastEmittedDocPerShard(ReducedQueryPhase reducedQueryPhase, @@ -340,12 +338,11 @@ public IntArrayList[] fillDocIdsToLoad(int numShards, ScoreDoc[] shardDocs) { */ public InternalSearchResponse merge(boolean ignoreFrom, ScoreDoc[] sortedDocs, ReducedQueryPhase reducedQueryPhase, - AtomicArray fetchResultsArr) { + Collection fetchResults, IntFunction resultsLookup) { if (reducedQueryPhase.isEmpty()) { return InternalSearchResponse.empty(); } - List> fetchResults = fetchResultsArr.asList(); - SearchHits hits = getHits(reducedQueryPhase, ignoreFrom, sortedDocs, fetchResultsArr); + SearchHits hits = getHits(reducedQueryPhase, ignoreFrom, sortedDocs, fetchResults, resultsLookup); if (reducedQueryPhase.suggest != null) { if (!fetchResults.isEmpty()) { int currentOffset = hits.getHits().length; @@ -353,7 +350,7 @@ public InternalSearchResponse merge(boolean ignoreFrom, ScoreDoc[] sortedDocs, final List suggestionOptions = suggestion.getOptions(); for (int scoreDocIndex = currentOffset; scoreDocIndex < currentOffset + suggestionOptions.size(); scoreDocIndex++) { ScoreDoc shardDoc = sortedDocs[scoreDocIndex]; - QuerySearchResultProvider searchResultProvider = fetchResultsArr.get(shardDoc.shardIndex); + SearchPhaseResult searchResultProvider = resultsLookup.apply(shardDoc.shardIndex); if (searchResultProvider == null) { continue; } @@ -364,7 +361,7 @@ public InternalSearchResponse merge(boolean ignoreFrom, ScoreDoc[] sortedDocs, CompletionSuggestion.Entry.Option suggestOption = suggestionOptions.get(scoreDocIndex - currentOffset); hit.score(shardDoc.score); - hit.shard(fetchResult.shardTarget()); + hit.shard(fetchResult.getSearchShardTarget()); suggestOption.setHit(hit); } } @@ -377,8 +374,7 @@ public InternalSearchResponse merge(boolean ignoreFrom, ScoreDoc[] sortedDocs, } private SearchHits 
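fillTopDocs above pre-fills the per-shard array with an empty TopDocs sentinel whenever some shards produced no result, because the Lucene merge cannot deal with null slots; each available result then overwrites its own slot, keyed by getShardIndex(). The same "fill, then overwrite by index" idea with plain arrays, purely for illustration:

    import java.util.Arrays;
    import java.util.Map;

    class FillByShardIndexSketch {
        static String[] fill(int numShards, Map<Integer, String> resultsByShardIndex) {
            String[] slots = new String[numShards];
            if (resultsByShardIndex.size() != numShards) {
                Arrays.fill(slots, "<empty>");       // sentinel so the downstream merge never sees null
            }
            for (Map.Entry<Integer, String> result : resultsByShardIndex.entrySet()) {
                slots[result.getKey()] = result.getValue();  // each result lands in its own shard slot
            }
            return slots;
        }
    }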
getHits(ReducedQueryPhase reducedQueryPhase, boolean ignoreFrom, ScoreDoc[] sortedDocs, - AtomicArray fetchResultsArr) { - List> fetchResults = fetchResultsArr.asList(); + Collection fetchResults, IntFunction resultsLookup) { boolean sorted = false; int sortScoreIndex = -1; if (reducedQueryPhase.oneResult.topDocs() instanceof TopFieldDocs) { @@ -396,8 +392,8 @@ private SearchHits getHits(ReducedQueryPhase reducedQueryPhase, boolean ignoreFr } } // clean the fetch counter - for (AtomicArray.Entry entry : fetchResults) { - entry.value.fetchResult().initCounter(); + for (SearchPhaseResult entry : fetchResults) { + entry.fetchResult().initCounter(); } int from = ignoreFrom ? 0 : reducedQueryPhase.oneResult.queryResult().from(); int numSearchHits = (int) Math.min(reducedQueryPhase.fetchHits - from, reducedQueryPhase.oneResult.size()); @@ -408,7 +404,7 @@ private SearchHits getHits(ReducedQueryPhase reducedQueryPhase, boolean ignoreFr if (!fetchResults.isEmpty()) { for (int i = 0; i < numSearchHits; i++) { ScoreDoc shardDoc = sortedDocs[i]; - QuerySearchResultProvider fetchResultProvider = fetchResultsArr.get(shardDoc.shardIndex); + SearchPhaseResult fetchResultProvider = resultsLookup.apply(shardDoc.shardIndex); if (fetchResultProvider == null) { continue; } @@ -417,7 +413,7 @@ private SearchHits getHits(ReducedQueryPhase reducedQueryPhase, boolean ignoreFr if (index < fetchResult.hits().internalHits().length) { SearchHit searchHit = fetchResult.hits().internalHits()[index]; searchHit.score(shardDoc.score); - searchHit.shard(fetchResult.shardTarget()); + searchHit.shard(fetchResult.getSearchShardTarget()); if (sorted) { FieldDoc fieldDoc = (FieldDoc) shardDoc; searchHit.sortValues(fieldDoc.fields, reducedQueryPhase.oneResult.sortValueFormats()); @@ -437,7 +433,7 @@ private SearchHits getHits(ReducedQueryPhase reducedQueryPhase, boolean ignoreFr * Reduces the given query results and consumes all aggregations and profile results. * @param queryResults a list of non-null query shard results */ - public final ReducedQueryPhase reducedQueryPhase(List> queryResults) { + public ReducedQueryPhase reducedQueryPhase(List queryResults) { return reducedQueryPhase(queryResults, null, 0); } @@ -450,7 +446,7 @@ public final ReducedQueryPhase reducedQueryPhase(List> queryResults, + private ReducedQueryPhase reducedQueryPhase(Collection queryResults, List bufferdAggs, int numReducePhases) { assert numReducePhases >= 0 : "num reduce phases must be >= 0 but was: " + numReducePhases; numReducePhases++; // increment for this phase @@ -463,7 +459,7 @@ private ReducedQueryPhase reducedQueryPhase(List> groupedSuggestions = hasSuggest ? new HashMap<>() : Collections.emptyMap(); final Map profileResults = hasProfileResults ? 
new HashMap<>(queryResults.size()) : Collections.emptyMap(); - for (AtomicArray.Entry entry : queryResults) { - QuerySearchResult result = entry.value.queryResult(); + for (SearchPhaseResult entry : queryResults) { + QuerySearchResult result = entry.queryResult(); if (result.searchTimedOut()) { timedOut = true; } @@ -515,7 +511,7 @@ private ReducedQueryPhase reducedQueryPhase(List { + extends InitialSearchPhase.SearchPhaseResults { private final InternalAggregations[] buffer; private int index; private final SearchPhaseController controller; @@ -649,8 +645,8 @@ private QueryPhaseResultConsumer(SearchPhaseController controller, int expectedR } @Override - public void consumeResult(int shardIndex, QuerySearchResultProvider result) { - super.consumeResult(shardIndex, result); + public void consumeResult(SearchPhaseResult result) { + super.consumeResult(result); QuerySearchResult queryResult = result.queryResult(); assert queryResult.hasAggs() : "this collector should only be used if aggs are requested"; consumeInternal(queryResult); @@ -691,7 +687,7 @@ int getNumBuffered() { /** * Returns a new SearchPhaseResults instance. This might return an instance that reduces search responses incrementally. */ - InitialSearchPhase.SearchPhaseResults newSearchPhaseResults(SearchRequest request, int numShards) { + InitialSearchPhase.SearchPhaseResults newSearchPhaseResults(SearchRequest request, int numShards) { SearchSourceBuilder source = request.source(); if (source != null && source.aggregations() != null) { if (request.getBatchedReduceSize() < numShards) { diff --git a/core/src/main/java/org/elasticsearch/action/search/SearchQueryThenFetchAsyncAction.java b/core/src/main/java/org/elasticsearch/action/search/SearchQueryThenFetchAsyncAction.java index 210a9aefda755..fd1d19770295a 100644 --- a/core/src/main/java/org/elasticsearch/action/search/SearchQueryThenFetchAsyncAction.java +++ b/core/src/main/java/org/elasticsearch/action/search/SearchQueryThenFetchAsyncAction.java @@ -24,38 +24,66 @@ import org.elasticsearch.cluster.routing.GroupShardsIterator; import org.elasticsearch.cluster.routing.ShardIterator; import org.elasticsearch.cluster.routing.ShardRouting; +import org.elasticsearch.search.SearchPhaseResult; import org.elasticsearch.search.internal.AliasFilter; -import org.elasticsearch.search.query.QuerySearchResultProvider; import org.elasticsearch.transport.Transport; import java.util.Map; import java.util.concurrent.Executor; import java.util.function.Function; -final class SearchQueryThenFetchAsyncAction extends AbstractSearchAsyncAction { +final class SearchQueryThenFetchAsyncAction + extends AbstractSearchAsyncAction { + private final SearchPhaseController searchPhaseController; - SearchQueryThenFetchAsyncAction(Logger logger, SearchTransportService searchTransportService, - Function nodeIdToConnection, - Map aliasFilter, Map concreteIndexBoosts, - SearchPhaseController searchPhaseController, Executor executor, - SearchRequest request, ActionListener listener, - GroupShardsIterator shardsIts, long startTime, long clusterStateVersion, - SearchTask task) { - super("query", logger, searchTransportService, nodeIdToConnection, aliasFilter, concreteIndexBoosts, executor, - request, listener, shardsIts, startTime, clusterStateVersion, task, - searchPhaseController.newSearchPhaseResults(request, shardsIts.size())); + SearchQueryThenFetchAsyncAction( + final Logger logger, + final SearchTransportService searchTransportService, + final Function nodeIdToConnection, + final Map aliasFilter, + final Map 
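QueryPhaseResultConsumer above reduces aggregations incrementally: partial per-shard aggregations are buffered, and once the buffer of batchedReduceSize entries fills up they are reduced into a single partial that takes the first slot, so memory is bounded by the batched reduce size rather than by the shard count. A self-contained sketch with long values standing in for aggregation trees:

    class IncrementalReduceSketch {
        private final long[] buffer;
        private int index;
        private int numReducePhases;

        IncrementalReduceSketch(int batchedReduceSize) {
            this.buffer = new long[batchedReduceSize];
        }

        synchronized void consume(long partial) {
            if (index == buffer.length) {
                long reduced = 0;
                for (long value : buffer) {
                    reduced += value;        // stand-in for reducing one batch of aggregations
                }
                numReducePhases++;
                buffer[0] = reduced;         // keep the partial reduction as the first element
                index = 1;
            }
            buffer[index++] = partial;
        }

        synchronized long finalReduce() {
            long total = 0;
            for (int i = 0; i < index; i++) {
                total += buffer[i];
            }
            numReducePhases++;
            return total;
        }

        synchronized int getNumReducePhases() {
            return numReducePhases;
        }
    }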
concreteIndexBoosts, + final SearchPhaseController searchPhaseController, + final Executor executor, + final SearchRequest request, + final ActionListener listener, + final GroupShardsIterator shardsIts, + final TransportSearchAction.SearchTimeProvider timeProvider, + long clusterStateVersion, + SearchTask task) { + super( + "query", + logger, + searchTransportService, + nodeIdToConnection, + aliasFilter, + concreteIndexBoosts, + executor, + request, + listener, + shardsIts, + timeProvider, + clusterStateVersion, + task, + searchPhaseController.newSearchPhaseResults(request, shardsIts.size())); this.searchPhaseController = searchPhaseController; } - - protected void executePhaseOnShard(ShardIterator shardIt, ShardRouting shard, ActionListener listener) { - getSearchTransport().sendExecuteQuery(getConnection(shard.currentNodeId()), - buildShardSearchRequest(shardIt, shard), getTask(), listener); + protected void executePhaseOnShard( + final ShardIterator shardIt, + final ShardRouting shard, + final SearchActionListener listener) { + getSearchTransport().sendExecuteQuery( + getConnection(shard.currentNodeId()), + buildShardSearchRequest(shardIt, shard), + getTask(), + listener); } @Override - protected SearchPhase getNextPhase(SearchPhaseResults results, SearchPhaseContext context) { + protected SearchPhase getNextPhase( + final SearchPhaseResults results, + final SearchPhaseContext context) { return new FetchSearchPhase(results, searchPhaseController, context); } } diff --git a/core/src/main/java/org/elasticsearch/action/search/SearchScrollQueryAndFetchAsyncAction.java b/core/src/main/java/org/elasticsearch/action/search/SearchScrollQueryAndFetchAsyncAction.java index b005c0fc2fe42..cda974a33de96 100644 --- a/core/src/main/java/org/elasticsearch/action/search/SearchScrollQueryAndFetchAsyncAction.java +++ b/core/src/main/java/org/elasticsearch/action/search/SearchScrollQueryAndFetchAsyncAction.java @@ -32,13 +32,14 @@ import org.elasticsearch.search.fetch.ScrollQueryFetchSearchResult; import org.elasticsearch.search.internal.InternalScrollSearchRequest; import org.elasticsearch.search.internal.InternalSearchResponse; +import org.elasticsearch.search.query.ScrollQuerySearchResult; import java.util.List; import java.util.concurrent.atomic.AtomicInteger; import static org.elasticsearch.action.search.TransportSearchHelper.internalScrollSearchRequest; -class SearchScrollQueryAndFetchAsyncAction extends AbstractAsyncAction { +final class SearchScrollQueryAndFetchAsyncAction extends AbstractAsyncAction { private final Logger logger; private final SearchPhaseController searchPhaseController; @@ -70,21 +71,17 @@ class SearchScrollQueryAndFetchAsyncAction extends AbstractAsyncAction { this.queryFetchResults = new AtomicArray<>(scrollId.getContext().length); } - protected final ShardSearchFailure[] buildShardFailures() { + private ShardSearchFailure[] buildShardFailures() { if (shardFailures == null) { return ShardSearchFailure.EMPTY_ARRAY; } - List> entries = shardFailures.asList(); - ShardSearchFailure[] failures = new ShardSearchFailure[entries.size()]; - for (int i = 0; i < failures.length; i++) { - failures[i] = entries.get(i).value; - } - return failures; + List failures = shardFailures.asList(); + return failures.toArray(new ShardSearchFailure[failures.size()]); } // we do our best to return the shard failures, but its ok if its not fully concurrently safe // we simply try and return as much as possible - protected final void addShardFailure(final int shardIndex, ShardSearchFailure failure) { + 
private void addShardFailure(final int shardIndex, ShardSearchFailure failure) { if (shardFailures == null) { shardFailures = new AtomicArray<>(scrollId.getContext().length); } @@ -130,15 +127,20 @@ public void start() { void executePhase(final int shardIndex, DiscoveryNode node, final long searchId) { InternalScrollSearchRequest internalRequest = internalScrollSearchRequest(searchId, request); - searchTransportService.sendExecuteFetch(node, internalRequest, task, new ActionListener() { + searchTransportService.sendExecuteScrollFetch(node, internalRequest, task, + new SearchActionListener(null, shardIndex) { + @Override + protected void setSearchShardTarget(ScrollQueryFetchSearchResult response) { + // don't do this - it's part of the response... + assert response.getSearchShardTarget() != null : "search shard target must not be null"; + } @Override - public void onResponse(ScrollQueryFetchSearchResult result) { - queryFetchResults.set(shardIndex, result.result()); + protected void innerOnResponse(ScrollQueryFetchSearchResult response) { + queryFetchResults.set(response.getShardIndex(), response.result()); if (counter.decrementAndGet() == 0) { finishHim(); } } - @Override public void onFailure(Exception t) { onPhaseFailure(t, searchId, shardIndex); @@ -170,9 +172,10 @@ private void finishHim() { } private void innerFinishHim() throws Exception { - ScoreDoc[] sortedShardDocs = searchPhaseController.sortDocs(true, queryFetchResults); + List queryFetchSearchResults = queryFetchResults.asList(); + ScoreDoc[] sortedShardDocs = searchPhaseController.sortDocs(true, queryFetchResults.asList(), queryFetchResults.length()); final InternalSearchResponse internalResponse = searchPhaseController.merge(true, sortedShardDocs, - searchPhaseController.reducedQueryPhase(queryFetchResults.asList()), queryFetchResults); + searchPhaseController.reducedQueryPhase(queryFetchSearchResults), queryFetchSearchResults, queryFetchResults::get); String scrollId = null; if (request.scroll() != null) { scrollId = request.scrollId(); diff --git a/core/src/main/java/org/elasticsearch/action/search/SearchScrollQueryThenFetchAsyncAction.java b/core/src/main/java/org/elasticsearch/action/search/SearchScrollQueryThenFetchAsyncAction.java index 13c81c1d5e6e5..aed234d4a89f8 100644 --- a/core/src/main/java/org/elasticsearch/action/search/SearchScrollQueryThenFetchAsyncAction.java +++ b/core/src/main/java/org/elasticsearch/action/search/SearchScrollQueryThenFetchAsyncAction.java @@ -29,6 +29,7 @@ import org.elasticsearch.cluster.node.DiscoveryNodes; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.util.concurrent.AtomicArray; +import org.elasticsearch.common.util.concurrent.CountDown; import org.elasticsearch.search.fetch.FetchSearchResult; import org.elasticsearch.search.fetch.ShardFetchRequest; import org.elasticsearch.search.internal.InternalScrollSearchRequest; @@ -41,7 +42,7 @@ import static org.elasticsearch.action.search.TransportSearchHelper.internalScrollSearchRequest; -class SearchScrollQueryThenFetchAsyncAction extends AbstractAsyncAction { +final class SearchScrollQueryThenFetchAsyncAction extends AbstractAsyncAction { private final Logger logger; private final SearchTask task; @@ -73,21 +74,17 @@ class SearchScrollQueryThenFetchAsyncAction extends AbstractAsyncAction { this.fetchResults = new AtomicArray<>(scrollId.getContext().length); } - protected final ShardSearchFailure[] buildShardFailures() { + private ShardSearchFailure[] buildShardFailures() { if (shardFailures == null) 
{ return ShardSearchFailure.EMPTY_ARRAY; } - List> entries = shardFailures.asList(); - ShardSearchFailure[] failures = new ShardSearchFailure[entries.size()]; - for (int i = 0; i < failures.length; i++) { - failures[i] = entries.get(i).value; - } - return failures; + List failures = shardFailures.asList(); + return failures.toArray(new ShardSearchFailure[failures.size()]); } // we do our best to return the shard failures, but its ok if its not fully concurrently safe // we simply try and return as much as possible - protected final void addShardFailure(final int shardIndex, ShardSearchFailure failure) { + private void addShardFailure(final int shardIndex, ShardSearchFailure failure) { if (shardFailures == null) { shardFailures = new AtomicArray<>(scrollId.getContext().length); } @@ -99,8 +96,7 @@ public void start() { listener.onFailure(new SearchPhaseExecutionException("query", "no nodes to search on", ShardSearchFailure.EMPTY_ARRAY)); return; } - final AtomicInteger counter = new AtomicInteger(scrollId.getContext().length); - + final CountDown counter = new CountDown(scrollId.getContext().length); ScrollIdForNode[] context = scrollId.getContext(); for (int i = 0; i < context.length; i++) { ScrollIdForNode target = context[i]; @@ -112,7 +108,7 @@ public void start() { logger.debug("Node [{}] not available for scroll request [{}]", target.getNode(), scrollId.getSource()); } successfulOps.decrementAndGet(); - if (counter.decrementAndGet() == 0) { + if (counter.countDown()) { try { executeFetchPhase(); } catch (Exception e) { @@ -124,13 +120,21 @@ public void start() { } } - private void executeQueryPhase(final int shardIndex, final AtomicInteger counter, DiscoveryNode node, final long searchId) { + private void executeQueryPhase(final int shardIndex, final CountDown counter, DiscoveryNode node, final long searchId) { InternalScrollSearchRequest internalRequest = internalScrollSearchRequest(searchId, request); - searchTransportService.sendExecuteQuery(node, internalRequest, task, new ActionListener() { + searchTransportService.sendExecuteScrollQuery(node, internalRequest, task, + new SearchActionListener(null, shardIndex) { + @Override - public void onResponse(ScrollQuerySearchResult result) { - queryResults.set(shardIndex, result.queryResult()); - if (counter.decrementAndGet() == 0) { + protected void setSearchShardTarget(ScrollQuerySearchResult response) { + // don't do this - it's part of the response... 
+ assert response.getSearchShardTarget() != null : "search shard target must not be null"; + } + + @Override + protected void innerOnResponse(ScrollQuerySearchResult result) { + queryResults.setOnce(result.getShardIndex(), result.queryResult()); + if (counter.countDown()) { try { executeFetchPhase(); } catch (Exception e) { @@ -146,13 +150,13 @@ public void onFailure(Exception t) { }); } - void onQueryPhaseFailure(final int shardIndex, final AtomicInteger counter, final long searchId, Exception failure) { + void onQueryPhaseFailure(final int shardIndex, final CountDown counter, final long searchId, Exception failure) { if (logger.isDebugEnabled()) { logger.debug((Supplier) () -> new ParameterizedMessage("[{}] Failed to execute query phase", searchId), failure); } addShardFailure(shardIndex, new ShardSearchFailure(failure)); successfulOps.decrementAndGet(); - if (counter.decrementAndGet() == 0) { + if (counter.countDown()) { if (successfulOps.get() == 0) { listener.onFailure(new SearchPhaseExecutionException("query", "all shards failed", failure, buildShardFailures())); } else { @@ -167,7 +171,7 @@ void onQueryPhaseFailure(final int shardIndex, final AtomicInteger counter, fina } private void executeFetchPhase() throws Exception { - sortedShardDocs = searchPhaseController.sortDocs(true, queryResults); + sortedShardDocs = searchPhaseController.sortDocs(true, queryResults.asList(), queryResults.length()); if (sortedShardDocs.length == 0) { finishHim(searchPhaseController.reducedQueryPhase(queryResults.asList())); return; @@ -177,21 +181,21 @@ private void executeFetchPhase() throws Exception { SearchPhaseController.ReducedQueryPhase reducedQueryPhase = searchPhaseController.reducedQueryPhase(queryResults.asList()); final ScoreDoc[] lastEmittedDocPerShard = searchPhaseController.getLastEmittedDocPerShard(reducedQueryPhase, sortedShardDocs, queryResults.length()); - final AtomicInteger counter = new AtomicInteger(docIdsToLoad.length); + final CountDown counter = new CountDown(docIdsToLoad.length); for (int i = 0; i < docIdsToLoad.length; i++) { final int index = i; final IntArrayList docIds = docIdsToLoad[index]; if (docIds != null) { final QuerySearchResult querySearchResult = queryResults.get(index); ScoreDoc lastEmittedDoc = lastEmittedDocPerShard[index]; - ShardFetchRequest shardFetchRequest = new ShardFetchRequest(querySearchResult.id(), docIds, lastEmittedDoc); - DiscoveryNode node = nodes.get(querySearchResult.shardTarget().getNodeId()); - searchTransportService.sendExecuteFetchScroll(node, shardFetchRequest, task, new ActionListener() { + ShardFetchRequest shardFetchRequest = new ShardFetchRequest(querySearchResult.getRequestId(), docIds, lastEmittedDoc); + DiscoveryNode node = nodes.get(querySearchResult.getSearchShardTarget().getNodeId()); + searchTransportService.sendExecuteFetchScroll(node, shardFetchRequest, task, + new SearchActionListener(querySearchResult.getSearchShardTarget(), index) { @Override - public void onResponse(FetchSearchResult result) { - result.shardTarget(querySearchResult.shardTarget()); - fetchResults.set(index, result); - if (counter.decrementAndGet() == 0) { + protected void innerOnResponse(FetchSearchResult response) { + fetchResults.setOnce(response.getShardIndex(), response); + if (counter.countDown()) { finishHim(reducedQueryPhase); } } @@ -202,14 +206,14 @@ public void onFailure(Exception t) { logger.debug("Failed to execute fetch phase", t); } successfulOps.decrementAndGet(); - if (counter.decrementAndGet() == 0) { + if (counter.countDown()) { 
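The scroll actions above swap AtomicInteger.decrementAndGet() == 0 for Elasticsearch's CountDown helper, whose countDown() returns true exactly once, for the call that reaches zero, and never lets the count go negative. A minimal sketch written against only that contract (not the actual implementation):

    import java.util.concurrent.atomic.AtomicInteger;

    final class CountDownSketch {
        private final AtomicInteger count;

        CountDownSketch(int count) {
            if (count < 0) {
                throw new IllegalArgumentException("count must be >= 0 but was " + count);
            }
            this.count = new AtomicInteger(count);
        }

        /** Returns true only for the call that moves the count from 1 to 0. */
        boolean countDown() {
            while (true) {
                int current = count.get();
                if (current == 0) {
                    return false;                         // already counted down, never goes negative
                }
                if (count.compareAndSet(current, current - 1)) {
                    return current == 1;
                }
            }
        }

        boolean isCountedDown() {
            return count.get() == 0;
        }
    }

Call sites then read like the hunks above: if (counter.countDown()) { executeFetchPhase(); }.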
finishHim(reducedQueryPhase); } } }); } else { // the counter is set to the total size of docIdsToLoad which can have null values so we have to count them down too - if (counter.decrementAndGet() == 0) { + if (counter.countDown()) { finishHim(reducedQueryPhase); } } @@ -218,7 +222,8 @@ public void onFailure(Exception t) { private void finishHim(SearchPhaseController.ReducedQueryPhase queryPhase) { try { - final InternalSearchResponse internalResponse = searchPhaseController.merge(true, sortedShardDocs, queryPhase, fetchResults); + final InternalSearchResponse internalResponse = searchPhaseController.merge(true, sortedShardDocs, queryPhase, + fetchResults.asList(), fetchResults::get); String scrollId = null; if (request.scroll() != null) { scrollId = request.scrollId(); diff --git a/core/src/main/java/org/elasticsearch/action/search/SearchTransportService.java b/core/src/main/java/org/elasticsearch/action/search/SearchTransportService.java index 4ebf7c79c2ae6..80583e24c9c20 100644 --- a/core/src/main/java/org/elasticsearch/action/search/SearchTransportService.java +++ b/core/src/main/java/org/elasticsearch/action/search/SearchTransportService.java @@ -31,6 +31,7 @@ import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.search.SearchPhaseResult; import org.elasticsearch.search.SearchService; import org.elasticsearch.search.dfs.DfsSearchResult; import org.elasticsearch.search.fetch.FetchSearchResult; @@ -42,7 +43,6 @@ import org.elasticsearch.search.internal.ShardSearchTransportRequest; import org.elasticsearch.search.query.QuerySearchRequest; import org.elasticsearch.search.query.QuerySearchResult; -import org.elasticsearch.search.query.QuerySearchResultProvider; import org.elasticsearch.search.query.ScrollQuerySearchResult; import org.elasticsearch.tasks.Task; import org.elasticsearch.threadpool.ThreadPool; @@ -118,17 +118,17 @@ public void sendClearAllScrollContexts(DiscoveryNode node, final ActionListener< } public void sendExecuteDfs(Transport.Connection connection, final ShardSearchTransportRequest request, SearchTask task, - final ActionListener listener) { + final SearchActionListener listener) { transportService.sendChildRequest(connection, DFS_ACTION_NAME, request, task, new ActionListenerResponseHandler<>(listener, DfsSearchResult::new)); } public void sendExecuteQuery(Transport.Connection connection, final ShardSearchTransportRequest request, SearchTask task, - final ActionListener listener) { + final SearchActionListener listener) { // we optimize this and expect a QueryFetchSearchResult if we only have a single shard in the search request // this used to be the QUERY_AND_FETCH which doesn't exists anymore. final boolean fetchDocuments = request.numberOfShards() == 1; - Supplier supplier = fetchDocuments ? QueryFetchSearchResult::new : QuerySearchResult::new; + Supplier supplier = fetchDocuments ? 
QueryFetchSearchResult::new : QuerySearchResult::new; if (connection.getVersion().onOrBefore(Version.V_5_3_0_UNRELEASED) && fetchDocuments) { // TODO this BWC layer can be removed once this is back-ported to 5.3 transportService.sendChildRequest(connection, QUERY_FETCH_ACTION_NAME, request, task, @@ -140,35 +140,35 @@ public void sendExecuteQuery(Transport.Connection connection, final ShardSearchT } public void sendExecuteQuery(Transport.Connection connection, final QuerySearchRequest request, SearchTask task, - final ActionListener listener) { + final SearchActionListener listener) { transportService.sendChildRequest(connection, QUERY_ID_ACTION_NAME, request, task, new ActionListenerResponseHandler<>(listener, QuerySearchResult::new)); } - public void sendExecuteQuery(DiscoveryNode node, final InternalScrollSearchRequest request, SearchTask task, - final ActionListener listener) { + public void sendExecuteScrollQuery(DiscoveryNode node, final InternalScrollSearchRequest request, SearchTask task, + final SearchActionListener listener) { transportService.sendChildRequest(transportService.getConnection(node), QUERY_SCROLL_ACTION_NAME, request, task, new ActionListenerResponseHandler<>(listener, ScrollQuerySearchResult::new)); } - public void sendExecuteFetch(DiscoveryNode node, final InternalScrollSearchRequest request, SearchTask task, - final ActionListener listener) { + public void sendExecuteScrollFetch(DiscoveryNode node, final InternalScrollSearchRequest request, SearchTask task, + final SearchActionListener listener) { transportService.sendChildRequest(transportService.getConnection(node), QUERY_FETCH_SCROLL_ACTION_NAME, request, task, new ActionListenerResponseHandler<>(listener, ScrollQueryFetchSearchResult::new)); } public void sendExecuteFetch(Transport.Connection connection, final ShardFetchSearchRequest request, SearchTask task, - final ActionListener listener) { + final SearchActionListener listener) { sendExecuteFetch(connection, FETCH_ID_ACTION_NAME, request, task, listener); } public void sendExecuteFetchScroll(DiscoveryNode node, final ShardFetchRequest request, SearchTask task, - final ActionListener listener) { + final SearchActionListener listener) { sendExecuteFetch(transportService.getConnection(node), FETCH_ID_SCROLL_ACTION_NAME, request, task, listener); } private void sendExecuteFetch(Transport.Connection connection, String action, final ShardFetchRequest request, SearchTask task, - final ActionListener listener) { + final SearchActionListener listener) { transportService.sendChildRequest(connection, action, request, task, new ActionListenerResponseHandler<>(listener, FetchSearchResult::new)); } @@ -327,7 +327,7 @@ public void messageReceived(ShardSearchTransportRequest request, TransportChanne new TaskAwareTransportRequestHandler() { @Override public void messageReceived(ShardSearchTransportRequest request, TransportChannel channel, Task task) throws Exception { - QuerySearchResultProvider result = searchService.executeQueryPhase(request, (SearchTask)task); + SearchPhaseResult result = searchService.executeQueryPhase(request, (SearchTask)task); channel.sendResponse(result); } }); @@ -361,7 +361,7 @@ public void messageReceived(InternalScrollSearchRequest request, TransportChanne @Override public void messageReceived(ShardSearchTransportRequest request, TransportChannel channel, Task task) throws Exception { assert request.numberOfShards() == 1 : "expected single shard request but got: " + request.numberOfShards(); - QuerySearchResultProvider result = 
searchService.executeQueryPhase(request, (SearchTask)task); + SearchPhaseResult result = searchService.executeQueryPhase(request, (SearchTask)task); channel.sendResponse(result); } }); diff --git a/core/src/main/java/org/elasticsearch/action/search/TransportSearchAction.java b/core/src/main/java/org/elasticsearch/action/search/TransportSearchAction.java index e86cfef6e1410..008d022a6556f 100644 --- a/core/src/main/java/org/elasticsearch/action/search/TransportSearchAction.java +++ b/core/src/main/java/org/elasticsearch/action/search/TransportSearchAction.java @@ -52,6 +52,7 @@ import java.util.Set; import java.util.concurrent.Executor; import java.util.function.Function; +import java.util.function.LongSupplier; import static org.elasticsearch.action.search.SearchType.QUERY_THEN_FETCH; @@ -116,10 +117,62 @@ private Map resolveIndexBoosts(SearchRequest searchRequest, Clust return Collections.unmodifiableMap(concreteIndexBoosts); } + /** + * Search operations need two clocks. One clock is to fulfill real clock needs (e.g., resolving + * "now" to an index name). Another clock is needed for measuring how long a search operation + * took. These two uses are at odds with each other. There are many issues with using a real + * clock for measuring how long an operation took (they often lack precision, they are subject + * to moving backwards due to NTP and other such complexities, etc.). There are also issues with + * using a relative clock for reporting real time. Thus, we simply separate these two uses. + */ + static class SearchTimeProvider { + + private final long absoluteStartMillis; + private final long relativeStartNanos; + private final LongSupplier relativeCurrentNanosProvider; + + /** + * Instantiates a new search time provider. The absolute start time is the real clock time + * used for resolving index expressions that include dates. The relative start time is the + * start of the search operation according to a relative clock. The total time the search + * operation took can be measured against the provided relative clock and the relative start + * time. 
+ * + * @param absoluteStartMillis the absolute start time in milliseconds since the epoch + * @param relativeStartNanos the relative start time in nanoseconds + * @param relativeCurrentNanosProvider provides the current relative time + */ + SearchTimeProvider( + final long absoluteStartMillis, + final long relativeStartNanos, + final LongSupplier relativeCurrentNanosProvider) { + this.absoluteStartMillis = absoluteStartMillis; + this.relativeStartNanos = relativeStartNanos; + this.relativeCurrentNanosProvider = relativeCurrentNanosProvider; + } + + long getAbsoluteStartMillis() { + return absoluteStartMillis; + } + + long getRelativeStartNanos() { + return relativeStartNanos; + } + + long getRelativeCurrentNanos() { + return relativeCurrentNanosProvider.getAsLong(); + } + + } + + @Override protected void doExecute(Task task, SearchRequest searchRequest, ActionListener listener) { - // pure paranoia if time goes backwards we are at least positive - final long startTimeInMillis = Math.max(0, System.currentTimeMillis()); + final long absoluteStartMillis = System.currentTimeMillis(); + final long relativeStartNanos = System.nanoTime(); + final SearchTimeProvider timeProvider = + new SearchTimeProvider(absoluteStartMillis, relativeStartNanos, System::nanoTime); + final String[] localIndices; final Map> remoteClusterIndices; final ClusterState clusterState = clusterService.state(); @@ -134,7 +187,7 @@ protected void doExecute(Task task, SearchRequest searchRequest, ActionListener< } if (remoteClusterIndices.isEmpty()) { - executeSearch((SearchTask)task, startTimeInMillis, searchRequest, localIndices, Collections.emptyList(), + executeSearch((SearchTask)task, timeProvider, searchRequest, localIndices, Collections.emptyList(), (nodeId) -> null, clusterState, Collections.emptyMap(), listener); } else { remoteClusterService.collectSearchShards(searchRequest, remoteClusterIndices, @@ -143,13 +196,13 @@ protected void doExecute(Task task, SearchRequest searchRequest, ActionListener< Map remoteAliasFilters = new HashMap<>(); Function connectionFunction = remoteClusterService.processRemoteShards( searchShardsResponses, remoteShardIterators, remoteAliasFilters); - executeSearch((SearchTask)task, startTimeInMillis, searchRequest, localIndices, remoteShardIterators, + executeSearch((SearchTask)task, timeProvider, searchRequest, localIndices, remoteShardIterators, connectionFunction, clusterState, remoteAliasFilters, listener); }, listener::onFailure)); } } - private void executeSearch(SearchTask task, long startTimeInMillis, SearchRequest searchRequest, String[] localIndices, + private void executeSearch(SearchTask task, SearchTimeProvider timeProvider, SearchRequest searchRequest, String[] localIndices, List remoteShardIterators, Function remoteConnections, ClusterState clusterState, Map remoteAliasMap, ActionListener listener) { @@ -163,7 +216,7 @@ private void executeSearch(SearchTask task, long startTimeInMillis, SearchReques indices = Index.EMPTY_ARRAY; // don't search on _all if only remote indices were specified } else { indices = indexNameExpressionResolver.concreteIndices(clusterState, searchRequest.indicesOptions(), - startTimeInMillis, localIndices); + timeProvider.getAbsoluteStartMillis(), localIndices); } Map aliasFilter = buildPerIndexAliasFilter(searchRequest, clusterState, indices, remoteAliasMap); Map> routingMap = indexNameExpressionResolver.resolveSearchRouting(clusterState, searchRequest.routing(), @@ -211,7 +264,7 @@ private void executeSearch(SearchTask task, long startTimeInMillis, 
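The SearchTimeProvider introduced above separates the two clocks a search needs: the wall clock resolves date math in index names, while a monotonic clock measures how long the operation took. A standalone illustration of computing a "took" duration the same way (names are hypothetical):

    import java.util.concurrent.TimeUnit;
    import java.util.function.LongSupplier;

    class TookTimeSketch {
        public static void main(String[] args) throws InterruptedException {
            final long absoluteStartMillis = System.currentTimeMillis(); // only for resolving "now" in index names
            final long relativeStartNanos = System.nanoTime();           // only for measuring elapsed time
            final LongSupplier relativeCurrentNanos = System::nanoTime;

            Thread.sleep(25); // stand-in for the actual search work

            long tookMillis = TimeUnit.NANOSECONDS.toMillis(relativeCurrentNanos.getAsLong() - relativeStartNanos);
            System.out.println("started at epoch millis " + absoluteStartMillis + ", took ~" + tookMillis + "ms");
            // The nanoTime difference is monotonic and immune to NTP adjustments,
            // which is exactly why the two uses are kept apart.
        }
    }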
SearchReques return connection; }; - searchAsyncAction(task, searchRequest, shardIterators, startTimeInMillis, connectionLookup, clusterState.version(), + searchAsyncAction(task, searchRequest, shardIterators, timeProvider, connectionLookup, clusterState.version(), Collections.unmodifiableMap(aliasFilter), concreteIndexBoosts, listener).start(); } @@ -236,7 +289,7 @@ protected final void doExecute(SearchRequest searchRequest, ActionListener connectionLookup, + SearchTimeProvider timeProvider, Function connectionLookup, long clusterStateVersion, Map aliasFilter, Map concreteIndexBoosts, ActionListener listener) { @@ -245,12 +298,12 @@ private AbstractSearchAsyncAction searchAsyncAction(SearchTask task, SearchReque switch(searchRequest.searchType()) { case DFS_QUERY_THEN_FETCH: searchAsyncAction = new SearchDfsQueryThenFetchAsyncAction(logger, searchTransportService, connectionLookup, - aliasFilter, concreteIndexBoosts, searchPhaseController, executor, searchRequest, listener, shardIterators, startTime, + aliasFilter, concreteIndexBoosts, searchPhaseController, executor, searchRequest, listener, shardIterators, timeProvider, clusterStateVersion, task); break; case QUERY_THEN_FETCH: searchAsyncAction = new SearchQueryThenFetchAsyncAction(logger, searchTransportService, connectionLookup, - aliasFilter, concreteIndexBoosts, searchPhaseController, executor, searchRequest, listener, shardIterators, startTime, + aliasFilter, concreteIndexBoosts, searchPhaseController, executor, searchRequest, listener, shardIterators, timeProvider, clusterStateVersion, task); break; default: diff --git a/core/src/main/java/org/elasticsearch/action/search/TransportSearchHelper.java b/core/src/main/java/org/elasticsearch/action/search/TransportSearchHelper.java index 975c0be6f0f82..e494bb6768d65 100644 --- a/core/src/main/java/org/elasticsearch/action/search/TransportSearchHelper.java +++ b/core/src/main/java/org/elasticsearch/action/search/TransportSearchHelper.java @@ -38,10 +38,9 @@ static String buildScrollId(AtomicArray searchPhase try (RAMOutputStream out = new RAMOutputStream()) { out.writeString(searchPhaseResults.length() == 1 ? 
ParsedScrollId.QUERY_AND_FETCH_TYPE : ParsedScrollId.QUERY_THEN_FETCH_TYPE); out.writeVInt(searchPhaseResults.asList().size()); - for (AtomicArray.Entry entry : searchPhaseResults.asList()) { - SearchPhaseResult searchPhaseResult = entry.value; - out.writeLong(searchPhaseResult.id()); - out.writeString(searchPhaseResult.shardTarget().getNodeId()); + for (SearchPhaseResult searchPhaseResult : searchPhaseResults.asList()) { + out.writeLong(searchPhaseResult.getRequestId()); + out.writeString(searchPhaseResult.getSearchShardTarget().getNodeId()); } byte[] bytes = new byte[(int) out.getFilePointer()]; out.writeTo(bytes, 0); diff --git a/core/src/main/java/org/elasticsearch/action/support/replication/TransportWriteAction.java b/core/src/main/java/org/elasticsearch/action/support/replication/TransportWriteAction.java index 10f8741ecccb6..ae4ae78c03386 100644 --- a/core/src/main/java/org/elasticsearch/action/support/replication/TransportWriteAction.java +++ b/core/src/main/java/org/elasticsearch/action/support/replication/TransportWriteAction.java @@ -22,16 +22,12 @@ import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; import org.elasticsearch.action.ActionListener; -import org.elasticsearch.action.bulk.BulkRequest; -import org.elasticsearch.action.bulk.BulkShardRequest; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.WriteRequest; import org.elasticsearch.action.support.WriteResponse; -import org.elasticsearch.client.transport.NoNodeAvailableException; import org.elasticsearch.cluster.action.shard.ShardStateAction; import org.elasticsearch.cluster.block.ClusterBlockLevel; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; -import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.Nullable; @@ -46,7 +42,6 @@ import org.elasticsearch.transport.TransportException; import org.elasticsearch.transport.TransportResponse; import org.elasticsearch.transport.TransportService; -import org.apache.logging.log4j.core.pattern.ConverterKeys; import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicInteger; @@ -302,15 +297,21 @@ private void maybeFinish() { } void run() { - // we either respond immediately ie. if we we don't fsync per request or wait for refresh - // OR we got an pass async operations on and wait for them to return to respond. - indexShard.maybeFlush(); - maybeFinish(); // decrement the pendingOpts by one, if there is nothing else to do we just respond with success. + /* + * We either respond immediately (i.e., if we do not fsync per request or wait for + * refresh), or we there are past async operations and we wait for them to return to + * respond. 
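The TransportSearchHelper.buildScrollId hunk above serializes, for each shard, the search context's request id together with the id of the node that owns that context, producing the opaque scroll token the client sends back. A rough JDK-only sketch of that shape; the real method uses a Lucene RAMOutputStream and its own binary layout, so this is illustrative only:

    import java.io.ByteArrayOutputStream;
    import java.io.DataOutputStream;
    import java.io.IOException;
    import java.util.Base64;
    import java.util.List;
    import java.util.Map;

    class ScrollIdSketch {
        static String build(String type, List<Map.Entry<Long, String>> contexts) throws IOException {
            ByteArrayOutputStream bytes = new ByteArrayOutputStream();
            try (DataOutputStream out = new DataOutputStream(bytes)) {
                out.writeUTF(type);                      // e.g. the query-then-fetch scroll type
                out.writeInt(contexts.size());
                for (Map.Entry<Long, String> context : contexts) {
                    out.writeLong(context.getKey());     // per-shard request (context) id
                    out.writeUTF(context.getValue());    // node that holds that context
                }
            }
            return Base64.getUrlEncoder().encodeToString(bytes.toByteArray());
        }
    }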
+ */ + indexShard.afterWriteOperation(); + // decrement pending by one, if there is nothing else to do we just respond with success + maybeFinish(); if (waitUntilRefresh) { assert pendingOps.get() > 0; indexShard.addRefreshListener(location, forcedRefresh -> { if (forcedRefresh) { - logger.warn("block_until_refresh request ran out of slots and forced a refresh: [{}]", request); + logger.warn( + "block until refresh ran out of slots and forced a refresh: [{}]", + request); } refreshed.set(forcedRefresh); maybeFinish(); diff --git a/core/src/main/java/org/elasticsearch/action/support/tasks/TransportTasksAction.java b/core/src/main/java/org/elasticsearch/action/support/tasks/TransportTasksAction.java index 9d1cf5e37e7f6..86d158784c03a 100644 --- a/core/src/main/java/org/elasticsearch/action/support/tasks/TransportTasksAction.java +++ b/core/src/main/java/org/elasticsearch/action/support/tasks/TransportTasksAction.java @@ -135,14 +135,14 @@ private void respondIfFinished() { } List results = new ArrayList<>(); List exceptions = new ArrayList<>(); - for (AtomicArray.Entry> response : responses.asList()) { - if (response.value.v1() == null) { - assert response.value.v2() != null; + for (Tuple response : responses.asList()) { + if (response.v1() == null) { + assert response.v2() != null; exceptions.add(new TaskOperationFailure(clusterService.localNode().getId(), tasks.get(taskIndex).getId(), - response.value.v2())); + response.v2())); } else { - assert response.value.v2() == null; - results.add(response.value.v1()); + assert response.v2() == null; + results.add(response.v1()); } } listener.onResponse(new NodeTasksResponse(clusterService.localNode().getId(), results, exceptions)); diff --git a/core/src/main/java/org/elasticsearch/action/update/UpdateHelper.java b/core/src/main/java/org/elasticsearch/action/update/UpdateHelper.java index 7f5482afb0da5..756ff80ddad60 100644 --- a/core/src/main/java/org/elasticsearch/action/update/UpdateHelper.java +++ b/core/src/main/java/org/elasticsearch/action/update/UpdateHelper.java @@ -122,6 +122,7 @@ protected Result prepare(ShardId shardId, UpdateRequest request, final GetResult .setRefreshPolicy(request.getRefreshPolicy()) .routing(request.routing()) .parent(request.parent()) + .timeout(request.timeout()) .waitForActiveShards(request.waitForActiveShards()); if (request.versionType() != VersionType.INTERNAL) { // in all but the internal versioning mode, we want to create the new document using the given version. 
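The write-action hunk above responds only once both the write itself and, when requested, the refresh that makes its location visible have completed; pendingOps counts the outstanding pieces and maybeFinish fires the response when the count reaches zero. A compact sketch of that shape, with hypothetical names and a Runnable standing in for the refresh-listener registration:

    import java.util.concurrent.atomic.AtomicInteger;
    import java.util.function.Consumer;

    class WaitForRefreshSketch {
        private final AtomicInteger pendingOps = new AtomicInteger(1); // one slot for the write itself
        private final Runnable respond;

        WaitForRefreshSketch(Runnable respond) {
            this.respond = respond;
        }

        void onWriteDone(boolean waitUntilRefresh, Consumer<Runnable> addRefreshListener) {
            if (waitUntilRefresh) {
                pendingOps.incrementAndGet();                  // hold the response for the refresh
                addRefreshListener.accept(this::maybeFinish);  // invoked once the write is searchable
            }
            maybeFinish(); // the write operation itself is done
        }

        private void maybeFinish() {
            if (pendingOps.decrementAndGet() == 0) {
                respond.run(); // responds exactly once, after every pending piece has reported in
            }
        }
    }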
@@ -188,12 +189,14 @@ protected Result prepare(ShardId shardId, UpdateRequest request, final GetResult .source(updatedSourceAsMap, updateSourceContentType) .version(updateVersion).versionType(request.versionType()) .waitForActiveShards(request.waitForActiveShards()) + .timeout(request.timeout()) .setRefreshPolicy(request.getRefreshPolicy()); return new Result(indexRequest, DocWriteResponse.Result.UPDATED, updatedSourceAsMap, updateSourceContentType); } else if ("delete".equals(operation)) { DeleteRequest deleteRequest = Requests.deleteRequest(request.index()).type(request.type()).id(request.id()).routing(routing).parent(parent) .version(updateVersion).versionType(request.versionType()) .waitForActiveShards(request.waitForActiveShards()) + .timeout(request.timeout()) .setRefreshPolicy(request.getRefreshPolicy()); return new Result(deleteRequest, DocWriteResponse.Result.DELETED, updatedSourceAsMap, updateSourceContentType); } else if ("none".equals(operation)) { diff --git a/core/src/main/java/org/elasticsearch/bootstrap/BootstrapChecks.java b/core/src/main/java/org/elasticsearch/bootstrap/BootstrapChecks.java index 7e82852a9f3cc..a9758267ff3f1 100644 --- a/core/src/main/java/org/elasticsearch/bootstrap/BootstrapChecks.java +++ b/core/src/main/java/org/elasticsearch/bootstrap/BootstrapChecks.java @@ -195,6 +195,7 @@ static List checks(final Settings settings) { checks.add(new SystemCallFilterCheck(BootstrapSettings.SYSTEM_CALL_FILTER_SETTING.get(settings))); checks.add(new OnErrorCheck()); checks.add(new OnOutOfMemoryErrorCheck()); + checks.add(new EarlyAccessCheck()); checks.add(new G1GCCheck()); return Collections.unmodifiableList(checks); } @@ -577,6 +578,34 @@ public String errorMessage() { } + /** + * Bootstrap check for early-access builds from OpenJDK. + */ + static class EarlyAccessCheck implements BootstrapCheck { + + @Override + public boolean check() { + return "Oracle Corporation".equals(jvmVendor()) && javaVersion().endsWith("-ea"); + } + + String jvmVendor() { + return Constants.JVM_VENDOR; + } + + String javaVersion() { + return Constants.JAVA_VERSION; + } + + @Override + public String errorMessage() { + return String.format( + Locale.ROOT, + "Java version [%s] is an early-access build, only use release builds", + javaVersion()); + } + + } + /** * Bootstrap check for versions of HotSpot that are known to have issues that can lead to index corruption when G1GC is enabled. 
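The EarlyAccessCheck added above refuses to bootstrap on OpenJDK early-access builds, which it detects as vendor "Oracle Corporation" plus a Java version ending in "-ea" (the diff reads both through Lucene's Constants). A standalone approximation that goes straight to the system properties instead:

    class EarlyAccessCheckSketch {
        static boolean isEarlyAccessBuild() {
            String vendor = System.getProperty("java.vm.vendor", "");
            String version = System.getProperty("java.version", "");
            return "Oracle Corporation".equals(vendor) && version.endsWith("-ea");
        }

        public static void main(String[] args) {
            if (isEarlyAccessBuild()) {
                throw new IllegalStateException("Java version [" + System.getProperty("java.version")
                        + "] is an early-access build, only use release builds");
            }
            System.out.println("not an early-access build");
        }
    }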
*/ diff --git a/core/src/main/java/org/elasticsearch/bootstrap/ESPolicy.java b/core/src/main/java/org/elasticsearch/bootstrap/ESPolicy.java index e8538daec56b0..74fa7e0c1d5ac 100644 --- a/core/src/main/java/org/elasticsearch/bootstrap/ESPolicy.java +++ b/core/src/main/java/org/elasticsearch/bootstrap/ESPolicy.java @@ -31,6 +31,7 @@ import java.security.Permissions; import java.security.Policy; import java.security.ProtectionDomain; +import java.util.Collections; import java.util.Map; import java.util.function.Predicate; @@ -50,7 +51,7 @@ final class ESPolicy extends Policy { ESPolicy(PermissionCollection dynamic, Map plugins, boolean filterBadDefaults) { this.template = Security.readPolicy(getClass().getResource(POLICY_RESOURCE), JarHell.parseClassPath()); - this.untrusted = Security.readPolicy(getClass().getResource(UNTRUSTED_RESOURCE), new URL[0]); + this.untrusted = Security.readPolicy(getClass().getResource(UNTRUSTED_RESOURCE), Collections.emptySet()); if (filterBadDefaults) { this.system = new SystemPolicy(Policy.getPolicy()); } else { diff --git a/core/src/main/java/org/elasticsearch/bootstrap/JarHell.java b/core/src/main/java/org/elasticsearch/bootstrap/JarHell.java index 22ba936d9030f..c5346bf243d90 100644 --- a/core/src/main/java/org/elasticsearch/bootstrap/JarHell.java +++ b/core/src/main/java/org/elasticsearch/bootstrap/JarHell.java @@ -36,9 +36,11 @@ import java.nio.file.SimpleFileVisitor; import java.nio.file.attribute.BasicFileAttributes; import java.util.Arrays; +import java.util.Collections; import java.util.Enumeration; import java.util.HashMap; import java.util.HashSet; +import java.util.LinkedHashSet; import java.util.Locale; import java.util.Map; import java.util.Set; @@ -93,7 +95,7 @@ public static void checkJarHell() throws IOException, URISyntaxException { * @return array of URLs * @throws IllegalStateException if the classpath contains empty elements */ - public static URL[] parseClassPath() { + public static Set parseClassPath() { return parseClassPath(System.getProperty("java.class.path")); } @@ -104,13 +106,12 @@ public static URL[] parseClassPath() { * @throws IllegalStateException if the classpath contains empty elements */ @SuppressForbidden(reason = "resolves against CWD because that is how classpaths work") - static URL[] parseClassPath(String classPath) { + static Set parseClassPath(String classPath) { String pathSeparator = System.getProperty("path.separator"); String fileSeparator = System.getProperty("file.separator"); String elements[] = classPath.split(pathSeparator); - URL urlElements[] = new URL[elements.length]; - for (int i = 0; i < elements.length; i++) { - String element = elements[i]; + Set urlElements = new LinkedHashSet<>(); // order is already lost, but some filesystems have it + for (String element : elements) { // Technically empty classpath element behaves like CWD. // So below is the "correct" code, however in practice with ES, this is usually just a misconfiguration, // from old shell scripts left behind or something: @@ -136,13 +137,17 @@ static URL[] parseClassPath(String classPath) { } // now just parse as ordinary file try { - urlElements[i] = PathUtils.get(element).toUri().toURL(); + URL url = PathUtils.get(element).toUri().toURL(); + if (urlElements.add(url) == false) { + throw new IllegalStateException("jar hell!" 
+ System.lineSeparator() + + "duplicate jar on classpath: " + classPath); + } } catch (MalformedURLException e) { // should not happen, as we use the filesystem API throw new RuntimeException(e); } } - return urlElements; + return Collections.unmodifiableSet(urlElements); } /** @@ -150,7 +155,7 @@ static URL[] parseClassPath(String classPath) { * @throws IllegalStateException if jar hell was found */ @SuppressForbidden(reason = "needs JarFile for speed, just reading entries") - public static void checkJarHell(URL urls[]) throws URISyntaxException, IOException { + public static void checkJarHell(Set urls) throws URISyntaxException, IOException { Logger logger = Loggers.getLogger(JarHell.class); // we don't try to be sneaky and use deprecated/internal/not portable stuff // like sun.boot.class.path, and with jigsaw we don't yet have a way to get @@ -168,8 +173,8 @@ public static void checkJarHell(URL urls[]) throws URISyntaxException, IOExcepti } if (path.toString().endsWith(".jar")) { if (!seenJars.add(path)) { - logger.debug("excluding duplicate classpath element: {}", path); - continue; + throw new IllegalStateException("jar hell!" + System.lineSeparator() + + "duplicate jar on classpath: " + path); } logger.debug("examining jar: {}", path); try (JarFile file = new JarFile(path.toString())) { @@ -198,8 +203,8 @@ public static void checkJarHell(URL urls[]) throws URISyntaxException, IOExcepti public FileVisitResult visitFile(Path file, BasicFileAttributes attrs) throws IOException { String entry = root.relativize(file).toString(); if (entry.endsWith(".class")) { - // normalize with the os separator - entry = entry.replace(sep, ".").substring(0, entry.length() - 6); + // normalize with the os separator, remove '.class' + entry = entry.replace(sep, ".").substring(0, entry.length() - ".class".length()); checkClass(clazzes, entry, path); } return super.visitFile(file, attrs); diff --git a/core/src/main/java/org/elasticsearch/bootstrap/Security.java b/core/src/main/java/org/elasticsearch/bootstrap/Security.java index 3b59f235b1cf4..de16bbe76aa42 100644 --- a/core/src/main/java/org/elasticsearch/bootstrap/Security.java +++ b/core/src/main/java/org/elasticsearch/bootstrap/Security.java @@ -48,8 +48,10 @@ import java.util.ArrayList; import java.util.Collections; import java.util.HashMap; +import java.util.LinkedHashSet; import java.util.List; import java.util.Map; +import java.util.Set; /** * Initializes SecurityManager with necessary permissions. @@ -127,19 +129,23 @@ static void configure(Environment environment, boolean filterBadDefaults) throws @SuppressForbidden(reason = "proper use of URL") static Map getPluginPermissions(Environment environment) throws IOException, NoSuchAlgorithmException { Map map = new HashMap<>(); - // collect up lists of plugins and modules - List pluginsAndModules = new ArrayList<>(); + // collect up set of plugins and modules by listing directories. 
+ Set pluginsAndModules = new LinkedHashSet<>(); // order is already lost, but some filesystems have it if (Files.exists(environment.pluginsFile())) { try (DirectoryStream stream = Files.newDirectoryStream(environment.pluginsFile())) { for (Path plugin : stream) { - pluginsAndModules.add(plugin); + if (pluginsAndModules.add(plugin) == false) { + throw new IllegalStateException("duplicate plugin: " + plugin); + } } } } if (Files.exists(environment.modulesFile())) { try (DirectoryStream stream = Files.newDirectoryStream(environment.modulesFile())) { - for (Path plugin : stream) { - pluginsAndModules.add(plugin); + for (Path module : stream) { + if (pluginsAndModules.add(module) == false) { + throw new IllegalStateException("duplicate module: " + module); + } } } } @@ -149,15 +155,18 @@ static Map getPluginPermissions(Environment environment) throws I if (Files.exists(policyFile)) { // first get a list of URLs for the plugins' jars: // we resolve symlinks so map is keyed on the normalize codebase name - List codebases = new ArrayList<>(); + Set codebases = new LinkedHashSet<>(); // order is already lost, but some filesystems have it try (DirectoryStream jarStream = Files.newDirectoryStream(plugin, "*.jar")) { for (Path jar : jarStream) { - codebases.add(jar.toRealPath().toUri().toURL()); + URL url = jar.toRealPath().toUri().toURL(); + if (codebases.add(url) == false) { + throw new IllegalStateException("duplicate module/plugin: " + url); + } } } // parse the plugin's policy file into a set of permissions - Policy policy = readPolicy(policyFile.toUri().toURL(), codebases.toArray(new URL[codebases.size()])); + Policy policy = readPolicy(policyFile.toUri().toURL(), codebases); // consult this policy for each of the plugin's jars: for (URL url : codebases) { @@ -175,24 +184,33 @@ static Map getPluginPermissions(Environment environment) throws I /** * Reads and returns the specified {@code policyFile}. *

- * Resources (e.g. jar files and directories) listed in {@code codebases} location - * will be provided to the policy file via a system property of the short name: - * e.g. ${codebase.joda-convert-1.2.jar} would map to full URL. + * Jar files listed in {@code codebases} location will be provided to the policy file via + * a system property of the short name: e.g. ${codebase.joda-convert-1.2.jar} + * would map to full URL. */ @SuppressForbidden(reason = "accesses fully qualified URLs to configure security") - static Policy readPolicy(URL policyFile, URL codebases[]) { + static Policy readPolicy(URL policyFile, Set codebases) { try { try { // set codebase properties for (URL url : codebases) { String shortName = PathUtils.get(url.toURI()).getFileName().toString(); - System.setProperty("codebase." + shortName, url.toString()); + if (shortName.endsWith(".jar") == false) { + continue; // tests :( + } + String previous = System.setProperty("codebase." + shortName, url.toString()); + if (previous != null) { + throw new IllegalStateException("codebase property already set: " + shortName + "->" + previous); + } } return Policy.getInstance("JavaPolicy", new URIParameter(policyFile.toURI())); } finally { // clear codebase properties for (URL url : codebases) { String shortName = PathUtils.get(url.toURI()).getFileName().toString(); + if (shortName.endsWith(".jar") == false) { + continue; // tests :( + } System.clearProperty("codebase." + shortName); } } diff --git a/core/src/main/java/org/elasticsearch/bootstrap/Spawner.java b/core/src/main/java/org/elasticsearch/bootstrap/Spawner.java index 44cf2d2b0aa40..53983cb472ef1 100644 --- a/core/src/main/java/org/elasticsearch/bootstrap/Spawner.java +++ b/core/src/main/java/org/elasticsearch/bootstrap/Spawner.java @@ -19,9 +19,10 @@ package org.elasticsearch.bootstrap; -import org.apache.lucene.util.Constants; import org.apache.lucene.util.IOUtils; import org.elasticsearch.env.Environment; +import org.elasticsearch.plugins.PluginInfo; +import org.elasticsearch.plugins.Platforms; import java.io.Closeable; import java.io.IOException; @@ -32,97 +33,89 @@ import java.util.Collections; import java.util.List; import java.util.Locale; +import java.util.concurrent.atomic.AtomicBoolean; /** - * Spawns native plugin controller processes if present. Will only work prior to a system call filter being installed. + * Spawns native plugin controller processes if present. Will only work prior to a system call + * filter being installed. */ final class Spawner implements Closeable { - private static final String PROGRAM_NAME = Constants.WINDOWS ? "controller.exe" : "controller"; - private static final String PLATFORM_NAME = makePlatformName(Constants.OS_NAME, Constants.OS_ARCH); - private static final String TMP_ENVVAR = "TMPDIR"; - - /** + /* * References to the processes that have been spawned, so that we can destroy them. */ private final List processes = new ArrayList<>(); + private AtomicBoolean spawned = new AtomicBoolean(); @Override public void close() throws IOException { - try { - IOUtils.close(() -> processes.stream().map(s -> (Closeable)s::destroy).iterator()); - } finally { - processes.clear(); - } + IOUtils.close(() -> processes.stream().map(s -> (Closeable) s::destroy).iterator()); } /** - * For each plugin, attempt to spawn the controller daemon. Silently ignore any plugins - * that don't include a controller for the correct platform. 
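An aside on the Security#readPolicy hunk above: every codebase URL is surfaced to the policy parser as a system property named codebase.<jar-file-name>, and re-setting an already-present property is now a hard error instead of a silent overwrite. A minimal sketch of that mapping, with a made-up plugin jar path and the property consumption in the plugin's policy file shown only as a comment:

    // hypothetical jar shipped by a plugin
    URL url = PathUtils.get("/usr/share/elasticsearch/plugins/example/joda-convert-1.2.jar").toUri().toURL();
    String shortName = PathUtils.get(url.toURI()).getFileName().toString();      // "joda-convert-1.2.jar"
    String previous = System.setProperty("codebase." + shortName, url.toString());
    if (previous != null) {
        // two jars resolving to the same short name would otherwise grant permissions unpredictably
        throw new IllegalStateException("codebase property already set: " + shortName + "->" + previous);
    }
    // the plugin's plugin-security.policy can then reference it, e.g.:
    //   grant codeBase "${codebase.joda-convert-1.2.jar}" { permission java.net.SocketPermission "*", "connect"; };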
+ * Spawns the native controllers for each plugin + * + * @param environment the node environment + * @throws IOException if an I/O error occurs reading the plugins or spawning a native process */ - void spawnNativePluginControllers(Environment environment) throws IOException { - if (Files.exists(environment.pluginsFile())) { - try (DirectoryStream stream = Files.newDirectoryStream(environment.pluginsFile())) { - for (Path plugin : stream) { - Path spawnPath = makeSpawnPath(plugin); - if (Files.isRegularFile(spawnPath)) { - spawnNativePluginController(spawnPath, environment.tmpFile()); - } + void spawnNativePluginControllers(final Environment environment) throws IOException { + if (!spawned.compareAndSet(false, true)) { + throw new IllegalStateException("native controllers already spawned"); + } + final Path pluginsFile = environment.pluginsFile(); + if (!Files.exists(pluginsFile)) { + throw new IllegalStateException("plugins directory [" + pluginsFile + "] not found"); + } + /* + * For each plugin, attempt to spawn the controller daemon. Silently ignore any plugin that + * don't include a controller for the correct platform. + */ + try (DirectoryStream stream = Files.newDirectoryStream(pluginsFile)) { + for (final Path plugin : stream) { + final PluginInfo info = PluginInfo.readFromProperties(plugin); + final Path spawnPath = Platforms.nativeControllerPath(plugin); + if (!Files.isRegularFile(spawnPath)) { + continue; + } + if (!info.hasNativeController()) { + final String message = String.format( + Locale.ROOT, + "plugin [%s] does not have permission to fork native controller", + plugin.getFileName()); + throw new IllegalArgumentException(message); } + final Process process = + spawnNativePluginController(spawnPath, environment.tmpFile()); + processes.add(process); } } } /** - * Attempt to spawn the controller daemon for a given plugin. The spawned process - * will remain connected to this JVM via its stdin, stdout and stderr, but the - * references to these streams are not available to code outside this package. + * Attempt to spawn the controller daemon for a given plugin. The spawned process will remain + * connected to this JVM via its stdin, stdout, and stderr streams, but the references to these + * streams are not available to code outside this package. */ - private void spawnNativePluginController(Path spawnPath, Path tmpPath) throws IOException { - ProcessBuilder pb = new ProcessBuilder(spawnPath.toString()); + private Process spawnNativePluginController( + final Path spawnPath, + final Path tmpPath) throws IOException { + final ProcessBuilder pb = new ProcessBuilder(spawnPath.toString()); - // The only environment variable passes on the path to the temporary directory + // the only environment variable passes on the path to the temporary directory pb.environment().clear(); - pb.environment().put(TMP_ENVVAR, tmpPath.toString()); + pb.environment().put("TMPDIR", tmpPath.toString()); - // The output stream of the Process object corresponds to the daemon's stdin - processes.add(pb.start()); - } - - List getProcesses() { - return Collections.unmodifiableList(processes); + // the output stream of the process object corresponds to the daemon's stdin + return pb.start(); } /** - * Make the full path to the program to be spawned. + * The collection of processes representing spawned native controllers. 
+ * + * @return the processes */ - static Path makeSpawnPath(Path plugin) { - return plugin.resolve("platform").resolve(PLATFORM_NAME).resolve("bin").resolve(PROGRAM_NAME); + List getProcesses() { + return Collections.unmodifiableList(processes); } - /** - * Make the platform name in the format used in Kibana downloads, for example: - * - darwin-x86_64 - * - linux-x86-64 - * - windows-x86_64 - * For *nix platforms this is more-or-less `uname -s`-`uname -m` converted to lower case. - * However, for consistency between different operating systems on the same architecture - * "amd64" is replaced with "x86_64" and "i386" with "x86". - * For Windows it's "windows-" followed by either "x86" or "x86_64". - */ - static String makePlatformName(String osName, String osArch) { - String os = osName.toLowerCase(Locale.ROOT); - if (os.startsWith("windows")) { - os = "windows"; - } else if (os.equals("mac os x")) { - os = "darwin"; - } - String cpu = osArch.toLowerCase(Locale.ROOT); - if (cpu.equals("amd64")) { - cpu = "x86_64"; - } else if (cpu.equals("i386")) { - cpu = "x86"; - } - return os + "-" + cpu; - } } diff --git a/core/src/main/java/org/elasticsearch/client/Client.java b/core/src/main/java/org/elasticsearch/client/Client.java index 0cf22d7a2c4fc..663b820dc3956 100644 --- a/core/src/main/java/org/elasticsearch/client/Client.java +++ b/core/src/main/java/org/elasticsearch/client/Client.java @@ -30,6 +30,10 @@ import org.elasticsearch.action.explain.ExplainRequest; import org.elasticsearch.action.explain.ExplainRequestBuilder; import org.elasticsearch.action.explain.ExplainResponse; +import org.elasticsearch.action.fieldcaps.FieldCapabilities; +import org.elasticsearch.action.fieldcaps.FieldCapabilitiesRequest; +import org.elasticsearch.action.fieldcaps.FieldCapabilitiesRequestBuilder; +import org.elasticsearch.action.fieldcaps.FieldCapabilitiesResponse; import org.elasticsearch.action.fieldstats.FieldStatsRequest; import org.elasticsearch.action.fieldstats.FieldStatsRequestBuilder; import org.elasticsearch.action.fieldstats.FieldStatsResponse; @@ -458,6 +462,21 @@ public interface Client extends ElasticsearchClient, Releasable { void fieldStats(FieldStatsRequest request, ActionListener listener); + /** + * Builder for the field capabilities request. 
+ */ + FieldCapabilitiesRequestBuilder prepareFieldCaps(); + + /** + * An action that returns the field capabilities from the provided request + */ + ActionFuture fieldCaps(FieldCapabilitiesRequest request); + + /** + * An action that returns the field capabilities from the provided request + */ + void fieldCaps(FieldCapabilitiesRequest request, ActionListener listener); + /** * Returns this clients settings */ diff --git a/core/src/main/java/org/elasticsearch/client/IndicesAdminClient.java b/core/src/main/java/org/elasticsearch/client/IndicesAdminClient.java index 03fdee2db3400..b254039910c01 100644 --- a/core/src/main/java/org/elasticsearch/client/IndicesAdminClient.java +++ b/core/src/main/java/org/elasticsearch/client/IndicesAdminClient.java @@ -50,6 +50,9 @@ import org.elasticsearch.action.admin.indices.exists.types.TypesExistsRequest; import org.elasticsearch.action.admin.indices.exists.types.TypesExistsRequestBuilder; import org.elasticsearch.action.admin.indices.exists.types.TypesExistsResponse; +import org.elasticsearch.action.fieldcaps.FieldCapabilitiesRequest; +import org.elasticsearch.action.fieldcaps.FieldCapabilitiesRequestBuilder; +import org.elasticsearch.action.fieldcaps.FieldCapabilitiesResponse; import org.elasticsearch.action.admin.indices.flush.FlushRequest; import org.elasticsearch.action.admin.indices.flush.FlushRequestBuilder; import org.elasticsearch.action.admin.indices.flush.FlushResponse; @@ -817,5 +820,4 @@ public interface IndicesAdminClient extends ElasticsearchClient { * Swaps the index pointed to by an alias given all provided conditions are satisfied */ void rolloverIndex(RolloverRequest request, ActionListener listener); - } diff --git a/core/src/main/java/org/elasticsearch/client/support/AbstractClient.java b/core/src/main/java/org/elasticsearch/client/support/AbstractClient.java index e0ebcfe70a924..726875a6d5c20 100644 --- a/core/src/main/java/org/elasticsearch/client/support/AbstractClient.java +++ b/core/src/main/java/org/elasticsearch/client/support/AbstractClient.java @@ -272,6 +272,10 @@ import org.elasticsearch.action.explain.ExplainRequest; import org.elasticsearch.action.explain.ExplainRequestBuilder; import org.elasticsearch.action.explain.ExplainResponse; +import org.elasticsearch.action.fieldcaps.FieldCapabilitiesAction; +import org.elasticsearch.action.fieldcaps.FieldCapabilitiesRequest; +import org.elasticsearch.action.fieldcaps.FieldCapabilitiesRequestBuilder; +import org.elasticsearch.action.fieldcaps.FieldCapabilitiesResponse; import org.elasticsearch.action.fieldstats.FieldStatsAction; import org.elasticsearch.action.fieldstats.FieldStatsRequest; import org.elasticsearch.action.fieldstats.FieldStatsRequestBuilder; @@ -667,6 +671,21 @@ public FieldStatsRequestBuilder prepareFieldStats() { return new FieldStatsRequestBuilder(this, FieldStatsAction.INSTANCE); } + @Override + public void fieldCaps(FieldCapabilitiesRequest request, ActionListener listener) { + execute(FieldCapabilitiesAction.INSTANCE, request, listener); + } + + @Override + public ActionFuture fieldCaps(FieldCapabilitiesRequest request) { + return execute(FieldCapabilitiesAction.INSTANCE, request); + } + + @Override + public FieldCapabilitiesRequestBuilder prepareFieldCaps() { + return new FieldCapabilitiesRequestBuilder(this, FieldCapabilitiesAction.INSTANCE); + } + static class Admin implements AdminClient { private final ClusterAdmin clusterAdmin; diff --git a/core/src/main/java/org/elasticsearch/cluster/RestoreInProgress.java 
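A hedged usage sketch for the new field-capabilities client methods wired up in the Client and AbstractClient hunks above. The setFields(...)/fields(...) setters, the FieldCapabilitiesResponse#getField(...) accessor, and the logger variable are assumed rather than shown in this diff, so treat those exact names as illustrative:

    // synchronous form through the request builder
    FieldCapabilitiesResponse response = client.prepareFieldCaps()
            .setFields("timestamp")                                   // assumed builder setter
            .get();
    Map<String, FieldCapabilities> caps = response.getField("timestamp");

    // asynchronous form using the listener overload
    FieldCapabilitiesRequest request = new FieldCapabilitiesRequest().fields("timestamp");
    client.fieldCaps(request, ActionListener.wrap(
            r -> logger.info("capabilities for [timestamp]: {}", r.getField("timestamp")),
            e -> logger.warn("field_caps call failed", e)));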
b/core/src/main/java/org/elasticsearch/cluster/RestoreInProgress.java index 55e70dbe644ff..8f6527ccaa78b 100644 --- a/core/src/main/java/org/elasticsearch/cluster/RestoreInProgress.java +++ b/core/src/main/java/org/elasticsearch/cluster/RestoreInProgress.java @@ -89,6 +89,18 @@ public int hashCode() { return entries.hashCode(); } + @Override + public String toString() { + StringBuilder builder = new StringBuilder("RestoreInProgress["); + for (int i = 0; i < entries.size(); i++) { + builder.append(entries.get(i).snapshot().getSnapshotId().getName()); + if (i + 1 < entries.size()) { + builder.append(","); + } + } + return builder.append("]").toString(); + } + /** * Restore metadata */ diff --git a/core/src/main/java/org/elasticsearch/cluster/SnapshotDeletionsInProgress.java b/core/src/main/java/org/elasticsearch/cluster/SnapshotDeletionsInProgress.java index b3ab12fe21abf..446f4ae07410d 100644 --- a/core/src/main/java/org/elasticsearch/cluster/SnapshotDeletionsInProgress.java +++ b/core/src/main/java/org/elasticsearch/cluster/SnapshotDeletionsInProgress.java @@ -150,6 +150,18 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws return builder; } + @Override + public String toString() { + StringBuilder builder = new StringBuilder("SnapshotDeletionsInProgress["); + for (int i = 0; i < entries.size(); i++) { + builder.append(entries.get(i).getSnapshot().getSnapshotId().getName()); + if (i + 1 < entries.size()) { + builder.append(","); + } + } + return builder.append("]").toString(); + } + /** * A class representing a snapshot deletion request entry in the cluster state. */ diff --git a/core/src/main/java/org/elasticsearch/cluster/SnapshotsInProgress.java b/core/src/main/java/org/elasticsearch/cluster/SnapshotsInProgress.java index 0ac1e8e4090ab..1e1b61281b4b5 100644 --- a/core/src/main/java/org/elasticsearch/cluster/SnapshotsInProgress.java +++ b/core/src/main/java/org/elasticsearch/cluster/SnapshotsInProgress.java @@ -70,6 +70,18 @@ public int hashCode() { return entries.hashCode(); } + @Override + public String toString() { + StringBuilder builder = new StringBuilder("SnapshotsInProgress["); + for (int i = 0; i < entries.size(); i++) { + builder.append(entries.get(i).snapshot().getSnapshotId().getName()); + if (i + 1 < entries.size()) { + builder.append(","); + } + } + return builder.append("]").toString(); + } + public static class Entry { private final State state; private final Snapshot snapshot; diff --git a/core/src/main/java/org/elasticsearch/cluster/metadata/IndexMetaData.java b/core/src/main/java/org/elasticsearch/cluster/metadata/IndexMetaData.java index 68575634a1eb3..1f7f3374ed41c 100644 --- a/core/src/main/java/org/elasticsearch/cluster/metadata/IndexMetaData.java +++ b/core/src/main/java/org/elasticsearch/cluster/metadata/IndexMetaData.java @@ -226,7 +226,6 @@ static Setting buildNumberOfShardsSetting() { public static final String SETTING_VERSION_CREATED_STRING = "index.version.created_string"; public static final String SETTING_VERSION_UPGRADED = "index.version.upgraded"; public static final String SETTING_VERSION_UPGRADED_STRING = "index.version.upgraded_string"; - public static final String SETTING_VERSION_MINIMUM_COMPATIBLE = "index.version.minimum_compatible"; public static final String SETTING_CREATION_DATE = "index.creation_date"; /** * The user provided name for an index. This is the plain string provided by the user when the index was created. 
@@ -311,7 +310,6 @@ static Setting buildNumberOfShardsSetting() { private final Version indexCreatedVersion; private final Version indexUpgradedVersion; - private final org.apache.lucene.util.Version minimumCompatibleLuceneVersion; private final ActiveShardCount waitForActiveShards; @@ -319,7 +317,7 @@ private IndexMetaData(Index index, long version, long[] primaryTerms, State stat ImmutableOpenMap mappings, ImmutableOpenMap aliases, ImmutableOpenMap customs, ImmutableOpenIntMap> inSyncAllocationIds, DiscoveryNodeFilters requireFilters, DiscoveryNodeFilters initialRecoveryFilters, DiscoveryNodeFilters includeFilters, DiscoveryNodeFilters excludeFilters, - Version indexCreatedVersion, Version indexUpgradedVersion, org.apache.lucene.util.Version minimumCompatibleLuceneVersion, + Version indexCreatedVersion, Version indexUpgradedVersion, int routingNumShards, int routingPartitionSize, ActiveShardCount waitForActiveShards) { this.index = index; @@ -341,7 +339,6 @@ private IndexMetaData(Index index, long version, long[] primaryTerms, State stat this.initialRecoveryFilters = initialRecoveryFilters; this.indexCreatedVersion = indexCreatedVersion; this.indexUpgradedVersion = indexUpgradedVersion; - this.minimumCompatibleLuceneVersion = minimumCompatibleLuceneVersion; this.routingNumShards = routingNumShards; this.routingFactor = routingNumShards / numberOfShards; this.routingPartitionSize = routingPartitionSize; @@ -401,13 +398,6 @@ public Version getUpgradedVersion() { return indexUpgradedVersion; } - /** - * Return the {@link org.apache.lucene.util.Version} of the oldest lucene segment in the index - */ - public org.apache.lucene.util.Version getMinimumCompatibleVersion() { - return minimumCompatibleLuceneVersion; - } - public long getCreationDate() { return settings.getAsLong(SETTING_CREATION_DATE, -1L); } @@ -1052,17 +1042,6 @@ public IndexMetaData build() { } Version indexCreatedVersion = Version.indexCreated(settings); Version indexUpgradedVersion = settings.getAsVersion(IndexMetaData.SETTING_VERSION_UPGRADED, indexCreatedVersion); - String stringLuceneVersion = settings.get(SETTING_VERSION_MINIMUM_COMPATIBLE); - final org.apache.lucene.util.Version minimumCompatibleLuceneVersion; - if (stringLuceneVersion != null) { - try { - minimumCompatibleLuceneVersion = org.apache.lucene.util.Version.parse(stringLuceneVersion); - } catch (ParseException ex) { - throw new IllegalStateException("Cannot parse lucene version [" + stringLuceneVersion + "] in the [" + SETTING_VERSION_MINIMUM_COMPATIBLE + "] setting", ex); - } - } else { - minimumCompatibleLuceneVersion = null; - } if (primaryTerms == null) { initializePrimaryTerms(); @@ -1081,7 +1060,7 @@ public IndexMetaData build() { final String uuid = settings.get(SETTING_INDEX_UUID, INDEX_UUID_NA_VALUE); return new IndexMetaData(new Index(index, uuid), version, primaryTerms, state, numberOfShards, numberOfReplicas, tmpSettings, mappings.build(), tmpAliases.build(), customs.build(), filledInSyncAllocationIds.build(), requireFilters, initialRecoveryFilters, includeFilters, excludeFilters, - indexCreatedVersion, indexUpgradedVersion, minimumCompatibleLuceneVersion, getRoutingNumShards(), routingPartitionSize, waitForActiveShards); + indexCreatedVersion, indexUpgradedVersion, getRoutingNumShards(), routingPartitionSize, waitForActiveShards); } public static void toXContent(IndexMetaData indexMetaData, XContentBuilder builder, ToXContent.Params params) throws IOException { diff --git 
a/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataCreateIndexService.java b/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataCreateIndexService.java index 0bde4a23b032f..f23915be15dbd 100644 --- a/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataCreateIndexService.java +++ b/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataCreateIndexService.java @@ -603,9 +603,6 @@ static void prepareShrinkIndexSettings(ClusterState currentState, Set ma .put(IndexMetaData.SETTING_ROUTING_PARTITION_SIZE, sourceMetaData.getRoutingPartitionSize()) .put(IndexMetaData.INDEX_SHRINK_SOURCE_NAME.getKey(), shrinkFromIndex.getName()) .put(IndexMetaData.INDEX_SHRINK_SOURCE_UUID.getKey(), shrinkFromIndex.getUUID()); - if (sourceMetaData.getMinimumCompatibleVersion() != null) { - indexSettingsBuilder.put(IndexMetaData.SETTING_VERSION_MINIMUM_COMPATIBLE, sourceMetaData.getMinimumCompatibleVersion()); - } } } diff --git a/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataUpdateSettingsService.java b/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataUpdateSettingsService.java index ae9eb0d1b16b5..82d2f5fc04b12 100644 --- a/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataUpdateSettingsService.java +++ b/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataUpdateSettingsService.java @@ -329,7 +329,6 @@ public ClusterState execute(ClusterState currentState) { // No reason to pollute the settings, we didn't really upgrade anything metaDataBuilder.put(IndexMetaData.builder(indexMetaData) .settings(Settings.builder().put(indexMetaData.getSettings()) - .put(IndexMetaData.SETTING_VERSION_MINIMUM_COMPATIBLE, entry.getValue().v2()) .put(IndexMetaData.SETTING_VERSION_UPGRADED, entry.getValue().v1()) ) ); diff --git a/core/src/main/java/org/elasticsearch/common/joda/Joda.java b/core/src/main/java/org/elasticsearch/common/joda/Joda.java index 7978ceff48c76..c9eaa9ab3aa2e 100644 --- a/core/src/main/java/org/elasticsearch/common/joda/Joda.java +++ b/core/src/main/java/org/elasticsearch/common/joda/Joda.java @@ -333,9 +333,10 @@ public int parseInto(DateTimeParserBucket bucket, String text, int position) { boolean isPositive = text.startsWith("-") == false; boolean isTooLong = text.length() > estimateParsedLength(); - if ((isPositive && isTooLong) || - // timestamps have to have UTC timezone - bucket.getZone() != DateTimeZone.UTC) { + if (bucket.getZone() != DateTimeZone.UTC) { + String format = hasMilliSecondPrecision ? 
"epoch_millis" : "epoch_second"; + throw new IllegalArgumentException("time_zone must be UTC for format [" + format + "]"); + } else if (isPositive && isTooLong) { return -1; } diff --git a/core/src/main/java/org/elasticsearch/common/lucene/Lucene.java b/core/src/main/java/org/elasticsearch/common/lucene/Lucene.java index bbcea3041fc3e..bf448b6153947 100644 --- a/core/src/main/java/org/elasticsearch/common/lucene/Lucene.java +++ b/core/src/main/java/org/elasticsearch/common/lucene/Lucene.java @@ -57,6 +57,8 @@ import org.apache.lucene.search.TwoPhaseIterator; import org.apache.lucene.search.Weight; import org.apache.lucene.search.grouping.CollapseTopFieldDocs; +import org.apache.lucene.search.SortedNumericSortField; +import org.apache.lucene.search.SortedSetSortField; import org.apache.lucene.store.Directory; import org.apache.lucene.store.IOContext; import org.apache.lucene.store.IndexInput; @@ -552,7 +554,22 @@ public static void writeSortField(StreamOutput out, SortField sortField) throws SortField newSortField = new SortField(sortField.getField(), SortField.Type.DOUBLE); newSortField.setMissingValue(sortField.getMissingValue()); sortField = newSortField; + } else if (sortField.getClass() == SortedSetSortField.class) { + // for multi-valued sort field, we replace the SortedSetSortField with a simple SortField. + // It works because the sort field is only used to merge results from different shards. + SortField newSortField = new SortField(sortField.getField(), SortField.Type.STRING, sortField.getReverse()); + newSortField.setMissingValue(sortField.getMissingValue()); + sortField = newSortField; + } else if (sortField.getClass() == SortedNumericSortField.class) { + // for multi-valued sort field, we replace the SortedSetSortField with a simple SortField. + // It works because the sort field is only used to merge results from different shards. 
+ SortField newSortField = new SortField(sortField.getField(), + ((SortedNumericSortField) sortField).getNumericType(), + sortField.getReverse()); + newSortField.setMissingValue(sortField.getMissingValue()); + sortField = newSortField; } + if (sortField.getClass() != SortField.class) { throw new IllegalArgumentException("Cannot serialize SortField impl [" + sortField + "]"); } diff --git a/core/src/main/java/org/elasticsearch/common/settings/IndexScopedSettings.java b/core/src/main/java/org/elasticsearch/common/settings/IndexScopedSettings.java index badd80d5aea76..a072b68b2770d 100644 --- a/core/src/main/java/org/elasticsearch/common/settings/IndexScopedSettings.java +++ b/core/src/main/java/org/elasticsearch/common/settings/IndexScopedSettings.java @@ -125,6 +125,7 @@ public final class IndexScopedSettings extends AbstractScopedSettings { EnableAllocationDecider.INDEX_ROUTING_REBALANCE_ENABLE_SETTING, EnableAllocationDecider.INDEX_ROUTING_ALLOCATION_ENABLE_SETTING, IndexSettings.INDEX_TRANSLOG_FLUSH_THRESHOLD_SIZE_SETTING, + IndexSettings.INDEX_TRANSLOG_GENERATION_THRESHOLD_SIZE_SETTING, IndexFieldDataService.INDEX_FIELDDATA_CACHE_KEY, FieldMapper.IGNORE_MALFORMED_SETTING, FieldMapper.COERCE_SETTING, diff --git a/core/src/main/java/org/elasticsearch/common/util/concurrent/AtomicArray.java b/core/src/main/java/org/elasticsearch/common/util/concurrent/AtomicArray.java index 2278220d9dd3d..2bf5e50a1c2e4 100644 --- a/core/src/main/java/org/elasticsearch/common/util/concurrent/AtomicArray.java +++ b/core/src/main/java/org/elasticsearch/common/util/concurrent/AtomicArray.java @@ -40,7 +40,7 @@ public static E empty() { } private final AtomicReferenceArray array; - private volatile List> nonNullList; + private volatile List nonNullList; public AtomicArray(int size) { array = new AtomicReferenceArray<>(size); @@ -87,19 +87,18 @@ public E get(int i) { } /** - * Returns the it as a non null list, with an Entry wrapping each value allowing to - * retain its index. + * Returns the it as a non null list. */ - public List> asList() { + public List asList() { if (nonNullList == null) { if (array == null || array.length() == 0) { nonNullList = Collections.emptyList(); } else { - List> list = new ArrayList<>(array.length()); + List list = new ArrayList<>(array.length()); for (int i = 0; i < array.length(); i++) { E e = array.get(i); if (e != null) { - list.add(new Entry<>(i, e)); + list.add(e); } } nonNullList = list; @@ -120,23 +119,4 @@ public E[] toArray(E[] a) { } return a; } - - /** - * An entry within the array. - */ - public static class Entry { - /** - * The original index of the value within the array. - */ - public final int index; - /** - * The value. 
- */ - public final E value; - - public Entry(int index, E value) { - this.index = index; - this.value = value; - } - } } diff --git a/core/src/main/java/org/elasticsearch/common/util/concurrent/ThreadContext.java b/core/src/main/java/org/elasticsearch/common/util/concurrent/ThreadContext.java index 409a70eb6492e..1ce119636f734 100644 --- a/core/src/main/java/org/elasticsearch/common/util/concurrent/ThreadContext.java +++ b/core/src/main/java/org/elasticsearch/common/util/concurrent/ThreadContext.java @@ -25,7 +25,6 @@ import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.index.store.Store; import java.io.Closeable; import java.io.IOException; @@ -75,6 +74,7 @@ public final class ThreadContext implements Closeable, Writeable { private static final ThreadContextStruct DEFAULT_CONTEXT = new ThreadContextStruct(); private final Map defaultHeader; private final ContextThreadLocal threadLocal; + private boolean isSystemContext; /** * Creates a new ThreadContext instance @@ -317,6 +317,21 @@ boolean isDefaultContext() { return threadLocal.get() == DEFAULT_CONTEXT; } + /** + * Marks this thread context as an internal system context. This signals that actions in this context are issued + * by the system itself rather than by a user action. + */ + public void markAsSystemContext() { + threadLocal.set(threadLocal.get().setSystemContext()); + } + + /** + * Returns true iff this context is a system context + */ + public boolean isSystemContext() { + return threadLocal.get().isSystemContext; + } + /** * Returns true if the context is closed, otherwise true */ @@ -338,6 +353,7 @@ private static final class ThreadContextStruct { private final Map requestHeaders; private final Map transientHeaders; private final Map> responseHeaders; + private final boolean isSystemContext; private ThreadContextStruct(StreamInput in) throws IOException { final int numRequest = in.readVInt(); @@ -349,27 +365,36 @@ private ThreadContextStruct(StreamInput in) throws IOException { this.requestHeaders = requestHeaders; this.responseHeaders = in.readMapOfLists(StreamInput::readString, StreamInput::readString); this.transientHeaders = Collections.emptyMap(); + isSystemContext = false; // we never serialize this it's a transient flag + } + + private ThreadContextStruct setSystemContext() { + if (isSystemContext) { + return this; + } + return new ThreadContextStruct(requestHeaders, responseHeaders, transientHeaders, true); } private ThreadContextStruct(Map requestHeaders, Map> responseHeaders, - Map transientHeaders) { + Map transientHeaders, boolean isSystemContext) { this.requestHeaders = requestHeaders; this.responseHeaders = responseHeaders; this.transientHeaders = transientHeaders; + this.isSystemContext = isSystemContext; } /** * This represents the default context and it should only ever be called by {@link #DEFAULT_CONTEXT}. 
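For the markAsSystemContext()/isSystemContext() additions in the ThreadContext hunk above, a short usage sketch; stashContext() is the pre-existing ThreadContext API, and the work inside the block is hypothetical:

    try (ThreadContext.StoredContext ignore = threadContext.stashContext()) {
        // everything issued from this block is flagged as internal, system-originated work
        threadContext.markAsSystemContext();
        assert threadContext.isSystemContext();
        // ... dispatch internal transport actions here ...
    }
    // once the stashed context is restored, isSystemContext() reports false again for user requests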
*/ private ThreadContextStruct() { - this(Collections.emptyMap(), Collections.emptyMap(), Collections.emptyMap()); + this(Collections.emptyMap(), Collections.emptyMap(), Collections.emptyMap(), false); } private ThreadContextStruct putRequest(String key, String value) { Map newRequestHeaders = new HashMap<>(this.requestHeaders); putSingleHeader(key, value, newRequestHeaders); - return new ThreadContextStruct(newRequestHeaders, responseHeaders, transientHeaders); + return new ThreadContextStruct(newRequestHeaders, responseHeaders, transientHeaders, isSystemContext); } private void putSingleHeader(String key, String value, Map newHeaders) { @@ -387,7 +412,7 @@ private ThreadContextStruct putHeaders(Map headers) { putSingleHeader(entry.getKey(), entry.getValue(), newHeaders); } newHeaders.putAll(this.requestHeaders); - return new ThreadContextStruct(newHeaders, responseHeaders, transientHeaders); + return new ThreadContextStruct(newHeaders, responseHeaders, transientHeaders, isSystemContext); } } @@ -408,7 +433,7 @@ private ThreadContextStruct putResponseHeaders(Map> headers newResponseHeaders.put(key, entry.getValue()); } } - return new ThreadContextStruct(requestHeaders, newResponseHeaders, transientHeaders); + return new ThreadContextStruct(requestHeaders, newResponseHeaders, transientHeaders, isSystemContext); } private ThreadContextStruct putResponse(final String key, final String value, final Function uniqueValue) { @@ -432,7 +457,7 @@ private ThreadContextStruct putResponse(final String key, final String value, fi newResponseHeaders.put(key, Collections.singletonList(value)); } - return new ThreadContextStruct(requestHeaders, newResponseHeaders, transientHeaders); + return new ThreadContextStruct(requestHeaders, newResponseHeaders, transientHeaders, isSystemContext); } private ThreadContextStruct putTransient(String key, Object value) { @@ -440,7 +465,7 @@ private ThreadContextStruct putTransient(String key, Object value) { if (newTransient.putIfAbsent(key, value) != null) { throw new IllegalArgumentException("value for key [" + key + "] already present"); } - return new ThreadContextStruct(requestHeaders, responseHeaders, newTransient); + return new ThreadContextStruct(requestHeaders, responseHeaders, newTransient, isSystemContext); } boolean isEmpty() { diff --git a/core/src/main/java/org/elasticsearch/discovery/zen/MembershipAction.java b/core/src/main/java/org/elasticsearch/discovery/zen/MembershipAction.java index 6f56a547d3fba..442491e6b13bd 100644 --- a/core/src/main/java/org/elasticsearch/discovery/zen/MembershipAction.java +++ b/core/src/main/java/org/elasticsearch/discovery/zen/MembershipAction.java @@ -178,20 +178,28 @@ static class ValidateJoinRequestRequestHandler implements TransportRequestHandle @Override public void messageReceived(ValidateJoinRequest request, TransportChannel channel) throws Exception { - ensureIndexCompatibility(Version.CURRENT.minimumIndexCompatibilityVersion(), request.state.getMetaData()); + ensureIndexCompatibility(Version.CURRENT, request.state.getMetaData()); // for now, the mere fact that we can serialize the cluster state acts as validation.... channel.sendResponse(TransportResponse.Empty.INSTANCE); } } /** - * Ensures that all indices are compatible with the supported index version. + * Ensures that all indices are compatible with the given node version. 
This will ensure that all indices in the given metadata + * will not be created with a newer version of elasticsearch as well as that all indices are newer or equal to the minimum index + * compatibility version. + * @see Version#minimumIndexCompatibilityVersion() * @throws IllegalStateException if any index is incompatible with the given version */ - static void ensureIndexCompatibility(final Version supportedIndexVersion, MetaData metaData) { + static void ensureIndexCompatibility(final Version nodeVersion, MetaData metaData) { + Version supportedIndexVersion = nodeVersion.minimumIndexCompatibilityVersion(); // we ensure that all indices in the cluster we join are compatible with us no matter if they are // closed or not we can't read mappings of these indices so we need to reject the join... for (IndexMetaData idxMetaData : metaData) { + if (idxMetaData.getCreationVersion().after(nodeVersion)) { + throw new IllegalStateException("index " + idxMetaData.getIndex() + " version not supported: " + + idxMetaData.getCreationVersion() + " the node version is: " + nodeVersion); + } if (idxMetaData.getCreationVersion().before(supportedIndexVersion)) { throw new IllegalStateException("index " + idxMetaData.getIndex() + " version not supported: " + idxMetaData.getCreationVersion() + " minimum compatible index version is: " + supportedIndexVersion); diff --git a/core/src/main/java/org/elasticsearch/discovery/zen/NodeJoinController.java b/core/src/main/java/org/elasticsearch/discovery/zen/NodeJoinController.java index 2d84f5f863d54..354425a3dca67 100644 --- a/core/src/main/java/org/elasticsearch/discovery/zen/NodeJoinController.java +++ b/core/src/main/java/org/elasticsearch/discovery/zen/NodeJoinController.java @@ -453,7 +453,7 @@ public ClusterTasksResult execute(ClusterState currentState, List } // we do this validation quite late to prevent race conditions between nodes joining and importing dangling indices // we have to reject nodes that don't support all indices we have in this cluster - MembershipAction.ensureIndexCompatibility(minNodeVersion.minimumIndexCompatibilityVersion(), currentState.getMetaData()); + MembershipAction.ensureIndexCompatibility(minNodeVersion, currentState.getMetaData()); if (nodesChanged) { newState.nodes(nodesBuilder); return results.build(allocationService.reroute(newState.build(), "node_join")); diff --git a/core/src/main/java/org/elasticsearch/discovery/zen/ZenDiscovery.java b/core/src/main/java/org/elasticsearch/discovery/zen/ZenDiscovery.java index be6f52fc22c0c..7b24536346c52 100644 --- a/core/src/main/java/org/elasticsearch/discovery/zen/ZenDiscovery.java +++ b/core/src/main/java/org/elasticsearch/discovery/zen/ZenDiscovery.java @@ -839,7 +839,7 @@ void handleJoinRequest(final DiscoveryNode node, final ClusterState state, final } else { // we do this in a couple of places including the cluster update thread. This one here is really just best effort // to ensure we fail as fast as possible. - MembershipAction.ensureIndexCompatibility(node.getVersion().minimumIndexCompatibilityVersion(), state.getMetaData()); + MembershipAction.ensureIndexCompatibility(node.getVersion(), state.getMetaData()); // try and connect to the node, if it fails, we can raise an exception back to the client... 
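The reworked ensureIndexCompatibility above enforces a two-sided window instead of only the lower bound. A condensed sketch of the rule, not the literal method body, with hypothetical version values in the comments:

    Version nodeVersion  = Version.CURRENT;                                  // e.g. 6.0.0
    Version minCompat    = nodeVersion.minimumIndexCompatibilityVersion();   // e.g. 5.0.0
    Version indexCreated = idxMetaData.getCreationVersion();
    if (indexCreated.after(nodeVersion) || indexCreated.before(minCompat)) {
        // indices created on a newer node, or below the minimum compatibility, both block the join
        throw new IllegalStateException("index " + idxMetaData.getIndex()
                + " version not supported: " + indexCreated);
    }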
transportService.connectToNode(node); diff --git a/core/src/main/java/org/elasticsearch/index/IndexSettings.java b/core/src/main/java/org/elasticsearch/index/IndexSettings.java index 47c7ffb71bc85..4ae16255d5e97 100644 --- a/core/src/main/java/org/elasticsearch/index/IndexSettings.java +++ b/core/src/main/java/org/elasticsearch/index/IndexSettings.java @@ -22,7 +22,6 @@ import org.apache.lucene.index.MergePolicy; import org.elasticsearch.Version; import org.elasticsearch.cluster.metadata.IndexMetaData; -import org.elasticsearch.common.logging.DeprecationLogger; import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.settings.IndexScopedSettings; import org.elasticsearch.common.settings.Setting; @@ -112,6 +111,25 @@ public final class IndexSettings { Setting.byteSizeSetting("index.translog.flush_threshold_size", new ByteSizeValue(512, ByteSizeUnit.MB), Property.Dynamic, Property.IndexScope); + /** + * The maximum size of a translog generation. This is independent of the maximum size of + * translog operations that have not been flushed. + */ + public static final Setting INDEX_TRANSLOG_GENERATION_THRESHOLD_SIZE_SETTING = + Setting.byteSizeSetting( + "index.translog.generation_threshold_size", + new ByteSizeValue(64, ByteSizeUnit.MB), + /* + * An empty translog occupies 43 bytes on disk. If the generation threshold is + * below this, the flush thread can get stuck in an infinite loop repeatedly + * rolling the generation as every new generation will already exceed the + * generation threshold. However, small thresholds are useful for testing so we + * do not add a large lower bound here. + */ + new ByteSizeValue(64, ByteSizeUnit.BYTES), + new ByteSizeValue(Long.MAX_VALUE, ByteSizeUnit.BYTES), + new Property[]{Property.Dynamic, Property.IndexScope}); + public static final Setting INDEX_SEQ_NO_CHECKPOINT_SYNC_INTERVAL = Setting.timeSetting("index.seq_no.checkpoint_sync_interval", new TimeValue(30, TimeUnit.SECONDS), new TimeValue(-1, TimeUnit.MILLISECONDS), Property.Dynamic, Property.IndexScope); @@ -156,6 +174,7 @@ public final class IndexSettings { private volatile TimeValue refreshInterval; private final TimeValue globalCheckpointInterval; private volatile ByteSizeValue flushThresholdSize; + private volatile ByteSizeValue generationThresholdSize; private final MergeSchedulerConfig mergeSchedulerConfig; private final MergePolicyConfig mergePolicyConfig; private final IndexScopedSettings scopedSettings; @@ -250,6 +269,7 @@ public IndexSettings(final IndexMetaData indexMetaData, final Settings nodeSetti refreshInterval = scopedSettings.get(INDEX_REFRESH_INTERVAL_SETTING); globalCheckpointInterval = scopedSettings.get(INDEX_SEQ_NO_CHECKPOINT_SYNC_INTERVAL); flushThresholdSize = scopedSettings.get(INDEX_TRANSLOG_FLUSH_THRESHOLD_SIZE_SETTING); + generationThresholdSize = scopedSettings.get(INDEX_TRANSLOG_GENERATION_THRESHOLD_SIZE_SETTING); mergeSchedulerConfig = new MergeSchedulerConfig(this); gcDeletesInMillis = scopedSettings.get(INDEX_GC_DELETES_SETTING).getMillis(); warmerEnabled = scopedSettings.get(INDEX_WARMER_ENABLED_SETTING); @@ -281,6 +301,9 @@ public IndexSettings(final IndexMetaData indexMetaData, final Settings nodeSetti scopedSettings.addSettingsUpdateConsumer(INDEX_WARMER_ENABLED_SETTING, this::setEnableWarmer); scopedSettings.addSettingsUpdateConsumer(INDEX_GC_DELETES_SETTING, this::setGCDeletes); scopedSettings.addSettingsUpdateConsumer(INDEX_TRANSLOG_FLUSH_THRESHOLD_SIZE_SETTING, this::setTranslogFlushThresholdSize); + 
scopedSettings.addSettingsUpdateConsumer( + INDEX_TRANSLOG_GENERATION_THRESHOLD_SIZE_SETTING, + this::setGenerationThresholdSize); scopedSettings.addSettingsUpdateConsumer(INDEX_REFRESH_INTERVAL_SETTING, this::setRefreshInterval); scopedSettings.addSettingsUpdateConsumer(MAX_REFRESH_LISTENERS_PER_SHARD, this::setMaxRefreshListeners); scopedSettings.addSettingsUpdateConsumer(MAX_SLICES_PER_SCROLL, this::setMaxSlicesPerScroll); @@ -290,6 +313,10 @@ private void setTranslogFlushThresholdSize(ByteSizeValue byteSizeValue) { this.flushThresholdSize = byteSizeValue; } + private void setGenerationThresholdSize(final ByteSizeValue generationThresholdSize) { + this.generationThresholdSize = generationThresholdSize; + } + private void setGCDeletes(TimeValue timeValue) { this.gcDeletesInMillis = timeValue.getMillis(); } @@ -461,6 +488,19 @@ public TimeValue getGlobalCheckpointInterval() { */ public ByteSizeValue getFlushThresholdSize() { return flushThresholdSize; } + /** + * Returns the generation threshold size. As sequence numbers can cause multiple generations to + * be preserved for rollback purposes, we want to keep the size of individual generations from + * growing too large to avoid excessive disk space consumption. Therefore, the translog is + * automatically rolled to a new generation when the current generation exceeds this generation + * threshold size. + * + * @return the generation threshold size + */ + public ByteSizeValue getGenerationThresholdSize() { + return generationThresholdSize; + } + /** * Returns the {@link MergeSchedulerConfig} */ diff --git a/core/src/main/java/org/elasticsearch/index/analysis/KeywordMarkerTokenFilterFactory.java b/core/src/main/java/org/elasticsearch/index/analysis/KeywordMarkerTokenFilterFactory.java index 9cb01e75e557a..a4cd4c41c9766 100644 --- a/core/src/main/java/org/elasticsearch/index/analysis/KeywordMarkerTokenFilterFactory.java +++ b/core/src/main/java/org/elasticsearch/index/analysis/KeywordMarkerTokenFilterFactory.java @@ -21,32 +21,70 @@ import org.apache.lucene.analysis.CharArraySet; import org.apache.lucene.analysis.TokenStream; +import org.apache.lucene.analysis.miscellaneous.PatternKeywordMarkerFilter; import org.apache.lucene.analysis.miscellaneous.SetKeywordMarkerFilter; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.env.Environment; import org.elasticsearch.index.IndexSettings; import java.util.Set; +import java.util.regex.Pattern; +/** + * A factory for creating keyword marker token filters that prevent tokens from + * being modified by stemmers. Two types of keyword marker filters are available: + * the {@link SetKeywordMarkerFilter} and the {@link PatternKeywordMarkerFilter}. + * + * The {@link SetKeywordMarkerFilter} uses a set of keywords to denote which tokens + * should be excluded from stemming. This filter is created if the settings include + * {@code keywords}, which contains the list of keywords, or {@code `keywords_path`}, + * which contains a path to a file in the config directory with the keywords. + * + * The {@link PatternKeywordMarkerFilter} uses a regular expression pattern to match + * against tokens that should be excluded from stemming. This filter is created if + * the settings include {@code keywords_pattern}, which contains the regular expression + * to match against. 
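Given the keywords_pattern option described in the KeywordMarkerTokenFilterFactory javadoc just above, a hedged settings sketch for wiring the filter into an analyzer; the filter and analyzer names are made up and the surrounding index-creation call is omitted:

    Settings analysisSettings = Settings.builder()
            .put("index.analysis.filter.my_keyword_marker.type", "keyword_marker")
            // tokens matching this regular expression are protected from the stemmer
            .put("index.analysis.filter.my_keyword_marker.keywords_pattern", ".*_id")
            .put("index.analysis.analyzer.my_analyzer.tokenizer", "standard")
            .putArray("index.analysis.analyzer.my_analyzer.filter",
                    "lowercase", "my_keyword_marker", "porter_stem")
            .build();
    // combining keywords_pattern with keywords or keywords_path is rejected by the factory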
+ */ public class KeywordMarkerTokenFilterFactory extends AbstractTokenFilterFactory { private final CharArraySet keywordLookup; + private final Pattern keywordPattern; public KeywordMarkerTokenFilterFactory(IndexSettings indexSettings, Environment env, String name, Settings settings) { super(indexSettings, name, settings); boolean ignoreCase = settings.getAsBooleanLenientForPreEs6Indices(indexSettings.getIndexVersionCreated(), "ignore_case", false, deprecationLogger); - Set rules = Analysis.getWordSet(env, indexSettings.getIndexVersionCreated(), settings, "keywords"); - if (rules == null) { - throw new IllegalArgumentException("keyword filter requires either `keywords` or `keywords_path` to be configured"); + String patternString = settings.get("keywords_pattern"); + if (patternString != null) { + // a pattern for matching keywords is specified, as opposed to a + // set of keyword strings to match against + if (settings.get("keywords") != null || settings.get("keywords_path") != null) { + throw new IllegalArgumentException( + "cannot specify both `keywords_pattern` and `keywords` or `keywords_path`"); + } + keywordPattern = Pattern.compile(patternString); + keywordLookup = null; + } else { + Set rules = Analysis.getWordSet(env, indexSettings.getIndexVersionCreated(), settings, "keywords"); + if (rules == null) { + throw new IllegalArgumentException( + "keyword filter requires either `keywords`, `keywords_path`, " + + "or `keywords_pattern` to be configured"); + } + // a set of keywords (or a path to them) is specified + keywordLookup = new CharArraySet(rules, ignoreCase); + keywordPattern = null; } - keywordLookup = new CharArraySet(rules, ignoreCase); } @Override public TokenStream create(TokenStream tokenStream) { - return new SetKeywordMarkerFilter(tokenStream, keywordLookup); + if (keywordPattern != null) { + return new PatternKeywordMarkerFilter(tokenStream, keywordPattern); + } else { + return new SetKeywordMarkerFilter(tokenStream, keywordLookup); + } } } diff --git a/core/src/main/java/org/elasticsearch/index/engine/ElasticsearchConcurrentMergeScheduler.java b/core/src/main/java/org/elasticsearch/index/engine/ElasticsearchConcurrentMergeScheduler.java index 11cc838660fe6..f4876149cac13 100644 --- a/core/src/main/java/org/elasticsearch/index/engine/ElasticsearchConcurrentMergeScheduler.java +++ b/core/src/main/java/org/elasticsearch/index/engine/ElasticsearchConcurrentMergeScheduler.java @@ -110,10 +110,15 @@ protected void doMerge(IndexWriter writer, MergePolicy.OneMerge merge) throws IO totalMergesNumDocs.inc(totalNumDocs); totalMergesSizeInBytes.inc(totalSizeInBytes); totalMerges.inc(tookMS); - - long stoppedMS = TimeValue.nsecToMSec(merge.rateLimiter.getTotalStoppedNS()); - long throttledMS = TimeValue.nsecToMSec(merge.rateLimiter.getTotalPausedNS()); - + long stoppedMS = TimeValue.nsecToMSec( + merge.getMergeProgress().getPauseTimes().get(MergePolicy.OneMergeProgress.PauseReason.STOPPED) + ); + long throttledMS = TimeValue.nsecToMSec( + merge.getMergeProgress().getPauseTimes().get(MergePolicy.OneMergeProgress.PauseReason.PAUSED) + ); + final Thread thread = Thread.currentThread(); + long totalBytesWritten = OneMergeHelper.getTotalBytesWritten(thread, merge); + double mbPerSec = OneMergeHelper.getMbPerSec(thread, merge); totalMergeStoppedTime.inc(stoppedMS); totalMergeThrottledTime.inc(throttledMS); @@ -125,8 +130,8 @@ protected void doMerge(IndexWriter writer, MergePolicy.OneMerge merge) throws IO totalNumDocs, TimeValue.timeValueMillis(stoppedMS), 
TimeValue.timeValueMillis(throttledMS), - merge.rateLimiter.getTotalBytesWritten()/1024f/1024f, - merge.rateLimiter.getMBPerSec()); + totalBytesWritten/1024f/1024f, + mbPerSec); if (tookMS > 20000) { // if more than 20 seconds, DEBUG log it logger.debug("{}", message); diff --git a/core/src/main/java/org/elasticsearch/index/fielddata/IndexFieldData.java b/core/src/main/java/org/elasticsearch/index/fielddata/IndexFieldData.java index 97f10932e0fcd..0b63dfb8df80a 100644 --- a/core/src/main/java/org/elasticsearch/index/fielddata/IndexFieldData.java +++ b/core/src/main/java/org/elasticsearch/index/fielddata/IndexFieldData.java @@ -85,9 +85,9 @@ public static MemoryStorageFormat fromString(String string) { FD loadDirect(LeafReaderContext context) throws Exception; /** - * Comparator used for sorting. + * Returns the {@link SortField} to used for sorting. */ - XFieldComparatorSource comparatorSource(@Nullable Object missingValue, MultiValueMode sortMode, Nested nested); + SortField sortField(@Nullable Object missingValue, MultiValueMode sortMode, Nested nested, boolean reverse); /** * Clears any resources associated with this field data. @@ -136,17 +136,17 @@ public DocIdSetIterator innerDocs(LeafReaderContext ctx) throws IOException { } /** Whether missing values should be sorted first. */ - protected final boolean sortMissingFirst(Object missingValue) { + public final boolean sortMissingFirst(Object missingValue) { return "_first".equals(missingValue); } /** Whether missing values should be sorted last, this is the default. */ - protected final boolean sortMissingLast(Object missingValue) { + public final boolean sortMissingLast(Object missingValue) { return missingValue == null || "_last".equals(missingValue); } /** Return the missing object value according to the reduced type of the comparator. 
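On the IndexFieldData change above, where comparatorSource(...) gives way to sortField(...): callers now receive a ready-to-use Lucene SortField rather than assembling one around an XFieldComparatorSource. A hedged call-site sketch; the fieldData variable and the list collecting sort fields are assumed:

    // old shape (removed): wrap the comparator source manually
    //   XFieldComparatorSource source = fieldData.comparatorSource(missingValue, sortMode, nested);
    //   SortField sortField = new SortField(fieldData.getFieldName(), source, reverse);

    // new shape: the field data implementation decides whether a plain SortField or an
    // index-sorting friendly SortedSetSortField/SortedNumericSortField can be returned
    SortField sortField = fieldData.sortField(missingValue, MultiValueMode.MIN, nested, reverse);
    sortFields.add(sortField);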
*/ - protected final Object missingObject(Object missingValue, boolean reversed) { + public final Object missingObject(Object missingValue, boolean reversed) { if (sortMissingFirst(missingValue) || sortMissingLast(missingValue)) { final boolean min = sortMissingFirst(missingValue) ^ reversed; switch (reducedType()) { diff --git a/core/src/main/java/org/elasticsearch/index/fielddata/ordinals/GlobalOrdinalsIndexFieldData.java b/core/src/main/java/org/elasticsearch/index/fielddata/ordinals/GlobalOrdinalsIndexFieldData.java index 3e75620400247..2055208021e80 100644 --- a/core/src/main/java/org/elasticsearch/index/fielddata/ordinals/GlobalOrdinalsIndexFieldData.java +++ b/core/src/main/java/org/elasticsearch/index/fielddata/ordinals/GlobalOrdinalsIndexFieldData.java @@ -20,6 +20,7 @@ import org.apache.lucene.index.DirectoryReader; import org.apache.lucene.index.LeafReaderContext; +import org.apache.lucene.search.SortField; import org.apache.lucene.util.Accountable; import org.elasticsearch.common.Nullable; import org.elasticsearch.index.AbstractIndexComponent; @@ -68,7 +69,7 @@ public String getFieldName() { } @Override - public XFieldComparatorSource comparatorSource(@Nullable Object missingValue, MultiValueMode sortMode, Nested nested) { + public SortField sortField(@Nullable Object missingValue, MultiValueMode sortMode, Nested nested, boolean reverse) { throw new UnsupportedOperationException("no global ordinals sorting yet"); } diff --git a/core/src/main/java/org/elasticsearch/index/fielddata/plain/AbstractGeoPointDVIndexFieldData.java b/core/src/main/java/org/elasticsearch/index/fielddata/plain/AbstractGeoPointDVIndexFieldData.java index b35706961ba12..8db38e59ce02f 100644 --- a/core/src/main/java/org/elasticsearch/index/fielddata/plain/AbstractGeoPointDVIndexFieldData.java +++ b/core/src/main/java/org/elasticsearch/index/fielddata/plain/AbstractGeoPointDVIndexFieldData.java @@ -21,6 +21,7 @@ import org.apache.lucene.index.DocValues; import org.apache.lucene.index.LeafReaderContext; +import org.apache.lucene.search.SortField; import org.elasticsearch.common.Nullable; import org.elasticsearch.index.Index; import org.elasticsearch.index.IndexSettings; @@ -43,7 +44,7 @@ public abstract class AbstractGeoPointDVIndexFieldData extends DocValuesIndexFie } @Override - public final XFieldComparatorSource comparatorSource(@Nullable Object missingValue, MultiValueMode sortMode, Nested nested) { + public SortField sortField(@Nullable Object missingValue, MultiValueMode sortMode, Nested nested, boolean reverse) { throw new IllegalArgumentException("can't sort on geo_point field without using specific sorting feature, like geo_distance"); } diff --git a/core/src/main/java/org/elasticsearch/index/fielddata/plain/AbstractIndexGeoPointFieldData.java b/core/src/main/java/org/elasticsearch/index/fielddata/plain/AbstractIndexGeoPointFieldData.java index c5858afaf22ce..bdf1bbac33235 100644 --- a/core/src/main/java/org/elasticsearch/index/fielddata/plain/AbstractIndexGeoPointFieldData.java +++ b/core/src/main/java/org/elasticsearch/index/fielddata/plain/AbstractIndexGeoPointFieldData.java @@ -19,6 +19,7 @@ package org.elasticsearch.index.fielddata.plain; +import org.apache.lucene.search.SortField; import org.apache.lucene.spatial.geopoint.document.GeoPointField; import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.BytesRefIterator; @@ -28,6 +29,7 @@ import org.elasticsearch.common.geo.GeoPoint; import org.elasticsearch.index.IndexSettings; import 
org.elasticsearch.index.fielddata.AtomicGeoPointFieldData; +import org.elasticsearch.index.fielddata.IndexFieldData; import org.elasticsearch.index.fielddata.IndexFieldData.XFieldComparatorSource.Nested; import org.elasticsearch.index.fielddata.IndexFieldDataCache; import org.elasticsearch.index.fielddata.IndexGeoPointFieldData; @@ -104,7 +106,7 @@ public GeoPoint next() throws IOException { } @Override - public final XFieldComparatorSource comparatorSource(@Nullable Object missingValue, MultiValueMode sortMode, Nested nested) { + public SortField sortField(@Nullable Object missingValue, MultiValueMode sortMode, Nested nested, boolean reverse) { throw new IllegalArgumentException("can't sort on geo_point field without using specific sorting feature, like geo_distance"); } diff --git a/core/src/main/java/org/elasticsearch/index/fielddata/plain/AbstractIndexOrdinalsFieldData.java b/core/src/main/java/org/elasticsearch/index/fielddata/plain/AbstractIndexOrdinalsFieldData.java index 84fa585b6b238..fef4b3b0a7d70 100644 --- a/core/src/main/java/org/elasticsearch/index/fielddata/plain/AbstractIndexOrdinalsFieldData.java +++ b/core/src/main/java/org/elasticsearch/index/fielddata/plain/AbstractIndexOrdinalsFieldData.java @@ -55,11 +55,6 @@ protected AbstractIndexOrdinalsFieldData(IndexSettings indexSettings, String fie this.minSegmentSize = minSegmentSize; } - @Override - public XFieldComparatorSource comparatorSource(@Nullable Object missingValue, MultiValueMode sortMode, Nested nested) { - return new BytesRefFieldComparatorSource(this, missingValue, sortMode, nested); - } - @Override public IndexOrdinalsFieldData loadGlobal(DirectoryReader indexReader) { if (indexReader.leaves().size() <= 1) { diff --git a/core/src/main/java/org/elasticsearch/index/fielddata/plain/AbstractLatLonPointDVIndexFieldData.java b/core/src/main/java/org/elasticsearch/index/fielddata/plain/AbstractLatLonPointDVIndexFieldData.java index 7ce6eb958870d..3b4ac58e0e81e 100644 --- a/core/src/main/java/org/elasticsearch/index/fielddata/plain/AbstractLatLonPointDVIndexFieldData.java +++ b/core/src/main/java/org/elasticsearch/index/fielddata/plain/AbstractLatLonPointDVIndexFieldData.java @@ -24,6 +24,7 @@ import org.apache.lucene.index.FieldInfo; import org.apache.lucene.index.LeafReader; import org.apache.lucene.index.LeafReaderContext; +import org.apache.lucene.search.SortField; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.common.Nullable; import org.elasticsearch.index.Index; @@ -46,8 +47,7 @@ public abstract class AbstractLatLonPointDVIndexFieldData extends DocValuesIndex } @Override - public final XFieldComparatorSource comparatorSource(@Nullable Object missingValue, MultiValueMode sortMode, - XFieldComparatorSource.Nested nested) { + public SortField sortField(@Nullable Object missingValue, MultiValueMode sortMode, XFieldComparatorSource.Nested nested, boolean reverse) { throw new IllegalArgumentException("can't sort on geo_point field without using specific sorting feature, like geo_distance"); } diff --git a/core/src/main/java/org/elasticsearch/index/fielddata/plain/BinaryDVIndexFieldData.java b/core/src/main/java/org/elasticsearch/index/fielddata/plain/BinaryDVIndexFieldData.java index 586ad1f0d4869..a7e1981766704 100644 --- a/core/src/main/java/org/elasticsearch/index/fielddata/plain/BinaryDVIndexFieldData.java +++ b/core/src/main/java/org/elasticsearch/index/fielddata/plain/BinaryDVIndexFieldData.java @@ -20,9 +20,12 @@ package org.elasticsearch.index.fielddata.plain; import 
org.apache.lucene.index.LeafReaderContext; +import org.apache.lucene.search.SortField; +import org.apache.lucene.search.SortedSetSortField; +import org.apache.lucene.search.SortedSetSelector; +import org.elasticsearch.common.Nullable; import org.elasticsearch.index.Index; import org.elasticsearch.index.fielddata.IndexFieldData; -import org.elasticsearch.index.fielddata.IndexFieldData.XFieldComparatorSource.Nested; import org.elasticsearch.index.fielddata.fieldcomparator.BytesRefFieldComparatorSource; import org.elasticsearch.search.MultiValueMode; @@ -43,7 +46,21 @@ public BinaryDVAtomicFieldData loadDirect(LeafReaderContext context) throws Exce } @Override - public org.elasticsearch.index.fielddata.IndexFieldData.XFieldComparatorSource comparatorSource(Object missingValue, MultiValueMode sortMode, Nested nested) { - return new BytesRefFieldComparatorSource(this, missingValue, sortMode, nested); + public SortField sortField(@Nullable Object missingValue, MultiValueMode sortMode, XFieldComparatorSource.Nested nested, boolean reverse) { + XFieldComparatorSource source = new BytesRefFieldComparatorSource(this, missingValue, sortMode, nested); + /** + * Check if we can use a simple {@link SortedSetSortField} compatible with index sorting and + * returns a custom sort field otherwise. + */ + if (nested != null || + (sortMode != MultiValueMode.MAX && sortMode != MultiValueMode.MIN) || + (source.sortMissingFirst(missingValue) == false && source.sortMissingLast(missingValue) == false)) { + return new SortField(getFieldName(), source, reverse); + } + SortField sortField = new SortedSetSortField(fieldName, reverse, + sortMode == MultiValueMode.MAX ? SortedSetSelector.Type.MAX : SortedSetSelector.Type.MIN); + sortField.setMissingValue(source.sortMissingLast(missingValue) ^ reverse ? 
+ SortedSetSortField.STRING_LAST : SortedSetSortField.STRING_FIRST); + return sortField; } } diff --git a/core/src/main/java/org/elasticsearch/index/fielddata/plain/BytesBinaryDVIndexFieldData.java b/core/src/main/java/org/elasticsearch/index/fielddata/plain/BytesBinaryDVIndexFieldData.java index bd3cdd71184c5..398093c034b79 100644 --- a/core/src/main/java/org/elasticsearch/index/fielddata/plain/BytesBinaryDVIndexFieldData.java +++ b/core/src/main/java/org/elasticsearch/index/fielddata/plain/BytesBinaryDVIndexFieldData.java @@ -21,6 +21,7 @@ import org.apache.lucene.index.DocValues; import org.apache.lucene.index.LeafReaderContext; +import org.apache.lucene.search.SortField; import org.elasticsearch.common.Nullable; import org.elasticsearch.index.Index; import org.elasticsearch.index.IndexSettings; @@ -41,7 +42,7 @@ public BytesBinaryDVIndexFieldData(Index index, String fieldName) { } @Override - public final XFieldComparatorSource comparatorSource(@Nullable Object missingValue, MultiValueMode sortMode, Nested nested) { + public SortField sortField(@Nullable Object missingValue, MultiValueMode sortMode, Nested nested, boolean reverse) { throw new IllegalArgumentException("can't sort on binary field"); } diff --git a/core/src/main/java/org/elasticsearch/index/fielddata/plain/IndexIndexFieldData.java b/core/src/main/java/org/elasticsearch/index/fielddata/plain/IndexIndexFieldData.java index 348aee910986a..28c0752965cde 100644 --- a/core/src/main/java/org/elasticsearch/index/fielddata/plain/IndexIndexFieldData.java +++ b/core/src/main/java/org/elasticsearch/index/fielddata/plain/IndexIndexFieldData.java @@ -24,17 +24,21 @@ import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.index.RandomAccessOrds; import org.apache.lucene.index.SortedDocValues; +import org.apache.lucene.search.SortField; import org.apache.lucene.util.Accountable; import org.apache.lucene.util.BytesRef; +import org.elasticsearch.common.Nullable; import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.fielddata.AtomicOrdinalsFieldData; import org.elasticsearch.index.fielddata.IndexFieldData; import org.elasticsearch.index.fielddata.IndexFieldDataCache; import org.elasticsearch.index.fielddata.IndexOrdinalsFieldData; +import org.elasticsearch.index.fielddata.fieldcomparator.BytesRefFieldComparatorSource; import org.elasticsearch.index.mapper.MappedFieldType; import org.elasticsearch.index.mapper.MapperService; import org.elasticsearch.index.mapper.TextFieldMapper; import org.elasticsearch.indices.breaker.CircuitBreakerService; +import org.elasticsearch.search.MultiValueMode; import java.util.Collection; import java.util.Collections; @@ -124,6 +128,12 @@ public AtomicOrdinalsFieldData loadDirect(LeafReaderContext context) return atomicFieldData; } + @Override + public SortField sortField(@Nullable Object missingValue, MultiValueMode sortMode, XFieldComparatorSource.Nested nested, boolean reverse) { + final XFieldComparatorSource source = new BytesRefFieldComparatorSource(this, missingValue, sortMode, nested); + return new SortField(getFieldName(), source, reverse); + } + @Override public IndexOrdinalsFieldData loadGlobal(DirectoryReader indexReader) { return this; diff --git a/core/src/main/java/org/elasticsearch/index/fielddata/plain/PagedBytesIndexFieldData.java b/core/src/main/java/org/elasticsearch/index/fielddata/plain/PagedBytesIndexFieldData.java index 3b01f01eafbe4..eab98040bbbed 100644 --- 
a/core/src/main/java/org/elasticsearch/index/fielddata/plain/PagedBytesIndexFieldData.java +++ b/core/src/main/java/org/elasticsearch/index/fielddata/plain/PagedBytesIndexFieldData.java @@ -27,10 +27,12 @@ import org.apache.lucene.index.Terms; import org.apache.lucene.index.TermsEnum; import org.apache.lucene.search.DocIdSetIterator; +import org.apache.lucene.search.SortField; import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.PagedBytes; import org.apache.lucene.util.packed.PackedInts; import org.apache.lucene.util.packed.PackedLongValues; +import org.elasticsearch.common.Nullable; import org.elasticsearch.common.breaker.CircuitBreaker; import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.fielddata.AtomicOrdinalsFieldData; @@ -38,11 +40,13 @@ import org.elasticsearch.index.fielddata.IndexFieldDataCache; import org.elasticsearch.index.fielddata.IndexOrdinalsFieldData; import org.elasticsearch.index.fielddata.RamAccountingTermsEnum; +import org.elasticsearch.index.fielddata.fieldcomparator.BytesRefFieldComparatorSource; import org.elasticsearch.index.fielddata.ordinals.Ordinals; import org.elasticsearch.index.fielddata.ordinals.OrdinalsBuilder; import org.elasticsearch.index.mapper.MappedFieldType; import org.elasticsearch.index.mapper.MapperService; import org.elasticsearch.indices.breaker.CircuitBreakerService; +import org.elasticsearch.search.MultiValueMode; import java.io.IOException; @@ -74,6 +78,12 @@ public PagedBytesIndexFieldData(IndexSettings indexSettings, String fieldName, super(indexSettings, fieldName, cache, breakerService, minFrequency, maxFrequency, minSegmentSize); } + @Override + public SortField sortField(@Nullable Object missingValue, MultiValueMode sortMode, XFieldComparatorSource.Nested nested, boolean reverse) { + XFieldComparatorSource source = new BytesRefFieldComparatorSource(this, missingValue, sortMode, nested); + return new SortField(getFieldName(), source, reverse); + } + @Override public AtomicOrdinalsFieldData loadDirect(LeafReaderContext context) throws Exception { LeafReader reader = context.reader(); diff --git a/core/src/main/java/org/elasticsearch/index/fielddata/plain/ParentChildIndexFieldData.java b/core/src/main/java/org/elasticsearch/index/fielddata/plain/ParentChildIndexFieldData.java index 71d1e04604b2f..ee451ff023375 100644 --- a/core/src/main/java/org/elasticsearch/index/fielddata/plain/ParentChildIndexFieldData.java +++ b/core/src/main/java/org/elasticsearch/index/fielddata/plain/ParentChildIndexFieldData.java @@ -27,6 +27,7 @@ import org.apache.lucene.index.MultiDocValues; import org.apache.lucene.index.MultiDocValues.OrdinalMap; import org.apache.lucene.index.SortedDocValues; +import org.apache.lucene.search.SortField; import org.apache.lucene.util.Accountable; import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.LongValues; @@ -89,8 +90,9 @@ public ParentChildIndexFieldData(IndexSettings indexSettings, String fieldName, } @Override - public XFieldComparatorSource comparatorSource(@Nullable Object missingValue, MultiValueMode sortMode, Nested nested) { - return new BytesRefFieldComparatorSource(this, missingValue, sortMode, nested); + public SortField sortField(@Nullable Object missingValue, MultiValueMode sortMode, Nested nested, boolean reverse) { + final XFieldComparatorSource source = new BytesRefFieldComparatorSource(this, missingValue, sortMode, nested); + return new SortField(getFieldName(), source, reverse); } @Override @@ -335,7 +337,7 @@ public AtomicParentChildFieldData 
loadDirect(LeafReaderContext context) throws E } @Override - public XFieldComparatorSource comparatorSource(Object missingValue, MultiValueMode sortMode, Nested nested) { + public SortField sortField(@Nullable Object missingValue, MultiValueMode sortMode, Nested nested, boolean reverse) { throw new UnsupportedOperationException("No sorting on global ords"); } diff --git a/core/src/main/java/org/elasticsearch/index/fielddata/plain/SortedNumericDVIndexFieldData.java b/core/src/main/java/org/elasticsearch/index/fielddata/plain/SortedNumericDVIndexFieldData.java index a19681df33419..82f3de0f72738 100644 --- a/core/src/main/java/org/elasticsearch/index/fielddata/plain/SortedNumericDVIndexFieldData.java +++ b/core/src/main/java/org/elasticsearch/index/fielddata/plain/SortedNumericDVIndexFieldData.java @@ -26,6 +26,9 @@ import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.index.NumericDocValues; import org.apache.lucene.index.SortedNumericDocValues; +import org.apache.lucene.search.SortField; +import org.apache.lucene.search.SortedNumericSelector; +import org.apache.lucene.search.SortedNumericSortField; import org.apache.lucene.util.Accountable; import org.apache.lucene.util.NumericUtils; import org.elasticsearch.index.Index; @@ -60,17 +63,53 @@ public SortedNumericDVIndexFieldData(Index index, String fieldNames, NumericType } @Override - public org.elasticsearch.index.fielddata.IndexFieldData.XFieldComparatorSource comparatorSource(Object missingValue, MultiValueMode sortMode, Nested nested) { + public SortField sortField(Object missingValue, MultiValueMode sortMode, Nested nested, boolean reverse) { + final XFieldComparatorSource source; switch (numericType) { case HALF_FLOAT: case FLOAT: - return new FloatValuesComparatorSource(this, missingValue, sortMode, nested); + source = new FloatValuesComparatorSource(this, missingValue, sortMode, nested); + break; + case DOUBLE: - return new DoubleValuesComparatorSource(this, missingValue, sortMode, nested); + source = new DoubleValuesComparatorSource(this, missingValue, sortMode, nested); + break; + + default: + assert !numericType.isFloatingPoint(); + source = new LongValuesComparatorSource(this, missingValue, sortMode, nested); + break; + } + + /** + * Check if we can use a simple {@link SortedNumericSortField} compatible with index sorting and + * returns a custom sort field otherwise. + */ + if (nested != null + || (sortMode != MultiValueMode.MAX && sortMode != MultiValueMode.MIN) + || numericType == NumericType.HALF_FLOAT) { + return new SortField(fieldName, source, reverse); + } + + final SortField sortField; + final SortedNumericSelector.Type selectorType = sortMode == MultiValueMode.MAX ? 
+ SortedNumericSelector.Type.MAX : SortedNumericSelector.Type.MIN; + switch (numericType) { + case FLOAT: + sortField = new SortedNumericSortField(fieldName, SortField.Type.FLOAT, reverse, selectorType); + break; + + case DOUBLE: + sortField = new SortedNumericSortField(fieldName, SortField.Type.DOUBLE, reverse, selectorType); + break; + default: assert !numericType.isFloatingPoint(); - return new LongValuesComparatorSource(this, missingValue, sortMode, nested); + sortField = new SortedNumericSortField(fieldName, SortField.Type.LONG, reverse, selectorType); + break; } + sortField.setMissingValue(source.missingObject(missingValue, reverse)); + return sortField; } @Override diff --git a/core/src/main/java/org/elasticsearch/index/fielddata/plain/SortedSetDVOrdinalsIndexFieldData.java b/core/src/main/java/org/elasticsearch/index/fielddata/plain/SortedSetDVOrdinalsIndexFieldData.java index 7fe636f096247..2c59e8559c272 100644 --- a/core/src/main/java/org/elasticsearch/index/fielddata/plain/SortedSetDVOrdinalsIndexFieldData.java +++ b/core/src/main/java/org/elasticsearch/index/fielddata/plain/SortedSetDVOrdinalsIndexFieldData.java @@ -22,10 +22,13 @@ import org.apache.lucene.index.DirectoryReader; import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.index.RandomAccessOrds; +import org.apache.lucene.search.SortField; +import org.apache.lucene.search.SortedSetSortField; +import org.apache.lucene.search.SortedSetSelector; import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.common.Nullable; import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.fielddata.AtomicOrdinalsFieldData; -import org.elasticsearch.index.fielddata.IndexFieldData; import org.elasticsearch.index.fielddata.IndexFieldData.XFieldComparatorSource.Nested; import org.elasticsearch.index.fielddata.IndexFieldDataCache; import org.elasticsearch.index.fielddata.IndexOrdinalsFieldData; @@ -55,8 +58,22 @@ public SortedSetDVOrdinalsIndexFieldData(IndexSettings indexSettings, IndexField } @Override - public org.elasticsearch.index.fielddata.IndexFieldData.XFieldComparatorSource comparatorSource(Object missingValue, MultiValueMode sortMode, Nested nested) { - return new BytesRefFieldComparatorSource((IndexFieldData) this, missingValue, sortMode, nested); + public SortField sortField(@Nullable Object missingValue, MultiValueMode sortMode, Nested nested, boolean reverse) { + XFieldComparatorSource source = new BytesRefFieldComparatorSource(this, missingValue, sortMode, nested); + /** + * Check if we can use a simple {@link SortedSetSortField} compatible with index sorting and + * returns a custom sort field otherwise. + */ + if (nested != null || + (sortMode != MultiValueMode.MAX && sortMode != MultiValueMode.MIN) || + (source.sortMissingLast(missingValue) == false && source.sortMissingFirst(missingValue) == false)) { + return new SortField(getFieldName(), source, reverse); + } + SortField sortField = new SortedSetSortField(fieldName, reverse, + sortMode == MultiValueMode.MAX ? SortedSetSelector.Type.MAX : SortedSetSelector.Type.MIN); + sortField.setMissingValue(source.sortMissingLast(missingValue) ^ reverse ? 
+ SortedSetSortField.STRING_LAST : SortedSetSortField.STRING_FIRST); + return sortField; } @Override diff --git a/core/src/main/java/org/elasticsearch/index/mapper/DocumentMapper.java b/core/src/main/java/org/elasticsearch/index/mapper/DocumentMapper.java index a8c74101bff45..8b7355dca4b4a 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/DocumentMapper.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/DocumentMapper.java @@ -257,11 +257,6 @@ public Map objectMappers() { return this.objectMappers; } - // TODO this method looks like it is only used in tests... - public ParsedDocument parse(String index, String type, String id, BytesReference source) throws MapperParsingException { - return parse(SourceToParse.source(index, type, id, source, XContentType.JSON)); - } - public ParsedDocument parse(SourceToParse source) throws MapperParsingException { return documentParser.parseDocument(source); } diff --git a/core/src/main/java/org/elasticsearch/index/mapper/FieldMappers.java b/core/src/main/java/org/elasticsearch/index/mapper/FieldMappers.java deleted file mode 100644 index 6e1732c94eac7..0000000000000 --- a/core/src/main/java/org/elasticsearch/index/mapper/FieldMappers.java +++ /dev/null @@ -1,89 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.index.mapper; - -import java.util.ArrayList; -import java.util.Arrays; -import java.util.Iterator; -import java.util.List; - -/** - * A holder for several {@link FieldMapper}. - */ -public class FieldMappers implements Iterable { - - private final FieldMapper[] fieldMappers; - private final List fieldMappersAsList; - - public FieldMappers() { - this.fieldMappers = new FieldMapper[0]; - this.fieldMappersAsList = Arrays.asList(fieldMappers); - } - - public FieldMappers(FieldMapper fieldMapper) { - this.fieldMappers = new FieldMapper[]{fieldMapper}; - this.fieldMappersAsList = Arrays.asList(this.fieldMappers); - } - - private FieldMappers(FieldMapper[] fieldMappers) { - this.fieldMappers = fieldMappers; - this.fieldMappersAsList = Arrays.asList(this.fieldMappers); - } - - public FieldMapper mapper() { - if (fieldMappers.length == 0) { - return null; - } - return fieldMappers[0]; - } - - public boolean isEmpty() { - return fieldMappers.length == 0; - } - - public List mappers() { - return this.fieldMappersAsList; - } - - @Override - public Iterator iterator() { - return fieldMappersAsList.iterator(); - } - - /** - * Concats and returns a new {@link FieldMappers}. 
- */ - public FieldMappers concat(FieldMapper mapper) { - FieldMapper[] newMappers = new FieldMapper[fieldMappers.length + 1]; - System.arraycopy(fieldMappers, 0, newMappers, 0, fieldMappers.length); - newMappers[fieldMappers.length] = mapper; - return new FieldMappers(newMappers); - } - - public FieldMappers remove(FieldMapper mapper) { - ArrayList list = new ArrayList<>(fieldMappers.length); - for (FieldMapper fieldMapper : fieldMappers) { - if (!fieldMapper.equals(mapper)) { // identify equality - list.add(fieldMapper); - } - } - return new FieldMappers(list.toArray(new FieldMapper[list.size()])); - } -} diff --git a/core/src/main/java/org/elasticsearch/index/mapper/MappedFieldType.java b/core/src/main/java/org/elasticsearch/index/mapper/MappedFieldType.java index 93f22d42a8080..55c2e4cb3c698 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/MappedFieldType.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/MappedFieldType.java @@ -313,14 +313,14 @@ public Object valueForDisplay(Object value) { /** Returns true if the field is searchable. * */ - protected boolean isSearchable() { + public boolean isSearchable() { return indexOptions() != IndexOptions.NONE; } /** Returns true if the field is aggregatable. * */ - protected boolean isAggregatable() { + public boolean isAggregatable() { try { fielddataBuilder(); return true; diff --git a/core/src/main/java/org/elasticsearch/index/mapper/MapperService.java b/core/src/main/java/org/elasticsearch/index/mapper/MapperService.java index aeab1e5c0cf23..68983bcf63ff4 100755 --- a/core/src/main/java/org/elasticsearch/index/mapper/MapperService.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/MapperService.java @@ -110,7 +110,7 @@ public enum MergeReason { private volatile Map mappers = emptyMap(); private volatile FieldTypeLookup fieldTypes; - private volatile Map fullPathObjectMappers = new HashMap<>(); + private volatile Map fullPathObjectMappers = emptyMap(); private boolean hasNested = false; // updated dynamically to true when a nested object is added private boolean allEnabled = false; // updated dynamically to true when _all is enabled @@ -394,6 +394,7 @@ private synchronized Map internalMerge(@Nullable Documen for (ObjectMapper objectMapper : objectMappers) { if (fullPathObjectMappers == this.fullPathObjectMappers) { + // first time through the loops fullPathObjectMappers = new HashMap<>(this.fullPathObjectMappers); } fullPathObjectMappers.put(objectMapper.fullPath(), objectMapper); @@ -414,6 +415,7 @@ private synchronized Map internalMerge(@Nullable Documen if (oldMapper == null && newMapper.parentFieldMapper().active()) { if (parentTypes == this.parentTypes) { + // first time through the loop parentTypes = new HashSet<>(this.parentTypes); } parentTypes.add(mapper.parentFieldMapper().type()); @@ -456,8 +458,15 @@ private synchronized Map internalMerge(@Nullable Documen // make structures immutable mappers = Collections.unmodifiableMap(mappers); results = Collections.unmodifiableMap(results); - parentTypes = Collections.unmodifiableSet(parentTypes); - fullPathObjectMappers = Collections.unmodifiableMap(fullPathObjectMappers); + + // only need to immutably rewrap these if the previous reference was changed. + // if not then they are already implicitly immutable. 
+ if (fullPathObjectMappers != this.fullPathObjectMappers) { + fullPathObjectMappers = Collections.unmodifiableMap(fullPathObjectMappers); + } + if (parentTypes != this.parentTypes) { + parentTypes = Collections.unmodifiableSet(parentTypes); + } // commit the change if (defaultMappingSource != null) { diff --git a/core/src/main/java/org/elasticsearch/index/mapper/RangeFieldMapper.java b/core/src/main/java/org/elasticsearch/index/mapper/RangeFieldMapper.java index f270ae74ca999..9a271916ac19f 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/RangeFieldMapper.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/RangeFieldMapper.java @@ -19,10 +19,10 @@ package org.elasticsearch.index.mapper; import org.apache.lucene.document.Field; -import org.apache.lucene.document.DoubleRangeField; -import org.apache.lucene.document.FloatRangeField; -import org.apache.lucene.document.IntRangeField; -import org.apache.lucene.document.LongRangeField; +import org.apache.lucene.document.DoubleRange; +import org.apache.lucene.document.FloatRange; +import org.apache.lucene.document.IntRange; +import org.apache.lucene.document.LongRange; import org.apache.lucene.document.StoredField; import org.apache.lucene.index.IndexOptions; import org.apache.lucene.index.IndexableField; @@ -430,7 +430,7 @@ public enum RangeType { DATE("date_range", NumberType.LONG) { @Override public Field getRangeField(String name, Range r) { - return new LongRangeField(name, new long[] {r.from.longValue()}, new long[] {r.to.longValue()}); + return new LongRange(name, new long[] {r.from.longValue()}, new long[] {r.to.longValue()}); } private Number parse(DateMathParser dateMathParser, String dateStr) { return dateMathParser.parse(dateStr, () -> {throw new IllegalArgumentException("now is not used at indexing time");}); @@ -516,7 +516,7 @@ public Float nextDown(Number value) { } @Override public Field getRangeField(String name, Range r) { - return new FloatRangeField(name, new float[] {r.from.floatValue()}, new float[] {r.to.floatValue()}); + return new FloatRange(name, new float[] {r.from.floatValue()}, new float[] {r.to.floatValue()}); } @Override public byte[] getBytes(Range r) { @@ -527,19 +527,19 @@ public byte[] getBytes(Range r) { } @Override public Query withinQuery(String field, Number from, Number to, boolean includeFrom, boolean includeTo) { - return FloatRangeField.newWithinQuery(field, + return FloatRange.newWithinQuery(field, new float[] {includeFrom ? (Float)from : Math.nextUp((Float)from)}, new float[] {includeTo ? (Float)to : Math.nextDown((Float)to)}); } @Override public Query containsQuery(String field, Number from, Number to, boolean includeFrom, boolean includeTo) { - return FloatRangeField.newContainsQuery(field, + return FloatRange.newContainsQuery(field, new float[] {includeFrom ? (Float)from : Math.nextUp((Float)from)}, new float[] {includeTo ? (Float)to : Math.nextDown((Float)to)}); } @Override public Query intersectsQuery(String field, Number from, Number to, boolean includeFrom, boolean includeTo) { - return FloatRangeField.newIntersectsQuery(field, + return FloatRange.newIntersectsQuery(field, new float[] {includeFrom ? (Float)from : Math.nextUp((Float)from)}, new float[] {includeTo ? 
(Float)to : Math.nextDown((Float)to)}); } @@ -563,7 +563,7 @@ public Double nextDown(Number value) { } @Override public Field getRangeField(String name, Range r) { - return new DoubleRangeField(name, new double[] {r.from.doubleValue()}, new double[] {r.to.doubleValue()}); + return new DoubleRange(name, new double[] {r.from.doubleValue()}, new double[] {r.to.doubleValue()}); } @Override public byte[] getBytes(Range r) { @@ -574,19 +574,19 @@ public byte[] getBytes(Range r) { } @Override public Query withinQuery(String field, Number from, Number to, boolean includeFrom, boolean includeTo) { - return DoubleRangeField.newWithinQuery(field, + return DoubleRange.newWithinQuery(field, new double[] {includeFrom ? (Double)from : Math.nextUp((Double)from)}, new double[] {includeTo ? (Double)to : Math.nextDown((Double)to)}); } @Override public Query containsQuery(String field, Number from, Number to, boolean includeFrom, boolean includeTo) { - return DoubleRangeField.newContainsQuery(field, + return DoubleRange.newContainsQuery(field, new double[] {includeFrom ? (Double)from : Math.nextUp((Double)from)}, new double[] {includeTo ? (Double)to : Math.nextDown((Double)to)}); } @Override public Query intersectsQuery(String field, Number from, Number to, boolean includeFrom, boolean includeTo) { - return DoubleRangeField.newIntersectsQuery(field, + return DoubleRange.newIntersectsQuery(field, new double[] {includeFrom ? (Double)from : Math.nextUp((Double)from)}, new double[] {includeTo ? (Double)to : Math.nextDown((Double)to)}); } @@ -612,7 +612,7 @@ public Integer nextDown(Number value) { } @Override public Field getRangeField(String name, Range r) { - return new IntRangeField(name, new int[] {r.from.intValue()}, new int[] {r.to.intValue()}); + return new IntRange(name, new int[] {r.from.intValue()}, new int[] {r.to.intValue()}); } @Override public byte[] getBytes(Range r) { @@ -623,17 +623,17 @@ public byte[] getBytes(Range r) { } @Override public Query withinQuery(String field, Number from, Number to, boolean includeFrom, boolean includeTo) { - return IntRangeField.newWithinQuery(field, new int[] {(Integer)from + (includeFrom ? 0 : 1)}, + return IntRange.newWithinQuery(field, new int[] {(Integer)from + (includeFrom ? 0 : 1)}, new int[] {(Integer)to - (includeTo ? 0 : 1)}); } @Override public Query containsQuery(String field, Number from, Number to, boolean includeFrom, boolean includeTo) { - return IntRangeField.newContainsQuery(field, new int[] {(Integer)from + (includeFrom ? 0 : 1)}, + return IntRange.newContainsQuery(field, new int[] {(Integer)from + (includeFrom ? 0 : 1)}, new int[] {(Integer)to - (includeTo ? 0 : 1)}); } @Override public Query intersectsQuery(String field, Number from, Number to, boolean includeFrom, boolean includeTo) { - return IntRangeField.newIntersectsQuery(field, new int[] {(Integer)from + (includeFrom ? 0 : 1)}, + return IntRange.newIntersectsQuery(field, new int[] {(Integer)from + (includeFrom ? 0 : 1)}, new int[] {(Integer)to - (includeTo ? 
0 : 1)}); } }, @@ -656,7 +656,7 @@ public Long nextDown(Number value) { } @Override public Field getRangeField(String name, Range r) { - return new LongRangeField(name, new long[] {r.from.longValue()}, new long[] {r.to.longValue()}); + return new LongRange(name, new long[] {r.from.longValue()}, new long[] {r.to.longValue()}); } @Override public byte[] getBytes(Range r) { @@ -669,17 +669,17 @@ public byte[] getBytes(Range r) { } @Override public Query withinQuery(String field, Number from, Number to, boolean includeFrom, boolean includeTo) { - return LongRangeField.newWithinQuery(field, new long[] {(Long)from + (includeFrom ? 0 : 1)}, + return LongRange.newWithinQuery(field, new long[] {(Long)from + (includeFrom ? 0 : 1)}, new long[] {(Long)to - (includeTo ? 0 : 1)}); } @Override public Query containsQuery(String field, Number from, Number to, boolean includeFrom, boolean includeTo) { - return LongRangeField.newContainsQuery(field, new long[] {(Long)from + (includeFrom ? 0 : 1)}, + return LongRange.newContainsQuery(field, new long[] {(Long)from + (includeFrom ? 0 : 1)}, new long[] {(Long)to - (includeTo ? 0 : 1)}); } @Override public Query intersectsQuery(String field, Number from, Number to, boolean includeFrom, boolean includeTo) { - return LongRangeField.newIntersectsQuery(field, new long[] {(Long)from + (includeFrom ? 0 : 1)}, + return LongRange.newIntersectsQuery(field, new long[] {(Long)from + (includeFrom ? 0 : 1)}, new long[] {(Long)to - (includeTo ? 0 : 1)}); } }; diff --git a/core/src/main/java/org/elasticsearch/index/mapper/ScaledFloatFieldMapper.java b/core/src/main/java/org/elasticsearch/index/mapper/ScaledFloatFieldMapper.java index 62ff8bdede08e..226ab905a2790 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/ScaledFloatFieldMapper.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/ScaledFloatFieldMapper.java @@ -28,8 +28,10 @@ import org.apache.lucene.index.SortedNumericDocValues; import org.apache.lucene.search.BoostQuery; import org.apache.lucene.search.Query; +import org.apache.lucene.search.SortField; import org.elasticsearch.action.fieldstats.FieldStats; import org.elasticsearch.common.Explicit; +import org.elasticsearch.common.Nullable; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.XContentBuilder; @@ -492,9 +494,9 @@ public AtomicNumericFieldData loadDirect(LeafReaderContext context) throws Excep } @Override - public org.elasticsearch.index.fielddata.IndexFieldData.XFieldComparatorSource comparatorSource(Object missingValue, - MultiValueMode sortMode, Nested nested) { - return new DoubleValuesComparatorSource(this, missingValue, sortMode, nested); + public SortField sortField(@Nullable Object missingValue, MultiValueMode sortMode, Nested nested, boolean reverse) { + final XFieldComparatorSource source = new DoubleValuesComparatorSource(this, missingValue, sortMode, nested); + return new SortField(getFieldName(), source, reverse); } @Override diff --git a/core/src/main/java/org/elasticsearch/index/mapper/TextFieldMapper.java b/core/src/main/java/org/elasticsearch/index/mapper/TextFieldMapper.java index b41695eb8bd49..6b2298ba62c50 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/TextFieldMapper.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/TextFieldMapper.java @@ -287,7 +287,7 @@ public IndexFieldData.Builder fielddataBuilder() { if (fielddata == false) { throw new IllegalArgumentException("Fielddata is disabled on text 
fields by default. Set fielddata=true on [" + name() + "] in order to load fielddata in memory by uninverting the inverted index. Note that this can however " - + "use significant memory."); + + "use significant memory. Alternatively use a keyword field instead."); } return new PagedBytesIndexFieldData.Builder(fielddataMinFrequency, fielddataMaxFrequency, fielddataMinSegmentSize); } diff --git a/core/src/main/java/org/elasticsearch/index/query/FuzzyQueryBuilder.java b/core/src/main/java/org/elasticsearch/index/query/FuzzyQueryBuilder.java index 5da9edcd2a5b4..c6f153f319c6e 100644 --- a/core/src/main/java/org/elasticsearch/index/query/FuzzyQueryBuilder.java +++ b/core/src/main/java/org/elasticsearch/index/query/FuzzyQueryBuilder.java @@ -275,7 +275,7 @@ public static FuzzyQueryBuilder fromXContent(QueryParseContext parseContext) thr while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { if (token == XContentParser.Token.FIELD_NAME) { currentFieldName = parser.currentName(); - } else { + } else if (token.isValue()) { if (TERM_FIELD.match(currentFieldName)) { value = parser.objectBytes(); } else if (VALUE_FIELD.match(currentFieldName)) { @@ -298,6 +298,9 @@ public static FuzzyQueryBuilder fromXContent(QueryParseContext parseContext) thr throw new ParsingException(parser.getTokenLocation(), "[fuzzy] query does not support [" + currentFieldName + "]"); } + } else { + throw new ParsingException(parser.getTokenLocation(), + "[" + NAME + "] unexpected token [" + token + "] after [" + currentFieldName + "]"); } } } else { diff --git a/core/src/main/java/org/elasticsearch/index/shard/IndexShard.java b/core/src/main/java/org/elasticsearch/index/shard/IndexShard.java index 0e6054deccd0f..32d3d4d4bf8ee 100644 --- a/core/src/main/java/org/elasticsearch/index/shard/IndexShard.java +++ b/core/src/main/java/org/elasticsearch/index/shard/IndexShard.java @@ -771,27 +771,44 @@ public Engine.SyncedFlushResult syncFlush(String syncId, Engine.CommitId expecte return engine.syncFlush(syncId, expectedCommitId); } - public Engine.CommitId flush(FlushRequest request) throws ElasticsearchException { - boolean waitIfOngoing = request.waitIfOngoing(); - boolean force = request.force(); - if (logger.isTraceEnabled()) { - logger.trace("flush with {}", request); - } - // we allows flush while recovering, since we allow for operations to happen - // while recovering, and we want to keep the translog at bay (up to deletes, which - // we don't gc). Yet, we don't use flush internally to clear deletes and flush the indexwriter since - // we use #writeIndexingBuffer for this now. + /** + * Executes the given flush request against the engine. + * + * @param request the flush request + * @return the commit ID + */ + public Engine.CommitId flush(FlushRequest request) { + final boolean waitIfOngoing = request.waitIfOngoing(); + final boolean force = request.force(); + logger.trace("flush with {}", request); + /* + * We allow flushes while recovery since we allow operations to happen while recovering and + * we want to keep the translog under control (up to deletes, which we do not GC). Yet, we + * do not use flush internally to clear deletes and flush the index writer since we use + * Engine#writeIndexingBuffer for this now. 
+     */
         verifyNotClosed();
-        Engine engine = getEngine();
+        final Engine engine = getEngine();
         if (engine.isRecovering()) {
-            throw new IllegalIndexShardStateException(shardId(), state, "flush is only allowed if the engine is not recovery" +
-                " from translog");
+            throw new IllegalIndexShardStateException(
+                shardId(),
+                state,
+                "flush is only allowed if the engine is not recovering from translog");
         }
-        long time = System.nanoTime();
-        Engine.CommitId commitId = engine.flush(force, waitIfOngoing);
+        final long time = System.nanoTime();
+        final Engine.CommitId commitId = engine.flush(force, waitIfOngoing);
         flushMetric.inc(System.nanoTime() - time);
         return commitId;
+    }
+    /**
+     * Rolls the translog generation.
+     *
+     * @throws IOException if any file operations on the translog throw an I/O exception
+     */
+    private void rollTranslogGeneration() throws IOException {
+        final Engine engine = getEngine();
+        engine.getTranslog().rollGeneration();
     }
     public void forceMerge(ForceMergeRequest forceMerge) throws IOException {
@@ -1256,17 +1273,39 @@ public boolean restoreFromRepository(Repository repository) {
     }
     /**
-     * Returns true iff this shard needs to be flushed due to too many translog operation or a too large transaction log.
-     * Otherwise false.
+     * Tests whether or not the translog should be flushed. This test is based on the current size
+     * of the translog compared to the configured flush threshold size.
+     *
+     * @return {@code true} if the translog should be flushed
     */
    boolean shouldFlush() {
-        Engine engine = getEngineOrNull();
+        final Engine engine = getEngineOrNull();
        if (engine != null) {
            try {
-                Translog translog = engine.getTranslog();
-                return translog.sizeInBytes() > indexSettings.getFlushThresholdSize().getBytes();
-            } catch (AlreadyClosedException ex) {
-                // that's fine we are already close - no need to flush
+                final Translog translog = engine.getTranslog();
+                return translog.shouldFlush();
+            } catch (final AlreadyClosedException e) {
+                // we are already closed, no need to flush or roll
+            }
+        }
+        return false;
+    }
+
+    /**
+     * Tests whether or not the translog generation should be rolled to a new generation. This test
+     * is based on the size of the current generation compared to the configured generation
+     * threshold size.
+     *
+     * @return {@code true} if the current generation should be rolled to a new generation
+     */
+    boolean shouldRollTranslogGeneration() {
+        final Engine engine = getEngineOrNull();
+        if (engine != null) {
+            try {
+                final Translog translog = engine.getTranslog();
+                return translog.shouldRollGeneration();
+            } catch (final AlreadyClosedException e) {
+                // we are already closed, no need to flush or roll
             }
         }
         return false;
@@ -1810,28 +1849,31 @@ public Translog.Durability getTranslogDurability() {
         return indexSettings.getTranslogDurability();
     }
-    private final AtomicBoolean asyncFlushRunning = new AtomicBoolean();
+    // we can not protect with a lock since we "release" on a different thread
+    private final AtomicBoolean flushOrRollRunning = new AtomicBoolean();
     /**
-     * Schedules a flush if needed but won't schedule more than one flush concurrently. The flush will be executed on the
-     * Flush thread-pool asynchronously.
-     *
-     * @return true if a new flush is scheduled otherwise false.
+     * Schedules a flush or translog generation roll if needed but will not schedule more than one
+     * concurrently. The operation will be executed asynchronously on the flush thread pool.
*/ - public boolean maybeFlush() { - if (shouldFlush()) { - if (asyncFlushRunning.compareAndSet(false, true)) { // we can't use a lock here since we "release" in a different thread - if (shouldFlush() == false) { - // we have to check again since otherwise there is a race when a thread passes - // the first shouldFlush() check next to another thread which flushes fast enough - // to finish before the current thread could flip the asyncFlushRunning flag. - // in that situation we have an extra unexpected flush. - asyncFlushRunning.compareAndSet(true, false); - } else { + public void afterWriteOperation() { + if (shouldFlush() || shouldRollTranslogGeneration()) { + if (flushOrRollRunning.compareAndSet(false, true)) { + /* + * We have to check again since otherwise there is a race when a thread passes the + * first check next to another thread which performs the operation quickly enough to + * finish before the current thread could flip the flag. In that situation, we have + * an extra operation. + * + * Additionally, a flush implicitly executes a translog generation roll so if we + * execute a flush then we do not need to check if we should roll the translog + * generation. + */ + if (shouldFlush()) { logger.debug("submitting async flush request"); - final AbstractRunnable abstractRunnable = new AbstractRunnable() { + final AbstractRunnable flush = new AbstractRunnable() { @Override - public void onFailure(Exception e) { + public void onFailure(final Exception e) { if (state != IndexShardState.CLOSED) { logger.warn("failed to flush index", e); } @@ -1844,16 +1886,38 @@ protected void doRun() throws Exception { @Override public void onAfter() { - asyncFlushRunning.compareAndSet(true, false); - maybeFlush(); // fire a flush up again if we have filled up the limits such that shouldFlush() returns true + flushOrRollRunning.compareAndSet(true, false); + afterWriteOperation(); } }; - threadPool.executor(ThreadPool.Names.FLUSH).execute(abstractRunnable); - return true; + threadPool.executor(ThreadPool.Names.FLUSH).execute(flush); + } else if (shouldRollTranslogGeneration()) { + logger.debug("submitting async roll translog generation request"); + final AbstractRunnable roll = new AbstractRunnable() { + @Override + public void onFailure(final Exception e) { + if (state != IndexShardState.CLOSED) { + logger.warn("failed to roll translog generation", e); + } + } + + @Override + protected void doRun() throws Exception { + rollTranslogGeneration(); + } + + @Override + public void onAfter() { + flushOrRollRunning.compareAndSet(true, false); + afterWriteOperation(); + } + }; + threadPool.executor(ThreadPool.Names.FLUSH).execute(roll); + } else { + flushOrRollRunning.compareAndSet(true, false); } } } - return false; } /** diff --git a/core/src/main/java/org/elasticsearch/index/similarity/BooleanSimilarityProvider.java b/core/src/main/java/org/elasticsearch/index/similarity/BooleanSimilarityProvider.java new file mode 100644 index 0000000000000..e5db045f3716f --- /dev/null +++ b/core/src/main/java/org/elasticsearch/index/similarity/BooleanSimilarityProvider.java @@ -0,0 +1,48 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.index.similarity; + +import org.apache.lucene.search.similarities.BooleanSimilarity; +import org.elasticsearch.common.settings.Settings; + +/** + * {@link SimilarityProvider} for the {@link BooleanSimilarity}, + * which is a simple similarity that gives terms a score equal + * to their query boost only. This is useful in situations where + * a field does not need to be scored by a full-text ranking + * algorithm, but rather all that matters is whether the query + * terms matched or not. + */ +public class BooleanSimilarityProvider extends AbstractSimilarityProvider { + + private final BooleanSimilarity similarity = new BooleanSimilarity(); + + public BooleanSimilarityProvider(String name, Settings settings, Settings indexSettings) { + super(name); + } + + /** + * {@inheritDoc} + */ + @Override + public BooleanSimilarity get() { + return similarity; + } +} diff --git a/core/src/main/java/org/elasticsearch/index/similarity/SimilarityService.java b/core/src/main/java/org/elasticsearch/index/similarity/SimilarityService.java index 54aa940a71f4a..e8203af852317 100644 --- a/core/src/main/java/org/elasticsearch/index/similarity/SimilarityService.java +++ b/core/src/main/java/org/elasticsearch/index/similarity/SimilarityService.java @@ -47,6 +47,7 @@ public final class SimilarityService extends AbstractIndexComponent { Map> buildIn = new HashMap<>(); defaults.put("classic", ClassicSimilarityProvider::new); defaults.put("BM25", BM25SimilarityProvider::new); + defaults.put("boolean", BooleanSimilarityProvider::new); buildIn.put("classic", ClassicSimilarityProvider::new); buildIn.put("BM25", BM25SimilarityProvider::new); buildIn.put("DFR", DFRSimilarityProvider::new); diff --git a/core/src/main/java/org/elasticsearch/index/store/Store.java b/core/src/main/java/org/elasticsearch/index/store/Store.java index 9c7c95b72118c..51516c3ddedaa 100644 --- a/core/src/main/java/org/elasticsearch/index/store/Store.java +++ b/core/src/main/java/org/elasticsearch/index/store/Store.java @@ -263,7 +263,6 @@ public MetadataSnapshot getMetadata(IndexCommit commit) throws IOException { } } - /** * Renames all the given files from the key of the map to the * value of the map. All successfully renamed files are removed from the map in-place. @@ -389,7 +388,6 @@ private void closeInternal() { } } - /** * Reads a MetadataSnapshot from the given index locations or returns an empty snapshot if it can't be read. * @@ -597,7 +595,7 @@ private static void failIfCorrupted(Directory directory, ShardId shardId) throws /** * This method deletes every file in this store that is not contained in the given source meta data or is a * legacy checksum file. After the delete it pulls the latest metadata snapshot from the store and compares it - * to the given snapshot. If the snapshots are inconsistent an illegal state exception is thrown + * to the given snapshot. If the snapshots are inconsistent an illegal state exception is thrown. * * @param reason the reason for this cleanup operation logged for each deleted file * @param sourceMetaData the metadata used for cleanup. 
all files in this metadata should be kept around. @@ -641,9 +639,9 @@ final void verifyAfterCleanup(MetadataSnapshot sourceMetaData, MetadataSnapshot for (StoreFileMetaData meta : recoveryDiff.different) { StoreFileMetaData local = targetMetaData.get(meta.name()); StoreFileMetaData remote = sourceMetaData.get(meta.name()); - // if we have different files the they must have no checksums otherwise something went wrong during recovery. - // we have that problem when we have an empty index is only a segments_1 file then we can't tell if it's a Lucene 4.8 file - // and therefore no checksum. That isn't much of a problem since we simply copy it over anyway but those files come out as + // if we have different files then they must have no checksums; otherwise something went wrong during recovery. + // we have that problem when we have an empty index is only a segments_1 file so we can't tell if it's a Lucene 4.8 file + // and therefore no checksum is included. That isn't a problem since we simply copy it over anyway but those files come out as // different in the diff. That's why we have to double check here again if the rest of it matches. // all is fine this file is just part of a commit or a segment that is different @@ -676,7 +674,6 @@ static final class StoreDirectory extends FilterDirectory { this.deletesLogger = deletesLogger; } - @Override public void close() throws IOException { assert false : "Nobody should close this directory except of the Store itself"; diff --git a/core/src/main/java/org/elasticsearch/index/translog/Translog.java b/core/src/main/java/org/elasticsearch/index/translog/Translog.java index ee4d0a4391a23..d9a8cc408f822 100644 --- a/core/src/main/java/org/elasticsearch/index/translog/Translog.java +++ b/core/src/main/java/org/elasticsearch/index/translog/Translog.java @@ -34,6 +34,7 @@ import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.common.lease.Releasable; import org.elasticsearch.common.lease.Releasables; import org.elasticsearch.common.lucene.uid.Versions; import org.elasticsearch.common.util.BigArrays; @@ -55,6 +56,7 @@ import java.nio.file.StandardOpenOption; import java.util.ArrayList; import java.util.List; +import java.util.Locale; import java.util.Optional; import java.util.Set; import java.util.concurrent.atomic.AtomicBoolean; @@ -329,7 +331,7 @@ public Path location() { * Returns the generation of the current transaction log. */ public long currentFileGeneration() { - try (ReleasableLock lock = readLock.acquire()) { + try (ReleasableLock ignored = readLock.acquire()) { return current.getGeneration(); } } @@ -409,10 +411,9 @@ TranslogWriter createWriter(long fileGeneration) throws IOException { public Location add(final Operation operation) throws IOException { final ReleasableBytesStreamOutput out = new ReleasableBytesStreamOutput(bigArrays); try { - final BufferedChecksumStreamOutput checksumStreamOutput = new BufferedChecksumStreamOutput(out); final long start = out.position(); out.skip(Integer.BYTES); - writeOperationNoSize(checksumStreamOutput, operation); + writeOperationNoSize(new BufferedChecksumStreamOutput(out), operation); final long end = out.position(); final int operationSize = (int) (end - Integer.BYTES - start); out.seek(start); @@ -442,6 +443,30 @@ public Location add(final Operation operation) throws IOException { } } + /** + * Tests whether or not the translog should be flushed. 
This test is based on the current size
+     * of the translog compared to the configured flush threshold size.
+     *
+     * @return {@code true} if the translog should be flushed
+     */
+    public boolean shouldFlush() {
+        final long size = this.sizeInBytes();
+        return size > this.indexSettings.getFlushThresholdSize().getBytes();
+    }
+
+    /**
+     * Tests whether or not the translog generation should be rolled to a new generation. This test
+     * is based on the size of the current generation compared to the configured generation
+     * threshold size.
+     *
+     * @return {@code true} if the current generation should be rolled to a new generation
+     */
+    public boolean shouldRollGeneration() {
+        final long size = this.current.sizeInBytes();
+        final long threshold = this.indexSettings.getGenerationThresholdSize().getBytes();
+        return size > threshold;
+    }
+
     /**
      * The a {@linkplain Location} that will sort after the {@linkplain Location} returned by the last write but before any locations which
      * can be returned by the next write.
@@ -1322,44 +1347,63 @@ public static void writeOperationNoSize(BufferedChecksumStreamOutput out, Transl
         out.writeInt((int) checksum);
     }
+    /**
+     * Roll the current translog generation into a new generation. This does not commit the
+     * translog.
+     *
+     * @throws IOException if an I/O exception occurred during any file operations
+     */
+    public void rollGeneration() throws IOException {
+        try (Releasable ignored = writeLock.acquire()) {
+            try {
+                final TranslogReader reader = current.closeIntoReader();
+                readers.add(reader);
+                final Path checkpoint = location.resolve(CHECKPOINT_FILE_NAME);
+                assert Checkpoint.read(checkpoint).generation == current.getGeneration();
+                final Path generationCheckpoint =
+                    location.resolve(getCommitCheckpointFileName(current.getGeneration()));
+                Files.copy(checkpoint, generationCheckpoint);
+                IOUtils.fsync(generationCheckpoint, false);
+                IOUtils.fsync(generationCheckpoint.getParent(), true);
+                // create a new translog file; this will sync it and update the checkpoint data;
+                current = createWriter(current.getGeneration() + 1);
+                logger.trace("current translog set to [{}]", current.getGeneration());
+            } catch (final Exception e) {
+                IOUtils.closeWhileHandlingException(this); // tragic event
+                throw e;
+            }
+        }
+    }
+
     @Override
     public long prepareCommit() throws IOException {
-        try (ReleasableLock lock = writeLock.acquire()) {
+        try (ReleasableLock ignored = writeLock.acquire()) {
             ensureOpen();
             if (currentCommittingGeneration != NOT_SET_GENERATION) {
-                throw new IllegalStateException("already committing a translog with generation: " + currentCommittingGeneration);
+                final String message = String.format(
+                    Locale.ROOT,
+                    "already committing a translog with generation [%d]",
+                    currentCommittingGeneration);
+                throw new IllegalStateException(message);
             }
             currentCommittingGeneration = current.getGeneration();
-            TranslogReader currentCommittingTranslog = current.closeIntoReader();
-            readers.add(currentCommittingTranslog);
-            Path checkpoint = location.resolve(CHECKPOINT_FILE_NAME);
-            assert Checkpoint.read(checkpoint).generation == currentCommittingTranslog.getGeneration();
-            Path commitCheckpoint = location.resolve(getCommitCheckpointFileName(currentCommittingTranslog.getGeneration()));
-            Files.copy(checkpoint, commitCheckpoint);
-            IOUtils.fsync(commitCheckpoint, false);
-            IOUtils.fsync(commitCheckpoint.getParent(), true);
-            // create a new translog file - this will sync it and update the checkpoint data;
-            current = createWriter(current.getGeneration() + 1);
-
logger.trace("current translog set to [{}]", current.getGeneration()); - - } catch (Exception e) { - IOUtils.closeWhileHandlingException(this); // tragic event - throw e; + rollGeneration(); } - return 0L; + return 0; } @Override public long commit() throws IOException { - try (ReleasableLock lock = writeLock.acquire()) { + try (ReleasableLock ignored = writeLock.acquire()) { ensureOpen(); if (currentCommittingGeneration == NOT_SET_GENERATION) { prepareCommit(); } assert currentCommittingGeneration != NOT_SET_GENERATION; - assert readers.stream().filter(r -> r.getGeneration() == currentCommittingGeneration).findFirst().isPresent() - : "reader list doesn't contain committing generation [" + currentCommittingGeneration + "]"; - lastCommittedTranslogFileGeneration = current.getGeneration(); // this is important - otherwise old files will not be cleaned up + assert readers.stream().anyMatch(r -> r.getGeneration() == currentCommittingGeneration) + : "readers missing committing generation [" + currentCommittingGeneration + "]"; + // set the last committed generation otherwise old files will not be cleaned up + lastCommittedTranslogFileGeneration = currentCommittingGeneration + 1; currentCommittingGeneration = NOT_SET_GENERATION; trimUnreferencedReaders(); } diff --git a/core/src/main/java/org/elasticsearch/indices/IndicesService.java b/core/src/main/java/org/elasticsearch/indices/IndicesService.java index a4e4c83bc0079..7bf80cc19861a 100644 --- a/core/src/main/java/org/elasticsearch/indices/IndicesService.java +++ b/core/src/main/java/org/elasticsearch/indices/IndicesService.java @@ -121,7 +121,6 @@ import java.io.IOException; import java.nio.file.Files; import java.util.ArrayList; -import java.util.EnumSet; import java.util.HashMap; import java.util.Iterator; import java.util.List; @@ -1145,7 +1144,7 @@ public void loadIntoContext(ShardSearchRequest request, SearchContext context, Q final QuerySearchResult result = context.queryResult(); StreamInput in = new NamedWriteableAwareStreamInput(bytesReference.streamInput(), namedWriteableRegistry); result.readFromWithId(context.id(), in); - result.shardTarget(context.shardTarget()); + result.setSearchShardTarget(context.shardTarget()); } else if (context.queryResult().searchTimedOut()) { // we have to invalidate the cache entry if we cached a query result form a request that timed out. // we can't really throw exceptions in the loading part to signal a timed out search to the outside world since if there are diff --git a/core/src/main/java/org/elasticsearch/monitor/fs/FsInfo.java b/core/src/main/java/org/elasticsearch/monitor/fs/FsInfo.java index 7bb1e51cd2372..e20eb42427ffe 100644 --- a/core/src/main/java/org/elasticsearch/monitor/fs/FsInfo.java +++ b/core/src/main/java/org/elasticsearch/monitor/fs/FsInfo.java @@ -138,8 +138,8 @@ private double addDouble(double current, double other) { public void add(Path path) { total = FsProbe.adjustForHugeFilesystems(addLong(total, path.total)); - free = addLong(free, path.free); - available = addLong(available, path.available); + free = FsProbe.adjustForHugeFilesystems(addLong(free, path.free)); + available = FsProbe.adjustForHugeFilesystems(addLong(available, path.available)); if (path.spins != null && path.spins.booleanValue()) { // Spinning is contagious! 
spins = Boolean.TRUE; diff --git a/core/src/main/java/org/elasticsearch/plugins/DummyPluginInfo.java b/core/src/main/java/org/elasticsearch/plugins/DummyPluginInfo.java index 90b1d32f4ae4a..73a3811f729f7 100644 --- a/core/src/main/java/org/elasticsearch/plugins/DummyPluginInfo.java +++ b/core/src/main/java/org/elasticsearch/plugins/DummyPluginInfo.java @@ -21,9 +21,13 @@ public class DummyPluginInfo extends PluginInfo { private DummyPluginInfo(String name, String description, String version, String classname) { - super(name, description, version, classname); + super(name, description, version, classname, false); } - public static final DummyPluginInfo INSTANCE = new DummyPluginInfo( - "dummy_plugin_name", "dummy plugin description", "dummy_plugin_version", "DummyPluginName"); + public static final DummyPluginInfo INSTANCE = + new DummyPluginInfo( + "dummy_plugin_name", + "dummy plugin description", + "dummy_plugin_version", + "DummyPluginName"); } diff --git a/core/src/main/java/org/elasticsearch/plugins/InstallPluginCommand.java b/core/src/main/java/org/elasticsearch/plugins/InstallPluginCommand.java index be78dd927ff32..7360eef9238a4 100644 --- a/core/src/main/java/org/elasticsearch/plugins/InstallPluginCommand.java +++ b/core/src/main/java/org/elasticsearch/plugins/InstallPluginCommand.java @@ -105,6 +105,13 @@ class InstallPluginCommand extends EnvironmentAwareCommand { private static final String PROPERTY_STAGING_ID = "es.plugins.staging"; + // exit codes for install + /** A plugin with the same name is already installed. */ + static final int PLUGIN_EXISTS = 1; + /** The plugin zip is not properly structured. */ + static final int PLUGIN_MALFORMED = 2; + + /** The builtin modules, which are plugins, but cannot be installed or removed. */ static final Set MODULES; static { @@ -333,7 +340,8 @@ private Path downloadZipAndChecksum(Terminal terminal, String urlString, Path tm byte[] zipbytes = Files.readAllBytes(zip); String gotChecksum = MessageDigests.toHexString(MessageDigests.sha1().digest(zipbytes)); if (expectedChecksum.equals(gotChecksum) == false) { - throw new UserException(ExitCodes.IO_ERROR, "SHA1 mismatch, expected " + expectedChecksum + " but got " + gotChecksum); + throw new UserException(ExitCodes.IO_ERROR, + "SHA1 mismatch, expected " + expectedChecksum + " but got " + gotChecksum); } return zip; @@ -357,12 +365,14 @@ private Path unzip(Path zip, Path pluginsDir) throws IOException, UserException hasEsDir = true; Path targetFile = target.resolve(entry.getName().substring("elasticsearch/".length())); - // Using the entry name as a path can result in an entry outside of the plugin dir, either if the - // name starts with the root of the filesystem, or it is a relative entry like ../whatever. - // This check attempts to identify both cases by first normalizing the path (which removes foo/..) - // and ensuring the normalized entry is still rooted with the target plugin directory. + // Using the entry name as a path can result in an entry outside of the plugin dir, + // either if the name starts with the root of the filesystem, or it is a relative + // entry like ../whatever. This check attempts to identify both cases by first + // normalizing the path (which removes foo/..) and ensuring the normalized entry + // is still rooted with the target plugin directory. 
if (targetFile.normalize().startsWith(target) == false) { - throw new IOException("Zip contains entry name '" + entry.getName() + "' resolving outside of plugin directory"); + throw new UserException(PLUGIN_MALFORMED, "Zip contains entry name '" + + entry.getName() + "' resolving outside of plugin directory"); } // be on the safe side: do not rely on that directories are always extracted @@ -384,7 +394,8 @@ private Path unzip(Path zip, Path pluginsDir) throws IOException, UserException Files.delete(zip); if (hasEsDir == false) { IOUtils.rm(target); - throw new UserException(ExitCodes.DATA_ERROR, "`elasticsearch` directory is missing in the plugin zip"); + throw new UserException(PLUGIN_MALFORMED, + "`elasticsearch` directory is missing in the plugin zip"); } return target; } @@ -424,10 +435,11 @@ private PluginInfo verify(Terminal terminal, Path pluginRoot, boolean isBatch, E if (Files.exists(destination)) { final String message = String.format( Locale.ROOT, - "plugin directory [%s] already exists; if you need to update the plugin, uninstall it first using command 'remove %s'", + "plugin directory [%s] already exists; if you need to update the plugin, " + + "uninstall it first using command 'remove %s'", destination.toAbsolutePath(), info.getName()); - throw new UserException(ExitCodes.CONFIG, message); + throw new UserException(PLUGIN_EXISTS, message); } terminal.println(VERBOSE, info.toString()); @@ -435,8 +447,8 @@ private PluginInfo verify(Terminal terminal, Path pluginRoot, boolean isBatch, E // don't let user install plugin as a module... // they might be unavoidably in maven central and are packaged up the same way) if (MODULES.contains(info.getName())) { - throw new UserException( - ExitCodes.USAGE, "plugin '" + info.getName() + "' cannot be installed like this, it is a system module"); + throw new UserException(ExitCodes.USAGE, "plugin '" + info.getName() + + "' cannot be installed like this, it is a system module"); } // check for jar hell before any copying @@ -446,7 +458,7 @@ private PluginInfo verify(Terminal terminal, Path pluginRoot, boolean isBatch, E // if it exists, confirm or warn the user Path policy = pluginRoot.resolve(PluginInfo.ES_PLUGIN_POLICY); if (Files.exists(policy)) { - PluginSecurity.readPolicy(policy, terminal, env, isBatch); + PluginSecurity.readPolicy(info, policy, terminal, env::tmpFile, isBatch); } return info; @@ -455,8 +467,7 @@ private PluginInfo verify(Terminal terminal, Path pluginRoot, boolean isBatch, E /** check a candidate plugin for jar hell before installing it */ void jarHellCheck(Path candidate, Path pluginsDir) throws Exception { // create list of current jars in classpath - final List jars = new ArrayList<>(); - jars.addAll(Arrays.asList(JarHell.parseClassPath())); + final Set jars = new HashSet<>(JarHell.parseClassPath()); // read existing bundles. this does some checks on the installation too. PluginsService.getPluginBundles(pluginsDir); @@ -464,13 +475,15 @@ void jarHellCheck(Path candidate, Path pluginsDir) throws Exception { // add plugin jars to the list Path pluginJars[] = FileSystemUtils.files(candidate, "*.jar"); for (Path jar : pluginJars) { - jars.add(jar.toUri().toURL()); + if (jars.add(jar.toUri().toURL()) == false) { + throw new IllegalStateException("jar hell! duplicate plugin jar: " + jar); + } } // TODO: no jars should be an error // TODO: verify the classname exists in one of the jars! 
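The switch from a List to a HashSet in the jar hell check above means a duplicated codebase is caught at insertion time via the boolean return of Set.add, rather than surfacing later as a class conflict. A small illustration of the pattern, with hypothetical inputs:

```java
import java.net.URL;
import java.nio.file.Path;
import java.util.Set;

// Illustrative: Set.add returns false when the element is already present,
// which is used here to flag a duplicated plugin jar immediately.
static void addJar(Set<URL> jars, Path jar) throws Exception {
    if (jars.add(jar.toUri().toURL()) == false) {
        throw new IllegalStateException("jar hell! duplicate plugin jar: " + jar);
    }
}
```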
// check combined (current classpath + new jars to-be-added) - JarHell.checkJarHell(jars.toArray(new URL[jars.size()])); + JarHell.checkJarHell(jars); } /** @@ -533,7 +546,7 @@ public FileVisitResult visitFile(Path pluginFile, BasicFileAttributes attrs) thr /** Copies the files from {@code tmpBinDir} into {@code destBinDir}, along with permissions from dest dirs parent. */ private void installBin(PluginInfo info, Path tmpBinDir, Path destBinDir) throws Exception { if (Files.isDirectory(tmpBinDir) == false) { - throw new UserException(ExitCodes.IO_ERROR, "bin in plugin " + info.getName() + " is not a directory"); + throw new UserException(PLUGIN_MALFORMED, "bin in plugin " + info.getName() + " is not a directory"); } Files.createDirectory(destBinDir); setFileAttributes(destBinDir, BIN_DIR_PERMS); @@ -541,9 +554,8 @@ private void installBin(PluginInfo info, Path tmpBinDir, Path destBinDir) throws try (DirectoryStream stream = Files.newDirectoryStream(tmpBinDir)) { for (Path srcFile : stream) { if (Files.isDirectory(srcFile)) { - throw new UserException( - ExitCodes.DATA_ERROR, - "Directories not allowed in bin dir for plugin " + info.getName() + ", found " + srcFile.getFileName()); + throw new UserException(PLUGIN_MALFORMED, "Directories not allowed in bin dir " + + "for plugin " + info.getName() + ", found " + srcFile.getFileName()); } Path destFile = destBinDir.resolve(tmpBinDir.relativize(srcFile)); @@ -560,7 +572,8 @@ private void installBin(PluginInfo info, Path tmpBinDir, Path destBinDir) throws */ private void installConfig(PluginInfo info, Path tmpConfigDir, Path destConfigDir) throws Exception { if (Files.isDirectory(tmpConfigDir) == false) { - throw new UserException(ExitCodes.IO_ERROR, "config in plugin " + info.getName() + " is not a directory"); + throw new UserException(PLUGIN_MALFORMED, + "config in plugin " + info.getName() + " is not a directory"); } Files.createDirectories(destConfigDir); @@ -576,7 +589,8 @@ private void installConfig(PluginInfo info, Path tmpConfigDir, Path destConfigDi try (DirectoryStream stream = Files.newDirectoryStream(tmpConfigDir)) { for (Path srcFile : stream) { if (Files.isDirectory(srcFile)) { - throw new UserException(ExitCodes.DATA_ERROR, "Directories not allowed in config dir for plugin " + info.getName()); + throw new UserException(PLUGIN_MALFORMED, + "Directories not allowed in config dir for plugin " + info.getName()); } Path destFile = destConfigDir.resolve(tmpConfigDir.relativize(srcFile)); diff --git a/core/src/main/java/org/elasticsearch/plugins/ListPluginsCommand.java b/core/src/main/java/org/elasticsearch/plugins/ListPluginsCommand.java index a674e7c6e2477..c2b5ce34b5469 100644 --- a/core/src/main/java/org/elasticsearch/plugins/ListPluginsCommand.java +++ b/core/src/main/java/org/elasticsearch/plugins/ListPluginsCommand.java @@ -56,9 +56,17 @@ protected void execute(Terminal terminal, OptionSet options, Environment env) th } Collections.sort(plugins); for (final Path plugin : plugins) { - terminal.println(plugin.getFileName().toString()); - PluginInfo info = PluginInfo.readFromProperties(env.pluginsFile().resolve(plugin.toAbsolutePath())); - terminal.println(Terminal.Verbosity.VERBOSE, info.toString()); + terminal.println(Terminal.Verbosity.SILENT, plugin.getFileName().toString()); + try { + PluginInfo info = PluginInfo.readFromProperties(env.pluginsFile().resolve(plugin.toAbsolutePath())); + terminal.println(Terminal.Verbosity.VERBOSE, info.toString()); + } catch (IllegalArgumentException e) { + if 
(e.getMessage().contains("incompatible with version")) { + terminal.println("WARNING: " + e.getMessage()); + } else { + throw e; + } + } } } } diff --git a/core/src/main/java/org/elasticsearch/plugins/Platforms.java b/core/src/main/java/org/elasticsearch/plugins/Platforms.java new file mode 100644 index 0000000000000..62bb32a4e9a1a --- /dev/null +++ b/core/src/main/java/org/elasticsearch/plugins/Platforms.java @@ -0,0 +1,83 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.plugins; + +import org.apache.lucene.util.Constants; + +import java.nio.file.Path; +import java.util.Locale; + +/** + * Encapsulates platform-dependent methods for handling native components of plugins. + */ +public class Platforms { + + private static final String PROGRAM_NAME = Constants.WINDOWS ? "controller.exe" : "controller"; + private static final String PLATFORM_NAME = + Platforms.platformName(Constants.OS_NAME, Constants.OS_ARCH); + + private Platforms() {} + + /** + * The path to the native controller for a plugin with native components. + */ + public static Path nativeControllerPath(Path plugin) { + return plugin + .resolve("platform") + .resolve(PLATFORM_NAME) + .resolve("bin") + .resolve(PROGRAM_NAME); + } + + /** + * Return the platform name based on the OS name and + * - darwin-x86_64 + * - linux-x86-64 + * - windows-x86_64 + * For *nix platforms this is more-or-less `uname -s`-`uname -m` converted to lower case. + * However, for consistency between different operating systems on the same architecture + * "amd64" is replaced with "x86_64" and "i386" with "x86". + * For Windows it's "windows-" followed by either "x86" or "x86_64". + */ + public static String platformName(final String osName, final String osArch) { + final String lowerCaseOs = osName.toLowerCase(Locale.ROOT); + final String normalizedOs; + if (lowerCaseOs.startsWith("windows")) { + normalizedOs = "windows"; + } else if (lowerCaseOs.equals("mac os x")) { + normalizedOs = "darwin"; + } else { + normalizedOs = lowerCaseOs; + } + + final String lowerCaseArch = osArch.toLowerCase(Locale.ROOT); + final String normalizedArch; + if (lowerCaseArch.equals("amd64")) { + normalizedArch = "x86_64"; + } else if (lowerCaseArch.equals("i386")) { + normalizedArch = "x86"; + } else { + normalizedArch = lowerCaseArch; + } + + return normalizedOs + "-" + normalizedArch; + } + +} diff --git a/core/src/main/java/org/elasticsearch/plugins/PluginInfo.java b/core/src/main/java/org/elasticsearch/plugins/PluginInfo.java index 3e241eadd37ba..943f9018e6f88 100644 --- a/core/src/main/java/org/elasticsearch/plugins/PluginInfo.java +++ b/core/src/main/java/org/elasticsearch/plugins/PluginInfo.java @@ -16,6 +16,7 @@ * specific language governing permissions and limitations * under the License. 
*/ + package org.elasticsearch.plugins; import org.elasticsearch.Version; @@ -30,133 +31,215 @@ import java.io.InputStream; import java.nio.file.Files; import java.nio.file.Path; +import java.util.Locale; import java.util.Properties; +/** + * An in-memory representation of the plugin descriptor. + */ public class PluginInfo implements Writeable, ToXContent { public static final String ES_PLUGIN_PROPERTIES = "plugin-descriptor.properties"; public static final String ES_PLUGIN_POLICY = "plugin-security.policy"; - static final class Fields { - static final String NAME = "name"; - static final String DESCRIPTION = "description"; - static final String URL = "url"; - static final String VERSION = "version"; - static final String CLASSNAME = "classname"; - } - private final String name; private final String description; private final String version; private final String classname; + private final boolean hasNativeController; /** - * Information about plugins + * Construct plugin info. * - * @param name Its name - * @param description Its description - * @param version Version number + * @param name the name of the plugin + * @param description a description of the plugin + * @param version the version of Elasticsearch the plugin is built for + * @param classname the entry point to the plugin + * @param hasNativeController whether or not the plugin has a native controller */ - public PluginInfo(String name, String description, String version, String classname) { + public PluginInfo( + final String name, + final String description, + final String version, + final String classname, + final boolean hasNativeController) { this.name = name; this.description = description; this.version = version; this.classname = classname; + this.hasNativeController = hasNativeController; } - public PluginInfo(StreamInput in) throws IOException { + /** + * Construct plugin info from a stream. + * + * @param in the stream + * @throws IOException if an I/O exception occurred reading the plugin info from the stream + */ + public PluginInfo(final StreamInput in) throws IOException { this.name = in.readString(); this.description = in.readString(); this.version = in.readString(); this.classname = in.readString(); + if (in.getVersion().onOrAfter(Version.V_5_4_0_UNRELEASED)) { + hasNativeController = in.readBoolean(); + } else { + hasNativeController = false; + } } @Override - public void writeTo(StreamOutput out) throws IOException { + public void writeTo(final StreamOutput out) throws IOException { out.writeString(name); out.writeString(description); out.writeString(version); out.writeString(classname); + if (out.getVersion().onOrAfter(Version.V_5_4_0_UNRELEASED)) { + out.writeBoolean(hasNativeController); + } } /** reads (and validates) plugin metadata descriptor file */ - public static PluginInfo readFromProperties(Path dir) throws IOException { - Path descriptor = dir.resolve(ES_PLUGIN_PROPERTIES); - Properties props = new Properties(); + + /** + * Reads and validates the plugin descriptor file. 
+ * + * @param path the path to the root directory for the plugin + * @return the plugin info + * @throws IOException if an I/O exception occurred reading the plugin descriptor + */ + public static PluginInfo readFromProperties(final Path path) throws IOException { + final Path descriptor = path.resolve(ES_PLUGIN_PROPERTIES); + final Properties props = new Properties(); try (InputStream stream = Files.newInputStream(descriptor)) { props.load(stream); } - String name = props.getProperty("name"); + final String name = props.getProperty("name"); if (name == null || name.isEmpty()) { - throw new IllegalArgumentException("Property [name] is missing in [" + descriptor + "]"); + throw new IllegalArgumentException( + "property [name] is missing in [" + descriptor + "]"); } - String description = props.getProperty("description"); + final String description = props.getProperty("description"); if (description == null) { - throw new IllegalArgumentException("Property [description] is missing for plugin [" + name + "]"); + throw new IllegalArgumentException( + "property [description] is missing for plugin [" + name + "]"); } - String version = props.getProperty("version"); + final String version = props.getProperty("version"); if (version == null) { - throw new IllegalArgumentException("Property [version] is missing for plugin [" + name + "]"); + throw new IllegalArgumentException( + "property [version] is missing for plugin [" + name + "]"); } - String esVersionString = props.getProperty("elasticsearch.version"); + final String esVersionString = props.getProperty("elasticsearch.version"); if (esVersionString == null) { - throw new IllegalArgumentException("Property [elasticsearch.version] is missing for plugin [" + name + "]"); + throw new IllegalArgumentException( + "property [elasticsearch.version] is missing for plugin [" + name + "]"); } - Version esVersion = Version.fromString(esVersionString); + final Version esVersion = Version.fromString(esVersionString); if (esVersion.equals(Version.CURRENT) == false) { - throw new IllegalArgumentException("Plugin [" + name + "] is incompatible with Elasticsearch [" + Version.CURRENT.toString() + - "]. 
Was designed for version [" + esVersionString + "]"); + final String message = String.format( + Locale.ROOT, + "plugin [%s] is incompatible with version [%s]; was designed for version [%s]", + name, + Version.CURRENT.toString(), + esVersionString); + throw new IllegalArgumentException(message); } - String javaVersionString = props.getProperty("java.version"); + final String javaVersionString = props.getProperty("java.version"); if (javaVersionString == null) { - throw new IllegalArgumentException("Property [java.version] is missing for plugin [" + name + "]"); + throw new IllegalArgumentException( + "property [java.version] is missing for plugin [" + name + "]"); } JarHell.checkVersionFormat(javaVersionString); JarHell.checkJavaVersion(name, javaVersionString); - String classname = props.getProperty("classname"); + final String classname = props.getProperty("classname"); if (classname == null) { - throw new IllegalArgumentException("Property [classname] is missing for plugin [" + name + "]"); + throw new IllegalArgumentException( + "property [classname] is missing for plugin [" + name + "]"); } - return new PluginInfo(name, description, version, classname); + final String hasNativeControllerValue = props.getProperty("has.native.controller"); + final boolean hasNativeController; + if (hasNativeControllerValue == null) { + hasNativeController = false; + } else { + switch (hasNativeControllerValue) { + case "true": + hasNativeController = true; + break; + case "false": + hasNativeController = false; + break; + default: + final String message = String.format( + Locale.ROOT, + "property [%s] must be [%s], [%s], or unspecified but was [%s]", + "has_native_controller", + "true", + "false", + hasNativeControllerValue); + throw new IllegalArgumentException(message); + } + } + + return new PluginInfo(name, description, version, classname, hasNativeController); } /** - * @return Plugin's name + * The name of the plugin. + * + * @return the plugin name */ public String getName() { return name; } /** - * @return Plugin's description if any + * The description of the plugin. + * + * @return the plugin description */ public String getDescription() { return description; } /** - * @return plugin's classname + * The entry point to the plugin. + * + * @return the entry point to the plugin */ public String getClassname() { return classname; } /** - * @return Version number for the plugin + * The version of Elasticsearch the plugin was built for. + * + * @return the version */ public String getVersion() { return version; } + /** + * Whether or not the plugin has a native controller. 
+ * + * @return {@code true} if the plugin has a native controller + */ + public boolean hasNativeController() { + return hasNativeController; + } + @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { builder.startObject(); - builder.field(Fields.NAME, name); - builder.field(Fields.VERSION, version); - builder.field(Fields.DESCRIPTION, description); - builder.field(Fields.CLASSNAME, classname); + { + builder.field("name", name); + builder.field("version", version); + builder.field("description", description); + builder.field("classname", classname); + builder.field("has_native_controller", hasNativeController); + } builder.endObject(); return builder; @@ -187,8 +270,9 @@ public String toString() { .append("Name: ").append(name).append("\n") .append("Description: ").append(description).append("\n") .append("Version: ").append(version).append("\n") + .append("Native Controller: ").append(hasNativeController).append("\n") .append(" * Classname: ").append(classname); - return information.toString(); } + } diff --git a/core/src/main/java/org/elasticsearch/plugins/PluginSecurity.java b/core/src/main/java/org/elasticsearch/plugins/PluginSecurity.java index f9c3d1826c992..55a3c6069e7ef 100644 --- a/core/src/main/java/org/elasticsearch/plugins/PluginSecurity.java +++ b/core/src/main/java/org/elasticsearch/plugins/PluginSecurity.java @@ -37,60 +37,74 @@ import java.util.Collections; import java.util.Comparator; import java.util.List; +import java.util.function.Supplier; class PluginSecurity { /** * Reads plugin policy, prints/confirms exceptions */ - static void readPolicy(Path file, Terminal terminal, Environment environment, boolean batch) throws IOException { - PermissionCollection permissions = parsePermissions(terminal, file, environment.tmpFile()); + static void readPolicy(PluginInfo info, Path file, Terminal terminal, Supplier tmpFile, boolean batch) throws IOException { + PermissionCollection permissions = parsePermissions(terminal, file, tmpFile.get()); List requested = Collections.list(permissions.elements()); if (requested.isEmpty()) { terminal.println(Verbosity.VERBOSE, "plugin has a policy file with no additional permissions"); - return; - } + } else { - // sort permissions in a reasonable order - Collections.sort(requested, new Comparator() { - @Override - public int compare(Permission o1, Permission o2) { - int cmp = o1.getClass().getName().compareTo(o2.getClass().getName()); - if (cmp == 0) { - String name1 = o1.getName(); - String name2 = o2.getName(); - if (name1 == null) { - name1 = ""; - } - if (name2 == null) { - name2 = ""; - } - cmp = name1.compareTo(name2); + // sort permissions in a reasonable order + Collections.sort(requested, new Comparator() { + @Override + public int compare(Permission o1, Permission o2) { + int cmp = o1.getClass().getName().compareTo(o2.getClass().getName()); if (cmp == 0) { - String actions1 = o1.getActions(); - String actions2 = o2.getActions(); - if (actions1 == null) { - actions1 = ""; + String name1 = o1.getName(); + String name2 = o2.getName(); + if (name1 == null) { + name1 = ""; + } + if (name2 == null) { + name2 = ""; } - if (actions2 == null) { - actions2 = ""; + cmp = name1.compareTo(name2); + if (cmp == 0) { + String actions1 = o1.getActions(); + String actions2 = o2.getActions(); + if (actions1 == null) { + actions1 = ""; + } + if (actions2 == null) { + actions2 = ""; + } + cmp = actions1.compareTo(actions2); } - cmp = actions1.compareTo(actions2); } + return cmp; } - return cmp; + }); + 
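The nested comparator above orders permissions by class name, then by name, then by actions, treating null as the empty string. The same ordering could be expressed more compactly with Comparator.comparing chains; a sketch for illustration, not part of the change itself:

```java
import java.security.Permission;
import java.util.Comparator;
import java.util.List;

// Illustrative equivalent of the anonymous Comparator in the diff.
static void sortPermissions(List<Permission> requested) {
    requested.sort(Comparator
            .comparing((Permission p) -> p.getClass().getName())
            .thenComparing(p -> p.getName() == null ? "" : p.getName())
            .thenComparing(p -> p.getActions() == null ? "" : p.getActions()));
}
```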
+ terminal.println(Verbosity.NORMAL, "@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@"); + terminal.println(Verbosity.NORMAL, "@ WARNING: plugin requires additional permissions @"); + terminal.println(Verbosity.NORMAL, "@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@"); + // print all permissions: + for (Permission permission : requested) { + terminal.println(Verbosity.NORMAL, "* " + formatPermission(permission)); } - }); - - terminal.println(Verbosity.NORMAL, "@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@"); - terminal.println(Verbosity.NORMAL, "@ WARNING: plugin requires additional permissions @"); - terminal.println(Verbosity.NORMAL, "@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@"); - // print all permissions: - for (Permission permission : requested) { - terminal.println(Verbosity.NORMAL, "* " + formatPermission(permission)); + terminal.println(Verbosity.NORMAL, "See http://docs.oracle.com/javase/8/docs/technotes/guides/security/permissions.html"); + terminal.println(Verbosity.NORMAL, "for descriptions of what these permissions allow and the associated risks."); + prompt(terminal, batch); + } + + if (info.hasNativeController()) { + terminal.println(Verbosity.NORMAL, "@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@"); + terminal.println(Verbosity.NORMAL, "@ WARNING: plugin forks a native controller @"); + terminal.println(Verbosity.NORMAL, "@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@"); + terminal.println(Verbosity.NORMAL, "This plugin launches a native controller that is not subject to the Java"); + terminal.println(Verbosity.NORMAL, "security manager nor to system call filters."); + prompt(terminal, batch); } - terminal.println(Verbosity.NORMAL, "See http://docs.oracle.com/javase/8/docs/technotes/guides/security/permissions.html"); - terminal.println(Verbosity.NORMAL, "for descriptions of what these permissions allow and the associated risks."); + } + + private static void prompt(final Terminal terminal, final boolean batch) { if (!batch) { terminal.println(Verbosity.NORMAL, ""); String text = terminal.readText("Continue with installation? [y/N]"); diff --git a/core/src/main/java/org/elasticsearch/plugins/PluginsService.java b/core/src/main/java/org/elasticsearch/plugins/PluginsService.java index 7d9887370588e..fc63678b94f98 100644 --- a/core/src/main/java/org/elasticsearch/plugins/PluginsService.java +++ b/core/src/main/java/org/elasticsearch/plugins/PluginsService.java @@ -58,8 +58,10 @@ import java.util.Collections; import java.util.HashMap; import java.util.HashSet; +import java.util.LinkedHashSet; import java.util.List; import java.util.Map; +import java.util.Objects; import java.util.Set; import java.util.function.Function; import java.util.stream.Collectors; @@ -99,7 +101,7 @@ public PluginsService(Settings settings, Path modulesDirectory, Path pluginsDire // first we load plugins that are on the classpath. 
this is for tests and transport clients for (Class pluginClass : classpathPlugins) { Plugin plugin = loadPlugin(pluginClass, settings); - PluginInfo pluginInfo = new PluginInfo(pluginClass.getName(), "classpath plugin", "NA", pluginClass.getName()); + PluginInfo pluginInfo = new PluginInfo(pluginClass.getName(), "classpath plugin", "NA", pluginClass.getName(), false); if (logger.isTraceEnabled()) { logger.trace("plugin loaded from classpath [{}]", pluginInfo); } @@ -107,16 +109,16 @@ public PluginsService(Settings settings, Path modulesDirectory, Path pluginsDire pluginsList.add(pluginInfo); } + Set seenBundles = new LinkedHashSet<>(); List modulesList = new ArrayList<>(); // load modules if (modulesDirectory != null) { try { - List bundles = getModuleBundles(modulesDirectory); - List> loaded = loadBundles(bundles); - pluginsLoaded.addAll(loaded); - for (Tuple module : loaded) { - modulesList.add(module.v1()); + Set modules = getModuleBundles(modulesDirectory); + for (Bundle bundle : modules) { + modulesList.add(bundle.plugin); } + seenBundles.addAll(modules); } catch (IOException ex) { throw new IllegalStateException("Unable to initialize modules", ex); } @@ -125,17 +127,19 @@ public PluginsService(Settings settings, Path modulesDirectory, Path pluginsDire // now, find all the ones that are in plugins/ if (pluginsDirectory != null) { try { - List bundles = getPluginBundles(pluginsDirectory); - List> loaded = loadBundles(bundles); - pluginsLoaded.addAll(loaded); - for (Tuple plugin : loaded) { - pluginsList.add(plugin.v1()); + Set plugins = getPluginBundles(pluginsDirectory); + for (Bundle bundle : plugins) { + pluginsList.add(bundle.plugin); } + seenBundles.addAll(plugins); } catch (IOException ex) { throw new IllegalStateException("Unable to initialize plugins", ex); } } + List> loaded = loadBundles(seenBundles); + pluginsLoaded.addAll(loaded); + this.info = new PluginsAndModules(pluginsList, modulesList); this.plugins = Collections.unmodifiableList(pluginsLoaded); @@ -234,48 +238,70 @@ public PluginsAndModules info() { // a "bundle" is a group of plugins in a single classloader // really should be 1-1, but we are not so fortunate static class Bundle { - List plugins = new ArrayList<>(); - List urls = new ArrayList<>(); + final PluginInfo plugin; + final Set urls; + + Bundle(PluginInfo plugin, Set urls) { + this.plugin = Objects.requireNonNull(plugin); + this.urls = Objects.requireNonNull(urls); + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + Bundle bundle = (Bundle) o; + return Objects.equals(plugin, bundle.plugin); + } + + @Override + public int hashCode() { + return Objects.hash(plugin); + } } // similar in impl to getPluginBundles, but DO NOT try to make them share code. // we don't need to inherit all the leniency, and things are different enough. 
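Because the new Bundle type defines equality purely by its PluginInfo, collecting modules and plugins into one LinkedHashSet both preserves discovery order and rejects a plugin that appears twice. A minimal sketch of that idea with a simplified, hypothetical stand-in type:

```java
import java.util.LinkedHashSet;
import java.util.Objects;
import java.util.Set;

// Simplified stand-in for the Bundle in the diff: identity is the plugin name only,
// so two bundles for the same plugin collide even if their jar URLs differ.
final class SimpleBundle {
    final String pluginName;

    SimpleBundle(String pluginName) {
        this.pluginName = Objects.requireNonNull(pluginName);
    }

    @Override
    public boolean equals(Object o) {
        return o instanceof SimpleBundle
                && pluginName.equals(((SimpleBundle) o).pluginName);
    }

    @Override
    public int hashCode() {
        return pluginName.hashCode();
    }

    // LinkedHashSet keeps insertion (discovery) order; add() returning false flags a duplicate.
    static void register(Set<SimpleBundle> seen, SimpleBundle bundle) {
        if (seen.add(bundle) == false) {
            throw new IllegalStateException("duplicate plugin: " + bundle.pluginName);
        }
    }

    public static void main(String[] args) {
        Set<SimpleBundle> seen = new LinkedHashSet<>();
        register(seen, new SimpleBundle("analysis-icu"));
        register(seen, new SimpleBundle("analysis-icu")); // throws: duplicate plugin
    }
}
```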
- static List getModuleBundles(Path modulesDirectory) throws IOException { + static Set getModuleBundles(Path modulesDirectory) throws IOException { // damn leniency if (Files.notExists(modulesDirectory)) { - return Collections.emptyList(); + return Collections.emptySet(); } - List bundles = new ArrayList<>(); + Set bundles = new LinkedHashSet<>(); try (DirectoryStream stream = Files.newDirectoryStream(modulesDirectory)) { for (Path module : stream) { if (FileSystemUtils.isHidden(module)) { continue; // skip over .DS_Store etc } PluginInfo info = PluginInfo.readFromProperties(module); - Bundle bundle = new Bundle(); - bundle.plugins.add(info); + Set urls = new LinkedHashSet<>(); // gather urls for jar files try (DirectoryStream jarStream = Files.newDirectoryStream(module, "*.jar")) { for (Path jar : jarStream) { // normalize with toRealPath to get symlinks out of our hair - bundle.urls.add(jar.toRealPath().toUri().toURL()); + URL url = jar.toRealPath().toUri().toURL(); + if (urls.add(url) == false) { + throw new IllegalStateException("duplicate codebase: " + url); + } } } - bundles.add(bundle); + if (bundles.add(new Bundle(info, urls)) == false) { + throw new IllegalStateException("duplicate module: " + info); + } } } return bundles; } - static List getPluginBundles(Path pluginsDirectory) throws IOException { + static Set getPluginBundles(Path pluginsDirectory) throws IOException { Logger logger = Loggers.getLogger(PluginsService.class); // TODO: remove this leniency, but tests bogusly rely on it if (!isAccessibleDirectory(pluginsDirectory, logger)) { - return Collections.emptyList(); + return Collections.emptySet(); } - List bundles = new ArrayList<>(); + Set bundles = new LinkedHashSet<>(); try (DirectoryStream stream = Files.newDirectoryStream(pluginsDirectory)) { for (Path plugin : stream) { @@ -292,47 +318,58 @@ static List getPluginBundles(Path pluginsDirectory) throws IOException { + plugin.getFileName() + "]. Was the plugin built before 2.0?", e); } - List urls = new ArrayList<>(); + Set urls = new LinkedHashSet<>(); try (DirectoryStream jarStream = Files.newDirectoryStream(plugin, "*.jar")) { for (Path jar : jarStream) { // normalize with toRealPath to get symlinks out of our hair - urls.add(jar.toRealPath().toUri().toURL()); + URL url = jar.toRealPath().toUri().toURL(); + if (urls.add(url) == false) { + throw new IllegalStateException("duplicate codebase: " + url); + } } } - final Bundle bundle = new Bundle(); - bundles.add(bundle); - bundle.plugins.add(info); - bundle.urls.addAll(urls); + if (bundles.add(new Bundle(info, urls)) == false) { + throw new IllegalStateException("duplicate plugin: " + info); + } } } return bundles; } - private List> loadBundles(List bundles) { + private List> loadBundles(Set bundles) { List> plugins = new ArrayList<>(); for (Bundle bundle : bundles) { // jar-hell check the bundle against the parent classloader // pluginmanager does it, but we do it again, in case lusers mess with jar files manually try { - final List jars = new ArrayList<>(); - jars.addAll(Arrays.asList(JarHell.parseClassPath())); - jars.addAll(bundle.urls); - JarHell.checkJarHell(jars.toArray(new URL[0])); + Set classpath = JarHell.parseClassPath(); + // check we don't have conflicting codebases + Set intersection = new HashSet<>(classpath); + intersection.retainAll(bundle.urls); + if (intersection.isEmpty() == false) { + throw new IllegalStateException("jar hell! 
duplicate codebases between" + + " plugin and core: " + intersection); + } + // check we don't have conflicting classes + Set union = new HashSet<>(classpath); + union.addAll(bundle.urls); + JarHell.checkJarHell(union); } catch (Exception e) { - throw new IllegalStateException("failed to load bundle " + bundle.urls + " due to jar hell", e); + throw new IllegalStateException("failed to load plugin " + bundle.plugin + + " due to jar hell", e); } - // create a child to load the plugins in this bundle - ClassLoader loader = URLClassLoader.newInstance(bundle.urls.toArray(new URL[0]), getClass().getClassLoader()); - for (PluginInfo pluginInfo : bundle.plugins) { - // reload lucene SPI with any new services from the plugin - reloadLuceneSPI(loader); - final Class pluginClass = loadPluginClass(pluginInfo.getClassname(), loader); - final Plugin plugin = loadPlugin(pluginClass, settings); - plugins.add(new Tuple<>(pluginInfo, plugin)); - } + // create a child to load the plugin in this bundle + ClassLoader loader = URLClassLoader.newInstance(bundle.urls.toArray(new URL[0]), + getClass().getClassLoader()); + // reload lucene SPI with any new services from the plugin + reloadLuceneSPI(loader); + final Class pluginClass = + loadPluginClass(bundle.plugin.getClassname(), loader); + final Plugin plugin = loadPlugin(pluginClass, settings); + plugins.add(new Tuple<>(bundle.plugin, plugin)); } return Collections.unmodifiableList(plugins); diff --git a/core/src/main/java/org/elasticsearch/plugins/RemovePluginCommand.java b/core/src/main/java/org/elasticsearch/plugins/RemovePluginCommand.java index 5219e50285cda..8e81f97d84ce4 100644 --- a/core/src/main/java/org/elasticsearch/plugins/RemovePluginCommand.java +++ b/core/src/main/java/org/elasticsearch/plugins/RemovePluginCommand.java @@ -19,12 +19,14 @@ package org.elasticsearch.plugins; +import java.io.IOException; import java.nio.file.AtomicMoveNotSupportedException; import java.nio.file.Files; import java.nio.file.Path; import java.nio.file.StandardCopyOption; import java.util.ArrayList; import java.util.List; +import java.util.Locale; import joptsimple.OptionSet; import joptsimple.OptionSpec; @@ -39,36 +41,51 @@ import static org.elasticsearch.cli.Terminal.Verbosity.VERBOSE; /** - * A command for the plugin cli to remove a plugin from elasticsearch. + * A command for the plugin CLI to remove a plugin from Elasticsearch. */ class RemovePluginCommand extends EnvironmentAwareCommand { private final OptionSpec arguments; RemovePluginCommand() { - super("Removes a plugin from elasticsearch"); + super("removes a plugin from Elasticsearch"); this.arguments = parser.nonOptions("plugin name"); } @Override - protected void execute(Terminal terminal, OptionSet options, Environment env) throws Exception { - String arg = arguments.value(options); - execute(terminal, arg, env); + protected void execute(final Terminal terminal, final OptionSet options, final Environment env) + throws Exception { + final String pluginName = arguments.value(options); + execute(terminal, pluginName, env); } - // pkg private for testing - void execute(Terminal terminal, String pluginName, Environment env) throws Exception { + /** + * Remove the plugin specified by {@code pluginName}. 
+ * + * @param terminal the terminal to use for input/output + * @param pluginName the name of the plugin to remove + * @param env the environment for the local node + * @throws IOException if any I/O exception occurs while performing a file operation + * @throws UserException if plugin name is null + * @throws UserException if plugin directory does not exist + * @throws UserException if the plugin bin directory is not a directory + */ + void execute(final Terminal terminal, final String pluginName, final Environment env) + throws IOException, UserException { if (pluginName == null) { throw new UserException(ExitCodes.USAGE, "plugin name is required"); } - terminal.println("-> Removing " + Strings.coalesceToEmpty(pluginName) + "..."); + terminal.println("-> removing [" + Strings.coalesceToEmpty(pluginName) + "]..."); final Path pluginDir = env.pluginsFile().resolve(pluginName); if (Files.exists(pluginDir) == false) { - throw new UserException( - ExitCodes.CONFIG, - "plugin " + pluginName + " not found; run 'elasticsearch-plugin list' to get list of installed plugins"); + final String message = String.format( + Locale.ROOT, + "plugin [%s] not found; " + + "run 'elasticsearch-plugin list' to get list of installed plugins", + pluginName); + throw new UserException(ExitCodes.CONFIG, message); } final List pluginPaths = new ArrayList<>(); @@ -76,30 +93,41 @@ void execute(Terminal terminal, String pluginName, Environment env) throws Excep final Path pluginBinDir = env.binFile().resolve(pluginName); if (Files.exists(pluginBinDir)) { if (Files.isDirectory(pluginBinDir) == false) { - throw new UserException(ExitCodes.IO_ERROR, "Bin dir for " + pluginName + " is not a directory"); + throw new UserException( + ExitCodes.IO_ERROR, "bin dir for " + pluginName + " is not a directory"); } pluginPaths.add(pluginBinDir); - terminal.println(VERBOSE, "Removing: " + pluginBinDir); + terminal.println(VERBOSE, "removing [" + pluginBinDir + "]"); } - terminal.println(VERBOSE, "Removing: " + pluginDir); + terminal.println(VERBOSE, "removing [" + pluginDir + "]"); final Path tmpPluginDir = env.pluginsFile().resolve(".removing-" + pluginName); try { Files.move(pluginDir, tmpPluginDir, StandardCopyOption.ATOMIC_MOVE); } catch (final AtomicMoveNotSupportedException e) { - // this can happen on a union filesystem when a plugin is not installed on the top layer; we fall back to a non-atomic move + /* + * On a union file system if the plugin that we are removing is not installed on the + * top layer then atomic move will not be supported. In this case, we fall back to a + * non-atomic move. + */ Files.move(pluginDir, tmpPluginDir); } pluginPaths.add(tmpPluginDir); IOUtils.rm(pluginPaths.toArray(new Path[pluginPaths.size()])); - // we preserve the config files in case the user is upgrading the plugin, but we print - // a message so the user knows in case they want to remove manually + /* + * We preserve the config files in case the user is upgrading the plugin, but we print a + * message so the user knows in case they want to remove manually. 
+ */ final Path pluginConfigDir = env.configFile().resolve(pluginName); if (Files.exists(pluginConfigDir)) { - terminal.println( - "-> Preserving plugin config files [" + pluginConfigDir + "] in case of upgrade, delete manually if not needed"); + final String message = String.format( + Locale.ROOT, + "-> preserving plugin config files [%s] in case of upgrade; " + + "delete manually if not needed", + pluginConfigDir); + terminal.println(message); } } diff --git a/core/src/main/java/org/elasticsearch/rest/action/RestFieldCapabilitiesAction.java b/core/src/main/java/org/elasticsearch/rest/action/RestFieldCapabilitiesAction.java new file mode 100644 index 0000000000000..e983bdc182a01 --- /dev/null +++ b/core/src/main/java/org/elasticsearch/rest/action/RestFieldCapabilitiesAction.java @@ -0,0 +1,88 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.rest.action; + +import org.elasticsearch.action.fieldcaps.FieldCapabilitiesRequest; +import org.elasticsearch.action.fieldcaps.FieldCapabilitiesResponse; +import org.elasticsearch.action.support.IndicesOptions; +import org.elasticsearch.client.node.NodeClient; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.rest.BaseRestHandler; +import org.elasticsearch.rest.BytesRestResponse; +import org.elasticsearch.rest.RestController; +import org.elasticsearch.rest.RestRequest; +import org.elasticsearch.rest.RestResponse; +import org.elasticsearch.rest.RestStatus; + +import java.io.IOException; + +import static org.elasticsearch.rest.RestRequest.Method.GET; +import static org.elasticsearch.rest.RestRequest.Method.POST; +import static org.elasticsearch.rest.RestStatus.NOT_FOUND; +import static org.elasticsearch.rest.RestStatus.OK; + +public class RestFieldCapabilitiesAction extends BaseRestHandler { + public RestFieldCapabilitiesAction(Settings settings, RestController controller) { + super(settings); + controller.registerHandler(GET, "/_field_caps", this); + controller.registerHandler(POST, "/_field_caps", this); + controller.registerHandler(GET, "/{index}/_field_caps", this); + controller.registerHandler(POST, "/{index}/_field_caps", this); + } + + @Override + public RestChannelConsumer prepareRequest(final RestRequest request, + final NodeClient client) throws IOException { + if (request.hasContentOrSourceParam() && request.hasParam("fields")) { + throw new IllegalArgumentException("can't specify a request body and [fields]" + + " request parameter, either specify a request body or the" + + " [fields] request parameter"); + } + final String[] indices = 
Strings.splitStringByCommaToArray(request.param("index")); + final FieldCapabilitiesRequest fieldRequest; + if (request.hasContentOrSourceParam()) { + try (XContentParser parser = request.contentOrSourceParamParser()) { + fieldRequest = FieldCapabilitiesRequest.parseFields(parser); + } + } else { + fieldRequest = new FieldCapabilitiesRequest(); + fieldRequest.fields(Strings.splitStringByCommaToArray(request.param("fields"))); + } + fieldRequest.indices(indices); + fieldRequest.indicesOptions( + IndicesOptions.fromRequest(request, fieldRequest.indicesOptions()) + ); + return channel -> client.fieldCaps(fieldRequest, + new RestBuilderListener(channel) { + @Override + public RestResponse buildResponse(FieldCapabilitiesResponse response, + XContentBuilder builder) throws Exception { + RestStatus status = OK; + builder.startObject(); + response.toXContent(builder, request); + builder.endObject(); + return new BytesRestResponse(status, builder); + } + }); + } +} diff --git a/core/src/main/java/org/elasticsearch/rest/action/admin/indices/RestClearIndicesCacheAction.java b/core/src/main/java/org/elasticsearch/rest/action/admin/indices/RestClearIndicesCacheAction.java index 6654deb76fb24..1544a01f9f09b 100644 --- a/core/src/main/java/org/elasticsearch/rest/action/admin/indices/RestClearIndicesCacheAction.java +++ b/core/src/main/java/org/elasticsearch/rest/action/admin/indices/RestClearIndicesCacheAction.java @@ -81,7 +81,7 @@ public static ClearIndicesCacheRequest fromRequest(final RestRequest request, Cl if (Fields.QUERY.match(entry.getKey())) { clearIndicesCacheRequest.queryCache(request.paramAsBoolean(entry.getKey(), clearIndicesCacheRequest.queryCache())); } - if (Fields.REQUEST_CACHE.match(entry.getKey())) { + if (Fields.REQUEST.match(entry.getKey())) { clearIndicesCacheRequest.requestCache(request.paramAsBoolean(entry.getKey(), clearIndicesCacheRequest.requestCache())); } if (Fields.FIELD_DATA.match(entry.getKey())) { @@ -100,7 +100,7 @@ public static ClearIndicesCacheRequest fromRequest(final RestRequest request, Cl public static class Fields { public static final ParseField QUERY = new ParseField("query", "filter", "filter_cache"); - public static final ParseField REQUEST_CACHE = new ParseField("request_cache"); + public static final ParseField REQUEST = new ParseField("request", "request_cache"); public static final ParseField FIELD_DATA = new ParseField("field_data", "fielddata"); public static final ParseField RECYCLER = new ParseField("recycler"); public static final ParseField FIELDS = new ParseField("fields"); diff --git a/core/src/main/java/org/elasticsearch/rest/action/admin/indices/RestValidateQueryAction.java b/core/src/main/java/org/elasticsearch/rest/action/admin/indices/RestValidateQueryAction.java index d2f0f5ec3d2af..0c2374045dd9b 100644 --- a/core/src/main/java/org/elasticsearch/rest/action/admin/indices/RestValidateQueryAction.java +++ b/core/src/main/java/org/elasticsearch/rest/action/admin/indices/RestValidateQueryAction.java @@ -62,6 +62,7 @@ public RestChannelConsumer prepareRequest(final RestRequest request, final NodeC validateQueryRequest.explain(request.paramAsBoolean("explain", false)); validateQueryRequest.types(Strings.splitStringByCommaToArray(request.param("type"))); validateQueryRequest.rewrite(request.paramAsBoolean("rewrite", false)); + validateQueryRequest.allShards(request.paramAsBoolean("all_shards", false)); Exception bodyParsingException = null; try { @@ -98,6 +99,9 @@ public RestResponse buildResponse(ValidateQueryResponse response, XContentBuilde if 
(explanation.getIndex() != null) { builder.field(INDEX_FIELD, explanation.getIndex()); } + if(explanation.getShard() >= 0) { + builder.field(SHARD_FIELD, explanation.getShard()); + } builder.field(VALID_FIELD, explanation.isValid()); if (explanation.getError() != null) { builder.field(ERROR_FIELD, explanation.getError()); @@ -132,6 +136,7 @@ private static BytesRestResponse buildErrorResponse(XContentBuilder builder, Str } private static final String INDEX_FIELD = "index"; + private static final String SHARD_FIELD = "shard"; private static final String VALID_FIELD = "valid"; private static final String EXPLANATIONS_FIELD = "explanations"; private static final String ERROR_FIELD = "error"; diff --git a/core/src/main/java/org/elasticsearch/search/SearchHit.java b/core/src/main/java/org/elasticsearch/search/SearchHit.java index 05558fd6f09e8..71b0b9127b246 100644 --- a/core/src/main/java/org/elasticsearch/search/SearchHit.java +++ b/core/src/main/java/org/elasticsearch/search/SearchHit.java @@ -493,7 +493,7 @@ public static SearchHit fromXContent(XContentParser parser) { public static void declareInnerHitsParseFields(ObjectParser, Void> parser) { declareMetaDataFields(parser); - parser.declareString((map, value) -> map.put(Fields._TYPE, value), new ParseField(Fields._TYPE)); + parser.declareString((map, value) -> map.put(Fields._TYPE, new Text(value)), new ParseField(Fields._TYPE)); parser.declareString((map, value) -> map.put(Fields._INDEX, value), new ParseField(Fields._INDEX)); parser.declareString((map, value) -> map.put(Fields._ID, value), new ParseField(Fields._ID)); parser.declareString((map, value) -> map.put(Fields._NODE, value), new ParseField(Fields._NODE)); @@ -524,11 +524,11 @@ public static void declareInnerHitsParseFields(ObjectParser, public static SearchHit createFromMap(Map values) { String id = get(Fields._ID, values, null); - String type = get(Fields._TYPE, values, null); + Text type = get(Fields._TYPE, values, null); NestedIdentity nestedIdentity = get(NestedIdentity._NESTED, values, null); Map fields = get(Fields.FIELDS, values, null); - SearchHit searchHit = new SearchHit(-1, id, new Text(type), nestedIdentity, fields); + SearchHit searchHit = new SearchHit(-1, id, type, nestedIdentity, fields); searchHit.index = get(Fields._INDEX, values, null); searchHit.score(get(Fields._SCORE, values, DEFAULT_SCORE)); searchHit.version(get(Fields._VERSION, values, -1L)); diff --git a/core/src/main/java/org/elasticsearch/search/SearchPhaseResult.java b/core/src/main/java/org/elasticsearch/search/SearchPhaseResult.java index 003f37616f543..ede9f525a5a14 100644 --- a/core/src/main/java/org/elasticsearch/search/SearchPhaseResult.java +++ b/core/src/main/java/org/elasticsearch/search/SearchPhaseResult.java @@ -20,12 +20,63 @@ package org.elasticsearch.search; import org.elasticsearch.common.io.stream.Streamable; +import org.elasticsearch.search.fetch.FetchSearchResult; +import org.elasticsearch.search.query.QuerySearchResult; +import org.elasticsearch.transport.TransportResponse; -public interface SearchPhaseResult extends Streamable { +/** + * This class is a base class for all search related results. It contains the shard target it + * was executed against, a shard index used to reference the result on the coordinating node + * and a request ID that is used to reference the request context on the executing node.
The + * request ID is particularly important since it is used to reference and maintain a context + * across search phases to ensure the same point in time snapshot is used for querying and + * fetching etc. + */ +public abstract class SearchPhaseResult extends TransportResponse implements Streamable { + + private SearchShardTarget searchShardTarget; + private int shardIndex = -1; + protected long requestId; + + /** + * Returns the results request ID that is used to reference the search context on the executing + * node + */ + public long getRequestId() { + return requestId; + } + + /** + * Returns the shard index in the context of the currently executing search request that is + * used for accounting on the coordinating node + */ + public int getShardIndex() { + assert shardIndex != -1 : "shardIndex is not set"; + return shardIndex; + } + + public SearchShardTarget getSearchShardTarget() { + return searchShardTarget; + } + + public void setSearchShardTarget(SearchShardTarget shardTarget) { + this.searchShardTarget = shardTarget; + } - long id(); + public void setShardIndex(int shardIndex) { + assert shardIndex >= 0 : "shardIndex must be >= 0 but was: " + shardIndex; + this.shardIndex = shardIndex; + } - SearchShardTarget shardTarget(); + /** + * Returns the query result iff it's included in this response otherwise null + */ + public QuerySearchResult queryResult() { + return null; + } - void shardTarget(SearchShardTarget shardTarget); + /** + * Returns the fetch result iff it's included in this response otherwise null + */ + public FetchSearchResult fetchResult() { return null; } } diff --git a/core/src/main/java/org/elasticsearch/search/SearchService.java b/core/src/main/java/org/elasticsearch/search/SearchService.java index 3d093e5ae7282..a035228195235 100644 --- a/core/src/main/java/org/elasticsearch/search/SearchService.java +++ b/core/src/main/java/org/elasticsearch/search/SearchService.java @@ -75,7 +75,6 @@ import org.elasticsearch.search.query.QueryPhase; import org.elasticsearch.search.query.QuerySearchRequest; import org.elasticsearch.search.query.QuerySearchResult; -import org.elasticsearch.search.query.QuerySearchResultProvider; import org.elasticsearch.search.query.ScrollQuerySearchResult; import org.elasticsearch.search.rescore.RescoreBuilder; import org.elasticsearch.search.searchafter.SearchAfterBuilder; @@ -248,7 +247,7 @@ private void loadOrExecuteQueryPhase(final ShardSearchRequest request, final Sea } } - public QuerySearchResultProvider executeQueryPhase(ShardSearchRequest request, SearchTask task) throws IOException { + public SearchPhaseResult executeQueryPhase(ShardSearchRequest request, SearchTask task) throws IOException { final SearchContext context = createAndPutContext(request); final SearchOperationListener operationListener = context.indexShard().getSearchOperationListener(); context.incRef(); diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/InternalAggregation.java b/core/src/main/java/org/elasticsearch/search/aggregations/InternalAggregation.java index 563a958109be2..4fb6a434c84d3 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/InternalAggregation.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/InternalAggregation.java @@ -185,7 +185,7 @@ public final XContentBuilder toXContent(XContentBuilder builder, Params params) builder.startObject(getName()); } if (this.metaData != null) { - builder.field(CommonFields.META); + builder.field(CommonFields.META.getPreferredName()); 
builder.map(this.metaData); } doXContentBody(builder, params); @@ -240,18 +240,17 @@ protected boolean doEquals(Object obj) { * Common xcontent fields that are shared among addAggregation */ public static final class CommonFields extends ParseField.CommonFields { - // todo convert these to ParseField - public static final String META = "meta"; - public static final String BUCKETS = "buckets"; - public static final String VALUE = "value"; - public static final String VALUES = "values"; - public static final String VALUE_AS_STRING = "value_as_string"; - public static final String DOC_COUNT = "doc_count"; - public static final String KEY = "key"; - public static final String KEY_AS_STRING = "key_as_string"; - public static final String FROM = "from"; - public static final String FROM_AS_STRING = "from_as_string"; - public static final String TO = "to"; - public static final String TO_AS_STRING = "to_as_string"; + public static final ParseField META = new ParseField("meta"); + public static final ParseField BUCKETS = new ParseField("buckets"); + public static final ParseField VALUE = new ParseField("value"); + public static final ParseField VALUES = new ParseField("values"); + public static final ParseField VALUE_AS_STRING = new ParseField("value_as_string"); + public static final ParseField DOC_COUNT = new ParseField("doc_count"); + public static final ParseField KEY = new ParseField("key"); + public static final ParseField KEY_AS_STRING = new ParseField("key_as_string"); + public static final ParseField FROM = new ParseField("from"); + public static final ParseField FROM_AS_STRING = new ParseField("from_as_string"); + public static final ParseField TO = new ParseField("to"); + public static final ParseField TO_AS_STRING = new ParseField("to_as_string"); } } diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/InternalSingleBucketAggregation.java b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/InternalSingleBucketAggregation.java index 7ce66e4ae4476..490c7a3687844 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/InternalSingleBucketAggregation.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/InternalSingleBucketAggregation.java @@ -131,7 +131,7 @@ public Object getProperty(List path) { @Override public XContentBuilder doXContentBody(XContentBuilder builder, Params params) throws IOException { - builder.field(CommonFields.DOC_COUNT, docCount); + builder.field(CommonFields.DOC_COUNT.getPreferredName(), docCount); aggregations.toXContentInternal(builder, params); return builder; } diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/adjacency/InternalAdjacencyMatrix.java b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/adjacency/InternalAdjacencyMatrix.java index 3a7dc284ab94c..3d0839b7fb477 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/adjacency/InternalAdjacencyMatrix.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/adjacency/InternalAdjacencyMatrix.java @@ -105,8 +105,8 @@ InternalBucket reduce(List buckets, ReduceContext context) { @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { builder.startObject(); - builder.field(CommonFields.KEY, key); - builder.field(CommonFields.DOC_COUNT, docCount); + builder.field(CommonFields.KEY.getPreferredName(), key); + builder.field(CommonFields.DOC_COUNT.getPreferredName(), docCount); 
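The recurring change in these aggregation classes is mechanical: the CommonFields constants are now ParseField instances, so XContent writers emit the preferred name explicitly instead of passing the constant as a raw string. A tiny sketch of the pattern, illustrative only:

```java
import java.io.IOException;

import org.elasticsearch.common.ParseField;
import org.elasticsearch.common.xcontent.XContentBuilder;

// Illustrative: a ParseField carries a preferred name (plus optional deprecated aliases);
// serialization uses the preferred name, while parsing can still accept the aliases.
final class DocCountFieldExample {
    static final ParseField DOC_COUNT = new ParseField("doc_count");

    static XContentBuilder writeDocCount(XContentBuilder builder, long docCount) throws IOException {
        return builder.field(DOC_COUNT.getPreferredName(), docCount);
    }
}
```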
aggregations.toXContentInternal(builder, params); builder.endObject(); return builder; @@ -207,7 +207,7 @@ public InternalAggregation doReduce(List aggregations, Redu @Override public XContentBuilder doXContentBody(XContentBuilder builder, Params params) throws IOException { - builder.startArray(CommonFields.BUCKETS); + builder.startArray(CommonFields.BUCKETS.getPreferredName()); for (InternalBucket bucket : buckets) { bucket.toXContent(builder, params); } diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/filters/InternalFilters.java b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/filters/InternalFilters.java index bd33f1608bc8b..5153122272564 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/filters/InternalFilters.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/filters/InternalFilters.java @@ -108,7 +108,7 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws } else { builder.startObject(); } - builder.field(CommonFields.DOC_COUNT, docCount); + builder.field(CommonFields.DOC_COUNT.getPreferredName(), docCount); aggregations.toXContentInternal(builder, params); builder.endObject(); return builder; @@ -210,9 +210,9 @@ public InternalAggregation doReduce(List aggregations, Redu @Override public XContentBuilder doXContentBody(XContentBuilder builder, Params params) throws IOException { if (keyed) { - builder.startObject(CommonFields.BUCKETS); + builder.startObject(CommonFields.BUCKETS.getPreferredName()); } else { - builder.startArray(CommonFields.BUCKETS); + builder.startArray(CommonFields.BUCKETS.getPreferredName()); } for (InternalBucket bucket : buckets) { bucket.toXContent(builder, params); diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/InternalGeoHashGrid.java b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/InternalGeoHashGrid.java index b23faeba339e6..c1a6ad8be9ac5 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/InternalGeoHashGrid.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/InternalGeoHashGrid.java @@ -120,8 +120,8 @@ public Bucket reduce(List buckets, ReduceContext context) { @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { builder.startObject(); - builder.field(CommonFields.KEY, getKeyAsString()); - builder.field(CommonFields.DOC_COUNT, docCount); + builder.field(CommonFields.KEY.getPreferredName(), getKeyAsString()); + builder.field(CommonFields.DOC_COUNT.getPreferredName(), docCount); aggregations.toXContentInternal(builder, params); builder.endObject(); return builder; @@ -223,7 +223,7 @@ public InternalGeoHashGrid doReduce(List aggregations, Redu @Override public XContentBuilder doXContentBody(XContentBuilder builder, Params params) throws IOException { - builder.startArray(CommonFields.BUCKETS); + builder.startArray(CommonFields.BUCKETS.getPreferredName()); for (Bucket bucket : buckets) { bucket.toXContent(builder, params); } diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalDateHistogram.java b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalDateHistogram.java index 19e5dc1ec897d..9815fdd214437 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalDateHistogram.java +++ 
b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalDateHistogram.java @@ -142,10 +142,10 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws builder.startObject(); } if (format != DocValueFormat.RAW) { - builder.field(CommonFields.KEY_AS_STRING, keyAsString); + builder.field(CommonFields.KEY_AS_STRING.getPreferredName(), keyAsString); } - builder.field(CommonFields.KEY, key); - builder.field(CommonFields.DOC_COUNT, docCount); + builder.field(CommonFields.KEY.getPreferredName(), key); + builder.field(CommonFields.DOC_COUNT.getPreferredName(), docCount); aggregations.toXContentInternal(builder, params); builder.endObject(); return builder; @@ -437,9 +437,9 @@ public InternalAggregation doReduce(List aggregations, Redu @Override public XContentBuilder doXContentBody(XContentBuilder builder, Params params) throws IOException { if (keyed) { - builder.startObject(CommonFields.BUCKETS); + builder.startObject(CommonFields.BUCKETS.getPreferredName()); } else { - builder.startArray(CommonFields.BUCKETS); + builder.startArray(CommonFields.BUCKETS.getPreferredName()); } for (Bucket bucket : buckets) { bucket.toXContent(builder, params); diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalHistogram.java b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalHistogram.java index e6e23d3a615a1..046551fc58a63 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalHistogram.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalHistogram.java @@ -138,10 +138,10 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws builder.startObject(); } if (format != DocValueFormat.RAW) { - builder.field(CommonFields.KEY_AS_STRING, keyAsString); + builder.field(CommonFields.KEY_AS_STRING.getPreferredName(), keyAsString); } - builder.field(CommonFields.KEY, key); - builder.field(CommonFields.DOC_COUNT, docCount); + builder.field(CommonFields.KEY.getPreferredName(), key); + builder.field(CommonFields.DOC_COUNT.getPreferredName(), docCount); aggregations.toXContentInternal(builder, params); builder.endObject(); return builder; @@ -421,9 +421,9 @@ public InternalAggregation doReduce(List aggregations, Redu @Override public XContentBuilder doXContentBody(XContentBuilder builder, Params params) throws IOException { if (keyed) { - builder.startObject(CommonFields.BUCKETS); + builder.startObject(CommonFields.BUCKETS.getPreferredName()); } else { - builder.startArray(CommonFields.BUCKETS); + builder.startArray(CommonFields.BUCKETS.getPreferredName()); } for (Bucket bucket : buckets) { bucket.toXContent(builder, params); diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/range/InternalBinaryRange.java b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/range/InternalBinaryRange.java index bb31d9a24804a..ea51586336311 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/range/InternalBinaryRange.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/range/InternalBinaryRange.java @@ -133,16 +133,16 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws } else { builder.startObject(); if (key != null) { - builder.field(CommonFields.KEY, key); + builder.field(CommonFields.KEY.getPreferredName(), key); } } if (from != null) { - builder.field(CommonFields.FROM, getFrom()); + 
builder.field(CommonFields.FROM.getPreferredName(), getFrom()); } if (to != null) { - builder.field(CommonFields.TO, getTo()); + builder.field(CommonFields.TO.getPreferredName(), getTo()); } - builder.field(CommonFields.DOC_COUNT, docCount); + builder.field(CommonFields.DOC_COUNT.getPreferredName(), docCount); aggregations.toXContentInternal(builder, params); builder.endObject(); return builder; @@ -270,9 +270,9 @@ public InternalAggregation doReduce(List aggregations, Redu public XContentBuilder doXContentBody(XContentBuilder builder, Params params) throws IOException { if (keyed) { - builder.startObject(CommonFields.BUCKETS); + builder.startObject(CommonFields.BUCKETS.getPreferredName()); } else { - builder.startArray(CommonFields.BUCKETS); + builder.startArray(CommonFields.BUCKETS.getPreferredName()); } for (Bucket range : buckets) { range.toXContent(builder, params); diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/range/InternalRange.java b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/range/InternalRange.java index a2228b7a27a21..3d0cf1b5a8c7d 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/range/InternalRange.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/range/InternalRange.java @@ -141,21 +141,21 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws builder.startObject(key); } else { builder.startObject(); - builder.field(CommonFields.KEY, key); + builder.field(CommonFields.KEY.getPreferredName(), key); } if (!Double.isInfinite(from)) { - builder.field(CommonFields.FROM, from); + builder.field(CommonFields.FROM.getPreferredName(), from); if (format != DocValueFormat.RAW) { - builder.field(CommonFields.FROM_AS_STRING, format.format(from)); + builder.field(CommonFields.FROM_AS_STRING.getPreferredName(), format.format(from)); } } if (!Double.isInfinite(to)) { - builder.field(CommonFields.TO, to); + builder.field(CommonFields.TO.getPreferredName(), to); if (format != DocValueFormat.RAW) { - builder.field(CommonFields.TO_AS_STRING, format.format(to)); + builder.field(CommonFields.TO_AS_STRING.getPreferredName(), format.format(to)); } } - builder.field(CommonFields.DOC_COUNT, docCount); + builder.field(CommonFields.DOC_COUNT.getPreferredName(), docCount); aggregations.toXContentInternal(builder, params); builder.endObject(); return builder; @@ -302,9 +302,9 @@ public InternalAggregation doReduce(List aggregations, Redu @Override public XContentBuilder doXContentBody(XContentBuilder builder, Params params) throws IOException { if (keyed) { - builder.startObject(CommonFields.BUCKETS); + builder.startObject(CommonFields.BUCKETS.getPreferredName()); } else { - builder.startArray(CommonFields.BUCKETS); + builder.startArray(CommonFields.BUCKETS.getPreferredName()); } for (B range : ranges) { range.toXContent(builder, params); diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/sampler/UnmappedSampler.java b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/sampler/UnmappedSampler.java index 6eb78c31c5ce4..5cabb618b8a01 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/sampler/UnmappedSampler.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/sampler/UnmappedSampler.java @@ -59,7 +59,7 @@ public InternalAggregation doReduce(List aggregations, Redu @Override public XContentBuilder doXContentBody(XContentBuilder builder, Params params) throws IOException { - 
builder.field(InternalAggregation.CommonFields.DOC_COUNT, 0); + builder.field(InternalAggregation.CommonFields.DOC_COUNT.getPreferredName(), 0); return builder; } diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/SignificantLongTerms.java b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/SignificantLongTerms.java index ff2ea5b045b05..6a714a6b035d6 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/SignificantLongTerms.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/SignificantLongTerms.java @@ -99,11 +99,11 @@ Bucket newBucket(long subsetDf, long subsetSize, long supersetDf, long supersetS @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { builder.startObject(); - builder.field(CommonFields.KEY, term); + builder.field(CommonFields.KEY.getPreferredName(), term); if (format != DocValueFormat.RAW) { - builder.field(CommonFields.KEY_AS_STRING, format.format(term)); + builder.field(CommonFields.KEY_AS_STRING.getPreferredName(), format.format(term)); } - builder.field(CommonFields.DOC_COUNT, getDocCount()); + builder.field(CommonFields.DOC_COUNT.getPreferredName(), getDocCount()); builder.field("score", score); builder.field("bg_count", supersetDf); aggregations.toXContentInternal(builder, params); @@ -162,7 +162,7 @@ protected SignificantLongTerms create(long subsetSize, long supersetSize, List aggregations, Redu @Override public XContentBuilder doXContentBody(XContentBuilder builder, Params params) throws IOException { - builder.startArray(CommonFields.BUCKETS).endArray(); + builder.startArray(CommonFields.BUCKETS.getPreferredName()).endArray(); return builder; } diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/DoubleTerms.java b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/DoubleTerms.java index 86bccbed57527..ae18cb59d9523 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/DoubleTerms.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/DoubleTerms.java @@ -86,18 +86,11 @@ Bucket newBucket(long docCount, InternalAggregations aggs, long docCountError) { } @Override - public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { - builder.startObject(); - builder.field(CommonFields.KEY, term); + protected final XContentBuilder keyToXContent(XContentBuilder builder) throws IOException { + builder.field(CommonFields.KEY.getPreferredName(), term); if (format != DocValueFormat.RAW) { - builder.field(CommonFields.KEY_AS_STRING, format.format(term)); + builder.field(CommonFields.KEY_AS_STRING.getPreferredName(), format.format(term)); } - builder.field(CommonFields.DOC_COUNT, getDocCount()); - if (showDocCountError) { - builder.field(InternalTerms.DOC_COUNT_ERROR_UPPER_BOUND_FIELD_NAME, getDocCountError()); - } - aggregations.toXContentInternal(builder, params); - builder.endObject(); return builder; } @@ -149,18 +142,6 @@ protected DoubleTerms create(String name, List buckets, long docCountErr shardSize, showTermDocCountError, otherDocCount, buckets, docCountError); } - @Override - public XContentBuilder doXContentBody(XContentBuilder builder, Params params) throws IOException { - builder.field(InternalTerms.DOC_COUNT_ERROR_UPPER_BOUND_FIELD_NAME, docCountError); - builder.field(SUM_OF_OTHER_DOC_COUNTS, otherDocCount); - 
builder.startArray(CommonFields.BUCKETS); - for (Bucket bucket : buckets) { - bucket.toXContent(builder, params); - } - builder.endArray(); - return builder; - } - @Override protected Bucket[] createBucketsArray(int size) { return new Bucket[size]; @@ -171,7 +152,7 @@ public InternalAggregation doReduce(List aggregations, Redu boolean promoteToDouble = false; for (InternalAggregation agg : aggregations) { if (agg instanceof LongTerms && ((LongTerms) agg).format == DocValueFormat.RAW) { - /** + /* * this terms agg mixes longs and doubles, we must promote longs to doubles to make the internal aggs * compatible */ diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/GlobalOrdinalsStringTermsAggregator.java b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/GlobalOrdinalsStringTermsAggregator.java index 5da8e7c3337ba..8e9d779cd08a1 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/GlobalOrdinalsStringTermsAggregator.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/GlobalOrdinalsStringTermsAggregator.java @@ -246,7 +246,7 @@ protected void writeTermTo(StreamOutput out) throws IOException { } @Override - public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + protected final XContentBuilder keyToXContent(XContentBuilder builder) throws IOException { throw new UnsupportedOperationException(); } } diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/InternalMappedTerms.java b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/InternalMappedTerms.java index e784d9bc72054..21c9c461d0f07 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/InternalMappedTerms.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/InternalMappedTerms.java @@ -21,6 +21,7 @@ import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.search.DocValueFormat; import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator; @@ -127,4 +128,9 @@ protected boolean doEquals(Object obj) { protected int doHashCode() { return Objects.hash(super.doHashCode(), buckets, format, otherDocCount, showTermDocCountError, shardSize); } + + @Override + public final XContentBuilder doXContentBody(XContentBuilder builder, Params params) throws IOException { + return doXContentCommon(builder, params, docCountError, otherDocCount, buckets); + } } diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/InternalTerms.java b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/InternalTerms.java index 3f7844683ca35..0fb4ceea33a4e 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/InternalTerms.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/InternalTerms.java @@ -18,9 +18,11 @@ */ package org.elasticsearch.search.aggregations.bucket.terms; +import org.elasticsearch.common.ParseField; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.xcontent.ToXContent; +import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.search.DocValueFormat; import org.elasticsearch.search.aggregations.AggregationExecutionException; import 
org.elasticsearch.search.aggregations.Aggregations; @@ -43,10 +45,11 @@ public abstract class InternalTerms, B extends InternalTerms.Bucket> extends InternalMultiBucketAggregation implements Terms, ToXContent { - protected static final String DOC_COUNT_ERROR_UPPER_BOUND_FIELD_NAME = "doc_count_error_upper_bound"; - protected static final String SUM_OF_OTHER_DOC_COUNTS = "sum_other_doc_count"; + protected static final ParseField DOC_COUNT_ERROR_UPPER_BOUND_FIELD_NAME = new ParseField("doc_count_error_upper_bound"); + protected static final ParseField SUM_OF_OTHER_DOC_COUNTS = new ParseField("sum_other_doc_count"); public abstract static class Bucket> extends Terms.Bucket { + /** * Reads a bucket. Should be a constructor reference. */ @@ -141,6 +144,21 @@ public B reduce(List buckets, ReduceContext context) { return newBucket(docCount, aggs, docCountError); } + @Override + public final XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + keyToXContent(builder); + builder.field(CommonFields.DOC_COUNT.getPreferredName(), getDocCount()); + if (showDocCountError) { + builder.field(InternalTerms.DOC_COUNT_ERROR_UPPER_BOUND_FIELD_NAME.getPreferredName(), getDocCountError()); + } + aggregations.toXContentInternal(builder, params); + builder.endObject(); + return builder; + } + + protected abstract XContentBuilder keyToXContent(XContentBuilder builder) throws IOException; + @Override public boolean equals(Object obj) { if (obj == null || getClass() != obj.getClass()) { @@ -319,4 +337,16 @@ protected boolean doEquals(Object obj) { protected int doHashCode() { return Objects.hash(minDocCount, order, requiredSize); } + + protected static XContentBuilder doXContentCommon(XContentBuilder builder, Params params, + long docCountError, long otherDocCount, List buckets) throws IOException { + builder.field(DOC_COUNT_ERROR_UPPER_BOUND_FIELD_NAME.getPreferredName(), docCountError); + builder.field(SUM_OF_OTHER_DOC_COUNTS.getPreferredName(), otherDocCount); + builder.startArray(CommonFields.BUCKETS.getPreferredName()); + for (Bucket bucket : buckets) { + bucket.toXContent(builder, params); + } + builder.endArray(); + return builder; + } } diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/LongTerms.java b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/LongTerms.java index f3339de673892..98aa4825ee7a3 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/LongTerms.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/LongTerms.java @@ -86,18 +86,11 @@ Bucket newBucket(long docCount, InternalAggregations aggs, long docCountError) { } @Override - public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { - builder.startObject(); - builder.field(CommonFields.KEY, term); + protected final XContentBuilder keyToXContent(XContentBuilder builder) throws IOException { + builder.field(CommonFields.KEY.getPreferredName(), term); if (format != DocValueFormat.RAW) { - builder.field(CommonFields.KEY_AS_STRING, format.format(term)); + builder.field(CommonFields.KEY_AS_STRING.getPreferredName(), format.format(term)); } - builder.field(CommonFields.DOC_COUNT, getDocCount()); - if (showDocCountError) { - builder.field(InternalTerms.DOC_COUNT_ERROR_UPPER_BOUND_FIELD_NAME, getDocCountError()); - } - aggregations.toXContentInternal(builder, params); - builder.endObject(); return builder; } @@ -149,18 +142,6 @@ protected 
LongTerms create(String name, List buckets, long docCountError showTermDocCountError, otherDocCount, buckets, docCountError); } - @Override - public XContentBuilder doXContentBody(XContentBuilder builder, Params params) throws IOException { - builder.field(InternalTerms.DOC_COUNT_ERROR_UPPER_BOUND_FIELD_NAME, docCountError); - builder.field(SUM_OF_OTHER_DOC_COUNTS, otherDocCount); - builder.startArray(CommonFields.BUCKETS); - for (Bucket bucket : buckets) { - bucket.toXContent(builder, params); - } - builder.endArray(); - return builder; - } - @Override protected Bucket[] createBucketsArray(int size) { return new Bucket[size]; diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/StringTerms.java b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/StringTerms.java index 3fd41dc3aed36..b48c443fac93a 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/StringTerms.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/StringTerms.java @@ -85,16 +85,8 @@ Bucket newBucket(long docCount, InternalAggregations aggs, long docCountError) { } @Override - public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { - builder.startObject(); - builder.field(CommonFields.KEY, getKeyAsString()); - builder.field(CommonFields.DOC_COUNT, getDocCount()); - if (showDocCountError) { - builder.field(InternalTerms.DOC_COUNT_ERROR_UPPER_BOUND_FIELD_NAME, getDocCountError()); - } - aggregations.toXContentInternal(builder, params); - builder.endObject(); - return builder; + protected final XContentBuilder keyToXContent(XContentBuilder builder) throws IOException { + return builder.field(CommonFields.KEY.getPreferredName(), getKeyAsString()); } @Override @@ -145,18 +137,6 @@ protected StringTerms create(String name, List buckets, long docCountErr showTermDocCountError, otherDocCount, buckets, docCountError); } - @Override - public XContentBuilder doXContentBody(XContentBuilder builder, Params params) throws IOException { - builder.field(InternalTerms.DOC_COUNT_ERROR_UPPER_BOUND_FIELD_NAME, docCountError); - builder.field(SUM_OF_OTHER_DOC_COUNTS, otherDocCount); - builder.startArray(CommonFields.BUCKETS); - for (Bucket bucket : buckets) { - bucket.toXContent(builder, params); - } - builder.endArray(); - return builder; - } - @Override protected Bucket[] createBucketsArray(int size) { return new Bucket[size]; diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/UnmappedTerms.java b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/UnmappedTerms.java index 4db72b873a9f8..bdc95b2e87e96 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/UnmappedTerms.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/UnmappedTerms.java @@ -27,6 +27,7 @@ import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator; import java.io.IOException; +import java.util.Collections; import java.util.List; import java.util.Map; @@ -102,11 +103,8 @@ public InternalAggregation doReduce(List aggregations, Redu } @Override - public XContentBuilder doXContentBody(XContentBuilder builder, Params params) throws IOException { - builder.field(InternalTerms.DOC_COUNT_ERROR_UPPER_BOUND_FIELD_NAME, 0); - builder.field(SUM_OF_OTHER_DOC_COUNTS, 0); - builder.startArray(CommonFields.BUCKETS).endArray(); - return builder; + public final XContentBuilder doXContentBody(XContentBuilder builder, Params params) 
throws IOException { + return doXContentCommon(builder, params, 0, 0, Collections.emptyList()); } @Override diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/metrics/avg/InternalAvg.java b/core/src/main/java/org/elasticsearch/search/aggregations/metrics/avg/InternalAvg.java index 2c44a08b8a489..d6fe2e6a93e45 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/metrics/avg/InternalAvg.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/metrics/avg/InternalAvg.java @@ -96,9 +96,9 @@ public InternalAvg doReduce(List aggregations, ReduceContex @Override public XContentBuilder doXContentBody(XContentBuilder builder, Params params) throws IOException { - builder.field(CommonFields.VALUE, count != 0 ? getValue() : null); + builder.field(CommonFields.VALUE.getPreferredName(), count != 0 ? getValue() : null); if (count != 0 && format != DocValueFormat.RAW) { - builder.field(CommonFields.VALUE_AS_STRING, format.format(getValue())); + builder.field(CommonFields.VALUE_AS_STRING.getPreferredName(), format.format(getValue())); } return builder; } @@ -115,5 +115,4 @@ protected boolean doEquals(Object obj) { Objects.equals(count, other.count) && Objects.equals(format.getWriteableName(), other.format.getWriteableName()); } - } diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/metrics/cardinality/HyperLogLogPlusPlus.java b/core/src/main/java/org/elasticsearch/search/aggregations/metrics/cardinality/HyperLogLogPlusPlus.java index 42b4561e07b3c..6425cc3b68a2e 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/metrics/cardinality/HyperLogLogPlusPlus.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/metrics/cardinality/HyperLogLogPlusPlus.java @@ -34,6 +34,9 @@ import java.io.IOException; import java.nio.ByteBuffer; import java.nio.ByteOrder; +import java.util.HashSet; +import java.util.Objects; +import java.util.Set; /** * Hyperloglog++ counter, implemented based on pseudo code from @@ -420,6 +423,32 @@ public void close() { Releasables.close(runLens, hashSet.sizes); } + private Set getComparableData(long bucket) { + Set values = new HashSet<>(); + if (algorithm.get(bucket) == LINEAR_COUNTING) { + try (IntArray hashSetValues = hashSet.values(bucket)) { + for (long i = 0; i < hashSetValues.size(); i++) { + values.add(hashSetValues.get(i)); + } + } + } else { + for (long i = 0; i < runLens.size(); i++) { + values.add(runLens.get((bucket << p) + i)); + } + } + return values; + } + + public int hashCode(long bucket) { + return Objects.hash(p, algorithm.get(bucket), getComparableData(bucket)); + } + + public boolean equals(long bucket, HyperLogLogPlusPlus other) { + return Objects.equals(p, other.p) && + Objects.equals(algorithm.get(bucket), other.algorithm.get(bucket)) && + Objects.equals(getComparableData(bucket), other.getComparableData(bucket)); + } + /** * We are actually using HyperLogLog's runLens array but interpreting it as a hash set * for linear counting.
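The new getComparableData, hashCode(long bucket) and equals(long bucket, other) helpers give the HyperLogLog++ sketch a value-based, per-bucket notion of equality, which the InternalCardinality change that follows uses for its doEquals/doHashCode. Roughly, two sketches that observed the same hashed values for a bucket should compare equal. A hedged usage sketch; the constructor and collect signatures, and the class being reachable from outside its package, are assumptions about the surrounding codebase rather than part of this change:

```java
import org.elasticsearch.common.util.BigArrays;
import org.elasticsearch.search.aggregations.metrics.cardinality.HyperLogLogPlusPlus;

class CardinalityEqualitySketch {
    // Illustrative only: two single-bucket sketches fed the same pre-hashed value
    // should agree under the new bucket-wise equals/hashCode helpers.
    static boolean sameState() {
        HyperLogLogPlusPlus a = new HyperLogLogPlusPlus(14, BigArrays.NON_RECYCLING_INSTANCE, 1);
        HyperLogLogPlusPlus b = new HyperLogLogPlusPlus(14, BigArrays.NON_RECYCLING_INSTANCE, 1);
        long alreadyHashedValue = 0x9E3779B97F4A7C15L; // stands in for a real 64-bit murmur hash
        a.collect(0, alreadyHashedValue);
        b.collect(0, alreadyHashedValue);
        return a.equals(0, b) && a.hashCode(0) == b.hashCode(0);
    }
}
```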
diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/metrics/cardinality/InternalCardinality.java b/core/src/main/java/org/elasticsearch/search/aggregations/metrics/cardinality/InternalCardinality.java index 02953abc2daa5..028e97a69ff82 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/metrics/cardinality/InternalCardinality.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/metrics/cardinality/InternalCardinality.java @@ -109,8 +109,22 @@ public void merge(InternalCardinality other) { @Override public XContentBuilder doXContentBody(XContentBuilder builder, Params params) throws IOException { final long cardinality = getValue(); - builder.field(CommonFields.VALUE, cardinality); + builder.field(CommonFields.VALUE.getPreferredName(), cardinality); return builder; } + @Override + protected int doHashCode() { + return counts.hashCode(0); + } + + @Override + protected boolean doEquals(Object obj) { + InternalCardinality other = (InternalCardinality) obj; + return counts.equals(0, other.counts); + } + + HyperLogLogPlusPlus getState() { + return counts; + } } diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/metrics/max/InternalMax.java b/core/src/main/java/org/elasticsearch/search/aggregations/metrics/max/InternalMax.java index c1634fdce4b6e..112d379362700 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/metrics/max/InternalMax.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/metrics/max/InternalMax.java @@ -83,9 +83,9 @@ public InternalMax doReduce(List aggregations, ReduceContex @Override public XContentBuilder doXContentBody(XContentBuilder builder, Params params) throws IOException { boolean hasValue = !Double.isInfinite(max); - builder.field(CommonFields.VALUE, hasValue ? max : null); + builder.field(CommonFields.VALUE.getPreferredName(), hasValue ? max : null); if (hasValue && format != DocValueFormat.RAW) { - builder.field(CommonFields.VALUE_AS_STRING, format.format(max)); + builder.field(CommonFields.VALUE_AS_STRING.getPreferredName(), format.format(max)); } return builder; } diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/metrics/min/InternalMin.java b/core/src/main/java/org/elasticsearch/search/aggregations/metrics/min/InternalMin.java index 46481d1837f6b..dcf180dde89a4 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/metrics/min/InternalMin.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/metrics/min/InternalMin.java @@ -83,9 +83,9 @@ public InternalMin doReduce(List aggregations, ReduceContex @Override public XContentBuilder doXContentBody(XContentBuilder builder, Params params) throws IOException { boolean hasValue = !Double.isInfinite(min); - builder.field(CommonFields.VALUE, hasValue ? min : null); + builder.field(CommonFields.VALUE.getPreferredName(), hasValue ? 
min : null); if (hasValue && format != DocValueFormat.RAW) { - builder.field(CommonFields.VALUE_AS_STRING, format.format(min)); + builder.field(CommonFields.VALUE_AS_STRING.getPreferredName(), format.format(min)); } return builder; } diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/metrics/percentiles/hdr/AbstractInternalHDRPercentiles.java b/core/src/main/java/org/elasticsearch/search/aggregations/metrics/percentiles/hdr/AbstractInternalHDRPercentiles.java index 892c6f2987ef1..29b8c38062cdd 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/metrics/percentiles/hdr/AbstractInternalHDRPercentiles.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/metrics/percentiles/hdr/AbstractInternalHDRPercentiles.java @@ -115,7 +115,7 @@ protected abstract AbstractInternalHDRPercentiles createReduced(String name, dou @Override public XContentBuilder doXContentBody(XContentBuilder builder, Params params) throws IOException { if (keyed) { - builder.startObject(CommonFields.VALUES); + builder.startObject(CommonFields.VALUES.getPreferredName()); for(int i = 0; i < keys.length; ++i) { String key = String.valueOf(keys[i]); double value = value(keys[i]); @@ -126,14 +126,14 @@ public XContentBuilder doXContentBody(XContentBuilder builder, Params params) th } builder.endObject(); } else { - builder.startArray(CommonFields.VALUES); + builder.startArray(CommonFields.VALUES.getPreferredName()); for (int i = 0; i < keys.length; i++) { double value = value(keys[i]); builder.startObject(); - builder.field(CommonFields.KEY, keys[i]); - builder.field(CommonFields.VALUE, value); + builder.field(CommonFields.KEY.getPreferredName(), keys[i]); + builder.field(CommonFields.VALUE.getPreferredName(), value); if (format != DocValueFormat.RAW) { - builder.field(CommonFields.VALUE_AS_STRING, format.format(value)); + builder.field(CommonFields.VALUE_AS_STRING.getPreferredName(), format.format(value)); } builder.endObject(); } diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/metrics/percentiles/tdigest/AbstractInternalTDigestPercentiles.java b/core/src/main/java/org/elasticsearch/search/aggregations/metrics/percentiles/tdigest/AbstractInternalTDigestPercentiles.java index 2d555edfb145c..687b5533ecb31 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/metrics/percentiles/tdigest/AbstractInternalTDigestPercentiles.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/metrics/percentiles/tdigest/AbstractInternalTDigestPercentiles.java @@ -98,7 +98,7 @@ protected abstract AbstractInternalTDigestPercentiles createReduced(String name, @Override public XContentBuilder doXContentBody(XContentBuilder builder, Params params) throws IOException { if (keyed) { - builder.startObject(CommonFields.VALUES); + builder.startObject(CommonFields.VALUES.getPreferredName()); for(int i = 0; i < keys.length; ++i) { String key = String.valueOf(keys[i]); double value = value(keys[i]); @@ -109,14 +109,14 @@ public XContentBuilder doXContentBody(XContentBuilder builder, Params params) th } builder.endObject(); } else { - builder.startArray(CommonFields.VALUES); + builder.startArray(CommonFields.VALUES.getPreferredName()); for (int i = 0; i < keys.length; i++) { double value = value(keys[i]); builder.startObject(); - builder.field(CommonFields.KEY, keys[i]); - builder.field(CommonFields.VALUE, value); + builder.field(CommonFields.KEY.getPreferredName(), keys[i]); + builder.field(CommonFields.VALUE.getPreferredName(), value); if (format != 
DocValueFormat.RAW) { - builder.field(CommonFields.VALUE_AS_STRING, format.format(value)); + builder.field(CommonFields.VALUE_AS_STRING.getPreferredName(), format.format(value)); } builder.endObject(); } diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/metrics/sum/InternalSum.java b/core/src/main/java/org/elasticsearch/search/aggregations/metrics/sum/InternalSum.java index a93b41543ce6b..576f1ea52cf6b 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/metrics/sum/InternalSum.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/metrics/sum/InternalSum.java @@ -82,9 +82,9 @@ public InternalSum doReduce(List aggregations, ReduceContex @Override public XContentBuilder doXContentBody(XContentBuilder builder, Params params) throws IOException { - builder.field(CommonFields.VALUE, sum); + builder.field(CommonFields.VALUE.getPreferredName(), sum); if (format != DocValueFormat.RAW) { - builder.field(CommonFields.VALUE_AS_STRING, format.format(sum)); + builder.field(CommonFields.VALUE_AS_STRING.getPreferredName(), format.format(sum)); } return builder; } diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/metrics/valuecount/InternalValueCount.java b/core/src/main/java/org/elasticsearch/search/aggregations/metrics/valuecount/InternalValueCount.java index 25dd4e2e123fc..0ac42ff9f45d6 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/metrics/valuecount/InternalValueCount.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/metrics/valuecount/InternalValueCount.java @@ -81,7 +81,7 @@ public InternalAggregation doReduce(List aggregations, Redu @Override public XContentBuilder doXContentBody(XContentBuilder builder, Params params) throws IOException { - builder.field(CommonFields.VALUE, value); + builder.field(CommonFields.VALUE.getPreferredName(), value); return builder; } diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/pipeline/InternalSimpleValue.java b/core/src/main/java/org/elasticsearch/search/aggregations/pipeline/InternalSimpleValue.java index b9d79b8e3297a..a3c7012f7cde1 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/pipeline/InternalSimpleValue.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/pipeline/InternalSimpleValue.java @@ -25,15 +25,15 @@ import org.elasticsearch.search.DocValueFormat; import org.elasticsearch.search.aggregations.InternalAggregation; import org.elasticsearch.search.aggregations.metrics.InternalNumericMetricsAggregation; -import org.elasticsearch.search.aggregations.metrics.max.InternalMax; import java.io.IOException; import java.util.List; import java.util.Map; +import java.util.Objects; public class InternalSimpleValue extends InternalNumericMetricsAggregation.SingleValue implements SimpleValue { public static final String NAME = "simple_value"; - private final double value; + protected final double value; public InternalSimpleValue(String name, double value, DocValueFormat formatter, List pipelineAggregators, Map metaData) { @@ -72,17 +72,28 @@ public double getValue() { } @Override - public InternalMax doReduce(List aggregations, ReduceContext reduceContext) { + public InternalSimpleValue doReduce(List aggregations, ReduceContext reduceContext) { throw new UnsupportedOperationException("Not supported"); } @Override public XContentBuilder doXContentBody(XContentBuilder builder, Params params) throws IOException { boolean hasValue = !(Double.isInfinite(value) || Double.isNaN(value)); - 
builder.field(CommonFields.VALUE, hasValue ? value : null); + builder.field(CommonFields.VALUE.getPreferredName(), hasValue ? value : null); if (hasValue && format != DocValueFormat.RAW) { - builder.field(CommonFields.VALUE_AS_STRING, format.format(value)); + builder.field(CommonFields.VALUE_AS_STRING.getPreferredName(), format.format(value)); } return builder; } + + @Override + protected int doHashCode() { + return Objects.hash(value); + } + + @Override + protected boolean doEquals(Object obj) { + InternalSimpleValue other = (InternalSimpleValue) obj; + return Objects.equals(value, other.value); + } } diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/pipeline/bucketmetrics/InternalBucketMetricValue.java b/core/src/main/java/org/elasticsearch/search/aggregations/pipeline/bucketmetrics/InternalBucketMetricValue.java index 6477728b3231d..76284d275553f 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/pipeline/bucketmetrics/InternalBucketMetricValue.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/pipeline/bucketmetrics/InternalBucketMetricValue.java @@ -97,9 +97,9 @@ public Object getProperty(List path) { @Override public XContentBuilder doXContentBody(XContentBuilder builder, Params params) throws IOException { boolean hasValue = !Double.isInfinite(value); - builder.field(CommonFields.VALUE, hasValue ? value : null); + builder.field(CommonFields.VALUE.getPreferredName(), hasValue ? value : null); if (hasValue && format != DocValueFormat.RAW) { - builder.field(CommonFields.VALUE_AS_STRING, format.format(value)); + builder.field(CommonFields.VALUE_AS_STRING.getPreferredName(), format.format(value)); } builder.startArray("keys"); for (String key : keys) { diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/pipeline/derivative/InternalDerivative.java b/core/src/main/java/org/elasticsearch/search/aggregations/pipeline/derivative/InternalDerivative.java index e18c0d81eebfb..db56f0f7c6f0f 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/pipeline/derivative/InternalDerivative.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/pipeline/derivative/InternalDerivative.java @@ -29,6 +29,7 @@ import java.io.IOException; import java.util.List; import java.util.Map; +import java.util.Objects; public class InternalDerivative extends InternalSimpleValue implements Derivative { private final double normalizationFactor; @@ -89,4 +90,16 @@ public XContentBuilder doXContentBody(XContentBuilder builder, Params params) th } return builder; } + + @Override + protected int doHashCode() { + return Objects.hash(normalizationFactor, value); + } + + @Override + protected boolean doEquals(Object obj) { + InternalDerivative other = (InternalDerivative) obj; + return Objects.equals(value, other.value) + && Objects.equals(normalizationFactor, other.normalizationFactor); + } } diff --git a/core/src/main/java/org/elasticsearch/search/dfs/DfsSearchResult.java b/core/src/main/java/org/elasticsearch/search/dfs/DfsSearchResult.java index bf3d95272463a..0cd624b00a36b 100644 --- a/core/src/main/java/org/elasticsearch/search/dfs/DfsSearchResult.java +++ b/core/src/main/java/org/elasticsearch/search/dfs/DfsSearchResult.java @@ -30,44 +30,24 @@ import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.search.SearchPhaseResult; import org.elasticsearch.search.SearchShardTarget; -import org.elasticsearch.transport.TransportResponse; import java.io.IOException; -public class DfsSearchResult extends 
TransportResponse implements SearchPhaseResult { +public class DfsSearchResult extends SearchPhaseResult { private static final Term[] EMPTY_TERMS = new Term[0]; private static final TermStatistics[] EMPTY_TERM_STATS = new TermStatistics[0]; - - private SearchShardTarget shardTarget; - private long id; private Term[] terms; private TermStatistics[] termStatistics; private ObjectObjectHashMap fieldStatistics = HppcMaps.newNoNullKeysMap(); private int maxDoc; public DfsSearchResult() { - } public DfsSearchResult(long id, SearchShardTarget shardTarget) { - this.id = id; - this.shardTarget = shardTarget; - } - - @Override - public long id() { - return this.id; - } - - @Override - public SearchShardTarget shardTarget() { - return shardTarget; - } - - @Override - public void shardTarget(SearchShardTarget shardTarget) { - this.shardTarget = shardTarget; + this.setSearchShardTarget(shardTarget); + this.requestId = id; } public DfsSearchResult maxDoc(int maxDoc) { @@ -105,7 +85,7 @@ public ObjectObjectHashMap fieldStatistics() { @Override public void readFrom(StreamInput in) throws IOException { super.readFrom(in); - id = in.readLong(); + requestId = in.readLong(); int termsSize = in.readVInt(); if (termsSize == 0) { terms = EMPTY_TERMS; @@ -125,7 +105,7 @@ public void readFrom(StreamInput in) throws IOException { @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); - out.writeLong(id); + out.writeLong(requestId); out.writeVInt(terms.length); for (Term term : terms) { out.writeString(term.field()); diff --git a/core/src/main/java/org/elasticsearch/search/fetch/FetchSearchResult.java b/core/src/main/java/org/elasticsearch/search/fetch/FetchSearchResult.java index 1e2def8cc61da..a5f27733ad28a 100644 --- a/core/src/main/java/org/elasticsearch/search/fetch/FetchSearchResult.java +++ b/core/src/main/java/org/elasticsearch/search/fetch/FetchSearchResult.java @@ -22,28 +22,25 @@ import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.search.SearchHit; +import org.elasticsearch.search.SearchPhaseResult; import org.elasticsearch.search.SearchShardTarget; import org.elasticsearch.search.SearchHits; import org.elasticsearch.search.query.QuerySearchResult; -import org.elasticsearch.search.query.QuerySearchResultProvider; import java.io.IOException; -public class FetchSearchResult extends QuerySearchResultProvider { +public final class FetchSearchResult extends SearchPhaseResult { - private long id; - private SearchShardTarget shardTarget; private SearchHits hits; // client side counter private transient int counter; public FetchSearchResult() { - } public FetchSearchResult(long id, SearchShardTarget shardTarget) { - this.id = id; - this.shardTarget = shardTarget; + this.requestId = id; + setSearchShardTarget(shardTarget); } @Override @@ -56,21 +53,6 @@ public FetchSearchResult fetchResult() { return this; } - @Override - public long id() { - return this.id; - } - - @Override - public SearchShardTarget shardTarget() { - return this.shardTarget; - } - - @Override - public void shardTarget(SearchShardTarget shardTarget) { - this.shardTarget = shardTarget; - } - public void hits(SearchHits hits) { assert assertNoSearchTarget(hits); this.hits = hits; @@ -105,14 +87,14 @@ public static FetchSearchResult readFetchSearchResult(StreamInput in) throws IOE @Override public void readFrom(StreamInput in) throws IOException { super.readFrom(in); - id = in.readLong(); + requestId = in.readLong(); hits = 
SearchHits.readSearchHits(in); } @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); - out.writeLong(id); + out.writeLong(requestId); hits.writeTo(out); } } diff --git a/core/src/main/java/org/elasticsearch/search/fetch/QueryFetchSearchResult.java b/core/src/main/java/org/elasticsearch/search/fetch/QueryFetchSearchResult.java index 35c4dbd65971d..8d1e6276e65d9 100644 --- a/core/src/main/java/org/elasticsearch/search/fetch/QueryFetchSearchResult.java +++ b/core/src/main/java/org/elasticsearch/search/fetch/QueryFetchSearchResult.java @@ -21,22 +21,21 @@ import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.search.SearchPhaseResult; import org.elasticsearch.search.SearchShardTarget; import org.elasticsearch.search.query.QuerySearchResult; -import org.elasticsearch.search.query.QuerySearchResultProvider; import java.io.IOException; import static org.elasticsearch.search.fetch.FetchSearchResult.readFetchSearchResult; import static org.elasticsearch.search.query.QuerySearchResult.readQuerySearchResult; -public class QueryFetchSearchResult extends QuerySearchResultProvider { +public final class QueryFetchSearchResult extends SearchPhaseResult { private QuerySearchResult queryResult; private FetchSearchResult fetchResult; public QueryFetchSearchResult() { - } public QueryFetchSearchResult(QuerySearchResult queryResult, FetchSearchResult fetchResult) { @@ -45,19 +44,27 @@ public QueryFetchSearchResult(QuerySearchResult queryResult, FetchSearchResult f } @Override - public long id() { - return queryResult.id(); + public long getRequestId() { + return queryResult.getRequestId(); + } + + @Override + public SearchShardTarget getSearchShardTarget() { + return queryResult.getSearchShardTarget(); } @Override - public SearchShardTarget shardTarget() { - return queryResult.shardTarget(); + public void setSearchShardTarget(SearchShardTarget shardTarget) { + super.setSearchShardTarget(shardTarget); + queryResult.setSearchShardTarget(shardTarget); + fetchResult.setSearchShardTarget(shardTarget); } @Override - public void shardTarget(SearchShardTarget shardTarget) { - queryResult.shardTarget(shardTarget); - fetchResult.shardTarget(shardTarget); + public void setShardIndex(int requestIndex) { + super.setShardIndex(requestIndex); + queryResult.setShardIndex(requestIndex); + fetchResult.setShardIndex(requestIndex); } @Override diff --git a/core/src/main/java/org/elasticsearch/search/fetch/ScrollQueryFetchSearchResult.java b/core/src/main/java/org/elasticsearch/search/fetch/ScrollQueryFetchSearchResult.java index e8a9af00127e4..55aa4a96d018c 100644 --- a/core/src/main/java/org/elasticsearch/search/fetch/ScrollQueryFetchSearchResult.java +++ b/core/src/main/java/org/elasticsearch/search/fetch/ScrollQueryFetchSearchResult.java @@ -21,46 +21,64 @@ import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.search.SearchPhaseResult; import org.elasticsearch.search.SearchShardTarget; -import org.elasticsearch.transport.TransportResponse; +import org.elasticsearch.search.query.QuerySearchResult; import java.io.IOException; import static org.elasticsearch.search.fetch.QueryFetchSearchResult.readQueryFetchSearchResult; -public class ScrollQueryFetchSearchResult extends TransportResponse { +public final class ScrollQueryFetchSearchResult extends SearchPhaseResult { private QueryFetchSearchResult result; - private SearchShardTarget 
shardTarget; public ScrollQueryFetchSearchResult() { } public ScrollQueryFetchSearchResult(QueryFetchSearchResult result, SearchShardTarget shardTarget) { this.result = result; - this.shardTarget = shardTarget; + setSearchShardTarget(shardTarget); } public QueryFetchSearchResult result() { return result; } - public SearchShardTarget shardTarget() { - return shardTarget; + @Override + public void setSearchShardTarget(SearchShardTarget shardTarget) { + super.setSearchShardTarget(shardTarget); + result.setSearchShardTarget(shardTarget); + } + + @Override + public void setShardIndex(int shardIndex) { + super.setShardIndex(shardIndex); + result.setShardIndex(shardIndex); + } + + @Override + public QuerySearchResult queryResult() { + return result.queryResult(); + } + + @Override + public FetchSearchResult fetchResult() { + return result.fetchResult(); } @Override public void readFrom(StreamInput in) throws IOException { super.readFrom(in); - shardTarget = new SearchShardTarget(in); + SearchShardTarget searchShardTarget = new SearchShardTarget(in); result = readQueryFetchSearchResult(in); - result.shardTarget(shardTarget); + setSearchShardTarget(searchShardTarget); } @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); - shardTarget.writeTo(out); + getSearchShardTarget().writeTo(out); result.writeTo(out); } } diff --git a/core/src/main/java/org/elasticsearch/search/query/QuerySearchResult.java b/core/src/main/java/org/elasticsearch/search/query/QuerySearchResult.java index a8d8ae7406224..15403f9967720 100644 --- a/core/src/main/java/org/elasticsearch/search/query/QuerySearchResult.java +++ b/core/src/main/java/org/elasticsearch/search/query/QuerySearchResult.java @@ -24,6 +24,7 @@ import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.search.DocValueFormat; +import org.elasticsearch.search.SearchPhaseResult; import org.elasticsearch.search.SearchShardTarget; import org.elasticsearch.search.aggregations.Aggregations; import org.elasticsearch.search.aggregations.InternalAggregations; @@ -40,10 +41,8 @@ import static org.elasticsearch.common.lucene.Lucene.readTopDocs; import static org.elasticsearch.common.lucene.Lucene.writeTopDocs; -public final class QuerySearchResult extends QuerySearchResultProvider { +public final class QuerySearchResult extends SearchPhaseResult { - private long id; - private SearchShardTarget shardTarget; private int from; private int size; private TopDocs topDocs; @@ -61,8 +60,8 @@ public QuerySearchResult() { } public QuerySearchResult(long id, SearchShardTarget shardTarget) { - this.id = id; - this.shardTarget = shardTarget; + this.requestId = id; + setSearchShardTarget(shardTarget); } @Override @@ -70,20 +69,6 @@ public QuerySearchResult queryResult() { return this; } - @Override - public long id() { - return this.id; - } - - @Override - public SearchShardTarget shardTarget() { - return shardTarget; - } - - @Override - public void shardTarget(SearchShardTarget shardTarget) { - this.shardTarget = shardTarget; - } public void searchTimedOut(boolean searchTimedOut) { this.searchTimedOut = searchTimedOut; @@ -230,7 +215,7 @@ public void readFrom(StreamInput in) throws IOException { } public void readFromWithId(long id, StreamInput in) throws IOException { - this.id = id; + this.requestId = id; from = in.readVInt(); size = in.readVInt(); int numSortFieldsPlus1 = in.readVInt(); @@ -260,7 +245,7 @@ public void readFromWithId(long id, StreamInput in) throws 
IOException { @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); - out.writeLong(id); + out.writeLong(requestId); writeToNoId(out); } diff --git a/core/src/main/java/org/elasticsearch/search/query/ScrollQuerySearchResult.java b/core/src/main/java/org/elasticsearch/search/query/ScrollQuerySearchResult.java index 9137a72acb558..6401459489955 100644 --- a/core/src/main/java/org/elasticsearch/search/query/ScrollQuerySearchResult.java +++ b/core/src/main/java/org/elasticsearch/search/query/ScrollQuerySearchResult.java @@ -21,46 +21,54 @@ import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.search.SearchPhaseResult; import org.elasticsearch.search.SearchShardTarget; -import org.elasticsearch.transport.TransportResponse; import java.io.IOException; import static org.elasticsearch.search.query.QuerySearchResult.readQuerySearchResult; -public class ScrollQuerySearchResult extends TransportResponse { +public final class ScrollQuerySearchResult extends SearchPhaseResult { - private QuerySearchResult queryResult; - private SearchShardTarget shardTarget; + private QuerySearchResult result; public ScrollQuerySearchResult() { } - public ScrollQuerySearchResult(QuerySearchResult queryResult, SearchShardTarget shardTarget) { - this.queryResult = queryResult; - this.shardTarget = shardTarget; + public ScrollQuerySearchResult(QuerySearchResult result, SearchShardTarget shardTarget) { + this.result = result; + setSearchShardTarget(shardTarget); } - public QuerySearchResult queryResult() { - return queryResult; + @Override + public void setSearchShardTarget(SearchShardTarget shardTarget) { + super.setSearchShardTarget(shardTarget); + result.setSearchShardTarget(shardTarget); } - public SearchShardTarget shardTarget() { - return shardTarget; + @Override + public void setShardIndex(int shardIndex) { + super.setShardIndex(shardIndex); + result.setShardIndex(shardIndex); + } + + @Override + public QuerySearchResult queryResult() { + return result; } @Override public void readFrom(StreamInput in) throws IOException { super.readFrom(in); - shardTarget = new SearchShardTarget(in); - queryResult = readQuerySearchResult(in); - queryResult.shardTarget(shardTarget); + SearchShardTarget shardTarget = new SearchShardTarget(in); + result = readQuerySearchResult(in); + setSearchShardTarget(shardTarget); } @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); - shardTarget.writeTo(out); - queryResult.writeTo(out); + getSearchShardTarget().writeTo(out); + result.writeTo(out); } } diff --git a/core/src/main/java/org/elasticsearch/search/searchafter/SearchAfterBuilder.java b/core/src/main/java/org/elasticsearch/search/searchafter/SearchAfterBuilder.java index bc73ad7925b8f..8a19f254a8f3d 100644 --- a/core/src/main/java/org/elasticsearch/search/searchafter/SearchAfterBuilder.java +++ b/core/src/main/java/org/elasticsearch/search/searchafter/SearchAfterBuilder.java @@ -21,6 +21,8 @@ import org.apache.lucene.search.FieldDoc; import org.apache.lucene.search.SortField; +import org.apache.lucene.search.SortedNumericSortField; +import org.apache.lucene.search.SortedSetSortField; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.common.ParseField; import org.elasticsearch.common.ParsingException; @@ -128,12 +130,23 @@ public static FieldDoc buildFieldDoc(SortAndFormats sort, Object[] values) { return new FieldDoc(Integer.MAX_VALUE, 0, fieldValues); } + 
private static SortField.Type extractSortType(SortField sortField) { + if (sortField instanceof SortedSetSortField) { + return SortField.Type.STRING; + } else if (sortField instanceof SortedNumericSortField) { + return ((SortedNumericSortField) sortField).getNumericType(); + } else { + return sortField.getType(); + } + } + private static Object convertValueFromSortField(Object value, SortField sortField, DocValueFormat format) { if (sortField.getComparatorSource() instanceof IndexFieldData.XFieldComparatorSource) { IndexFieldData.XFieldComparatorSource cmpSource = (IndexFieldData.XFieldComparatorSource) sortField.getComparatorSource(); return convertValueFromSortType(sortField.getField(), cmpSource.reducedType(), value, format); } - return convertValueFromSortType(sortField.getField(), sortField.getType(), value, format); + SortField.Type sortType = extractSortType(sortField); + return convertValueFromSortType(sortField.getField(), sortType, value, format); } private static Object convertValueFromSortType(String fieldName, SortField.Type sortType, Object value, DocValueFormat format) { diff --git a/core/src/main/java/org/elasticsearch/search/slice/TermsSliceQuery.java b/core/src/main/java/org/elasticsearch/search/slice/TermsSliceQuery.java index 429a3ebe89264..ddc02d32e55a9 100644 --- a/core/src/main/java/org/elasticsearch/search/slice/TermsSliceQuery.java +++ b/core/src/main/java/org/elasticsearch/search/slice/TermsSliceQuery.java @@ -33,6 +33,7 @@ import org.apache.lucene.search.ConstantScoreScorer; import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.DocIdSetBuilder; +import org.apache.lucene.util.StringHelper; import java.io.IOException; @@ -46,6 +47,9 @@ * NOTE: Documents with no value for that field are ignored. */ public final class TermsSliceQuery extends SliceQuery { + // Fixed seed for computing term hashCode + public static final int SEED = 7919; + public TermsSliceQuery(String field, int id, int max) { super(field, id, max); } @@ -71,7 +75,9 @@ private DocIdSet build(LeafReader reader) throws IOException { final TermsEnum te = terms.iterator(); PostingsEnum docsEnum = null; for (BytesRef term = te.next(); term != null; term = te.next()) { - int hashCode = term.hashCode(); + // use a fixed seed instead of term.hashCode() otherwise this query may return inconsistent results when + // running on another replica (StringHelper sets its default seed at startup with current time) + int hashCode = StringHelper.murmurhash3_x86_32(term, SEED); if (contains(hashCode)) { docsEnum = te.postings(docsEnum, PostingsEnum.NONE); builder.add(docsEnum); diff --git a/core/src/main/java/org/elasticsearch/search/sort/FieldSortBuilder.java b/core/src/main/java/org/elasticsearch/search/sort/FieldSortBuilder.java index e1585d708cd4c..db6177ab36f53 100644 --- a/core/src/main/java/org/elasticsearch/search/sort/FieldSortBuilder.java +++ b/core/src/main/java/org/elasticsearch/search/sort/FieldSortBuilder.java @@ -279,9 +279,7 @@ public SortFieldAndFormat build(QueryShardContext context) throws IOException { && (sortMode == SortMode.SUM || sortMode == SortMode.AVG || sortMode == SortMode.MEDIAN)) { throw new QueryShardException(context, "we only support AVG, MEDIAN and SUM on number based fields"); } - IndexFieldData.XFieldComparatorSource fieldComparatorSource = fieldData - .comparatorSource(missing, localSortMode, nested); - SortField field = new SortField(fieldType.name(), fieldComparatorSource, reverse); + SortField field = fieldData.sortField(missing, localSortMode, nested, reverse); 
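The TermsSliceQuery change above replaces term.hashCode() with MurmurHash3 under a fixed seed, so every node assigns a given term to the same slice instead of depending on the per-JVM seed StringHelper picks at startup. A small sketch of the resulting slice assignment; the helper class and the floor-mod check shown here are illustrative (the query's own contains(...) performs the equivalent hash-mod comparison):

```java
import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.StringHelper;

class SliceAssignmentSketch {
    static final int SEED = 7919; // same fixed seed as TermsSliceQuery.SEED

    // A term lands in the same slice on every replica because the hash no longer
    // depends on StringHelper's startup-time default seed.
    static int sliceFor(BytesRef term, int maxSlices) {
        int hashCode = StringHelper.murmurhash3_x86_32(term, SEED);
        return Math.floorMod(hashCode, maxSlices);
    }
}
```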
return new SortFieldAndFormat(field, fieldType.docValueFormat(null, null)); } } diff --git a/core/src/main/java/org/elasticsearch/snapshots/RestoreService.java b/core/src/main/java/org/elasticsearch/snapshots/RestoreService.java index 6afefec2c9496..e6e6bc82173bf 100644 --- a/core/src/main/java/org/elasticsearch/snapshots/RestoreService.java +++ b/core/src/main/java/org/elasticsearch/snapshots/RestoreService.java @@ -91,7 +91,6 @@ import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_REPLICAS; import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_SHARDS; import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_VERSION_CREATED; -import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_VERSION_MINIMUM_COMPATIBLE; import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_VERSION_UPGRADED; import static org.elasticsearch.common.util.set.Sets.newHashSet; @@ -132,7 +131,6 @@ public class RestoreService extends AbstractComponent implements ClusterStateApp unremovable.add(SETTING_NUMBER_OF_REPLICAS); unremovable.add(SETTING_AUTO_EXPAND_REPLICAS); unremovable.add(SETTING_VERSION_UPGRADED); - unremovable.add(SETTING_VERSION_MINIMUM_COMPATIBLE); UNREMOVABLE_SETTINGS = unmodifiableSet(unremovable); } diff --git a/core/src/main/java/org/elasticsearch/snapshots/SnapshotsService.java b/core/src/main/java/org/elasticsearch/snapshots/SnapshotsService.java index d6f8cf8b58640..2a615649fcffe 100644 --- a/core/src/main/java/org/elasticsearch/snapshots/SnapshotsService.java +++ b/core/src/main/java/org/elasticsearch/snapshots/SnapshotsService.java @@ -1150,11 +1150,24 @@ public void onSnapshotCompletion(Snapshot completedSnapshot, SnapshotInfo snapsh @Override public void onSnapshotFailure(Snapshot failedSnapshot, Exception e) { if (failedSnapshot.equals(snapshot)) { - logger.trace("deleted snapshot failed - deleting files", e); + logger.warn("deleted snapshot failed - deleting files", e); removeListener(this); - threadPool.executor(ThreadPool.Names.SNAPSHOT).execute(() -> - deleteSnapshot(failedSnapshot.getRepository(), failedSnapshot.getSnapshotId().getName(), listener, true) - ); + threadPool.executor(ThreadPool.Names.SNAPSHOT).execute(() -> { + try { + deleteSnapshot(failedSnapshot.getRepository(), + failedSnapshot.getSnapshotId().getName(), + listener, + true); + } catch (SnapshotMissingException smex) { + logger.info((Supplier) () -> new ParameterizedMessage( + "Tried deleting in-progress snapshot [{}], but it " + + "could not be found after failing to abort.", + smex.getSnapshotName()), e); + listener.onFailure(new SnapshotException(snapshot, + "Tried deleting in-progress snapshot [{}], but it " + + "could not be found after failing to abort.", smex)); + } + }); } } }); diff --git a/core/src/main/java/org/elasticsearch/tasks/TaskAwareRequest.java b/core/src/main/java/org/elasticsearch/tasks/TaskAwareRequest.java new file mode 100644 index 0000000000000..a2364ac8e4047 --- /dev/null +++ b/core/src/main/java/org/elasticsearch/tasks/TaskAwareRequest.java @@ -0,0 +1,60 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.tasks; + +/** + * An interface for a request that can be used to register a task manager task + */ +public interface TaskAwareRequest { + /** + * Set a reference to task that caused this task to be run. + */ + default void setParentTask(String parentTaskNode, long parentTaskId) { + setParentTask(new TaskId(parentTaskNode, parentTaskId)); + } + + /** + * Set a reference to task that created this request. + */ + void setParentTask(TaskId taskId); + + /** + * Get a reference to the task that created this request. Implementers should default to + * {@link TaskId#EMPTY_TASK_ID}, meaning "there is no parent". + */ + TaskId getParentTask(); + + /** + * Returns the task object that should be used to keep track of the processing of the request. + * + * A request can override this method and return null to avoid being tracked by the task + * manager. + */ + default Task createTask(long id, String type, String action, TaskId parentTaskId) { + return new Task(id, type, action, getDescription(), parentTaskId); + } + + /** + * Returns optional description of the request to be displayed by the task manager + */ + default String getDescription() { + return ""; + } +} diff --git a/core/src/main/java/org/elasticsearch/tasks/TaskManager.java b/core/src/main/java/org/elasticsearch/tasks/TaskManager.java index bf62b5bb013cb..afeeeeedd1168 100644 --- a/core/src/main/java/org/elasticsearch/tasks/TaskManager.java +++ b/core/src/main/java/org/elasticsearch/tasks/TaskManager.java @@ -35,18 +35,14 @@ import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.util.concurrent.ConcurrentCollections; import org.elasticsearch.common.util.concurrent.ConcurrentMapLong; -import org.elasticsearch.transport.TransportRequest; import java.io.IOException; import java.util.Collections; import java.util.HashMap; -import java.util.HashSet; import java.util.Iterator; import java.util.Map; -import java.util.Set; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.atomic.AtomicLong; -import java.util.function.Consumer; import static org.elasticsearch.common.unit.TimeValue.timeValueMillis; @@ -83,7 +79,7 @@ public void setTaskResultsService(TaskResultsService taskResultsService) { *
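
The new `TaskAwareRequest` interface above keeps `setParentTask`, `createTask`, and `getDescription` as default methods, so a request only has to store its parent `TaskId` to become trackable, and `TaskManager.register` (changed in the next hunk) can accept any implementer rather than only `TransportRequest`. A rough stand-alone sketch of the same shape, with hypothetical names (`TrackableRequest`, `TrackedTask`) rather than the Elasticsearch types:

```java
import java.util.concurrent.atomic.AtomicLong;

// Hypothetical, simplified mirror of the default-method pattern: implementers
// only provide parent-task storage, everything else comes from defaults that
// the task manager can rely on.
public class TaskRegistrationDemo {

    interface TrackableRequest {
        void setParent(String parentTaskId);
        String getParent();

        /** Optional human-readable description shown by the task manager. */
        default String describe() {
            return "";
        }

        /** Returning null would opt the request out of tracking. */
        default TrackedTask createTask(long id, String action) {
            return new TrackedTask(id, action, describe(), getParent());
        }
    }

    static final class TrackedTask {
        final long id;
        final String action;
        final String description;
        final String parentTaskId;

        TrackedTask(long id, String action, String description, String parentTaskId) {
            this.id = id;
            this.action = action;
            this.description = description;
            this.parentTaskId = parentTaskId;
        }

        @Override
        public String toString() {
            return "task[" + id + "][" + action + "] " + description;
        }
    }

    static final class SearchLikeRequest implements TrackableRequest {
        private String parent = "";

        @Override public void setParent(String parentTaskId) { this.parent = parentTaskId; }
        @Override public String getParent() { return parent; }
        @Override public String describe() { return "search[index=logs]"; }
    }

    private static final AtomicLong idGenerator = new AtomicLong();

    // Mirrors the spirit of TaskManager.register(type, action, request):
    // anything implementing the interface can be registered.
    static TrackedTask register(String action, TrackableRequest request) {
        return request.createTask(idGenerator.incrementAndGet(), action);
    }

    public static void main(String[] args) {
        System.out.println(register("indices:data/read/search", new SearchLikeRequest()));
    }
}
```
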

* Returns the task manager tracked task or null if the task doesn't support the task manager */ - public Task register(String type, String action, TransportRequest request) { + public Task register(String type, String action, TaskAwareRequest request) { Task task = request.createTask(taskIdGenerator.incrementAndGet(), type, action, request.getParentTask()); if (task == null) { return null; diff --git a/core/src/main/java/org/elasticsearch/transport/TcpTransport.java b/core/src/main/java/org/elasticsearch/transport/TcpTransport.java index 79b4ff0f9f7dd..dd75ae295562e 100644 --- a/core/src/main/java/org/elasticsearch/transport/TcpTransport.java +++ b/core/src/main/java/org/elasticsearch/transport/TcpTransport.java @@ -26,6 +26,8 @@ import org.elasticsearch.ElasticsearchException; import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.Version; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.NotifyOnceListener; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.common.CheckedBiConsumer; import org.elasticsearch.common.Nullable; @@ -295,19 +297,25 @@ protected void doRunInLifecycle() throws Exception { DiscoveryNode node = entry.getKey(); NodeChannels channels = entry.getValue(); for (Channel channel : channels.getChannels()) { - try { - sendMessage(channel, pingHeader, successfulPings::inc); - } catch (Exception e) { - if (isOpen(channel)) { - logger.debug( - (Supplier) () -> new ParameterizedMessage("[{}] failed to send ping transport message", node), e); - failedPings.inc(); - } else { - logger.trace( - (Supplier) () -> new ParameterizedMessage( - "[{}] failed to send ping transport message (channel closed)", node), e); + internalSendMessage(channel, pingHeader, new NotifyOnceListener() { + @Override + public void innerOnResponse(Channel channel) { + successfulPings.inc(); } - } + + @Override + public void innerOnFailure(Exception e) { + if (isOpen(channel)) { + logger.debug( + (Supplier) () -> new ParameterizedMessage("[{}] failed to send ping transport message", node), e); + failedPings.inc(); + } else { + logger.trace((Supplier) () -> + new ParameterizedMessage("[{}] failed to send ping transport message (channel closed)", node), e); + } + + } + }); } } } @@ -358,7 +366,7 @@ public NodeChannels(DiscoveryNode node, Channel[] channels, ConnectionProfile co typeMapping = new EnumMap<>(TransportRequestOptions.Type.class); for (ConnectionProfile.ConnectionTypeHandle handle : connectionProfile.getHandles()) { for (TransportRequestOptions.Type type : handle.getTypes()) - typeMapping.put(type, handle); + typeMapping.put(type, handle); } version = node.getVersion(); } @@ -415,7 +423,7 @@ public void sendRequest(long requestId, String action, TransportRequest request, throw new NodeNotConnectedException(node, "connection already closed"); } Channel channel = channel(options.type()); - sendRequestToChannel(this.node, channel, requestId, action, request, options, getVersion(), (byte)0); + sendRequestToChannel(this.node, channel, requestId, action, request, options, getVersion(), (byte) 0); } } @@ -683,7 +691,7 @@ protected void bindServer(final String name, final Settings settings) { for (int i = 0; i < hostAddresses.length; i++) { addresses[i] = NetworkAddress.format(hostAddresses[i]); } - logger.debug("binding server bootstrap to: {}", (Object)addresses); + logger.debug("binding server bootstrap to: {}", (Object) addresses); } assert hostAddresses.length > 0; @@ -907,7 +915,7 @@ protected final void doStop() { } } - protected 
void onException(Channel channel, Exception e) throws IOException { + protected void onException(Channel channel, Exception e) { if (!lifecycle.started()) { // just close and ignore - we are already stopped and just need to make sure we release all resources disconnectFromNodeChannel(channel, e); @@ -940,23 +948,27 @@ protected void onException(Channel channel, Exception e) throws IOException { } else if (e instanceof TcpTransport.HttpOnTransportException) { // in case we are able to return data, serialize the exception content and sent it back to the client if (isOpen(channel)) { - final Runnable closeChannel = () -> { - try { - closeChannels(Collections.singletonList(channel)); - } catch (IOException e1) { - logger.debug("failed to close httpOnTransport channel", e1); + final NotifyOnceListener closeChannel = new NotifyOnceListener() { + @Override + public void innerOnResponse(Channel channel) { + try { + closeChannels(Collections.singletonList(channel)); + } catch (IOException e1) { + logger.debug("failed to close httpOnTransport channel", e1); + } } - }; - boolean success = false; - try { - sendMessage(channel, new BytesArray(e.getMessage().getBytes(StandardCharsets.UTF_8)), closeChannel); - success = true; - } finally { - if (success == false) { - // it's fine to call this more than once - closeChannel.run(); + + @Override + public void innerOnFailure(Exception e) { + try { + closeChannels(Collections.singletonList(channel)); + } catch (IOException e1) { + e.addSuppressed(e1); + logger.debug("failed to close httpOnTransport channel", e1); + } } - } + }; + internalSendMessage(channel, new BytesArray(e.getMessage().getBytes(StandardCharsets.UTF_8)), closeChannel); } } else { logger.warn( @@ -973,7 +985,8 @@ protected void onException(Channel channel, Exception e) throws IOException { /** * Binds to the given {@link InetSocketAddress} - * @param name the profile name + * + * @param name the profile name * @param address the address to bind to */ protected abstract Channel bind(String name, InetSocketAddress address) throws IOException; @@ -983,8 +996,14 @@ protected void onException(Channel channel, Exception e) throws IOException { */ protected abstract void closeChannels(List channel) throws IOException; - - protected abstract void sendMessage(Channel channel, BytesReference reference, Runnable sendListener) throws IOException; + /** + * Sends message to channel. The listener's onResponse method will be called when the send is complete unless an exception + * is thrown during the send. If an exception is thrown, the listener's onException method will be called. 
+ * @param channel the destination channel + * @param reference the byte reference for the message + * @param listener the listener to call when the operation has completed + */ + protected abstract void sendMessage(Channel channel, BytesReference reference, ActionListener listener); protected abstract NodeChannels connectToChannels(DiscoveryNode node, ConnectionProfile connectionProfile) throws IOException; @@ -997,8 +1016,8 @@ public boolean canCompress(TransportRequest request) { return compress && (!(request instanceof BytesTransportRequest)); } - private void sendRequestToChannel(DiscoveryNode node, final Channel targetChannel, final long requestId, final String action, - final TransportRequest request, TransportRequestOptions options, Version channelVersion, + private void sendRequestToChannel(final DiscoveryNode node, final Channel targetChannel, final long requestId, final String action, + final TransportRequest request, TransportRequestOptions options, Version channelVersion, byte status) throws IOException, TransportException { if (compress) { @@ -1009,7 +1028,6 @@ private void sendRequestToChannel(DiscoveryNode node, final Channel targetChanne // we wrap this in a release once since if the onRequestSent callback throws an exception // we might release things twice and this should be prevented final Releasable toRelease = Releasables.releaseOnce(() -> Releasables.close(bStream.bytes())); - boolean addedReleaseListener = false; StreamOutput stream = bStream; try { // only compress if asked, and, the request is not bytes, since then only @@ -1029,43 +1047,31 @@ private void sendRequestToChannel(DiscoveryNode node, final Channel targetChanne stream.writeString(action); BytesReference message = buildMessage(requestId, status, node.getVersion(), request, stream, bStream); final TransportRequestOptions finalOptions = options; - Runnable onRequestSent = () -> { // this might be called in a different thread - try { - toRelease.close(); - } finally { - transportServiceAdapter.onRequestSent(node, requestId, action, request, finalOptions); - } - }; - addedReleaseListener = internalSendMessage(targetChannel, message, onRequestSent); + // this might be called in a different thread + SendListener onRequestSent = new SendListener(toRelease, + () -> transportServiceAdapter.onRequestSent(node, requestId, action, request, finalOptions)); + internalSendMessage(targetChannel, message, onRequestSent); } finally { IOUtils.close(stream); - if (!addedReleaseListener) { - toRelease.close(); - } } } /** - * sends a message view the given channel, using the given callbacks. - * - * @return true if the message was successfully sent or false when an error occurred and the error hanlding logic was activated - * + * sends a message to the given channel, using the given callbacks. 
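
The new `sendMessage` contract documented above hands the outcome to a listener instead of a bare `Runnable`, and `internalSendMessage` routes any exception thrown synchronously by the send into that same listener so per-message resources are released exactly once. A hedged sketch of that contract; `Channel`, `SendListener`, and `internalSend` here are invented for the example and are not the transport classes:

```java
import java.io.IOException;
import java.util.function.Consumer;

// Illustrative only: shows the "listener is always completed" contract.
// Channel and its send() method are made up for this sketch; they are not the
// Elasticsearch transport types.
public class SendWithListenerDemo {

    interface SendListener {
        void onResponse();
        void onFailure(Exception e);
    }

    interface Channel {
        void send(byte[] message, SendListener listener) throws IOException;
    }

    // Exceptions thrown synchronously by send() are routed to the same
    // listener, so callers release buffers exactly once in onFailure.
    static void internalSend(Channel channel, byte[] message, SendListener listener,
                             Consumer<Exception> onChannelError) {
        try {
            channel.send(message, listener);
        } catch (Exception e) {
            listener.onFailure(e);    // release resources tied to the message
            onChannelError.accept(e); // then let transport-level handling run
        }
    }

    public static void main(String[] args) {
        Channel broken = (message, listener) -> { throw new IOException("boom"); };
        internalSend(broken, new byte[] {1, 2, 3}, new SendListener() {
            @Override public void onResponse() { System.out.println("sent"); }
            @Override public void onFailure(Exception e) { System.out.println("failed: " + e); }
        }, e -> System.out.println("channel error handled: " + e.getMessage()));
    }
}
```
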
*/ - private boolean internalSendMessage(Channel targetChannel, BytesReference message, Runnable onRequestSent) throws IOException { - boolean success; + private void internalSendMessage(Channel targetChannel, BytesReference message, NotifyOnceListener listener) { try { - sendMessage(targetChannel, message, onRequestSent); - success = true; - } catch (IOException ex) { - // passing exception handling to deal with this and raise disconnect events and decide the right logging level + sendMessage(targetChannel, message, listener); + } catch (Exception ex) { + // call listener to ensure that any resources are released + listener.onFailure(ex); onException(targetChannel, ex); - success = false; } - return success; } /** * Sends back an error response to the caller via the given channel + * * @param nodeVersion the caller node version * @param channel the channel to send the response to * @param error the error to return @@ -1085,8 +1091,9 @@ public void sendErrorResponse(Version nodeVersion, Channel channel, final Except status = TransportStatus.setError(status); final BytesReference bytes = stream.bytes(); final BytesReference header = buildHeader(requestId, status, nodeVersion, bytes.length()); - Runnable onRequestSent = () -> transportServiceAdapter.onResponseSent(requestId, action, error); - sendMessage(channel, new CompositeBytesReference(header, bytes), onRequestSent); + SendListener onResponseSent = new SendListener(null, + () -> transportServiceAdapter.onResponseSent(requestId, action, error)); + internalSendMessage(channel, new CompositeBytesReference(header, bytes), onResponseSent); } } @@ -1097,7 +1104,7 @@ public void sendErrorResponse(Version nodeVersion, Channel channel, final Except */ public void sendResponse(Version nodeVersion, Channel channel, final TransportResponse response, final long requestId, final String action, TransportResponseOptions options) throws IOException { - sendResponse(nodeVersion, channel, response, requestId, action, options, (byte)0); + sendResponse(nodeVersion, channel, response, requestId, action, options, (byte) 0); } private void sendResponse(Version nodeVersion, Channel channel, final TransportResponse response, final long requestId, @@ -1110,7 +1117,6 @@ private void sendResponse(Version nodeVersion, Channel channel, final TransportR // we wrap this in a release once since if the onRequestSent callback throws an exception // we might release things twice and this should be prevented final Releasable toRelease = Releasables.releaseOnce(() -> Releasables.close(bStream.bytes())); - boolean addedReleaseListener = false; StreamOutput stream = bStream; try { if (options.compress()) { @@ -1122,24 +1128,12 @@ private void sendResponse(Version nodeVersion, Channel channel, final TransportR BytesReference reference = buildMessage(requestId, status, nodeVersion, response, stream, bStream); final TransportResponseOptions finalOptions = options; - Runnable onRequestSent = () -> { // this might be called in a different thread - try { - toRelease.close(); - } finally { - transportServiceAdapter.onResponseSent(requestId, action, response, finalOptions); - } - }; - addedReleaseListener = internalSendMessage(channel, reference, onRequestSent); + // this might be called in a different thread + SendListener listener = new SendListener(toRelease, + () -> transportServiceAdapter.onResponseSent(requestId, action, response, finalOptions)); + internalSendMessage(channel, reference, listener); } finally { - try { - IOUtils.close(stream); - } finally { - if 
(!addedReleaseListener) { - - toRelease.close(); - } - } - + IOUtils.close(stream); } } @@ -1242,7 +1236,7 @@ public static boolean validateMessageHeader(BytesReference buffer) throws IOExce // safety against too large frames being sent if (dataLen > NINETY_PER_HEAP_SIZE) { throw new IllegalArgumentException("transport content length received [" + new ByteSizeValue(dataLen) + "] exceeded [" - + new ByteSizeValue(NINETY_PER_HEAP_SIZE) + "]"); + + new ByteSizeValue(NINETY_PER_HEAP_SIZE) + "]"); } if (buffer.length() < dataLen + sizeHeaderLength) { @@ -1254,7 +1248,7 @@ public static boolean validateMessageHeader(BytesReference buffer) throws IOExce private static boolean bufferStartsWith(BytesReference buffer, int offset, String method) { char[] chars = method.toCharArray(); for (int i = 0; i < chars.length; i++) { - if (buffer.get(offset+ i) != chars[i]) { + if (buffer.get(offset + i) != chars[i]) { return false; } } @@ -1277,7 +1271,7 @@ public RestStatus status() { return RestStatus.BAD_REQUEST; } - public HttpOnTransportException(StreamInput in) throws IOException{ + public HttpOnTransportException(StreamInput in) throws IOException { super(in); } } @@ -1383,7 +1377,8 @@ public void onFailure(Exception e) { @Override protected void doRun() throws Exception { handler.handleResponse(response); - }}); + } + }); } @@ -1423,7 +1418,7 @@ protected String handleRequest(Channel channel, String profileName, final Stream if (TransportStatus.isHandshake(status)) { final VersionHandshakeResponse response = new VersionHandshakeResponse(getCurrentVersion()); sendResponse(version, channel, response, requestId, HANDSHAKE_ACTION_NAME, TransportResponseOptions.EMPTY, - TransportStatus.setHandshake((byte)0)); + TransportStatus.setHandshake((byte) 0)); } else { final RequestHandlerRegistry reg = transportServiceAdapter.getRequestHandler(action); if (reg == null) { @@ -1552,7 +1547,7 @@ protected Version executeHandshake(DiscoveryNode node, Channel channel, TimeValu // to as the payload. 
final Version minCompatVersion = getCurrentVersion().minimumCompatibilityVersion(); sendRequestToChannel(node, channel, requestId, HANDSHAKE_ACTION_NAME, TransportRequest.Empty.INSTANCE, - TransportRequestOptions.EMPTY, minCompatVersion, TransportStatus.setHandshake((byte)0)); + TransportRequestOptions.EMPTY, minCompatVersion, TransportStatus.setHandshake((byte) 0)); if (handler.latch.await(timeout.millis(), TimeUnit.MILLISECONDS) == false) { throw new ConnectTransportException(node, "handshake_timeout[" + timeout + "]"); } @@ -1594,7 +1589,7 @@ public long newRequestId() { protected final void onChannelClosed(Channel channel) { final Optional first = pendingHandshakes.entrySet().stream() .filter((entry) -> entry.getValue().channel == channel).map((e) -> e.getKey()).findFirst(); - if(first.isPresent()) { + if (first.isPresent()) { final Long requestId = first.get(); final HandshakeResponseHandler handler = pendingHandshakes.remove(requestId); if (handler != null) { @@ -1607,6 +1602,7 @@ protected final void onChannelClosed(Channel channel) { /** * Ensures this transport is still started / open + * * @throws IllegalStateException if the transport is not started / open */ protected final void ensureOpen() { @@ -1614,4 +1610,28 @@ protected final void ensureOpen() { throw new IllegalStateException("transport has been stopped"); } } + + private final class SendListener extends NotifyOnceListener { + private final Releasable optionalReleasable; + private final Runnable transportAdaptorCallback; + + private SendListener(Releasable optionalReleasable, Runnable transportAdaptorCallback) { + this.optionalReleasable = optionalReleasable; + this.transportAdaptorCallback = transportAdaptorCallback; + } + + @Override + public void innerOnResponse(Channel channel) { + release(); + } + + @Override + public void innerOnFailure(Exception e) { + release(); + } + + private void release() { + Releasables.close(optionalReleasable, transportAdaptorCallback::run); + } + } } diff --git a/core/src/main/java/org/elasticsearch/transport/TransportActionProxy.java b/core/src/main/java/org/elasticsearch/transport/TransportActionProxy.java index 30b7299f59d50..502ab51e0fa1b 100644 --- a/core/src/main/java/org/elasticsearch/transport/TransportActionProxy.java +++ b/core/src/main/java/org/elasticsearch/transport/TransportActionProxy.java @@ -134,11 +134,12 @@ public static void registerProxyAction(TransportService service, String action, true, false, new ProxyRequestHandler<>(service, action, responseSupplier)); } + private static final String PROXY_ACTION_PREFIX = "internal:transport/proxy/"; /** * Returns the corresponding proxy action for the given action */ public static String getProxyAction(String action) { - return "internal:transport/proxy/" + action; + return PROXY_ACTION_PREFIX + action; } /** @@ -147,4 +148,14 @@ public static String getProxyAction(String action) { public static TransportRequest wrapRequest(DiscoveryNode node, TransportRequest request) { return new ProxyRequest<>(request, node); } + + /** + * Unwraps a proxy request and returns the original request + */ + public static TransportRequest unwrapRequest(TransportRequest request) { + if (request instanceof ProxyRequest) { + return ((ProxyRequest)request).wrapped; + } + return request; + } } diff --git a/core/src/main/java/org/elasticsearch/transport/TransportRequest.java b/core/src/main/java/org/elasticsearch/transport/TransportRequest.java index 54f3a228a81be..c42ec24ad15a6 100644 --- 
a/core/src/main/java/org/elasticsearch/transport/TransportRequest.java +++ b/core/src/main/java/org/elasticsearch/transport/TransportRequest.java @@ -21,12 +21,12 @@ import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.tasks.Task; +import org.elasticsearch.tasks.TaskAwareRequest; import org.elasticsearch.tasks.TaskId; import java.io.IOException; -public abstract class TransportRequest extends TransportMessage { +public abstract class TransportRequest extends TransportMessage implements TaskAwareRequest { public static class Empty extends TransportRequest { public static final Empty INSTANCE = new Empty(); } @@ -39,16 +39,10 @@ public static class Empty extends TransportRequest { public TransportRequest() { } - /** - * Set a reference to task that caused this task to be run. - */ - public void setParentTask(String parentTaskNode, long parentTaskId) { - setParentTask(new TaskId(parentTaskNode, parentTaskId)); - } - /** * Set a reference to task that created this request. */ + @Override public void setParentTask(TaskId taskId) { this.parentTaskId = taskId; } @@ -56,26 +50,11 @@ public void setParentTask(TaskId taskId) { /** * Get a reference to the task that created this request. Defaults to {@link TaskId#EMPTY_TASK_ID}, meaning "there is no parent". */ + @Override public TaskId getParentTask() { return parentTaskId; } - /** - * Returns the task object that should be used to keep track of the processing of the request. - * - * A request can override this method and return null to avoid being tracked by the task manager. - */ - public Task createTask(long id, String type, String action, TaskId parentTaskId) { - return new Task(id, type, action, getDescription(), parentTaskId); - } - - /** - * Returns optional description of the request to be displayed by the task manager - */ - public String getDescription() { - return ""; - } - @Override public void readFrom(StreamInput in) throws IOException { super.readFrom(in); diff --git a/core/src/main/resources/org/elasticsearch/bootstrap/security.policy b/core/src/main/resources/org/elasticsearch/bootstrap/security.policy index 4b393c0d6e745..5c9c56de18250 100644 --- a/core/src/main/resources/org/elasticsearch/bootstrap/security.policy +++ b/core/src/main/resources/org/elasticsearch/bootstrap/security.policy @@ -31,7 +31,7 @@ grant codeBase "${codebase.securesm-1.1.jar}" { //// Very special jar permissions: //// These are dangerous permissions that we don't want to grant to everything. 
-grant codeBase "${codebase.lucene-core-6.5.0-snapshot-d00c5ca.jar}" { +grant codeBase "${codebase.lucene-core-6.5.0.jar}" { // needed to allow MMapDirectory's "unmap hack" (die unmap hack, die) // java 8 package permission java.lang.RuntimePermission "accessClassInPackage.sun.misc"; @@ -42,7 +42,7 @@ grant codeBase "${codebase.lucene-core-6.5.0-snapshot-d00c5ca.jar}" { permission java.lang.RuntimePermission "accessDeclaredMembers"; }; -grant codeBase "${codebase.lucene-misc-6.5.0-snapshot-d00c5ca.jar}" { +grant codeBase "${codebase.lucene-misc-6.5.0.jar}" { // needed to allow shard shrinking to use hard-links if possible via lucenes HardlinkCopyDirectoryWrapper permission java.nio.file.LinkPermission "hard"; }; diff --git a/core/src/main/resources/org/elasticsearch/bootstrap/test-framework.policy b/core/src/main/resources/org/elasticsearch/bootstrap/test-framework.policy index 8769c80b84e84..2c39ccb350ea3 100644 --- a/core/src/main/resources/org/elasticsearch/bootstrap/test-framework.policy +++ b/core/src/main/resources/org/elasticsearch/bootstrap/test-framework.policy @@ -33,7 +33,7 @@ grant codeBase "${codebase.securemock-1.2.jar}" { permission java.lang.reflect.ReflectPermission "suppressAccessChecks"; }; -grant codeBase "${codebase.lucene-test-framework-6.5.0-snapshot-d00c5ca.jar}" { +grant codeBase "${codebase.lucene-test-framework-6.5.0.jar}" { // needed by RamUsageTester permission java.lang.reflect.ReflectPermission "suppressAccessChecks"; // needed for testing hardlinks in StoreRecoveryTests since we install MockFS diff --git a/core/src/test/java/org/apache/lucene/grouping/CollapsingTopDocsCollectorTests.java b/core/src/test/java/org/apache/lucene/grouping/CollapsingTopDocsCollectorTests.java index 1aff2af523528..aef354a04951f 100644 --- a/core/src/test/java/org/apache/lucene/grouping/CollapsingTopDocsCollectorTests.java +++ b/core/src/test/java/org/apache/lucene/grouping/CollapsingTopDocsCollectorTests.java @@ -198,7 +198,7 @@ private void assertSearchCollapse(CollapsingDocValuesProd subSearcher.search(weight, c); shardHits[shardIDX] = c.getTopDocs(); } - CollapseTopFieldDocs mergedFieldDocs = CollapseTopFieldDocs.merge(sort, 0, expectedNumGroups, shardHits); + CollapseTopFieldDocs mergedFieldDocs = CollapseTopFieldDocs.merge(sort, 0, expectedNumGroups, shardHits, true); assertTopDocsEquals(mergedFieldDocs, collapseTopFieldDocs); w.close(); reader.close(); diff --git a/core/src/test/java/org/elasticsearch/action/NotifyOnceListenerTests.java b/core/src/test/java/org/elasticsearch/action/NotifyOnceListenerTests.java new file mode 100644 index 0000000000000..501a40686ed96 --- /dev/null +++ b/core/src/test/java/org/elasticsearch/action/NotifyOnceListenerTests.java @@ -0,0 +1,76 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.action; + +import org.elasticsearch.test.ESTestCase; + +import java.util.concurrent.atomic.AtomicReference; + +public class NotifyOnceListenerTests extends ESTestCase { + + public void testWhenSuccessCannotNotifyMultipleTimes() { + AtomicReference response = new AtomicReference<>(); + AtomicReference exception = new AtomicReference<>(); + + NotifyOnceListener listener = new NotifyOnceListener() { + @Override + public void innerOnResponse(String s) { + response.set(s); + } + + @Override + public void innerOnFailure(Exception e) { + exception.set(e); + } + }; + + listener.onResponse("response"); + listener.onResponse("wrong-response"); + listener.onFailure(new RuntimeException()); + + assertNull(exception.get()); + assertEquals("response", response.get()); + } + + public void testWhenErrorCannotNotifyMultipleTimes() { + AtomicReference response = new AtomicReference<>(); + AtomicReference exception = new AtomicReference<>(); + + NotifyOnceListener listener = new NotifyOnceListener() { + @Override + public void innerOnResponse(String s) { + response.set(s); + } + + @Override + public void innerOnFailure(Exception e) { + exception.set(e); + } + }; + + RuntimeException expected = new RuntimeException(); + listener.onFailure(expected); + listener.onFailure(new IllegalArgumentException()); + listener.onResponse("response"); + + assertNull(response.get()); + assertSame(expected, exception.get()); + } +} diff --git a/core/src/test/java/org/elasticsearch/action/admin/cluster/stats/ClusterStatsNodesTests.java b/core/src/test/java/org/elasticsearch/action/admin/cluster/stats/ClusterStatsNodesTests.java new file mode 100644 index 0000000000000..04edc775a2d53 --- /dev/null +++ b/core/src/test/java/org/elasticsearch/action/admin/cluster/stats/ClusterStatsNodesTests.java @@ -0,0 +1,76 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.action.admin.cluster.stats; + +import org.elasticsearch.action.admin.cluster.node.info.NodeInfo; +import org.elasticsearch.cluster.node.DiscoveryNode; +import org.elasticsearch.common.network.NetworkModule; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.test.ESTestCase; + +import java.util.Arrays; +import java.util.List; + +import static java.util.Collections.emptyList; +import static java.util.Collections.singletonList; +import static org.elasticsearch.common.xcontent.XContentHelper.toXContent; + +public class ClusterStatsNodesTests extends ESTestCase { + + /** + * Test that empty transport/http types are not printed out as part + * of the cluster stats xcontent output. 
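
The `NotifyOnceListenerTests` above pin down the listener contract: whichever of `onResponse`/`onFailure` arrives first is delivered to `innerOnResponse`/`innerOnFailure`, and every later call is ignored. A small stand-alone equivalent, assuming nothing beyond `java.util.concurrent` (the `OnceListener` name is made up):

```java
import java.util.concurrent.atomic.AtomicBoolean;

// Hypothetical notify-once wrapper: whichever of onResponse/onFailure wins the
// compare-and-set delivers the result; later calls are silently dropped.
public abstract class OnceListener<T> {
    private final AtomicBoolean notified = new AtomicBoolean();

    protected abstract void innerOnResponse(T value);
    protected abstract void innerOnFailure(Exception e);

    public final void onResponse(T value) {
        if (notified.compareAndSet(false, true)) {
            innerOnResponse(value);
        }
    }

    public final void onFailure(Exception e) {
        if (notified.compareAndSet(false, true)) {
            innerOnFailure(e);
        }
    }

    public static void main(String[] args) {
        OnceListener<String> listener = new OnceListener<String>() {
            @Override protected void innerOnResponse(String value) { System.out.println("response: " + value); }
            @Override protected void innerOnFailure(Exception e) { System.out.println("failure: " + e); }
        };
        listener.onResponse("first");                // delivered
        listener.onFailure(new RuntimeException());  // ignored, already notified
        listener.onResponse("second");               // ignored as well
    }
}
```

This is the property the transport's `SendListener` leans on so a message's `Releasable` is closed at most once, regardless of how the send completes.
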
+ */ + public void testNetworkTypesToXContent() throws Exception { + ClusterStatsNodes.NetworkTypes stats = new ClusterStatsNodes.NetworkTypes(emptyList()); + assertEquals("{\"transport_types\":{},\"http_types\":{}}", + toXContent(stats, XContentType.JSON, randomBoolean()).utf8ToString()); + + List nodeInfos = singletonList(createNodeInfo("node_0", null, null)); + stats = new ClusterStatsNodes.NetworkTypes(nodeInfos); + assertEquals("{\"transport_types\":{},\"http_types\":{}}", + toXContent(stats, XContentType.JSON, randomBoolean()).utf8ToString()); + + nodeInfos = Arrays.asList(createNodeInfo("node_1", "", ""), + createNodeInfo("node_2", "custom", "custom"), + createNodeInfo("node_3", null, "custom")); + stats = new ClusterStatsNodes.NetworkTypes(nodeInfos); + assertEquals("{" + + "\"transport_types\":{\"custom\":1}," + + "\"http_types\":{\"custom\":2}" + + "}", toXContent(stats, XContentType.JSON, randomBoolean()).utf8ToString()); + } + + private static NodeInfo createNodeInfo(String nodeId, String transportType, String httpType) { + Settings.Builder settings = Settings.builder(); + if (transportType != null) { + settings.put(randomFrom(NetworkModule.TRANSPORT_TYPE_KEY, + NetworkModule.TRANSPORT_TYPE_DEFAULT_KEY), transportType); + } + if (httpType != null) { + settings.put(randomFrom(NetworkModule.HTTP_TYPE_KEY, + NetworkModule.HTTP_TYPE_DEFAULT_KEY), httpType); + } + return new NodeInfo(null, null, + new DiscoveryNode(nodeId, buildNewFakeTransportAddress(), null), + settings.build(), null, null, null, null, null, null, null, null, null); + } +} diff --git a/core/src/test/java/org/elasticsearch/action/bulk/TransportShardBulkActionTests.java b/core/src/test/java/org/elasticsearch/action/bulk/TransportShardBulkActionTests.java index c30c2ac2f13b5..a11dea344ed1a 100644 --- a/core/src/test/java/org/elasticsearch/action/bulk/TransportShardBulkActionTests.java +++ b/core/src/test/java/org/elasticsearch/action/bulk/TransportShardBulkActionTests.java @@ -53,6 +53,8 @@ import org.elasticsearch.rest.RestStatus; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.action.bulk.TransportShardBulkAction; +import org.elasticsearch.action.bulk.MappingUpdatePerformer; +import org.elasticsearch.action.bulk.BulkItemResultHolder; import java.io.IOException; import static org.hamcrest.CoreMatchers.equalTo; @@ -77,26 +79,30 @@ private IndexMetaData indexMetaData() throws IOException { public void testShouldExecuteReplicaItem() throws Exception { // Successful index request should be replicated - DocWriteRequest writeRequest = new IndexRequest("index", "type", "id").source(Requests.INDEX_CONTENT_TYPE, "foo", "bar"); + DocWriteRequest writeRequest = new IndexRequest("index", "type", "id") + .source(Requests.INDEX_CONTENT_TYPE, "foo", "bar"); DocWriteResponse response = new IndexResponse(shardId, "type", "id", 1, 1, randomBoolean()); BulkItemRequest request = new BulkItemRequest(0, writeRequest); request.setPrimaryResponse(new BulkItemResponse(0, DocWriteRequest.OpType.INDEX, response)); assertTrue(TransportShardBulkAction.shouldExecuteReplicaItem(request, 0)); // Failed index requests should not be replicated (for now!) 
- writeRequest = new IndexRequest("index", "type", "id").source(Requests.INDEX_CONTENT_TYPE, "foo", "bar"); + writeRequest = new IndexRequest("index", "type", "id") + .source(Requests.INDEX_CONTENT_TYPE, "foo", "bar"); response = new IndexResponse(shardId, "type", "id", 1, 1, randomBoolean()); request = new BulkItemRequest(0, writeRequest); request.setPrimaryResponse( new BulkItemResponse(0, DocWriteRequest.OpType.INDEX, - new BulkItemResponse.Failure("test", "type", "id", new IllegalArgumentException("i died")))); + new BulkItemResponse.Failure("test", "type", "id", + new IllegalArgumentException("i died")))); assertFalse(TransportShardBulkAction.shouldExecuteReplicaItem(request, 0)); // NOOP requests should not be replicated writeRequest = new UpdateRequest("index", "type", "id"); response = new UpdateResponse(shardId, "type", "id", 1, DocWriteResponse.Result.NOOP); request = new BulkItemRequest(0, writeRequest); - request.setPrimaryResponse(new BulkItemResponse(0, DocWriteRequest.OpType.UPDATE, response)); + request.setPrimaryResponse(new BulkItemResponse(0, DocWriteRequest.OpType.UPDATE, + response)); assertFalse(TransportShardBulkAction.shouldExecuteReplicaItem(request, 0)); } @@ -112,13 +118,15 @@ public void testExecuteBulkIndexRequest() throws Exception { .create(create); BulkItemRequest primaryRequest = new BulkItemRequest(0, writeRequest); items[0] = primaryRequest; - BulkShardRequest bulkShardRequest = new BulkShardRequest(shardId, RefreshPolicy.NONE, items); + BulkShardRequest bulkShardRequest = + new BulkShardRequest(shardId, RefreshPolicy.NONE, items); Translog.Location location = new Translog.Location(0, 0, 0); UpdateHelper updateHelper = null; - Translog.Location newLocation = TransportShardBulkAction.executeBulkItemRequest(metaData, shard, bulkShardRequest, - location, 0, updateHelper, threadPool::absoluteTimeInMillis, new NoopMappingUpdatePerformer()); + Translog.Location newLocation = TransportShardBulkAction.executeBulkItemRequest(metaData, + shard, bulkShardRequest, location, 0, updateHelper, + threadPool::absoluteTimeInMillis, new NoopMappingUpdatePerformer()); // Translog should change, since there were no problems assertThat(newLocation, not(location)); @@ -127,7 +135,8 @@ public void testExecuteBulkIndexRequest() throws Exception { assertThat(primaryResponse.getItemId(), equalTo(0)); assertThat(primaryResponse.getId(), equalTo("id")); - assertThat(primaryResponse.getOpType(), equalTo(create ? DocWriteRequest.OpType.CREATE : DocWriteRequest.OpType.INDEX)); + assertThat(primaryResponse.getOpType(), + equalTo(create ? 
DocWriteRequest.OpType.CREATE : DocWriteRequest.OpType.INDEX)); assertFalse(primaryResponse.isFailed()); // Assert that the document actually made it there @@ -140,8 +149,10 @@ public void testExecuteBulkIndexRequest() throws Exception { items[0] = primaryRequest; bulkShardRequest = new BulkShardRequest(shardId, RefreshPolicy.NONE, items); - Translog.Location secondLocation = TransportShardBulkAction.executeBulkItemRequest(metaData, shard, bulkShardRequest, - newLocation, 0, updateHelper, threadPool::absoluteTimeInMillis, new NoopMappingUpdatePerformer()); + Translog.Location secondLocation = + TransportShardBulkAction.executeBulkItemRequest( metaData, + shard, bulkShardRequest, newLocation, 0, updateHelper, + threadPool::absoluteTimeInMillis, new NoopMappingUpdatePerformer()); // Translog should not change, since the document was not indexed due to a version conflict assertThat(secondLocation, equalTo(newLocation)); @@ -177,9 +188,11 @@ public void testExecuteBulkIndexRequestWithRejection() throws Exception { IndexShard shard = newStartedShard(true); BulkItemRequest[] items = new BulkItemRequest[1]; - DocWriteRequest writeRequest = new IndexRequest("index", "type", "id").source(Requests.INDEX_CONTENT_TYPE, "foo", "bar"); + DocWriteRequest writeRequest = new IndexRequest("index", "type", "id") + .source(Requests.INDEX_CONTENT_TYPE, "foo", "bar"); items[0] = new BulkItemRequest(0, writeRequest); - BulkShardRequest bulkShardRequest = new BulkShardRequest(shardId, RefreshPolicy.NONE, items); + BulkShardRequest bulkShardRequest = + new BulkShardRequest(shardId, RefreshPolicy.NONE, items); Translog.Location location = new Translog.Location(0, 0, 0); UpdateHelper updateHelper = null; @@ -188,8 +201,9 @@ public void testExecuteBulkIndexRequestWithRejection() throws Exception { Exception err = new ReplicationOperation.RetryOnPrimaryException(shardId, "rejection"); try { - TransportShardBulkAction.executeBulkItemRequest(metaData, shard, bulkShardRequest, location, - 0, updateHelper, threadPool::absoluteTimeInMillis, new ThrowingMappingUpdatePerformer(err)); + TransportShardBulkAction.executeBulkItemRequest(metaData, shard, bulkShardRequest, + location, 0, updateHelper, threadPool::absoluteTimeInMillis, + new ThrowingMappingUpdatePerformer(err)); fail("should have thrown a retry exception"); } catch (ReplicationOperation.RetryOnPrimaryException e) { assertThat(e, equalTo(err)); @@ -203,9 +217,11 @@ public void testExecuteBulkIndexRequestWithConflictingMappings() throws Exceptio IndexShard shard = newStartedShard(true); BulkItemRequest[] items = new BulkItemRequest[1]; - DocWriteRequest writeRequest = new IndexRequest("index", "type", "id").source(Requests.INDEX_CONTENT_TYPE, "foo", "bar"); + DocWriteRequest writeRequest = new IndexRequest("index", "type", "id") + .source(Requests.INDEX_CONTENT_TYPE, "foo", "bar"); items[0] = new BulkItemRequest(0, writeRequest); - BulkShardRequest bulkShardRequest = new BulkShardRequest(shardId, RefreshPolicy.NONE, items); + BulkShardRequest bulkShardRequest = + new BulkShardRequest(shardId, RefreshPolicy.NONE, items); Translog.Location location = new Translog.Location(0, 0, 0); UpdateHelper updateHelper = null; @@ -213,8 +229,9 @@ public void testExecuteBulkIndexRequestWithConflictingMappings() throws Exceptio // Return a mapping conflict (IAE) when trying to update the mapping Exception err = new IllegalArgumentException("mapping conflict"); - Translog.Location newLocation = TransportShardBulkAction.executeBulkItemRequest(metaData, shard, bulkShardRequest, - 
location, 0, updateHelper, threadPool::absoluteTimeInMillis, new FailingMappingUpdatePerformer(err)); + Translog.Location newLocation = TransportShardBulkAction.executeBulkItemRequest(metaData, + shard, bulkShardRequest, location, 0, updateHelper, + threadPool::absoluteTimeInMillis, new ThrowingMappingUpdatePerformer(err)); // Translog shouldn't change, as there were conflicting mappings assertThat(newLocation, equalTo(location)); @@ -245,13 +262,15 @@ public void testExecuteBulkDeleteRequest() throws Exception { BulkItemRequest[] items = new BulkItemRequest[1]; DocWriteRequest writeRequest = new DeleteRequest("index", "type", "id"); items[0] = new BulkItemRequest(0, writeRequest); - BulkShardRequest bulkShardRequest = new BulkShardRequest(shardId, RefreshPolicy.NONE, items); + BulkShardRequest bulkShardRequest = + new BulkShardRequest(shardId, RefreshPolicy.NONE, items); Translog.Location location = new Translog.Location(0, 0, 0); UpdateHelper updateHelper = null; - Translog.Location newLocation = TransportShardBulkAction.executeBulkItemRequest(metaData, shard, bulkShardRequest, - location, 0, updateHelper, threadPool::absoluteTimeInMillis, new NoopMappingUpdatePerformer()); + Translog.Location newLocation = TransportShardBulkAction.executeBulkItemRequest(metaData, + shard, bulkShardRequest, location, 0, updateHelper, + threadPool::absoluteTimeInMillis, new NoopMappingUpdatePerformer()); // Translog changes, even though the document didn't exist assertThat(newLocation, not(location)); @@ -288,8 +307,9 @@ public void testExecuteBulkDeleteRequest() throws Exception { location = newLocation; - newLocation = TransportShardBulkAction.executeBulkItemRequest(metaData, shard, bulkShardRequest, - location, 0, updateHelper, threadPool::absoluteTimeInMillis, new NoopMappingUpdatePerformer()); + newLocation = TransportShardBulkAction.executeBulkItemRequest(metaData, shard, + bulkShardRequest, location, 0, updateHelper, threadPool::absoluteTimeInMillis, + new NoopMappingUpdatePerformer()); // Translog changes, because the document was deleted assertThat(newLocation, not(location)); @@ -322,19 +342,25 @@ public void testExecuteBulkDeleteRequest() throws Exception { } public void testNoopUpdateReplicaRequest() throws Exception { - DocWriteRequest writeRequest = new IndexRequest("index", "type", "id").source(Requests.INDEX_CONTENT_TYPE, "field", "value"); + DocWriteRequest writeRequest = new IndexRequest("index", "type", "id") + .source(Requests.INDEX_CONTENT_TYPE, "field", "value"); BulkItemRequest replicaRequest = new BulkItemRequest(0, writeRequest); - DocWriteResponse noopUpdateResponse = new UpdateResponse(shardId, "index", "id", 0, DocWriteResponse.Result.NOOP); - BulkItemResultHolder noopResults = new BulkItemResultHolder(noopUpdateResponse, null, replicaRequest); + DocWriteResponse noopUpdateResponse = new UpdateResponse(shardId, "index", "id", 0, + DocWriteResponse.Result.NOOP); + BulkItemResultHolder noopResults = new BulkItemResultHolder(noopUpdateResponse, null, + replicaRequest); Translog.Location location = new Translog.Location(0, 0, 0); BulkItemRequest[] items = new BulkItemRequest[0]; - BulkShardRequest bulkShardRequest = new BulkShardRequest(shardId, RefreshPolicy.NONE, items); - Translog.Location newLocation = TransportShardBulkAction.updateReplicaRequest(noopResults, - DocWriteRequest.OpType.UPDATE, location, bulkShardRequest); + BulkShardRequest bulkShardRequest = + new BulkShardRequest(shardId, RefreshPolicy.NONE, items); - BulkItemResponse primaryResponse = 
replicaRequest.getPrimaryResponse(); + BulkItemResponse primaryResponse = TransportShardBulkAction.createPrimaryResponse( + noopResults, DocWriteRequest.OpType.UPDATE, bulkShardRequest); + + Translog.Location newLocation = + TransportShardBulkAction.calculateTranslogLocation(location, noopResults); // Basically nothing changes in the request since it's a noop assertThat(newLocation, equalTo(location)); @@ -342,24 +368,30 @@ public void testNoopUpdateReplicaRequest() throws Exception { assertThat(primaryResponse.getId(), equalTo("id")); assertThat(primaryResponse.getOpType(), equalTo(DocWriteRequest.OpType.UPDATE)); assertThat(primaryResponse.getResponse(), equalTo(noopUpdateResponse)); - assertThat(primaryResponse.getResponse().getResult(), equalTo(DocWriteResponse.Result.NOOP)); + assertThat(primaryResponse.getResponse().getResult(), + equalTo(DocWriteResponse.Result.NOOP)); } public void testUpdateReplicaRequestWithFailure() throws Exception { - DocWriteRequest writeRequest = new IndexRequest("index", "type", "id").source(Requests.INDEX_CONTENT_TYPE, "field", "value"); + DocWriteRequest writeRequest = new IndexRequest("index", "type", "id") + .source(Requests.INDEX_CONTENT_TYPE, "field", "value"); BulkItemRequest replicaRequest = new BulkItemRequest(0, writeRequest); Exception err = new ElasticsearchException("I'm dead <(x.x)>"); Engine.IndexResult indexResult = new Engine.IndexResult(err, 0, 0); - BulkItemResultHolder failedResults = new BulkItemResultHolder(null, indexResult, replicaRequest); + BulkItemResultHolder failedResults = new BulkItemResultHolder(null, indexResult, + replicaRequest); Translog.Location location = new Translog.Location(0, 0, 0); BulkItemRequest[] items = new BulkItemRequest[0]; - BulkShardRequest bulkShardRequest = new BulkShardRequest(shardId, RefreshPolicy.NONE, items); - Translog.Location newLocation = TransportShardBulkAction.updateReplicaRequest(failedResults, - DocWriteRequest.OpType.UPDATE, location, bulkShardRequest); + BulkShardRequest bulkShardRequest = + new BulkShardRequest(shardId, RefreshPolicy.NONE, items); + BulkItemResponse primaryResponse = + TransportShardBulkAction.createPrimaryResponse( + failedResults, DocWriteRequest.OpType.UPDATE, bulkShardRequest); - BulkItemResponse primaryResponse = replicaRequest.getPrimaryResponse(); + Translog.Location newLocation = + TransportShardBulkAction.calculateTranslogLocation(location, failedResults); // Since this was not a conflict failure, the primary response // should be filled out with the failure information @@ -378,20 +410,26 @@ public void testUpdateReplicaRequestWithFailure() throws Exception { } public void testUpdateReplicaRequestWithConflictFailure() throws Exception { - DocWriteRequest writeRequest = new IndexRequest("index", "type", "id").source(Requests.INDEX_CONTENT_TYPE, "field", "value"); + DocWriteRequest writeRequest = new IndexRequest("index", "type", "id") + .source(Requests.INDEX_CONTENT_TYPE, "field", "value"); BulkItemRequest replicaRequest = new BulkItemRequest(0, writeRequest); - Exception err = new VersionConflictEngineException(shardId, "type", "id", "I'm conflicted <(;_;)>"); + Exception err = new VersionConflictEngineException(shardId, "type", "id", + "I'm conflicted <(;_;)>"); Engine.IndexResult indexResult = new Engine.IndexResult(err, 0, 0); - BulkItemResultHolder failedResults = new BulkItemResultHolder(null, indexResult, replicaRequest); + BulkItemResultHolder failedResults = new BulkItemResultHolder(null, indexResult, + replicaRequest); Translog.Location location = new 
Translog.Location(0, 0, 0); BulkItemRequest[] items = new BulkItemRequest[0]; - BulkShardRequest bulkShardRequest = new BulkShardRequest(shardId, RefreshPolicy.NONE, items); - Translog.Location newLocation = TransportShardBulkAction.updateReplicaRequest(failedResults, - DocWriteRequest.OpType.UPDATE, location, bulkShardRequest); + BulkShardRequest bulkShardRequest = + new BulkShardRequest(shardId, RefreshPolicy.NONE, items); + BulkItemResponse primaryResponse = + TransportShardBulkAction.createPrimaryResponse( + failedResults, DocWriteRequest.OpType.UPDATE, bulkShardRequest); - BulkItemResponse primaryResponse = replicaRequest.getPrimaryResponse(); + Translog.Location newLocation = + TransportShardBulkAction.calculateTranslogLocation(location, failedResults); // Since this was not a conflict failure, the primary response // should be filled out with the failure information @@ -410,22 +448,27 @@ public void testUpdateReplicaRequestWithConflictFailure() throws Exception { } public void testUpdateReplicaRequestWithSuccess() throws Exception { - DocWriteRequest writeRequest = new IndexRequest("index", "type", "id").source(Requests.INDEX_CONTENT_TYPE, "field", "value"); + DocWriteRequest writeRequest = new IndexRequest("index", "type", "id") + .source(Requests.INDEX_CONTENT_TYPE, "field", "value"); BulkItemRequest replicaRequest = new BulkItemRequest(0, writeRequest); boolean created = randomBoolean(); Translog.Location resultLocation = new Translog.Location(42, 42, 42); Engine.IndexResult indexResult = new FakeResult(1, 1, created, resultLocation); DocWriteResponse indexResponse = new IndexResponse(shardId, "index", "id", 1, 1, created); - BulkItemResultHolder goodResults = new BulkItemResultHolder(indexResponse, indexResult, replicaRequest); + BulkItemResultHolder goodResults = + new BulkItemResultHolder(indexResponse, indexResult, replicaRequest); Translog.Location originalLocation = new Translog.Location(21, 21, 21); BulkItemRequest[] items = new BulkItemRequest[0]; - BulkShardRequest bulkShardRequest = new BulkShardRequest(shardId, RefreshPolicy.NONE, items); - Translog.Location newLocation = TransportShardBulkAction.updateReplicaRequest(goodResults, - DocWriteRequest.OpType.INDEX, originalLocation, bulkShardRequest); + BulkShardRequest bulkShardRequest = + new BulkShardRequest(shardId, RefreshPolicy.NONE, items); + BulkItemResponse primaryResponse = + TransportShardBulkAction.createPrimaryResponse( + goodResults, DocWriteRequest.OpType.INDEX, bulkShardRequest); - BulkItemResponse primaryResponse = replicaRequest.getPrimaryResponse(); + Translog.Location newLocation = + TransportShardBulkAction.calculateTranslogLocation(originalLocation, goodResults); // Check that the translog is successfully advanced assertThat(newLocation, equalTo(resultLocation)); @@ -438,6 +481,61 @@ public void testUpdateReplicaRequestWithSuccess() throws Exception { assertThat(response.status(), equalTo(created ? 
RestStatus.CREATED : RestStatus.OK)); } + public void testCalculateTranslogLocation() throws Exception { + final Translog.Location original = new Translog.Location(0, 0, 0); + + DocWriteRequest writeRequest = new IndexRequest("index", "type", "id") + .source(Requests.INDEX_CONTENT_TYPE, "field", "value"); + BulkItemRequest replicaRequest = new BulkItemRequest(0, writeRequest); + BulkItemResultHolder results = new BulkItemResultHolder(null, null, replicaRequest); + + assertThat(TransportShardBulkAction.calculateTranslogLocation(original, results), + equalTo(original)); + + boolean created = randomBoolean(); + DocWriteResponse indexResponse = new IndexResponse(shardId, "index", "id", 1, 1, created); + Translog.Location newLocation = new Translog.Location(1, 1, 1); + Engine.IndexResult indexResult = new IndexResultWithLocation(randomNonNegativeLong(), + randomNonNegativeLong(), created, newLocation); + results = new BulkItemResultHolder(indexResponse, indexResult, replicaRequest); + assertThat(TransportShardBulkAction.calculateTranslogLocation(original, results), + equalTo(newLocation)); + + } + + public class IndexResultWithLocation extends Engine.IndexResult { + private final Translog.Location location; + public IndexResultWithLocation(long version, long seqNo, boolean created, + Translog.Location newLocation) { + super(version, seqNo, created); + this.location = newLocation; + } + + @Override + public Translog.Location getTranslogLocation() { + return this.location; + } + } + + public void testPrepareIndexOpOnReplica() throws Exception { + IndexMetaData metaData = indexMetaData(); + IndexShard shard = newStartedShard(false); + + DocWriteResponse primaryResponse = new IndexResponse(shardId, "index", "id", + 1, 1, randomBoolean()); + IndexRequest request = new IndexRequest("index", "type", "id") + .source(Requests.INDEX_CONTENT_TYPE, "field", "value"); + + Engine.Index op = TransportShardBulkAction.prepareIndexOperationOnReplica( + primaryResponse, request, shard); + + assertThat(op.version(), equalTo(primaryResponse.getVersion())); + assertThat(op.seqNo(), equalTo(primaryResponse.getSeqNo())); + assertThat(op.versionType(), equalTo(VersionType.EXTERNAL)); + + closeShards(shard); + } + /** * Fake IndexResult that has a settable translog location */ @@ -445,7 +543,8 @@ private static class FakeResult extends Engine.IndexResult { private final Translog.Location location; - protected FakeResult(long version, long seqNo, boolean created, Translog.Location location) { + protected FakeResult(long version, long seqNo, boolean created, + Translog.Location location) { super(version, seqNo, created); this.location = location; } @@ -458,35 +557,46 @@ public Translog.Location getTranslogLocation() { /** Doesn't perform any mapping updates */ public static class NoopMappingUpdatePerformer implements MappingUpdatePerformer { - public MappingUpdatePerformer.MappingUpdateResult updateMappingsIfNeeded(IndexShard primary, - IndexRequest request) throws Exception { - Engine.Index operation = TransportShardBulkAction.prepareIndexOperationOnPrimary(request, primary); - return new MappingUpdatePerformer.MappingUpdateResult(operation); + public void updateMappingsIfNeeded(Engine.Index operation, + ShardId shardId, + String type) throws Exception { + } + + public void verifyMappings(Engine.Index operation, ShardId shardId) throws Exception { } } - /** Always returns the given failure */ - private class FailingMappingUpdatePerformer implements MappingUpdatePerformer { + /** Always throw the given exception */ + 
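
`testCalculateTranslogLocation` above exercises the helper that only advances the shard's translog location when a bulk item actually produced one. Stripped of the Elasticsearch types, the decision is a null-safe replace; a simplified sketch with invented `Location`/`ItemResult` value classes:

```java
// Simplified stand-in for the "advance the translog location only when the
// item wrote something" decision tested above.
public class TranslogLocationDemo {

    static final class Location {
        final long generation;
        final long offset;
        Location(long generation, long offset) { this.generation = generation; this.offset = offset; }
        @Override public String toString() { return "loc[" + generation + "/" + offset + "]"; }
    }

    /** Result of one bulk item; location is null when nothing was written. */
    static final class ItemResult {
        final Location location;
        ItemResult(Location location) { this.location = location; }
    }

    static Location advance(Location current, ItemResult result) {
        if (result == null || result.location == null) {
            return current; // noop or failure: keep the old location
        }
        return result.location;
    }

    public static void main(String[] args) {
        Location start = new Location(0, 0);
        System.out.println(advance(start, new ItemResult(null)));                // stays at loc[0/0]
        System.out.println(advance(start, new ItemResult(new Location(1, 42)))); // moves to loc[1/42]
    }
}
```
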
private class ThrowingMappingUpdatePerformer implements MappingUpdatePerformer { private final Exception e; - FailingMappingUpdatePerformer(Exception e) { + ThrowingMappingUpdatePerformer(Exception e) { this.e = e; } - public MappingUpdatePerformer.MappingUpdateResult updateMappingsIfNeeded(IndexShard primary, - IndexRequest request) throws Exception { - return new MappingUpdatePerformer.MappingUpdateResult(e); + public void updateMappingsIfNeeded(Engine.Index operation, + ShardId shardId, + String type) throws Exception { + throw e; + } + + public void verifyMappings(Engine.Index operation, ShardId shardId) throws Exception { + fail("should not have gotten to this point"); } } /** Always throw the given exception */ - private class ThrowingMappingUpdatePerformer implements MappingUpdatePerformer { + private class ThrowingVerifyingMappingUpdatePerformer implements MappingUpdatePerformer { private final Exception e; - ThrowingMappingUpdatePerformer(Exception e) { + ThrowingVerifyingMappingUpdatePerformer(Exception e) { this.e = e; } - public MappingUpdatePerformer.MappingUpdateResult updateMappingsIfNeeded(IndexShard primary, - IndexRequest request) throws Exception { + public void updateMappingsIfNeeded(Engine.Index operation, + ShardId shardId, + String type) throws Exception { + } + + public void verifyMappings(Engine.Index operation, ShardId shardId) throws Exception { throw e; } } diff --git a/core/src/test/java/org/elasticsearch/action/bulk/byscroll/DeleteByQueryRequestTests.java b/core/src/test/java/org/elasticsearch/action/bulk/byscroll/DeleteByQueryRequestTests.java index 373ee7ab53c61..474072cc739bd 100644 --- a/core/src/test/java/org/elasticsearch/action/bulk/byscroll/DeleteByQueryRequestTests.java +++ b/core/src/test/java/org/elasticsearch/action/bulk/byscroll/DeleteByQueryRequestTests.java @@ -19,10 +19,16 @@ package org.elasticsearch.action.bulk.byscroll; +import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.search.SearchRequest; import org.elasticsearch.action.support.IndicesOptions; +import org.elasticsearch.index.query.QueryBuilders; import static org.apache.lucene.util.TestUtil.randomSimpleString; +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.not; +import static org.hamcrest.Matchers.nullValue; public class DeleteByQueryRequestTests extends AbstractBulkByScrollRequestTestCase { public void testDeleteteByQueryRequestImplementsIndicesRequestReplaceable() { @@ -96,4 +102,26 @@ public void testTypesSetter() { request.types(types); assertArrayEquals(request.types(), types); } + + public void testValidateGivenNoQuery() { + SearchRequest searchRequest = new SearchRequest(); + DeleteByQueryRequest deleteByQueryRequest = new DeleteByQueryRequest(searchRequest); + deleteByQueryRequest.indices("*"); + + ActionRequestValidationException e = deleteByQueryRequest.validate(); + + assertThat(e, is(not(nullValue()))); + assertThat(e.getMessage(), containsString("query is missing")); + } + + public void testValidateGivenValid() { + SearchRequest searchRequest = new SearchRequest(); + DeleteByQueryRequest deleteByQueryRequest = new DeleteByQueryRequest(searchRequest); + deleteByQueryRequest.indices("*"); + searchRequest.source().query(QueryBuilders.matchAllQuery()); + + ActionRequestValidationException e = deleteByQueryRequest.validate(); + + assertThat(e, is(nullValue())); + } } diff --git 
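
The `testValidateGivenNoQuery`/`testValidateGivenValid` pair above relies on the usual request-validation pattern: collect every problem into one exception and return null when the request is well formed. A compact sketch of that pattern with a hypothetical request class (the real `DeleteByQueryRequest` reports "query is missing" in the same spirit):

```java
import java.util.ArrayList;
import java.util.List;

// Hypothetical mirror of the validate() pattern the tests above exercise:
// collect human-readable problems and return null when the request is valid.
public class ValidationDemo {

    static final class DeleteByQueryLikeRequest {
        String[] indices = new String[0];
        String query; // null means "no query given"

        /** Returns null when valid, otherwise an exception listing what is missing. */
        IllegalArgumentException validate() {
            List<String> errors = new ArrayList<>();
            if (indices.length == 0) {
                errors.add("index is missing");
            }
            if (query == null) {
                errors.add("query is missing");
            }
            return errors.isEmpty() ? null : new IllegalArgumentException("Validation failed: " + errors);
        }
    }

    public static void main(String[] args) {
        DeleteByQueryLikeRequest request = new DeleteByQueryLikeRequest();
        request.indices = new String[] {"*"};
        System.out.println(request.validate()); // reports "query is missing"

        request.query = "{\"match_all\":{}}";
        System.out.println(request.validate()); // null, i.e. valid
    }
}
```
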
a/core/src/test/java/org/elasticsearch/action/fieldcaps/FieldCapabilitiesRequestTests.java b/core/src/test/java/org/elasticsearch/action/fieldcaps/FieldCapabilitiesRequestTests.java new file mode 100644 index 0000000000000..abc89e356259e --- /dev/null +++ b/core/src/test/java/org/elasticsearch/action/fieldcaps/FieldCapabilitiesRequestTests.java @@ -0,0 +1,53 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.action.fieldcaps; + +import org.elasticsearch.common.io.stream.BytesStreamOutput; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.test.ESTestCase; + +import java.io.IOException; + +public class FieldCapabilitiesRequestTests extends ESTestCase { + private FieldCapabilitiesRequest randomRequest() { + FieldCapabilitiesRequest request = new FieldCapabilitiesRequest(); + int size = randomIntBetween(1, 20); + String[] randomFields = new String[size]; + for (int i = 0; i < size; i++) { + randomFields[i] = randomAsciiOfLengthBetween(5, 10); + } + request.fields(randomFields); + return request; + } + + public void testFieldCapsRequestSerialization() throws IOException { + for (int i = 0; i < 20; i++) { + FieldCapabilitiesRequest request = randomRequest(); + BytesStreamOutput output = new BytesStreamOutput(); + request.writeTo(output); + output.flush(); + StreamInput input = output.bytes().streamInput(); + FieldCapabilitiesRequest deserialized = new FieldCapabilitiesRequest(); + deserialized.readFrom(input); + assertEquals(deserialized, request); + assertEquals(deserialized.hashCode(), request.hashCode()); + } + } +} diff --git a/core/src/test/java/org/elasticsearch/action/fieldcaps/FieldCapabilitiesResponseTests.java b/core/src/test/java/org/elasticsearch/action/fieldcaps/FieldCapabilitiesResponseTests.java new file mode 100644 index 0000000000000..8d64f9a538c56 --- /dev/null +++ b/core/src/test/java/org/elasticsearch/action/fieldcaps/FieldCapabilitiesResponseTests.java @@ -0,0 +1,60 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
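
`FieldCapabilitiesRequestTests` above uses the standard wire round-trip check: build a randomized instance, serialize it, deserialize it, and assert that value equality and hash codes survive. The same pattern in plain `java.io` streams, with a toy `FieldsRequest` standing in for the Elasticsearch request and `DataOutputStream`/`DataInputStream` standing in for `BytesStreamOutput`/`StreamInput`:

```java
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;
import java.util.Arrays;

// Minimal round-trip serialization test pattern over a toy request type.
public class RoundTripDemo {

    static final class FieldsRequest {
        final String[] fields;
        FieldsRequest(String[] fields) { this.fields = fields; }

        void writeTo(DataOutputStream out) throws IOException {
            out.writeInt(fields.length);
            for (String f : fields) {
                out.writeUTF(f);
            }
        }

        static FieldsRequest readFrom(DataInputStream in) throws IOException {
            String[] fields = new String[in.readInt()];
            for (int i = 0; i < fields.length; i++) {
                fields[i] = in.readUTF();
            }
            return new FieldsRequest(fields);
        }

        @Override public boolean equals(Object o) {
            return o instanceof FieldsRequest && Arrays.equals(fields, ((FieldsRequest) o).fields);
        }

        @Override public int hashCode() {
            return Arrays.hashCode(fields);
        }
    }

    public static void main(String[] args) throws IOException {
        FieldsRequest original = new FieldsRequest(new String[] {"rating", "title", "body"});

        ByteArrayOutputStream bytes = new ByteArrayOutputStream();
        original.writeTo(new DataOutputStream(bytes));

        FieldsRequest copy = FieldsRequest.readFrom(
            new DataInputStream(new ByteArrayInputStream(bytes.toByteArray())));

        // The assertions the test makes: same value, same hash code.
        if (!original.equals(copy) || original.hashCode() != copy.hashCode()) {
            throw new AssertionError("round trip changed the request");
        }
        System.out.println("round trip ok: " + Arrays.toString(copy.fields));
    }
}
```
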
+ */
+
+package org.elasticsearch.action.fieldcaps;
+
+import org.elasticsearch.common.io.stream.BytesStreamOutput;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.test.ESTestCase;
+
+import java.io.IOException;
+import java.util.HashMap;
+import java.util.Map;
+
+public class FieldCapabilitiesResponseTests extends ESTestCase {
+    private FieldCapabilitiesResponse randomResponse() {
+        Map<String, Map<String, FieldCapabilities>> fieldMap = new HashMap<>();
+        int numFields = randomInt(10);
+        for (int i = 0; i < numFields; i++) {
+            String fieldName = randomAsciiOfLengthBetween(5, 10);
+            int numIndices = randomIntBetween(1, 5);
+            Map<String, FieldCapabilities> indexFieldMap = new HashMap<>();
+            for (int j = 0; j < numIndices; j++) {
+                String index = randomAsciiOfLengthBetween(10, 20);
+                indexFieldMap.put(index, FieldCapabilitiesTests.randomFieldCaps());
+            }
+            fieldMap.put(fieldName, indexFieldMap);
+        }
+        return new FieldCapabilitiesResponse(fieldMap);
+    }
+
+    public void testSerialization() throws IOException {
+        for (int i = 0; i < 20; i++) {
+            FieldCapabilitiesResponse response = randomResponse();
+            BytesStreamOutput output = new BytesStreamOutput();
+            response.writeTo(output);
+            output.flush();
+            StreamInput input = output.bytes().streamInput();
+            FieldCapabilitiesResponse deserialized = new FieldCapabilitiesResponse();
+            deserialized.readFrom(input);
+            assertEquals(deserialized, response);
+            assertEquals(deserialized.hashCode(), response.hashCode());
+        }
+    }
+}
diff --git a/core/src/test/java/org/elasticsearch/action/fieldcaps/FieldCapabilitiesTests.java b/core/src/test/java/org/elasticsearch/action/fieldcaps/FieldCapabilitiesTests.java
new file mode 100644
index 0000000000000..a1df6b6529b8a
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/action/fieldcaps/FieldCapabilitiesTests.java
@@ -0,0 +1,109 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */ + +package org.elasticsearch.action.fieldcaps; + +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.test.AbstractWireSerializingTestCase; + +import static org.hamcrest.Matchers.equalTo; + +public class FieldCapabilitiesTests extends AbstractWireSerializingTestCase { + @Override + protected FieldCapabilities createTestInstance() { + return randomFieldCaps(); + } + + @Override + protected Writeable.Reader instanceReader() { + return FieldCapabilities::new; + } + + public void testBuilder() { + FieldCapabilities.Builder builder = new FieldCapabilities.Builder("field", "type"); + builder.add("index1", true, false); + builder.add("index2", true, false); + builder.add("index3", true, false); + + { + FieldCapabilities cap1 = builder.build(false); + assertThat(cap1.isSearchable(), equalTo(true)); + assertThat(cap1.isAggregatable(), equalTo(false)); + assertNull(cap1.indices()); + assertNull(cap1.nonSearchableIndices()); + assertNull(cap1.nonAggregatableIndices()); + + FieldCapabilities cap2 = builder.build(true); + assertThat(cap2.isSearchable(), equalTo(true)); + assertThat(cap2.isAggregatable(), equalTo(false)); + assertThat(cap2.indices().length, equalTo(3)); + assertThat(cap2.indices(), equalTo(new String[]{"index1", "index2", "index3"})); + assertNull(cap2.nonSearchableIndices()); + assertNull(cap2.nonAggregatableIndices()); + } + + builder = new FieldCapabilities.Builder("field", "type"); + builder.add("index1", false, true); + builder.add("index2", true, false); + builder.add("index3", false, false); + { + FieldCapabilities cap1 = builder.build(false); + assertThat(cap1.isSearchable(), equalTo(false)); + assertThat(cap1.isAggregatable(), equalTo(false)); + assertNull(cap1.indices()); + assertThat(cap1.nonSearchableIndices(), equalTo(new String[]{"index1", "index3"})); + assertThat(cap1.nonAggregatableIndices(), equalTo(new String[]{"index2", "index3"})); + + FieldCapabilities cap2 = builder.build(true); + assertThat(cap2.isSearchable(), equalTo(false)); + assertThat(cap2.isAggregatable(), equalTo(false)); + assertThat(cap2.indices().length, equalTo(3)); + assertThat(cap2.indices(), equalTo(new String[]{"index1", "index2", "index3"})); + assertThat(cap1.nonSearchableIndices(), equalTo(new String[]{"index1", "index3"})); + assertThat(cap1.nonAggregatableIndices(), equalTo(new String[]{"index2", "index3"})); + } + } + + static FieldCapabilities randomFieldCaps() { + String[] indices = null; + if (randomBoolean()) { + indices = new String[randomIntBetween(1, 5)]; + for (int i = 0; i < indices.length; i++) { + indices[i] = randomAsciiOfLengthBetween(5, 20); + } + } + String[] nonSearchableIndices = null; + if (randomBoolean()) { + nonSearchableIndices = new String[randomIntBetween(0, 5)]; + for (int i = 0; i < nonSearchableIndices.length; i++) { + nonSearchableIndices[i] = randomAsciiOfLengthBetween(5, 20); + } + } + String[] nonAggregatableIndices = null; + if (randomBoolean()) { + nonAggregatableIndices = new String[randomIntBetween(0, 5)]; + for (int i = 0; i < nonAggregatableIndices.length; i++) { + nonAggregatableIndices[i] = randomAsciiOfLengthBetween(5, 20); + } + } + return new FieldCapabilities(randomAsciiOfLengthBetween(5, 20), + randomAsciiOfLengthBetween(5, 20), randomBoolean(), randomBoolean(), + indices, nonSearchableIndices, nonAggregatableIndices); + } +} diff --git a/core/src/test/java/org/elasticsearch/action/get/MultiGetRequestTests.java b/core/src/test/java/org/elasticsearch/action/get/MultiGetRequestTests.java index 
6c79b20774c8d..4e539d8582fc3 100644 --- a/core/src/test/java/org/elasticsearch/action/get/MultiGetRequestTests.java +++ b/core/src/test/java/org/elasticsearch/action/get/MultiGetRequestTests.java @@ -19,13 +19,78 @@ package org.elasticsearch.action.get; +import org.elasticsearch.common.ParsingException; +import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.search.fetch.subphase.FetchSourceContext; import org.elasticsearch.test.ESTestCase; +import java.io.IOException; + +import static org.hamcrest.Matchers.containsString; + public class MultiGetRequestTests extends ESTestCase { + public void testAddWithInvalidKey() throws IOException { + final XContentBuilder builder = XContentFactory.jsonBuilder(); + builder.startObject(); + { + builder.startArray("doc"); + { + builder.startObject(); + { + builder.field("_type", "type"); + builder.field("_id", "1"); + } + builder.endObject(); + } + builder.endArray(); + } + builder.endObject(); + final XContentParser parser = createParser(builder); + final MultiGetRequest mgr = new MultiGetRequest(); + final ParsingException e = expectThrows( + ParsingException.class, + () -> { + final String defaultIndex = randomAsciiOfLength(5); + final String defaultType = randomAsciiOfLength(3); + final FetchSourceContext fetchSource = FetchSourceContext.FETCH_SOURCE; + mgr.add(defaultIndex, defaultType, null, fetchSource, null, parser, true); + }); + assertThat( + e.toString(), + containsString("unknown key [doc] for a START_ARRAY, expected [docs] or [ids]")); + } + + public void testUnexpectedField() throws IOException { + final XContentBuilder builder = XContentFactory.jsonBuilder(); + builder.startObject(); + { + builder.startObject("docs"); + { + builder.field("_type", "type"); + builder.field("_id", "1"); + } + builder.endObject(); + } + builder.endObject(); + final XContentParser parser = createParser(builder); + final MultiGetRequest mgr = new MultiGetRequest(); + final ParsingException e = expectThrows( + ParsingException.class, + () -> { + final String defaultIndex = randomAsciiOfLength(5); + final String defaultType = randomAsciiOfLength(3); + final FetchSourceContext fetchSource = FetchSourceContext.FETCH_SOURCE; + mgr.add(defaultIndex, defaultType, null, fetchSource, null, parser, true); + }); + assertThat( + e.toString(), + containsString( + "unexpected token [START_OBJECT], expected [FIELD_NAME] or [START_ARRAY]")); + } + public void testAddWithInvalidSourceValueIsRejected() throws Exception { String sourceValue = randomFrom("on", "off", "0", "1"); XContentParser parser = createParser(XContentFactory.jsonBuilder() diff --git a/core/src/test/java/org/elasticsearch/action/search/AbstractSearchAsyncActionTookTests.java b/core/src/test/java/org/elasticsearch/action/search/AbstractSearchAsyncActionTookTests.java new file mode 100644 index 0000000000000..beec582b13f2b --- /dev/null +++ b/core/src/test/java/org/elasticsearch/action/search/AbstractSearchAsyncActionTookTests.java @@ -0,0 +1,161 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.action.search; + +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.cluster.routing.GroupShardsIterator; +import org.elasticsearch.cluster.routing.ShardIterator; +import org.elasticsearch.cluster.routing.ShardRouting; +import org.elasticsearch.index.shard.ShardId; +import org.elasticsearch.search.SearchPhaseResult; +import org.elasticsearch.test.ESTestCase; + +import java.util.Collections; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicLong; + +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.greaterThanOrEqualTo; + +public class AbstractSearchAsyncActionTookTests extends ESTestCase { + + private AbstractSearchAsyncAction createAction( + final boolean controlled, + final AtomicLong expected) { + + final Runnable runnable; + final TransportSearchAction.SearchTimeProvider timeProvider; + if (controlled) { + runnable = () -> expected.set(randomNonNegativeLong()); + timeProvider = new TransportSearchAction.SearchTimeProvider(0, 0, expected::get); + } else { + runnable = () -> { + long elapsed = spinForAtLeastNMilliseconds(randomIntBetween(1, 10)); + expected.set(elapsed); + }; + timeProvider = new TransportSearchAction.SearchTimeProvider( + 0, + System.nanoTime(), + System::nanoTime); + } + + final ShardIterator it = new ShardIterator() { + @Override + public ShardId shardId() { + return null; + } + + @Override + public void reset() { + + } + + @Override + public int compareTo(ShardIterator o) { + return 0; + } + + @Override + public int size() { + return 0; + } + + @Override + public int sizeActive() { + return 0; + } + + @Override + public ShardRouting nextOrNull() { + return null; + } + + @Override + public int remaining() { + return 0; + } + + @Override + public Iterable asUnordered() { + return null; + } + }; + + return new AbstractSearchAsyncAction( + "test", + null, + null, + null, + null, + null, + null, + null, + null, + new GroupShardsIterator(Collections.singletonList(it)), + timeProvider, + 0, + null, + null + ) { + @Override + protected SearchPhase getNextPhase( + final SearchPhaseResults results, + final SearchPhaseContext context) { + return null; + } + + @Override + protected void executePhaseOnShard( + final ShardIterator shardIt, + final ShardRouting shard, + final SearchActionListener listener) { + + } + + @Override + long buildTookInMillis() { + runnable.run(); + return super.buildTookInMillis(); + } + }; + } + + public void testTookWithControlledClock() { + runTestTook(true); + } + + public void testTookWithRealClock() { + runTestTook(false); + } + + private void runTestTook(final boolean controlled) { + final AtomicLong expected = new AtomicLong(); + AbstractSearchAsyncAction action = createAction(controlled, expected); + final long actual = action.buildTookInMillis(); + if (controlled) { + // with a controlled clock, we can assert the exact took time + assertThat(actual, equalTo(TimeUnit.NANOSECONDS.toMillis(expected.get()))); + } else { + // with a real clock, the best we can say is that it took as long as we spun for + 
assertThat(actual, greaterThanOrEqualTo(TimeUnit.NANOSECONDS.toMillis(expected.get()))); + } + } + +} diff --git a/core/src/test/java/org/elasticsearch/action/search/CountedCollectorTests.java b/core/src/test/java/org/elasticsearch/action/search/CountedCollectorTests.java index 6995ad93f25fd..ccb75ff3ab4d0 100644 --- a/core/src/test/java/org/elasticsearch/action/search/CountedCollectorTests.java +++ b/core/src/test/java/org/elasticsearch/action/search/CountedCollectorTests.java @@ -46,7 +46,7 @@ public void testCollect() throws InterruptedException { runnable.run(); } }; - CountedCollector collector = new CountedCollector<>(results::set, numResultsExpected, + CountedCollector collector = new CountedCollector<>(r -> results.set(r.getShardIndex(), r), numResultsExpected, latch::countDown, context); for (int i = 0; i < numResultsExpected; i++) { int shardID = i; @@ -57,8 +57,12 @@ public void testCollect() throws InterruptedException { break; case 1: state.add(1); - executor.execute(() -> collector.onResult(shardID, new DfsSearchResult(shardID, null), new SearchShardTarget("foo", - new Index("bar", "baz"), shardID))); + executor.execute(() -> { + DfsSearchResult dfsSearchResult = new DfsSearchResult(shardID, null); + dfsSearchResult.setShardIndex(shardID); + dfsSearchResult.setSearchShardTarget(new SearchShardTarget("foo", + new Index("bar", "baz"), shardID)); + collector.onResult(dfsSearchResult);}); break; case 2: state.add(2); @@ -79,7 +83,7 @@ public void testCollect() throws InterruptedException { break; case 1: assertNotNull(results.get(i)); - assertEquals(i, results.get(i).id()); + assertEquals(i, results.get(i).getRequestId()); break; case 2: final int shardId = i; diff --git a/core/src/test/java/org/elasticsearch/action/search/DfsQueryPhaseTests.java b/core/src/test/java/org/elasticsearch/action/search/DfsQueryPhaseTests.java index ba01559e0f063..c2f21a7cc2ca2 100644 --- a/core/src/test/java/org/elasticsearch/action/search/DfsQueryPhaseTests.java +++ b/core/src/test/java/org/elasticsearch/action/search/DfsQueryPhaseTests.java @@ -18,52 +18,42 @@ */ package org.elasticsearch.action.search; -import org.apache.logging.log4j.Logger; import org.apache.lucene.index.Term; import org.apache.lucene.search.ScoreDoc; import org.apache.lucene.search.TermStatistics; import org.apache.lucene.search.TopDocs; import org.apache.lucene.store.MockDirectoryWrapper; import org.elasticsearch.action.ActionListener; -import org.elasticsearch.cluster.routing.ShardIterator; -import org.elasticsearch.cluster.routing.ShardRouting; -import org.elasticsearch.common.Nullable; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.common.util.concurrent.AtomicArray; import org.elasticsearch.index.Index; import org.elasticsearch.search.DocValueFormat; -import org.elasticsearch.search.SearchHit; -import org.elasticsearch.search.SearchHits; +import org.elasticsearch.search.SearchPhaseResult; import org.elasticsearch.search.SearchShardTarget; import org.elasticsearch.search.dfs.DfsSearchResult; -import org.elasticsearch.search.fetch.FetchSearchResult; -import org.elasticsearch.search.fetch.QueryFetchSearchResult; -import org.elasticsearch.search.fetch.ShardFetchSearchRequest; -import org.elasticsearch.search.internal.InternalSearchResponse; -import org.elasticsearch.search.internal.ShardSearchTransportRequest; import org.elasticsearch.search.query.QuerySearchRequest; import org.elasticsearch.search.query.QuerySearchResult; -import 
org.elasticsearch.search.query.QuerySearchResultProvider; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.transport.Transport; import java.io.IOException; import java.io.UncheckedIOException; -import java.util.ArrayList; -import java.util.Collections; -import java.util.List; -import java.util.concurrent.CountDownLatch; -import java.util.concurrent.atomic.AtomicInteger; import java.util.concurrent.atomic.AtomicReference; public class DfsQueryPhaseTests extends ESTestCase { + private static DfsSearchResult newSearchResult(int shardIndex, long requestId, SearchShardTarget target) { + DfsSearchResult result = new DfsSearchResult(requestId, target); + result.setShardIndex(shardIndex); + return result; + } + public void testDfsWith2Shards() throws IOException { AtomicArray results = new AtomicArray<>(2); - AtomicReference> responseRef = new AtomicReference<>(); - results.set(0, new DfsSearchResult(1, new SearchShardTarget("node1", new Index("test", "na"), 0))); - results.set(1, new DfsSearchResult(2, new SearchShardTarget("node2", new Index("test", "na"), 0))); + AtomicReference> responseRef = new AtomicReference<>(); + results.set(0, newSearchResult(0, 1, new SearchShardTarget("node1", new Index("test", "na"), 0))); + results.set(1, newSearchResult(1, 2, new SearchShardTarget("node2", new Index("test", "na"), 0))); results.get(0).termsStatistics(new Term[0], new TermStatistics[0]); results.get(1).termsStatistics(new Term[0], new TermStatistics[0]); @@ -73,7 +63,7 @@ public void testDfsWith2Shards() throws IOException { @Override public void sendExecuteQuery(Transport.Connection connection, QuerySearchRequest request, SearchTask task, - ActionListener listener) { + SearchActionListener listener) { if (request.id() == 1) { QuerySearchResult queryResult = new QuerySearchResult(123, new SearchShardTarget("node1", new Index("test", "na"), 0)); queryResult.topDocs(new TopDocs(1, new ScoreDoc[] {new ScoreDoc(42, 1.0F)}, 2.0F), new DocValueFormat[0]); @@ -116,9 +106,9 @@ public void run() throws IOException { public void testDfsWith1ShardFailed() throws IOException { AtomicArray results = new AtomicArray<>(2); - AtomicReference> responseRef = new AtomicReference<>(); - results.set(0, new DfsSearchResult(1, new SearchShardTarget("node1", new Index("test", "na"), 0))); - results.set(1, new DfsSearchResult(2, new SearchShardTarget("node2", new Index("test", "na"), 0))); + AtomicReference> responseRef = new AtomicReference<>(); + results.set(0, newSearchResult(0, 1, new SearchShardTarget("node1", new Index("test", "na"), 0))); + results.set(1, newSearchResult(1, 2, new SearchShardTarget("node2", new Index("test", "na"), 0))); results.get(0).termsStatistics(new Term[0], new TermStatistics[0]); results.get(1).termsStatistics(new Term[0], new TermStatistics[0]); @@ -128,7 +118,7 @@ public void testDfsWith1ShardFailed() throws IOException { @Override public void sendExecuteQuery(Transport.Connection connection, QuerySearchRequest request, SearchTask task, - ActionListener listener) { + SearchActionListener listener) { if (request.id() == 1) { QuerySearchResult queryResult = new QuerySearchResult(123, new SearchShardTarget("node1", new Index("test", "na"), 0)); queryResult.topDocs(new TopDocs(1, new ScoreDoc[] {new ScoreDoc(42, 1.0F)}, 2.0F), new DocValueFormat[0]); @@ -171,9 +161,9 @@ public void run() throws IOException { public void testFailPhaseOnException() throws IOException { AtomicArray results = new AtomicArray<>(2); - AtomicReference> responseRef = new AtomicReference<>(); - 
results.set(0, new DfsSearchResult(1, new SearchShardTarget("node1", new Index("test", "na"), 0))); - results.set(1, new DfsSearchResult(2, new SearchShardTarget("node2", new Index("test", "na"), 0))); + AtomicReference> responseRef = new AtomicReference<>(); + results.set(0, newSearchResult(0, 1, new SearchShardTarget("node1", new Index("test", "na"), 0))); + results.set(1, newSearchResult(1, 2, new SearchShardTarget("node2", new Index("test", "na"), 0))); results.get(0).termsStatistics(new Term[0], new TermStatistics[0]); results.get(1).termsStatistics(new Term[0], new TermStatistics[0]); @@ -183,7 +173,7 @@ public void testFailPhaseOnException() throws IOException { @Override public void sendExecuteQuery(Transport.Connection connection, QuerySearchRequest request, SearchTask task, - ActionListener listener) { + SearchActionListener listener) { if (request.id() == 1) { QuerySearchResult queryResult = new QuerySearchResult(123, new SearchShardTarget("node1", new Index("test", "na"), 0)); queryResult.topDocs(new TopDocs(1, new ScoreDoc[] {new ScoreDoc(42, 1.0F)}, 2.0F), new DocValueFormat[0]); diff --git a/core/src/test/java/org/elasticsearch/action/search/FetchSearchPhaseTests.java b/core/src/test/java/org/elasticsearch/action/search/FetchSearchPhaseTests.java index 14c2eb6f63fd2..239f8f10a413a 100644 --- a/core/src/test/java/org/elasticsearch/action/search/FetchSearchPhaseTests.java +++ b/core/src/test/java/org/elasticsearch/action/search/FetchSearchPhaseTests.java @@ -21,20 +21,18 @@ import org.apache.lucene.search.ScoreDoc; import org.apache.lucene.search.TopDocs; import org.apache.lucene.store.MockDirectoryWrapper; -import org.elasticsearch.action.ActionListener; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.BigArrays; -import org.elasticsearch.common.util.concurrent.AtomicArray; import org.elasticsearch.index.Index; import org.elasticsearch.search.DocValueFormat; import org.elasticsearch.search.SearchHit; import org.elasticsearch.search.SearchHits; +import org.elasticsearch.search.SearchPhaseResult; import org.elasticsearch.search.SearchShardTarget; import org.elasticsearch.search.fetch.FetchSearchResult; import org.elasticsearch.search.fetch.QueryFetchSearchResult; import org.elasticsearch.search.fetch.ShardFetchSearchRequest; import org.elasticsearch.search.query.QuerySearchResult; -import org.elasticsearch.search.query.QuerySearchResultProvider; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.transport.Transport; @@ -48,7 +46,7 @@ public class FetchSearchPhaseTests extends ESTestCase { public void testShortcutQueryAndFetchOptimization() throws IOException { SearchPhaseController controller = new SearchPhaseController(Settings.EMPTY, BigArrays.NON_RECYCLING_INSTANCE, null); MockSearchPhaseContext mockSearchPhaseContext = new MockSearchPhaseContext(1); - InitialSearchPhase.SearchPhaseResults results = + InitialSearchPhase.SearchPhaseResults results = controller.newSearchPhaseResults(mockSearchPhaseContext.getRequest(), 1); AtomicReference responseRef = new AtomicReference<>(); boolean hasHits = randomBoolean(); @@ -59,7 +57,9 @@ public void testShortcutQueryAndFetchOptimization() throws IOException { queryResult.size(1); FetchSearchResult fetchResult = new FetchSearchResult(); fetchResult.hits(new SearchHits(new SearchHit[] {new SearchHit(42)}, 1, 1.0F)); - results.consumeResult(0, new QueryFetchSearchResult(queryResult, fetchResult)); + QueryFetchSearchResult fetchSearchResult = new QueryFetchSearchResult(queryResult, 
fetchResult); + fetchSearchResult.setShardIndex(0); + results.consumeResult(fetchSearchResult); numHits = 1; } else { numHits = 0; @@ -86,25 +86,27 @@ public void run() throws IOException { public void testFetchTwoDocument() throws IOException { MockSearchPhaseContext mockSearchPhaseContext = new MockSearchPhaseContext(2); SearchPhaseController controller = new SearchPhaseController(Settings.EMPTY, BigArrays.NON_RECYCLING_INSTANCE, null); - InitialSearchPhase.SearchPhaseResults results = + InitialSearchPhase.SearchPhaseResults results = controller.newSearchPhaseResults(mockSearchPhaseContext.getRequest(), 2); AtomicReference responseRef = new AtomicReference<>(); int resultSetSize = randomIntBetween(2, 10); QuerySearchResult queryResult = new QuerySearchResult(123, new SearchShardTarget("node1", new Index("test", "na"), 0)); queryResult.topDocs(new TopDocs(1, new ScoreDoc[] {new ScoreDoc(42, 1.0F)}, 2.0F), new DocValueFormat[0]); queryResult.size(resultSetSize); // the size of the result set - results.consumeResult(0, queryResult); + queryResult.setShardIndex(0); + results.consumeResult(queryResult); queryResult = new QuerySearchResult(321, new SearchShardTarget("node2", new Index("test", "na"), 1)); queryResult.topDocs(new TopDocs(1, new ScoreDoc[] {new ScoreDoc(84, 2.0F)}, 2.0F), new DocValueFormat[0]); queryResult.size(resultSetSize); - results.consumeResult(1, queryResult); + queryResult.setShardIndex(1); + results.consumeResult(queryResult); SearchTransportService searchTransportService = new SearchTransportService( Settings.builder().put("search.remote.connect", false).build(), null, null) { @Override public void sendExecuteFetch(Transport.Connection connection, ShardFetchSearchRequest request, SearchTask task, - ActionListener listener) { + SearchActionListener listener) { FetchSearchResult fetchResult = new FetchSearchResult(); if (request.id() == 321) { fetchResult.hits(new SearchHits(new SearchHit[] {new SearchHit(84)}, 1, 2.0F)); @@ -138,25 +140,27 @@ public void run() throws IOException { public void testFailFetchOneDoc() throws IOException { MockSearchPhaseContext mockSearchPhaseContext = new MockSearchPhaseContext(2); SearchPhaseController controller = new SearchPhaseController(Settings.EMPTY, BigArrays.NON_RECYCLING_INSTANCE, null); - InitialSearchPhase.SearchPhaseResults results = + InitialSearchPhase.SearchPhaseResults results = controller.newSearchPhaseResults(mockSearchPhaseContext.getRequest(), 2); AtomicReference responseRef = new AtomicReference<>(); int resultSetSize = randomIntBetween(2, 10); QuerySearchResult queryResult = new QuerySearchResult(123, new SearchShardTarget("node1", new Index("test", "na"), 0)); queryResult.topDocs(new TopDocs(1, new ScoreDoc[] {new ScoreDoc(42, 1.0F)}, 2.0F), new DocValueFormat[0]); queryResult.size(resultSetSize); // the size of the result set - results.consumeResult(0, queryResult); + queryResult.setShardIndex(0); + results.consumeResult(queryResult); queryResult = new QuerySearchResult(321, new SearchShardTarget("node2", new Index("test", "na"), 1)); queryResult.topDocs(new TopDocs(1, new ScoreDoc[] {new ScoreDoc(84, 2.0F)}, 2.0F), new DocValueFormat[0]); queryResult.size(resultSetSize); - results.consumeResult(1, queryResult); + queryResult.setShardIndex(1); + results.consumeResult(queryResult); SearchTransportService searchTransportService = new SearchTransportService( Settings.builder().put("search.remote.connect", false).build(), null, null) { @Override public void sendExecuteFetch(Transport.Connection connection, 
ShardFetchSearchRequest request, SearchTask task, - ActionListener listener) { + SearchActionListener listener) { if (request.id() == 321) { FetchSearchResult fetchResult = new FetchSearchResult(); fetchResult.hits(new SearchHits(new SearchHit[] {new SearchHit(84)}, 1, 2.0F)); @@ -195,20 +199,21 @@ public void testFetchDocsConcurrently() throws IOException, InterruptedException int numHits = randomIntBetween(2, 100); // also numshards --> 1 hit per shard SearchPhaseController controller = new SearchPhaseController(Settings.EMPTY, BigArrays.NON_RECYCLING_INSTANCE, null); MockSearchPhaseContext mockSearchPhaseContext = new MockSearchPhaseContext(numHits); - InitialSearchPhase.SearchPhaseResults results = + InitialSearchPhase.SearchPhaseResults results = controller.newSearchPhaseResults(mockSearchPhaseContext.getRequest(), numHits); AtomicReference responseRef = new AtomicReference<>(); for (int i = 0; i < numHits; i++) { QuerySearchResult queryResult = new QuerySearchResult(i, new SearchShardTarget("node1", new Index("test", "na"), 0)); queryResult.topDocs(new TopDocs(1, new ScoreDoc[] {new ScoreDoc(i+1, i)}, i), new DocValueFormat[0]); queryResult.size(resultSetSize); // the size of the result set - results.consumeResult(i, queryResult); + queryResult.setShardIndex(i); + results.consumeResult(queryResult); } SearchTransportService searchTransportService = new SearchTransportService( Settings.builder().put("search.remote.connect", false).build(), null, null) { @Override public void sendExecuteFetch(Transport.Connection connection, ShardFetchSearchRequest request, SearchTask task, - ActionListener listener) { + SearchActionListener listener) { new Thread(() -> { FetchSearchResult fetchResult = new FetchSearchResult(); fetchResult.hits(new SearchHits(new SearchHit[] {new SearchHit((int) (request.id()+1))}, 1, 100F)); @@ -249,25 +254,27 @@ public void run() throws IOException { public void testExceptionFailsPhase() throws IOException { MockSearchPhaseContext mockSearchPhaseContext = new MockSearchPhaseContext(2); SearchPhaseController controller = new SearchPhaseController(Settings.EMPTY, BigArrays.NON_RECYCLING_INSTANCE, null); - InitialSearchPhase.SearchPhaseResults results = + InitialSearchPhase.SearchPhaseResults results = controller.newSearchPhaseResults(mockSearchPhaseContext.getRequest(), 2); AtomicReference responseRef = new AtomicReference<>(); int resultSetSize = randomIntBetween(2, 10); QuerySearchResult queryResult = new QuerySearchResult(123, new SearchShardTarget("node1", new Index("test", "na"), 0)); queryResult.topDocs(new TopDocs(1, new ScoreDoc[] {new ScoreDoc(42, 1.0F)}, 2.0F), new DocValueFormat[0]); queryResult.size(resultSetSize); // the size of the result set - results.consumeResult(0, queryResult); + queryResult.setShardIndex(0); + results.consumeResult(queryResult); queryResult = new QuerySearchResult(321, new SearchShardTarget("node2", new Index("test", "na"), 1)); queryResult.topDocs(new TopDocs(1, new ScoreDoc[] {new ScoreDoc(84, 2.0F)}, 2.0F), new DocValueFormat[0]); queryResult.size(resultSetSize); - results.consumeResult(1, queryResult); + queryResult.setShardIndex(1); + results.consumeResult(queryResult); AtomicInteger numFetches = new AtomicInteger(0); SearchTransportService searchTransportService = new SearchTransportService( Settings.builder().put("search.remote.connect", false).build(), null, null) { @Override public void sendExecuteFetch(Transport.Connection connection, ShardFetchSearchRequest request, SearchTask task, - ActionListener listener) { + 
SearchActionListener listener) { FetchSearchResult fetchResult = new FetchSearchResult(); if (numFetches.incrementAndGet() == 1) { throw new RuntimeException("BOOM"); @@ -300,25 +307,27 @@ public void run() throws IOException { public void testCleanupIrrelevantContexts() throws IOException { // contexts that are not fetched should be cleaned up MockSearchPhaseContext mockSearchPhaseContext = new MockSearchPhaseContext(2); SearchPhaseController controller = new SearchPhaseController(Settings.EMPTY, BigArrays.NON_RECYCLING_INSTANCE, null); - InitialSearchPhase.SearchPhaseResults results = + InitialSearchPhase.SearchPhaseResults results = controller.newSearchPhaseResults(mockSearchPhaseContext.getRequest(), 2); AtomicReference responseRef = new AtomicReference<>(); int resultSetSize = 1; QuerySearchResult queryResult = new QuerySearchResult(123, new SearchShardTarget("node1", new Index("test", "na"), 0)); queryResult.topDocs(new TopDocs(1, new ScoreDoc[] {new ScoreDoc(42, 1.0F)}, 2.0F), new DocValueFormat[0]); queryResult.size(resultSetSize); // the size of the result set - results.consumeResult(0, queryResult); + queryResult.setShardIndex(0); + results.consumeResult(queryResult); queryResult = new QuerySearchResult(321, new SearchShardTarget("node2", new Index("test", "na"), 1)); queryResult.topDocs(new TopDocs(1, new ScoreDoc[] {new ScoreDoc(84, 2.0F)}, 2.0F), new DocValueFormat[0]); queryResult.size(resultSetSize); - results.consumeResult(1, queryResult); + queryResult.setShardIndex(1); + results.consumeResult(queryResult); SearchTransportService searchTransportService = new SearchTransportService( Settings.builder().put("search.remote.connect", false).build(), null, null) { @Override public void sendExecuteFetch(Transport.Connection connection, ShardFetchSearchRequest request, SearchTask task, - ActionListener listener) { + SearchActionListener listener) { FetchSearchResult fetchResult = new FetchSearchResult(); if (request.id() == 321) { fetchResult.hits(new SearchHits(new SearchHit[] {new SearchHit(84)}, 1, 2.0F)); diff --git a/core/src/test/java/org/elasticsearch/action/search/RemoteClusterConnectionTests.java b/core/src/test/java/org/elasticsearch/action/search/RemoteClusterConnectionTests.java index f2493adac1ead..15c735cafa683 100644 --- a/core/src/test/java/org/elasticsearch/action/search/RemoteClusterConnectionTests.java +++ b/core/src/test/java/org/elasticsearch/action/search/RemoteClusterConnectionTests.java @@ -36,6 +36,7 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.transport.TransportAddress; import org.elasticsearch.common.util.CancellableThreads; +import org.elasticsearch.discovery.Discovery; import org.elasticsearch.mocksocket.MockServerSocket; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.transport.MockTransportService; @@ -81,23 +82,33 @@ private MockTransportService startTransport(String id, List known } public static MockTransportService startTransport(String id, List knownNodes, Version version, ThreadPool threadPool) { + return startTransport(id, knownNodes, version, threadPool, Settings.EMPTY); + } + + public static MockTransportService startTransport( + final String id, + final List knownNodes, + final Version version, + final ThreadPool threadPool, + final Settings settings) { boolean success = false; - MockTransportService newService = MockTransportService.createNewService(Settings.EMPTY, version, threadPool, null); + final Settings s = Settings.builder().put(settings).put("node.name", id).build(); 
+ MockTransportService newService = MockTransportService.createNewService(s, version, threadPool, null); try { newService.registerRequestHandler(ClusterSearchShardsAction.NAME, ClusterSearchShardsRequest::new, ThreadPool.Names.SAME, - (request, channel) -> { - channel.sendResponse(new ClusterSearchShardsResponse(new ClusterSearchShardsGroup[0], - knownNodes.toArray(new DiscoveryNode[0]), Collections.emptyMap())); - }); + (request, channel) -> { + channel.sendResponse(new ClusterSearchShardsResponse(new ClusterSearchShardsGroup[0], + knownNodes.toArray(new DiscoveryNode[0]), Collections.emptyMap())); + }); newService.registerRequestHandler(ClusterStateAction.NAME, ClusterStateRequest::new, ThreadPool.Names.SAME, - (request, channel) -> { - DiscoveryNodes.Builder builder = DiscoveryNodes.builder(); - for (DiscoveryNode node : knownNodes) { - builder.add(node); - } - ClusterState build = ClusterState.builder(ClusterName.DEFAULT).nodes(builder.build()).build(); - channel.sendResponse(new ClusterStateResponse(ClusterName.DEFAULT, build, 0L)); - }); + (request, channel) -> { + DiscoveryNodes.Builder builder = DiscoveryNodes.builder(); + for (DiscoveryNode node : knownNodes) { + builder.add(node); + } + ClusterState build = ClusterState.builder(ClusterName.DEFAULT).nodes(builder.build()).build(); + channel.sendResponse(new ClusterStateResponse(ClusterName.DEFAULT, build, 0L)); + }); newService.start(); newService.acceptIncomingRequests(); success = true; diff --git a/core/src/test/java/org/elasticsearch/action/search/RemoteClusterServiceTests.java b/core/src/test/java/org/elasticsearch/action/search/RemoteClusterServiceTests.java index 1531d66e5da75..d0f0427e71084 100644 --- a/core/src/test/java/org/elasticsearch/action/search/RemoteClusterServiceTests.java +++ b/core/src/test/java/org/elasticsearch/action/search/RemoteClusterServiceTests.java @@ -19,6 +19,7 @@ package org.elasticsearch.action.search; import org.elasticsearch.Version; +import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.admin.cluster.shards.ClusterSearchShardsGroup; import org.elasticsearch.action.admin.cluster.shards.ClusterSearchShardsResponse; import org.elasticsearch.cluster.node.DiscoveryNode; @@ -49,6 +50,7 @@ import java.util.List; import java.util.Map; import java.util.concurrent.CopyOnWriteArrayList; +import java.util.concurrent.CountDownLatch; import java.util.concurrent.TimeUnit; public class RemoteClusterServiceTests extends ESTestCase { @@ -62,7 +64,16 @@ public void tearDown() throws Exception { } private MockTransportService startTransport(String id, List knownNodes, Version version) { - return RemoteClusterConnectionTests.startTransport(id, knownNodes, version, threadPool); + return startTransport(id, knownNodes, version, Settings.EMPTY); + } + + private MockTransportService startTransport( + final String id, + final List knownNodes, + final Version version, + final Settings settings) { + return RemoteClusterConnectionTests.startTransport( + id, knownNodes, version, threadPool, settings); } public void testSettingsAreRegistered() { @@ -247,4 +258,81 @@ public void testProcessRemoteShards() throws IOException { assertEquals(new MatchAllQueryBuilder(), remoteAliases.get("bar_id").getQueryBuilder()); } } + + public void testRemoteNodeAttribute() throws IOException, InterruptedException { + final Settings settings = + Settings.builder().put("search.remote.node.attr", "gateway").build(); + final List knownNodes = new CopyOnWriteArrayList<>(); + final Settings gateway = 
Settings.builder().put("node.attr.gateway", true).build(); + try (MockTransportService c1N1 = + startTransport("cluster_1_node_1", knownNodes, Version.CURRENT); + MockTransportService c1N2 = + startTransport("cluster_1_node_2", knownNodes, Version.CURRENT, gateway); + MockTransportService c2N1 = + startTransport("cluster_2_node_1", knownNodes, Version.CURRENT); + MockTransportService c2N2 = + startTransport("cluster_2_node_2", knownNodes, Version.CURRENT, gateway)) { + final DiscoveryNode c1N1Node = c1N1.getLocalDiscoNode(); + final DiscoveryNode c1N2Node = c1N2.getLocalDiscoNode(); + final DiscoveryNode c2N1Node = c2N1.getLocalDiscoNode(); + final DiscoveryNode c2N2Node = c2N2.getLocalDiscoNode(); + knownNodes.add(c1N1Node); + knownNodes.add(c1N2Node); + knownNodes.add(c2N1Node); + knownNodes.add(c2N2Node); + Collections.shuffle(knownNodes, random()); + + try (MockTransportService transportService = MockTransportService.createNewService( + settings, + Version.CURRENT, + threadPool, + null)) { + transportService.start(); + transportService.acceptIncomingRequests(); + final Settings.Builder builder = Settings.builder(); + builder.putArray( + "search.remote.cluster_1.seeds", c1N1Node.getAddress().toString()); + builder.putArray( + "search.remote.cluster_2.seeds", c2N1Node.getAddress().toString()); + try (RemoteClusterService service = + new RemoteClusterService(settings, transportService)) { + assertFalse(service.isCrossClusterSearchEnabled()); + service.initializeRemoteClusters(); + assertFalse(service.isCrossClusterSearchEnabled()); + + final InetSocketAddress c1N1Address = c1N1Node.getAddress().address(); + final InetSocketAddress c1N2Address = c1N2Node.getAddress().address(); + final InetSocketAddress c2N1Address = c2N1Node.getAddress().address(); + final InetSocketAddress c2N2Address = c2N2Node.getAddress().address(); + + final CountDownLatch firstLatch = new CountDownLatch(1); + service.updateRemoteCluster( + "cluster_1", + Arrays.asList(c1N1Address, c1N2Address), + connectionListener(firstLatch)); + firstLatch.await(); + + final CountDownLatch secondLatch = new CountDownLatch(1); + service.updateRemoteCluster( + "cluster_2", + Arrays.asList(c2N1Address, c2N2Address), + connectionListener(secondLatch)); + secondLatch.await(); + + assertTrue(service.isCrossClusterSearchEnabled()); + assertTrue(service.isRemoteClusterRegistered("cluster_1")); + assertFalse(service.isRemoteNodeConnected("cluster_1", c1N1Node)); + assertTrue(service.isRemoteNodeConnected("cluster_1", c1N2Node)); + assertTrue(service.isRemoteClusterRegistered("cluster_2")); + assertFalse(service.isRemoteNodeConnected("cluster_2", c2N1Node)); + assertTrue(service.isRemoteNodeConnected("cluster_2", c2N2Node)); + } + } + } + } + + private ActionListener connectionListener(final CountDownLatch latch) { + return ActionListener.wrap(x -> latch.countDown(), x -> fail()); + } + } diff --git a/core/src/test/java/org/elasticsearch/action/search/SearchAsyncActionTests.java b/core/src/test/java/org/elasticsearch/action/search/SearchAsyncActionTests.java index 9b7fad265bfb3..4813dc8ae7d17 100644 --- a/core/src/test/java/org/elasticsearch/action/search/SearchAsyncActionTests.java +++ b/core/src/test/java/org/elasticsearch/action/search/SearchAsyncActionTests.java @@ -35,7 +35,6 @@ import org.elasticsearch.index.Index; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.search.SearchPhaseResult; -import org.elasticsearch.search.SearchShardTarget; import org.elasticsearch.search.internal.AliasFilter; import 
org.elasticsearch.test.ESTestCase; import org.elasticsearch.transport.Transport; @@ -92,19 +91,33 @@ public void sendFreeContext(Transport.Connection connection, long contextId, Sea lookup.put(primaryNode.getId(), new MockConnection(primaryNode)); lookup.put(replicaNode.getId(), new MockConnection(replicaNode)); Map aliasFilters = Collections.singletonMap("_na_", new AliasFilter(null, Strings.EMPTY_ARRAY)); - AbstractSearchAsyncAction asyncAction = new AbstractSearchAsyncAction("test", logger, transportService, - lookup::get, aliasFilters, Collections.emptyMap(), null, request, responseListener, shardsIter, 0, 0, null, - new InitialSearchPhase.SearchPhaseResults<>(shardsIter.size())) { + AbstractSearchAsyncAction asyncAction = + new AbstractSearchAsyncAction( + "test", + logger, + transportService, + lookup::get, + aliasFilters, + Collections.emptyMap(), + null, + request, + responseListener, + shardsIter, + new TransportSearchAction.SearchTimeProvider(0, 0, () -> 0), + 0, + null, + new InitialSearchPhase.SearchPhaseResults<>(shardsIter.size())) { TestSearchResponse response = new TestSearchResponse(); @Override - protected void executePhaseOnShard(ShardIterator shardIt, ShardRouting shard, ActionListener listener) { + protected void executePhaseOnShard(ShardIterator shardIt, ShardRouting shard, SearchActionListener + listener) { assertTrue("shard: " + shard.shardId() + " has been queried twice", response.queried.add(shard.shardId())); Transport.Connection connection = getConnection(shard.currentNodeId()); TestSearchPhaseResult testSearchPhaseResult = new TestSearchPhaseResult(contextIdGenerator.incrementAndGet(), connection.getNode()); Set ids = nodeToContextMap.computeIfAbsent(connection.getNode(), (n) -> new HashSet<>()); - ids.add(testSearchPhaseResult.id); + ids.add(testSearchPhaseResult.getRequestId()); if (randomBoolean()) { listener.onResponse(testSearchPhaseResult); } else { @@ -119,8 +132,8 @@ protected SearchPhase getNextPhase(SearchPhaseResults res public void run() throws IOException { for (int i = 0; i < results.getNumShards(); i++) { TestSearchPhaseResult result = results.results.get(i); - assertEquals(result.node.getId(), result.shardTarget().getNodeId()); - sendReleaseSearchContext(result.id(), new MockConnection(result.node)); + assertEquals(result.node.getId(), result.getSearchShardTarget().getNodeId()); + sendReleaseSearchContext(result.getRequestId(), new MockConnection(result.node)); } responseListener.onResponse(response); latch.countDown(); @@ -180,32 +193,14 @@ public static class TestSearchResponse extends SearchResponse { public final Set queried = new HashSet<>(); } - public static class TestSearchPhaseResult implements SearchPhaseResult { - final long id; + public static class TestSearchPhaseResult extends SearchPhaseResult { final DiscoveryNode node; - SearchShardTarget shardTarget; public TestSearchPhaseResult(long id, DiscoveryNode node) { - this.id = id; + this.requestId = id; this.node = node; } - @Override - public long id() { - return id; - } - - @Override - public SearchShardTarget shardTarget() { - return this.shardTarget; - } - - @Override - public void shardTarget(SearchShardTarget shardTarget) { - this.shardTarget = shardTarget; - - } - @Override public void readFrom(StreamInput in) throws IOException { diff --git a/core/src/test/java/org/elasticsearch/action/search/SearchPhaseControllerTests.java b/core/src/test/java/org/elasticsearch/action/search/SearchPhaseControllerTests.java index 36756aba946bd..76541a79bb904 100644 --- 
a/core/src/test/java/org/elasticsearch/action/search/SearchPhaseControllerTests.java +++ b/core/src/test/java/org/elasticsearch/action/search/SearchPhaseControllerTests.java @@ -28,6 +28,7 @@ import org.elasticsearch.common.util.concurrent.AtomicArray; import org.elasticsearch.index.Index; import org.elasticsearch.search.DocValueFormat; +import org.elasticsearch.search.SearchPhaseResult; import org.elasticsearch.search.SearchShardTarget; import org.elasticsearch.search.aggregations.AggregationBuilders; import org.elasticsearch.search.aggregations.InternalAggregations; @@ -38,7 +39,6 @@ import org.elasticsearch.search.SearchHits; import org.elasticsearch.search.internal.InternalSearchResponse; import org.elasticsearch.search.query.QuerySearchResult; -import org.elasticsearch.search.query.QuerySearchResultProvider; import org.elasticsearch.search.suggest.Suggest; import org.elasticsearch.search.suggest.completion.CompletionSuggestion; import org.elasticsearch.test.ESTestCase; @@ -74,8 +74,8 @@ public void testSort() throws Exception { } int nShards = randomIntBetween(1, 20); int queryResultSize = randomBoolean() ? 0 : randomIntBetween(1, nShards * 2); - AtomicArray results = generateQueryResults(nShards, suggestions, queryResultSize, false); - ScoreDoc[] sortedDocs = searchPhaseController.sortDocs(true, results); + AtomicArray results = generateQueryResults(nShards, suggestions, queryResultSize, false); + ScoreDoc[] sortedDocs = searchPhaseController.sortDocs(true, results.asList(), nShards); int accumulatedLength = Math.min(queryResultSize, getTotalQueryHits(results)); for (Suggest.Suggestion suggestion : reducedSuggest(results)) { int suggestionSize = suggestion.getEntries().get(0).getOptions().size(); @@ -87,12 +87,12 @@ public void testSort() throws Exception { public void testSortIsIdempotent() throws IOException { int nShards = randomIntBetween(1, 20); int queryResultSize = randomBoolean() ? 0 : randomIntBetween(1, nShards * 2); - AtomicArray results = generateQueryResults(nShards, Collections.emptyList(), queryResultSize, + AtomicArray results = generateQueryResults(nShards, Collections.emptyList(), queryResultSize, randomBoolean() || true); boolean ignoreFrom = randomBoolean(); - ScoreDoc[] sortedDocs = searchPhaseController.sortDocs(ignoreFrom, results); + ScoreDoc[] sortedDocs = searchPhaseController.sortDocs(ignoreFrom, results.asList(), nShards); - ScoreDoc[] sortedDocs2 = searchPhaseController.sortDocs(ignoreFrom, results); + ScoreDoc[] sortedDocs2 = searchPhaseController.sortDocs(ignoreFrom, results.asList(), nShards); assertArrayEquals(sortedDocs, sortedDocs2); } @@ -103,7 +103,7 @@ public void testMerge() throws IOException { } int nShards = randomIntBetween(1, 20); int queryResultSize = randomBoolean() ? 
0 : randomIntBetween(1, nShards * 2); - AtomicArray queryResults = generateQueryResults(nShards, suggestions, queryResultSize, false); + AtomicArray queryResults = generateQueryResults(nShards, suggestions, queryResultSize, false); // calculate offsets and score doc array List mergedScoreDocs = new ArrayList<>(); @@ -119,9 +119,10 @@ public void testMerge() throws IOException { } } ScoreDoc[] sortedDocs = mergedScoreDocs.toArray(new ScoreDoc[mergedScoreDocs.size()]); + AtomicArray searchPhaseResultAtomicArray = generateFetchResults(nShards, mergedSearchDocs, mergedSuggest); InternalSearchResponse mergedResponse = searchPhaseController.merge(true, sortedDocs, searchPhaseController.reducedQueryPhase(queryResults.asList()), - generateFetchResults(nShards, mergedSearchDocs, mergedSuggest)); + searchPhaseResultAtomicArray.asList(), searchPhaseResultAtomicArray::get); assertThat(mergedResponse.hits().getHits().length, equalTo(mergedSearchDocs.length)); Suggest suggestResult = mergedResponse.suggest(); for (Suggest.Suggestion suggestion : mergedSuggest) { @@ -138,10 +139,10 @@ public void testMerge() throws IOException { } } - private AtomicArray generateQueryResults(int nShards, - List suggestions, - int searchHitsSize, boolean useConstantScore) { - AtomicArray queryResults = new AtomicArray<>(nShards); + private AtomicArray generateQueryResults(int nShards, + List suggestions, + int searchHitsSize, boolean useConstantScore) { + AtomicArray queryResults = new AtomicArray<>(nShards); for (int shardIndex = 0; shardIndex < nShards; shardIndex++) { QuerySearchResult querySearchResult = new QuerySearchResult(shardIndex, new SearchShardTarget("", new Index("", ""), shardIndex)); @@ -181,23 +182,24 @@ private AtomicArray generateQueryResults(int nShards, querySearchResult.topDocs(topDocs, null); querySearchResult.size(searchHitsSize); querySearchResult.suggest(new Suggest(new ArrayList<>(shardSuggestion))); + querySearchResult.setShardIndex(shardIndex); queryResults.set(shardIndex, querySearchResult); } return queryResults; } - private int getTotalQueryHits(AtomicArray results) { + private int getTotalQueryHits(AtomicArray results) { int resultCount = 0; - for (AtomicArray.Entry shardResult : results.asList()) { - resultCount += shardResult.value.queryResult().topDocs().totalHits; + for (SearchPhaseResult shardResult : results.asList()) { + resultCount += shardResult.queryResult().topDocs().totalHits; } return resultCount; } - private Suggest reducedSuggest(AtomicArray results) { + private Suggest reducedSuggest(AtomicArray results) { Map>> groupedSuggestion = new HashMap<>(); - for (AtomicArray.Entry entry : results.asList()) { - for (Suggest.Suggestion suggestion : entry.value.queryResult().suggest()) { + for (SearchPhaseResult entry : results.asList()) { + for (Suggest.Suggestion suggestion : entry.queryResult().suggest()) { List> suggests = groupedSuggestion.computeIfAbsent(suggestion.getName(), s -> new ArrayList<>()); suggests.add((Suggest.Suggestion) suggestion); @@ -207,18 +209,18 @@ private Suggest reducedSuggest(AtomicArray results) { .collect(Collectors.toList())); } - private ScoreDoc[] getTopShardDocs(AtomicArray results) throws IOException { - List> resultList = results.asList(); + private ScoreDoc[] getTopShardDocs(AtomicArray results) throws IOException { + List resultList = results.asList(); TopDocs[] shardTopDocs = new TopDocs[resultList.size()]; for (int i = 0; i < resultList.size(); i++) { - shardTopDocs[i] = resultList.get(i).value.queryResult().topDocs(); + shardTopDocs[i] = 
resultList.get(i).queryResult().topDocs(); } int topN = Math.min(results.get(0).queryResult().size(), getTotalQueryHits(results)); return TopDocs.merge(topN, shardTopDocs).scoreDocs; } - private AtomicArray generateFetchResults(int nShards, ScoreDoc[] mergedSearchDocs, Suggest mergedSuggest) { - AtomicArray fetchResults = new AtomicArray<>(nShards); + private AtomicArray generateFetchResults(int nShards, ScoreDoc[] mergedSearchDocs, Suggest mergedSuggest) { + AtomicArray fetchResults = new AtomicArray<>(nShards); for (int shardIndex = 0; shardIndex < nShards; shardIndex++) { float maxScore = -1F; SearchShardTarget shardTarget = new SearchShardTarget("", new Index("", ""), shardIndex); @@ -257,27 +259,30 @@ public void testConsumer() { SearchRequest request = new SearchRequest(); request.source(new SearchSourceBuilder().aggregation(AggregationBuilders.avg("foo"))); request.setBatchedReduceSize(bufferSize); - InitialSearchPhase.SearchPhaseResults consumer = searchPhaseController.newSearchPhaseResults(request, 3); + InitialSearchPhase.SearchPhaseResults consumer = searchPhaseController.newSearchPhaseResults(request, 3); QuerySearchResult result = new QuerySearchResult(0, new SearchShardTarget("node", new Index("a", "b"), 0)); result.topDocs(new TopDocs(0, new ScoreDoc[0], 0.0F), new DocValueFormat[0]); InternalAggregations aggs = new InternalAggregations(Arrays.asList(new InternalMax("test", 1.0D, DocValueFormat.RAW, Collections.emptyList(), Collections.emptyMap()))); result.aggregations(aggs); - consumer.consumeResult(0, result); + result.setShardIndex(0); + consumer.consumeResult(result); result = new QuerySearchResult(1, new SearchShardTarget("node", new Index("a", "b"), 0)); result.topDocs(new TopDocs(0, new ScoreDoc[0], 0.0F), new DocValueFormat[0]); aggs = new InternalAggregations(Arrays.asList(new InternalMax("test", 3.0D, DocValueFormat.RAW, Collections.emptyList(), Collections.emptyMap()))); result.aggregations(aggs); - consumer.consumeResult(2, result); + result.setShardIndex(2); + consumer.consumeResult(result); result = new QuerySearchResult(1, new SearchShardTarget("node", new Index("a", "b"), 0)); result.topDocs(new TopDocs(0, new ScoreDoc[0], 0.0F), new DocValueFormat[0]); aggs = new InternalAggregations(Arrays.asList(new InternalMax("test", 2.0D, DocValueFormat.RAW, Collections.emptyList(), Collections.emptyMap()))); result.aggregations(aggs); - consumer.consumeResult(1, result); + result.setShardIndex(1); + consumer.consumeResult(result); int numTotalReducePhases = 1; if (bufferSize == 2) { assertThat(consumer, instanceOf(SearchPhaseController.QueryPhaseResultConsumer.class)); @@ -301,7 +306,7 @@ public void testConsumerConcurrently() throws InterruptedException { SearchRequest request = new SearchRequest(); request.source(new SearchSourceBuilder().aggregation(AggregationBuilders.avg("foo"))); request.setBatchedReduceSize(bufferSize); - InitialSearchPhase.SearchPhaseResults consumer = + InitialSearchPhase.SearchPhaseResults consumer = searchPhaseController.newSearchPhaseResults(request, expectedNumResults); AtomicInteger max = new AtomicInteger(); CountDownLatch latch = new CountDownLatch(expectedNumResults); @@ -315,7 +320,8 @@ public void testConsumerConcurrently() throws InterruptedException { InternalAggregations aggs = new InternalAggregations(Arrays.asList(new InternalMax("test", (double) number, DocValueFormat.RAW, Collections.emptyList(), Collections.emptyMap()))); result.aggregations(aggs); - consumer.consumeResult(id, result); + result.setShardIndex(id); + 
consumer.consumeResult(result); latch.countDown(); }); @@ -337,7 +343,7 @@ public void testNewSearchPhaseResults() { request.source(new SearchSourceBuilder().aggregation(AggregationBuilders.avg("foo"))); } request.setBatchedReduceSize(bufferSize); - InitialSearchPhase.SearchPhaseResults consumer + InitialSearchPhase.SearchPhaseResults consumer = searchPhaseController.newSearchPhaseResults(request, expectedNumResults); if (hasAggs && expectedNumResults > bufferSize) { assertThat("expectedNumResults: " + expectedNumResults + " bufferSize: " + bufferSize, @@ -354,7 +360,7 @@ public void testFillTopDocs() { for (int iters = 0; iters < maxIters; iters++) { TopDocs[] topDocs = new TopDocs[randomIntBetween(2, 100)]; int numShards = topDocs.length; - AtomicArray resultProviderAtomicArray = generateQueryResults(numShards, Collections.emptyList(), + AtomicArray resultProviderAtomicArray = generateQueryResults(numShards, Collections.emptyList(), 2, randomBoolean()); if (randomBoolean()) { int maxNull = randomIntBetween(1, topDocs.length - 1); diff --git a/core/src/test/java/org/elasticsearch/action/update/UpdateRequestTests.java b/core/src/test/java/org/elasticsearch/action/update/UpdateRequestTests.java index 4b11697c16d9f..b75733fcf13c6 100644 --- a/core/src/test/java/org/elasticsearch/action/update/UpdateRequestTests.java +++ b/core/src/test/java/org/elasticsearch/action/update/UpdateRequestTests.java @@ -21,10 +21,12 @@ import org.elasticsearch.Version; import org.elasticsearch.action.index.IndexRequest; +import org.elasticsearch.action.support.replication.ReplicationRequest; import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.io.stream.Streamable; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.common.xcontent.XContentParser; @@ -43,16 +45,20 @@ import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.RandomObjects; import org.elasticsearch.watcher.ResourceWatcherService; +import org.junit.Before; import java.io.IOException; import java.nio.file.Path; -import java.util.Collections; import java.util.HashMap; import java.util.Map; import java.util.function.Function; +import static java.util.Collections.emptyList; +import static java.util.Collections.emptyMap; +import static java.util.Collections.singletonList; import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; import static org.elasticsearch.common.xcontent.XContentHelper.toXContent; +import static org.elasticsearch.script.MockScriptEngine.mockInlineScript; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertToXContentEquivalent; import static org.hamcrest.Matchers.arrayContaining; import static org.hamcrest.Matchers.equalTo; @@ -61,6 +67,66 @@ public class UpdateRequestTests extends ESTestCase { + private UpdateHelper updateHelper; + + @Before + public void setUp() throws Exception { + super.setUp(); + final Path genericConfigFolder = createTempDir(); + final Settings baseSettings = Settings.builder() + .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toString()) + .put(Environment.PATH_CONF_SETTING.getKey(), genericConfigFolder) + .build(); + final Environment environment = new Environment(baseSettings); + final Map, Object>> scripts = new HashMap<>(); + scripts.put( + 
"ctx._source.update_timestamp = ctx._now", + vars -> { + @SuppressWarnings("unchecked") + final Map ctx = (Map) vars.get("ctx"); + @SuppressWarnings("unchecked") + final Map source = (Map) ctx.get("_source"); + source.put("update_timestamp", ctx.get("_now")); + return null; + }); + scripts.put( + "ctx._timestamp = ctx._now", + vars -> { + @SuppressWarnings("unchecked") + final Map ctx = (Map) vars.get("ctx"); + ctx.put("_timestamp", ctx.get("_now")); + return null; + }); + scripts.put( + "ctx.op = delete", + vars -> { + @SuppressWarnings("unchecked") + final Map ctx = (Map) vars.get("ctx"); + ctx.put("op", "delete"); + return null; + }); + scripts.put("return", vars -> null); + final ScriptContextRegistry scriptContextRegistry = new ScriptContextRegistry(emptyList()); + final MockScriptEngine engine = new MockScriptEngine("mock", scripts); + final ScriptEngineRegistry scriptEngineRegistry = + new ScriptEngineRegistry(singletonList(engine)); + + final ScriptSettings scriptSettings = + new ScriptSettings(scriptEngineRegistry, scriptContextRegistry); + final ResourceWatcherService watcherService = + new ResourceWatcherService(baseSettings, null); + ScriptService scriptService = new ScriptService( + baseSettings, + environment, + watcherService, + scriptEngineRegistry, + scriptContextRegistry, + scriptSettings); + final Settings settings = settings(Version.CURRENT).build(); + + updateHelper = new UpdateHelper(settings, scriptService); + } + public void testFromXContent() throws Exception { UpdateRequest request = new UpdateRequest("test", "type", "1"); // simple script @@ -74,7 +140,7 @@ public void testFromXContent() throws Exception { assertThat(script.getType(), equalTo(ScriptType.INLINE)); assertThat(script.getLang(), equalTo(Script.DEFAULT_SCRIPT_LANG)); Map params = script.getParams(); - assertThat(params, equalTo(Collections.emptyMap())); + assertThat(params, equalTo(emptyMap())); // simple verbose script request.fromXContent(createParser(XContentFactory.jsonBuilder().startObject() @@ -86,7 +152,7 @@ public void testFromXContent() throws Exception { assertThat(script.getType(), equalTo(ScriptType.INLINE)); assertThat(script.getLang(), equalTo(Script.DEFAULT_SCRIPT_LANG)); params = script.getParams(); - assertThat(params, equalTo(Collections.emptyMap())); + assertThat(params, equalTo(emptyMap())); // script with params request = new UpdateRequest("test", "type", "1"); @@ -258,39 +324,6 @@ public void testFetchSourceParsing() throws Exception { } public void testNowInScript() throws IOException { - Path genericConfigFolder = createTempDir(); - Settings baseSettings = Settings.builder() - .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toString()) - .put(Environment.PATH_CONF_SETTING.getKey(), genericConfigFolder) - .build(); - Environment environment = new Environment(baseSettings); - Map, Object>> scripts = new HashMap<>(); - scripts.put("ctx._source.update_timestamp = ctx._now", - (vars) -> { - Map vars2 = vars; - @SuppressWarnings("unchecked") - Map ctx = (Map) vars2.get("ctx"); - @SuppressWarnings("unchecked") - Map source = (Map) ctx.get("_source"); - source.put("update_timestamp", ctx.get("_now")); - return null;}); - scripts.put("ctx._timestamp = ctx._now", - (vars) -> { - @SuppressWarnings("unchecked") - Map ctx = (Map) vars.get("ctx"); - ctx.put("_timestamp", ctx.get("_now")); - return null;}); - ScriptContextRegistry scriptContextRegistry = new ScriptContextRegistry(Collections.emptyList()); - ScriptEngineRegistry scriptEngineRegistry = new 
ScriptEngineRegistry(Collections.singletonList(new MockScriptEngine("mock", - scripts))); - - ScriptSettings scriptSettings = new ScriptSettings(scriptEngineRegistry, scriptContextRegistry); - ScriptService scriptService = new ScriptService(baseSettings, environment, - new ResourceWatcherService(baseSettings, null), scriptEngineRegistry, scriptContextRegistry, scriptSettings); - Settings settings = settings(Version.CURRENT).build(); - - UpdateHelper updateHelper = new UpdateHelper(settings, scriptService); - // We just upsert one document with now() using a script IndexRequest indexRequest = new IndexRequest("test", "type1", "2") .source(jsonBuilder().startObject().field("foo", "bar").endObject()); @@ -298,7 +331,7 @@ public void testNowInScript() throws IOException { { UpdateRequest updateRequest = new UpdateRequest("test", "type1", "2") .upsert(indexRequest) - .script(new Script(ScriptType.INLINE, "mock", "ctx._source.update_timestamp = ctx._now", Collections.emptyMap())) + .script(mockInlineScript("ctx._source.update_timestamp = ctx._now")) .scriptedUpsert(true); long nowInMillis = randomNonNegativeLong(); // We simulate that the document is not existing yet @@ -307,12 +340,12 @@ public void testNowInScript() throws IOException { Streamable action = result.action(); assertThat(action, instanceOf(IndexRequest.class)); IndexRequest indexAction = (IndexRequest) action; - assertEquals(indexAction.sourceAsMap().get("update_timestamp"), nowInMillis); + assertEquals(nowInMillis, indexAction.sourceAsMap().get("update_timestamp")); } { UpdateRequest updateRequest = new UpdateRequest("test", "type1", "2") .upsert(indexRequest) - .script(new Script(ScriptType.INLINE, "mock", "ctx._timestamp = ctx._now", Collections.emptyMap())) + .script(mockInlineScript("ctx._timestamp = ctx._now")) .scriptedUpsert(true); // We simulate that the document is not existing yet GetResult getResult = new GetResult("test", "type1", "2", 0, true, new BytesArray("{}"), null); @@ -322,6 +355,57 @@ public void testNowInScript() throws IOException { } } + public void testIndexTimeout() { + final GetResult getResult = + new GetResult("test", "type", "1", 0, true, new BytesArray("{\"f\":\"v\"}"), null); + final UpdateRequest updateRequest = + new UpdateRequest("test", "type", "1") + .script(mockInlineScript("return")) + .timeout(randomTimeValue()); + runTimeoutTest(getResult, updateRequest); + } + + public void testDeleteTimeout() { + final GetResult getResult = + new GetResult("test", "type", "1", 0, true, new BytesArray("{\"f\":\"v\"}"), null); + final UpdateRequest updateRequest = + new UpdateRequest("test", "type", "1") + .script(mockInlineScript("ctx.op = delete")) + .timeout(randomTimeValue()); + runTimeoutTest(getResult, updateRequest); + } + + public void testUpsertTimeout() throws IOException { + final boolean exists = randomBoolean(); + final BytesReference source = exists ? 
new BytesArray("{\"f\":\"v\"}") : null; + final GetResult getResult = new GetResult("test", "type", "1", 0, exists, source, null); + final XContentBuilder sourceBuilder = jsonBuilder(); + sourceBuilder.startObject(); + { + sourceBuilder.field("f", "v"); + } + sourceBuilder.endObject(); + final IndexRequest upsert = new IndexRequest("test", "type", "1").source(sourceBuilder); + final UpdateRequest updateRequest = + new UpdateRequest("test", "type", "1") + .upsert(upsert) + .script(mockInlineScript("return")) + .timeout(randomTimeValue()); + runTimeoutTest(getResult, updateRequest); + } + + private void runTimeoutTest(final GetResult getResult, final UpdateRequest updateRequest) { + final UpdateHelper.Result result = updateHelper.prepare( + new ShardId("test", "", 0), + updateRequest, + getResult, + ESTestCase::randomNonNegativeLong); + final Streamable action = result.action(); + assertThat(action, instanceOf(ReplicationRequest.class)); + final ReplicationRequest request = (ReplicationRequest) action; + assertThat(request.timeout(), equalTo(updateRequest.timeout())); + } + public void testToAndFromXContent() throws IOException { UpdateRequest updateRequest = new UpdateRequest(); updateRequest.detectNoop(randomBoolean()); diff --git a/core/src/test/java/org/elasticsearch/bootstrap/BootstrapChecksTests.java b/core/src/test/java/org/elasticsearch/bootstrap/BootstrapChecksTests.java index bd553cff6e190..c3e08b81d6c3e 100644 --- a/core/src/test/java/org/elasticsearch/bootstrap/BootstrapChecksTests.java +++ b/core/src/test/java/org/elasticsearch/bootstrap/BootstrapChecksTests.java @@ -560,12 +560,48 @@ private void runMightForkTest( consumer.accept(e); } + public void testEarlyAccessCheck() throws NodeValidationException { + final AtomicReference javaVersion + = new AtomicReference<>(randomFrom("1.8.0_152-ea", "9-ea")); + final BootstrapChecks.EarlyAccessCheck eaCheck = new BootstrapChecks.EarlyAccessCheck() { + + @Override + String jvmVendor() { + return "Oracle Corporation"; + } + + @Override + String javaVersion() { + return javaVersion.get(); + } + + }; + + final List checks = Collections.singletonList(eaCheck); + final NodeValidationException e = expectThrows( + NodeValidationException.class, + () -> { + BootstrapChecks.check(true, checks, "testEarlyAccessCheck"); + }); + assertThat( + e.getMessage(), + containsString( + "Java version [" + + javaVersion.get() + + "] is an early-access build, only use release builds")); + + // if not on an early-access build, nothing should happen + javaVersion.set(randomFrom("1.8.0_152", "9")); + BootstrapChecks.check(true, checks, "testEarlyAccessCheck"); + + } + public void testG1GCCheck() throws NodeValidationException { final AtomicBoolean isG1GCEnabled = new AtomicBoolean(true); final AtomicBoolean isJava8 = new AtomicBoolean(true); final AtomicReference jvmVersion = new AtomicReference<>(String.format(Locale.ROOT, "25.%d-b%d", randomIntBetween(0, 39), randomIntBetween(1, 128))); - final BootstrapChecks.G1GCCheck oracleCheck = new BootstrapChecks.G1GCCheck() { + final BootstrapChecks.G1GCCheck g1GCCheck = new BootstrapChecks.G1GCCheck() { @Override String jvmVendor() { @@ -592,7 +628,7 @@ boolean isJava8() { final NodeValidationException e = expectThrows( NodeValidationException.class, - () -> BootstrapChecks.check(true, Collections.singletonList(oracleCheck), "testG1GCCheck")); + () -> BootstrapChecks.check(true, Collections.singletonList(g1GCCheck), "testG1GCCheck")); assertThat( e.getMessage(), containsString( @@ -600,12 +636,12 @@ boolean isJava8() 
{ // if G1GC is disabled, nothing should happen isG1GCEnabled.set(false); - BootstrapChecks.check(true, Collections.singletonList(oracleCheck), "testG1GCCheck"); + BootstrapChecks.check(true, Collections.singletonList(g1GCCheck), "testG1GCCheck"); // if on or after update 40, nothing should happen independent of whether or not G1GC is enabled isG1GCEnabled.set(randomBoolean()); jvmVersion.set(String.format(Locale.ROOT, "25.%d-b%d", randomIntBetween(40, 112), randomIntBetween(1, 128))); - BootstrapChecks.check(true, Collections.singletonList(oracleCheck), "testG1GCCheck"); + BootstrapChecks.check(true, Collections.singletonList(g1GCCheck), "testG1GCCheck"); final BootstrapChecks.G1GCCheck nonOracleCheck = new BootstrapChecks.G1GCCheck() { diff --git a/core/src/test/java/org/elasticsearch/bootstrap/JarHellTests.java b/core/src/test/java/org/elasticsearch/bootstrap/JarHellTests.java index d38d346d6c14f..7003ef3d81efe 100644 --- a/core/src/test/java/org/elasticsearch/bootstrap/JarHellTests.java +++ b/core/src/test/java/org/elasticsearch/bootstrap/JarHellTests.java @@ -30,7 +30,9 @@ import java.nio.file.Path; import java.nio.file.StandardOpenOption; import java.util.ArrayList; +import java.util.Collections; import java.util.List; +import java.util.Set; import java.util.jar.Attributes; import java.util.jar.JarOutputStream; import java.util.jar.Manifest; @@ -62,7 +64,8 @@ URL makeFile(Path dir, String name) throws IOException { public void testDifferentJars() throws Exception { Path dir = createTempDir(); - URL[] jars = {makeJar(dir, "foo.jar", null, "DuplicateClass.class"), makeJar(dir, "bar.jar", null, "DuplicateClass.class")}; + Set jars = asSet(makeJar(dir, "foo.jar", null, "DuplicateClass.class"), + makeJar(dir, "bar.jar", null, "DuplicateClass.class")); try { JarHell.checkJarHell(jars); fail("did not get expected exception"); @@ -74,17 +77,11 @@ public void testDifferentJars() throws Exception { } } - public void testDuplicateClasspathLeniency() throws Exception { - Path dir = createTempDir(); - URL jar = makeJar(dir, "foo.jar", null, "Foo.class"); - URL[] jars = {jar, jar}; - JarHell.checkJarHell(jars); - } - public void testDirsOnClasspath() throws Exception { Path dir1 = createTempDir(); Path dir2 = createTempDir(); - URL[] dirs = {makeFile(dir1, "DuplicateClass.class"), makeFile(dir2, "DuplicateClass.class")}; + Set dirs = asSet(makeFile(dir1, "DuplicateClass.class"), + makeFile(dir2, "DuplicateClass.class")); try { JarHell.checkJarHell(dirs); fail("did not get expected exception"); @@ -99,7 +96,8 @@ public void testDirsOnClasspath() throws Exception { public void testDirAndJar() throws Exception { Path dir1 = createTempDir(); Path dir2 = createTempDir(); - URL[] dirs = {makeJar(dir1, "foo.jar", null, "DuplicateClass.class"), makeFile(dir2, "DuplicateClass.class")}; + Set dirs = asSet(makeJar(dir1, "foo.jar", null, "DuplicateClass.class"), + makeFile(dir2, "DuplicateClass.class")); try { JarHell.checkJarHell(dirs); fail("did not get expected exception"); @@ -113,8 +111,8 @@ public void testDirAndJar() throws Exception { public void testWithinSingleJar() throws Exception { // the java api for zip file does not allow creating duplicate entries (good!) 
so - // this bogus jar had to be constructed with ant - URL[] jars = {JarHellTests.class.getResource("duplicate-classes.jar")}; + // this bogus jar had to be constructed with https://github.com/jasontedor/duplicate-classes + Set jars = Collections.singleton(JarHellTests.class.getResource("duplicate-classes.jar")); try { JarHell.checkJarHell(jars); fail("did not get expected exception"); @@ -127,7 +125,7 @@ public void testWithinSingleJar() throws Exception { } public void testXmlBeansLeniency() throws Exception { - URL[] jars = {JarHellTests.class.getResource("duplicate-xmlbeans-classes.jar")}; + Set jars = Collections.singleton(JarHellTests.class.getResource("duplicate-xmlbeans-classes.jar")); JarHell.checkJarHell(jars); } @@ -145,7 +143,7 @@ public void testRequiredJDKVersionTooOld() throws Exception { Attributes attributes = manifest.getMainAttributes(); attributes.put(Attributes.Name.MANIFEST_VERSION, "1.0.0"); attributes.put(new Attributes.Name("X-Compile-Target-JDK"), targetVersion.toString()); - URL[] jars = {makeJar(dir, "foo.jar", manifest, "Foo.class")}; + Set jars = Collections.singleton(makeJar(dir, "foo.jar", manifest, "Foo.class")); try { JarHell.checkJarHell(jars); fail("did not get expected exception"); @@ -161,7 +159,7 @@ public void testBadJDKVersionInJar() throws Exception { Attributes attributes = manifest.getMainAttributes(); attributes.put(Attributes.Name.MANIFEST_VERSION, "1.0.0"); attributes.put(new Attributes.Name("X-Compile-Target-JDK"), "bogus"); - URL[] jars = {makeJar(dir, "foo.jar", manifest, "Foo.class")}; + Set jars = Collections.singleton(makeJar(dir, "foo.jar", manifest, "Foo.class")); try { JarHell.checkJarHell(jars); fail("did not get expected exception"); @@ -176,8 +174,7 @@ public void testRequiredJDKVersionIsOK() throws Exception { Attributes attributes = manifest.getMainAttributes(); attributes.put(Attributes.Name.MANIFEST_VERSION, "1.0.0"); attributes.put(new Attributes.Name("X-Compile-Target-JDK"), "1.7"); - URL[] jars = {makeJar(dir, "foo.jar", manifest, "Foo.class")}; - + Set jars = Collections.singleton(makeJar(dir, "foo.jar", manifest, "Foo.class")); JarHell.checkJarHell(jars); } @@ -188,7 +185,7 @@ public void testGoodESVersionInJar() throws Exception { Attributes attributes = manifest.getMainAttributes(); attributes.put(Attributes.Name.MANIFEST_VERSION, "1.0.0"); attributes.put(new Attributes.Name("X-Compile-Elasticsearch-Version"), Version.CURRENT.toString()); - URL[] jars = {makeJar(dir, "foo.jar", manifest, "Foo.class")}; + Set jars = Collections.singleton(makeJar(dir, "foo.jar", manifest, "Foo.class")); JarHell.checkJarHell(jars); } @@ -199,7 +196,7 @@ public void testBadESVersionInJar() throws Exception { Attributes attributes = manifest.getMainAttributes(); attributes.put(Attributes.Name.MANIFEST_VERSION, "1.0.0"); attributes.put(new Attributes.Name("X-Compile-Elasticsearch-Version"), "1.0-bogus"); - URL[] jars = {makeJar(dir, "foo.jar", manifest, "Foo.class")}; + Set jars = Collections.singleton(makeJar(dir, "foo.jar", manifest, "Foo.class")); try { JarHell.checkJarHell(jars); fail("did not get expected exception"); @@ -242,8 +239,8 @@ public void testParseClassPathUnix() throws Exception { Path element1 = createTempDir(); Path element2 = createTempDir(); - URL expected[] = { element1.toUri().toURL(), element2.toUri().toURL() }; - assertArrayEquals(expected, JarHell.parseClassPath(element1.toString() + ":" + element2.toString())); + Set expected = asSet(element1.toUri().toURL(), element2.toUri().toURL()); + assertEquals(expected,
JarHell.parseClassPath(element1.toString() + ":" + element2.toString())); } /** @@ -271,8 +268,8 @@ public void testParseClassPathWindows() throws Exception { Path element1 = createTempDir(); Path element2 = createTempDir(); - URL expected[] = { element1.toUri().toURL(), element2.toUri().toURL() }; - assertArrayEquals(expected, JarHell.parseClassPath(element1.toString() + ";" + element2.toString())); + Set expected = asSet(element1.toUri().toURL(), element2.toUri().toURL()); + assertEquals(expected, JarHell.parseClassPath(element1.toString() + ";" + element2.toString())); } /** @@ -298,13 +295,13 @@ public void testCrazyEclipseClassPathWindows() throws Exception { assumeTrue("test is designed for windows-like systems only", ";".equals(System.getProperty("path.separator"))); assumeTrue("test is designed for windows-like systems only", "\\".equals(System.getProperty("file.separator"))); - URL expected[] = { + Set expected = asSet( PathUtils.get("c:\\element1").toUri().toURL(), PathUtils.get("c:\\element2").toUri().toURL(), PathUtils.get("c:\\element3").toUri().toURL(), - PathUtils.get("c:\\element 4").toUri().toURL(), - }; - URL actual[] = JarHell.parseClassPath("c:\\element1;c:\\element2;/c:/element3;/c:/element 4"); - assertArrayEquals(expected, actual); + PathUtils.get("c:\\element 4").toUri().toURL() + ); + Set actual = JarHell.parseClassPath("c:\\element1;c:\\element2;/c:/element3;/c:/element 4"); + assertEquals(expected, actual); } } diff --git a/core/src/test/java/org/elasticsearch/cluster/metadata/MetaDataCreateIndexServiceTests.java b/core/src/test/java/org/elasticsearch/cluster/metadata/MetaDataCreateIndexServiceTests.java index 50fe3c88b65bb..5486868040fae 100644 --- a/core/src/test/java/org/elasticsearch/cluster/metadata/MetaDataCreateIndexServiceTests.java +++ b/core/src/test/java/org/elasticsearch/cluster/metadata/MetaDataCreateIndexServiceTests.java @@ -191,8 +191,6 @@ public void testShrinkIndexSettings() { assertEquals("1", builder.build().get("index.allocation.max_retries")); assertEquals(version, builder.build().getAsVersion("index.version.created", null)); assertEquals(upgraded, builder.build().getAsVersion("index.version.upgraded", null)); - assertEquals(minCompat.luceneVersion.toString(), builder.build().get("index.version.minimum_compatible", null)); - } private DiscoveryNode newNode(String nodeId) { diff --git a/core/src/test/java/org/elasticsearch/cluster/metadata/MetaDataIndexUpgradeServiceTests.java b/core/src/test/java/org/elasticsearch/cluster/metadata/MetaDataIndexUpgradeServiceTests.java index 88e2835b5abf4..24e969d06d62d 100644 --- a/core/src/test/java/org/elasticsearch/cluster/metadata/MetaDataIndexUpgradeServiceTests.java +++ b/core/src/test/java/org/elasticsearch/cluster/metadata/MetaDataIndexUpgradeServiceTests.java @@ -86,8 +86,7 @@ public void testFailUpgrade() { final IndexMetaData metaData = newIndexMeta("foo", Settings.builder() .put(IndexMetaData.SETTING_VERSION_UPGRADED, Version.V_5_0_0_beta1) .put(IndexMetaData.SETTING_VERSION_CREATED, Version.fromString("2.4.0")) - .put(IndexMetaData.SETTING_VERSION_MINIMUM_COMPATIBLE, - Version.CURRENT.luceneVersion.toString()).build()); + .build()); String message = expectThrows(IllegalStateException.class, () -> service.upgradeIndexMetaData(metaData, Version.CURRENT.minimumIndexCompatibilityVersion())).getMessage(); assertEquals(message, "The index [[foo/BOOM]] was created with version [2.4.0] but the minimum compatible version is [5.0.0]." 
+ @@ -96,8 +95,7 @@ public void testFailUpgrade() { IndexMetaData goodMeta = newIndexMeta("foo", Settings.builder() .put(IndexMetaData.SETTING_VERSION_UPGRADED, Version.V_5_0_0_beta1) .put(IndexMetaData.SETTING_VERSION_CREATED, Version.fromString("5.1.0")) - .put(IndexMetaData.SETTING_VERSION_MINIMUM_COMPATIBLE, - Version.CURRENT.luceneVersion.toString()).build()); + .build()); service.upgradeIndexMetaData(goodMeta, Version.V_5_0_0.minimumIndexCompatibilityVersion()); } @@ -113,5 +111,4 @@ public static IndexMetaData newIndexMeta(String name, Settings indexSettings) { IndexMetaData metaData = IndexMetaData.builder(name).settings(build).build(); return metaData; } - } diff --git a/core/src/test/java/org/elasticsearch/common/util/concurrent/ThreadContextTests.java b/core/src/test/java/org/elasticsearch/common/util/concurrent/ThreadContextTests.java index 698e216ea1656..bee56c229c02a 100644 --- a/core/src/test/java/org/elasticsearch/common/util/concurrent/ThreadContextTests.java +++ b/core/src/test/java/org/elasticsearch/common/util/concurrent/ThreadContextTests.java @@ -430,11 +430,16 @@ public void testPreservesThreadsOriginalContextOnRunException() throws IOExcepti // create a abstract runnable, add headers and transient objects and verify in the methods try (ThreadContext.StoredContext ignored = threadContext.stashContext()) { threadContext.putHeader("foo", "bar"); + boolean systemContext = randomBoolean(); + if (systemContext) { + threadContext.markAsSystemContext(); + } threadContext.putTransient("foo", "bar_transient"); withContext = threadContext.preserveContext(new AbstractRunnable() { @Override public void onAfter() { + assertEquals(systemContext, threadContext.isSystemContext()); assertEquals("bar", threadContext.getHeader("foo")); assertEquals("bar_transient", threadContext.getTransient("foo")); assertNotNull(threadContext.getTransient("failure")); @@ -445,6 +450,7 @@ public void onAfter() { @Override public void onFailure(Exception e) { + assertEquals(systemContext, threadContext.isSystemContext()); assertEquals("exception from doRun", e.getMessage()); assertEquals("bar", threadContext.getHeader("foo")); assertEquals("bar_transient", threadContext.getTransient("foo")); @@ -454,6 +460,7 @@ public void onFailure(Exception e) { @Override protected void doRun() throws Exception { + assertEquals(systemContext, threadContext.isSystemContext()); assertEquals("bar", threadContext.getHeader("foo")); assertEquals("bar_transient", threadContext.getTransient("foo")); assertFalse(threadContext.isDefaultContext()); @@ -594,6 +601,18 @@ protected void doRun() throws Exception { } } + public void testMarkAsSystemContext() throws IOException { + try (ThreadContext threadContext = new ThreadContext(Settings.EMPTY)) { + assertFalse(threadContext.isSystemContext()); + try(ThreadContext.StoredContext context = threadContext.stashContext()){ + assertFalse(threadContext.isSystemContext()); + threadContext.markAsSystemContext(); + assertTrue(threadContext.isSystemContext()); + } + assertFalse(threadContext.isSystemContext()); + } + } + /** * Sometimes wraps a Runnable in an AbstractRunnable. 
*/ diff --git a/core/src/test/java/org/elasticsearch/deps/joda/SimpleJodaTests.java b/core/src/test/java/org/elasticsearch/deps/joda/SimpleJodaTests.java index 9ed433918d190..b99dd8e835384 100644 --- a/core/src/test/java/org/elasticsearch/deps/joda/SimpleJodaTests.java +++ b/core/src/test/java/org/elasticsearch/deps/joda/SimpleJodaTests.java @@ -314,6 +314,36 @@ public void testForInvalidDatesInEpochMillis() { } } + public void testForInvalidTimeZoneWithEpochSeconds() { + DateTimeFormatter dateTimeFormatter = new DateTimeFormatterBuilder() + .append(new Joda.EpochTimeParser(false)) + .toFormatter() + .withZone(DateTimeZone.forOffsetHours(1)); + FormatDateTimeFormatter formatter = + new FormatDateTimeFormatter("epoch_seconds", dateTimeFormatter, Locale.ROOT); + try { + formatter.parser().parseDateTime("1433144433655"); + fail("Expected IllegalArgumentException"); + } catch (IllegalArgumentException e) { + assertThat(e.getMessage(), containsString("time_zone must be UTC")); + } + } + + public void testForInvalidTimeZoneWithEpochMillis() { + DateTimeFormatter dateTimeFormatter = new DateTimeFormatterBuilder() + .append(new Joda.EpochTimeParser(true)) + .toFormatter() + .withZone(DateTimeZone.forOffsetHours(1)); + FormatDateTimeFormatter formatter = + new FormatDateTimeFormatter("epoch_millis", dateTimeFormatter, Locale.ROOT); + try { + formatter.parser().parseDateTime("1433144433"); + fail("Expected IllegalArgumentException"); + } catch (IllegalArgumentException e) { + assertThat(e.getMessage(), containsString("time_zone must be UTC")); + } + } + public void testThatEpochParserIsPrinter() { FormatDateTimeFormatter formatter = Joda.forPattern("epoch_millis"); assertThat(formatter.parser().isPrinter(), is(true)); diff --git a/core/src/test/java/org/elasticsearch/discovery/zen/MembershipActionTests.java b/core/src/test/java/org/elasticsearch/discovery/zen/MembershipActionTests.java new file mode 100644 index 0000000000000..b8d9f175e64eb --- /dev/null +++ b/core/src/test/java/org/elasticsearch/discovery/zen/MembershipActionTests.java @@ -0,0 +1,80 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package org.elasticsearch.discovery.zen; + +import org.elasticsearch.Version; +import org.elasticsearch.cluster.metadata.IndexMetaData; +import org.elasticsearch.cluster.metadata.MetaData; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.test.VersionUtils; + +public class MembershipActionTests extends ESTestCase { + + public void testPreventJoinClusterWithNewerIndices() { + Settings.builder().build(); + MetaData.Builder metaBuilder = MetaData.builder(); + IndexMetaData indexMetaData = IndexMetaData.builder("test") + .settings(settings(Version.CURRENT)) + .numberOfShards(1) + .numberOfReplicas(1).build(); + metaBuilder.put(indexMetaData, false); + MetaData metaData = metaBuilder.build(); + MembershipAction.ensureIndexCompatibility(Version.CURRENT, metaData); + + expectThrows(IllegalStateException.class, () -> + MembershipAction.ensureIndexCompatibility(VersionUtils.getPreviousVersion(Version.CURRENT), + metaData)); + } + + public void testPreventJoinClusterWithUnsupportedIndices() { + Settings.builder().build(); + MetaData.Builder metaBuilder = MetaData.builder(); + IndexMetaData indexMetaData = IndexMetaData.builder("test") + .settings(settings(VersionUtils.getPreviousVersion(Version.CURRENT + .minimumIndexCompatibilityVersion()))) + .numberOfShards(1) + .numberOfReplicas(1).build(); + metaBuilder.put(indexMetaData, false); + MetaData metaData = metaBuilder.build(); + expectThrows(IllegalStateException.class, () -> + MembershipAction.ensureIndexCompatibility(Version.CURRENT, + metaData)); + } + + public void testSuccess() { + Settings.builder().build(); + MetaData.Builder metaBuilder = MetaData.builder(); + IndexMetaData indexMetaData = IndexMetaData.builder("test") + .settings(settings(VersionUtils.randomVersionBetween(random(), + Version.CURRENT.minimumIndexCompatibilityVersion(), Version.CURRENT))) + .numberOfShards(1) + .numberOfReplicas(1).build(); + metaBuilder.put(indexMetaData, false); + indexMetaData = IndexMetaData.builder("test1") + .settings(settings(VersionUtils.randomVersionBetween(random(), + Version.CURRENT.minimumIndexCompatibilityVersion(), Version.CURRENT))) + .numberOfShards(1) + .numberOfReplicas(1).build(); + metaBuilder.put(indexMetaData, false); + MetaData metaData = metaBuilder.build(); + MembershipAction.ensureIndexCompatibility(Version.CURRENT, + metaData); + } +} diff --git a/core/src/test/java/org/elasticsearch/index/IndexSettingsTests.java b/core/src/test/java/org/elasticsearch/index/IndexSettingsTests.java index a32d076272ba6..bc3ee4b5f06f1 100644 --- a/core/src/test/java/org/elasticsearch/index/IndexSettingsTests.java +++ b/core/src/test/java/org/elasticsearch/index/IndexSettingsTests.java @@ -370,6 +370,27 @@ public void testTranslogFlushSizeThreshold() { assertEquals(actualNewTranslogFlushThresholdSize, settings.getFlushThresholdSize()); } + public void testTranslogGenerationSizeThreshold() { + final ByteSizeValue size = new ByteSizeValue(Math.abs(randomInt())); + final String key = IndexSettings.INDEX_TRANSLOG_GENERATION_THRESHOLD_SIZE_SETTING.getKey(); + final ByteSizeValue actualValue = + ByteSizeValue.parseBytesSizeValue(size.toString(), key); + final IndexMetaData metaData = + newIndexMeta( + "index", + Settings.builder() + .put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT) + .put(key, size.toString()) + .build()); + final IndexSettings settings = new IndexSettings(metaData, Settings.EMPTY); + assertEquals(actualValue, settings.getGenerationThresholdSize()); + 
final ByteSizeValue newSize = new ByteSizeValue(Math.abs(randomInt())); + final ByteSizeValue actual = ByteSizeValue.parseBytesSizeValue(newSize.toString(), key); + settings.updateIndexMetaData( + newIndexMeta("index", Settings.builder().put(key, newSize.toString()).build())); + assertEquals(actual, settings.getGenerationThresholdSize()); + } + public void testArchiveBrokenIndexSettings() { Settings settings = IndexScopedSettings.DEFAULT_SCOPED_SETTINGS.archiveUnknownOrInvalidSettings( diff --git a/core/src/test/java/org/elasticsearch/index/analysis/KeywordMarkerFilterFactoryTests.java b/core/src/test/java/org/elasticsearch/index/analysis/KeywordMarkerFilterFactoryTests.java new file mode 100644 index 0000000000000..3298537af97aa --- /dev/null +++ b/core/src/test/java/org/elasticsearch/index/analysis/KeywordMarkerFilterFactoryTests.java @@ -0,0 +1,103 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.index.analysis; + +import org.apache.lucene.analysis.TokenStream; +import org.apache.lucene.analysis.core.WhitespaceTokenizer; +import org.apache.lucene.analysis.miscellaneous.PatternKeywordMarkerFilter; +import org.apache.lucene.analysis.miscellaneous.SetKeywordMarkerFilter; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.env.Environment; +import org.elasticsearch.test.ESTestCase.TestAnalysis; +import org.elasticsearch.test.ESTokenStreamTestCase; + +import java.io.IOException; + +import static org.hamcrest.Matchers.instanceOf; + +/** + * Tests for the {@link KeywordMarkerTokenFilterFactory} class. + */ +public class KeywordMarkerFilterFactoryTests extends ESTokenStreamTestCase { + + /** + * Tests using a keyword set for the keyword marker filter. 
+ */ + public void testKeywordSet() throws IOException { + Settings settings = Settings.builder() + .put("index.analysis.filter.my_keyword.type", "keyword_marker") + .put("index.analysis.filter.my_keyword.keywords", "running, sleeping") + .put("index.analysis.analyzer.my_keyword.type", "custom") + .put("index.analysis.analyzer.my_keyword.tokenizer", "standard") + .put("index.analysis.analyzer.my_keyword.filter", "my_keyword, porter_stem") + .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toString()) + .build(); + TestAnalysis analysis = AnalysisTestsHelper.createTestAnalysisFromSettings(settings); + TokenFilterFactory tokenFilter = analysis.tokenFilter.get("my_keyword"); + assertThat(tokenFilter, instanceOf(KeywordMarkerTokenFilterFactory.class)); + TokenStream filter = tokenFilter.create(new WhitespaceTokenizer()); + assertThat(filter, instanceOf(SetKeywordMarkerFilter.class)); + NamedAnalyzer analyzer = analysis.indexAnalyzers.get("my_keyword"); + // jogging is not part of the keywords set, so verify that it's the only stemmed word + assertAnalyzesTo(analyzer, "running jogging sleeping", + new String[] { "running", "jog", "sleeping" }); + } + + /** + * Tests using a regular expression pattern for the keyword marker filter. + */ + public void testKeywordPattern() throws IOException { + Settings settings = Settings.builder() + .put("index.analysis.filter.my_keyword.type", "keyword_marker") + .put("index.analysis.filter.my_keyword.keywords_pattern", "run[a-z]ing") + .put("index.analysis.analyzer.my_keyword.type", "custom") + .put("index.analysis.analyzer.my_keyword.tokenizer", "standard") + .put("index.analysis.analyzer.my_keyword.filter", "my_keyword, porter_stem") + .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toString()) + .build(); + TestAnalysis analysis = AnalysisTestsHelper.createTestAnalysisFromSettings(settings); + TokenFilterFactory tokenFilter = analysis.tokenFilter.get("my_keyword"); + assertThat(tokenFilter, instanceOf(KeywordMarkerTokenFilterFactory.class)); + TokenStream filter = tokenFilter.create(new WhitespaceTokenizer()); + assertThat(filter, instanceOf(PatternKeywordMarkerFilter.class)); + NamedAnalyzer analyzer = analysis.indexAnalyzers.get("my_keyword"); + // running should match the pattern, so it should not be stemmed but sleeping should + assertAnalyzesTo(analyzer, "running sleeping", new String[] { "running", "sleep" }); + } + + /** + * Verifies that both keywords and patterns cannot be specified together.
+ */ + public void testCannotSpecifyBothKeywordsAndPattern() throws IOException { + Settings settings = Settings.builder() + .put("index.analysis.filter.my_keyword.type", "keyword_marker") + .put("index.analysis.filter.my_keyword.keywords", "running") + .put("index.analysis.filter.my_keyword.keywords_pattern", "run[a-z]ing") + .put("index.analysis.analyzer.my_keyword.type", "custom") + .put("index.analysis.analyzer.my_keyword.tokenizer", "standard") + .put("index.analysis.analyzer.my_keyword.filter", "my_keyword, porter_stem") + .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toString()) + .build(); + IllegalArgumentException e = expectThrows(IllegalArgumentException.class, + () -> AnalysisTestsHelper.createTestAnalysisFromSettings(settings)); + assertEquals("cannot specify both `keywords_pattern` and `keywords` or `keywords_path`", + e.getMessage()); + } +} diff --git a/core/src/test/java/org/elasticsearch/index/fielddata/AbstractFieldDataImplTestCase.java b/core/src/test/java/org/elasticsearch/index/fielddata/AbstractFieldDataImplTestCase.java index 75f53b8a7e325..c96292d90e5fb 100644 --- a/core/src/test/java/org/elasticsearch/index/fielddata/AbstractFieldDataImplTestCase.java +++ b/core/src/test/java/org/elasticsearch/index/fielddata/AbstractFieldDataImplTestCase.java @@ -109,9 +109,9 @@ public void testSingleValueAllSet() throws Exception { IndexSearcher searcher = new IndexSearcher(readerContext.reader()); TopFieldDocs topDocs; - + SortField sortField = indexFieldData.sortField(null, MultiValueMode.MIN, null, false); topDocs = searcher.search(new MatchAllDocsQuery(), 10, - new Sort(new SortField("value", indexFieldData.comparatorSource(null, MultiValueMode.MIN, null)))); + new Sort(sortField)); assertThat(topDocs.totalHits, equalTo(3)); assertThat(topDocs.scoreDocs[0].doc, equalTo(1)); assertThat(toString(((FieldDoc) topDocs.scoreDocs[0]).fields[0]), equalTo(one())); @@ -120,8 +120,9 @@ public void testSingleValueAllSet() throws Exception { assertThat(topDocs.scoreDocs[2].doc, equalTo(2)); assertThat(toString(((FieldDoc) topDocs.scoreDocs[2]).fields[0]), equalTo(three())); + sortField = indexFieldData.sortField(null, MultiValueMode.MAX, null, true); topDocs = searcher.search(new MatchAllDocsQuery(), 10, - new Sort(new SortField("value", indexFieldData.comparatorSource(null, MultiValueMode.MAX, null), true))); + new Sort(sortField)); assertThat(topDocs.totalHits, equalTo(3)); assertThat(topDocs.scoreDocs[0].doc, equalTo(2)); assertThat(topDocs.scoreDocs[1].doc, equalTo(0)); @@ -182,14 +183,16 @@ public void testMultiValueAllSet() throws Exception { assertValues(bytesValues, 2, three()); IndexSearcher searcher = new IndexSearcher(DirectoryReader.open(writer)); - TopFieldDocs topDocs = searcher.search(new MatchAllDocsQuery(), 10, new Sort(new SortField("value", indexFieldData.comparatorSource(null, MultiValueMode.MIN, null)))); + SortField sortField = indexFieldData.sortField(null, MultiValueMode.MIN, null, false); + TopFieldDocs topDocs = searcher.search(new MatchAllDocsQuery(), 10, new Sort(sortField)); assertThat(topDocs.totalHits, equalTo(3)); assertThat(topDocs.scoreDocs.length, equalTo(3)); assertThat(topDocs.scoreDocs[0].doc, equalTo(1)); assertThat(topDocs.scoreDocs[1].doc, equalTo(0)); - assertThat(topDocs.scoreDocs[2].doc, equalTo(2)); - - topDocs = searcher.search(new MatchAllDocsQuery(), 10, new Sort(new SortField("value", indexFieldData.comparatorSource(null, MultiValueMode.MAX, null), true))); + assertThat(topDocs.scoreDocs[2].doc, equalTo(2)); + sortField = 
indexFieldData.sortField(null, MultiValueMode.MAX, null, true); + topDocs = searcher.search(new MatchAllDocsQuery(), 10, new Sort(sortField)); assertThat(topDocs.totalHits, equalTo(3)); assertThat(topDocs.scoreDocs.length, equalTo(3)); assertThat(topDocs.scoreDocs[0].doc, equalTo(0)); @@ -245,8 +248,10 @@ public void testSortMultiValuesFields() throws Exception { IndexFieldData indexFieldData = getForField("value"); IndexSearcher searcher = new IndexSearcher(DirectoryReader.open(writer)); + SortField sortField = + indexFieldData.sortField(null, MultiValueMode.MIN, null, false); TopFieldDocs topDocs = searcher.search(new MatchAllDocsQuery(), 10, - new Sort(new SortField("value", indexFieldData.comparatorSource(null, MultiValueMode.MIN, null)))); + new Sort(sortField)); assertThat(topDocs.totalHits, equalTo(8)); assertThat(topDocs.scoreDocs.length, equalTo(8)); assertThat(topDocs.scoreDocs[0].doc, equalTo(7)); @@ -266,8 +271,9 @@ public void testSortMultiValuesFields() throws Exception { assertThat(topDocs.scoreDocs[7].doc, equalTo(5)); assertThat((BytesRef) ((FieldDoc) topDocs.scoreDocs[7]).fields[0], equalTo(null)); + sortField = indexFieldData.sortField(null, MultiValueMode.MAX, null, true); topDocs = searcher.search(new MatchAllDocsQuery(), 10, - new Sort(new SortField("value", indexFieldData.comparatorSource(null, MultiValueMode.MAX, null), true))); + new Sort(sortField)); assertThat(topDocs.totalHits, equalTo(8)); assertThat(topDocs.scoreDocs.length, equalTo(8)); assertThat(topDocs.scoreDocs[0].doc, equalTo(6)); diff --git a/core/src/test/java/org/elasticsearch/index/fielddata/AbstractStringFieldDataTestCase.java b/core/src/test/java/org/elasticsearch/index/fielddata/AbstractStringFieldDataTestCase.java index b3652ec9167aa..c24162788789c 100644 --- a/core/src/test/java/org/elasticsearch/index/fielddata/AbstractStringFieldDataTestCase.java +++ b/core/src/test/java/org/elasticsearch/index/fielddata/AbstractStringFieldDataTestCase.java @@ -264,8 +264,8 @@ public void testActualMissingValue(boolean reverse) throws IOException { final IndexFieldData indexFieldData = getForField("value"); final String missingValue = values[1]; IndexSearcher searcher = new IndexSearcher(DirectoryReader.open(writer)); - XFieldComparatorSource comparator = indexFieldData.comparatorSource(missingValue, MultiValueMode.MIN, null); - TopFieldDocs topDocs = searcher.search(new MatchAllDocsQuery(), randomBoolean() ? numDocs : randomIntBetween(10, numDocs), new Sort(new SortField("value", comparator, reverse))); + SortField sortField = indexFieldData.sortField(missingValue, MultiValueMode.MIN, null, reverse); + TopFieldDocs topDocs = searcher.search(new MatchAllDocsQuery(), randomBoolean() ? numDocs : randomIntBetween(10, numDocs), new Sort(sortField)); assertEquals(numDocs, topDocs.totalHits); BytesRef previousValue = reverse ? UnicodeUtil.BIG_TERM : new BytesRef(); for (int i = 0; i < topDocs.scoreDocs.length; ++i) { @@ -318,8 +318,8 @@ public void testSortMissing(boolean first, boolean reverse) throws IOException { } final IndexFieldData indexFieldData = getForField("value"); IndexSearcher searcher = new IndexSearcher(DirectoryReader.open(writer)); - XFieldComparatorSource comparator = indexFieldData.comparatorSource(first ? "_first" : "_last", MultiValueMode.MIN, null); - TopFieldDocs topDocs = searcher.search(new MatchAllDocsQuery(), randomBoolean() ? numDocs : randomIntBetween(10, numDocs), new Sort(new SortField("value", comparator, reverse))); + SortField sortField = indexFieldData.sortField(first ? 
"_first" : "_last", MultiValueMode.MIN, null, reverse); + TopFieldDocs topDocs = searcher.search(new MatchAllDocsQuery(), randomBoolean() ? numDocs : randomIntBetween(10, numDocs), new Sort(sortField)); assertEquals(numDocs, topDocs.totalHits); BytesRef previousValue = first ? null : reverse ? UnicodeUtil.BIG_TERM : new BytesRef(); for (int i = 0; i < topDocs.scoreDocs.length; ++i) { diff --git a/core/src/test/java/org/elasticsearch/index/fielddata/BinaryDVFieldDataTests.java b/core/src/test/java/org/elasticsearch/index/fielddata/BinaryDVFieldDataTests.java index 5231ccfc380cd..70c1486fe1f03 100644 --- a/core/src/test/java/org/elasticsearch/index/fielddata/BinaryDVFieldDataTests.java +++ b/core/src/test/java/org/elasticsearch/index/fielddata/BinaryDVFieldDataTests.java @@ -27,8 +27,10 @@ import org.elasticsearch.common.util.CollectionUtils; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentFactory; +import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.index.mapper.DocumentMapper; import org.elasticsearch.index.mapper.ParsedDocument; +import org.elasticsearch.index.mapper.SourceToParse; import java.util.List; import static org.hamcrest.Matchers.equalTo; @@ -56,16 +58,17 @@ public void testDocValue() throws Exception { bytesList1.add(randomBytes()); bytesList1.add(randomBytes()); XContentBuilder doc = XContentFactory.jsonBuilder().startObject().startArray("field").value(bytesList1.get(0)).value(bytesList1.get(1)).endArray().endObject(); - ParsedDocument d = mapper.parse("test", "test", "1", doc.bytes()); + ParsedDocument d = mapper.parse(SourceToParse.source("test", "test", "1", + doc.bytes(), XContentType.JSON)); writer.addDocument(d.rootDoc()); byte[] bytes1 = randomBytes(); doc = XContentFactory.jsonBuilder().startObject().field("field", bytes1).endObject(); - d = mapper.parse("test", "test", "2", doc.bytes()); + d = mapper.parse(SourceToParse.source("test", "test", "2", doc.bytes(), XContentType.JSON)); writer.addDocument(d.rootDoc()); doc = XContentFactory.jsonBuilder().startObject().endObject(); - d = mapper.parse("test", "test", "3", doc.bytes()); + d = mapper.parse(SourceToParse.source("test", "test", "3", doc.bytes(), XContentType.JSON)); writer.addDocument(d.rootDoc()); // test remove duplicate value @@ -73,7 +76,7 @@ public void testDocValue() throws Exception { bytesList2.add(randomBytes()); bytesList2.add(randomBytes()); doc = XContentFactory.jsonBuilder().startObject().startArray("field").value(bytesList2.get(0)).value(bytesList2.get(1)).value(bytesList2.get(0)).endArray().endObject(); - d = mapper.parse("test", "test", "4", doc.bytes()); + d = mapper.parse(SourceToParse.source("test", "test", "4", doc.bytes(), XContentType.JSON)); writer.addDocument(d.rootDoc()); List readers = refreshReader(); diff --git a/core/src/test/java/org/elasticsearch/index/fielddata/NoOrdinalsStringFieldDataTests.java b/core/src/test/java/org/elasticsearch/index/fielddata/NoOrdinalsStringFieldDataTests.java index 9fdede24d5cd7..33170eb39ece1 100644 --- a/core/src/test/java/org/elasticsearch/index/fielddata/NoOrdinalsStringFieldDataTests.java +++ b/core/src/test/java/org/elasticsearch/index/fielddata/NoOrdinalsStringFieldDataTests.java @@ -20,6 +20,8 @@ package org.elasticsearch.index.fielddata; import org.apache.lucene.index.LeafReaderContext; +import org.apache.lucene.search.SortField; +import org.elasticsearch.common.Nullable; import org.elasticsearch.index.Index; import 
org.elasticsearch.index.fielddata.IndexFieldData.XFieldComparatorSource.Nested; import org.elasticsearch.index.fielddata.fieldcomparator.BytesRefFieldComparatorSource; @@ -53,8 +55,9 @@ public AtomicFieldData loadDirect(LeafReaderContext context) throws Exception { } @Override - public XFieldComparatorSource comparatorSource(Object missingValue, MultiValueMode sortMode, Nested nested) { - return new BytesRefFieldComparatorSource(this, missingValue, sortMode, nested); + public SortField sortField(@Nullable Object missingValue, MultiValueMode sortMode, Nested nested, boolean reverse) { + XFieldComparatorSource source = new BytesRefFieldComparatorSource(this, missingValue, sortMode, nested); + return new SortField(getFieldName(), source, reverse); } @Override diff --git a/core/src/test/java/org/elasticsearch/index/fielddata/ParentChildFieldDataTests.java b/core/src/test/java/org/elasticsearch/index/fielddata/ParentChildFieldDataTests.java index b200a42b3f8dd..f426e5433c6f3 100644 --- a/core/src/test/java/org/elasticsearch/index/fielddata/ParentChildFieldDataTests.java +++ b/core/src/test/java/org/elasticsearch/index/fielddata/ParentChildFieldDataTests.java @@ -172,9 +172,8 @@ public void testGetBytesValues() throws Exception { public void testSorting() throws Exception { IndexFieldData indexFieldData = getForField(parentType); IndexSearcher searcher = new IndexSearcher(DirectoryReader.open(writer)); - IndexFieldData.XFieldComparatorSource comparator = indexFieldData.comparatorSource("_last", MultiValueMode.MIN, null); - - TopFieldDocs topDocs = searcher.search(new MatchAllDocsQuery(), 10, new Sort(new SortField(ParentFieldMapper.joinField(parentType), comparator, false))); + SortField sortField = indexFieldData.sortField("_last", MultiValueMode.MIN, null, false); + TopFieldDocs topDocs = searcher.search(new MatchAllDocsQuery(), 10, new Sort(sortField)); assertThat(topDocs.totalHits, equalTo(8)); assertThat(topDocs.scoreDocs.length, equalTo(8)); assertThat(topDocs.scoreDocs[0].doc, equalTo(0)); @@ -194,7 +193,8 @@ public void testSorting() throws Exception { assertThat(topDocs.scoreDocs[7].doc, equalTo(7)); assertThat(((BytesRef) ((FieldDoc) topDocs.scoreDocs[7]).fields[0]), equalTo(null)); - topDocs = searcher.search(new MatchAllDocsQuery(), 10, new Sort(new SortField(ParentFieldMapper.joinField(parentType), comparator, true))); + sortField = indexFieldData.sortField("_last", MultiValueMode.MIN, null, true); + topDocs = searcher.search(new MatchAllDocsQuery(), 10, new Sort(sortField)); assertThat(topDocs.totalHits, equalTo(8)); assertThat(topDocs.scoreDocs.length, equalTo(8)); assertThat(topDocs.scoreDocs[0].doc, equalTo(3)); diff --git a/core/src/test/java/org/elasticsearch/index/mapper/BinaryFieldMapperTests.java b/core/src/test/java/org/elasticsearch/index/mapper/BinaryFieldMapperTests.java index 2243c1182bd60..fac6e4c84b18c 100644 --- a/core/src/test/java/org/elasticsearch/index/mapper/BinaryFieldMapperTests.java +++ b/core/src/test/java/org/elasticsearch/index/mapper/BinaryFieldMapperTests.java @@ -30,6 +30,7 @@ import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.XContentFactory; +import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.test.ESSingleNodeTestCase; import org.elasticsearch.test.InternalSettingsPlugin; @@ -92,7 +93,9 @@ public void testStoredValue() throws IOException { assertTrue(CompressorFactory.isCompressed(new 
BytesArray(binaryValue2))); for (byte[] value : Arrays.asList(binaryValue1, binaryValue2)) { - ParsedDocument doc = mapper.parse("test", "type", "id", XContentFactory.jsonBuilder().startObject().field("field", value).endObject().bytes()); + ParsedDocument doc = mapper.parse(SourceToParse.source("test", "type", "id", + XContentFactory.jsonBuilder().startObject().field("field", value).endObject().bytes(), + XContentType.JSON)); BytesRef indexedValue = doc.rootDoc().getBinaryValue("field"); assertEquals(new BytesRef(value), indexedValue); FieldMapper fieldMapper = mapper.mappers().smartNameFieldMapper("field"); diff --git a/core/src/test/java/org/elasticsearch/index/mapper/BooleanFieldMapperTests.java b/core/src/test/java/org/elasticsearch/index/mapper/BooleanFieldMapperTests.java index 2486f91ccd3d0..9c67292431752 100644 --- a/core/src/test/java/org/elasticsearch/index/mapper/BooleanFieldMapperTests.java +++ b/core/src/test/java/org/elasticsearch/index/mapper/BooleanFieldMapperTests.java @@ -39,6 +39,7 @@ import org.elasticsearch.common.xcontent.ToXContent; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentFactory; +import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.index.IndexService; import org.elasticsearch.index.mapper.ParseContext.Document; import org.elasticsearch.plugins.Plugin; @@ -80,11 +81,12 @@ public void testDefaults() throws IOException { DocumentMapper defaultMapper = parser.parse("type", new CompressedXContent(mapping)); - ParsedDocument doc = defaultMapper.parse("test", "type", "1", XContentFactory.jsonBuilder() + ParsedDocument doc = defaultMapper.parse(SourceToParse.source("test", "type", "1", XContentFactory.jsonBuilder() .startObject() .field("field", true) .endObject() - .bytes()); + .bytes(), + XContentType.JSON)); try (Directory dir = new RAMDirectory(); IndexWriter w = new IndexWriter(dir, new IndexWriterConfig(new MockAnalyzer(random())))) { @@ -150,12 +152,13 @@ public void testParsesPreEs6BooleansLenient() throws IOException { String falsy = randomFrom("false", "off", "no", "0"); String truthy = randomFrom("true", "on", "yes", "1"); - ParsedDocument parsedDoc = defaultMapper.parse("legacy", "type", "1", XContentFactory.jsonBuilder() + ParsedDocument parsedDoc = defaultMapper.parse(SourceToParse.source("legacy", "type", "1", XContentFactory.jsonBuilder() .startObject() .field("field1", falsy) .field("field2", truthy) .endObject() - .bytes()); + .bytes(), + XContentType.JSON)); Document doc = parsedDoc.rootDoc(); assertEquals("F", doc.getField("field1").stringValue()); assertEquals("T", doc.getField("field2").stringValue()); @@ -190,7 +193,8 @@ public void testParsesEs6BooleansStrict() throws IOException { // omit "false"/"true" here as they should still be parsed correctly .field("field", randomFrom("off", "no", "0", "on", "yes", "1")) .endObject().bytes(); - MapperParsingException ex = expectThrows(MapperParsingException.class, () -> defaultMapper.parse("test", "type", "1", source)); + MapperParsingException ex = expectThrows(MapperParsingException.class, + () -> defaultMapper.parse(SourceToParse.source("test", "type", "1", source, XContentType.JSON))); assertEquals("failed to parse [field]", ex.getMessage()); } @@ -213,7 +217,7 @@ public void testMultiFields() throws IOException { .startObject() .field("field", false) .endObject().bytes(); - ParsedDocument doc = mapper.parse("test", "type", "1", source); + ParsedDocument doc = mapper.parse(SourceToParse.source("test", "type", 
"1", source, XContentType.JSON)); assertNotNull(doc.rootDoc().getField("field.as_string")); } @@ -236,13 +240,14 @@ public void testDocValues() throws Exception { DocumentMapper defaultMapper = indexService.mapperService().documentMapperParser().parse("type", new CompressedXContent(mapping)); - ParsedDocument parsedDoc = defaultMapper.parse("test", "type", "1", XContentFactory.jsonBuilder() + ParsedDocument parsedDoc = defaultMapper.parse(SourceToParse.source("test", "type", "1", XContentFactory.jsonBuilder() .startObject() .field("bool1", true) .field("bool2", true) .field("bool3", true) .endObject() - .bytes()); + .bytes(), + XContentType.JSON)); Document doc = parsedDoc.rootDoc(); IndexableField[] fields = doc.getFields("bool1"); assertEquals(2, fields.length); diff --git a/core/src/test/java/org/elasticsearch/index/mapper/CamelCaseFieldNameTests.java b/core/src/test/java/org/elasticsearch/index/mapper/CamelCaseFieldNameTests.java index 6e841509c0d92..ac14f2905cf3b 100644 --- a/core/src/test/java/org/elasticsearch/index/mapper/CamelCaseFieldNameTests.java +++ b/core/src/test/java/org/elasticsearch/index/mapper/CamelCaseFieldNameTests.java @@ -35,9 +35,10 @@ public void testCamelCaseFieldNameStaysAsIs() throws Exception { client().admin().indices().preparePutMapping("test").setType("type").setSource(mapping, XContentType.JSON).get(); DocumentMapper documentMapper = index.mapperService().documentMapper("type"); - ParsedDocument doc = documentMapper.parse("test", "type", "1", XContentFactory.jsonBuilder().startObject() + ParsedDocument doc = documentMapper.parse(SourceToParse.source("test", "type", "1", XContentFactory.jsonBuilder().startObject() .field("thisIsCamelCase", "value1") - .endObject().bytes()); + .endObject().bytes(), + XContentType.JSON)); assertNotNull(doc.dynamicMappingsUpdate()); client().admin().indices().preparePutMapping("test").setType("type") diff --git a/core/src/test/java/org/elasticsearch/index/mapper/CompletionFieldMapperTests.java b/core/src/test/java/org/elasticsearch/index/mapper/CompletionFieldMapperTests.java index 802aee7f48274..5da524b69c0e6 100644 --- a/core/src/test/java/org/elasticsearch/index/mapper/CompletionFieldMapperTests.java +++ b/core/src/test/java/org/elasticsearch/index/mapper/CompletionFieldMapperTests.java @@ -34,6 +34,7 @@ import org.elasticsearch.common.xcontent.ToXContent; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentFactory; +import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.common.xcontent.json.JsonXContent; import org.elasticsearch.index.IndexService; import org.elasticsearch.index.analysis.NamedAnalyzer; @@ -152,11 +153,12 @@ public void testParsingMinimal() throws Exception { DocumentMapper defaultMapper = createIndex("test").mapperService().documentMapperParser().parse("type1", new CompressedXContent(mapping)); FieldMapper fieldMapper = defaultMapper.mappers().getMapper("completion"); MappedFieldType completionFieldType = fieldMapper.fieldType(); - ParsedDocument parsedDocument = defaultMapper.parse("test", "type1", "1", XContentFactory.jsonBuilder() + ParsedDocument parsedDocument = defaultMapper.parse(SourceToParse.source("test", "type1", "1", XContentFactory.jsonBuilder() .startObject() .field("completion", "suggestion") .endObject() - .bytes()); + .bytes(), + XContentType.JSON)); IndexableField[] fields = parsedDocument.rootDoc().getFields(completionFieldType.name()); assertSuggestFields(fields, 1); } @@ -171,11 +173,12 @@ public void 
testParsingMultiValued() throws Exception {
         DocumentMapper defaultMapper = createIndex("test").mapperService().documentMapperParser().parse("type1", new CompressedXContent(mapping));
         FieldMapper fieldMapper = defaultMapper.mappers().getMapper("completion");
         MappedFieldType completionFieldType = fieldMapper.fieldType();
-        ParsedDocument parsedDocument = defaultMapper.parse("test", "type1", "1", XContentFactory.jsonBuilder()
+        ParsedDocument parsedDocument = defaultMapper.parse(SourceToParse.source("test", "type1", "1", XContentFactory.jsonBuilder()
                 .startObject()
                 .array("completion", "suggestion1", "suggestion2")
                 .endObject()
-                .bytes());
+                .bytes(),
+            XContentType.JSON));
         IndexableField[] fields = parsedDocument.rootDoc().getFields(completionFieldType.name());
         assertSuggestFields(fields, 2);
     }
@@ -190,14 +193,15 @@ public void testParsingWithWeight() throws Exception {
         DocumentMapper defaultMapper = createIndex("test").mapperService().documentMapperParser().parse("type1", new CompressedXContent(mapping));
         FieldMapper fieldMapper = defaultMapper.mappers().getMapper("completion");
         MappedFieldType completionFieldType = fieldMapper.fieldType();
-        ParsedDocument parsedDocument = defaultMapper.parse("test", "type1", "1", XContentFactory.jsonBuilder()
+        ParsedDocument parsedDocument = defaultMapper.parse(SourceToParse.source("test", "type1", "1", XContentFactory.jsonBuilder()
                 .startObject()
                 .startObject("completion")
                 .field("input", "suggestion")
                 .field("weight", 2)
                 .endObject()
                 .endObject()
-                .bytes());
+                .bytes(),
+            XContentType.JSON));
         IndexableField[] fields = parsedDocument.rootDoc().getFields(completionFieldType.name());
         assertSuggestFields(fields, 1);
     }
@@ -212,14 +216,15 @@ public void testParsingMultiValueWithWeight() throws Exception {
         DocumentMapper defaultMapper = createIndex("test").mapperService().documentMapperParser().parse("type1", new CompressedXContent(mapping));
         FieldMapper fieldMapper = defaultMapper.mappers().getMapper("completion");
         MappedFieldType completionFieldType = fieldMapper.fieldType();
-        ParsedDocument parsedDocument = defaultMapper.parse("test", "type1", "1", XContentFactory.jsonBuilder()
+        ParsedDocument parsedDocument = defaultMapper.parse(SourceToParse.source("test", "type1", "1", XContentFactory.jsonBuilder()
                 .startObject()
                 .startObject("completion")
                 .array("input", "suggestion1", "suggestion2", "suggestion3")
                 .field("weight", 2)
                 .endObject()
                 .endObject()
-                .bytes());
+                .bytes(),
+            XContentType.JSON));
         IndexableField[] fields = parsedDocument.rootDoc().getFields(completionFieldType.name());
         assertSuggestFields(fields, 3);
     }
@@ -234,7 +239,7 @@ public void testParsingFull() throws Exception {
         DocumentMapper defaultMapper = createIndex("test").mapperService().documentMapperParser().parse("type1", new CompressedXContent(mapping));
         FieldMapper fieldMapper = defaultMapper.mappers().getMapper("completion");
         MappedFieldType completionFieldType = fieldMapper.fieldType();
-        ParsedDocument parsedDocument = defaultMapper.parse("test", "type1", "1", XContentFactory.jsonBuilder()
+        ParsedDocument parsedDocument = defaultMapper.parse(SourceToParse.source("test", "type1", "1", XContentFactory.jsonBuilder()
                 .startObject()
                 .startArray("completion")
                 .startObject()
@@ -251,7 +256,8 @@ public void testParsingFull() throws Exception {
                 .endObject()
                 .endArray()
                 .endObject()
-                .bytes());
+                .bytes(),
+            XContentType.JSON));
         IndexableField[] fields = parsedDocument.rootDoc().getFields(completionFieldType.name());
         assertSuggestFields(fields, 3);
     }
@@ -266,7 +272,7 @@ public void testParsingMixed() throws Exception {
         DocumentMapper defaultMapper = createIndex("test").mapperService().documentMapperParser().parse("type1", new CompressedXContent(mapping));
         FieldMapper fieldMapper = defaultMapper.mappers().getMapper("completion");
         MappedFieldType completionFieldType = fieldMapper.fieldType();
-        ParsedDocument parsedDocument = defaultMapper.parse("test", "type1", "1", XContentFactory.jsonBuilder()
+        ParsedDocument parsedDocument = defaultMapper.parse(SourceToParse.source("test", "type1", "1", XContentFactory.jsonBuilder()
                 .startObject()
                 .startArray("completion")
                 .startObject()
@@ -283,7 +289,8 @@ public void testParsingMixed() throws Exception {
                 .endObject()
                 .endArray()
                 .endObject()
-                .bytes());
+                .bytes(),
+            XContentType.JSON));
         IndexableField[] fields = parsedDocument.rootDoc().getFields(completionFieldType.name());
         assertSuggestFields(fields, 6);
     }
@@ -297,7 +304,7 @@ public void testNonContextEnabledParsingWithContexts() throws Exception {
         DocumentMapper defaultMapper = createIndex("test").mapperService().documentMapperParser().parse("type1", new CompressedXContent(mapping));
         try {
-            defaultMapper.parse("test", "type1", "1", XContentFactory.jsonBuilder()
+            defaultMapper.parse(SourceToParse.source("test", "type1", "1", XContentFactory.jsonBuilder()
                     .startObject()
                     .startObject("field1")
                     .field("input", "suggestion1")
@@ -307,7 +314,8 @@ public void testNonContextEnabledParsingWithContexts() throws Exception {
                     .field("weight", 3)
                     .endObject()
                     .endObject()
-                    .bytes());
+                    .bytes(),
+                XContentType.JSON));
             fail("Supplying contexts to a non context-enabled field should error");
         } catch (MapperParsingException e) {
             assertThat(e.getRootCause().getMessage(), containsString("field1"));
@@ -326,11 +334,12 @@ public void testFieldValueValidation() throws Exception {
         charsRefBuilder.append("sugg");
         charsRefBuilder.setCharAt(2, '\u001F');
         try {
-            defaultMapper.parse("test", "type1", "1", XContentFactory.jsonBuilder()
+            defaultMapper.parse(SourceToParse.source("test", "type1", "1", XContentFactory.jsonBuilder()
                     .startObject()
                     .field("completion", charsRefBuilder.get().toString())
                     .endObject()
-                    .bytes());
+                    .bytes(),
+                XContentType.JSON));
             fail("No error indexing value with reserved character [0x1F]");
         } catch (MapperParsingException e) {
             Throwable cause = e.unwrapCause().getCause();
@@ -340,11 +349,12 @@ public void testFieldValueValidation() throws Exception {
         charsRefBuilder.setCharAt(2, '\u0000');
         try {
-            defaultMapper.parse("test", "type1", "1", XContentFactory.jsonBuilder()
+            defaultMapper.parse(SourceToParse.source("test", "type1", "1", XContentFactory.jsonBuilder()
                     .startObject()
                     .field("completion", charsRefBuilder.get().toString())
                     .endObject()
-                    .bytes());
+                    .bytes(),
+                XContentType.JSON));
             fail("No error indexing value with reserved character [0x0]");
         } catch (MapperParsingException e) {
             Throwable cause = e.unwrapCause().getCause();
@@ -354,11 +364,12 @@ public void testFieldValueValidation() throws Exception {
         charsRefBuilder.setCharAt(2, '\u001E');
         try {
-            defaultMapper.parse("test", "type1", "1", XContentFactory.jsonBuilder()
+            defaultMapper.parse(SourceToParse.source("test", "type1", "1", XContentFactory.jsonBuilder()
                     .startObject()
                     .field("completion", charsRefBuilder.get().toString())
                     .endObject()
-                    .bytes());
+                    .bytes(),
+                XContentType.JSON));
             fail("No error indexing value with reserved character [0x1E]");
         } catch (MapperParsingException e) {
             Throwable cause = e.unwrapCause().getCause();
diff --git a/core/src/test/java/org/elasticsearch/index/mapper/CopyToMapperTests.java b/core/src/test/java/org/elasticsearch/index/mapper/CopyToMapperTests.java
index 85fddfc800103..4b2f629c36eea 100644
--- a/core/src/test/java/org/elasticsearch/index/mapper/CopyToMapperTests.java
+++ b/core/src/test/java/org/elasticsearch/index/mapper/CopyToMapperTests.java
@@ -94,7 +94,7 @@ public void testCopyToFieldsParsing() throws Exception {
                 .field("int_to_str_test", 42)
                 .endObject().bytes();
-        ParsedDocument parsedDoc = docMapper.parse("test", "type1", "1", json);
+        ParsedDocument parsedDoc = docMapper.parse(SourceToParse.source("test", "type1", "1", json, XContentType.JSON));
         ParseContext.Document doc = parsedDoc.rootDoc();
         assertThat(doc.getFields("copy_test").length, equalTo(2));
         assertThat(doc.getFields("copy_test")[0].stringValue(), equalTo("foo"));
@@ -149,7 +149,8 @@ public void testCopyToFieldsInnerObjectParsing() throws Exception {
                 .startObject("foo").startObject("bar").field("baz", "zoo").endObject().endObject()
                 .endObject().bytes();
-        ParseContext.Document doc = docMapper.parse("test", "type1", "1", json).rootDoc();
+        ParseContext.Document doc = docMapper.parse(SourceToParse.source("test", "type1", "1", json,
+            XContentType.JSON)).rootDoc();
         assertThat(doc.getFields("copy_test").length, equalTo(1));
         assertThat(doc.getFields("copy_test")[0].stringValue(), equalTo("foo"));
@@ -175,7 +176,8 @@ public void testCopyToDynamicInnerObjectParsing() throws Exception {
                 .field("new_field", "bar")
                 .endObject().bytes();
-        ParseContext.Document doc = docMapper.parse("test", "type1", "1", json).rootDoc();
+        ParseContext.Document doc = docMapper.parse(SourceToParse.source("test", "type1", "1", json,
+            XContentType.JSON)).rootDoc();
         assertThat(doc.getFields("copy_test").length, equalTo(1));
         assertThat(doc.getFields("copy_test")[0].stringValue(), equalTo("foo"));
@@ -211,7 +213,8 @@ public void testCopyToDynamicInnerInnerObjectParsing() throws Exception {
                 .field("new_field", "bar")
                 .endObject().bytes();
-        ParseContext.Document doc = docMapper.parse("test", "type1", "1", json).rootDoc();
+        ParseContext.Document doc = docMapper.parse(SourceToParse.source("test", "type1", "1", json,
+            XContentType.JSON)).rootDoc();
         assertThat(doc.getFields("copy_test").length, equalTo(1));
         assertThat(doc.getFields("copy_test")[0].stringValue(), equalTo("foo"));
@@ -240,7 +243,7 @@ public void testCopyToStrictDynamicInnerObjectParsing() throws Exception {
                 .endObject().bytes();
         try {
-            docMapper.parse("test", "type1", "1", json).rootDoc();
+            docMapper.parse(SourceToParse.source("test", "type1", "1", json, XContentType.JSON)).rootDoc();
             fail();
         } catch (MapperParsingException ex) {
             assertThat(ex.getMessage(), startsWith("mapping set to strict, dynamic introduction of [very] within [type1] is not allowed"));
@@ -274,7 +277,7 @@ public void testCopyToInnerStrictDynamicInnerObjectParsing() throws Exception {
                 .endObject().bytes();
         try {
-            docMapper.parse("test", "type1", "1", json).rootDoc();
+            docMapper.parse(SourceToParse.source("test", "type1", "1", json, XContentType.JSON)).rootDoc();
             fail();
         } catch (MapperParsingException ex) {
             assertThat(ex.getMessage(), startsWith("mapping set to strict, dynamic introduction of [field] within [very.far] is not allowed"));
@@ -377,7 +380,7 @@ public void testCopyToNestedField() throws Exception {
                 .endArray()
                 .endObject();
-        ParsedDocument doc = mapper.parse("test", "type", "1", jsonDoc.bytes());
+        ParsedDocument doc = mapper.parse(SourceToParse.source("test", "type", "1", jsonDoc.bytes(),
XContentType.JSON)); assertEquals(6, doc.docs().size()); Document nested = doc.docs().get(0); @@ -439,7 +442,7 @@ public void testCopyToDynamicNestedObjectParsing() throws Exception { .endObject().bytes(); try { - docMapper.parse("test", "type1", "1", json).rootDoc(); + docMapper.parse(SourceToParse.source("test", "type1", "1", json, XContentType.JSON)).rootDoc(); fail(); } catch (MapperParsingException ex) { assertThat(ex.getMessage(), startsWith("It is forbidden to create dynamic nested objects ([very]) through `copy_to`")); diff --git a/core/src/test/java/org/elasticsearch/index/mapper/DateFieldMapperTests.java b/core/src/test/java/org/elasticsearch/index/mapper/DateFieldMapperTests.java index 300203b1a351c..24bfc93030672 100644 --- a/core/src/test/java/org/elasticsearch/index/mapper/DateFieldMapperTests.java +++ b/core/src/test/java/org/elasticsearch/index/mapper/DateFieldMapperTests.java @@ -23,6 +23,7 @@ import org.apache.lucene.index.IndexableField; import org.elasticsearch.common.compress.CompressedXContent; import org.elasticsearch.common.xcontent.XContentFactory; +import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.index.IndexService; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.test.ESSingleNodeTestCase; @@ -62,11 +63,12 @@ public void testDefaults() throws Exception { assertEquals(mapping, mapper.mappingSource().toString()); - ParsedDocument doc = mapper.parse("test", "type", "1", XContentFactory.jsonBuilder() + ParsedDocument doc = mapper.parse(SourceToParse.source("test", "type", "1", XContentFactory.jsonBuilder() .startObject() .field("field", "2016-03-11") .endObject() - .bytes()); + .bytes(), + XContentType.JSON)); IndexableField[] fields = doc.rootDoc().getFields("field"); assertEquals(2, fields.length); @@ -90,11 +92,12 @@ public void testNotIndexed() throws Exception { assertEquals(mapping, mapper.mappingSource().toString()); - ParsedDocument doc = mapper.parse("test", "type", "1", XContentFactory.jsonBuilder() + ParsedDocument doc = mapper.parse(SourceToParse.source("test", "type", "1", XContentFactory.jsonBuilder() .startObject() .field("field", "2016-03-11") .endObject() - .bytes()); + .bytes(), + XContentType.JSON)); IndexableField[] fields = doc.rootDoc().getFields("field"); assertEquals(1, fields.length); @@ -111,11 +114,12 @@ public void testNoDocValues() throws Exception { assertEquals(mapping, mapper.mappingSource().toString()); - ParsedDocument doc = mapper.parse("test", "type", "1", XContentFactory.jsonBuilder() + ParsedDocument doc = mapper.parse(SourceToParse.source("test", "type", "1", XContentFactory.jsonBuilder() .startObject() .field("field", "2016-03-11") .endObject() - .bytes()); + .bytes(), + XContentType.JSON)); IndexableField[] fields = doc.rootDoc().getFields("field"); assertEquals(1, fields.length); @@ -132,11 +136,12 @@ public void testStore() throws Exception { assertEquals(mapping, mapper.mappingSource().toString()); - ParsedDocument doc = mapper.parse("test", "type", "1", XContentFactory.jsonBuilder() + ParsedDocument doc = mapper.parse(SourceToParse.source("test", "type", "1", XContentFactory.jsonBuilder() .startObject() .field("field", "2016-03-11") .endObject() - .bytes()); + .bytes(), + XContentType.JSON)); IndexableField[] fields = doc.rootDoc().getFields("field"); assertEquals(3, fields.length); @@ -158,11 +163,12 @@ public void testIgnoreMalformed() throws Exception { assertEquals(mapping, mapper.mappingSource().toString()); - ThrowingRunnable runnable = () -> mapper.parse("test", 
"type", "1", XContentFactory.jsonBuilder() + ThrowingRunnable runnable = () -> mapper.parse(SourceToParse.source("test", "type", "1", XContentFactory.jsonBuilder() .startObject() .field("field", "2016-03-99") .endObject() - .bytes()); + .bytes(), + XContentType.JSON)); MapperParsingException e = expectThrows(MapperParsingException.class, runnable); assertThat(e.getCause().getMessage(), containsString("Cannot parse \"2016-03-99\"")); @@ -173,11 +179,12 @@ public void testIgnoreMalformed() throws Exception { DocumentMapper mapper2 = parser.parse("type", new CompressedXContent(mapping)); - ParsedDocument doc = mapper2.parse("test", "type", "1", XContentFactory.jsonBuilder() + ParsedDocument doc = mapper2.parse(SourceToParse.source("test", "type", "1", XContentFactory.jsonBuilder() .startObject() .field("field", ":1") .endObject() - .bytes()); + .bytes(), + XContentType.JSON)); IndexableField[] fields = doc.rootDoc().getFields("field"); assertEquals(0, fields.length); @@ -193,11 +200,12 @@ public void testChangeFormat() throws IOException { assertEquals(mapping, mapper.mappingSource().toString()); - ParsedDocument doc = mapper.parse("test", "type", "1", XContentFactory.jsonBuilder() + ParsedDocument doc = mapper.parse(SourceToParse.source("test", "type", "1", XContentFactory.jsonBuilder() .startObject() .field("field", 1457654400) .endObject() - .bytes()); + .bytes(), + XContentType.JSON)); IndexableField[] fields = doc.rootDoc().getFields("field"); assertEquals(2, fields.length); @@ -214,11 +222,12 @@ public void testChangeLocale() throws IOException { assertEquals(mapping, mapper.mappingSource().toString()); - mapper.parse("test", "type", "1", XContentFactory.jsonBuilder() + mapper.parse(SourceToParse.source("test", "type", "1", XContentFactory.jsonBuilder() .startObject() .field("field", 1457654400) .endObject() - .bytes()); + .bytes(), + XContentType.JSON)); } public void testNullValue() throws IOException { @@ -234,11 +243,12 @@ public void testNullValue() throws IOException { DocumentMapper mapper = parser.parse("type", new CompressedXContent(mapping)); assertEquals(mapping, mapper.mappingSource().toString()); - ParsedDocument doc = mapper.parse("test", "type", "1", XContentFactory.jsonBuilder() + ParsedDocument doc = mapper.parse(SourceToParse.source("test", "type", "1", XContentFactory.jsonBuilder() .startObject() .nullField("field") .endObject() - .bytes()); + .bytes(), + XContentType.JSON)); assertArrayEquals(new IndexableField[0], doc.rootDoc().getFields("field")); mapping = XContentFactory.jsonBuilder().startObject() @@ -254,11 +264,12 @@ public void testNullValue() throws IOException { mapper = parser.parse("type", new CompressedXContent(mapping)); assertEquals(mapping, mapper.mappingSource().toString()); - doc = mapper.parse("test", "type", "1", XContentFactory.jsonBuilder() + doc = mapper.parse(SourceToParse.source("test", "type", "1", XContentFactory.jsonBuilder() .startObject() .nullField("field") .endObject() - .bytes()); + .bytes(), + XContentType.JSON)); IndexableField[] fields = doc.rootDoc().getFields("field"); assertEquals(2, fields.length); IndexableField pointField = fields[0]; @@ -322,11 +333,12 @@ public void testTimeZoneParsing() throws Exception { final DateTimeZone randomTimeZone = randomBoolean() ? 
DateTimeZone.forID(randomFrom("UTC", "CET")) : randomDateTimeZone(); final DateTime randomDate = new DateTime(2016, 03, 11, 0, 0, 0, randomTimeZone); - ParsedDocument doc = mapper.parse("test", "type", "1", XContentFactory.jsonBuilder() + ParsedDocument doc = mapper.parse(SourceToParse.source("test", "type", "1", XContentFactory.jsonBuilder() .startObject() .field("field", DateTimeFormat.forPattern(timeZonePattern).print(randomDate)) .endObject() - .bytes()); + .bytes(), + XContentType.JSON)); IndexableField[] fields = doc.rootDoc().getFields("field"); assertEquals(2, fields.length); diff --git a/core/src/test/java/org/elasticsearch/index/mapper/DocumentMapperMergeTests.java b/core/src/test/java/org/elasticsearch/index/mapper/DocumentMapperMergeTests.java index a9b1753ba1c8a..e2fbbe7ebfe78 100644 --- a/core/src/test/java/org/elasticsearch/index/mapper/DocumentMapperMergeTests.java +++ b/core/src/test/java/org/elasticsearch/index/mapper/DocumentMapperMergeTests.java @@ -23,6 +23,7 @@ import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.compress.CompressedXContent; import org.elasticsearch.common.xcontent.XContentFactory; +import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.index.analysis.NamedAnalyzer; import org.elasticsearch.index.mapper.DocumentFieldMappers; import org.elasticsearch.index.mapper.DocumentMapper; @@ -167,7 +168,11 @@ public void run() { barrier.await(); for (int i = 0; i < 200 && stopped.get() == false; i++) { final String fieldName = Integer.toString(i); - ParsedDocument doc = documentMapper.parse("test", "test", fieldName, new BytesArray("{ \"" + fieldName + "\" : \"test\" }")); + ParsedDocument doc = documentMapper.parse(SourceToParse.source("test", + "test", + fieldName, + new BytesArray("{ \"" + fieldName + "\" : \"test\" }"), + XContentType.JSON)); Mapping update = doc.dynamicMappingsUpdate(); assert update != null; lastIntroducedFieldName.set(fieldName); @@ -186,7 +191,11 @@ public void run() { while(stopped.get() == false) { final String fieldName = lastIntroducedFieldName.get(); final BytesReference source = new BytesArray("{ \"" + fieldName + "\" : \"test\" }"); - ParsedDocument parsedDoc = documentMapper.parse("test", "test", "random", source); + ParsedDocument parsedDoc = documentMapper.parse(SourceToParse.source("test", + "test", + "random", + source, + XContentType.JSON)); if (parsedDoc.dynamicMappingsUpdate() != null) { // not in the mapping yet, try again continue; diff --git a/core/src/test/java/org/elasticsearch/index/mapper/DocumentParserTests.java b/core/src/test/java/org/elasticsearch/index/mapper/DocumentParserTests.java index db748143576f3..cd4d1d21c6cd0 100644 --- a/core/src/test/java/org/elasticsearch/index/mapper/DocumentParserTests.java +++ b/core/src/test/java/org/elasticsearch/index/mapper/DocumentParserTests.java @@ -71,7 +71,7 @@ public void testTypeDisabled() throws Exception { .startObject().startObject("foo") .field("field", "1234") .endObject().endObject().bytes(); - ParsedDocument doc = mapper.parse("test", "type", "1", bytes); + ParsedDocument doc = mapper.parse(SourceToParse.source("test", "type", "1", bytes, XContentType.JSON)); assertNull(doc.rootDoc().getField("field")); assertNotNull(doc.rootDoc().getField(UidFieldMapper.NAME)); } @@ -89,7 +89,7 @@ public void testFieldDisabled() throws Exception { .field("foo", "1234") .field("bar", 10) .endObject().bytes(); - ParsedDocument doc = mapper.parse("test", "type", "1", bytes); + ParsedDocument doc = 
mapper.parse(SourceToParse.source("test", "type", "1", bytes, XContentType.JSON)); assertNull(doc.rootDoc().getField("foo")); assertNotNull(doc.rootDoc().getField("bar")); assertNotNull(doc.rootDoc().getField(UidFieldMapper.NAME)); @@ -114,7 +114,7 @@ public void testDotsWithExistingMapper() throws Exception { .field("baz", 789) .endObject() .endObject().bytes(); - ParsedDocument doc = mapper.parse("test", "type", "1", bytes); + ParsedDocument doc = mapper.parse(SourceToParse.source("test", "type", "1", bytes, XContentType.JSON)); assertNull(doc.dynamicMappingsUpdate()); // no update! String[] values = doc.rootDoc().getValues("foo.bar.baz"); assertEquals(3, values.length); @@ -136,7 +136,7 @@ public void testDotsWithExistingNestedMapper() throws Exception { .field("foo.bar", 123) .endObject().bytes(); MapperParsingException e = expectThrows(MapperParsingException.class, - () -> mapper.parse("test", "type", "1", bytes)); + () -> mapper.parse(SourceToParse.source("test", "type", "1", bytes, XContentType.JSON))); assertEquals( "Cannot add a value for field [foo.bar] since one of the intermediate objects is mapped as a nested object: [foo]", e.getMessage()); @@ -162,7 +162,7 @@ public void testDotsWithDynamicNestedMapper() throws Exception { .field("foo.bar",42) .endObject().bytes(); MapperParsingException e = expectThrows(MapperParsingException.class, - () -> mapper.parse("test", "type", "1", bytes)); + () -> mapper.parse(SourceToParse.source("test", "type", "1", bytes, XContentType.JSON))); assertEquals( "It is forbidden to create dynamic nested objects ([foo]) through `copy_to` or dots in field names", e.getMessage()); @@ -183,7 +183,7 @@ public void testPropagateDynamicWithExistingMapper() throws Exception { .startObject().startObject("foo") .field("bar", "something") .endObject().endObject().bytes(); - ParsedDocument doc = mapper.parse("test", "type", "1", bytes); + ParsedDocument doc = mapper.parse(SourceToParse.source("test", "type", "1", bytes, XContentType.JSON)); assertNotNull(doc.dynamicMappingsUpdate()); assertNotNull(doc.rootDoc().getField("foo.bar")); } @@ -203,7 +203,7 @@ public void testPropagateDynamicWithDynamicMapper() throws Exception { .startObject().startObject("foo").startObject("bar") .field("baz", "something") .endObject().endObject().endObject().bytes(); - ParsedDocument doc = mapper.parse("test", "type", "1", bytes); + ParsedDocument doc = mapper.parse(SourceToParse.source("test", "type", "1", bytes, XContentType.JSON)); assertNotNull(doc.dynamicMappingsUpdate()); assertNotNull(doc.rootDoc().getField("foo.bar.baz")); } @@ -222,7 +222,7 @@ public void testDynamicRootFallback() throws Exception { .startObject().startObject("foo") .field("bar", "something") .endObject().endObject().bytes(); - ParsedDocument doc = mapper.parse("test", "type", "1", bytes); + ParsedDocument doc = mapper.parse(SourceToParse.source("test", "type", "1", bytes, XContentType.JSON)); assertNull(doc.dynamicMappingsUpdate()); assertNull(doc.rootDoc().getField("foo.bar")); } @@ -350,7 +350,7 @@ public void testDynamicGeoPointArrayWithTemplate() throws Exception { .startArray().value(0).value(0).endArray() .startArray().value(1).value(1).endArray() .endArray().endObject().bytes(); - ParsedDocument doc = mapper.parse("test", "type", "1", bytes); + ParsedDocument doc = mapper.parse(SourceToParse.source("test", "type", "1", bytes, XContentType.JSON)); assertEquals(2, doc.rootDoc().getFields("foo").length); } @@ -368,7 +368,7 @@ public void testDynamicLongArrayWithTemplate() throws Exception { .value(0) 
.value(1) .endArray().endObject().bytes(); - ParsedDocument doc = mapper.parse("test", "type", "1", bytes); + ParsedDocument doc = mapper.parse(SourceToParse.source("test", "type", "1", bytes, XContentType.JSON)); assertEquals(4, doc.rootDoc().getFields("foo").length); } @@ -383,7 +383,7 @@ public void testDynamicLongArray() throws Exception { .value(0) .value(1) .endArray().endObject().bytes(); - ParsedDocument doc = mapper.parse("test", "type", "1", bytes); + ParsedDocument doc = mapper.parse(SourceToParse.source("test", "type", "1", bytes, XContentType.JSON)); assertEquals(4, doc.rootDoc().getFields("foo").length); } @@ -398,7 +398,7 @@ public void testDynamicFalseLongArray() throws Exception { .value(0) .value(1) .endArray().endObject().bytes(); - ParsedDocument doc = mapper.parse("test", "type", "1", bytes); + ParsedDocument doc = mapper.parse(SourceToParse.source("test", "type", "1", bytes, XContentType.JSON)); assertEquals(0, doc.rootDoc().getFields("foo").length); } @@ -414,7 +414,7 @@ public void testDynamicStrictLongArray() throws Exception { .value(1) .endArray().endObject().bytes(); StrictDynamicMappingException exception = expectThrows(StrictDynamicMappingException.class, - () -> mapper.parse("test", "type", "1", bytes)); + () -> mapper.parse(SourceToParse.source("test", "type", "1", bytes, XContentType.JSON))); assertEquals("mapping set to strict, dynamic introduction of [foo] within [type] is not allowed", exception.getMessage()); } @@ -430,7 +430,7 @@ public void testMappedGeoPointArray() throws Exception { .startArray().value(0).value(0).endArray() .startArray().value(1).value(1).endArray() .endArray().endObject().bytes(); - ParsedDocument doc = mapper.parse("test", "type", "1", bytes); + ParsedDocument doc = mapper.parse(SourceToParse.source("test", "type", "1", bytes, XContentType.JSON)); assertEquals(2, doc.rootDoc().getFields("foo").length); } @@ -446,7 +446,7 @@ public void testMappedLongArray() throws Exception { .value(0) .value(1) .endArray().endObject().bytes(); - ParsedDocument doc = mapper.parse("test", "type", "1", bytes); + ParsedDocument doc = mapper.parse(SourceToParse.source("test", "type", "1", bytes, XContentType.JSON)); assertEquals(4, doc.rootDoc().getFields("foo").length); } @@ -464,7 +464,7 @@ public void testDynamicObjectWithTemplate() throws Exception { .startObject().startObject("foo") .field("bar", "baz") .endObject().endObject().bytes(); - ParsedDocument doc = mapper.parse("test", "type", "1", bytes); + ParsedDocument doc = mapper.parse(SourceToParse.source("test", "type", "1", bytes, XContentType.JSON)); assertEquals(2, doc.rootDoc().getFields("foo.bar").length); } @@ -478,7 +478,7 @@ public void testDynamicFalseObject() throws Exception { .startObject().startObject("foo") .field("bar", "baz") .endObject().endObject().bytes(); - ParsedDocument doc = mapper.parse("test", "type", "1", bytes); + ParsedDocument doc = mapper.parse(SourceToParse.source("test", "type", "1", bytes, XContentType.JSON)); assertEquals(0, doc.rootDoc().getFields("foo.bar").length); } @@ -493,7 +493,7 @@ public void testDynamicStrictObject() throws Exception { .field("bar", "baz") .endObject().endObject().bytes(); StrictDynamicMappingException exception = expectThrows(StrictDynamicMappingException.class, - () -> mapper.parse("test", "type", "1", bytes)); + () -> mapper.parse(SourceToParse.source("test", "type", "1", bytes, XContentType.JSON))); assertEquals("mapping set to strict, dynamic introduction of [foo] within [type] is not allowed", exception.getMessage()); } @@ 
-507,7 +507,7 @@ public void testDynamicFalseValue() throws Exception { .startObject() .field("bar", "baz") .endObject().bytes(); - ParsedDocument doc = mapper.parse("test", "type", "1", bytes); + ParsedDocument doc = mapper.parse(SourceToParse.source("test", "type", "1", bytes, XContentType.JSON)); assertEquals(0, doc.rootDoc().getFields("bar").length); } @@ -522,7 +522,7 @@ public void testDynamicStrictValue() throws Exception { .field("bar", "baz") .endObject().bytes(); StrictDynamicMappingException exception = expectThrows(StrictDynamicMappingException.class, - () -> mapper.parse("test", "type", "1", bytes)); + () -> mapper.parse(SourceToParse.source("test", "type", "1", bytes, XContentType.JSON))); assertEquals("mapping set to strict, dynamic introduction of [bar] within [type] is not allowed", exception.getMessage()); } @@ -536,7 +536,7 @@ public void testDynamicFalseNull() throws Exception { .startObject() .field("bar", (String) null) .endObject().bytes(); - ParsedDocument doc = mapper.parse("test", "type", "1", bytes); + ParsedDocument doc = mapper.parse(SourceToParse.source("test", "type", "1", bytes, XContentType.JSON)); assertEquals(0, doc.rootDoc().getFields("bar").length); } @@ -551,7 +551,7 @@ public void testDynamicStrictNull() throws Exception { .field("bar", (String) null) .endObject().bytes(); StrictDynamicMappingException exception = expectThrows(StrictDynamicMappingException.class, - () -> mapper.parse("test", "type", "1", bytes)); + () -> mapper.parse(SourceToParse.source("test", "type", "1", bytes, XContentType.JSON))); assertEquals("mapping set to strict, dynamic introduction of [bar] within [type] is not allowed", exception.getMessage()); } @@ -565,7 +565,7 @@ public void testMappedNullValue() throws Exception { BytesReference bytes = XContentFactory.jsonBuilder() .startObject().field("foo", (Long) null) .endObject().bytes(); - ParsedDocument doc = mapper.parse("test", "type", "1", bytes); + ParsedDocument doc = mapper.parse(SourceToParse.source("test", "type", "1", bytes, XContentType.JSON)); assertEquals(0, doc.rootDoc().getFields("foo").length); } @@ -580,7 +580,7 @@ public void testDynamicDottedFieldNameLongArray() throws Exception { .value(0) .value(1) .endArray().endObject().bytes(); - ParsedDocument doc = mapper.parse("test", "type", "1", bytes); + ParsedDocument doc = mapper.parse(SourceToParse.source("test", "type", "1", bytes, XContentType.JSON)); assertEquals(4, doc.rootDoc().getFields("foo.bar.baz").length); Mapper fooMapper = doc.dynamicMappingsUpdate().root().getMapper("foo"); assertNotNull(fooMapper); @@ -607,7 +607,7 @@ public void testDynamicDottedFieldNameLongArrayWithParentTemplate() throws Excep .value(0) .value(1) .endArray().endObject().bytes(); - ParsedDocument doc = mapper.parse("test", "type", "1", bytes); + ParsedDocument doc = mapper.parse(SourceToParse.source("test", "type", "1", bytes, XContentType.JSON)); assertEquals(4, doc.rootDoc().getFields("foo.bar.baz").length); Mapper fooMapper = doc.dynamicMappingsUpdate().root().getMapper("foo"); assertNotNull(fooMapper); @@ -633,7 +633,7 @@ public void testDynamicDottedFieldNameLongArrayWithExistingParent() throws Excep .value(0) .value(1) .endArray().endObject().bytes(); - ParsedDocument doc = mapper.parse("test", "type", "1", bytes); + ParsedDocument doc = mapper.parse(SourceToParse.source("test", "type", "1", bytes, XContentType.JSON)); assertEquals(4, doc.rootDoc().getFields("foo.bar.baz").length); Mapper fooMapper = doc.dynamicMappingsUpdate().root().getMapper("foo"); 
assertNotNull(fooMapper); @@ -659,7 +659,8 @@ public void testDynamicDottedFieldNameLongArrayWithExistingParentWrongType() thr .value(0) .value(1) .endArray().endObject().bytes(); - MapperParsingException exception = expectThrows(MapperParsingException.class, () -> mapper.parse("test", "type", "1", bytes)); + MapperParsingException exception = expectThrows(MapperParsingException.class, + () -> mapper.parse(SourceToParse.source("test", "type", "1", bytes, XContentType.JSON))); assertEquals("Could not dynamically add mapping for field [foo.bar.baz]. " + "Existing mapping for [foo] must be of type object but found [long].", exception.getMessage()); } @@ -675,7 +676,7 @@ public void testDynamicFalseDottedFieldNameLongArray() throws Exception { .value(0) .value(1) .endArray().endObject().bytes(); - ParsedDocument doc = mapper.parse("test", "type", "1", bytes); + ParsedDocument doc = mapper.parse(SourceToParse.source("test", "type", "1", bytes, XContentType.JSON)); assertEquals(0, doc.rootDoc().getFields("foo.bar.baz").length); } @@ -691,7 +692,7 @@ public void testDynamicStrictDottedFieldNameLongArray() throws Exception { .value(1) .endArray().endObject().bytes(); StrictDynamicMappingException exception = expectThrows(StrictDynamicMappingException.class, - () -> mapper.parse("test", "type", "1", bytes)); + () -> mapper.parse(SourceToParse.source("test", "type", "1", bytes, XContentType.JSON))); assertEquals("mapping set to strict, dynamic introduction of [foo] within [type] is not allowed", exception.getMessage()); } @@ -704,7 +705,7 @@ public void testDynamicDottedFieldNameLong() throws Exception { BytesReference bytes = XContentFactory.jsonBuilder() .startObject().field("foo.bar.baz", 0) .endObject().bytes(); - ParsedDocument doc = mapper.parse("test", "type", "1", bytes); + ParsedDocument doc = mapper.parse(SourceToParse.source("test", "type", "1", bytes, XContentType.JSON)); assertEquals(2, doc.rootDoc().getFields("foo.bar.baz").length); Mapper fooMapper = doc.dynamicMappingsUpdate().root().getMapper("foo"); assertNotNull(fooMapper); @@ -729,7 +730,7 @@ public void testDynamicDottedFieldNameLongWithParentTemplate() throws Exception BytesReference bytes = XContentFactory.jsonBuilder() .startObject().field("foo.bar.baz", 0) .endObject().bytes(); - ParsedDocument doc = mapper.parse("test", "type", "1", bytes); + ParsedDocument doc = mapper.parse(SourceToParse.source("test", "type", "1", bytes, XContentType.JSON)); assertEquals(2, doc.rootDoc().getFields("foo.bar.baz").length); Mapper fooMapper = doc.dynamicMappingsUpdate().root().getMapper("foo"); assertNotNull(fooMapper); @@ -753,7 +754,7 @@ public void testDynamicDottedFieldNameLongWithExistingParent() throws Exception BytesReference bytes = XContentFactory.jsonBuilder() .startObject().field("foo.bar.baz", 0) .endObject().bytes(); - ParsedDocument doc = mapper.parse("test", "type", "1", bytes); + ParsedDocument doc = mapper.parse(SourceToParse.source("test", "type", "1", bytes, XContentType.JSON)); assertEquals(2, doc.rootDoc().getFields("foo.bar.baz").length); Mapper fooMapper = doc.dynamicMappingsUpdate().root().getMapper("foo"); assertNotNull(fooMapper); @@ -777,7 +778,8 @@ public void testDynamicDottedFieldNameLongWithExistingParentWrongType() throws E BytesReference bytes = XContentFactory.jsonBuilder() .startObject().field("foo.bar.baz", 0) .endObject().bytes(); - MapperParsingException exception = expectThrows(MapperParsingException.class, () -> mapper.parse("test", "type", "1", bytes)); + MapperParsingException exception = 
expectThrows(MapperParsingException.class, + () -> mapper.parse(SourceToParse.source("test", "type", "1", bytes, XContentType.JSON))); assertEquals("Could not dynamically add mapping for field [foo.bar.baz]. " + "Existing mapping for [foo] must be of type object but found [long].", exception.getMessage()); } @@ -791,7 +793,7 @@ public void testDynamicFalseDottedFieldNameLong() throws Exception { BytesReference bytes = XContentFactory.jsonBuilder() .startObject().field("foo.bar.baz", 0) .endObject().bytes(); - ParsedDocument doc = mapper.parse("test", "type", "1", bytes); + ParsedDocument doc = mapper.parse(SourceToParse.source("test", "type", "1", bytes, XContentType.JSON)); assertEquals(0, doc.rootDoc().getFields("foo.bar.baz").length); } @@ -805,7 +807,7 @@ public void testDynamicStrictDottedFieldNameLong() throws Exception { .startObject().field("foo.bar.baz", 0) .endObject().bytes(); StrictDynamicMappingException exception = expectThrows(StrictDynamicMappingException.class, - () -> mapper.parse("test", "type", "1", bytes)); + () -> mapper.parse(SourceToParse.source("test", "type", "1", bytes, XContentType.JSON))); assertEquals("mapping set to strict, dynamic introduction of [foo] within [type] is not allowed", exception.getMessage()); } @@ -819,7 +821,7 @@ public void testDynamicDottedFieldNameObject() throws Exception { .startObject().startObject("foo.bar.baz") .field("a", 0) .endObject().endObject().bytes(); - ParsedDocument doc = mapper.parse("test", "type", "1", bytes); + ParsedDocument doc = mapper.parse(SourceToParse.source("test", "type", "1", bytes, XContentType.JSON)); assertEquals(2, doc.rootDoc().getFields("foo.bar.baz.a").length); Mapper fooMapper = doc.dynamicMappingsUpdate().root().getMapper("foo"); assertNotNull(fooMapper); @@ -848,7 +850,7 @@ public void testDynamicDottedFieldNameObjectWithParentTemplate() throws Exceptio .startObject().startObject("foo.bar.baz") .field("a", 0) .endObject().endObject().bytes(); - ParsedDocument doc = mapper.parse("test", "type", "1", bytes); + ParsedDocument doc = mapper.parse(SourceToParse.source("test", "type", "1", bytes, XContentType.JSON)); assertEquals(2, doc.rootDoc().getFields("foo.bar.baz.a").length); Mapper fooMapper = doc.dynamicMappingsUpdate().root().getMapper("foo"); assertNotNull(fooMapper); @@ -872,7 +874,7 @@ public void testDynamicDottedFieldNameObjectWithExistingParent() throws Exceptio BytesReference bytes = XContentFactory.jsonBuilder().startObject().startObject("foo.bar.baz").field("a", 0).endObject().endObject() .bytes(); - ParsedDocument doc = mapper.parse("test", "type", "1", bytes); + ParsedDocument doc = mapper.parse(SourceToParse.source("test", "type", "1", bytes, XContentType.JSON)); assertEquals(2, doc.rootDoc().getFields("foo.bar.baz.a").length); Mapper fooMapper = doc.dynamicMappingsUpdate().root().getMapper("foo"); assertNotNull(fooMapper); @@ -898,7 +900,9 @@ public void testDynamicDottedFieldNameObjectWithExistingParentWrongType() throws BytesReference bytes = XContentFactory.jsonBuilder().startObject().startObject("foo.bar.baz").field("a", 0).endObject().endObject() .bytes(); - MapperParsingException exception = expectThrows(MapperParsingException.class, () -> mapper.parse("test", "type", "1", bytes)); + MapperParsingException exception = expectThrows(MapperParsingException.class, + () -> mapper.parse(SourceToParse.source("test", "type", "1", bytes, XContentType.JSON))); + assertEquals("Could not dynamically add mapping for field [foo.bar.baz]. 
" + "Existing mapping for [foo] must be of type object but found [long].", exception.getMessage()); } @@ -913,7 +917,7 @@ public void testDynamicFalseDottedFieldNameObject() throws Exception { .startObject().startObject("foo.bar.baz") .field("a", 0) .endObject().endObject().bytes(); - ParsedDocument doc = mapper.parse("test", "type", "1", bytes); + ParsedDocument doc = mapper.parse(SourceToParse.source("test", "type", "1", bytes, XContentType.JSON)); assertEquals(0, doc.rootDoc().getFields("foo.bar.baz.a").length); } @@ -928,7 +932,7 @@ public void testDynamicStrictDottedFieldNameObject() throws Exception { .field("a", 0) .endObject().endObject().bytes(); StrictDynamicMappingException exception = expectThrows(StrictDynamicMappingException.class, - () -> mapper.parse("test", "type", "1", bytes)); + () -> mapper.parse(SourceToParse.source("test", "type", "1", bytes, XContentType.JSON))); assertEquals("mapping set to strict, dynamic introduction of [foo] within [type] is not allowed", exception.getMessage()); } @@ -939,12 +943,11 @@ public void testDocumentContainsMetadataField() throws Exception { BytesReference bytes = XContentFactory.jsonBuilder().startObject().field("_ttl", 0).endObject().bytes(); MapperParsingException e = expectThrows(MapperParsingException.class, () -> - mapper.parse("test", "type", "1", bytes) - ); + mapper.parse(SourceToParse.source("test", "type", "1", bytes, XContentType.JSON))); assertTrue(e.getMessage(), e.getMessage().contains("cannot be added inside a document")); BytesReference bytes2 = XContentFactory.jsonBuilder().startObject().field("foo._ttl", 0).endObject().bytes(); - mapper.parse("test", "type", "1", bytes2); // parses without error + mapper.parse(SourceToParse.source("test", "type", "1", bytes2, XContentType.JSON)); // parses without error } public void testSimpleMapper() throws Exception { @@ -955,10 +958,10 @@ public void testSimpleMapper() throws Exception { indexService.mapperService()).build(indexService.mapperService()); BytesReference json = new BytesArray(copyToBytesFromClasspath("/org/elasticsearch/index/mapper/simple/test1.json")); - Document doc = docMapper.parse("test", "person", "1", json).rootDoc(); + Document doc = docMapper.parse(SourceToParse.source("test", "person", "1", json, XContentType.JSON)).rootDoc(); assertThat(doc.get(docMapper.mappers().getMapper("name.first").fieldType().name()), equalTo("shay")); - doc = docMapper.parse("test", "person", "1", json).rootDoc(); + doc = docMapper.parse(SourceToParse.source("test", "person", "1", json, XContentType.JSON)).rootDoc(); } public void testParseToJsonAndParse() throws Exception { @@ -969,7 +972,7 @@ public void testParseToJsonAndParse() throws Exception { // reparse it DocumentMapper builtDocMapper = parser.parse("person", new CompressedXContent(builtMapping)); BytesReference json = new BytesArray(copyToBytesFromClasspath("/org/elasticsearch/index/mapper/simple/test1.json")); - Document doc = builtDocMapper.parse("test", "person", "1", json).rootDoc(); + Document doc = builtDocMapper.parse(SourceToParse.source("test", "person", "1", json, XContentType.JSON)).rootDoc(); assertThat(doc.get(docMapper.uidMapper().fieldType().name()), equalTo(Uid.createUid("person", "1"))); assertThat(doc.get(docMapper.mappers().getMapper("name.first").fieldType().name()), equalTo("shay")); } @@ -981,7 +984,7 @@ public void testSimpleParser() throws Exception { assertThat((String) docMapper.meta().get("param1"), equalTo("value1")); BytesReference json = new 
BytesArray(copyToBytesFromClasspath("/org/elasticsearch/index/mapper/simple/test1.json")); - Document doc = docMapper.parse("test", "person", "1", json).rootDoc(); + Document doc = docMapper.parse(SourceToParse.source("test", "person", "1", json, XContentType.JSON)).rootDoc(); assertThat(doc.get(docMapper.uidMapper().fieldType().name()), equalTo(Uid.createUid("person", "1"))); assertThat(doc.get(docMapper.mappers().getMapper("name.first").fieldType().name()), equalTo("shay")); } @@ -990,7 +993,7 @@ public void testSimpleParserNoTypeNoId() throws Exception { String mapping = copyToStringFromClasspath("/org/elasticsearch/index/mapper/simple/test-mapping.json"); DocumentMapper docMapper = createIndex("test").mapperService().documentMapperParser().parse("person", new CompressedXContent(mapping)); BytesReference json = new BytesArray(copyToBytesFromClasspath("/org/elasticsearch/index/mapper/simple/test1-notype-noid.json")); - Document doc = docMapper.parse("test", "person", "1", json).rootDoc(); + Document doc = docMapper.parse(SourceToParse.source("test", "person", "1", json, XContentType.JSON)).rootDoc(); assertThat(doc.get(docMapper.uidMapper().fieldType().name()), equalTo(Uid.createUid("person", "1"))); assertThat(doc.get(docMapper.mappers().getMapper("name.first").fieldType().name()), equalTo("shay")); } @@ -1016,7 +1019,7 @@ public void testNoDocumentSent() throws Exception { BytesReference json = new BytesArray("".getBytes(StandardCharsets.UTF_8)); try { - docMapper.parse("test", "person", "1", json).rootDoc(); + docMapper.parse(SourceToParse.source("test", "person", "1", json, XContentType.JSON)).rootDoc(); fail("this point is never reached"); } catch (MapperParsingException e) { assertThat(e.getMessage(), equalTo("failed to parse, document is empty")); @@ -1028,13 +1031,14 @@ public void testNoLevel() throws Exception { DocumentMapper defaultMapper = createIndex("test").mapperService().documentMapperParser().parse("type", new CompressedXContent(defaultMapping)); - ParsedDocument doc = defaultMapper.parse("test", "type", "1", XContentFactory.jsonBuilder() + ParsedDocument doc = defaultMapper.parse(SourceToParse.source("test", "type", "1", XContentFactory.jsonBuilder() .startObject() .field("test1", "value1") .field("test2", "value2") .startObject("inner").field("inner_field", "inner_value").endObject() .endObject() - .bytes()); + .bytes(), + XContentType.JSON)); assertThat(doc.rootDoc().get("test1"), equalTo("value1")); assertThat(doc.rootDoc().get("test2"), equalTo("value2")); @@ -1046,13 +1050,14 @@ public void testTypeLevel() throws Exception { DocumentMapper defaultMapper = createIndex("test").mapperService().documentMapperParser().parse("type", new CompressedXContent(defaultMapping)); - ParsedDocument doc = defaultMapper.parse("test", "type", "1", XContentFactory.jsonBuilder() + ParsedDocument doc = defaultMapper.parse(SourceToParse.source("test", "type", "1", XContentFactory.jsonBuilder() .startObject().startObject("type") .field("test1", "value1") .field("test2", "value2") .startObject("inner").field("inner_field", "inner_value").endObject() .endObject().endObject() - .bytes()); + .bytes(), + XContentType.JSON)); assertThat(doc.rootDoc().get("type.test1"), equalTo("value1")); assertThat(doc.rootDoc().get("type.test2"), equalTo("value2")); @@ -1064,14 +1069,15 @@ public void testNoLevelWithFieldTypeAsValue() throws Exception { DocumentMapper defaultMapper = createIndex("test").mapperService().documentMapperParser().parse("type", new CompressedXContent(defaultMapping)); - 
ParsedDocument doc = defaultMapper.parse("test", "type", "1", XContentFactory.jsonBuilder() + ParsedDocument doc = defaultMapper.parse(SourceToParse.source("test", "type", "1", XContentFactory.jsonBuilder() .startObject() .field("type", "value_type") .field("test1", "value1") .field("test2", "value2") .startObject("inner").field("inner_field", "inner_value").endObject() .endObject() - .bytes()); + .bytes(), + XContentType.JSON)); assertThat(doc.rootDoc().get("type"), equalTo("value_type")); assertThat(doc.rootDoc().get("test1"), equalTo("value1")); @@ -1084,14 +1090,15 @@ public void testTypeLevelWithFieldTypeAsValue() throws Exception { DocumentMapper defaultMapper = createIndex("test").mapperService().documentMapperParser().parse("type", new CompressedXContent(defaultMapping)); - ParsedDocument doc = defaultMapper.parse("test", "type", "1", XContentFactory.jsonBuilder() + ParsedDocument doc = defaultMapper.parse(SourceToParse.source("test", "type", "1", XContentFactory.jsonBuilder() .startObject().startObject("type") .field("type", "value_type") .field("test1", "value1") .field("test2", "value2") .startObject("inner").field("inner_field", "inner_value").endObject() .endObject().endObject() - .bytes()); + .bytes(), + XContentType.JSON)); assertThat(doc.rootDoc().get("type.type"), equalTo("value_type")); assertThat(doc.rootDoc().get("type.test1"), equalTo("value1")); @@ -1104,14 +1111,15 @@ public void testNoLevelWithFieldTypeAsObject() throws Exception { DocumentMapper defaultMapper = createIndex("test").mapperService().documentMapperParser().parse("type", new CompressedXContent(defaultMapping)); - ParsedDocument doc = defaultMapper.parse("test", "type", "1", XContentFactory.jsonBuilder() + ParsedDocument doc = defaultMapper.parse(SourceToParse.source("test", "type", "1", XContentFactory.jsonBuilder() .startObject() .startObject("type").field("type_field", "type_value").endObject() .field("test1", "value1") .field("test2", "value2") .startObject("inner").field("inner_field", "inner_value").endObject() .endObject() - .bytes()); + .bytes(), + XContentType.JSON)); // in this case, we analyze the type object as the actual document, and ignore the other same level fields assertThat(doc.rootDoc().get("type.type_field"), equalTo("type_value")); @@ -1124,14 +1132,15 @@ public void testTypeLevelWithFieldTypeAsObject() throws Exception { DocumentMapper defaultMapper = createIndex("test").mapperService().documentMapperParser().parse("type", new CompressedXContent(defaultMapping)); - ParsedDocument doc = defaultMapper.parse("test", "type", "1", XContentFactory.jsonBuilder() + ParsedDocument doc = defaultMapper.parse(SourceToParse.source("test", "type", "1", XContentFactory.jsonBuilder() .startObject().startObject("type") .startObject("type").field("type_field", "type_value").endObject() .field("test1", "value1") .field("test2", "value2") .startObject("inner").field("inner_field", "inner_value").endObject() .endObject().endObject() - .bytes()); + .bytes(), + XContentType.JSON)); assertThat(doc.rootDoc().get("type.type.type_field"), equalTo("type_value")); assertThat(doc.rootDoc().get("type.test1"), equalTo("value1")); @@ -1144,14 +1153,15 @@ public void testNoLevelWithFieldTypeAsValueNotFirst() throws Exception { DocumentMapper defaultMapper = createIndex("test").mapperService().documentMapperParser().parse("type", new CompressedXContent(defaultMapping)); - ParsedDocument doc = defaultMapper.parse("test", "type", "1", XContentFactory.jsonBuilder() + ParsedDocument doc = 
defaultMapper.parse(SourceToParse.source("test", "type", "1", XContentFactory.jsonBuilder() .startObject().startObject("type") .field("test1", "value1") .field("test2", "value2") .field("type", "value_type") .startObject("inner").field("inner_field", "inner_value").endObject() .endObject().endObject() - .bytes()); + .bytes(), + XContentType.JSON)); assertThat(doc.rootDoc().get("type.type"), equalTo("value_type")); assertThat(doc.rootDoc().get("type.test1"), equalTo("value1")); @@ -1164,14 +1174,15 @@ public void testTypeLevelWithFieldTypeAsValueNotFirst() throws Exception { DocumentMapper defaultMapper = createIndex("test").mapperService().documentMapperParser().parse("type", new CompressedXContent(defaultMapping)); - ParsedDocument doc = defaultMapper.parse("test", "type", "1", XContentFactory.jsonBuilder() + ParsedDocument doc = defaultMapper.parse(SourceToParse.source("test", "type", "1", XContentFactory.jsonBuilder() .startObject().startObject("type") .field("test1", "value1") .field("type", "value_type") .field("test2", "value2") .startObject("inner").field("inner_field", "inner_value").endObject() .endObject().endObject() - .bytes()); + .bytes(), + XContentType.JSON)); assertThat(doc.rootDoc().get("type.type"), equalTo("value_type")); assertThat(doc.rootDoc().get("type.test1"), equalTo("value1")); @@ -1184,14 +1195,15 @@ public void testNoLevelWithFieldTypeAsObjectNotFirst() throws Exception { DocumentMapper defaultMapper = createIndex("test").mapperService().documentMapperParser().parse("type", new CompressedXContent(defaultMapping)); - ParsedDocument doc = defaultMapper.parse("test", "type", "1", XContentFactory.jsonBuilder() + ParsedDocument doc = defaultMapper.parse(SourceToParse.source("test", "type", "1", XContentFactory.jsonBuilder() .startObject() .field("test1", "value1") .startObject("type").field("type_field", "type_value").endObject() .field("test2", "value2") .startObject("inner").field("inner_field", "inner_value").endObject() .endObject() - .bytes()); + .bytes(), + XContentType.JSON)); // when the type is not the first one, we don't confuse it... 
assertThat(doc.rootDoc().get("type.type_field"), equalTo("type_value")); @@ -1205,14 +1217,15 @@ public void testTypeLevelWithFieldTypeAsObjectNotFirst() throws Exception { DocumentMapper defaultMapper = createIndex("test").mapperService().documentMapperParser().parse("type", new CompressedXContent(defaultMapping)); - ParsedDocument doc = defaultMapper.parse("test", "type", "1", XContentFactory.jsonBuilder() + ParsedDocument doc = defaultMapper.parse(SourceToParse.source("test", "type", "1", XContentFactory.jsonBuilder() .startObject().startObject("type") .field("test1", "value1") .startObject("type").field("type_field", "type_value").endObject() .field("test2", "value2") .startObject("inner").field("inner_field", "inner_value").endObject() .endObject().endObject() - .bytes()); + .bytes(), + XContentType.JSON)); assertThat(doc.rootDoc().get("type.type.type_field"), equalTo("type_value")); assertThat(doc.rootDoc().get("type.test1"), equalTo("value1")); @@ -1235,7 +1248,7 @@ public void testDynamicDateDetectionDisabledOnNumbers() throws IOException { // Even though we matched the dynamic format, we do not match on numbers, // which are too likely to be false positives - ParsedDocument doc = mapper.parse("test", "type", "1", bytes); + ParsedDocument doc = mapper.parse(SourceToParse.source("test", "type", "1", bytes, XContentType.JSON)); Mapping update = doc.dynamicMappingsUpdate(); assertNotNull(update); Mapper dateMapper = update.root().getMapper("foo"); @@ -1257,7 +1270,7 @@ public void testDynamicDateDetectionEnabledWithNoSpecialCharacters() throws IOEx .endObject().bytes(); // We should have generated a date field - ParsedDocument doc = mapper.parse("test", "type", "1", bytes); + ParsedDocument doc = mapper.parse(SourceToParse.source("test", "type", "1", bytes, XContentType.JSON)); Mapping update = doc.dynamicMappingsUpdate(); assertNotNull(update); Mapper dateMapper = update.root().getMapper("foo"); diff --git a/core/src/test/java/org/elasticsearch/index/mapper/DoubleIndexingDocTests.java b/core/src/test/java/org/elasticsearch/index/mapper/DoubleIndexingDocTests.java index 85e186e2f8221..2acd6b5c987bb 100644 --- a/core/src/test/java/org/elasticsearch/index/mapper/DoubleIndexingDocTests.java +++ b/core/src/test/java/org/elasticsearch/index/mapper/DoubleIndexingDocTests.java @@ -48,7 +48,7 @@ public void testDoubleIndexingSameDoc() throws Exception { DocumentMapper mapper = index.mapperService().documentMapper("type"); QueryShardContext context = index.newQueryShardContext(0, null, () -> 0L); - ParsedDocument doc = mapper.parse("test", "type", "1", XContentFactory.jsonBuilder() + ParsedDocument doc = mapper.parse(SourceToParse.source("test", "type", "1", XContentFactory.jsonBuilder() .startObject() .field("field1", "value1") .field("field2", 1) @@ -56,7 +56,8 @@ public void testDoubleIndexingSameDoc() throws Exception { .field("field4", "2010-01-01") .startArray("field5").value(1).value(2).value(3).endArray() .endObject() - .bytes()); + .bytes(), + XContentType.JSON)); assertNotNull(doc.dynamicMappingsUpdate()); client().admin().indices().preparePutMapping("test").setType("type") .setSource(doc.dynamicMappingsUpdate().toString(), XContentType.JSON).get(); diff --git a/core/src/test/java/org/elasticsearch/index/mapper/DynamicMappingTests.java b/core/src/test/java/org/elasticsearch/index/mapper/DynamicMappingTests.java index 344f3debdf6b6..71ae77aa55e6e 100644 --- a/core/src/test/java/org/elasticsearch/index/mapper/DynamicMappingTests.java +++ 
b/core/src/test/java/org/elasticsearch/index/mapper/DynamicMappingTests.java @@ -58,12 +58,13 @@ public void testDynamicTrue() throws IOException { DocumentMapper defaultMapper = createIndex("test").mapperService().documentMapperParser().parse("type", new CompressedXContent(mapping)); - ParsedDocument doc = defaultMapper.parse("test", "type", "1", jsonBuilder() + ParsedDocument doc = defaultMapper.parse(SourceToParse.source("test", "type", "1", jsonBuilder() .startObject() .field("field1", "value1") .field("field2", "value2") .endObject() - .bytes()); + .bytes(), + XContentType.JSON)); assertThat(doc.rootDoc().get("field1"), equalTo("value1")); assertThat(doc.rootDoc().get("field2"), equalTo("value2")); @@ -79,12 +80,13 @@ public void testDynamicFalse() throws IOException { DocumentMapper defaultMapper = createIndex("test").mapperService().documentMapperParser().parse("type", new CompressedXContent(mapping)); - ParsedDocument doc = defaultMapper.parse("test", "type", "1", jsonBuilder() + ParsedDocument doc = defaultMapper.parse(SourceToParse.source("test", "type", "1", jsonBuilder() .startObject() .field("field1", "value1") .field("field2", "value2") .endObject() - .bytes()); + .bytes(), + XContentType.JSON)); assertThat(doc.rootDoc().get("field1"), equalTo("value1")); assertThat(doc.rootDoc().get("field2"), nullValue()); @@ -101,20 +103,22 @@ public void testDynamicStrict() throws IOException { DocumentMapper defaultMapper = createIndex("test").mapperService().documentMapperParser().parse("type", new CompressedXContent(mapping)); - StrictDynamicMappingException e = expectThrows(StrictDynamicMappingException.class, () -> defaultMapper.parse("test", "type", "1", jsonBuilder() + StrictDynamicMappingException e = expectThrows(StrictDynamicMappingException.class, () -> defaultMapper.parse(SourceToParse.source("test", "type", "1", jsonBuilder() .startObject() .field("field1", "value1") .field("field2", "value2") .endObject() - .bytes())); + .bytes(), + XContentType.JSON))); assertThat(e.getMessage(), equalTo("mapping set to strict, dynamic introduction of [field2] within [type] is not allowed")); - e = expectThrows(StrictDynamicMappingException.class, () -> defaultMapper.parse("test", "type", "1", XContentFactory.jsonBuilder() + e = expectThrows(StrictDynamicMappingException.class, () -> defaultMapper.parse(SourceToParse.source("test", "type", "1", XContentFactory.jsonBuilder() .startObject() .field("field1", "value1") .field("field2", (String) null) .endObject() - .bytes())); + .bytes(), + XContentType.JSON))); assertThat(e.getMessage(), equalTo("mapping set to strict, dynamic introduction of [field2] within [type] is not allowed")); } @@ -130,13 +134,14 @@ public void testDynamicFalseWithInnerObjectButDynamicSetOnRoot() throws IOExcept DocumentMapper defaultMapper = createIndex("test").mapperService().documentMapperParser().parse("type", new CompressedXContent(mapping)); - ParsedDocument doc = defaultMapper.parse("test", "type", "1", jsonBuilder() + ParsedDocument doc = defaultMapper.parse(SourceToParse.source("test", "type", "1", jsonBuilder() .startObject().startObject("obj1") .field("field1", "value1") .field("field2", "value2") .endObject() .endObject() - .bytes()); + .bytes(), + XContentType.JSON)); assertThat(doc.rootDoc().get("obj1.field1"), equalTo("value1")); assertThat(doc.rootDoc().get("obj1.field2"), nullValue()); @@ -155,13 +160,14 @@ public void testDynamicStrictWithInnerObjectButDynamicSetOnRoot() throws IOExcep DocumentMapper defaultMapper = 
createIndex("test").mapperService().documentMapperParser().parse("type", new CompressedXContent(mapping)); StrictDynamicMappingException e = expectThrows(StrictDynamicMappingException.class, () -> - defaultMapper.parse("test", "type", "1", jsonBuilder() + defaultMapper.parse(SourceToParse.source("test", "type", "1", jsonBuilder() .startObject().startObject("obj1") .field("field1", "value1") .field("field2", "value2") .endObject() .endObject() - .bytes())); + .bytes(), + XContentType.JSON))); assertThat(e.getMessage(), equalTo("mapping set to strict, dynamic introduction of [field2] within [obj1] is not allowed")); } @@ -585,7 +591,8 @@ private void doTestDefaultFloatingPointMappings(DocumentMapper mapper, XContentB .field("baz", (double) 3.2f) // double that can be accurately represented as a float .field("quux", "3.2") // float detected through numeric detection .endObject().bytes(); - ParsedDocument parsedDocument = mapper.parse("index", "type", "id", source); + ParsedDocument parsedDocument = mapper.parse(SourceToParse.source("index", "type", "id", source, + XContentType.JSON)); Mapping update = parsedDocument.dynamicMappingsUpdate(); assertNotNull(update); assertThat(((FieldMapper) update.root().getMapper("foo")).fieldType().typeName(), equalTo("float")); @@ -603,12 +610,13 @@ public void testNumericDetectionEnabled() throws Exception { client().admin().indices().preparePutMapping("test").setType("type").setSource(mapping, XContentType.JSON).get(); DocumentMapper defaultMapper = index.mapperService().documentMapper("type"); - ParsedDocument doc = defaultMapper.parse("test", "type", "1", XContentFactory.jsonBuilder() + ParsedDocument doc = defaultMapper.parse(SourceToParse.source("test", "type", "1", XContentFactory.jsonBuilder() .startObject() .field("s_long", "100") .field("s_double", "100.0") .endObject() - .bytes()); + .bytes(), + XContentType.JSON)); assertNotNull(doc.dynamicMappingsUpdate()); client().admin().indices().preparePutMapping("test").setType("type") .setSource(doc.dynamicMappingsUpdate().toString(), XContentType.JSON).get(); @@ -629,12 +637,13 @@ public void testNumericDetectionDefault() throws Exception { client().admin().indices().preparePutMapping("test").setType("type").setSource(mapping, XContentType.JSON).get(); DocumentMapper defaultMapper = index.mapperService().documentMapper("type"); - ParsedDocument doc = defaultMapper.parse("test", "type", "1", XContentFactory.jsonBuilder() + ParsedDocument doc = defaultMapper.parse(SourceToParse.source("test", "type", "1", XContentFactory.jsonBuilder() .startObject() .field("s_long", "100") .field("s_double", "100.0") .endObject() - .bytes()); + .bytes(), + XContentType.JSON)); assertNotNull(doc.dynamicMappingsUpdate()); assertAcked(client().admin().indices().preparePutMapping("test").setType("type") .setSource(doc.dynamicMappingsUpdate().toString(), XContentType.JSON).get()); @@ -677,13 +686,14 @@ public void testDateDetectionInheritsFormat() throws Exception { client().admin().indices().preparePutMapping("test").setType("type").setSource(mapping, XContentType.JSON).get(); DocumentMapper defaultMapper = index.mapperService().documentMapper("type"); - ParsedDocument doc = defaultMapper.parse("test", "type", "1", XContentFactory.jsonBuilder() + ParsedDocument doc = defaultMapper.parse(SourceToParse.source("test", "type", "1", XContentFactory.jsonBuilder() .startObject() .field("date1", "2016-11-20") .field("date2", "2016-11-20") .field("date3", "2016-11-20") .endObject() - .bytes()); + .bytes(), + XContentType.JSON)); 
assertNotNull(doc.dynamicMappingsUpdate()); assertAcked(client().admin().indices().preparePutMapping("test").setType("type") .setSource(doc.dynamicMappingsUpdate().toString(), XContentType.JSON).get()); diff --git a/core/src/test/java/org/elasticsearch/index/mapper/DynamicTemplatesTests.java b/core/src/test/java/org/elasticsearch/index/mapper/DynamicTemplatesTests.java index 437dd7cb99d41..70cc2c08441eb 100644 --- a/core/src/test/java/org/elasticsearch/index/mapper/DynamicTemplatesTests.java +++ b/core/src/test/java/org/elasticsearch/index/mapper/DynamicTemplatesTests.java @@ -51,7 +51,8 @@ public void testMatchTypeOnly() throws Exception { DocumentMapper docMapper = index.mapperService().documentMapper("person"); builder = JsonXContent.contentBuilder(); builder.startObject().field("s", "hello").field("l", 1).endObject(); - ParsedDocument parsedDoc = docMapper.parse("test", "person", "1", builder.bytes()); + ParsedDocument parsedDoc = docMapper.parse(SourceToParse.source("test", "person", "1", builder.bytes(), + XContentType.JSON)); client().admin().indices().preparePutMapping("test").setType("person") .setSource(parsedDoc.dynamicMappingsUpdate().toString(), XContentType.JSON).get(); @@ -73,7 +74,8 @@ public void testSimple() throws Exception { client().admin().indices().preparePutMapping("test").setType("person").setSource(mapping, XContentType.JSON).get(); DocumentMapper docMapper = index.mapperService().documentMapper("person"); byte[] json = copyToBytesFromClasspath("/org/elasticsearch/index/mapper/dynamictemplate/simple/test-data.json"); - ParsedDocument parsedDoc = docMapper.parse("test", "person", "1", new BytesArray(json)); + ParsedDocument parsedDoc = docMapper.parse(SourceToParse.source("test", "person", "1", new BytesArray(json), + XContentType.JSON)); client().admin().indices().preparePutMapping("test").setType("person") .setSource(parsedDoc.dynamicMappingsUpdate().toString(), XContentType.JSON).get(); docMapper = index.mapperService().documentMapper("person"); @@ -131,7 +133,8 @@ public void testSimpleWithXContentTraverse() throws Exception { client().admin().indices().preparePutMapping("test").setType("person").setSource(mapping, XContentType.JSON).get(); DocumentMapper docMapper = index.mapperService().documentMapper("person"); byte[] json = copyToBytesFromClasspath("/org/elasticsearch/index/mapper/dynamictemplate/simple/test-data.json"); - ParsedDocument parsedDoc = docMapper.parse("test", "person", "1", new BytesArray(json)); + ParsedDocument parsedDoc = docMapper.parse(SourceToParse.source("test", "person", "1", new BytesArray(json), + XContentType.JSON)); client().admin().indices().preparePutMapping("test").setType("person") .setSource(parsedDoc.dynamicMappingsUpdate().toString(), XContentType.JSON).get(); docMapper = index.mapperService().documentMapper("person"); diff --git a/core/src/test/java/org/elasticsearch/index/mapper/ExternalFieldMapperTests.java b/core/src/test/java/org/elasticsearch/index/mapper/ExternalFieldMapperTests.java index 46ffc4e5864cc..8088c8576fe5a 100644 --- a/core/src/test/java/org/elasticsearch/index/mapper/ExternalFieldMapperTests.java +++ b/core/src/test/java/org/elasticsearch/index/mapper/ExternalFieldMapperTests.java @@ -27,6 +27,7 @@ import org.elasticsearch.common.geo.GeoPoint; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.XContentFactory; +import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.index.IndexService; import org.elasticsearch.index.query.QueryShardContext; 
import org.elasticsearch.indices.mapper.MapperRegistry; @@ -76,11 +77,12 @@ public void testExternalValues() throws Exception { .endObject().endObject().string() )); - ParsedDocument doc = documentMapper.parse("test", "type", "1", XContentFactory.jsonBuilder() + ParsedDocument doc = documentMapper.parse(SourceToParse.source("test", "type", "1", XContentFactory.jsonBuilder() .startObject() .field("field", "1234") .endObject() - .bytes()); + .bytes(), + XContentType.JSON)); assertThat(doc.rootDoc().getField("field.bool"), notNullValue()); assertThat(doc.rootDoc().getField("field.bool").stringValue(), is("T")); @@ -136,11 +138,12 @@ public void testExternalValuesWithMultifield() throws Exception { .endObject().endObject().endObject() .string())); - ParsedDocument doc = documentMapper.parse("test", "type", "1", XContentFactory.jsonBuilder() + ParsedDocument doc = documentMapper.parse(SourceToParse.source("test", "type", "1", XContentFactory.jsonBuilder() .startObject() .field("field", "1234") .endObject() - .bytes()); + .bytes(), + XContentType.JSON)); assertThat(doc.rootDoc().getField("field.bool"), notNullValue()); assertThat(doc.rootDoc().getField("field.bool").stringValue(), is("T")); @@ -204,11 +207,12 @@ public void testExternalValuesWithMultifieldTwoLevels() throws Exception { .endObject().endObject().endObject() .string())); - ParsedDocument doc = documentMapper.parse("test", "type", "1", XContentFactory.jsonBuilder() + ParsedDocument doc = documentMapper.parse(SourceToParse.source("test", "type", "1", XContentFactory.jsonBuilder() .startObject() .field("field", "1234") .endObject() - .bytes()); + .bytes(), + XContentType.JSON)); assertThat(doc.rootDoc().getField("field.bool"), notNullValue()); assertThat(doc.rootDoc().getField("field.bool").stringValue(), is("T")); diff --git a/core/src/test/java/org/elasticsearch/index/mapper/FieldNamesFieldMapperTests.java b/core/src/test/java/org/elasticsearch/index/mapper/FieldNamesFieldMapperTests.java index dc2de3e3dd9ab..aa66526bf42f7 100644 --- a/core/src/test/java/org/elasticsearch/index/mapper/FieldNamesFieldMapperTests.java +++ b/core/src/test/java/org/elasticsearch/index/mapper/FieldNamesFieldMapperTests.java @@ -26,6 +26,7 @@ import org.elasticsearch.common.compress.CompressedXContent; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.XContentFactory; +import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.index.IndexService; import org.elasticsearch.index.query.QueryShardContext; import org.elasticsearch.indices.IndicesModule; @@ -88,14 +89,15 @@ public void testInjectIntoDocDuringParsing() throws Exception { String mapping = XContentFactory.jsonBuilder().startObject().startObject("type").endObject().endObject().string(); DocumentMapper defaultMapper = createIndex("test").mapperService().documentMapperParser().parse("type", new CompressedXContent(mapping)); - ParsedDocument doc = defaultMapper.parse("test", "type", "1", XContentFactory.jsonBuilder() + ParsedDocument doc = defaultMapper.parse(SourceToParse.source("test", "type", "1", XContentFactory.jsonBuilder() .startObject() .field("a", "100") .startObject("b") .field("c", 42) .endObject() .endObject() - .bytes()); + .bytes(), + XContentType.JSON)); assertFieldNames(set("a", "a.keyword", "b", "b.c", "_uid", "_type", "_version", "_seq_no", "_primary_term", "_source"), doc); } @@ -108,11 +110,12 @@ public void testExplicitEnabled() throws Exception { FieldNamesFieldMapper fieldNamesMapper = 
docMapper.metadataMapper(FieldNamesFieldMapper.class); assertTrue(fieldNamesMapper.fieldType().isEnabled()); - ParsedDocument doc = docMapper.parse("test", "type", "1", XContentFactory.jsonBuilder() + ParsedDocument doc = docMapper.parse(SourceToParse.source("test", "type", "1", XContentFactory.jsonBuilder() .startObject() .field("field", "value") .endObject() - .bytes()); + .bytes(), + XContentType.JSON)); assertFieldNames(set("field", "field.keyword", "_uid", "_type", "_version", "_seq_no", "_primary_term", "_source"), doc); } @@ -125,11 +128,12 @@ public void testDisabled() throws Exception { FieldNamesFieldMapper fieldNamesMapper = docMapper.metadataMapper(FieldNamesFieldMapper.class); assertFalse(fieldNamesMapper.fieldType().isEnabled()); - ParsedDocument doc = docMapper.parse("test", "type", "1", XContentFactory.jsonBuilder() + ParsedDocument doc = docMapper.parse(SourceToParse.source("test", "type", "1", XContentFactory.jsonBuilder() .startObject() .field("field", "value") .endObject() - .bytes()); + .bytes(), + XContentType.JSON)); assertNull(doc.rootDoc().get("_field_names")); } @@ -244,7 +248,8 @@ public void testSeesFieldsFromPlugins() throws IOException { queryShardContext); String mapping = XContentFactory.jsonBuilder().startObject().startObject("type").endObject().endObject().string(); DocumentMapper mapper = parser.parse("type", new CompressedXContent(mapping)); - ParsedDocument parsedDocument = mapper.parse("index", "type", "id", new BytesArray("{}")); + ParsedDocument parsedDocument = mapper.parse(SourceToParse.source("index", "type", "id", new BytesArray("{}"), + XContentType.JSON)); IndexableField[] fields = parsedDocument.rootDoc().getFields(FieldNamesFieldMapper.NAME); boolean found = false; for (IndexableField f : fields) { diff --git a/core/src/test/java/org/elasticsearch/index/mapper/GenericStoreDynamicTemplateTests.java b/core/src/test/java/org/elasticsearch/index/mapper/GenericStoreDynamicTemplateTests.java index 8afe07a6e6848..57a6173bc657d 100644 --- a/core/src/test/java/org/elasticsearch/index/mapper/GenericStoreDynamicTemplateTests.java +++ b/core/src/test/java/org/elasticsearch/index/mapper/GenericStoreDynamicTemplateTests.java @@ -40,7 +40,8 @@ public void testSimple() throws Exception { client().admin().indices().preparePutMapping("test").setType("person").setSource(mapping, XContentType.JSON).get(); DocumentMapper docMapper = index.mapperService().documentMapper("person"); byte[] json = copyToBytesFromClasspath("/org/elasticsearch/index/mapper/dynamictemplate/genericstore/test-data.json"); - ParsedDocument parsedDoc = docMapper.parse("test", "person", "1", new BytesArray(json)); + ParsedDocument parsedDoc = docMapper.parse(SourceToParse.source("test", "person", "1", new BytesArray(json), + XContentType.JSON)); client().admin().indices().preparePutMapping("test").setType("person") .setSource(parsedDoc.dynamicMappingsUpdate().toString(), XContentType.JSON).get(); docMapper = index.mapperService().documentMapper("person"); diff --git a/core/src/test/java/org/elasticsearch/index/mapper/GeoPointFieldMapperTests.java b/core/src/test/java/org/elasticsearch/index/mapper/GeoPointFieldMapperTests.java index 1b4602ef7f22f..f8775073e2169 100644 --- a/core/src/test/java/org/elasticsearch/index/mapper/GeoPointFieldMapperTests.java +++ b/core/src/test/java/org/elasticsearch/index/mapper/GeoPointFieldMapperTests.java @@ -55,11 +55,12 @@ public void testGeoHashValue() throws Exception { String mapping = 
xContentBuilder.endObject().endObject().endObject().endObject().string(); DocumentMapper defaultMapper = createIndex("test").mapperService().documentMapperParser().parse("type", new CompressedXContent(mapping)); - ParsedDocument doc = defaultMapper.parse("test", "type", "1", XContentFactory.jsonBuilder() + ParsedDocument doc = defaultMapper.parse(SourceToParse.source("test", "type", "1", XContentFactory.jsonBuilder() .startObject() .field("point", stringEncode(1.3, 1.2)) .endObject() - .bytes()); + .bytes(), + XContentType.JSON)); assertThat(doc.rootDoc().getField("point"), notNullValue()); } @@ -70,11 +71,12 @@ public void testLatLonValuesStored() throws Exception { String mapping = xContentBuilder.field("store", true).endObject().endObject().endObject().endObject().string(); DocumentMapper defaultMapper = createIndex("test").mapperService().documentMapperParser().parse("type", new CompressedXContent(mapping)); - ParsedDocument doc = defaultMapper.parse("test", "type", "1", XContentFactory.jsonBuilder() + ParsedDocument doc = defaultMapper.parse(SourceToParse.source("test", "type", "1", XContentFactory.jsonBuilder() .startObject() .startObject("point").field("lat", 1.2).field("lon", 1.3).endObject() .endObject() - .bytes()); + .bytes(), + XContentType.JSON)); assertThat(doc.rootDoc().getField("point"), notNullValue()); } @@ -85,14 +87,15 @@ public void testArrayLatLonValues() throws Exception { String mapping = xContentBuilder.field("store", true).endObject().endObject().endObject().endObject().string(); DocumentMapper defaultMapper = createIndex("test").mapperService().documentMapperParser().parse("type", new CompressedXContent(mapping)); - ParsedDocument doc = defaultMapper.parse("test", "type", "1", XContentFactory.jsonBuilder() + ParsedDocument doc = defaultMapper.parse(SourceToParse.source("test", "type", "1", XContentFactory.jsonBuilder() .startObject() .startArray("point") .startObject().field("lat", 1.2).field("lon", 1.3).endObject() .startObject().field("lat", 1.4).field("lon", 1.5).endObject() .endArray() .endObject() - .bytes()); + .bytes(), + XContentType.JSON)); // doc values are enabled by default, but in this test we disable them; we should only have 2 points assertThat(doc.rootDoc().getFields("point"), notNullValue()); @@ -106,11 +109,12 @@ public void testLatLonInOneValue() throws Exception { DocumentMapper defaultMapper = createIndex("test").mapperService().documentMapperParser().parse("type", new CompressedXContent(mapping)); - ParsedDocument doc = defaultMapper.parse("test", "type", "1", XContentFactory.jsonBuilder() + ParsedDocument doc = defaultMapper.parse(SourceToParse.source("test", "type", "1", XContentFactory.jsonBuilder() .startObject() .field("point", "1.2,1.3") .endObject() - .bytes()); + .bytes(), + XContentType.JSON)); assertThat(doc.rootDoc().getField("point"), notNullValue()); } @@ -122,11 +126,12 @@ public void testLatLonInOneValueStored() throws Exception { DocumentMapper defaultMapper = createIndex("test").mapperService().documentMapperParser().parse("type", new CompressedXContent(mapping)); - ParsedDocument doc = defaultMapper.parse("test", "type", "1", XContentFactory.jsonBuilder() + ParsedDocument doc = defaultMapper.parse(SourceToParse.source("test", "type", "1", XContentFactory.jsonBuilder() .startObject() .field("point", "1.2,1.3") .endObject() - .bytes()); + .bytes(), + XContentType.JSON)); assertThat(doc.rootDoc().getField("point"), notNullValue()); } @@ -137,14 +142,15 @@ public void testLatLonInOneValueArray() throws Exception { 
DocumentMapper defaultMapper = createIndex("test").mapperService().documentMapperParser().parse("type", new CompressedXContent(mapping)); - ParsedDocument doc = defaultMapper.parse("test", "type", "1", XContentFactory.jsonBuilder() + ParsedDocument doc = defaultMapper.parse(SourceToParse.source("test", "type", "1", XContentFactory.jsonBuilder() .startObject() .startArray("point") .value("1.2,1.3") .value("1.4,1.5") .endArray() .endObject() - .bytes()); + .bytes(), + XContentType.JSON)); // doc values are enabled by default, but in this test we disable them; we should only have 2 points assertThat(doc.rootDoc().getFields("point"), notNullValue()); @@ -157,11 +163,12 @@ public void testLonLatArray() throws Exception { String mapping = xContentBuilder.endObject().endObject().endObject().endObject().string(); DocumentMapper defaultMapper = createIndex("test").mapperService().documentMapperParser().parse("type", new CompressedXContent(mapping)); - ParsedDocument doc = defaultMapper.parse("test", "type", "1", XContentFactory.jsonBuilder() + ParsedDocument doc = defaultMapper.parse(SourceToParse.source("test", "type", "1", XContentFactory.jsonBuilder() .startObject() .startArray("point").value(1.3).value(1.2).endArray() .endObject() - .bytes()); + .bytes(), + XContentType.JSON)); assertThat(doc.rootDoc().getField("point"), notNullValue()); } @@ -173,11 +180,12 @@ public void testLonLatArrayDynamic() throws Exception { String mapping = xContentBuilder.endObject().endObject().endObject().endArray().endObject().endObject().string(); DocumentMapper defaultMapper = createIndex("test").mapperService().documentMapperParser().parse("type", new CompressedXContent(mapping)); - ParsedDocument doc = defaultMapper.parse("test", "type", "1", XContentFactory.jsonBuilder() + ParsedDocument doc = defaultMapper.parse(SourceToParse.source("test", "type", "1", XContentFactory.jsonBuilder() .startObject() .startArray("point").value(1.3).value(1.2).endArray() .endObject() - .bytes()); + .bytes(), + XContentType.JSON)); assertThat(doc.rootDoc().getField("point"), notNullValue()); } @@ -188,11 +196,12 @@ public void testLonLatArrayStored() throws Exception { String mapping = xContentBuilder.field("store", true).endObject().endObject().endObject().endObject().string(); DocumentMapper defaultMapper = createIndex("test").mapperService().documentMapperParser().parse("type", new CompressedXContent(mapping)); - ParsedDocument doc = defaultMapper.parse("test", "type", "1", XContentFactory.jsonBuilder() + ParsedDocument doc = defaultMapper.parse(SourceToParse.source("test", "type", "1", XContentFactory.jsonBuilder() .startObject() .startArray("point").value(1.3).value(1.2).endArray() .endObject() - .bytes()); + .bytes(), + XContentType.JSON)); assertThat(doc.rootDoc().getField("point"), notNullValue()); assertThat(doc.rootDoc().getFields("point").length, equalTo(3)); @@ -205,14 +214,15 @@ public void testLonLatArrayArrayStored() throws Exception { .endObject().endObject().string(); DocumentMapper defaultMapper = createIndex("test").mapperService().documentMapperParser().parse("type", new CompressedXContent(mapping)); - ParsedDocument doc = defaultMapper.parse("test", "type", "1", XContentFactory.jsonBuilder() + ParsedDocument doc = defaultMapper.parse(SourceToParse.source("test", "type", "1", XContentFactory.jsonBuilder() .startObject() .startArray("point") .startArray().value(1.3).value(1.2).endArray() .startArray().value(1.5).value(1.4).endArray() .endArray() .endObject() - .bytes()); + .bytes(), + XContentType.JSON)); 
assertThat(doc.rootDoc().getFields("point"), notNullValue()); assertThat(doc.rootDoc().getFields("point").length, CoreMatchers.equalTo(4)); diff --git a/core/src/test/java/org/elasticsearch/index/mapper/IdFieldMapperTests.java b/core/src/test/java/org/elasticsearch/index/mapper/IdFieldMapperTests.java index b7ad6a7e4c3de..3c12d18b12861 100644 --- a/core/src/test/java/org/elasticsearch/index/mapper/IdFieldMapperTests.java +++ b/core/src/test/java/org/elasticsearch/index/mapper/IdFieldMapperTests.java @@ -40,10 +40,11 @@ public void testId() throws Exception { .endObject().endObject().string(); DocumentMapper docMapper = createIndex("test").mapperService().documentMapperParser().parse("type", new CompressedXContent(mapping)); - ParsedDocument doc = docMapper.parse("test", "type", "1", XContentFactory.jsonBuilder() + ParsedDocument doc = docMapper.parse(SourceToParse.source("test", "type", "1", XContentFactory.jsonBuilder() .startObject() .endObject() - .bytes()); + .bytes(), + XContentType.JSON)); assertThat(doc.rootDoc().get(UidFieldMapper.NAME), notNullValue()); assertThat(doc.rootDoc().get(IdFieldMapper.NAME), nullValue()); diff --git a/core/src/test/java/org/elasticsearch/index/mapper/IndexFieldMapperTests.java b/core/src/test/java/org/elasticsearch/index/mapper/IndexFieldMapperTests.java index f0a0b818f9d3f..910fa0f74faba 100644 --- a/core/src/test/java/org/elasticsearch/index/mapper/IndexFieldMapperTests.java +++ b/core/src/test/java/org/elasticsearch/index/mapper/IndexFieldMapperTests.java @@ -24,6 +24,7 @@ import org.elasticsearch.common.compress.CompressedXContent; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.XContentFactory; +import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.index.mapper.DocumentMapper; import org.elasticsearch.index.mapper.DocumentMapperParser; import org.elasticsearch.index.mapper.MapperParsingException; @@ -50,11 +51,12 @@ public void testDefaultDisabledIndexMapper() throws Exception { .endObject().endObject().string(); DocumentMapper docMapper = createIndex("test").mapperService().documentMapperParser().parse("type", new CompressedXContent(mapping)); - ParsedDocument doc = docMapper.parse("test", "type", "1", XContentFactory.jsonBuilder() + ParsedDocument doc = docMapper.parse(SourceToParse.source("test", "type", "1", XContentFactory.jsonBuilder() .startObject() .field("field", "value") .endObject() - .bytes()); + .bytes(), + XContentType.JSON)); assertThat(doc.rootDoc().get("_index"), nullValue()); assertThat(doc.rootDoc().get("field"), equalTo("value")); diff --git a/core/src/test/java/org/elasticsearch/index/mapper/IpFieldMapperTests.java b/core/src/test/java/org/elasticsearch/index/mapper/IpFieldMapperTests.java index 9878267d3536c..88db0b1b274fd 100644 --- a/core/src/test/java/org/elasticsearch/index/mapper/IpFieldMapperTests.java +++ b/core/src/test/java/org/elasticsearch/index/mapper/IpFieldMapperTests.java @@ -28,6 +28,7 @@ import org.elasticsearch.common.xcontent.ToXContent; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentFactory; +import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.index.IndexService; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.test.ESSingleNodeTestCase; @@ -65,11 +66,12 @@ public void testDefaults() throws Exception { assertEquals(mapping, mapper.mappingSource().toString()); - ParsedDocument doc = mapper.parse("test", "type", "1", 
XContentFactory.jsonBuilder() + ParsedDocument doc = mapper.parse(SourceToParse.source("test", "type", "1", XContentFactory.jsonBuilder() .startObject() .field("field", "::1") .endObject() - .bytes()); + .bytes(), + XContentType.JSON)); IndexableField[] fields = doc.rootDoc().getFields("field"); assertEquals(2, fields.length); @@ -93,11 +95,12 @@ public void testNotIndexed() throws Exception { assertEquals(mapping, mapper.mappingSource().toString()); - ParsedDocument doc = mapper.parse("test", "type", "1", XContentFactory.jsonBuilder() + ParsedDocument doc = mapper.parse(SourceToParse.source("test", "type", "1", XContentFactory.jsonBuilder() .startObject() .field("field", "::1") .endObject() - .bytes()); + .bytes(), + XContentType.JSON)); IndexableField[] fields = doc.rootDoc().getFields("field"); assertEquals(1, fields.length); @@ -114,11 +117,12 @@ public void testNoDocValues() throws Exception { assertEquals(mapping, mapper.mappingSource().toString()); - ParsedDocument doc = mapper.parse("test", "type", "1", XContentFactory.jsonBuilder() + ParsedDocument doc = mapper.parse(SourceToParse.source("test", "type", "1", XContentFactory.jsonBuilder() .startObject() .field("field", "::1") .endObject() - .bytes()); + .bytes(), + XContentType.JSON)); IndexableField[] fields = doc.rootDoc().getFields("field"); assertEquals(1, fields.length); @@ -136,11 +140,12 @@ public void testStore() throws Exception { assertEquals(mapping, mapper.mappingSource().toString()); - ParsedDocument doc = mapper.parse("test", "type", "1", XContentFactory.jsonBuilder() + ParsedDocument doc = mapper.parse(SourceToParse.source("test", "type", "1", XContentFactory.jsonBuilder() .startObject() .field("field", "::1") .endObject() - .bytes()); + .bytes(), + XContentType.JSON)); IndexableField[] fields = doc.rootDoc().getFields("field"); assertEquals(3, fields.length); @@ -163,11 +168,12 @@ public void testIgnoreMalformed() throws Exception { assertEquals(mapping, mapper.mappingSource().toString()); - ThrowingRunnable runnable = () -> mapper.parse("test", "type", "1", XContentFactory.jsonBuilder() + ThrowingRunnable runnable = () -> mapper.parse(SourceToParse.source("test", "type", "1", XContentFactory.jsonBuilder() .startObject() .field("field", ":1") .endObject() - .bytes()); + .bytes(), + XContentType.JSON)); MapperParsingException e = expectThrows(MapperParsingException.class, runnable); assertThat(e.getCause().getMessage(), containsString("':1' is not an IP string literal")); @@ -177,11 +183,12 @@ public void testIgnoreMalformed() throws Exception { DocumentMapper mapper2 = parser.parse("type", new CompressedXContent(mapping)); - ParsedDocument doc = mapper2.parse("test", "type", "1", XContentFactory.jsonBuilder() + ParsedDocument doc = mapper2.parse(SourceToParse.source("test", "type", "1", XContentFactory.jsonBuilder() .startObject() .field("field", ":1") .endObject() - .bytes()); + .bytes(), + XContentType.JSON)); IndexableField[] fields = doc.rootDoc().getFields("field"); assertEquals(0, fields.length); @@ -200,11 +207,12 @@ public void testNullValue() throws IOException { DocumentMapper mapper = parser.parse("type", new CompressedXContent(mapping)); assertEquals(mapping, mapper.mappingSource().toString()); - ParsedDocument doc = mapper.parse("test", "type", "1", XContentFactory.jsonBuilder() + ParsedDocument doc = mapper.parse(SourceToParse.source("test", "type", "1", XContentFactory.jsonBuilder() .startObject() .nullField("field") .endObject() - .bytes()); + .bytes(), + XContentType.JSON)); assertArrayEquals(new 
IndexableField[0], doc.rootDoc().getFields("field")); mapping = XContentFactory.jsonBuilder().startObject() @@ -220,11 +228,12 @@ public void testNullValue() throws IOException { mapper = parser.parse("type", new CompressedXContent(mapping)); assertEquals(mapping, mapper.mappingSource().toString()); - doc = mapper.parse("test", "type", "1", XContentFactory.jsonBuilder() + doc = mapper.parse(SourceToParse.source("test", "type", "1", XContentFactory.jsonBuilder() .startObject() .nullField("field") .endObject() - .bytes()); + .bytes(), + XContentType.JSON)); IndexableField[] fields = doc.rootDoc().getFields("field"); assertEquals(2, fields.length); IndexableField pointField = fields[0]; diff --git a/core/src/test/java/org/elasticsearch/index/mapper/JavaMultiFieldMergeTests.java b/core/src/test/java/org/elasticsearch/index/mapper/JavaMultiFieldMergeTests.java index f43bf73a3d7b8..c17df90b5a21d 100644 --- a/core/src/test/java/org/elasticsearch/index/mapper/JavaMultiFieldMergeTests.java +++ b/core/src/test/java/org/elasticsearch/index/mapper/JavaMultiFieldMergeTests.java @@ -24,6 +24,7 @@ import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.compress.CompressedXContent; import org.elasticsearch.common.xcontent.XContentFactory; +import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.index.mapper.DocumentMapper; import org.elasticsearch.index.mapper.MapperService; import org.elasticsearch.index.mapper.ParseContext.Document; @@ -45,7 +46,7 @@ public void testMergeMultiField() throws Exception { assertThat(docMapper.mappers().getMapper("name.indexed"), nullValue()); BytesReference json = XContentFactory.jsonBuilder().startObject().field("name", "some name").endObject().bytes(); - Document doc = docMapper.parse("test", "person", "1", json).rootDoc(); + Document doc = docMapper.parse(SourceToParse.source("test", "person", "1", json, XContentType.JSON)).rootDoc(); IndexableField f = doc.getField("name"); assertThat(f, notNullValue()); f = doc.getField("name.indexed"); @@ -62,7 +63,7 @@ public void testMergeMultiField() throws Exception { assertThat(docMapper.mappers().getMapper("name.not_indexed2"), nullValue()); assertThat(docMapper.mappers().getMapper("name.not_indexed3"), nullValue()); - doc = docMapper.parse("test", "person", "1", json).rootDoc(); + doc = docMapper.parse(SourceToParse.source("test", "person", "1", json, XContentType.JSON)).rootDoc(); f = doc.getField("name"); assertThat(f, notNullValue()); f = doc.getField("name.indexed"); @@ -101,7 +102,7 @@ public void testUpgradeFromMultiFieldTypeToMultiFields() throws Exception { assertThat(docMapper.mappers().getMapper("name.indexed"), nullValue()); BytesReference json = XContentFactory.jsonBuilder().startObject().field("name", "some name").endObject().bytes(); - Document doc = docMapper.parse("test", "person", "1", json).rootDoc(); + Document doc = docMapper.parse(SourceToParse.source("test", "person", "1", json, XContentType.JSON)).rootDoc(); IndexableField f = doc.getField("name"); assertThat(f, notNullValue()); f = doc.getField("name.indexed"); @@ -119,7 +120,7 @@ public void testUpgradeFromMultiFieldTypeToMultiFields() throws Exception { assertThat(docMapper.mappers().getMapper("name.not_indexed2"), nullValue()); assertThat(docMapper.mappers().getMapper("name.not_indexed3"), nullValue()); - doc = docMapper.parse("test", "person", "1", json).rootDoc(); + doc = docMapper.parse(SourceToParse.source("test", "person", "1", json, XContentType.JSON)).rootDoc(); f = doc.getField("name"); 
assertThat(f, notNullValue()); f = doc.getField("name.indexed"); diff --git a/core/src/test/java/org/elasticsearch/index/mapper/KeywordFieldMapperTests.java b/core/src/test/java/org/elasticsearch/index/mapper/KeywordFieldMapperTests.java index bffe58db3a626..2da44d57f00aa 100644 --- a/core/src/test/java/org/elasticsearch/index/mapper/KeywordFieldMapperTests.java +++ b/core/src/test/java/org/elasticsearch/index/mapper/KeywordFieldMapperTests.java @@ -27,6 +27,7 @@ import org.elasticsearch.common.compress.CompressedXContent; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.XContentFactory; +import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.index.IndexService; import org.elasticsearch.index.mapper.MapperService.MergeReason; import org.elasticsearch.plugins.Plugin; @@ -70,11 +71,12 @@ public void testDefaults() throws Exception { assertEquals(mapping, mapper.mappingSource().toString()); - ParsedDocument doc = mapper.parse("test", "type", "1", XContentFactory.jsonBuilder() + ParsedDocument doc = mapper.parse(SourceToParse.source("test", "type", "1", XContentFactory.jsonBuilder() .startObject() .field("field", "1234") .endObject() - .bytes()); + .bytes(), + XContentType.JSON)); IndexableField[] fields = doc.rootDoc().getFields("field"); assertEquals(2, fields.length); @@ -106,20 +108,22 @@ public void testIgnoreAbove() throws IOException { assertEquals(mapping, mapper.mappingSource().toString()); - ParsedDocument doc = mapper.parse("test", "type", "1", XContentFactory.jsonBuilder() + ParsedDocument doc = mapper.parse(SourceToParse.source("test", "type", "1", XContentFactory.jsonBuilder() .startObject() .field("field", "elk") .endObject() - .bytes()); + .bytes(), + XContentType.JSON)); IndexableField[] fields = doc.rootDoc().getFields("field"); assertEquals(2, fields.length); - doc = mapper.parse("test", "type", "1", XContentFactory.jsonBuilder() + doc = mapper.parse(SourceToParse.source("test", "type", "1", XContentFactory.jsonBuilder() .startObject() .field("field", "elasticsearch") .endObject() - .bytes()); + .bytes(), + XContentType.JSON)); fields = doc.rootDoc().getFields("field"); assertEquals(0, fields.length); @@ -133,11 +137,12 @@ public void testNullValue() throws IOException { DocumentMapper mapper = parser.parse("type", new CompressedXContent(mapping)); assertEquals(mapping, mapper.mappingSource().toString()); - ParsedDocument doc = mapper.parse("test", "type", "1", XContentFactory.jsonBuilder() + ParsedDocument doc = mapper.parse(SourceToParse.source("test", "type", "1", XContentFactory.jsonBuilder() .startObject() .nullField("field") .endObject() - .bytes()); + .bytes(), + XContentType.JSON)); assertArrayEquals(new IndexableField[0], doc.rootDoc().getFields("field")); mapping = XContentFactory.jsonBuilder().startObject().startObject("type") @@ -148,19 +153,21 @@ public void testNullValue() throws IOException { assertEquals(mapping, mapper.mappingSource().toString()); - doc = mapper.parse("test", "type", "1", XContentFactory.jsonBuilder() + doc = mapper.parse(SourceToParse.source("test", "type", "1", XContentFactory.jsonBuilder() .startObject() .endObject() - .bytes()); + .bytes(), + XContentType.JSON)); IndexableField[] fields = doc.rootDoc().getFields("field"); assertEquals(0, fields.length); - doc = mapper.parse("test", "type", "1", XContentFactory.jsonBuilder() + doc = mapper.parse(SourceToParse.source("test", "type", "1", XContentFactory.jsonBuilder() .startObject() .nullField("field") .endObject() - 
.bytes()); + .bytes(), + XContentType.JSON)); fields = doc.rootDoc().getFields("field"); assertEquals(2, fields.length); @@ -176,11 +183,12 @@ public void testEnableStore() throws IOException { assertEquals(mapping, mapper.mappingSource().toString()); - ParsedDocument doc = mapper.parse("test", "type", "1", XContentFactory.jsonBuilder() + ParsedDocument doc = mapper.parse(SourceToParse.source("test", "type", "1", XContentFactory.jsonBuilder() .startObject() .field("field", "1234") .endObject() - .bytes()); + .bytes(), + XContentType.JSON)); IndexableField[] fields = doc.rootDoc().getFields("field"); assertEquals(2, fields.length); @@ -196,11 +204,12 @@ public void testDisableIndex() throws IOException { assertEquals(mapping, mapper.mappingSource().toString()); - ParsedDocument doc = mapper.parse("test", "type", "1", XContentFactory.jsonBuilder() + ParsedDocument doc = mapper.parse(SourceToParse.source("test", "type", "1", XContentFactory.jsonBuilder() .startObject() .field("field", "1234") .endObject() - .bytes()); + .bytes(), + XContentType.JSON)); IndexableField[] fields = doc.rootDoc().getFields("field"); assertEquals(1, fields.length); @@ -217,11 +226,12 @@ public void testDisableDocValues() throws IOException { assertEquals(mapping, mapper.mappingSource().toString()); - ParsedDocument doc = mapper.parse("test", "type", "1", XContentFactory.jsonBuilder() + ParsedDocument doc = mapper.parse(SourceToParse.source("test", "type", "1", XContentFactory.jsonBuilder() .startObject() .field("field", "1234") .endObject() - .bytes()); + .bytes(), + XContentType.JSON)); IndexableField[] fields = doc.rootDoc().getFields("field"); assertEquals(1, fields.length); @@ -238,11 +248,12 @@ public void testIndexOptions() throws IOException { assertEquals(mapping, mapper.mappingSource().toString()); - ParsedDocument doc = mapper.parse("test", "type", "1", XContentFactory.jsonBuilder() + ParsedDocument doc = mapper.parse(SourceToParse.source("test", "type", "1", XContentFactory.jsonBuilder() .startObject() .field("field", "1234") .endObject() - .bytes()); + .bytes(), + XContentType.JSON)); IndexableField[] fields = doc.rootDoc().getFields("field"); assertEquals(2, fields.length); @@ -278,11 +289,12 @@ public void testEnableNorms() throws IOException { assertEquals(mapping, mapper.mappingSource().toString()); - ParsedDocument doc = mapper.parse("test", "type", "1", XContentFactory.jsonBuilder() + ParsedDocument doc = mapper.parse(SourceToParse.source("test", "type", "1", XContentFactory.jsonBuilder() .startObject() .field("field", "1234") .endObject() - .bytes()); + .bytes(), + XContentType.JSON)); IndexableField[] fields = doc.rootDoc().getFields("field"); assertEquals(2, fields.length); @@ -299,11 +311,12 @@ public void testNormalizer() throws IOException { assertEquals(mapping, mapper.mappingSource().toString()); - ParsedDocument doc = mapper.parse("test", "type", "1", XContentFactory.jsonBuilder() + ParsedDocument doc = mapper.parse(SourceToParse.source("test", "type", "1", XContentFactory.jsonBuilder() .startObject() .field("field", "AbC") .endObject() - .bytes()); + .bytes(), + XContentType.JSON)); IndexableField[] fields = doc.rootDoc().getFields("field"); assertEquals(2, fields.length); diff --git a/core/src/test/java/org/elasticsearch/index/mapper/MapperServiceTests.java b/core/src/test/java/org/elasticsearch/index/mapper/MapperServiceTests.java index 5c6ffb70c73e4..0a6a8f8d46954 100644 --- a/core/src/test/java/org/elasticsearch/index/mapper/MapperServiceTests.java +++ 
b/core/src/test/java/org/elasticsearch/index/mapper/MapperServiceTests.java @@ -38,6 +38,7 @@ import java.util.HashMap; import java.util.HashSet; import java.util.Map; +import java.util.Set; import java.util.concurrent.ExecutionException; import java.util.function.Function; @@ -189,6 +190,22 @@ public void testMergeWithMap() throws Throwable { assertThat(e.getMessage(), startsWith("Failed to parse mapping [type1]: ")); } + public void testMergeParentTypesSame() { + // Verifies that a merge (absent a DocumentMapper change) + // doesn't change the parentTypes reference. + // The collection was being rewrapped with each merge + // in v5.2 resulting in eventual StackOverflowErrors. + // https://github.com/elastic/elasticsearch/issues/23604 + + IndexService indexService1 = createIndex("index1"); + MapperService mapperService = indexService1.mapperService(); + Set<String> parentTypes = mapperService.getParentTypes(); + + Map<String, Map<String, Object>> mappings = new HashMap<>(); + mapperService.merge(mappings, MergeReason.MAPPING_UPDATE, false); + assertSame(parentTypes, mapperService.getParentTypes()); + } + public void testOtherDocumentMappersOnlyUpdatedWhenChangingFieldType() throws IOException { IndexService indexService = createIndex("test"); diff --git a/core/src/test/java/org/elasticsearch/index/mapper/MultiFieldTests.java b/core/src/test/java/org/elasticsearch/index/mapper/MultiFieldTests.java index 8f17b3e0e0d64..e66e0532737c4 100644 --- a/core/src/test/java/org/elasticsearch/index/mapper/MultiFieldTests.java +++ b/core/src/test/java/org/elasticsearch/index/mapper/MultiFieldTests.java @@ -27,6 +27,7 @@ import org.elasticsearch.common.compress.CompressedXContent; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentHelper; +import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.common.xcontent.support.XContentMapValues; import org.elasticsearch.index.IndexService; import org.elasticsearch.index.mapper.DateFieldMapper; @@ -62,7 +63,7 @@ public void testMultiFieldMultiFields() throws Exception { private void testMultiField(String mapping) throws Exception { DocumentMapper docMapper = createIndex("test").mapperService().documentMapperParser().parse("person", new CompressedXContent(mapping)); BytesReference json = new BytesArray(copyToBytesFromClasspath("/org/elasticsearch/index/mapper/multifield/test-data.json")); - Document doc = docMapper.parse("test", "person", "1", json).rootDoc(); + Document doc = docMapper.parse(SourceToParse.source("test", "person", "1", json, XContentType.JSON)).rootDoc(); IndexableField f = doc.getField("name"); assertThat(f.name(), equalTo("name")); @@ -146,7 +147,7 @@ public void testBuildThenParse() throws Exception { BytesReference json = new BytesArray(copyToBytesFromClasspath("/org/elasticsearch/index/mapper/multifield/test-data.json")); - Document doc = docMapper.parse("test", "person", "1", json).rootDoc(); + Document doc = docMapper.parse(SourceToParse.source("test", "person", "1", json, XContentType.JSON)).rootDoc(); IndexableField f = doc.getField("name"); assertThat(f.name(), equalTo("name")); diff --git a/core/src/test/java/org/elasticsearch/index/mapper/NestedObjectMapperTests.java b/core/src/test/java/org/elasticsearch/index/mapper/NestedObjectMapperTests.java index ae306009f2558..91cf8fdde851b 100644 --- a/core/src/test/java/org/elasticsearch/index/mapper/NestedObjectMapperTests.java +++ b/core/src/test/java/org/elasticsearch/index/mapper/NestedObjectMapperTests.java @@ -22,6 +22,7 @@ import
org.elasticsearch.common.compress.CompressedXContent; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.XContentFactory; +import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.index.mapper.MapperService.MergeReason; import org.elasticsearch.index.mapper.ObjectMapper.Dynamic; import org.elasticsearch.test.ESSingleNodeTestCase; @@ -42,21 +43,23 @@ public void testEmptyNested() throws Exception { DocumentMapper docMapper = createIndex("test").mapperService().documentMapperParser().parse("type", new CompressedXContent(mapping)); - ParsedDocument doc = docMapper.parse("test", "type", "1", XContentFactory.jsonBuilder() + ParsedDocument doc = docMapper.parse(SourceToParse.source("test", "type", "1", XContentFactory.jsonBuilder() .startObject() .field("field", "value") .nullField("nested1") .endObject() - .bytes()); + .bytes(), + XContentType.JSON)); assertThat(doc.docs().size(), equalTo(1)); - doc = docMapper.parse("test", "type", "1", XContentFactory.jsonBuilder() + doc = docMapper.parse(SourceToParse.source("test", "type", "1", XContentFactory.jsonBuilder() .startObject() .field("field", "value") .startArray("nested").endArray() .endObject() - .bytes()); + .bytes(), + XContentType.JSON)); assertThat(doc.docs().size(), equalTo(1)); } @@ -72,12 +75,13 @@ public void testSingleNested() throws Exception { ObjectMapper nested1Mapper = docMapper.objectMappers().get("nested1"); assertThat(nested1Mapper.nested().isNested(), equalTo(true)); - ParsedDocument doc = docMapper.parse("test", "type", "1", XContentFactory.jsonBuilder() + ParsedDocument doc = docMapper.parse(SourceToParse.source("test", "type", "1", XContentFactory.jsonBuilder() .startObject() .field("field", "value") .startObject("nested1").field("field1", "1").field("field2", "2").endObject() .endObject() - .bytes()); + .bytes(), + XContentType.JSON)); assertThat(doc.docs().size(), equalTo(2)); assertThat(doc.docs().get(0).get(TypeFieldMapper.NAME), equalTo(nested1Mapper.nestedTypePathAsString())); @@ -87,7 +91,7 @@ public void testSingleNested() throws Exception { assertThat(doc.docs().get(1).get("field"), equalTo("value")); - doc = docMapper.parse("test", "type", "1", XContentFactory.jsonBuilder() + doc = docMapper.parse(SourceToParse.source("test", "type", "1", XContentFactory.jsonBuilder() .startObject() .field("field", "value") .startArray("nested1") @@ -95,7 +99,8 @@ public void testSingleNested() throws Exception { .startObject().field("field1", "3").field("field2", "4").endObject() .endArray() .endObject() - .bytes()); + .bytes(), + XContentType.JSON)); assertThat(doc.docs().size(), equalTo(3)); assertThat(doc.docs().get(0).get(TypeFieldMapper.NAME), equalTo(nested1Mapper.nestedTypePathAsString())); @@ -127,7 +132,7 @@ public void testMultiNested() throws Exception { assertThat(nested2Mapper.nested().isIncludeInParent(), equalTo(false)); assertThat(nested2Mapper.nested().isIncludeInRoot(), equalTo(false)); - ParsedDocument doc = docMapper.parse("test", "type", "1", XContentFactory.jsonBuilder() + ParsedDocument doc = docMapper.parse(SourceToParse.source("test", "type", "1", XContentFactory.jsonBuilder() .startObject() .field("field", "value") .startArray("nested1") @@ -135,7 +140,8 @@ public void testMultiNested() throws Exception { .startObject().field("field1", "4").startArray("nested2").startObject().field("field2", "5").endObject().startObject().field("field2", "6").endObject().endArray().endObject() .endArray() .endObject() - .bytes()); + .bytes(), + 
XContentType.JSON)); assertThat(doc.docs().size(), equalTo(7)); assertThat(doc.docs().get(0).get("nested1.nested2.field2"), equalTo("6")); @@ -178,7 +184,7 @@ public void testMultiObjectAndNested1() throws Exception { assertThat(nested2Mapper.nested().isIncludeInParent(), equalTo(true)); assertThat(nested2Mapper.nested().isIncludeInRoot(), equalTo(false)); - ParsedDocument doc = docMapper.parse("test", "type", "1", XContentFactory.jsonBuilder() + ParsedDocument doc = docMapper.parse(SourceToParse.source("test", "type", "1", XContentFactory.jsonBuilder() .startObject() .field("field", "value") .startArray("nested1") @@ -186,7 +192,8 @@ public void testMultiObjectAndNested1() throws Exception { .startObject().field("field1", "4").startArray("nested2").startObject().field("field2", "5").endObject().startObject().field("field2", "6").endObject().endArray().endObject() .endArray() .endObject() - .bytes()); + .bytes(), + XContentType.JSON)); assertThat(doc.docs().size(), equalTo(7)); assertThat(doc.docs().get(0).get("nested1.nested2.field2"), equalTo("6")); @@ -229,7 +236,7 @@ public void testMultiObjectAndNested2() throws Exception { assertThat(nested2Mapper.nested().isIncludeInParent(), equalTo(true)); assertThat(nested2Mapper.nested().isIncludeInRoot(), equalTo(false)); - ParsedDocument doc = docMapper.parse("test", "type", "1", XContentFactory.jsonBuilder() + ParsedDocument doc = docMapper.parse(SourceToParse.source("test", "type", "1", XContentFactory.jsonBuilder() .startObject() .field("field", "value") .startArray("nested1") @@ -237,7 +244,8 @@ public void testMultiObjectAndNested2() throws Exception { .startObject().field("field1", "4").startArray("nested2").startObject().field("field2", "5").endObject().startObject().field("field2", "6").endObject().endArray().endObject() .endArray() .endObject() - .bytes()); + .bytes(), + XContentType.JSON)); assertThat(doc.docs().size(), equalTo(7)); assertThat(doc.docs().get(0).get("nested1.nested2.field2"), equalTo("6")); @@ -280,7 +288,7 @@ public void testMultiRootAndNested1() throws Exception { assertThat(nested2Mapper.nested().isIncludeInParent(), equalTo(false)); assertThat(nested2Mapper.nested().isIncludeInRoot(), equalTo(true)); - ParsedDocument doc = docMapper.parse("test", "type", "1", XContentFactory.jsonBuilder() + ParsedDocument doc = docMapper.parse(SourceToParse.source("test", "type", "1", XContentFactory.jsonBuilder() .startObject() .field("field", "value") .startArray("nested1") @@ -288,7 +296,8 @@ public void testMultiRootAndNested1() throws Exception { .startObject().field("field1", "4").startArray("nested2").startObject().field("field2", "5").endObject().startObject().field("field2", "6").endObject().endArray().endObject() .endArray() .endObject() - .bytes()); + .bytes(), + XContentType.JSON)); assertThat(doc.docs().size(), equalTo(7)); assertThat(doc.docs().get(0).get("nested1.nested2.field2"), equalTo("6")); @@ -326,7 +335,7 @@ public void testNestedArrayStrict() throws Exception { assertThat(nested1Mapper.nested().isNested(), equalTo(true)); assertThat(nested1Mapper.dynamic(), equalTo(Dynamic.STRICT)); - ParsedDocument doc = docMapper.parse("test", "type", "1", XContentFactory.jsonBuilder() + ParsedDocument doc = docMapper.parse(SourceToParse.source("test", "type", "1", XContentFactory.jsonBuilder() .startObject() .field("field", "value") .startArray("nested1") @@ -334,7 +343,8 @@ public void testNestedArrayStrict() throws Exception { .startObject().field("field1", "4").endObject() .endArray() .endObject() - .bytes()); + 
.bytes(), + XContentType.JSON)); assertThat(doc.docs().size(), equalTo(3)); assertThat(doc.docs().get(0).get("nested1.field1"), equalTo("4")); diff --git a/core/src/test/java/org/elasticsearch/index/mapper/NullValueObjectMappingTests.java b/core/src/test/java/org/elasticsearch/index/mapper/NullValueObjectMappingTests.java index d48fc3c0b6cd9..8a46f24998db9 100644 --- a/core/src/test/java/org/elasticsearch/index/mapper/NullValueObjectMappingTests.java +++ b/core/src/test/java/org/elasticsearch/index/mapper/NullValueObjectMappingTests.java @@ -21,6 +21,7 @@ import org.elasticsearch.common.compress.CompressedXContent; import org.elasticsearch.common.xcontent.XContentFactory; +import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.index.mapper.DocumentMapper; import org.elasticsearch.index.mapper.ParsedDocument; import org.elasticsearch.test.ESSingleNodeTestCase; @@ -37,30 +38,33 @@ public void testNullValueObject() throws IOException { DocumentMapper defaultMapper = createIndex("test").mapperService().documentMapperParser().parse("type", new CompressedXContent(mapping)); - ParsedDocument doc = defaultMapper.parse("test", "type", "1", XContentFactory.jsonBuilder() + ParsedDocument doc = defaultMapper.parse(SourceToParse.source("test", "type", "1", XContentFactory.jsonBuilder() .startObject() .startObject("obj1").endObject() .field("value1", "test1") .endObject() - .bytes()); + .bytes(), + XContentType.JSON)); assertThat(doc.rootDoc().get("value1"), equalTo("test1")); - doc = defaultMapper.parse("test", "type", "1", XContentFactory.jsonBuilder() + doc = defaultMapper.parse(SourceToParse.source("test", "type", "1", XContentFactory.jsonBuilder() .startObject() .nullField("obj1") .field("value1", "test1") .endObject() - .bytes()); + .bytes(), + XContentType.JSON)); assertThat(doc.rootDoc().get("value1"), equalTo("test1")); - doc = defaultMapper.parse("test", "type", "1", XContentFactory.jsonBuilder() + doc = defaultMapper.parse(SourceToParse.source("test", "type", "1", XContentFactory.jsonBuilder() .startObject() .startObject("obj1").field("field", "value").endObject() .field("value1", "test1") .endObject() - .bytes()); + .bytes(), + XContentType.JSON)); assertThat(doc.rootDoc().get("obj1.field"), equalTo("value")); assertThat(doc.rootDoc().get("value1"), equalTo("test1")); diff --git a/core/src/test/java/org/elasticsearch/index/mapper/NumberFieldMapperTests.java b/core/src/test/java/org/elasticsearch/index/mapper/NumberFieldMapperTests.java index de3a15f865ce1..871d62d8bd6be 100644 --- a/core/src/test/java/org/elasticsearch/index/mapper/NumberFieldMapperTests.java +++ b/core/src/test/java/org/elasticsearch/index/mapper/NumberFieldMapperTests.java @@ -23,6 +23,7 @@ import org.apache.lucene.index.IndexableField; import org.elasticsearch.common.compress.CompressedXContent; import org.elasticsearch.common.xcontent.XContentFactory; +import org.elasticsearch.common.xcontent.XContentType; import java.io.IOException; import java.util.Arrays; @@ -47,11 +48,12 @@ public void doTestDefaults(String type) throws Exception { assertEquals(mapping, mapper.mappingSource().toString()); - ParsedDocument doc = mapper.parse("test", "type", "1", XContentFactory.jsonBuilder() + ParsedDocument doc = mapper.parse(SourceToParse.source("test", "type", "1", XContentFactory.jsonBuilder() .startObject() .field("field", 123) .endObject() - .bytes()); + .bytes(), + XContentType.JSON)); IndexableField[] fields = doc.rootDoc().getFields("field"); assertEquals(2, fields.length); @@ -74,11 +76,12 @@ public 
void doTestNotIndexed(String type) throws Exception { assertEquals(mapping, mapper.mappingSource().toString()); - ParsedDocument doc = mapper.parse("test", "type", "1", XContentFactory.jsonBuilder() + ParsedDocument doc = mapper.parse(SourceToParse.source("test", "type", "1", XContentFactory.jsonBuilder() .startObject() .field("field", 123) .endObject() - .bytes()); + .bytes(), + XContentType.JSON)); IndexableField[] fields = doc.rootDoc().getFields("field"); assertEquals(1, fields.length); @@ -96,11 +99,12 @@ public void doTestNoDocValues(String type) throws Exception { assertEquals(mapping, mapper.mappingSource().toString()); - ParsedDocument doc = mapper.parse("test", "type", "1", XContentFactory.jsonBuilder() + ParsedDocument doc = mapper.parse(SourceToParse.source("test", "type", "1", XContentFactory.jsonBuilder() .startObject() .field("field", 123) .endObject() - .bytes()); + .bytes(), + XContentType.JSON)); IndexableField[] fields = doc.rootDoc().getFields("field"); assertEquals(1, fields.length); @@ -119,11 +123,12 @@ public void doTestStore(String type) throws Exception { assertEquals(mapping, mapper.mappingSource().toString()); - ParsedDocument doc = mapper.parse("test", "type", "1", XContentFactory.jsonBuilder() + ParsedDocument doc = mapper.parse(SourceToParse.source("test", "type", "1", XContentFactory.jsonBuilder() .startObject() .field("field", 123) .endObject() - .bytes()); + .bytes(), + XContentType.JSON)); IndexableField[] fields = doc.rootDoc().getFields("field"); assertEquals(3, fields.length); @@ -147,11 +152,12 @@ public void doTestCoerce(String type) throws IOException { assertEquals(mapping, mapper.mappingSource().toString()); - ParsedDocument doc = mapper.parse("test", "type", "1", XContentFactory.jsonBuilder() + ParsedDocument doc = mapper.parse(SourceToParse.source("test", "type", "1", XContentFactory.jsonBuilder() .startObject() .field("field", "123") .endObject() - .bytes()); + .bytes(), + XContentType.JSON)); IndexableField[] fields = doc.rootDoc().getFields("field"); assertEquals(2, fields.length); @@ -169,11 +175,12 @@ public void doTestCoerce(String type) throws IOException { assertEquals(mapping, mapper2.mappingSource().toString()); - ThrowingRunnable runnable = () -> mapper2.parse("test", "type", "1", XContentFactory.jsonBuilder() + ThrowingRunnable runnable = () -> mapper2.parse(SourceToParse.source("test", "type", "1", XContentFactory.jsonBuilder() .startObject() .field("field", "123") .endObject() - .bytes()); + .bytes(), + XContentType.JSON)); MapperParsingException e = expectThrows(MapperParsingException.class, runnable); assertThat(e.getCause().getMessage(), containsString("passed as String")); } @@ -193,11 +200,12 @@ private void doTestIgnoreMalformed(String type) throws IOException { assertEquals(mapping, mapper.mappingSource().toString()); - ThrowingRunnable runnable = () -> mapper.parse("test", "type", "1", XContentFactory.jsonBuilder() + ThrowingRunnable runnable = () -> mapper.parse(SourceToParse.source("test", "type", "1", XContentFactory.jsonBuilder() .startObject() .field("field", "a") .endObject() - .bytes()); + .bytes(), + XContentType.JSON)); MapperParsingException e = expectThrows(MapperParsingException.class, runnable); assertThat(e.getCause().getMessage(), containsString("For input string: \"a\"")); @@ -207,11 +215,12 @@ private void doTestIgnoreMalformed(String type) throws IOException { DocumentMapper mapper2 = parser.parse("type", new CompressedXContent(mapping)); - ParsedDocument doc = mapper2.parse("test", "type", "1", 
XContentFactory.jsonBuilder() + ParsedDocument doc = mapper2.parse(SourceToParse.source("test", "type", "1", XContentFactory.jsonBuilder() .startObject() .field("field", "a") .endObject() - .bytes()); + .bytes(), + XContentType.JSON)); IndexableField[] fields = doc.rootDoc().getFields("field"); assertEquals(0, fields.length); @@ -248,11 +257,12 @@ protected void doTestNullValue(String type) throws IOException { DocumentMapper mapper = parser.parse("type", new CompressedXContent(mapping)); assertEquals(mapping, mapper.mappingSource().toString()); - ParsedDocument doc = mapper.parse("test", "type", "1", XContentFactory.jsonBuilder() + ParsedDocument doc = mapper.parse(SourceToParse.source("test", "type", "1", XContentFactory.jsonBuilder() .startObject() .nullField("field") .endObject() - .bytes()); + .bytes(), + XContentType.JSON)); assertArrayEquals(new IndexableField[0], doc.rootDoc().getFields("field")); Object missing; @@ -274,11 +284,12 @@ protected void doTestNullValue(String type) throws IOException { mapper = parser.parse("type", new CompressedXContent(mapping)); assertEquals(mapping, mapper.mappingSource().toString()); - doc = mapper.parse("test", "type", "1", XContentFactory.jsonBuilder() + doc = mapper.parse(SourceToParse.source("test", "type", "1", XContentFactory.jsonBuilder() .startObject() .nullField("field") .endObject() - .bytes()); + .bytes(), + XContentType.JSON)); IndexableField[] fields = doc.rootDoc().getFields("field"); assertEquals(2, fields.length); IndexableField pointField = fields[0]; diff --git a/core/src/test/java/org/elasticsearch/index/mapper/ObjectMapperTests.java b/core/src/test/java/org/elasticsearch/index/mapper/ObjectMapperTests.java index e974a02943b37..0e1bead111452 100644 --- a/core/src/test/java/org/elasticsearch/index/mapper/ObjectMapperTests.java +++ b/core/src/test/java/org/elasticsearch/index/mapper/ObjectMapperTests.java @@ -25,6 +25,7 @@ import org.elasticsearch.common.compress.CompressedXContent; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.XContentFactory; +import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.index.mapper.DocumentMapper; import org.elasticsearch.index.mapper.DocumentMapperParser; import org.elasticsearch.index.mapper.MapperParsingException; @@ -48,7 +49,7 @@ public void testDifferentInnerObjectTokenFailure() throws Exception { DocumentMapper defaultMapper = createIndex("test").mapperService().documentMapperParser().parse("type", new CompressedXContent(mapping)); IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> { - defaultMapper.parse("test", "type", "1", new BytesArray(" {\n" + + defaultMapper.parse(SourceToParse.source("test", "type", "1", new BytesArray(" {\n" + " \"object\": {\n" + " \"array\":[\n" + " {\n" + @@ -60,7 +61,8 @@ public void testDifferentInnerObjectTokenFailure() throws Exception { " ]\n" + " },\n" + " \"value\":\"value\"\n" + - " }")); + " }"), + XContentType.JSON)); }); assertTrue(e.getMessage(), e.getMessage().contains("different type")); } diff --git a/core/src/test/java/org/elasticsearch/index/mapper/PathMatchDynamicTemplateTests.java b/core/src/test/java/org/elasticsearch/index/mapper/PathMatchDynamicTemplateTests.java index 7728e09c7326f..3ad53513c5185 100644 --- a/core/src/test/java/org/elasticsearch/index/mapper/PathMatchDynamicTemplateTests.java +++ b/core/src/test/java/org/elasticsearch/index/mapper/PathMatchDynamicTemplateTests.java @@ -40,7 +40,8 @@ public void testSimple() throws Exception { 
client().admin().indices().preparePutMapping("test").setType("person").setSource(mapping, XContentType.JSON).get(); DocumentMapper docMapper = index.mapperService().documentMapper("person"); byte[] json = copyToBytesFromClasspath("/org/elasticsearch/index/mapper/dynamictemplate/pathmatch/test-data.json"); - ParsedDocument parsedDoc = docMapper.parse("test", "person", "1", new BytesArray(json)); + ParsedDocument parsedDoc = docMapper.parse(SourceToParse.source("test", "person", "1", new BytesArray(json), + XContentType.JSON)); client().admin().indices().preparePutMapping("test").setType("person") .setSource(parsedDoc.dynamicMappingsUpdate().toString(), XContentType.JSON).get(); docMapper = index.mapperService().documentMapper("person"); diff --git a/core/src/test/java/org/elasticsearch/index/mapper/RangeFieldMapperTests.java b/core/src/test/java/org/elasticsearch/index/mapper/RangeFieldMapperTests.java index c065888f91149..18a771bb46716 100644 --- a/core/src/test/java/org/elasticsearch/index/mapper/RangeFieldMapperTests.java +++ b/core/src/test/java/org/elasticsearch/index/mapper/RangeFieldMapperTests.java @@ -23,6 +23,7 @@ import org.elasticsearch.common.xcontent.ToXContent; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentFactory; +import org.elasticsearch.common.xcontent.XContentType; import java.io.IOException; import java.util.Arrays; @@ -95,13 +96,14 @@ public void doTestDefaults(String type) throws Exception { DocumentMapper mapper = parser.parse("type", new CompressedXContent(mapping.string())); assertEquals(mapping.string(), mapper.mappingSource().toString()); - ParsedDocument doc = mapper.parse("test", "type", "1", XContentFactory.jsonBuilder() + ParsedDocument doc = mapper.parse(SourceToParse.source("test", "type", "1", XContentFactory.jsonBuilder() .startObject() .startObject("field") .field(getFromField(), getFrom(type)) .field(getToField(), getTo(type)) .endObject() - .endObject().bytes()); + .endObject().bytes(), + XContentType.JSON)); IndexableField[] fields = doc.rootDoc().getFields("field"); assertEquals(1, fields.length); @@ -122,13 +124,14 @@ protected void doTestNotIndexed(String type) throws Exception { DocumentMapper mapper = parser.parse("type", new CompressedXContent(mapping.string())); assertEquals(mapping.string(), mapper.mappingSource().toString()); - ParsedDocument doc = mapper.parse("test", "type", "1", XContentFactory.jsonBuilder() + ParsedDocument doc = mapper.parse(SourceToParse.source("test", "type", "1", XContentFactory.jsonBuilder() .startObject() .startObject("field") .field(getFromField(), getFrom(type)) .field(getToField(), getTo(type)) .endObject() - .endObject().bytes()); + .endObject().bytes(), + XContentType.JSON)); IndexableField[] fields = doc.rootDoc().getFields("field"); assertEquals(0, fields.length); @@ -145,13 +148,14 @@ protected void doTestNoDocValues(String type) throws Exception { DocumentMapper mapper = parser.parse("type", new CompressedXContent(mapping.string())); assertEquals(mapping.string(), mapper.mappingSource().toString()); - ParsedDocument doc = mapper.parse("test", "type", "1", XContentFactory.jsonBuilder() + ParsedDocument doc = mapper.parse(SourceToParse.source("test", "type", "1", XContentFactory.jsonBuilder() .startObject() .startObject("field") .field(getFromField(), getFrom(type)) .field(getToField(), getTo(type)) .endObject() - .endObject().bytes()); + .endObject().bytes(), + XContentType.JSON)); IndexableField[] fields = doc.rootDoc().getFields("field"); 
assertEquals(1, fields.length); @@ -170,13 +174,14 @@ protected void doTestStore(String type) throws Exception { DocumentMapper mapper = parser.parse("type", new CompressedXContent(mapping.string())); assertEquals(mapping.string(), mapper.mappingSource().toString()); - ParsedDocument doc = mapper.parse("test", "type", "1", XContentFactory.jsonBuilder() + ParsedDocument doc = mapper.parse(SourceToParse.source("test", "type", "1", XContentFactory.jsonBuilder() .startObject() .startObject("field") .field(getFromField(), getFrom(type)) .field(getToField(), getTo(type)) .endObject() - .endObject().bytes()); + .endObject().bytes(), + XContentType.JSON)); IndexableField[] fields = doc.rootDoc().getFields("field"); assertEquals(2, fields.length); @@ -199,13 +204,14 @@ public void doTestCoerce(String type) throws IOException { assertEquals(mapping.string(), mapper.mappingSource().toString()); - ParsedDocument doc = mapper.parse("test", "type", "1", XContentFactory.jsonBuilder() + ParsedDocument doc = mapper.parse(SourceToParse.source("test", "type", "1", XContentFactory.jsonBuilder() .startObject() .startObject("field") .field(getFromField(), getFrom(type)) .field(getToField(), getTo(type)) .endObject() - .endObject().bytes()); + .endObject().bytes(), + XContentType.JSON)); IndexableField[] fields = doc.rootDoc().getFields("field"); assertEquals(1, fields.length); @@ -219,13 +225,14 @@ public void doTestCoerce(String type) throws IOException { assertEquals(mapping.string(), mapper2.mappingSource().toString()); - ThrowingRunnable runnable = () -> mapper2.parse("test", "type", "1", XContentFactory.jsonBuilder() + ThrowingRunnable runnable = () -> mapper2.parse(SourceToParse.source("test", "type", "1", XContentFactory.jsonBuilder() .startObject() .startObject("field") .field(getFromField(), "5.2") .field(getToField(), "10") .endObject() - .endObject().bytes()); + .endObject().bytes(), + XContentType.JSON)); MapperParsingException e = expectThrows(MapperParsingException.class, runnable); assertThat(e.getCause().getMessage(), anyOf(containsString("passed as String"), containsString("failed to parse date"))); } @@ -243,26 +250,28 @@ protected void doTestNullValue(String type) throws IOException { assertEquals(mapping.string(), mapper.mappingSource().toString()); // test null value for min and max - ParsedDocument doc = mapper.parse("test", "type", "1", XContentFactory.jsonBuilder() + ParsedDocument doc = mapper.parse(SourceToParse.source("test", "type", "1", XContentFactory.jsonBuilder() .startObject() .startObject("field") .nullField(getFromField()) .nullField(getToField()) .endObject() - .endObject().bytes()); + .endObject().bytes(), + XContentType.JSON)); assertEquals(2, doc.rootDoc().getFields("field").length); IndexableField[] fields = doc.rootDoc().getFields("field"); IndexableField storedField = fields[1]; assertThat(storedField.stringValue(), containsString(type.equals("date_range") ? 
Long.MAX_VALUE+"" : getMax(type)+"")); // test null max value - doc = mapper.parse("test", "type", "1", XContentFactory.jsonBuilder() + doc = mapper.parse(SourceToParse.source("test", "type", "1", XContentFactory.jsonBuilder() .startObject() .startObject("field") .field(getFromField(), getFrom(type)) .nullField(getToField()) .endObject() - .endObject().bytes()); + .endObject().bytes(), + XContentType.JSON)); fields = doc.rootDoc().getFields("field"); assertEquals(2, fields.length); @@ -293,11 +302,12 @@ public void doTestNoBounds(String type) throws IOException { assertEquals(mapping.string(), mapper.mappingSource().toString()); // test no bounds specified - ParsedDocument doc = mapper.parse("test", "type", "1", XContentFactory.jsonBuilder() + ParsedDocument doc = mapper.parse(SourceToParse.source("test", "type", "1", XContentFactory.jsonBuilder() .startObject() .startObject("field") .endObject() - .endObject().bytes()); + .endObject().bytes(), + XContentType.JSON)); IndexableField[] fields = doc.rootDoc().getFields("field"); assertEquals(2, fields.length); diff --git a/core/src/test/java/org/elasticsearch/index/mapper/RangeFieldTypeTests.java b/core/src/test/java/org/elasticsearch/index/mapper/RangeFieldTypeTests.java index f70db120fcfc3..ce6f7246ff527 100644 --- a/core/src/test/java/org/elasticsearch/index/mapper/RangeFieldTypeTests.java +++ b/core/src/test/java/org/elasticsearch/index/mapper/RangeFieldTypeTests.java @@ -19,10 +19,10 @@ package org.elasticsearch.index.mapper; import com.carrotsearch.randomizedtesting.generators.RandomPicks; -import org.apache.lucene.document.DoubleRangeField; -import org.apache.lucene.document.FloatRangeField; -import org.apache.lucene.document.IntRangeField; -import org.apache.lucene.document.LongRangeField; +import org.apache.lucene.document.DoubleRange; +import org.apache.lucene.document.FloatRange; +import org.apache.lucene.document.IntRange; +import org.apache.lucene.document.LongRange; import org.apache.lucene.index.IndexOptions; import org.apache.lucene.search.Query; import org.elasticsearch.Version; @@ -113,44 +113,44 @@ private Query getIntRangeQuery(ShapeRelation relation, int from, int to, boolean int[] lower = new int[] {from + (includeLower ? 0 : 1)}; int[] upper = new int[] {to - (includeUpper ? 0 : 1)}; if (relation == ShapeRelation.WITHIN) { - return IntRangeField.newWithinQuery(FIELDNAME, lower, upper); + return IntRange.newWithinQuery(FIELDNAME, lower, upper); } else if (relation == ShapeRelation.CONTAINS) { - return IntRangeField.newContainsQuery(FIELDNAME, lower, upper); + return IntRange.newContainsQuery(FIELDNAME, lower, upper); } - return IntRangeField.newIntersectsQuery(FIELDNAME, lower, upper); + return IntRange.newIntersectsQuery(FIELDNAME, lower, upper); } private Query getLongRangeQuery(ShapeRelation relation, long from, long to, boolean includeLower, boolean includeUpper) { long[] lower = new long[] {from + (includeLower ? 0 : 1)}; long[] upper = new long[] {to - (includeUpper ? 
0 : 1)}; if (relation == ShapeRelation.WITHIN) { - return LongRangeField.newWithinQuery(FIELDNAME, lower, upper); + return LongRange.newWithinQuery(FIELDNAME, lower, upper); } else if (relation == ShapeRelation.CONTAINS) { - return LongRangeField.newContainsQuery(FIELDNAME, lower, upper); + return LongRange.newContainsQuery(FIELDNAME, lower, upper); } - return LongRangeField.newIntersectsQuery(FIELDNAME, lower, upper); + return LongRange.newIntersectsQuery(FIELDNAME, lower, upper); } private Query getFloatRangeQuery(ShapeRelation relation, float from, float to, boolean includeLower, boolean includeUpper) { float[] lower = new float[] {includeLower ? from : Math.nextUp(from)}; float[] upper = new float[] {includeUpper ? to : Math.nextDown(to)}; if (relation == ShapeRelation.WITHIN) { - return FloatRangeField.newWithinQuery(FIELDNAME, lower, upper); + return FloatRange.newWithinQuery(FIELDNAME, lower, upper); } else if (relation == ShapeRelation.CONTAINS) { - return FloatRangeField.newContainsQuery(FIELDNAME, lower, upper); + return FloatRange.newContainsQuery(FIELDNAME, lower, upper); } - return FloatRangeField.newIntersectsQuery(FIELDNAME, lower, upper); + return FloatRange.newIntersectsQuery(FIELDNAME, lower, upper); } private Query getDoubleRangeQuery(ShapeRelation relation, double from, double to, boolean includeLower, boolean includeUpper) { double[] lower = new double[] {includeLower ? from : Math.nextUp(from)}; double[] upper = new double[] {includeUpper ? to : Math.nextDown(to)}; if (relation == ShapeRelation.WITHIN) { - return DoubleRangeField.newWithinQuery(FIELDNAME, lower, upper); + return DoubleRange.newWithinQuery(FIELDNAME, lower, upper); } else if (relation == ShapeRelation.CONTAINS) { - return DoubleRangeField.newContainsQuery(FIELDNAME, lower, upper); + return DoubleRange.newContainsQuery(FIELDNAME, lower, upper); } - return DoubleRangeField.newIntersectsQuery(FIELDNAME, lower, upper); + return DoubleRange.newIntersectsQuery(FIELDNAME, lower, upper); } private Object nextFrom() { diff --git a/core/src/test/java/org/elasticsearch/index/mapper/RoutingFieldMapperTests.java b/core/src/test/java/org/elasticsearch/index/mapper/RoutingFieldMapperTests.java index 9c26a9806e331..fb98f42f105eb 100644 --- a/core/src/test/java/org/elasticsearch/index/mapper/RoutingFieldMapperTests.java +++ b/core/src/test/java/org/elasticsearch/index/mapper/RoutingFieldMapperTests.java @@ -41,7 +41,8 @@ public void testRoutingMapper() throws Exception { .startObject() .field("field", "value") .endObject() - .bytes(), XContentType.JSON).routing("routing_value")); + .bytes(), + XContentType.JSON).routing("routing_value")); assertThat(doc.rootDoc().get("_routing"), equalTo("routing_value")); assertThat(doc.rootDoc().get("field"), equalTo("value")); @@ -52,8 +53,8 @@ public void testIncludeInObjectNotAllowed() throws Exception { DocumentMapper docMapper = createIndex("test").mapperService().documentMapperParser().parse("type", new CompressedXContent(mapping)); try { - docMapper.parse("test", "type", "1", XContentFactory.jsonBuilder() - .startObject().field("_routing", "foo").endObject().bytes()); + docMapper.parse(SourceToParse.source("test", "type", "1", XContentFactory.jsonBuilder() + .startObject().field("_routing", "foo").endObject().bytes(),XContentType.JSON)); fail("Expected failure to parse metadata field"); } catch (MapperParsingException e) { assertTrue(e.getMessage(), e.getMessage().contains("Field [_routing] is a metadata field and cannot be added inside a document")); diff --git 
a/core/src/test/java/org/elasticsearch/index/mapper/ScaledFloatFieldMapperTests.java b/core/src/test/java/org/elasticsearch/index/mapper/ScaledFloatFieldMapperTests.java index 4163378db017a..c2d0317ea070f 100644 --- a/core/src/test/java/org/elasticsearch/index/mapper/ScaledFloatFieldMapperTests.java +++ b/core/src/test/java/org/elasticsearch/index/mapper/ScaledFloatFieldMapperTests.java @@ -23,6 +23,7 @@ import org.apache.lucene.index.IndexableField; import org.elasticsearch.common.compress.CompressedXContent; import org.elasticsearch.common.xcontent.XContentFactory; +import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.index.IndexService; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.test.ESSingleNodeTestCase; @@ -60,11 +61,12 @@ public void testDefaults() throws Exception { assertEquals(mapping, mapper.mappingSource().toString()); - ParsedDocument doc = mapper.parse("test", "type", "1", XContentFactory.jsonBuilder() + ParsedDocument doc = mapper.parse(SourceToParse.source("test", "type", "1", XContentFactory.jsonBuilder() .startObject() .field("field", 123) .endObject() - .bytes()); + .bytes(), + XContentType.JSON)); IndexableField[] fields = doc.rootDoc().getFields("field"); assertEquals(2, fields.length); @@ -109,11 +111,12 @@ public void testNotIndexed() throws Exception { assertEquals(mapping, mapper.mappingSource().toString()); - ParsedDocument doc = mapper.parse("test", "type", "1", XContentFactory.jsonBuilder() + ParsedDocument doc = mapper.parse(SourceToParse.source("test", "type", "1", XContentFactory.jsonBuilder() .startObject() .field("field", 123) .endObject() - .bytes()); + .bytes(), + XContentType.JSON)); IndexableField[] fields = doc.rootDoc().getFields("field"); assertEquals(1, fields.length); @@ -132,11 +135,12 @@ public void testNoDocValues() throws Exception { assertEquals(mapping, mapper.mappingSource().toString()); - ParsedDocument doc = mapper.parse("test", "type", "1", XContentFactory.jsonBuilder() + ParsedDocument doc = mapper.parse(SourceToParse.source("test", "type", "1", XContentFactory.jsonBuilder() .startObject() .field("field", 123) .endObject() - .bytes()); + .bytes(), + XContentType.JSON)); IndexableField[] fields = doc.rootDoc().getFields("field"); assertEquals(1, fields.length); @@ -155,11 +159,12 @@ public void testStore() throws Exception { assertEquals(mapping, mapper.mappingSource().toString()); - ParsedDocument doc = mapper.parse("test", "type", "1", XContentFactory.jsonBuilder() + ParsedDocument doc = mapper.parse(SourceToParse.source("test", "type", "1", XContentFactory.jsonBuilder() .startObject() .field("field", 123) .endObject() - .bytes()); + .bytes(), + XContentType.JSON)); IndexableField[] fields = doc.rootDoc().getFields("field"); assertEquals(3, fields.length); @@ -183,11 +188,12 @@ public void testCoerce() throws Exception { assertEquals(mapping, mapper.mappingSource().toString()); - ParsedDocument doc = mapper.parse("test", "type", "1", XContentFactory.jsonBuilder() + ParsedDocument doc = mapper.parse(SourceToParse.source("test", "type", "1", XContentFactory.jsonBuilder() .startObject() .field("field", "123") .endObject() - .bytes()); + .bytes(), + XContentType.JSON)); IndexableField[] fields = doc.rootDoc().getFields("field"); assertEquals(2, fields.length); @@ -206,11 +212,12 @@ public void testCoerce() throws Exception { assertEquals(mapping, mapper2.mappingSource().toString()); - ThrowingRunnable runnable = () -> mapper2.parse("test", "type", "1", XContentFactory.jsonBuilder() + 
ThrowingRunnable runnable = () -> mapper2.parse(SourceToParse.source("test", "type", "1", XContentFactory.jsonBuilder() .startObject() .field("field", "123") .endObject() - .bytes()); + .bytes(), + XContentType.JSON)); MapperParsingException e = expectThrows(MapperParsingException.class, runnable); assertThat(e.getCause().getMessage(), containsString("passed as String")); } @@ -225,11 +232,12 @@ public void testIgnoreMalformed() throws Exception { assertEquals(mapping, mapper.mappingSource().toString()); - ThrowingRunnable runnable = () -> mapper.parse("test", "type", "1", XContentFactory.jsonBuilder() + ThrowingRunnable runnable = () -> mapper.parse(SourceToParse.source("test", "type", "1", XContentFactory.jsonBuilder() .startObject() .field("field", "a") .endObject() - .bytes()); + .bytes(), + XContentType.JSON)); MapperParsingException e = expectThrows(MapperParsingException.class, runnable); assertThat(e.getCause().getMessage(), containsString("For input string: \"a\"")); @@ -240,11 +248,12 @@ public void testIgnoreMalformed() throws Exception { DocumentMapper mapper2 = parser.parse("type", new CompressedXContent(mapping)); - ParsedDocument doc = mapper2.parse("test", "type", "1", XContentFactory.jsonBuilder() + ParsedDocument doc = mapper2.parse(SourceToParse.source("test", "type", "1", XContentFactory.jsonBuilder() .startObject() .field("field", "a") .endObject() - .bytes()); + .bytes(), + XContentType.JSON)); IndexableField[] fields = doc.rootDoc().getFields("field"); assertEquals(0, fields.length); @@ -264,11 +273,12 @@ public void testNullValue() throws IOException { DocumentMapper mapper = parser.parse("type", new CompressedXContent(mapping)); assertEquals(mapping, mapper.mappingSource().toString()); - ParsedDocument doc = mapper.parse("test", "type", "1", XContentFactory.jsonBuilder() + ParsedDocument doc = mapper.parse(SourceToParse.source("test", "type", "1", XContentFactory.jsonBuilder() .startObject() .nullField("field") .endObject() - .bytes()); + .bytes(), + XContentType.JSON)); assertArrayEquals(new IndexableField[0], doc.rootDoc().getFields("field")); mapping = XContentFactory.jsonBuilder().startObject() @@ -285,11 +295,12 @@ public void testNullValue() throws IOException { mapper = parser.parse("type", new CompressedXContent(mapping)); assertEquals(mapping, mapper.mappingSource().toString()); - doc = mapper.parse("test", "type", "1", XContentFactory.jsonBuilder() + doc = mapper.parse(SourceToParse.source("test", "type", "1", XContentFactory.jsonBuilder() .startObject() .nullField("field") .endObject() - .bytes()); + .bytes(), + XContentType.JSON)); IndexableField[] fields = doc.rootDoc().getFields("field"); assertEquals(2, fields.length); IndexableField pointField = fields[0]; diff --git a/core/src/test/java/org/elasticsearch/index/mapper/SourceFieldMapperTests.java b/core/src/test/java/org/elasticsearch/index/mapper/SourceFieldMapperTests.java index 5803d7d957e1d..83594d982577c 100644 --- a/core/src/test/java/org/elasticsearch/index/mapper/SourceFieldMapperTests.java +++ b/core/src/test/java/org/elasticsearch/index/mapper/SourceFieldMapperTests.java @@ -51,16 +51,18 @@ public void testNoFormat() throws Exception { DocumentMapperParser parser = createIndex("test").mapperService().documentMapperParser(); DocumentMapper documentMapper = parser.parse("type", new CompressedXContent(mapping)); - ParsedDocument doc = documentMapper.parse("test", "type", "1", XContentFactory.jsonBuilder().startObject() + ParsedDocument doc = documentMapper.parse(SourceToParse.source("test", 
"type", "1", XContentFactory.jsonBuilder().startObject() .field("field", "value") - .endObject().bytes()); + .endObject().bytes(), + XContentType.JSON)); assertThat(XContentFactory.xContentType(doc.source()), equalTo(XContentType.JSON)); documentMapper = parser.parse("type", new CompressedXContent(mapping)); - doc = documentMapper.parse("test", "type", "1", XContentFactory.smileBuilder().startObject() + doc = documentMapper.parse(SourceToParse.source("test", "type", "1", XContentFactory.smileBuilder().startObject() .field("field", "value") - .endObject().bytes()); + .endObject().bytes(), + XContentType.JSON)); assertThat(XContentFactory.xContentType(doc.source()), equalTo(XContentType.SMILE)); } @@ -72,10 +74,11 @@ public void testIncludes() throws Exception { DocumentMapper documentMapper = createIndex("test").mapperService().documentMapperParser().parse("type", new CompressedXContent(mapping)); - ParsedDocument doc = documentMapper.parse("test", "type", "1", XContentFactory.jsonBuilder().startObject() + ParsedDocument doc = documentMapper.parse(SourceToParse.source("test", "type", "1", XContentFactory.jsonBuilder().startObject() .startObject("path1").field("field1", "value1").endObject() .startObject("path2").field("field2", "value2").endObject() - .endObject().bytes()); + .endObject().bytes(), + XContentType.JSON)); IndexableField sourceField = doc.rootDoc().getField("_source"); Map sourceAsMap; @@ -93,10 +96,11 @@ public void testExcludes() throws Exception { DocumentMapper documentMapper = createIndex("test").mapperService().documentMapperParser().parse("type", new CompressedXContent(mapping)); - ParsedDocument doc = documentMapper.parse("test", "type", "1", XContentFactory.jsonBuilder().startObject() + ParsedDocument doc = documentMapper.parse(SourceToParse.source("test", "type", "1", XContentFactory.jsonBuilder().startObject() .startObject("path1").field("field1", "value1").endObject() .startObject("path2").field("field2", "value2").endObject() - .endObject().bytes()); + .endObject().bytes(), + XContentType.JSON)); IndexableField sourceField = doc.rootDoc().getField("_source"); Map sourceAsMap; @@ -276,7 +280,7 @@ public void testSourceObjectContainsExtraTokens() throws Exception { DocumentMapper documentMapper = createIndex("test").mapperService().documentMapperParser().parse("type", new CompressedXContent(mapping)); try { - documentMapper.parse("test", "type", "1", new BytesArray("{}}")); // extra end object (invalid JSON) + documentMapper.parse(SourceToParse.source("test", "type", "1", new BytesArray("{}}"), XContentType.JSON)); // extra end object (invalid JSON) fail("Expected parse exception"); } catch (MapperParsingException e) { assertNotNull(e.getRootCause()); diff --git a/core/src/test/java/org/elasticsearch/index/mapper/StoredNumericValuesTests.java b/core/src/test/java/org/elasticsearch/index/mapper/StoredNumericValuesTests.java index 47b062a42dff0..438ccd5fa8688 100644 --- a/core/src/test/java/org/elasticsearch/index/mapper/StoredNumericValuesTests.java +++ b/core/src/test/java/org/elasticsearch/index/mapper/StoredNumericValuesTests.java @@ -27,6 +27,7 @@ import org.elasticsearch.common.compress.CompressedXContent; import org.elasticsearch.common.lucene.Lucene; import org.elasticsearch.common.xcontent.XContentFactory; +import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.index.fieldvisitor.CustomFieldsVisitor; import org.elasticsearch.index.mapper.DocumentMapper; import org.elasticsearch.index.mapper.MapperService; @@ -63,7 +64,7 @@ public void 
testBytesAndNumericRepresentation() throws Exception { MapperService mapperService = createIndex("test").mapperService(); DocumentMapper mapper = mapperService.merge("type", new CompressedXContent(mapping), MergeReason.MAPPING_UPDATE, false); - ParsedDocument doc = mapper.parse("test", "type", "1", XContentFactory.jsonBuilder() + ParsedDocument doc = mapper.parse(SourceToParse.source("test", "type", "1", XContentFactory.jsonBuilder() .startObject() .field("field1", 1) .field("field2", 1) @@ -76,7 +77,8 @@ public void testBytesAndNumericRepresentation() throws Exception { .field("field9", "2016-04-05") .field("field10", true) .endObject() - .bytes()); + .bytes(), + XContentType.JSON)); writer.addDocument(doc.rootDoc()); diff --git a/core/src/test/java/org/elasticsearch/index/mapper/TextFieldMapperTests.java b/core/src/test/java/org/elasticsearch/index/mapper/TextFieldMapperTests.java index 4df4361db6a7a..e2dc7bc7a094c 100644 --- a/core/src/test/java/org/elasticsearch/index/mapper/TextFieldMapperTests.java +++ b/core/src/test/java/org/elasticsearch/index/mapper/TextFieldMapperTests.java @@ -32,6 +32,7 @@ import org.elasticsearch.common.xcontent.ToXContent; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentFactory; +import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.index.IndexService; import org.elasticsearch.index.engine.Engine; import org.elasticsearch.index.mapper.MapperService.MergeReason; @@ -77,11 +78,12 @@ public void testDefaults() throws Exception { assertEquals(mapping, mapper.mappingSource().toString()); - ParsedDocument doc = mapper.parse("test", "type", "1", XContentFactory.jsonBuilder() + ParsedDocument doc = mapper.parse(SourceToParse.source("test", "type", "1", XContentFactory.jsonBuilder() .startObject() .field("field", "1234") .endObject() - .bytes()); + .bytes(), + XContentType.JSON)); IndexableField[] fields = doc.rootDoc().getFields("field"); assertEquals(1, fields.length); @@ -108,11 +110,12 @@ public void testEnableStore() throws IOException { assertEquals(mapping, mapper.mappingSource().toString()); - ParsedDocument doc = mapper.parse("test", "type", "1", XContentFactory.jsonBuilder() + ParsedDocument doc = mapper.parse(SourceToParse.source("test", "type", "1", XContentFactory.jsonBuilder() .startObject() .field("field", "1234") .endObject() - .bytes()); + .bytes(), + XContentType.JSON)); IndexableField[] fields = doc.rootDoc().getFields("field"); assertEquals(1, fields.length); @@ -128,11 +131,12 @@ public void testDisableIndex() throws IOException { assertEquals(mapping, mapper.mappingSource().toString()); - ParsedDocument doc = mapper.parse("test", "type", "1", XContentFactory.jsonBuilder() + ParsedDocument doc = mapper.parse(SourceToParse.source("test", "type", "1", XContentFactory.jsonBuilder() .startObject() .field("field", "1234") .endObject() - .bytes()); + .bytes(), + XContentType.JSON)); IndexableField[] fields = doc.rootDoc().getFields("field"); assertEquals(0, fields.length); @@ -150,11 +154,12 @@ public void testDisableNorms() throws IOException { assertEquals(mapping, mapper.mappingSource().toString()); - ParsedDocument doc = mapper.parse("test", "type", "1", XContentFactory.jsonBuilder() + ParsedDocument doc = mapper.parse(SourceToParse.source("test", "type", "1", XContentFactory.jsonBuilder() .startObject() .field("field", "1234") .endObject() - .bytes()); + .bytes(), + XContentType.JSON)); IndexableField[] fields = doc.rootDoc().getFields("field"); assertEquals(1, 
fields.length); @@ -180,7 +185,8 @@ public void testIndexOptions() throws IOException { for (String option : supportedOptions.keySet()) { jsonDoc.field(option, "1234"); } - ParsedDocument doc = mapper.parse("test", "type", "1", jsonDoc.endObject().bytes()); + ParsedDocument doc = mapper.parse(SourceToParse.source("test", "type", "1", jsonDoc.endObject().bytes(), + XContentType.JSON)); for (Map.Entry entry : supportedOptions.entrySet()) { String field = entry.getKey(); @@ -201,11 +207,12 @@ public void testDefaultPositionIncrementGap() throws IOException { assertEquals(mapping, mapper.mappingSource().toString()); - ParsedDocument doc = mapper.parse("test", "type", "1", XContentFactory.jsonBuilder() + ParsedDocument doc = mapper.parse(SourceToParse.source("test", "type", "1", XContentFactory.jsonBuilder() .startObject() .array("field", new String[] {"a", "b"}) .endObject() - .bytes()); + .bytes(), + XContentType.JSON)); IndexableField[] fields = doc.rootDoc().getFields("field"); assertEquals(2, fields.length); @@ -240,11 +247,12 @@ public void testPositionIncrementGap() throws IOException { assertEquals(mapping, mapper.mappingSource().toString()); - ParsedDocument doc = mapper.parse("test", "type", "1", XContentFactory.jsonBuilder() + ParsedDocument doc = mapper.parse(SourceToParse.source("test", "type", "1", XContentFactory.jsonBuilder() .startObject() .array("field", new String[] {"a", "b"}) .endObject() - .bytes()); + .bytes(), + XContentType.JSON)); IndexableField[] fields = doc.rootDoc().getFields("field"); assertEquals(2, fields.length); @@ -396,7 +404,7 @@ public void testTermVectors() throws Exception { DocumentMapper defaultMapper = parser.parse("type", new CompressedXContent(mapping)); - ParsedDocument doc = defaultMapper.parse("test", "type", "1", XContentFactory.jsonBuilder() + ParsedDocument doc = defaultMapper.parse(SourceToParse.source("test", "type", "1", XContentFactory.jsonBuilder() .startObject() .field("field1", "1234") .field("field2", "1234") @@ -405,7 +413,8 @@ public void testTermVectors() throws Exception { .field("field5", "1234") .field("field6", "1234") .endObject() - .bytes()); + .bytes(), + XContentType.JSON)); assertThat(doc.rootDoc().getField("field1").fieldType().storeTermVectors(), equalTo(false)); assertThat(doc.rootDoc().getField("field1").fieldType().storeTermVectorOffsets(), equalTo(false)); diff --git a/core/src/test/java/org/elasticsearch/index/query/FuzzyQueryBuilderTests.java b/core/src/test/java/org/elasticsearch/index/query/FuzzyQueryBuilderTests.java index d5fbf05e26977..39612c6d37c6b 100644 --- a/core/src/test/java/org/elasticsearch/index/query/FuzzyQueryBuilderTests.java +++ b/core/src/test/java/org/elasticsearch/index/query/FuzzyQueryBuilderTests.java @@ -190,4 +190,17 @@ public void testParseFailsWithMultipleFields() throws IOException { e = expectThrows(ParsingException.class, () -> parseQuery(shortJson)); assertEquals("[fuzzy] query doesn't support multiple fields, found [message1] and [message2]", e.getMessage()); } + + public void testParseFailsWithValueArray() { + String query = "{\n" + + " \"fuzzy\" : {\n" + + " \"message1\" : {\n" + + " \"value\" : [ \"one\", \"two\", \"three\"]\n" + + " }\n" + + " }\n" + + "}"; + + ParsingException e = expectThrows(ParsingException.class, () -> parseQuery(query)); + assertEquals("[fuzzy] unexpected token [START_ARRAY] after [value]", e.getMessage()); + } } diff --git a/core/src/test/java/org/elasticsearch/index/query/functionscore/FunctionScoreTests.java 
b/core/src/test/java/org/elasticsearch/index/query/functionscore/FunctionScoreTests.java index 471a02feb4723..16e66f50e0d1c 100644 --- a/core/src/test/java/org/elasticsearch/index/query/functionscore/FunctionScoreTests.java +++ b/core/src/test/java/org/elasticsearch/index/query/functionscore/FunctionScoreTests.java @@ -37,6 +37,7 @@ import org.apache.lucene.search.TermQuery; import org.apache.lucene.search.TopDocs; import org.apache.lucene.search.Weight; +import org.apache.lucene.search.SortField; import org.apache.lucene.store.Directory; import org.apache.lucene.util.Accountable; import org.apache.lucene.util.BytesRef; @@ -135,8 +136,7 @@ public AtomicFieldData loadDirect(LeafReaderContext context) throws Exception { } @Override - public IndexFieldData.XFieldComparatorSource comparatorSource(@Nullable Object missingValue, MultiValueMode sortMode, - IndexFieldData.XFieldComparatorSource.Nested nested) { + public SortField sortField(@Nullable Object missingValue, MultiValueMode sortMode, XFieldComparatorSource.Nested nested, boolean reverse) { throw new UnsupportedOperationException(UNSUPPORTED); } @@ -225,8 +225,7 @@ public AtomicNumericFieldData loadDirect(LeafReaderContext context) throws Excep } @Override - public XFieldComparatorSource comparatorSource(@Nullable Object missingValue, MultiValueMode sortMode, - XFieldComparatorSource.Nested nested) { + public SortField sortField(@Nullable Object missingValue, MultiValueMode sortMode, XFieldComparatorSource.Nested nested, boolean reverse) { throw new UnsupportedOperationException(UNSUPPORTED); } diff --git a/core/src/test/java/org/elasticsearch/index/search/nested/NestedSortingTests.java b/core/src/test/java/org/elasticsearch/index/search/nested/NestedSortingTests.java index b0999b2fa33f2..c9c48a9f969bd 100644 --- a/core/src/test/java/org/elasticsearch/index/search/nested/NestedSortingTests.java +++ b/core/src/test/java/org/elasticsearch/index/search/nested/NestedSortingTests.java @@ -113,9 +113,9 @@ public void testDuel() throws Exception { private TopDocs getTopDocs(IndexSearcher searcher, IndexFieldData indexFieldData, String missingValue, MultiValueMode sortMode, int n, boolean reverse) throws IOException { Query parentFilter = new TermQuery(new Term("__type", "parent")); Query childFilter = new TermQuery(new Term("__type", "child")); - XFieldComparatorSource nestedComparatorSource = indexFieldData.comparatorSource(missingValue, sortMode, createNested(searcher, parentFilter, childFilter)); + SortField sortField = indexFieldData.sortField(missingValue, sortMode, createNested(searcher, parentFilter, childFilter), reverse); Query query = new ConstantScoreQuery(parentFilter); - Sort sort = new Sort(new SortField("f", nestedComparatorSource, reverse)); + Sort sort = new Sort(sortField); return searcher.search(query, n, sort); } diff --git a/core/src/test/java/org/elasticsearch/index/shard/IndexShardIT.java b/core/src/test/java/org/elasticsearch/index/shard/IndexShardIT.java index 97c96c8af12f7..3313736ffcfe3 100644 --- a/core/src/test/java/org/elasticsearch/index/shard/IndexShardIT.java +++ b/core/src/test/java/org/elasticsearch/index/shard/IndexShardIT.java @@ -363,49 +363,105 @@ public void testMaybeFlush() throws Exception { assertEquals(0, shard.getEngine().getTranslog().totalOperations()); } - public void testStressMaybeFlush() throws Exception { + public void testMaybeRollTranslogGeneration() throws Exception { + final int generationThreshold = randomIntBetween(64, 512); + final Settings settings = + Settings + .builder() + 
.put("index.number_of_shards", 1) + .put("index.translog.generation_threshold_size", generationThreshold + "b") + .put() + .build(); + createIndex("test", settings); + ensureGreen("test"); + final IndicesService indicesService = getInstanceFromNode(IndicesService.class); + final IndexService test = indicesService.indexService(resolveIndex("test")); + final IndexShard shard = test.getShardOrNull(0); + int rolls = 0; + final Translog translog = shard.getEngine().getTranslog(); + final long generation = translog.currentFileGeneration(); + final int numberOfDocuments = randomIntBetween(32, 128); + for (int i = 0; i < numberOfDocuments; i++) { + assertThat(translog.currentFileGeneration(), equalTo(generation + rolls)); + final ParsedDocument doc = testParsedDocument( + "1", + "test", + null, + SequenceNumbersService.UNASSIGNED_SEQ_NO, + new ParseContext.Document(), + new BytesArray(new byte[]{1}), XContentType.JSON, null); + final Engine.Index index = new Engine.Index(new Term("_uid", doc.uid()), doc); + final Engine.IndexResult result = shard.index(index); + final Translog.Location location = result.getTranslogLocation(); + shard.afterWriteOperation(); + if (location.translogLocation + location.size > generationThreshold) { + // wait until the roll completes + assertBusy(() -> assertFalse(shard.shouldRollTranslogGeneration())); + rolls++; + assertThat(translog.currentFileGeneration(), equalTo(generation + rolls)); + } + } + } + + public void testStressMaybeFlushOrRollTranslogGeneration() throws Exception { createIndex("test"); ensureGreen(); IndicesService indicesService = getInstanceFromNode(IndicesService.class); IndexService test = indicesService.indexService(resolveIndex("test")); final IndexShard shard = test.getShardOrNull(0); assertFalse(shard.shouldFlush()); - client().admin().indices().prepareUpdateSettings("test").setSettings(Settings.builder().put( - IndexSettings.INDEX_TRANSLOG_FLUSH_THRESHOLD_SIZE_SETTING.getKey(), - new ByteSizeValue(117/* size of the operation + header&footer*/, ByteSizeUnit.BYTES)).build()).get(); - client().prepareIndex("test", "test", "0").setSource("{}", XContentType.JSON) - .setRefreshPolicy(randomBoolean() ? IMMEDIATE : NONE).get(); + final String key; + final boolean flush = randomBoolean(); + if (flush) { + key = "index.translog.flush_threshold_size"; + } else { + key = "index.translog.generation_threshold_size"; + } + // size of the operation plus header and footer + final Settings settings = Settings.builder().put(key, "117b").build(); + client().admin().indices().prepareUpdateSettings("test").setSettings(settings).get(); + client().prepareIndex("test", "test", "0") + .setSource("{}", XContentType.JSON) + .setRefreshPolicy(randomBoolean() ? 
IMMEDIATE : NONE) + .get(); assertFalse(shard.shouldFlush()); final AtomicBoolean running = new AtomicBoolean(true); final int numThreads = randomIntBetween(2, 4); - Thread[] threads = new Thread[numThreads]; - CyclicBarrier barrier = new CyclicBarrier(numThreads + 1); + final Thread[] threads = new Thread[numThreads]; + final CyclicBarrier barrier = new CyclicBarrier(numThreads + 1); for (int i = 0; i < threads.length; i++) { - threads[i] = new Thread() { - @Override - public void run() { - try { - barrier.await(); - } catch (InterruptedException | BrokenBarrierException e) { - throw new RuntimeException(e); - } - while (running.get()) { - shard.maybeFlush(); - } + threads[i] = new Thread(() -> { + try { + barrier.await(); + } catch (final InterruptedException | BrokenBarrierException e) { + throw new RuntimeException(e); + } + while (running.get()) { + shard.afterWriteOperation(); } - }; + }); threads[i].start(); } barrier.await(); - FlushStats flushStats = shard.flushStats(); - long total = flushStats.getTotal(); - client().prepareIndex("test", "test", "1").setSource("{}", XContentType.JSON).get(); - assertBusy(() -> assertEquals(total + 1, shard.flushStats().getTotal())); + final Runnable check; + if (flush) { + final FlushStats flushStats = shard.flushStats(); + final long total = flushStats.getTotal(); + client().prepareIndex("test", "test", "1").setSource("{}", XContentType.JSON).get(); + check = () -> assertEquals(total + 1, shard.flushStats().getTotal()); + } else { + final long generation = shard.getEngine().getTranslog().currentFileGeneration(); + client().prepareIndex("test", "test", "1").setSource("{}", XContentType.JSON).get(); + check = () -> assertEquals( + generation + 1, + shard.getEngine().getTranslog().currentFileGeneration()); + } + assertBusy(check); running.set(false); for (int i = 0; i < threads.length; i++) { threads[i].join(); } - assertEquals(total + 1, shard.flushStats().getTotal()); + check.run(); } public void testShardHasMemoryBufferOnTranslogRecover() throws Throwable { diff --git a/core/src/test/java/org/elasticsearch/index/similarity/SimilarityTests.java b/core/src/test/java/org/elasticsearch/index/similarity/SimilarityTests.java index 653eea4196c61..e9183876aecb6 100644 --- a/core/src/test/java/org/elasticsearch/index/similarity/SimilarityTests.java +++ b/core/src/test/java/org/elasticsearch/index/similarity/SimilarityTests.java @@ -19,6 +19,7 @@ package org.elasticsearch.index.similarity; +import org.apache.lucene.search.similarities.BooleanSimilarity; import org.apache.lucene.search.similarities.ClassicSimilarity; import org.apache.lucene.search.similarities.DFISimilarity; import org.apache.lucene.search.similarities.AfterEffectL; @@ -64,6 +65,7 @@ public void testResolveDefaultSimilarities() { SimilarityService similarityService = createIndex("foo").similarityService(); assertThat(similarityService.getSimilarity("classic").get(), instanceOf(ClassicSimilarity.class)); assertThat(similarityService.getSimilarity("BM25").get(), instanceOf(BM25Similarity.class)); + assertThat(similarityService.getSimilarity("boolean").get(), instanceOf(BooleanSimilarity.class)); assertThat(similarityService.getSimilarity("default"), equalTo(null)); } @@ -109,6 +111,21 @@ public void testResolveSimilaritiesFromMapping_bm25() throws IOException { assertThat(similarity.getDiscountOverlaps(), equalTo(false)); } + public void testResolveSimilaritiesFromMapping_boolean() throws IOException { + String mapping = XContentFactory.jsonBuilder().startObject().startObject("type") + 
.startObject("properties") + .startObject("field1").field("type", "text").field("similarity", "boolean").endObject() + .endObject() + .endObject().endObject().string(); + + IndexService indexService = createIndex("foo", Settings.EMPTY); + DocumentMapper documentMapper = indexService.mapperService() + .documentMapperParser() + .parse("type", new CompressedXContent(mapping)); + assertThat(documentMapper.mappers().getMapper("field1").fieldType().similarity(), + instanceOf(BooleanSimilarityProvider.class)); + } + public void testResolveSimilaritiesFromMapping_DFR() throws IOException { String mapping = XContentFactory.jsonBuilder().startObject().startObject("type") .startObject("properties") diff --git a/core/src/test/java/org/elasticsearch/index/translog/TranslogTests.java b/core/src/test/java/org/elasticsearch/index/translog/TranslogTests.java index e47a5652b2431..36401deed4bc2 100644 --- a/core/src/test/java/org/elasticsearch/index/translog/TranslogTests.java +++ b/core/src/test/java/org/elasticsearch/index/translog/TranslogTests.java @@ -41,16 +41,18 @@ import org.elasticsearch.common.io.FileSystemUtils; import org.elasticsearch.common.io.stream.BytesStreamOutput; import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.lease.Releasable; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.ByteSizeUnit; import org.elasticsearch.common.unit.ByteSizeValue; -import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.common.util.concurrent.AbstractRunnable; import org.elasticsearch.common.util.concurrent.ConcurrentCollections; +import org.elasticsearch.common.util.concurrent.ReleasableLock; import org.elasticsearch.common.xcontent.ToXContent; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.VersionType; import org.elasticsearch.index.engine.Engine; import org.elasticsearch.index.engine.Engine.Operation.Origin; @@ -100,6 +102,7 @@ import java.util.concurrent.atomic.AtomicReference; import java.util.stream.Collectors; +import static org.elasticsearch.common.util.BigArrays.NON_RECYCLING_INSTANCE; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThan; @@ -156,12 +159,25 @@ private Translog create(Path path) throws IOException { return new Translog(getTranslogConfig(path), null, () -> globalCheckpoint.get()); } - private TranslogConfig getTranslogConfig(Path path) { - Settings build = Settings.builder() - .put(IndexMetaData.SETTING_VERSION_CREATED, org.elasticsearch.Version.CURRENT) - .build(); - ByteSizeValue bufferSize = randomBoolean() ? 
TranslogConfig.DEFAULT_BUFFER_SIZE : new ByteSizeValue(10 + randomInt(128 * 1024), ByteSizeUnit.BYTES); - return new TranslogConfig(shardId, path, IndexSettingsModule.newIndexSettings(shardId.getIndex(), build), BigArrays.NON_RECYCLING_INSTANCE, bufferSize); + private TranslogConfig getTranslogConfig(final Path path) { + final Settings settings = Settings + .builder() + .put(IndexMetaData.SETTING_VERSION_CREATED, org.elasticsearch.Version.CURRENT) + .build(); + return getTranslogConfig(path, settings); + } + + private TranslogConfig getTranslogConfig(final Path path, final Settings settings) { + final ByteSizeValue bufferSize; + if (randomBoolean()) { + bufferSize = TranslogConfig.DEFAULT_BUFFER_SIZE; + } else { + bufferSize = new ByteSizeValue(10 + randomInt(128 * 1024), ByteSizeUnit.BYTES); + } + + final IndexSettings indexSettings = + IndexSettingsModule.newIndexSettings(shardId.getIndex(), settings); + return new TranslogConfig(shardId, path, indexSettings, NON_RECYCLING_INSTANCE, bufferSize); } protected void addToTranslogAndList(Translog translog, ArrayList list, Translog.Operation op) throws IOException { @@ -2073,4 +2089,93 @@ public void testTranslogOpSerialization() throws Exception { Translog.Delete serializedDelete = new Translog.Delete(in); assertEquals(delete, serializedDelete); } + + public void testRollGeneration() throws IOException { + final long generation = translog.currentFileGeneration(); + final int rolls = randomIntBetween(1, 16); + int totalOperations = 0; + int seqNo = 0; + for (int i = 0; i < rolls; i++) { + final int operations = randomIntBetween(1, 128); + for (int j = 0; j < operations; j++) { + translog.add(new Translog.NoOp(seqNo++, 0, "test")); + totalOperations++; + } + try (ReleasableLock ignored = translog.writeLock.acquire()) { + translog.rollGeneration(); + } + assertThat(translog.currentFileGeneration(), equalTo(generation + i + 1)); + assertThat(translog.totalOperations(), equalTo(totalOperations)); + } + for (int i = 0; i <= rolls; i++) { + assertFileIsPresent(translog, generation + i); + } + translog.commit(); + assertThat(translog.currentFileGeneration(), equalTo(generation + rolls + 1)); + assertThat(translog.totalOperations(), equalTo(0)); + for (int i = 0; i <= rolls; i++) { + assertFileDeleted(translog, generation + i); + } + assertFileIsPresent(translog, generation + rolls + 1); + } + + public void testRollGenerationBetweenPrepareCommitAndCommit() throws IOException { + final long generation = translog.currentFileGeneration(); + int seqNo = 0; + + final int rollsBefore = randomIntBetween(0, 16); + for (int r = 1; r <= rollsBefore; r++) { + final int operationsBefore = randomIntBetween(1, 256); + for (int i = 0; i < operationsBefore; i++) { + translog.add(new Translog.NoOp(seqNo++, 0, "test")); + } + + try (Releasable ignored = translog.writeLock.acquire()) { + translog.rollGeneration(); + } + + assertThat(translog.currentFileGeneration(), equalTo(generation + r)); + for (int i = 0; i <= r; i++) { + assertFileIsPresent(translog, generation + r); + } + } + + assertThat(translog.currentFileGeneration(), equalTo(generation + rollsBefore)); + translog.prepareCommit(); + assertThat(translog.currentFileGeneration(), equalTo(generation + rollsBefore + 1)); + + for (int i = 0; i <= rollsBefore + 1; i++) { + assertFileIsPresent(translog, generation + i); + } + + final int rollsBetween = randomIntBetween(0, 16); + for (int r = 1; r <= rollsBetween; r++) { + final int operationsBetween = randomIntBetween(1, 256); + for (int i = 0; i < operationsBetween; 
i++) { + translog.add(new Translog.NoOp(seqNo++, 0, "test")); + } + + try (Releasable ignored = translog.writeLock.acquire()) { + translog.rollGeneration(); + } + + assertThat( + translog.currentFileGeneration(), + equalTo(generation + rollsBefore + 1 + r)); + for (int i = 0; i <= rollsBefore + 1 + r; i++) { + assertFileIsPresent(translog, generation + i); + } + } + + translog.commit(); + + for (int i = 0; i <= rollsBefore; i++) { + assertFileDeleted(translog, generation + i); + } + for (int i = rollsBefore + 1; i <= rollsBefore + 1 + rollsBetween; i++) { + assertFileIsPresent(translog, generation + i); + } + + } + } diff --git a/core/src/test/java/org/elasticsearch/indices/settings/UpdateSettingsIT.java b/core/src/test/java/org/elasticsearch/indices/settings/UpdateSettingsIT.java index ae6b4588271b4..762d409b6b75e 100644 --- a/core/src/test/java/org/elasticsearch/indices/settings/UpdateSettingsIT.java +++ b/core/src/test/java/org/elasticsearch/indices/settings/UpdateSettingsIT.java @@ -93,7 +93,11 @@ public void testResetDefault() { .admin() .indices() .prepareUpdateSettings("test") - .setSettings(Settings.builder().put("index.refresh_interval", -1).put("index.translog.flush_threshold_size", "1024b")) + .setSettings( + Settings.builder() + .put("index.refresh_interval", -1) + .put("index.translog.flush_threshold_size", "1024b") + .put("index.translog.generation_threshold_size", "4096b")) .execute() .actionGet(); IndexMetaData indexMetaData = client().admin().cluster().prepareState().execute().actionGet().getState().metaData().index("test"); @@ -103,6 +107,7 @@ public void testResetDefault() { if (indexService != null) { assertEquals(indexService.getIndexSettings().getRefreshInterval().millis(), -1); assertEquals(indexService.getIndexSettings().getFlushThresholdSize().getBytes(), 1024); + assertEquals(indexService.getIndexSettings().getGenerationThresholdSize().getBytes(), 4096); } } client() @@ -119,6 +124,7 @@ public void testResetDefault() { if (indexService != null) { assertEquals(indexService.getIndexSettings().getRefreshInterval().millis(), 1000); assertEquals(indexService.getIndexSettings().getFlushThresholdSize().getBytes(), 1024); + assertEquals(indexService.getIndexSettings().getGenerationThresholdSize().getBytes(), 4096); } } } diff --git a/core/src/test/java/org/elasticsearch/monitor/fs/FsProbeTests.java b/core/src/test/java/org/elasticsearch/monitor/fs/FsProbeTests.java index 3d4903e04724d..0db1709e92c15 100644 --- a/core/src/test/java/org/elasticsearch/monitor/fs/FsProbeTests.java +++ b/core/src/test/java/org/elasticsearch/monitor/fs/FsProbeTests.java @@ -36,6 +36,8 @@ import java.util.List; import java.util.Set; import java.util.concurrent.atomic.AtomicReference; +import java.util.function.Function; +import java.util.function.Supplier; import static org.hamcrest.CoreMatchers.equalTo; import static org.hamcrest.Matchers.greaterThan; @@ -93,26 +95,64 @@ public void testFsInfo() throws IOException { } public void testFsInfoOverflow() throws Exception { - FsInfo.Path pathStats = new FsInfo.Path("/foo/bar", null, - randomNonNegativeLong(), randomNonNegativeLong(), randomNonNegativeLong()); - - // While not overflowing, keep adding - FsInfo.Path pathToAdd = new FsInfo.Path("/foo/baz", null, - randomNonNegativeLong(), randomNonNegativeLong(), randomNonNegativeLong()); - while ((pathStats.total + pathToAdd.total) > 0) { - // Add itself as a path, to increase the total bytes until it overflows - logger.info("--> adding {} bytes to {}, will be: {}", pathToAdd.total, pathStats.total, 
pathToAdd.total + pathStats.total); + final FsInfo.Path pathStats = + new FsInfo.Path( + "/foo/bar", + null, + randomNonNegativeLong(), + randomNonNegativeLong(), + randomNonNegativeLong()); + + addUntilOverflow( + pathStats, + p -> p.total, + "total", + () -> new FsInfo.Path("/foo/baz", null, randomNonNegativeLong(), 0, 0)); + + addUntilOverflow( + pathStats, + p -> p.free, + "free", + () -> new FsInfo.Path("/foo/baz", null, 0, randomNonNegativeLong(), 0)); + + addUntilOverflow( + pathStats, + p -> p.available, + "available", + () -> new FsInfo.Path("/foo/baz", null, 0, 0, randomNonNegativeLong())); + + // even after overflowing these should not be negative + assertThat(pathStats.total, greaterThan(0L)); + assertThat(pathStats.free, greaterThan(0L)); + assertThat(pathStats.available, greaterThan(0L)); + } + + private void addUntilOverflow( + final FsInfo.Path pathStats, + final Function<FsInfo.Path, Long> getter, + final String field, + final Supplier<FsInfo.Path> supplier) { + FsInfo.Path pathToAdd = supplier.get(); + while ((getter.apply(pathStats) + getter.apply(pathToAdd)) > 0) { + // add a path to increase the total bytes until it overflows + logger.info( + "--> adding {} bytes to {}, {} will be: {}", + getter.apply(pathToAdd), + getter.apply(pathStats), + field, + getter.apply(pathStats) + getter.apply(pathToAdd)); pathStats.add(pathToAdd); - pathToAdd = new FsInfo.Path("/foo/baz", null, - randomNonNegativeLong(), randomNonNegativeLong(), randomNonNegativeLong()); + pathToAdd = supplier.get(); } - - logger.info("--> adding {} bytes to {}, will be: {}", pathToAdd.total, pathStats.total, pathToAdd.total + pathStats.total); - assertThat(pathStats.total + pathToAdd.total, lessThan(0L)); + // this overflows + logger.info( + "--> adding {} bytes to {}, {} will be: {}", + getter.apply(pathToAdd), + getter.apply(pathStats), + field, + getter.apply(pathStats) + getter.apply(pathToAdd)); + assertThat(getter.apply(pathStats) + getter.apply(pathToAdd), lessThan(0L)); pathStats.add(pathToAdd); - - // Even after overflowing, it should not be negative - assertThat(pathStats.total, greaterThan(0L)); } public void testIoStats() { diff --git a/core/src/test/java/org/elasticsearch/nodesinfo/NodeInfoStreamingTests.java b/core/src/test/java/org/elasticsearch/nodesinfo/NodeInfoStreamingTests.java index 9c36a7a649ad6..51aad14227883 100644 --- a/core/src/test/java/org/elasticsearch/nodesinfo/NodeInfoStreamingTests.java +++ b/core/src/test/java/org/elasticsearch/nodesinfo/NodeInfoStreamingTests.java @@ -143,13 +143,13 @@ private static NodeInfo createNodeInfo() { List<PluginInfo> plugins = new ArrayList<>(); for (int i = 0; i < numPlugins; i++) { plugins.add(new PluginInfo(randomAsciiOfLengthBetween(3, 10), randomAsciiOfLengthBetween(3, 10), - randomAsciiOfLengthBetween(3, 10), randomAsciiOfLengthBetween(3, 10))); + randomAsciiOfLengthBetween(3, 10), randomAsciiOfLengthBetween(3, 10), randomBoolean())); } int numModules = randomIntBetween(0, 5); List<PluginInfo> modules = new ArrayList<>(); for (int i = 0; i < numModules; i++) { modules.add(new PluginInfo(randomAsciiOfLengthBetween(3, 10), randomAsciiOfLengthBetween(3, 10), - randomAsciiOfLengthBetween(3, 10), randomAsciiOfLengthBetween(3, 10))); + randomAsciiOfLengthBetween(3, 10), randomAsciiOfLengthBetween(3, 10), randomBoolean())); } pluginsAndModules = new PluginsAndModules(plugins, modules); } diff --git a/core/src/test/java/org/elasticsearch/plugins/PluginInfoTests.java b/core/src/test/java/org/elasticsearch/plugins/PluginInfoTests.java index 4ad52be8866a8..04afdd5839181 100644 ---
a/core/src/test/java/org/elasticsearch/plugins/PluginInfoTests.java +++ b/core/src/test/java/org/elasticsearch/plugins/PluginInfoTests.java @@ -56,7 +56,7 @@ public void testReadFromPropertiesNameMissing() throws Exception { PluginInfo.readFromProperties(pluginDir); fail("expected missing name exception"); } catch (IllegalArgumentException e) { - assertTrue(e.getMessage().contains("Property [name] is missing in")); + assertTrue(e.getMessage().contains("property [name] is missing in")); } PluginTestUtil.writeProperties(pluginDir, "name", ""); @@ -64,7 +64,7 @@ public void testReadFromPropertiesNameMissing() throws Exception { PluginInfo.readFromProperties(pluginDir); fail("expected missing name exception"); } catch (IllegalArgumentException e) { - assertTrue(e.getMessage().contains("Property [name] is missing in")); + assertTrue(e.getMessage().contains("property [name] is missing in")); } } @@ -81,7 +81,8 @@ public void testReadFromPropertiesDescriptionMissing() throws Exception { public void testReadFromPropertiesVersionMissing() throws Exception { Path pluginDir = createTempDir().resolve("fake-plugin"); - PluginTestUtil.writeProperties(pluginDir, "description", "fake desc", "name", "fake-plugin"); + PluginTestUtil.writeProperties( + pluginDir, "description", "fake desc", "name", "fake-plugin"); try { PluginInfo.readFromProperties(pluginDir); fail("expected missing version exception"); @@ -151,7 +152,11 @@ public void testReadFromPropertiesBadJavaVersionFormat() throws Exception { PluginInfo.readFromProperties(pluginDir); fail("expected bad java version format exception"); } catch (IllegalStateException e) { - assertTrue(e.getMessage(), e.getMessage().equals("version string must be a sequence of nonnegative decimal integers separated by \".\"'s and may have leading zeros but was 1.7.0_80")); + assertTrue( + e.getMessage(), + e.getMessage().equals("version string must be a sequence of nonnegative " + + "decimal integers separated by \".\"'s and may have leading zeros " + + "but was 1.7.0_80")); } } @@ -166,7 +171,8 @@ public void testReadFromPropertiesBogusElasticsearchVersion() throws Exception { PluginInfo.readFromProperties(pluginDir); fail("expected bogus elasticsearch version exception"); } catch (IllegalArgumentException e) { - assertTrue(e.getMessage().contains("version needs to contain major, minor, and revision")); + assertTrue(e.getMessage().contains( + "version needs to contain major, minor, and revision")); } } @@ -181,7 +187,7 @@ public void testReadFromPropertiesOldElasticsearchVersion() throws Exception { PluginInfo.readFromProperties(pluginDir); fail("expected old elasticsearch version exception"); } catch (IllegalArgumentException e) { - assertTrue(e.getMessage().contains("Was designed for version [2.0.0]")); + assertTrue(e.getMessage().contains("was designed for version [2.0.0]")); } } @@ -197,17 +203,17 @@ public void testReadFromPropertiesJvmMissingClassname() throws Exception { PluginInfo.readFromProperties(pluginDir); fail("expected old elasticsearch version exception"); } catch (IllegalArgumentException e) { - assertTrue(e.getMessage().contains("Property [classname] is missing")); + assertTrue(e.getMessage().contains("property [classname] is missing")); } } public void testPluginListSorted() { List plugins = new ArrayList<>(); - plugins.add(new PluginInfo("c", "foo", "dummy", "dummyclass")); - plugins.add(new PluginInfo("b", "foo", "dummy", "dummyclass")); - plugins.add(new PluginInfo("e", "foo", "dummy", "dummyclass")); - plugins.add(new PluginInfo("a", "foo", 
"dummy", "dummyclass")); - plugins.add(new PluginInfo("d", "foo", "dummy", "dummyclass")); + plugins.add(new PluginInfo("c", "foo", "dummy", "dummyclass", randomBoolean())); + plugins.add(new PluginInfo("b", "foo", "dummy", "dummyclass", randomBoolean())); + plugins.add(new PluginInfo("e", "foo", "dummy", "dummyclass", randomBoolean())); + plugins.add(new PluginInfo("a", "foo", "dummy", "dummyclass", randomBoolean())); + plugins.add(new PluginInfo("d", "foo", "dummy", "dummyclass", randomBoolean())); PluginsAndModules pluginsInfo = new PluginsAndModules(plugins, Collections.emptyList()); @@ -215,4 +221,5 @@ public void testPluginListSorted() { List names = infos.stream().map(PluginInfo::getName).collect(Collectors.toList()); assertThat(names, contains("a", "b", "c", "d", "e")); } + } diff --git a/core/src/test/java/org/elasticsearch/bootstrap/SpawnerTests.java b/core/src/test/java/org/elasticsearch/plugins/PluginsTests.java similarity index 60% rename from core/src/test/java/org/elasticsearch/bootstrap/SpawnerTests.java rename to core/src/test/java/org/elasticsearch/plugins/PluginsTests.java index 58c112ba96d3a..a26bcb1991e4f 100644 --- a/core/src/test/java/org/elasticsearch/bootstrap/SpawnerTests.java +++ b/core/src/test/java/org/elasticsearch/plugins/PluginsTests.java @@ -17,20 +17,17 @@ * under the License. */ -package org.elasticsearch.bootstrap; +package org.elasticsearch.plugins; import org.apache.lucene.util.Constants; import org.elasticsearch.test.ESTestCase; import java.util.Locale; -/** - * Doesn't actually test spawning a process, as a system call filter is installed before tests run and forbids it. - */ -public class SpawnerTests extends ESTestCase { +public class PluginsTests extends ESTestCase { public void testMakePlatformName() { - String platformName = Spawner.makePlatformName(Constants.OS_NAME, Constants.OS_ARCH); + final String platformName = Platforms.platformName(Constants.OS_NAME, Constants.OS_ARCH); assertFalse(platformName, platformName.isEmpty()); assertTrue(platformName, platformName.equals(platformName.toLowerCase(Locale.ROOT))); @@ -40,13 +37,13 @@ public void testMakePlatformName() { } public void testMakeSpecificPlatformNames() { - assertEquals("darwin-x86_64", Spawner.makePlatformName("Mac OS X", "x86_64")); - assertEquals("linux-x86_64", Spawner.makePlatformName("Linux", "amd64")); - assertEquals("linux-x86", Spawner.makePlatformName("Linux", "i386")); - assertEquals("windows-x86_64", Spawner.makePlatformName("Windows Server 2008 R2", "amd64")); - assertEquals("windows-x86", Spawner.makePlatformName("Windows Server 2008", "x86")); - assertEquals("windows-x86_64", Spawner.makePlatformName("Windows 8.1", "amd64")); - assertEquals("sunos-x86_64", Spawner.makePlatformName("SunOS", "amd64")); + assertEquals("darwin-x86_64", Platforms.platformName("Mac OS X", "x86_64")); + assertEquals("linux-x86_64", Platforms.platformName("Linux", "amd64")); + assertEquals("linux-x86", Platforms.platformName("Linux", "i386")); + assertEquals("windows-x86_64", Platforms.platformName("Windows Server 2008 R2", "amd64")); + assertEquals("windows-x86", Platforms.platformName("Windows Server 2008", "x86")); + assertEquals("windows-x86_64", Platforms.platformName("Windows 8.1", "amd64")); + assertEquals("sunos-x86_64", Platforms.platformName("SunOS", "amd64")); } } diff --git a/core/src/test/java/org/elasticsearch/rest/action/admin/indices/RestClearIndicesCacheActionTests.java b/core/src/test/java/org/elasticsearch/rest/action/admin/indices/RestClearIndicesCacheActionTests.java new 
file mode 100644 index 0000000000000..25a8f350d9a32 --- /dev/null +++ b/core/src/test/java/org/elasticsearch/rest/action/admin/indices/RestClearIndicesCacheActionTests.java @@ -0,0 +1,42 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.rest.action.admin.indices; + +import org.elasticsearch.action.admin.indices.cache.clear.ClearIndicesCacheRequest; +import org.elasticsearch.rest.RestRequest; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.test.rest.FakeRestRequest; + +import java.util.HashMap; + +import static org.hamcrest.Matchers.equalTo; + +public class RestClearIndicesCacheActionTests extends ESTestCase { + + public void testRequestCacheSet() throws Exception { + final HashMap params = new HashMap<>(); + params.put("request", "true"); + final RestRequest restRequest = new FakeRestRequest.Builder(xContentRegistry()) + .withParams(params).build(); + ClearIndicesCacheRequest cacheRequest = new ClearIndicesCacheRequest(); + cacheRequest = RestClearIndicesCacheAction.fromRequest(restRequest, cacheRequest); + assertThat(cacheRequest.requestCache(), equalTo(true)); + } +} diff --git a/core/src/test/java/org/elasticsearch/search/SearchHitTests.java b/core/src/test/java/org/elasticsearch/search/SearchHitTests.java index 4c7a6f10e76f2..f03190203b281 100644 --- a/core/src/test/java/org/elasticsearch/search/SearchHitTests.java +++ b/core/src/test/java/org/elasticsearch/search/SearchHitTests.java @@ -149,6 +149,24 @@ public void testFromXContent() throws IOException { assertToXContentEquivalent(originalBytes, toXContent(parsed, xContentType, humanReadable), xContentType); } + /** + * When e.g. with "stored_fields": "_none_", only "_index" and "_score" are returned. 
+ */ + public void testFromXContentWithoutTypeAndId() throws IOException { + String hit = "{\"_index\": \"my_index\", \"_score\": 1}"; + SearchHit parsed; + try (XContentParser parser = createParser(JsonXContent.jsonXContent, hit)) { + parser.nextToken(); // jump to first START_OBJECT + parsed = SearchHit.fromXContent(parser); + assertEquals(XContentParser.Token.END_OBJECT, parser.currentToken()); + assertNull(parser.nextToken()); + } + assertEquals("my_index", parsed.getIndex()); + assertEquals(1, parsed.getScore(), Float.MIN_VALUE); + assertNull(parsed.getType()); + assertNull(parsed.getId()); + } + public void testToXContent() throws IOException { SearchHit searchHit = new SearchHit(1, "id1", new Text("type"), Collections.emptyMap()); searchHit.score(1.5f); diff --git a/core/src/test/java/org/elasticsearch/search/SearchServiceTests.java b/core/src/test/java/org/elasticsearch/search/SearchServiceTests.java index 0f626903609be..f3ff6be1cc12c 100644 --- a/core/src/test/java/org/elasticsearch/search/SearchServiceTests.java +++ b/core/src/test/java/org/elasticsearch/search/SearchServiceTests.java @@ -48,7 +48,6 @@ import org.elasticsearch.search.internal.AliasFilter; import org.elasticsearch.search.internal.SearchContext; import org.elasticsearch.search.internal.ShardSearchLocalRequest; -import org.elasticsearch.search.query.QuerySearchResultProvider; import org.elasticsearch.test.ESSingleNodeTestCase; import java.io.IOException; @@ -184,13 +183,13 @@ public void onFailure(Exception e) { final int rounds = scaledRandomIntBetween(100, 10000); for (int i = 0; i < rounds; i++) { try { - QuerySearchResultProvider querySearchResultProvider = service.executeQueryPhase( + SearchPhaseResult searchPhaseResult = service.executeQueryPhase( new ShardSearchLocalRequest(indexShard.shardId(), 1, SearchType.DEFAULT, new SearchSourceBuilder(), new String[0], false, new AliasFilter(null, Strings.EMPTY_ARRAY), 1.0f), new SearchTask(123L, "", "", "", null)); IntArrayList intCursors = new IntArrayList(1); intCursors.add(0); - ShardFetchRequest req = new ShardFetchRequest(querySearchResultProvider.id(), intCursors, null /* not a scroll */); + ShardFetchRequest req = new ShardFetchRequest(searchPhaseResult.getRequestId(), intCursors, null /* not a scroll */); service.executeFetchPhase(req, new SearchTask(123L, "", "", "", null)); } catch (AlreadyClosedException ex) { throw ex; diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/AggregatorTestCase.java b/core/src/test/java/org/elasticsearch/search/aggregations/AggregatorTestCase.java index 65f2965df9716..363e972456efb 100644 --- a/core/src/test/java/org/elasticsearch/search/aggregations/AggregatorTestCase.java +++ b/core/src/test/java/org/elasticsearch/search/aggregations/AggregatorTestCase.java @@ -34,13 +34,20 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.MockBigArrays; import org.elasticsearch.index.IndexSettings; +import org.elasticsearch.index.cache.bitset.BitsetFilterCache; +import org.elasticsearch.index.cache.bitset.BitsetFilterCache.Listener; import org.elasticsearch.index.cache.query.DisabledQueryCache; import org.elasticsearch.index.engine.Engine; import org.elasticsearch.index.fielddata.IndexFieldDataCache; import org.elasticsearch.index.fielddata.IndexFieldDataService; +import org.elasticsearch.index.mapper.ContentPath; import org.elasticsearch.index.mapper.MappedFieldType; +import org.elasticsearch.index.mapper.Mapper.BuilderContext; import org.elasticsearch.index.mapper.MapperService; 
+import org.elasticsearch.index.mapper.ObjectMapper; +import org.elasticsearch.index.mapper.ObjectMapper.Nested; import org.elasticsearch.index.query.QueryShardContext; +import org.elasticsearch.index.query.support.NestedScope; import org.elasticsearch.indices.breaker.CircuitBreakerService; import org.elasticsearch.indices.breaker.NoneCircuitBreakerService; import org.elasticsearch.indices.fielddata.cache.IndicesFieldDataCache; @@ -59,6 +66,7 @@ import java.util.List; import static org.mockito.Matchers.anyObject; +import static org.mockito.Matchers.anyString; import static org.mockito.Mockito.doAnswer; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; @@ -69,6 +77,7 @@ * {@link AggregationBuilder} instance. */ public abstract class AggregatorTestCase extends ESTestCase { + private static final String NESTEDFIELD_PREFIX = "nested_"; private List releasables = new ArrayList<>(); protected A createAggregator(B aggregationBuilder, @@ -119,6 +128,15 @@ public boolean shouldCache(Query query) throws IOException { when(searchContext.searcher()).thenReturn(contextIndexSearcher); when(searchContext.fetchPhase()) .thenReturn(new FetchPhase(Arrays.asList(new FetchSourceSubPhase(), new DocValueFieldsFetchSubPhase()))); + when(searchContext.getObjectMapper(anyString())).thenAnswer(invocation -> { + String fieldName = (String) invocation.getArguments()[0]; + if (fieldName.startsWith(NESTEDFIELD_PREFIX)) { + BuilderContext context = new BuilderContext(indexSettings.getSettings(), new ContentPath()); + return new ObjectMapper.Builder<>(fieldName).nested(Nested.newNested(false, false)).build(context); + } + return null; + }); + when(searchContext.bitsetFilterCache()).thenReturn(new BitsetFilterCache(indexSettings, mock(Listener.class))); doAnswer(invocation -> { /* Store the releasables so we can release them at the end of the test case. This is important because aggregations don't * close their sub-aggregations. This is fairly similar to what the production code does. 
*/ @@ -157,6 +175,8 @@ protected QueryShardContext queryShardContextMock(MappedFieldType[] fieldTypes, when(queryShardContext.getForField(fieldType)).then(invocation -> fieldType.fielddataBuilder().build(indexSettings, fieldType, new IndexFieldDataCache.None(), circuitBreakerService, mock(MapperService.class))); } + NestedScope nestedScope = new NestedScope(); + when(queryShardContext.nestedScope()).thenReturn(nestedScope); return queryShardContext; } diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/InternalAggregationTestCase.java b/core/src/test/java/org/elasticsearch/search/aggregations/InternalAggregationTestCase.java index f5a06e09fd334..05d75d9af77f1 100644 --- a/core/src/test/java/org/elasticsearch/search/aggregations/InternalAggregationTestCase.java +++ b/core/src/test/java/org/elasticsearch/search/aggregations/InternalAggregationTestCase.java @@ -50,7 +50,7 @@ protected T createUnmappedInstance(String name, return createTestInstance(name, pipelineAggregators, metaData); } - public final void testReduceRandom() { + public void testReduceRandom() { String name = randomAsciiOfLength(5); List inputs = new ArrayList<>(); List toReduce = new ArrayList<>(); diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/bucket/FilterAggregatorTests.java b/core/src/test/java/org/elasticsearch/search/aggregations/bucket/FilterAggregatorTests.java new file mode 100644 index 0000000000000..c56178905055c --- /dev/null +++ b/core/src/test/java/org/elasticsearch/search/aggregations/bucket/FilterAggregatorTests.java @@ -0,0 +1,105 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package org.elasticsearch.search.aggregations.bucket; + +import org.apache.lucene.document.Document; +import org.apache.lucene.document.Field; +import org.apache.lucene.index.DirectoryReader; +import org.apache.lucene.index.IndexOptions; +import org.apache.lucene.index.IndexReader; +import org.apache.lucene.index.RandomIndexWriter; +import org.apache.lucene.search.IndexSearcher; +import org.apache.lucene.search.MatchAllDocsQuery; +import org.apache.lucene.store.Directory; +import org.elasticsearch.index.mapper.KeywordFieldMapper; +import org.elasticsearch.index.mapper.MappedFieldType; +import org.elasticsearch.index.query.QueryBuilder; +import org.elasticsearch.index.query.QueryBuilders; +import org.elasticsearch.search.aggregations.AggregatorTestCase; +import org.elasticsearch.search.aggregations.bucket.filter.FilterAggregationBuilder; +import org.elasticsearch.search.aggregations.bucket.filter.InternalFilter; +import org.junit.Before; + +public class FilterAggregatorTests extends AggregatorTestCase { + private MappedFieldType fieldType; + + @Before + public void setUpTest() throws Exception { + super.setUp(); + fieldType = new KeywordFieldMapper.KeywordFieldType(); + fieldType.setHasDocValues(true); + fieldType.setIndexOptions(IndexOptions.DOCS); + fieldType.setName("field"); + } + + public void testEmpty() throws Exception { + Directory directory = newDirectory(); + RandomIndexWriter indexWriter = new RandomIndexWriter(random(), directory); + indexWriter.close(); + IndexReader indexReader = DirectoryReader.open(directory); + IndexSearcher indexSearcher = newSearcher(indexReader, true, true); + QueryBuilder filter = QueryBuilders.termQuery("field", randomAsciiOfLength(5)); + FilterAggregationBuilder builder = new FilterAggregationBuilder("test", filter); + InternalFilter response = search(indexSearcher, new MatchAllDocsQuery(), builder, + fieldType); + assertEquals(response.getDocCount(), 0); + indexReader.close(); + directory.close(); + } + + public void testRandom() throws Exception { + Directory directory = newDirectory(); + RandomIndexWriter indexWriter = new RandomIndexWriter(random(), directory); + int numDocs = randomIntBetween(100, 200); + int maxTerm = randomIntBetween(10, 50); + int[] expectedBucketCount = new int[maxTerm]; + Document document = new Document(); + for (int i = 0; i < numDocs; i++) { + if (frequently()) { + // make sure we have more than one segment to test the merge + indexWriter.getReader().close(); + } + int value = randomInt(maxTerm-1); + expectedBucketCount[value] += 1; + document.add(new Field("field", Integer.toString(value), fieldType)); + indexWriter.addDocument(document); + document.clear(); + } + indexWriter.close(); + + IndexReader indexReader = DirectoryReader.open(directory); + IndexSearcher indexSearcher = newSearcher(indexReader, true, true); + int value = randomInt(maxTerm - 1); + QueryBuilder filter = QueryBuilders.termQuery("field", Integer.toString(value)); + FilterAggregationBuilder builder = new FilterAggregationBuilder("test", filter); + + for (boolean doReduce : new boolean[] {true, false}) { + final InternalFilter response; + if (doReduce) { + response = searchAndReduce(indexSearcher, new MatchAllDocsQuery(), builder, + fieldType); + } else { + response = search(indexSearcher, new MatchAllDocsQuery(), builder, fieldType); + } + assertEquals(response.getDocCount(), (long) expectedBucketCount[value]); + } + indexReader.close(); + directory.close(); + } +} diff --git 
a/core/src/test/java/org/elasticsearch/search/aggregations/bucket/nested/ReverseNestedAggregatorTests.java b/core/src/test/java/org/elasticsearch/search/aggregations/bucket/nested/ReverseNestedAggregatorTests.java new file mode 100644 index 0000000000000..5e260b0cf15a9 --- /dev/null +++ b/core/src/test/java/org/elasticsearch/search/aggregations/bucket/nested/ReverseNestedAggregatorTests.java @@ -0,0 +1,145 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.search.aggregations.bucket.nested; + +import org.apache.lucene.document.Document; +import org.apache.lucene.document.Field; +import org.apache.lucene.document.SortedNumericDocValuesField; +import org.apache.lucene.index.DirectoryReader; +import org.apache.lucene.index.IndexReader; +import org.apache.lucene.index.RandomIndexWriter; +import org.apache.lucene.search.MatchAllDocsQuery; +import org.apache.lucene.store.Directory; +import org.elasticsearch.index.mapper.MappedFieldType; +import org.elasticsearch.index.mapper.NumberFieldMapper; +import org.elasticsearch.index.mapper.TypeFieldMapper; +import org.elasticsearch.index.mapper.UidFieldMapper; +import org.elasticsearch.search.aggregations.AggregatorTestCase; +import org.elasticsearch.search.aggregations.metrics.max.InternalMax; +import org.elasticsearch.search.aggregations.metrics.max.MaxAggregationBuilder; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; + +public class ReverseNestedAggregatorTests extends AggregatorTestCase { + + private static final String VALUE_FIELD_NAME = "number"; + private static final String NESTED_OBJECT = "nested_object"; + private static final String NESTED_AGG = "nestedAgg"; + private static final String REVERSE_AGG_NAME = "reverseNestedAgg"; + private static final String MAX_AGG_NAME = "maxAgg"; + + + public void testNoDocs() throws IOException { + try (Directory directory = newDirectory()) { + try (RandomIndexWriter iw = new RandomIndexWriter(random(), directory)) { + // intentionally not writing any docs + } + try (IndexReader indexReader = DirectoryReader.open(directory)) { + NestedAggregationBuilder nestedBuilder = new NestedAggregationBuilder(NESTED_AGG, + NESTED_OBJECT); + ReverseNestedAggregationBuilder reverseNestedBuilder + = new ReverseNestedAggregationBuilder(REVERSE_AGG_NAME); + nestedBuilder.subAggregation(reverseNestedBuilder); + MaxAggregationBuilder maxAgg = new MaxAggregationBuilder(MAX_AGG_NAME) + .field(VALUE_FIELD_NAME); + reverseNestedBuilder.subAggregation(maxAgg); + MappedFieldType fieldType = new NumberFieldMapper.NumberFieldType( + NumberFieldMapper.NumberType.LONG); + fieldType.setName(VALUE_FIELD_NAME); + + Nested nested = search(newSearcher(indexReader, true, true), + new MatchAllDocsQuery(), nestedBuilder, fieldType); + 
ReverseNested reverseNested = (ReverseNested) nested.getProperty(REVERSE_AGG_NAME); + assertEquals(REVERSE_AGG_NAME, reverseNested.getName()); + assertEquals(0, reverseNested.getDocCount()); + + InternalMax max = (InternalMax) reverseNested.getProperty(MAX_AGG_NAME); + assertEquals(MAX_AGG_NAME, max.getName()); + assertEquals(Double.NEGATIVE_INFINITY, max.getValue(), Double.MIN_VALUE); + } + } + } + + public void testMaxFromParentDocs() throws IOException { + int numParentDocs = randomIntBetween(1, 20); + int expectedParentDocs = 0; + int expectedNestedDocs = 0; + double expectedMaxValue = Double.NEGATIVE_INFINITY; + try (Directory directory = newDirectory()) { + try (RandomIndexWriter iw = new RandomIndexWriter(random(), directory)) { + for (int i = 0; i < numParentDocs; i++) { + List documents = new ArrayList<>(); + int numNestedDocs = randomIntBetween(0, 20); + for (int nested = 0; nested < numNestedDocs; nested++) { + Document document = new Document(); + document.add(new Field(UidFieldMapper.NAME, "type#" + i, + UidFieldMapper.Defaults.NESTED_FIELD_TYPE)); + document.add(new Field(TypeFieldMapper.NAME, "__" + NESTED_OBJECT, + TypeFieldMapper.Defaults.FIELD_TYPE)); + documents.add(document); + expectedNestedDocs++; + } + Document document = new Document(); + document.add(new Field(UidFieldMapper.NAME, "type#" + i, + UidFieldMapper.Defaults.FIELD_TYPE)); + document.add(new Field(TypeFieldMapper.NAME, "test", + TypeFieldMapper.Defaults.FIELD_TYPE)); + long value = randomNonNegativeLong() % 10000; + document.add(new SortedNumericDocValuesField(VALUE_FIELD_NAME, value)); + if (numNestedDocs > 0) { + expectedMaxValue = Math.max(expectedMaxValue, value); + expectedParentDocs++; + } + documents.add(document); + iw.addDocuments(documents); + } + iw.commit(); + } + try (IndexReader indexReader = DirectoryReader.open(directory)) { + NestedAggregationBuilder nestedBuilder = new NestedAggregationBuilder(NESTED_AGG, + NESTED_OBJECT); + ReverseNestedAggregationBuilder reverseNestedBuilder + = new ReverseNestedAggregationBuilder(REVERSE_AGG_NAME); + nestedBuilder.subAggregation(reverseNestedBuilder); + MaxAggregationBuilder maxAgg = new MaxAggregationBuilder(MAX_AGG_NAME) + .field(VALUE_FIELD_NAME); + reverseNestedBuilder.subAggregation(maxAgg); + MappedFieldType fieldType = new NumberFieldMapper.NumberFieldType( + NumberFieldMapper.NumberType.LONG); + fieldType.setName(VALUE_FIELD_NAME); + + Nested nested = search(newSearcher(indexReader, true, true), + new MatchAllDocsQuery(), nestedBuilder, fieldType); + assertEquals(expectedNestedDocs, nested.getDocCount()); + + ReverseNested reverseNested = (ReverseNested) nested.getProperty(REVERSE_AGG_NAME); + assertEquals(REVERSE_AGG_NAME, reverseNested.getName()); + assertEquals(expectedParentDocs, reverseNested.getDocCount()); + + InternalMax max = (InternalMax) reverseNested.getProperty(MAX_AGG_NAME); + assertEquals(MAX_AGG_NAME, max.getName()); + assertEquals(expectedMaxValue, max.getValue(), Double.MIN_VALUE); + } + } + } + +} diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/metrics/CardinalityAggregatorTests.java b/core/src/test/java/org/elasticsearch/search/aggregations/metrics/CardinalityAggregatorTests.java new file mode 100644 index 0000000000000..b80dd163fc97f --- /dev/null +++ b/core/src/test/java/org/elasticsearch/search/aggregations/metrics/CardinalityAggregatorTests.java @@ -0,0 +1,131 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. 
See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.search.aggregations.metrics; + +import org.apache.lucene.document.IntPoint; +import org.apache.lucene.document.NumericDocValuesField; +import org.apache.lucene.document.SortedNumericDocValuesField; +import org.apache.lucene.index.DirectoryReader; +import org.apache.lucene.index.IndexReader; +import org.apache.lucene.index.RandomIndexWriter; +import org.apache.lucene.search.FieldValueQuery; +import org.apache.lucene.search.IndexSearcher; +import org.apache.lucene.search.MatchAllDocsQuery; +import org.apache.lucene.search.Query; +import org.apache.lucene.store.Directory; +import org.elasticsearch.common.CheckedConsumer; +import org.elasticsearch.index.mapper.MappedFieldType; +import org.elasticsearch.index.mapper.NumberFieldMapper; +import org.elasticsearch.search.aggregations.AggregatorTestCase; +import org.elasticsearch.search.aggregations.metrics.cardinality.CardinalityAggregationBuilder; +import org.elasticsearch.search.aggregations.metrics.cardinality.CardinalityAggregator; +import org.elasticsearch.search.aggregations.metrics.cardinality.InternalCardinality; +import org.elasticsearch.search.aggregations.support.ValueType; + +import java.io.IOException; +import java.util.Arrays; +import java.util.function.Consumer; + +import static java.util.Collections.singleton; + +public class CardinalityAggregatorTests extends AggregatorTestCase { + public void testNoDocs() throws IOException { + testCase(new MatchAllDocsQuery(), iw -> { + // Intentionally not writing any docs + }, card -> { + assertEquals(0.0, card.getValue(), 0); + }); + } + + public void testNoMatchingField() throws IOException { + testCase(new MatchAllDocsQuery(), iw -> { + iw.addDocument(singleton(new SortedNumericDocValuesField("wrong_number", 7))); + iw.addDocument(singleton(new SortedNumericDocValuesField("wrong_number", 1))); + }, card -> { + assertEquals(0.0, card.getValue(), 0); + }); + } + + public void testSomeMatchesSortedNumericDocValues() throws IOException { + testCase(new FieldValueQuery("number"), iw -> { + iw.addDocument(singleton(new SortedNumericDocValuesField("number", 7))); + iw.addDocument(singleton(new SortedNumericDocValuesField("number", 1))); + }, card -> { + assertEquals(2, card.getValue(), 0); + }); + } + + public void testSomeMatchesNumericDocValues() throws IOException { + testCase(new FieldValueQuery("number"), iw -> { + iw.addDocument(singleton(new NumericDocValuesField("number", 7))); + iw.addDocument(singleton(new NumericDocValuesField("number", 1))); + }, card -> { + assertEquals(2, card.getValue(), 0); + }); + } + + public void testQueryFiltering() throws IOException { + testCase(IntPoint.newRangeQuery("number", 0, 5), iw -> { + iw.addDocument(Arrays.asList(new IntPoint("number", 7), + new SortedNumericDocValuesField("number", 7))); + 
iw.addDocument(Arrays.asList(new IntPoint("number", 1), + new SortedNumericDocValuesField("number", 1))); + }, card -> { + assertEquals(1, card.getValue(), 0); + }); + } + + public void testQueryFiltersAll() throws IOException { + testCase(IntPoint.newRangeQuery("number", -1, 0), iw -> { + iw.addDocument(Arrays.asList(new IntPoint("number", 7), + new SortedNumericDocValuesField("number", 7))); + iw.addDocument(Arrays.asList(new IntPoint("number", 1), + new SortedNumericDocValuesField("number", 1))); + }, card -> { + assertEquals(0.0, card.getValue(), 0); + }); + } + + private void testCase(Query query, CheckedConsumer buildIndex, + Consumer verify) throws IOException { + Directory directory = newDirectory(); + RandomIndexWriter indexWriter = new RandomIndexWriter(random(), directory); + buildIndex.accept(indexWriter); + indexWriter.close(); + + IndexReader indexReader = DirectoryReader.open(directory); + IndexSearcher indexSearcher = newSearcher(indexReader, true, true); + + CardinalityAggregationBuilder aggregationBuilder = new CardinalityAggregationBuilder( + "_name", ValueType.NUMERIC).field("number"); + MappedFieldType fieldType = new NumberFieldMapper.NumberFieldType( + NumberFieldMapper.NumberType.LONG); + fieldType.setName("number"); + try (CardinalityAggregator aggregator = createAggregator(aggregationBuilder, indexSearcher, + fieldType)) { + aggregator.preCollection(); + indexSearcher.search(query, aggregator); + aggregator.postCollection(); + verify.accept((InternalCardinality) aggregator.buildAggregation(0L)); + } + indexReader.close(); + directory.close(); + } +} diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/metrics/avg/InternalAvgTests.java b/core/src/test/java/org/elasticsearch/search/aggregations/metrics/avg/InternalAvgTests.java index 9dd5715ff93de..0600d7299b46f 100644 --- a/core/src/test/java/org/elasticsearch/search/aggregations/metrics/avg/InternalAvgTests.java +++ b/core/src/test/java/org/elasticsearch/search/aggregations/metrics/avg/InternalAvgTests.java @@ -50,7 +50,7 @@ protected void assertReduced(InternalAvg reduced, List inputs) { counts += in.getCount(); } assertEquals(counts, reduced.getCount()); - assertEquals(sum, reduced.getSum(), 0.00000001); - assertEquals(sum / counts, reduced.value(), 0.00000001); + assertEquals(sum, reduced.getSum(), 0.0000001); + assertEquals(sum / counts, reduced.value(), 0.0000001); } } diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/metrics/cardinality/InternalCardinalityTests.java b/core/src/test/java/org/elasticsearch/search/aggregations/metrics/cardinality/InternalCardinalityTests.java new file mode 100644 index 0000000000000..7c5809f323bdf --- /dev/null +++ b/core/src/test/java/org/elasticsearch/search/aggregations/metrics/cardinality/InternalCardinalityTests.java @@ -0,0 +1,82 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. 
See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.search.aggregations.metrics.cardinality; + +import org.elasticsearch.common.io.stream.Writeable.Reader; +import org.elasticsearch.common.lease.Releasables; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.util.MockBigArrays; +import org.elasticsearch.indices.breaker.NoneCircuitBreakerService; +import org.elasticsearch.search.aggregations.InternalAggregationTestCase; +import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator; +import org.junit.After; +import org.junit.Before; + +import java.util.ArrayList; +import java.util.List; +import java.util.Map; + +public class InternalCardinalityTests extends InternalAggregationTestCase { + private static List algos; + private static int p; + + @Before + public void setup() { + algos = new ArrayList<>(); + p = randomIntBetween(HyperLogLogPlusPlus.MIN_PRECISION, HyperLogLogPlusPlus.MAX_PRECISION); + } + + @Override + protected InternalCardinality createTestInstance(String name, + List pipelineAggregators, Map metaData) { + HyperLogLogPlusPlus hllpp = new HyperLogLogPlusPlus(p, + new MockBigArrays(Settings.EMPTY, new NoneCircuitBreakerService()), 1); + algos.add(hllpp); + for (int i = 0; i < 100; i++) { + hllpp.collect(0, randomIntBetween(1, 100)); + } + return new InternalCardinality(name, hllpp, pipelineAggregators, metaData); + } + + @Override + protected Reader instanceReader() { + return InternalCardinality::new; + } + + @Override + protected void assertReduced(InternalCardinality reduced, List inputs) { + HyperLogLogPlusPlus[] algos = inputs.stream().map(InternalCardinality::getState) + .toArray(size -> new HyperLogLogPlusPlus[size]); + if (algos.length > 0) { + HyperLogLogPlusPlus result = algos[0]; + for (int i = 1; i < algos.length; i++) { + result.merge(0, algos[i], 0); + } + assertEquals(result.cardinality(0), reduced.value(), 0); + } + } + + @After + public void cleanup() { + Releasables.close(algos); + algos.clear(); + algos = null; + } +} diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/pipeline/InternalSimpleValueTests.java b/core/src/test/java/org/elasticsearch/search/aggregations/pipeline/InternalSimpleValueTests.java new file mode 100644 index 0000000000000..afb5d869d0e02 --- /dev/null +++ b/core/src/test/java/org/elasticsearch/search/aggregations/pipeline/InternalSimpleValueTests.java @@ -0,0 +1,58 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.search.aggregations.pipeline; + +import org.elasticsearch.common.io.stream.Writeable.Reader; +import org.elasticsearch.search.DocValueFormat; +import org.elasticsearch.search.aggregations.InternalAggregationTestCase; + +import java.util.Collections; +import java.util.List; +import java.util.Map; + +public class InternalSimpleValueTests extends InternalAggregationTestCase{ + + @Override + protected InternalSimpleValue createTestInstance(String name, + List pipelineAggregators, Map metaData) { + DocValueFormat formatter = randomFrom(DocValueFormat.BOOLEAN, DocValueFormat.GEOHASH, + DocValueFormat.IP, DocValueFormat.RAW); + double value = randomDoubleBetween(0, 100000, true); + return new InternalSimpleValue(name, value, formatter, pipelineAggregators, metaData); + } + + @Override + public void testReduceRandom() { + expectThrows(UnsupportedOperationException.class, + () -> createTestInstance("name", Collections.emptyList(), null).reduce(null, + null)); + } + + @Override + protected void assertReduced(InternalSimpleValue reduced, List inputs) { + // no test since reduce operation is unsupported + } + + @Override + protected Reader instanceReader() { + return InternalSimpleValue::new; + } + +} diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/pipeline/derivative/InternalDerivativeTests.java b/core/src/test/java/org/elasticsearch/search/aggregations/pipeline/derivative/InternalDerivativeTests.java new file mode 100644 index 0000000000000..1889723fa8aec --- /dev/null +++ b/core/src/test/java/org/elasticsearch/search/aggregations/pipeline/derivative/InternalDerivativeTests.java @@ -0,0 +1,61 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.search.aggregations.pipeline.derivative; + +import org.elasticsearch.common.io.stream.Writeable.Reader; +import org.elasticsearch.search.DocValueFormat; +import org.elasticsearch.search.aggregations.InternalAggregationTestCase; +import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator; + +import java.util.Collections; +import java.util.List; +import java.util.Map; + +public class InternalDerivativeTests extends InternalAggregationTestCase { + + @Override + protected InternalDerivative createTestInstance(String name, + List pipelineAggregators, Map metaData) { + DocValueFormat formatter = randomFrom(DocValueFormat.BOOLEAN, DocValueFormat.GEOHASH, + DocValueFormat.IP, DocValueFormat.RAW); + double value = randomDoubleBetween(0, 100000, true); + double normalizationFactor = randomDoubleBetween(0, 100000, true); + return new InternalDerivative(name, value, normalizationFactor, formatter, + pipelineAggregators, metaData); + } + + @Override + public void testReduceRandom() { + expectThrows(UnsupportedOperationException.class, + () -> createTestInstance("name", Collections.emptyList(), null).reduce(null, + null)); + } + + @Override + protected void assertReduced(InternalDerivative reduced, List inputs) { + // no test since reduce operation is unsupported + } + + @Override + protected Reader instanceReader() { + return InternalDerivative::new; + } + +} diff --git a/core/src/test/java/org/elasticsearch/search/slice/TermsSliceQueryTests.java b/core/src/test/java/org/elasticsearch/search/slice/TermsSliceQueryTests.java index e00dabc63638f..3fa4ce410529a 100644 --- a/core/src/test/java/org/elasticsearch/search/slice/TermsSliceQueryTests.java +++ b/core/src/test/java/org/elasticsearch/search/slice/TermsSliceQueryTests.java @@ -34,6 +34,7 @@ import org.apache.lucene.store.Directory; import org.apache.lucene.util.BytesRef; +import org.apache.lucene.util.StringHelper; import org.elasticsearch.common.UUIDs; import org.elasticsearch.test.ESTestCase; @@ -72,7 +73,8 @@ public void testSearch() throws Exception { Document doc = new Document(); String uuid = UUIDs.base64UUID(); BytesRef br = new BytesRef(uuid); - int id = Math.floorMod(br.hashCode(), max); + int hashCode = StringHelper.murmurhash3_x86_32(br, TermsSliceQuery.SEED); + int id = Math.floorMod(hashCode, max); sliceCounters[id] ++; doc.add(new StringField("uuid", uuid, Field.Store.YES)); w.addDocument(doc); diff --git a/core/src/test/java/org/elasticsearch/search/sort/FieldSortBuilderTests.java b/core/src/test/java/org/elasticsearch/search/sort/FieldSortBuilderTests.java index 111d4256eb310..55e6b0eabf455 100644 --- a/core/src/test/java/org/elasticsearch/search/sort/FieldSortBuilderTests.java +++ b/core/src/test/java/org/elasticsearch/search/sort/FieldSortBuilderTests.java @@ -39,7 +39,7 @@ protected FieldSortBuilder createTestItem() { private List missingContent = Arrays.asList( "_last", "_first", - randomAsciiOfLength(10), randomUnicodeOfCodepointLengthBetween(5, 15), + Integer.toString(randomInt()), randomInt()); diff --git a/core/src/test/java/org/elasticsearch/search/suggest/completion/CategoryContextMappingTests.java b/core/src/test/java/org/elasticsearch/search/suggest/completion/CategoryContextMappingTests.java index 7c864320abe8a..15c05a5622625 100644 --- a/core/src/test/java/org/elasticsearch/search/suggest/completion/CategoryContextMappingTests.java +++ b/core/src/test/java/org/elasticsearch/search/suggest/completion/CategoryContextMappingTests.java @@ -26,12 +26,14 @@ import 
org.elasticsearch.common.compress.CompressedXContent; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.common.xcontent.json.JsonXContent; import org.elasticsearch.index.mapper.DocumentMapper; import org.elasticsearch.index.mapper.FieldMapper; import org.elasticsearch.index.mapper.MappedFieldType; import org.elasticsearch.index.mapper.ParseContext; import org.elasticsearch.index.mapper.ParsedDocument; +import org.elasticsearch.index.mapper.SourceToParse; import org.elasticsearch.index.query.QueryParseContext; import org.elasticsearch.search.suggest.completion.context.CategoryContextMapping; import org.elasticsearch.search.suggest.completion.context.ContextBuilder; @@ -62,7 +64,7 @@ public void testIndexingWithNoContexts() throws Exception { DocumentMapper defaultMapper = createIndex("test").mapperService().documentMapperParser().parse("type1", new CompressedXContent(mapping)); FieldMapper fieldMapper = defaultMapper.mappers().getMapper("completion"); MappedFieldType completionFieldType = fieldMapper.fieldType(); - ParsedDocument parsedDocument = defaultMapper.parse("test", "type1", "1", jsonBuilder() + ParsedDocument parsedDocument = defaultMapper.parse(SourceToParse.source("test", "type1", "1", jsonBuilder() .startObject() .startArray("completion") .startObject() @@ -79,7 +81,8 @@ public void testIndexingWithNoContexts() throws Exception { .endObject() .endArray() .endObject() - .bytes()); + .bytes(), + XContentType.JSON)); IndexableField[] fields = parsedDocument.rootDoc().getFields(completionFieldType.name()); assertContextSuggestFields(fields, 7); } @@ -100,7 +103,7 @@ public void testIndexingWithSimpleContexts() throws Exception { DocumentMapper defaultMapper = createIndex("test").mapperService().documentMapperParser().parse("type1", new CompressedXContent(mapping)); FieldMapper fieldMapper = defaultMapper.mappers().getMapper("completion"); MappedFieldType completionFieldType = fieldMapper.fieldType(); - ParsedDocument parsedDocument = defaultMapper.parse("test", "type1", "1", jsonBuilder() + ParsedDocument parsedDocument = defaultMapper.parse(SourceToParse.source("test", "type1", "1", jsonBuilder() .startObject() .startArray("completion") .startObject() @@ -112,7 +115,8 @@ public void testIndexingWithSimpleContexts() throws Exception { .endObject() .endArray() .endObject() - .bytes()); + .bytes(), + XContentType.JSON)); IndexableField[] fields = parsedDocument.rootDoc().getFields(completionFieldType.name()); assertContextSuggestFields(fields, 3); } @@ -133,7 +137,7 @@ public void testIndexingWithContextList() throws Exception { DocumentMapper defaultMapper = createIndex("test").mapperService().documentMapperParser().parse("type1", new CompressedXContent(mapping)); FieldMapper fieldMapper = defaultMapper.mappers().getMapper("completion"); MappedFieldType completionFieldType = fieldMapper.fieldType(); - ParsedDocument parsedDocument = defaultMapper.parse("test", "type1", "1", jsonBuilder() + ParsedDocument parsedDocument = defaultMapper.parse(SourceToParse.source("test", "type1", "1", jsonBuilder() .startObject() .startObject("completion") .array("input", "suggestion5", "suggestion6", "suggestion7") @@ -143,7 +147,8 @@ public void testIndexingWithContextList() throws Exception { .field("weight", 5) .endObject() .endObject() - .bytes()); + .bytes(), + XContentType.JSON)); IndexableField[] fields = 
parsedDocument.rootDoc().getFields(completionFieldType.name()); assertContextSuggestFields(fields, 3); } @@ -181,7 +186,8 @@ public void testIndexingWithMultipleContexts() throws Exception { .endObject() .endArray() .endObject(); - ParsedDocument parsedDocument = defaultMapper.parse("test", "type1", "1", builder.bytes()); + ParsedDocument parsedDocument = defaultMapper.parse(SourceToParse.source("test", "type1", "1", builder.bytes(), + XContentType.JSON)); IndexableField[] fields = parsedDocument.rootDoc().getFields(completionFieldType.name()); assertContextSuggestFields(fields, 3); } diff --git a/core/src/test/java/org/elasticsearch/search/suggest/completion/GeoContextMappingTests.java b/core/src/test/java/org/elasticsearch/search/suggest/completion/GeoContextMappingTests.java index f4c65b247800e..a0a278c831d9d 100644 --- a/core/src/test/java/org/elasticsearch/search/suggest/completion/GeoContextMappingTests.java +++ b/core/src/test/java/org/elasticsearch/search/suggest/completion/GeoContextMappingTests.java @@ -23,11 +23,13 @@ import org.elasticsearch.common.compress.CompressedXContent; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.common.xcontent.json.JsonXContent; import org.elasticsearch.index.mapper.DocumentMapper; import org.elasticsearch.index.mapper.FieldMapper; import org.elasticsearch.index.mapper.MappedFieldType; import org.elasticsearch.index.mapper.ParsedDocument; +import org.elasticsearch.index.mapper.SourceToParse; import org.elasticsearch.index.query.QueryParseContext; import org.elasticsearch.search.suggest.completion.context.ContextBuilder; import org.elasticsearch.search.suggest.completion.context.ContextMapping; @@ -62,7 +64,7 @@ public void testIndexingWithNoContexts() throws Exception { DocumentMapper defaultMapper = createIndex("test").mapperService().documentMapperParser().parse("type1", new CompressedXContent(mapping)); FieldMapper fieldMapper = defaultMapper.mappers().getMapper("completion"); MappedFieldType completionFieldType = fieldMapper.fieldType(); - ParsedDocument parsedDocument = defaultMapper.parse("test", "type1", "1", jsonBuilder() + ParsedDocument parsedDocument = defaultMapper.parse(SourceToParse.source("test", "type1", "1", jsonBuilder() .startObject() .startArray("completion") .startObject() @@ -79,7 +81,8 @@ public void testIndexingWithNoContexts() throws Exception { .endObject() .endArray() .endObject() - .bytes()); + .bytes(), + XContentType.JSON)); IndexableField[] fields = parsedDocument.rootDoc().getFields(completionFieldType.name()); assertContextSuggestFields(fields, 7); } @@ -101,7 +104,7 @@ public void testIndexingWithSimpleContexts() throws Exception { DocumentMapper defaultMapper = createIndex("test").mapperService().documentMapperParser().parse("type1", new CompressedXContent(mapping)); FieldMapper fieldMapper = defaultMapper.mappers().getMapper("completion"); MappedFieldType completionFieldType = fieldMapper.fieldType(); - ParsedDocument parsedDocument = defaultMapper.parse("test", "type1", "1", jsonBuilder() + ParsedDocument parsedDocument = defaultMapper.parse(SourceToParse.source("test", "type1", "1", jsonBuilder() .startObject() .startArray("completion") .startObject() @@ -116,7 +119,8 @@ public void testIndexingWithSimpleContexts() throws Exception { .endObject() .endArray() .endObject() - .bytes()); + .bytes(), + XContentType.JSON)); IndexableField[] fields = 
parsedDocument.rootDoc().getFields(completionFieldType.name()); assertContextSuggestFields(fields, 3); } @@ -137,7 +141,7 @@ public void testIndexingWithContextList() throws Exception { DocumentMapper defaultMapper = createIndex("test").mapperService().documentMapperParser().parse("type1", new CompressedXContent(mapping)); FieldMapper fieldMapper = defaultMapper.mappers().getMapper("completion"); MappedFieldType completionFieldType = fieldMapper.fieldType(); - ParsedDocument parsedDocument = defaultMapper.parse("test", "type1", "1", jsonBuilder() + ParsedDocument parsedDocument = defaultMapper.parse(SourceToParse.source("test", "type1", "1", jsonBuilder() .startObject() .startObject("completion") .array("input", "suggestion5", "suggestion6", "suggestion7") @@ -156,7 +160,8 @@ public void testIndexingWithContextList() throws Exception { .field("weight", 5) .endObject() .endObject() - .bytes()); + .bytes(), + XContentType.JSON)); IndexableField[] fields = parsedDocument.rootDoc().getFields(completionFieldType.name()); assertContextSuggestFields(fields, 3); } @@ -194,7 +199,8 @@ public void testIndexingWithMultipleContexts() throws Exception { .endObject() .endArray() .endObject(); - ParsedDocument parsedDocument = defaultMapper.parse("test", "type1", "1", builder.bytes()); + ParsedDocument parsedDocument = defaultMapper.parse(SourceToParse.source("test", "type1", "1", builder.bytes(), + XContentType.JSON)); IndexableField[] fields = parsedDocument.rootDoc().getFields(completionFieldType.name()); assertContextSuggestFields(fields, 3); } diff --git a/core/src/test/java/org/elasticsearch/snapshots/SharedClusterSnapshotRestoreIT.java b/core/src/test/java/org/elasticsearch/snapshots/SharedClusterSnapshotRestoreIT.java index b552daafb09cd..77a0514a140ba 100644 --- a/core/src/test/java/org/elasticsearch/snapshots/SharedClusterSnapshotRestoreIT.java +++ b/core/src/test/java/org/elasticsearch/snapshots/SharedClusterSnapshotRestoreIT.java @@ -845,6 +845,11 @@ public void testDataFileCorruptionDuringRestore() throws Exception { RestoreSnapshotResponse restoreSnapshotResponse = client.admin().cluster().prepareRestoreSnapshot("test-repo", "test-snap").setWaitForCompletion(true).execute().actionGet(); assertThat(restoreSnapshotResponse.getRestoreInfo().totalShards(), greaterThan(0)); assertThat(restoreSnapshotResponse.getRestoreInfo().failedShards(), equalTo(restoreSnapshotResponse.getRestoreInfo().totalShards())); + // we have to delete the index here manually, otherwise the cluster will keep + // trying to allocate the shards for the index, even though the restore operation + // is completed and marked as failed, which can lead to nodes having pending + // cluster states to process in their queue when the test is finished + client.admin().indices().prepareDelete("test-idx").get(); } public void testDeletionOfFailingToRecoverIndexShouldStopRestore() throws Exception { diff --git a/core/src/test/java/org/elasticsearch/transport/TCPTransportTests.java b/core/src/test/java/org/elasticsearch/transport/TCPTransportTests.java index c84fa38edc0c8..c14d6ec9e05fe 100644 --- a/core/src/test/java/org/elasticsearch/transport/TCPTransportTests.java +++ b/core/src/test/java/org/elasticsearch/transport/TCPTransportTests.java @@ -20,6 +20,7 @@ package org.elasticsearch.transport; import org.elasticsearch.Version; +import org.elasticsearch.action.ActionListener; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.common.bytes.BytesReference; import 
org.elasticsearch.common.compress.CompressorFactory; @@ -38,6 +39,8 @@ import java.util.List; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicBoolean; +import java.util.concurrent.atomic.AtomicReference; +import java.util.function.Consumer; import static org.hamcrest.Matchers.equalTo; @@ -152,6 +155,7 @@ public void testCompressRequest() throws IOException { final AtomicBoolean called = new AtomicBoolean(false); Req request = new Req(randomRealisticUnicodeOfLengthBetween(10, 100)); ThreadPool threadPool = new TestThreadPool(TCPTransportTests.class.getName()); + AtomicReference exceptionReference = new AtomicReference<>(); try { TcpTransport transport = new TcpTransport("test", Settings.builder().put("transport.tcp.compress", compressed).build(), threadPool, new BigArrays(Settings.EMPTY, null), null, null, null) { @@ -171,27 +175,31 @@ protected void closeChannels(List channel) throws IOException { } @Override - protected void sendMessage(Object o, BytesReference reference, Runnable sendListener) throws IOException { - StreamInput streamIn = reference.streamInput(); - streamIn.skip(TcpHeader.MARKER_BYTES_SIZE); - int len = streamIn.readInt(); - long requestId = streamIn.readLong(); - assertEquals(42, requestId); - byte status = streamIn.readByte(); - Version version = Version.fromId(streamIn.readInt()); - assertEquals(Version.CURRENT, version); - assertEquals(compressed, TransportStatus.isCompress(status)); - called.compareAndSet(false, true); - if (compressed) { - final int bytesConsumed = TcpHeader.HEADER_SIZE; - streamIn = CompressorFactory.compressor(reference.slice(bytesConsumed, reference.length() - bytesConsumed)) - .streamInput(streamIn); + protected void sendMessage(Object o, BytesReference reference, ActionListener listener) { + try { + StreamInput streamIn = reference.streamInput(); + streamIn.skip(TcpHeader.MARKER_BYTES_SIZE); + int len = streamIn.readInt(); + long requestId = streamIn.readLong(); + assertEquals(42, requestId); + byte status = streamIn.readByte(); + Version version = Version.fromId(streamIn.readInt()); + assertEquals(Version.CURRENT, version); + assertEquals(compressed, TransportStatus.isCompress(status)); + called.compareAndSet(false, true); + if (compressed) { + final int bytesConsumed = TcpHeader.HEADER_SIZE; + streamIn = CompressorFactory.compressor(reference.slice(bytesConsumed, reference.length() - bytesConsumed)) + .streamInput(streamIn); + } + threadPool.getThreadContext().readHeaders(streamIn); + assertEquals("foobar", streamIn.readString()); + Req readReq = new Req(""); + readReq.readFrom(streamIn); + assertEquals(request.value, readReq.value); + } catch (IOException e) { + exceptionReference.set(e); } - threadPool.getThreadContext().readHeaders(streamIn); - assertEquals("foobar", streamIn.readString()); - Req readReq = new Req(""); - readReq.readFrom(streamIn); - assertEquals(request.value, readReq.value); } @Override @@ -219,6 +227,7 @@ public NodeChannels getConnection(DiscoveryNode node) { Transport.Connection connection = transport.getConnection(node); connection.sendRequest(42, "foobar", request, TransportRequestOptions.EMPTY); assertTrue(called.get()); + assertNull("IOException while sending message.", exceptionReference.get()); } finally { ThreadPool.terminate(threadPool, 10, TimeUnit.SECONDS); } diff --git a/core/src/test/java/org/elasticsearch/transport/TransportActionProxyTests.java b/core/src/test/java/org/elasticsearch/transport/TransportActionProxyTests.java index e1cfc08dbd00c..bb1c70da34417 100644 --- 
a/core/src/test/java/org/elasticsearch/transport/TransportActionProxyTests.java +++ b/core/src/test/java/org/elasticsearch/transport/TransportActionProxyTests.java @@ -246,4 +246,16 @@ public void writeTo(StreamOutput out) throws IOException { } } + public void testGetAction() { + String action = "foo/bar"; + String proxyAction = TransportActionProxy.getProxyAction(action); + assertTrue(proxyAction.endsWith(action)); + assertEquals("internal:transport/proxy/foo/bar", proxyAction); + } + + public void testUnwrap() { + TransportRequest transportRequest = TransportActionProxy.wrapRequest(nodeA, TransportService.HandshakeRequest.INSTANCE); + assertTrue(transportRequest instanceof TransportActionProxy.ProxyRequest); + assertSame(TransportService.HandshakeRequest.INSTANCE, TransportActionProxy.unwrapRequest(transportRequest)); + } } diff --git a/core/src/test/java/org/elasticsearch/validate/SimpleValidateQueryIT.java b/core/src/test/java/org/elasticsearch/validate/SimpleValidateQueryIT.java index eb5c00b76d93c..65a54feabbe69 100644 --- a/core/src/test/java/org/elasticsearch/validate/SimpleValidateQueryIT.java +++ b/core/src/test/java/org/elasticsearch/validate/SimpleValidateQueryIT.java @@ -39,11 +39,14 @@ import java.io.IOException; import java.nio.charset.StandardCharsets; +import java.util.Arrays; +import java.util.List; import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_SHARDS; import static org.elasticsearch.index.query.QueryBuilders.queryStringQuery; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures; +import static org.hamcrest.Matchers.allOf; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.is; @@ -252,6 +255,37 @@ public void testExplainWithRewriteValidateQuery() throws Exception { containsString("field:huge field:pidgin"), true); } + public void testExplainWithRewriteValidateQueryAllShards() throws Exception { + client().admin().indices().prepareCreate("test") + .addMapping("type1", "field", "type=text,analyzer=whitespace") + .setSettings(SETTING_NUMBER_OF_SHARDS, 2).get(); + // We are relying on specific routing behaviors for the result to be right, so + // we cannot randomize the number of shards or change ids here. + client().prepareIndex("test", "type1", "1") + .setSource("field", "quick lazy huge brown pidgin").get(); + client().prepareIndex("test", "type1", "2") + .setSource("field", "the quick brown fox").get(); + client().prepareIndex("test", "type1", "3") + .setSource("field", "the quick lazy huge brown fox jumps over the tree").get(); + client().prepareIndex("test", "type1", "4") + .setSource("field", "the lazy dog quacks like a duck").get(); + refresh(); + + // prefix queries + assertExplanations(QueryBuilders.matchPhrasePrefixQuery("field", "qu"), + Arrays.asList( + equalTo("field:quick"), + allOf(containsString("field:quick"), containsString("field:quacks")) + ), true, true); + assertExplanations(QueryBuilders.matchPhrasePrefixQuery("field", "ju"), + Arrays.asList( + equalTo("field:jumps"), + equalTo("+MatchNoDocsQuery(\"empty MultiPhraseQuery\") +MatchNoDocsQuery(\"No " + + "terms supplied for org.elasticsearch.common.lucene.search." 
+ + "MultiPhrasePrefixQuery\")") + ), true, true); + } + public void testIrrelevantPropertiesBeforeQuery() throws IOException { createIndex("test"); ensureGreen(); @@ -280,4 +314,22 @@ private static void assertExplanation(QueryBuilder queryBuilder, Matcher assertThat(response.getQueryExplanation().get(0).getExplanation(), matcher); assertThat(response.isValid(), equalTo(true)); } + + private static void assertExplanations(QueryBuilder queryBuilder, + List> matchers, boolean withRewrite, + boolean allShards) { + ValidateQueryResponse response = client().admin().indices().prepareValidateQuery("test") + .setTypes("type1") + .setQuery(queryBuilder) + .setExplain(true) + .setRewrite(withRewrite) + .setAllShards(allShards) + .execute().actionGet(); + assertThat(response.getQueryExplanation().size(), equalTo(matchers.size())); + for (int i = 0; i < matchers.size(); i++) { + assertThat(response.getQueryExplanation().get(i).getError(), nullValue()); + assertThat(response.getQueryExplanation().get(i).getExplanation(), matchers.get(i)); + assertThat(response.isValid(), equalTo(true)); + } + } } diff --git a/core/src/test/resources/indices/bwc/index-5.3.0.zip b/core/src/test/resources/indices/bwc/index-5.3.0.zip new file mode 100644 index 0000000000000..091f7c1067cbf Binary files /dev/null and b/core/src/test/resources/indices/bwc/index-5.3.0.zip differ diff --git a/core/src/test/resources/indices/bwc/repo-5.3.0.zip b/core/src/test/resources/indices/bwc/repo-5.3.0.zip new file mode 100644 index 0000000000000..81a31dcb2a403 Binary files /dev/null and b/core/src/test/resources/indices/bwc/repo-5.3.0.zip differ diff --git a/dev-tools/smoke_test_rc.py b/dev-tools/smoke_test_rc.py index c35e6a0075a25..bfba16efe4234 100644 --- a/dev-tools/smoke_test_rc.py +++ b/dev-tools/smoke_test_rc.py @@ -44,6 +44,7 @@ import argparse import tempfile import os +from os.path import basename, dirname, isdir, join import signal import shutil import urllib @@ -57,26 +58,14 @@ from http.client import HTTPConnection -DEFAULT_PLUGINS = ["analysis-icu", - "analysis-kuromoji", - "analysis-phonetic", - "analysis-smartcn", - "analysis-stempel", - "discovery-azure-classic", - "discovery-ec2", - "discovery-file", - "discovery-gce", - "ingest-attachment", - "ingest-geoip", - "ingest-user-agent", - "mapper-attachments", - "mapper-murmur3", - "mapper-size", - "repository-azure", - "repository-gcs", - "repository-hdfs", - "repository-s3", - "store-smb"] +def find_official_plugins(): + plugins_dir = join(dirname(dirname(__file__)), 'plugins') + plugins = [] + for plugin in os.listdir(plugins_dir): + if isdir(join(plugins_dir, plugin)): + plugins.append(plugin) + return plugins +DEFAULT_PLUGINS = find_official_plugins() try: JAVA_HOME = os.environ['JAVA_HOME'] diff --git a/distribution/build.gradle b/distribution/build.gradle index e95ccf0932000..2a8094eb800e9 100644 --- a/distribution/build.gradle +++ b/distribution/build.gradle @@ -40,7 +40,8 @@ buildscript { } } -Collection distributions = project.subprojects.findAll { it.path.contains(':tools') == false } +Collection distributions = project.subprojects.findAll { + it.path.contains(':tools') == false && it.name != 'bwc-zip' } /***************************************************************************** * Notice file * @@ -82,7 +83,7 @@ project.rootProject.subprojects.findAll { it.path.startsWith(':modules:') }.each } // We would like to make sure integ tests for the distribution run after // integ tests for the modules included in the distribution. 
- project.configure(distributions.findAll { it.name != 'integ-test-zip' }) { Project distribution -> + project.configure(distributions.findAll { it.name != 'integ-test-zip'}) { Project distribution -> distribution.afterEvaluate({ // some integTest tasks will have multiple finalizers distribution.integTest.mustRunAfter module.tasks.find { t -> t.name.matches(".*integTest\$") } diff --git a/distribution/bwc-zip/build.gradle b/distribution/bwc-zip/build.gradle new file mode 100644 index 0000000000000..4370fae11fd42 --- /dev/null +++ b/distribution/bwc-zip/build.gradle @@ -0,0 +1,89 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +import java.util.regex.Matcher +import org.elasticsearch.gradle.LoggedExec + +/** + * This is a dummy project which does a local worktree checkout of the previous + * major version's stable branch, and builds a snapshot. This allows backcompat + * tests in the next major version to test against the next unreleased minor + * version, without relying on snapshots. + */ + +apply plugin: 'distribution' + +File checkoutDir = file("${buildDir}/bwc/checkout-5.x") +task createClone(type: LoggedExec) { + onlyIf { checkoutDir.exists() == false } + commandLine = ['git', 'clone', rootDir, checkoutDir] +} + +// we use regular Exec here to ensure we always get output, regardless of logging level +task findUpstream(type: Exec) { + dependsOn createClone + workingDir = checkoutDir + commandLine = ['git', 'remote', '-v'] + ignoreExitValue = true + ByteArrayOutputStream output = new ByteArrayOutputStream() + standardOutput = output + doLast { + if (execResult.exitValue != 0) { + output.toString('UTF-8').eachLine { line -> logger.error(line) } + execResult.assertNormalExitValue() + } + project.ext.upstreamExists = false + output.toString('UTF-8').eachLine { + if (it.contains("upstream")) { + project.ext.upstreamExists = true + } + } + } +} + +task addUpstream(type: LoggedExec) { + dependsOn findUpstream + onlyIf { project.ext.upstreamExists == false } + workingDir = checkoutDir + commandLine = ['git', 'remote', 'add', 'upstream', 'https://github.com/elastic/elasticsearch.git'] +} + +task fetchLatest(type: LoggedExec) { + dependsOn addUpstream + workingDir = checkoutDir + commandLine = ['git', 'fetch', 'upstream'] +} + +task checkoutBwcBranch(type: LoggedExec) { + dependsOn fetchLatest + workingDir = checkoutDir + commandLine = ['git', 'checkout', 'upstream/5.x'] +} + +File bwcZip = file("${checkoutDir}/distribution/zip/build/distributions/elasticsearch-${bwcVersion}.zip") +task buildBwcVersion(type: GradleBuild) { + dependsOn checkoutBwcBranch + dir = checkoutDir + tasks = [':distribution:zip:assemble'] +} + +artifacts { + 'default' file: bwcZip, name: 'elasticsearch', type: 'zip', builtBy: buildBwcVersion +} + diff --git 
a/distribution/src/main/resources/bin/elasticsearch.in.bat b/distribution/src/main/resources/bin/elasticsearch.in.bat index 98b6a16316c2d..a25008338729c 100644 --- a/distribution/src/main/resources/bin/elasticsearch.in.bat +++ b/distribution/src/main/resources/bin/elasticsearch.in.bat @@ -15,7 +15,7 @@ for %%I in ("%SCRIPT_DIR%..") do set ES_HOME=%%~dpfI REM check in case a user was using this mechanism if "%ES_CLASSPATH%" == "" ( -set ES_CLASSPATH=!ES_HOME!/lib/elasticsearch-${project.version}.jar;!ES_HOME!/lib/* +set ES_CLASSPATH=!ES_HOME!/lib/* ) else ( ECHO Error: Don't modify the classpath with ES_CLASSPATH, Best is to add 1>&2 ECHO additional elements via the plugin mechanism, or if code must really be 1>&2 diff --git a/distribution/src/main/resources/bin/elasticsearch.in.sh b/distribution/src/main/resources/bin/elasticsearch.in.sh index 58b26a2d6ebc7..2d22439225697 100644 --- a/distribution/src/main/resources/bin/elasticsearch.in.sh +++ b/distribution/src/main/resources/bin/elasticsearch.in.sh @@ -10,4 +10,4 @@ EOF exit 1 fi -ES_CLASSPATH="$ES_HOME/lib/elasticsearch-${project.version}.jar:$ES_HOME/lib/*" +ES_CLASSPATH="$ES_HOME/lib/*" diff --git a/docs/build.gradle b/docs/build.gradle index 1cb86472f70ba..aa24a3ac8a77e 100644 --- a/docs/build.gradle +++ b/docs/build.gradle @@ -24,8 +24,6 @@ apply plugin: 'elasticsearch.docs-test' * only remove entries from this list. When it is empty we'll remove it * entirely and have a party! There will be cake and everything.... */ buildRestTests.expectedUnconvertedCandidates = [ - 'reference/aggregations/bucket/geodistance-aggregation.asciidoc', - 'reference/aggregations/bucket/geohashgrid-aggregation.asciidoc', 'reference/aggregations/bucket/iprange-aggregation.asciidoc', 'reference/aggregations/bucket/missing-aggregation.asciidoc', 'reference/aggregations/bucket/nested-aggregation.asciidoc', @@ -36,8 +34,6 @@ buildRestTests.expectedUnconvertedCandidates = [ 'reference/aggregations/matrix/stats-aggregation.asciidoc', 'reference/aggregations/metrics/cardinality-aggregation.asciidoc', 'reference/aggregations/metrics/extendedstats-aggregation.asciidoc', - 'reference/aggregations/metrics/geobounds-aggregation.asciidoc', - 'reference/aggregations/metrics/geocentroid-aggregation.asciidoc', 'reference/aggregations/metrics/percentile-aggregation.asciidoc', 'reference/aggregations/metrics/percentile-rank-aggregation.asciidoc', 'reference/aggregations/metrics/scripted-metric-aggregation.asciidoc', @@ -57,31 +53,6 @@ buildRestTests.expectedUnconvertedCandidates = [ 'reference/aggregations/pipeline/serial-diff-aggregation.asciidoc', 'reference/aggregations/pipeline/stats-bucket-aggregation.asciidoc', 'reference/aggregations/pipeline/sum-bucket-aggregation.asciidoc', - 'reference/analysis/analyzers/lang-analyzer.asciidoc', - 'reference/analysis/analyzers/pattern-analyzer.asciidoc', - 'reference/analysis/charfilters/htmlstrip-charfilter.asciidoc', - 'reference/analysis/charfilters/pattern-replace-charfilter.asciidoc', - 'reference/analysis/tokenfilters/asciifolding-tokenfilter.asciidoc', - 'reference/analysis/tokenfilters/cjk-bigram-tokenfilter.asciidoc', - 'reference/analysis/tokenfilters/common-grams-tokenfilter.asciidoc', - 'reference/analysis/tokenfilters/compound-word-tokenfilter.asciidoc', - 'reference/analysis/tokenfilters/elision-tokenfilter.asciidoc', - 'reference/analysis/tokenfilters/hunspell-tokenfilter.asciidoc', - 'reference/analysis/tokenfilters/keep-types-tokenfilter.asciidoc', - 
'reference/analysis/tokenfilters/keep-words-tokenfilter.asciidoc', - 'reference/analysis/tokenfilters/keyword-marker-tokenfilter.asciidoc', - 'reference/analysis/tokenfilters/keyword-repeat-tokenfilter.asciidoc', - 'reference/analysis/tokenfilters/limit-token-count-tokenfilter.asciidoc', - 'reference/analysis/tokenfilters/lowercase-tokenfilter.asciidoc', - 'reference/analysis/tokenfilters/pattern-capture-tokenfilter.asciidoc', - 'reference/analysis/tokenfilters/snowball-tokenfilter.asciidoc', - 'reference/analysis/tokenfilters/stemmer-override-tokenfilter.asciidoc', - 'reference/analysis/tokenfilters/stemmer-tokenfilter.asciidoc', - 'reference/analysis/tokenfilters/stop-tokenfilter.asciidoc', - 'reference/analysis/tokenfilters/synonym-tokenfilter.asciidoc', - 'reference/analysis/tokenfilters/synonym-graph-tokenfilter.asciidoc', - 'reference/analysis/tokenfilters/word-delimiter-tokenfilter.asciidoc', - 'reference/analysis/tokenfilters/word-delimiter-graph-tokenfilter.asciidoc', 'reference/cat/snapshots.asciidoc', 'reference/cat/templates.asciidoc', 'reference/cat/thread_pool.asciidoc', @@ -112,18 +83,13 @@ buildRestTests.expectedUnconvertedCandidates = [ 'reference/mapping/fields/all-field.asciidoc', 'reference/mapping/params/analyzer.asciidoc', 'reference/mapping/types/binary.asciidoc', - 'reference/mapping/types/geo-point.asciidoc', - 'reference/mapping/types/geo-shape.asciidoc', 'reference/mapping/types/ip.asciidoc', 'reference/mapping/types/nested.asciidoc', 'reference/mapping/types/object.asciidoc', 'reference/mapping/types/percolator.asciidoc', 'reference/modules/scripting/security.asciidoc', - 'reference/modules/scripting/using.asciidoc', 'reference/modules/cross-cluster-search.asciidoc', // this is hard to test since we need 2 clusters -- maybe we can trick it into referencing itself... - 'reference/query-dsl/exists-query.asciidoc', 'reference/query-dsl/function-score-query.asciidoc', - 'reference/query-dsl/geo-shape-query.asciidoc', 'reference/search/field-stats.asciidoc', 'reference/search/profile.asciidoc', 'reference/search/request/highlighting.asciidoc', @@ -140,13 +106,20 @@ integTestCluster { Closure configFile = { extraConfigFile it, "src/test/cluster/config/$it" } + configFile 'scripts/calculate_score.painless' configFile 'scripts/my_script.painless' configFile 'scripts/my_init_script.painless' configFile 'scripts/my_map_script.painless' configFile 'scripts/my_combine_script.painless' configFile 'scripts/my_reduce_script.painless' + configFile 'analysis/example_word_list.txt' + configFile 'analysis/hyphenation_patterns.xml' + configFile 'analysis/synonym.txt' + configFile 'analysis/stemmer_override.txt' configFile 'userdict_ja.txt' configFile 'KeywordTokenizer.rbbi' + extraConfigFile 'hunspell/en_US/en_US.aff', '../core/src/test/resources/indices/analyze/conf_dir/hunspell/en_US/en_US.aff' + extraConfigFile 'hunspell/en_US/en_US.dic', '../core/src/test/resources/indices/analyze/conf_dir/hunspell/en_US/en_US.dic' // Whitelist reindexing from the local node so we can test it. 
setting 'reindex.remote.whitelist', '127.0.0.1:*' } diff --git a/docs/java-api/admin/indices/put-mapping.asciidoc b/docs/java-api/admin/indices/put-mapping.asciidoc index 9b085975077cc..6c2a5406528bc 100644 --- a/docs/java-api/admin/indices/put-mapping.asciidoc +++ b/docs/java-api/admin/indices/put-mapping.asciidoc @@ -60,7 +60,7 @@ You can use the same API to update an existing mapping: [source,java] -------------------------------------------------- client.admin().indices().preparePutMapping("twitter") <1> - .setType("tweet") <2> + .setType("user") <2> .setSource("{\n" + <3> " \"properties\": {\n" + " \"user_name\": {\n" + diff --git a/docs/java-rest/high-level/document/delete.asciidoc b/docs/java-rest/high-level/document/delete.asciidoc index 36cfc54994e62..e9ba8b1940856 100644 --- a/docs/java-rest/high-level/document/delete.asciidoc +++ b/docs/java-rest/high-level/document/delete.asciidoc @@ -6,19 +6,20 @@ The most simple Delete Request needs is: -["source","java",subs="attributes,callouts"] +["source","java",subs="attributes,callouts,macros"] -------------------------------------------------- -sys2::[perl -ne 'exit if /end::delete-request/; print if $tag; $tag = $tag || /tag::delete-request/' {docdir}/../../client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/DeleteDocumentationIT.java] +include-tagged::{doc-tests}/DeleteDocumentationIT.java[delete-request] -------------------------------------------------- <1> Index name <2> Type <3> Document id + You can also provide the following properties: -["source","java",subs="attributes,callouts"] +["source","java",subs="attributes,callouts,macros"] -------------------------------------------------- -sys2::[perl -ne 'exit if /end::delete-request-props/; print if $tag; $tag = $tag || /tag::delete-request-props/' {docdir}/../../client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/DeleteDocumentationIT.java] +include-tagged::{doc-tests}/DeleteDocumentationIT.java[delete-request-props] -------------------------------------------------- <1> Timeout <2> Timeout as String @@ -30,17 +31,17 @@ sys2::[perl -ne 'exit if /end::delete-request-props/; print if $tag; $tag = $tag [[java-rest-high-document-delete-sync]] ==== Execution -["source","java",subs="attributes,callouts"] +["source","java",subs="attributes,callouts,macros"] -------------------------------------------------- -sys2::[perl -ne 'exit if /end::delete-execute/; print if $tag; $tag = $tag || /tag::delete-execute/' {docdir}/../../client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/DeleteDocumentationIT.java] +include-tagged::{doc-tests}/DeleteDocumentationIT.java[delete-execute] -------------------------------------------------- [[java-rest-high-document-delete-async]] ==== Asynchronous Execution -["source","java",subs="attributes,callouts"] +["source","java",subs="attributes,callouts,macros"] -------------------------------------------------- -sys2::[perl -ne 'exit if /end::delete-execute-async/; print if $tag; $tag = $tag || /tag::delete-execute-async/' {docdir}/../../client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/DeleteDocumentationIT.java] +include-tagged::{doc-tests}/DeleteDocumentationIT.java[delete-execute-async] -------------------------------------------------- <1> Implement if needed when execution did not throw an exception <2> Implement if needed in case of failure @@ -50,18 +51,17 @@ sys2::[perl -ne 'exit if /end::delete-execute-async/; print if $tag; $tag = $tag In the Delete 
Response object, you can check for example the result of the operation: -["source","java",subs="attributes,callouts"] +["source","java",subs="attributes,callouts,macros"] -------------------------------------------------- -sys2::[perl -ne 'exit if /end::delete-notfound/; print if $tag; $tag = $tag || /tag::delete-notfound/' {docdir}/../../client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/DeleteDocumentationIT.java] +include-tagged::{doc-tests}/DeleteDocumentationIT.java[delete-notfound] -------------------------------------------------- <1> Do something if we did not find the document which should have been deleted Note that if you have a version conflict because you defined the version within the <>, it will raise an `ElasticsearchException` like: -["source","java",subs="attributes,callouts"] +["source","java",subs="attributes,callouts,macros"] -------------------------------------------------- -sys2::[perl -ne 'exit if /end::delete-conflict/; print if $tag; $tag = $tag || /tag::delete-conflict/' {docdir}/../../client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/DeleteDocumentationIT.java] +include-tagged::{doc-tests}/DeleteDocumentationIT.java[delete-conflict] -------------------------------------------------- <1> We got a version conflict - diff --git a/docs/java-rest/high-level/document/index.asciidoc b/docs/java-rest/high-level/document/index.asciidoc index 15eee483baab0..32815794a1ba2 100644 --- a/docs/java-rest/high-level/document/index.asciidoc +++ b/docs/java-rest/high-level/document/index.asciidoc @@ -1 +1,5 @@ +:doc-tests: {docdir}/../../client/rest-high-level/src/test/java/org/elasticsearch/client/documentation + include::delete.asciidoc[] + +:doc-tests!: diff --git a/docs/plugins/plugin-script.asciidoc b/docs/plugins/plugin-script.asciidoc index 3a3616ac0e7ef..9eb6cae11692d 100644 --- a/docs/plugins/plugin-script.asciidoc +++ b/docs/plugins/plugin-script.asciidoc @@ -80,8 +80,8 @@ to a local Java truststore and pass the location to the script as follows: sudo ES_JAVA_OPTS="-Djavax.net.ssl.trustStore=/path/to/trustStore.jks" bin/elasticsearch-plugin install https://.... ----------------------------------- -[[listing-removing]] -=== Listing and Removing Installed Plugins +[[listing-removing-updating]] +=== Listing, Removing and Updating Installed Plugins [float] === Listing plugins @@ -109,6 +109,18 @@ sudo bin/elasticsearch-plugin remove [pluginname] After a Java plugin has been removed, you will need to restart the node to complete the removal process. +[float] +=== Updating plugins + +Plugins are built for a specific version of Elasticsearch, and therefore must be reinstalled +each time Elasticsearch is updated. 
+ +[source,shell] +----------------------------------- +sudo bin/elasticsearch-plugin remove [pluginname] +sudo bin/elasticsearch-plugin install [pluginname] +----------------------------------- + === Other command line parameters The `plugin` scripts supports a number of other command line parameters: @@ -175,8 +187,8 @@ Or on Windows: [source,shell] ------------------------------------ -set ES_JAVA_OPTS="-DproxyHost=host_name -DproxyPort=port_number -Dhttps.proxyHost=host_name -Dhttps.proxyPort=https_port_number" -bin/elasticsearch-plugin install analysis-icu +set ES_JAVA_OPTS="-Dhttp.proxyHost=host_name -Dhttp.proxyPort=port_number -Dhttps.proxyHost=host_name -Dhttps.proxyPort=https_port_number" +bin\elasticsearch-plugin install analysis-icu ------------------------------------ === Plugins directory diff --git a/docs/plugins/repository-azure.asciidoc b/docs/plugins/repository-azure.asciidoc index e6d8c682ba7a1..ab2f9dd05e9b2 100644 --- a/docs/plugins/repository-azure.asciidoc +++ b/docs/plugins/repository-azure.asciidoc @@ -72,6 +72,12 @@ It's not set by default which means that elasticsearch is using the http://azure.github.io/azure-storage-java/com/microsoft/azure/storage/RequestOptions.html#setTimeoutIntervalInMs(java.lang.Integer)[default value] set by the azure client (known as 5 minutes). +`max_retries` can help to control the exponential backoff policy. It sets the number of retries +that are attempted in case of failures before the snapshot is considered failed. Defaults to `3` retries. +The initial backoff period is defined by the Azure SDK as `30s`, which means `30s` of wait time +before retrying after a first timeout or failure. The maximum backoff period is defined by the Azure SDK as +`90s`. + [source,yaml] ---- cloud: @@ -82,13 +88,15 @@ cloud: account: your_azure_storage_account1 key: your_azure_storage_key1 default: true + max_retries: 7 my_account2: account: your_azure_storage_account2 key: your_azure_storage_key2 timeout: 30s ---- -In this example, timeout will be 10s for `my_account1` and 30s for `my_account2`. +In this example, timeout will be `10s` per try for `my_account1` with `7` retries before failing +and `30s` per try for `my_account2` with `3` retries. [[repository-azure-repository-settings]] ===== Repository settings @@ -185,22 +193,6 @@ client.admin().cluster().preparePutRepository("my_backup_java1") ).get(); ---- -[[repository-azure-global-settings]] -===== Global repositories settings - -All those repository settings can also be defined globally in `elasticsearch.yml` file using prefix -`repositories.azure.`.
For example: - -[source,yaml] ----- -repositories.azure: - container: backup-container - base_path: backups - chunk_size: 32m - compress": true ----- - - [[repository-azure-validation]] ===== Repository validation rules diff --git a/docs/reference/aggregations/bucket/geodistance-aggregation.asciidoc b/docs/reference/aggregations/bucket/geodistance-aggregation.asciidoc index a051fc00c4f2e..d3d13d4ac6f98 100644 --- a/docs/reference/aggregations/bucket/geodistance-aggregation.asciidoc +++ b/docs/reference/aggregations/bucket/geodistance-aggregation.asciidoc @@ -5,6 +5,34 @@ A multi-bucket aggregation that works on `geo_point` fields and conceptually wor [source,js] -------------------------------------------------- +PUT /museums +{ + "mappings": { + "doc": { + "properties": { + "location": { + "type": "geo_point" + } + } + } + } +} + +POST /museums/doc/_bulk?refresh +{"index":{"_id":1}} +{"location": "52.374081,4.912350", "name": "NEMO Science Museum"} +{"index":{"_id":2}} +{"location": "52.369219,4.901618", "name": "Museum Het Rembrandthuis"} +{"index":{"_id":3}} +{"location": "52.371667,4.914722", "name": "Nederlands Scheepvaartmuseum"} +{"index":{"_id":4}} +{"location": "51.222900,4.405200", "name": "Letterenhuis"} +{"index":{"_id":5}} +{"location": "48.861111,2.336389", "name": "Musée du Louvre"} +{"index":{"_id":6}} +{"location": "48.860000,2.327000", "name": "Musée d'Orsay"} + +POST /museums/_search?size=0 { "aggs" : { "rings_around_amsterdam" : { @@ -12,46 +40,49 @@ A multi-bucket aggregation that works on `geo_point` fields and conceptually wor "field" : "location", "origin" : "52.3760, 4.894", "ranges" : [ - { "to" : 100 }, - { "from" : 100, "to" : 300 }, - { "from" : 300 } + { "to" : 100000 }, + { "from" : 100000, "to" : 300000 }, + { "from" : 300000 } ] } } } } -------------------------------------------------- +// CONSOLE Response: [source,js] -------------------------------------------------- { + ... "aggregations": { - "rings" : { + "rings_around_amsterdam" : { "buckets": [ { - "key": "*-100.0", - "from": 0, - "to": 100.0, + "key": "*-100000.0", + "from": 0.0, + "to": 100000.0, "doc_count": 3 }, { - "key": "100.0-300.0", - "from": 100.0, - "to": 300.0, + "key": "100000.0-300000.0", + "from": 100000.0, + "to": 300000.0, "doc_count": 1 }, { - "key": "300.0-*", - "from": 300.0, - "doc_count": 7 + "key": "300000.0-*", + "from": 300000.0, + "doc_count": 2 } ] } } } -------------------------------------------------- +// TESTRESPONSE[s/\.\.\./"took": $body.took,"_shards": $body._shards,"hits":$body.hits,"timed_out":false,/] The specified field must be of type `geo_point` (which can only be set explicitly in the mappings). And it can also hold an array of `geo_point` fields, in which case all will be taken into account during aggregation. The origin point can accept all formats supported by the <>: @@ -59,17 +90,18 @@ The specified field must be of type `geo_point` (which can only be set explicitl * String format: `"52.3760, 4.894"` - where the first number is the `lat` and the second is the `lon` * Array format: `[4.894, 52.3760]` - which is based on the `GeoJson` standard and where the first number is the `lon` and the second one is the `lat` -By default, the distance unit is `m` (metres) but it can also accept: `mi` (miles), `in` (inches), `yd` (yards), `km` (kilometers), `cm` (centimeters), `mm` (millimeters). +By default, the distance unit is `m` (meters) but it can also accept: `mi` (miles), `in` (inches), `yd` (yards), `km` (kilometers), `cm` (centimeters), `mm` (millimeters). 
[source,js] -------------------------------------------------- +POST /museums/_search?size=0 { "aggs" : { "rings" : { "geo_distance" : { "field" : "location", "origin" : "52.3760, 4.894", - "unit" : "mi", <1> + "unit" : "km", <1> "ranges" : [ { "to" : 100 }, { "from" : 100, "to" : 300 }, @@ -80,19 +112,23 @@ By default, the distance unit is `m` (metres) but it can also accept: `mi` (mile } } -------------------------------------------------- +// CONSOLE +// TEST[continued] -<1> The distances will be computed as miles +<1> The distances will be computed in kilometers There are two distance calculation modes: `arc` (the default), and `plane`. The `arc` calculation is the most accurate. The `plane` is the fastest but least accurate. Consider using `plane` when your search context is "narrow", and spans smaller geographical areas (~5km). `plane` will return higher error margins for searches across very large areas (e.g. cross continent search). The distance calculation type can be set using the `distance_type` parameter: [source,js] -------------------------------------------------- +POST /museums/_search?size=0 { "aggs" : { "rings" : { "geo_distance" : { "field" : "location", "origin" : "52.3760, 4.894", + "unit" : "km", "distance_type" : "plane", "ranges" : [ { "to" : 100 }, @@ -104,3 +140,5 @@ There are two distance calculation modes: `arc` (the default), and `plane`. The } } -------------------------------------------------- +// CONSOLE +// TEST[continued] diff --git a/docs/reference/aggregations/bucket/geohashgrid-aggregation.asciidoc b/docs/reference/aggregations/bucket/geohashgrid-aggregation.asciidoc index 17ca509e3c5df..84f70185aa97f 100644 --- a/docs/reference/aggregations/bucket/geohashgrid-aggregation.asciidoc +++ b/docs/reference/aggregations/bucket/geohashgrid-aggregation.asciidoc @@ -19,9 +19,37 @@ The specified field must be of type `geo_point` (which can only be set explicitl [source,js] -------------------------------------------------- +PUT /museums +{ + "mappings": { + "doc": { + "properties": { + "location": { + "type": "geo_point" + } + } + } + } +} + +POST /museums/doc/_bulk?refresh +{"index":{"_id":1}} +{"location": "52.374081,4.912350", "name": "NEMO Science Museum"} +{"index":{"_id":2}} +{"location": "52.369219,4.901618", "name": "Museum Het Rembrandthuis"} +{"index":{"_id":3}} +{"location": "52.371667,4.914722", "name": "Nederlands Scheepvaartmuseum"} +{"index":{"_id":4}} +{"location": "51.222900,4.405200", "name": "Letterenhuis"} +{"index":{"_id":5}} +{"location": "48.861111,2.336389", "name": "Musée du Louvre"} +{"index":{"_id":6}} +{"location": "48.860000,2.327000", "name": "Musée d'Orsay"} + +POST /museums/_search?size=0 { "aggregations" : { - "myLarge-GrainGeoHashGrid" : { + "large-grid" : { "geohash_grid" : { "field" : "location", "precision" : 3 @@ -30,30 +58,35 @@ The specified field must be of type `geo_point` (which can only be set explicitl } } -------------------------------------------------- +// CONSOLE Response: [source,js] -------------------------------------------------- { + ... 
"aggregations": { - "myLarge-GrainGeoHashGrid": { + "large-grid": { "buckets": [ { - "key": "svz", - "doc_count": 10964 + "key": "u17", + "doc_count": 3 + }, + { + "key": "u09", + "doc_count": 2 }, { - "key": "sv8", - "doc_count": 3198 + "key": "u15", + "doc_count": 1 } ] } } } -------------------------------------------------- - - +// TESTRESPONSE[s/\.\.\./"took": $body.took,"_shards": $body._shards,"hits":$body.hits,"timed_out":false,/] ==== High-precision requests @@ -61,29 +94,32 @@ When requesting detailed buckets (typically for displaying a "zoomed in" map) a [source,js] -------------------------------------------------- +POST /museums/_search?size=0 { "aggregations" : { - "zoomedInView" : { + "zoomed-in" : { "filter" : { "geo_bounding_box" : { "location" : { - "top_left" : "51.73, 0.9", - "bottom_right" : "51.55, 1.1" + "top_left" : "52.4, 4.9", + "bottom_right" : "52.3, 5.0" } } }, "aggregations":{ "zoom1":{ "geohash_grid" : { - "field":"location", - "precision":8 + "field": "location", + "precision": 8 } } } } } - } +} -------------------------------------------------- +// CONSOLE +// TEST[continued] ==== Cell dimensions at the equator The table below shows the metric dimensions for cells covered by various string lengths of geohash. diff --git a/docs/reference/aggregations/metrics/geobounds-aggregation.asciidoc b/docs/reference/aggregations/metrics/geobounds-aggregation.asciidoc index ade59477ee372..4d78e0c30821f 100644 --- a/docs/reference/aggregations/metrics/geobounds-aggregation.asciidoc +++ b/docs/reference/aggregations/metrics/geobounds-aggregation.asciidoc @@ -8,9 +8,37 @@ Example: [source,js] -------------------------------------------------- +PUT /museums +{ + "mappings": { + "doc": { + "properties": { + "location": { + "type": "geo_point" + } + } + } + } +} + +POST /museums/doc/_bulk?refresh +{"index":{"_id":1}} +{"location": "52.374081,4.912350", "name": "NEMO Science Museum"} +{"index":{"_id":2}} +{"location": "52.369219,4.901618", "name": "Museum Het Rembrandthuis"} +{"index":{"_id":3}} +{"location": "52.371667,4.914722", "name": "Nederlands Scheepvaartmuseum"} +{"index":{"_id":4}} +{"location": "51.222900,4.405200", "name": "Letterenhuis"} +{"index":{"_id":5}} +{"location": "48.861111,2.336389", "name": "Musée du Louvre"} +{"index":{"_id":6}} +{"location": "48.860000,2.327000", "name": "Musée d'Orsay"} + +POST /museums/_search?size=0 { "query" : { - "match" : { "business_type" : "shop" } + "match" : { "name" : "musée" } }, "aggs" : { "viewport" : { @@ -22,6 +50,7 @@ Example: } } -------------------------------------------------- +// CONSOLE <1> The `geo_bounds` aggregation specifies the field to use to obtain the bounds <2> `wrap_longitude` is an optional parameter which specifies whether the bounding box should be allowed to overlap the international date line. The default value is `true` @@ -34,20 +63,20 @@ The response for the above aggregation: -------------------------------------------------- { ... 
- "aggregations": { "viewport": { "bounds": { "top_left": { - "lat": 80.45, - "lon": -160.22 + "lat": 48.86111099738628, + "lon": 2.3269999679178 }, "bottom_right": { - "lat": 40.65, - "lon": 42.57 + "lat": 48.85999997612089, + "lon": 2.3363889567553997 } } } } } -------------------------------------------------- +// TESTRESPONSE[s/\.\.\./"took": $body.took,"_shards": $body._shards,"hits":$body.hits,"timed_out":false,/] diff --git a/docs/reference/aggregations/metrics/geocentroid-aggregation.asciidoc b/docs/reference/aggregations/metrics/geocentroid-aggregation.asciidoc index 8f871dc8dbcbc..89aa091bba2f0 100644 --- a/docs/reference/aggregations/metrics/geocentroid-aggregation.asciidoc +++ b/docs/reference/aggregations/metrics/geocentroid-aggregation.asciidoc @@ -3,15 +3,39 @@ A metric aggregation that computes the weighted centroid from all coordinate values for a <> field. - Example: [source,js] -------------------------------------------------- +PUT /museums +{ + "mappings": { + "doc": { + "properties": { + "location": { + "type": "geo_point" + } + } + } + } +} + +POST /museums/doc/_bulk?refresh +{"index":{"_id":1}} +{"location": "52.374081,4.912350", "city": "Amsterdam", "name": "NEMO Science Museum"} +{"index":{"_id":2}} +{"location": "52.369219,4.901618", "city": "Amsterdam", "name": "Museum Het Rembrandthuis"} +{"index":{"_id":3}} +{"location": "52.371667,4.914722", "city": "Amsterdam", "name": "Nederlands Scheepvaartmuseum"} +{"index":{"_id":4}} +{"location": "51.222900,4.405200", "city": "Antwerp", "name": "Letterenhuis"} +{"index":{"_id":5}} +{"location": "48.861111,2.336389", "city": "Paris", "name": "Musée du Louvre"} +{"index":{"_id":6}} +{"location": "48.860000,2.327000", "city": "Paris", "name": "Musée d'Orsay"} + +POST /museums/_search?size=0 { - "query" : { - "match" : { "crime" : "burglary" } - }, "aggs" : { "centroid" : { "geo_centroid" : { @@ -21,6 +45,7 @@ Example: } } -------------------------------------------------- +// CONSOLE <1> The `geo_centroid` aggregation specifies the field to use for computing the centroid. (NOTE: field must be a <> type) @@ -32,18 +57,17 @@ The response for the above aggregation: -------------------------------------------------- { ... - "aggregations": { "centroid": { "location": { - "lat": 80.45, - "lon": -160.22 + "lat": 51.009829603135586, + "lon": 3.966213036328554 } } } } -------------------------------------------------- - +// TESTRESPONSE[s/\.\.\./"took": $body.took,"_shards": $body._shards,"hits":$body.hits,"timed_out":false,/] The `geo_centroid` aggregation is more interesting when combined as a sub-aggregation to other bucket aggregations. @@ -51,13 +75,11 @@ Example: [source,js] -------------------------------------------------- +POST /museums/_search?size=0 { - "query" : { - "match" : { "crime" : "burglary" } - }, "aggs" : { - "towns" : { - "terms" : { "field" : "town" }, + "cities" : { + "terms" : { "field" : "city.keyword" }, "aggs" : { "centroid" : { "geo_centroid" : { "field" : "location" } @@ -67,9 +89,12 @@ Example: } } -------------------------------------------------- +// CONSOLE +// TEST[continued] -The above example uses `geo_centroid` as a sub-aggregation to a <> bucket aggregation -for finding the central location for all crimes of type burglary in each town. +The above example uses `geo_centroid` as a sub-aggregation to a +<> bucket aggregation +for finding the central location for museums in each city. 
The response for the above aggregation: @@ -77,28 +102,44 @@ The response for the above aggregation: -------------------------------------------------- { ... - - "buckets": [ - { - "key": "Los Altos", - "doc_count": 113, - "centroid": { - "location": { - "lat": 37.3924582824111, - "lon": -122.12104808539152 - } - } - }, - { - "key": "Mountain View", - "doc_count": 92, - "centroid": { - "location": { - "lat": 37.382152481004596, - "lon": -122.08116559311748 - } - } + "aggregations": { + "cities": { + "sum_other_doc_count": 0, + "doc_count_error_upper_bound": 0, + "buckets": [ + { + "key": "Amsterdam", + "doc_count": 3, + "centroid": { + "location": { + "lat": 52.371655656024814, + "lon": 4.909563269466162 + } + } + }, + { + "key": "Paris", + "doc_count": 2, + "centroid": { + "location": { + "lat": 48.86055544484407, + "lon": 2.331694420427084 + } + } + }, + { + "key": "Antwerp", + "doc_count": 1, + "centroid": { + "location": { + "lat": 51.222899928689, + "lon": 4.405199903994799 + } + } + } + ] } - ] + } } --------------------------------------------------- \ No newline at end of file +-------------------------------------------------- +// TESTRESPONSE[s/\.\.\./"took": $body.took,"_shards": $body._shards,"hits":$body.hits,"timed_out":false,/] diff --git a/docs/reference/analysis/analyzers/lang-analyzer.asciidoc b/docs/reference/analysis/analyzers/lang-analyzer.asciidoc index 02843789c4b3b..65cc30780b1a7 100644 --- a/docs/reference/analysis/analyzers/lang-analyzer.asciidoc +++ b/docs/reference/analysis/analyzers/lang-analyzer.asciidoc @@ -77,6 +77,7 @@ The `arabic` analyzer could be reimplemented as a `custom` analyzer as follows: [source,js] ---------------------------------------------------- +PUT /arabic_example { "settings": { "analysis": { @@ -87,7 +88,7 @@ The `arabic` analyzer could be reimplemented as a `custom` analyzer as follows: }, "arabic_keywords": { "type": "keyword_marker", - "keywords": [] <2> + "keywords": ["مثال"] <2> }, "arabic_stemmer": { "type": "stemmer", @@ -110,6 +111,7 @@ The `arabic` analyzer could be reimplemented as a `custom` analyzer as follows: } } ---------------------------------------------------- +// CONSOLE <1> The default stopwords can be overridden with the `stopwords` or `stopwords_path` parameters. <2> This filter should be removed unless there are words which should @@ -122,6 +124,7 @@ The `armenian` analyzer could be reimplemented as a `custom` analyzer as follows [source,js] ---------------------------------------------------- +PUT /armenian_example { "settings": { "analysis": { @@ -132,7 +135,7 @@ The `armenian` analyzer could be reimplemented as a `custom` analyzer as follows }, "armenian_keywords": { "type": "keyword_marker", - "keywords": [] <2> + "keywords": ["օրինակ"] <2> }, "armenian_stemmer": { "type": "stemmer", @@ -154,6 +157,7 @@ The `armenian` analyzer could be reimplemented as a `custom` analyzer as follows } } ---------------------------------------------------- +// CONSOLE <1> The default stopwords can be overridden with the `stopwords` or `stopwords_path` parameters. 
<2> This filter should be removed unless there are words which should @@ -166,6 +170,7 @@ The `basque` analyzer could be reimplemented as a `custom` analyzer as follows: [source,js] ---------------------------------------------------- +PUT /basque_example { "settings": { "analysis": { @@ -176,7 +181,7 @@ The `basque` analyzer could be reimplemented as a `custom` analyzer as follows: }, "basque_keywords": { "type": "keyword_marker", - "keywords": [] <2> + "keywords": ["Adibidez"] <2> }, "basque_stemmer": { "type": "stemmer", @@ -198,6 +203,7 @@ The `basque` analyzer could be reimplemented as a `custom` analyzer as follows: } } ---------------------------------------------------- +// CONSOLE <1> The default stopwords can be overridden with the `stopwords` or `stopwords_path` parameters. <2> This filter should be removed unless there are words which should @@ -210,6 +216,7 @@ The `brazilian` analyzer could be reimplemented as a `custom` analyzer as follow [source,js] ---------------------------------------------------- +PUT /brazilian_example { "settings": { "analysis": { @@ -220,7 +227,7 @@ The `brazilian` analyzer could be reimplemented as a `custom` analyzer as follow }, "brazilian_keywords": { "type": "keyword_marker", - "keywords": [] <2> + "keywords": ["exemplo"] <2> }, "brazilian_stemmer": { "type": "stemmer", @@ -242,6 +249,7 @@ The `brazilian` analyzer could be reimplemented as a `custom` analyzer as follow } } ---------------------------------------------------- +// CONSOLE <1> The default stopwords can be overridden with the `stopwords` or `stopwords_path` parameters. <2> This filter should be removed unless there are words which should @@ -254,6 +262,7 @@ The `bulgarian` analyzer could be reimplemented as a `custom` analyzer as follow [source,js] ---------------------------------------------------- +PUT /bulgarian_example { "settings": { "analysis": { @@ -264,7 +273,7 @@ The `bulgarian` analyzer could be reimplemented as a `custom` analyzer as follow }, "bulgarian_keywords": { "type": "keyword_marker", - "keywords": [] <2> + "keywords": ["пример"] <2> }, "bulgarian_stemmer": { "type": "stemmer", @@ -286,6 +295,7 @@ The `bulgarian` analyzer could be reimplemented as a `custom` analyzer as follow } } ---------------------------------------------------- +// CONSOLE <1> The default stopwords can be overridden with the `stopwords` or `stopwords_path` parameters. <2> This filter should be removed unless there are words which should @@ -298,6 +308,7 @@ The `catalan` analyzer could be reimplemented as a `custom` analyzer as follows: [source,js] ---------------------------------------------------- +PUT /catalan_example { "settings": { "analysis": { @@ -312,7 +323,7 @@ The `catalan` analyzer could be reimplemented as a `custom` analyzer as follows: }, "catalan_keywords": { "type": "keyword_marker", - "keywords": [] <2> + "keywords": ["exemple"] <2> }, "catalan_stemmer": { "type": "stemmer", @@ -335,6 +346,7 @@ The `catalan` analyzer could be reimplemented as a `custom` analyzer as follows: } } ---------------------------------------------------- +// CONSOLE <1> The default stopwords can be overridden with the `stopwords` or `stopwords_path` parameters.
<2> This filter should be removed unless there are words which should @@ -347,6 +359,7 @@ The `cjk` analyzer could be reimplemented as a `custom` analyzer as follows: [source,js] ---------------------------------------------------- +PUT /cjk_example { "settings": { "analysis": { @@ -371,6 +384,7 @@ The `cjk` analyzer could be reimplemented as a `custom` analyzer as follows: } } ---------------------------------------------------- +// CONSOLE <1> The default stopwords can be overridden with the `stopwords` or `stopwords_path` parameters. @@ -381,6 +395,7 @@ The `czech` analyzer could be reimplemented as a `custom` analyzer as follows: [source,js] ---------------------------------------------------- +PUT /czech_example { "settings": { "analysis": { @@ -391,7 +406,7 @@ The `czech` analyzer could be reimplemented as a `custom` analyzer as follows: }, "czech_keywords": { "type": "keyword_marker", - "keywords": [] <2> + "keywords": ["příklad"] <2> }, "czech_stemmer": { "type": "stemmer", @@ -413,6 +428,7 @@ The `czech` analyzer could be reimplemented as a `custom` analyzer as follows: } } ---------------------------------------------------- +// CONSOLE <1> The default stopwords can be overridden with the `stopwords` or `stopwords_path` parameters. <2> This filter should be removed unless there are words which should @@ -425,6 +441,7 @@ The `danish` analyzer could be reimplemented as a `custom` analyzer as follows: [source,js] ---------------------------------------------------- +PUT /danish_example { "settings": { "analysis": { @@ -435,7 +452,7 @@ The `danish` analyzer could be reimplemented as a `custom` analyzer as follows: }, "danish_keywords": { "type": "keyword_marker", - "keywords": [] <2> + "keywords": ["eksempel"] <2> }, "danish_stemmer": { "type": "stemmer", @@ -457,6 +474,7 @@ The `danish` analyzer could be reimplemented as a `custom` analyzer as follows: } } ---------------------------------------------------- +// CONSOLE <1> The default stopwords can be overridden with the `stopwords` or `stopwords_path` parameters. <2> This filter should be removed unless there are words which should @@ -469,6 +487,7 @@ The `dutch` analyzer could be reimplemented as a `custom` analyzer as follows: [source,js] ---------------------------------------------------- +PUT /dutch_example { "settings": { "analysis": { @@ -479,7 +498,7 @@ The `dutch` analyzer could be reimplemented as a `custom` analyzer as follows: }, "dutch_keywords": { "type": "keyword_marker", - "keywords": [] <2> + "keywords": ["voorbeeld"] <2> }, "dutch_stemmer": { "type": "stemmer", @@ -511,6 +530,7 @@ The `dutch` analyzer could be reimplemented as a `custom` analyzer as follows: } } ---------------------------------------------------- +// CONSOLE <1> The default stopwords can be overridden with the `stopwords` or `stopwords_path` parameters.
<2> This filter should be removed unless there are words which should @@ -523,6 +543,7 @@ The `english` analyzer could be reimplemented as a `custom` analyzer as follows: [source,js] ---------------------------------------------------- +PUT /english_example { "settings": { "analysis": { @@ -533,7 +554,7 @@ The `english` analyzer could be reimplemented as a `custom` analyzer as follows: }, "english_keywords": { "type": "keyword_marker", - "keywords": [] <2> + "keywords": ["example"] <2> }, "english_stemmer": { "type": "stemmer", @@ -560,6 +581,7 @@ The `english` analyzer could be reimplemented as a `custom` analyzer as follows: } } ---------------------------------------------------- +// CONSOLE <1> The default stopwords can be overridden with the `stopwords` or `stopwords_path` parameters. <2> This filter should be removed unless there are words which should @@ -572,6 +594,7 @@ The `finnish` analyzer could be reimplemented as a `custom` analyzer as follows: [source,js] ---------------------------------------------------- +PUT /finnish_example { "settings": { "analysis": { @@ -582,7 +605,7 @@ The `finnish` analyzer could be reimplemented as a `custom` analyzer as follows: }, "finnish_keywords": { "type": "keyword_marker", - "keywords": [] <2> + "keywords": ["esimerkki"] <2> }, "finnish_stemmer": { "type": "stemmer", @@ -604,6 +627,7 @@ The `finnish` analyzer could be reimplemented as a `custom` analyzer as follows: } } ---------------------------------------------------- +// CONSOLE <1> The default stopwords can be overridden with the `stopwords` or `stopwords_path` parameters. <2> This filter should be removed unless there are words which should @@ -616,6 +640,7 @@ The `french` analyzer could be reimplemented as a `custom` analyzer as follows: [source,js] ---------------------------------------------------- +PUT /french_example { "settings": { "analysis": { @@ -635,7 +660,7 @@ The `french` analyzer could be reimplemented as a `custom` analyzer as follows: }, "french_keywords": { "type": "keyword_marker", - "keywords": [] <2> + "keywords": ["Exemple"] <2> }, "french_stemmer": { "type": "stemmer", @@ -658,6 +683,7 @@ The `french` analyzer could be reimplemented as a `custom` analyzer as follows: } } ---------------------------------------------------- +// CONSOLE <1> The default stopwords can be overridden with the `stopwords` or `stopwords_path` parameters. <2> This filter should be removed unless there are words which should @@ -670,6 +696,7 @@ The `galician` analyzer could be reimplemented as a `custom` analyzer as follows [source,js] ---------------------------------------------------- +PUT /galician_example { "settings": { "analysis": { @@ -680,7 +707,7 @@ The `galician` analyzer could be reimplemented as a `custom` analyzer as follows }, "galician_keywords": { "type": "keyword_marker", - "keywords": [] <2> + "keywords": ["exemplo"] <2> }, "galician_stemmer": { "type": "stemmer", @@ -702,6 +729,7 @@ The `galician` analyzer could be reimplemented as a `custom` analyzer as follows } } ---------------------------------------------------- +// CONSOLE <1> The default stopwords can be overridden with the `stopwords` or `stopwords_path` parameters. 
<2> This filter should be removed unless there are words which should @@ -714,6 +742,7 @@ The `german` analyzer could be reimplemented as a `custom` analyzer as follows: [source,js] ---------------------------------------------------- +PUT /german_example { "settings": { "analysis": { @@ -724,7 +753,7 @@ The `german` analyzer could be reimplemented as a `custom` analyzer as follows: }, "german_keywords": { "type": "keyword_marker", - "keywords": [] <2> + "keywords": ["Beispiel"] <2> }, "german_stemmer": { "type": "stemmer", @@ -747,6 +776,7 @@ The `german` analyzer could be reimplemented as a `custom` analyzer as follows: } } ---------------------------------------------------- +// CONSOLE <1> The default stopwords can be overridden with the `stopwords` or `stopwords_path` parameters. <2> This filter should be removed unless there are words which should @@ -759,6 +789,7 @@ The `greek` analyzer could be reimplemented as a `custom` analyzer as follows: [source,js] ---------------------------------------------------- +PUT /greek_example { "settings": { "analysis": { @@ -773,7 +804,7 @@ The `greek` analyzer could be reimplemented as a `custom` analyzer as follows: }, "greek_keywords": { "type": "keyword_marker", - "keywords": [] <2> + "keywords": ["παράδειγμα"] <2> }, "greek_stemmer": { "type": "stemmer", @@ -795,6 +826,7 @@ The `greek` analyzer could be reimplemented as a `custom` analyzer as follows: } } ---------------------------------------------------- +// CONSOLE <1> The default stopwords can be overridden with the `stopwords` or `stopwords_path` parameters. <2> This filter should be removed unless there are words which should @@ -807,6 +839,7 @@ The `hindi` analyzer could be reimplemented as a `custom` analyzer as follows: [source,js] ---------------------------------------------------- +PUT /hindi_example { "settings": { "analysis": { @@ -817,7 +850,7 @@ The `hindi` analyzer could be reimplemented as a `custom` analyzer as follows: }, "hindi_keywords": { "type": "keyword_marker", - "keywords": [] <2> + "keywords": ["उदाहरण"] <2> }, "hindi_stemmer": { "type": "stemmer", @@ -841,6 +874,7 @@ The `hindi` analyzer could be reimplemented as a `custom` analyzer as follows: } } ---------------------------------------------------- +// CONSOLE <1> The default stopwords can be overridden with the `stopwords` or `stopwords_path` parameters. <2> This filter should be removed unless there are words which should @@ -853,6 +887,7 @@ The `hungarian` analyzer could be reimplemented as a `custom` analyzer as follow [source,js] ---------------------------------------------------- +PUT /hungarian_example { "settings": { "analysis": { @@ -863,7 +898,7 @@ The `hungarian` analyzer could be reimplemented as a `custom` analyzer as follow }, "hungarian_keywords": { "type": "keyword_marker", - "keywords": [] <2> + "keywords": ["példa"] <2> }, "hungarian_stemmer": { "type": "stemmer", @@ -885,6 +920,7 @@ The `hungarian` analyzer could be reimplemented as a `custom` analyzer as follow } } ---------------------------------------------------- +// CONSOLE <1> The default stopwords can be overridden with the `stopwords` or `stopwords_path` parameters. 
<2> This filter should be removed unless there are words which should @@ -898,6 +934,7 @@ The `indonesian` analyzer could be reimplemented as a `custom` analyzer as follo [source,js] ---------------------------------------------------- +PUT /indonesian_example { "settings": { "analysis": { @@ -908,7 +945,7 @@ The `indonesian` analyzer could be reimplemented as a `custom` analyzer as follo }, "indonesian_keywords": { "type": "keyword_marker", - "keywords": [] <2> + "keywords": ["contoh"] <2> }, "indonesian_stemmer": { "type": "stemmer", @@ -930,6 +967,7 @@ The `indonesian` analyzer could be reimplemented as a `custom` analyzer as follo } } ---------------------------------------------------- +// CONSOLE <1> The default stopwords can be overridden with the `stopwords` or `stopwords_path` parameters. <2> This filter should be removed unless there are words which should @@ -942,6 +980,7 @@ The `irish` analyzer could be reimplemented as a `custom` analyzer as follows: [source,js] ---------------------------------------------------- +PUT /irish_example { "settings": { "analysis": { @@ -960,7 +999,7 @@ The `irish` analyzer could be reimplemented as a `custom` analyzer as follows: }, "irish_keywords": { "type": "keyword_marker", - "keywords": [] <2> + "keywords": ["sampla"] <2> }, "irish_stemmer": { "type": "stemmer", @@ -983,6 +1022,7 @@ The `irish` analyzer could be reimplemented as a `custom` analyzer as follows: } } ---------------------------------------------------- +// CONSOLE <1> The default stopwords can be overridden with the `stopwords` or `stopwords_path` parameters. <2> This filter should be removed unless there are words which should @@ -995,6 +1035,7 @@ The `italian` analyzer could be reimplemented as a `custom` analyzer as follows: [source,js] ---------------------------------------------------- +PUT /italian_example { "settings": { "analysis": { @@ -1014,7 +1055,7 @@ The `italian` analyzer could be reimplemented as a `custom` analyzer as follows: }, "italian_keywords": { "type": "keyword_marker", - "keywords": [] <2> + "keywords": ["esempio"] <2> }, "italian_stemmer": { "type": "stemmer", @@ -1037,6 +1078,7 @@ The `italian` analyzer could be reimplemented as a `custom` analyzer as follows: } } ---------------------------------------------------- +// CONSOLE <1> The default stopwords can be overridden with the `stopwords` or `stopwords_path` parameters. <2> This filter should be removed unless there are words which should @@ -1049,6 +1091,7 @@ The `latvian` analyzer could be reimplemented as a `custom` analyzer as follows: [source,js] ---------------------------------------------------- +PUT /latvian_example { "settings": { "analysis": { @@ -1059,7 +1102,7 @@ The `latvian` analyzer could be reimplemented as a `custom` analyzer as follows: }, "latvian_keywords": { "type": "keyword_marker", - "keywords": [] <2> + "keywords": ["piemērs"] <2> }, "latvian_stemmer": { "type": "stemmer", @@ -1081,6 +1124,7 @@ The `latvian` analyzer could be reimplemented as a `custom` analyzer as follows: } } ---------------------------------------------------- +// CONSOLE <1> The default stopwords can be overridden with the `stopwords` or `stopwords_path` parameters. 
<2> This filter should be removed unless there are words which should @@ -1093,6 +1137,7 @@ The `lithuanian` analyzer could be reimplemented as a `custom` analyzer as follo [source,js] ---------------------------------------------------- +PUT /lithuanian_example { "settings": { "analysis": { @@ -1103,7 +1148,7 @@ The `lithuanian` analyzer could be reimplemented as a `custom` analyzer as follo }, "lithuanian_keywords": { "type": "keyword_marker", - "keywords": [] <2> + "keywords": ["pavyzdys"] <2> }, "lithuanian_stemmer": { "type": "stemmer", @@ -1125,6 +1170,7 @@ The `lithuanian` analyzer could be reimplemented as a `custom` analyzer as follo } } ---------------------------------------------------- +// CONSOLE <1> The default stopwords can be overridden with the `stopwords` or `stopwords_path` parameters. <2> This filter should be removed unless there are words which should @@ -1137,6 +1183,7 @@ The `norwegian` analyzer could be reimplemented as a `custom` analyzer as follow [source,js] ---------------------------------------------------- +PUT /norwegian_example { "settings": { "analysis": { @@ -1147,7 +1194,7 @@ The `norwegian` analyzer could be reimplemented as a `custom` analyzer as follow }, "norwegian_keywords": { "type": "keyword_marker", - "keywords": [] <2> + "keywords": ["eksempel"] <2> }, "norwegian_stemmer": { "type": "stemmer", @@ -1169,6 +1216,7 @@ The `norwegian` analyzer could be reimplemented as a `custom` analyzer as follow } } ---------------------------------------------------- +// CONSOLE <1> The default stopwords can be overridden with the `stopwords` or `stopwords_path` parameters. <2> This filter should be removed unless there are words which should @@ -1181,6 +1229,7 @@ The `persian` analyzer could be reimplemented as a `custom` analyzer as follows: [source,js] ---------------------------------------------------- +PUT /persian_example { "settings": { "analysis": { @@ -1212,6 +1261,7 @@ The `persian` analyzer could be reimplemented as a `custom` analyzer as follows: } } ---------------------------------------------------- +// CONSOLE <1> Replaces zero-width non-joiners with an ASCII space. <2> The default stopwords can be overridden with the `stopwords` or `stopwords_path` parameters. @@ -1223,6 +1273,7 @@ The `portuguese` analyzer could be reimplemented as a `custom` analyzer as follo [source,js] ---------------------------------------------------- +PUT /portuguese_example { "settings": { "analysis": { @@ -1233,7 +1284,7 @@ The `portuguese` analyzer could be reimplemented as a `custom` analyzer as follo }, "portuguese_keywords": { "type": "keyword_marker", - "keywords": [] <2> + "keywords": ["exemplo"] <2> }, "portuguese_stemmer": { "type": "stemmer", @@ -1255,6 +1306,7 @@ The `portuguese` analyzer could be reimplemented as a `custom` analyzer as follo } } ---------------------------------------------------- +// CONSOLE <1> The default stopwords can be overridden with the `stopwords` or `stopwords_path` parameters. 
<2> This filter should be removed unless there are words which should @@ -1267,6 +1319,7 @@ The `romanian` analyzer could be reimplemented as a `custom` analyzer as follows [source,js] ---------------------------------------------------- +PUT /romanian_example { "settings": { "analysis": { @@ -1277,7 +1330,7 @@ The `romanian` analyzer could be reimplemented as a `custom` analyzer as follows }, "romanian_keywords": { "type": "keyword_marker", - "keywords": [] <2> + "keywords": ["exemplu"] <2> }, "romanian_stemmer": { "type": "stemmer", @@ -1299,6 +1352,7 @@ The `romanian` analyzer could be reimplemented as a `custom` analyzer as follows } } ---------------------------------------------------- +// CONSOLE <1> The default stopwords can be overridden with the `stopwords` or `stopwords_path` parameters. <2> This filter should be removed unless there are words which should @@ -1312,6 +1366,7 @@ The `russian` analyzer could be reimplemented as a `custom` analyzer as follows: [source,js] ---------------------------------------------------- +PUT /russian_example { "settings": { "analysis": { @@ -1322,7 +1377,7 @@ The `russian` analyzer could be reimplemented as a `custom` analyzer as follows: }, "russian_keywords": { "type": "keyword_marker", - "keywords": [] <2> + "keywords": ["пример"] <2> }, "russian_stemmer": { "type": "stemmer", @@ -1344,6 +1399,7 @@ The `russian` analyzer could be reimplemented as a `custom` analyzer as follows: } } ---------------------------------------------------- +// CONSOLE <1> The default stopwords can be overridden with the `stopwords` or `stopwords_path` parameters. <2> This filter should be removed unless there are words which should @@ -1356,6 +1412,7 @@ The `sorani` analyzer could be reimplemented as a `custom` analyzer as follows: [source,js] ---------------------------------------------------- +PUT /sorani_example { "settings": { "analysis": { @@ -1366,7 +1423,7 @@ The `sorani` analyzer could be reimplemented as a `custom` analyzer as follows: }, "sorani_keywords": { "type": "keyword_marker", - "keywords": [] <2> + "keywords": ["mînak"] <2> }, "sorani_stemmer": { "type": "stemmer", @@ -1389,6 +1446,7 @@ The `sorani` analyzer could be reimplemented as a `custom` analyzer as follows: } } ---------------------------------------------------- +// CONSOLE <1> The default stopwords can be overridden with the `stopwords` or `stopwords_path` parameters. <2> This filter should be removed unless there are words which should @@ -1401,6 +1459,7 @@ The `spanish` analyzer could be reimplemented as a `custom` analyzer as follows: [source,js] ---------------------------------------------------- +PUT /spanish_example { "settings": { "analysis": { @@ -1411,7 +1470,7 @@ The `spanish` analyzer could be reimplemented as a `custom` analyzer as follows: }, "spanish_keywords": { "type": "keyword_marker", - "keywords": [] <2> + "keywords": ["ejemplo"] <2> }, "spanish_stemmer": { "type": "stemmer", @@ -1433,6 +1492,7 @@ The `spanish` analyzer could be reimplemented as a `custom` analyzer as follows: } } ---------------------------------------------------- +// CONSOLE <1> The default stopwords can be overridden with the `stopwords` or `stopwords_path` parameters. 
<2> This filter should be removed unless there are words which should @@ -1445,6 +1505,7 @@ The `swedish` analyzer could be reimplemented as a `custom` analyzer as follows: [source,js] ---------------------------------------------------- +PUT /swedish_example { "settings": { "analysis": { @@ -1455,7 +1516,7 @@ The `swedish` analyzer could be reimplemented as a `custom` analyzer as follows: }, "swedish_keywords": { "type": "keyword_marker", - "keywords": [] <2> + "keywords": ["exempel"] <2> }, "swedish_stemmer": { "type": "stemmer", @@ -1477,6 +1538,7 @@ The `swedish` analyzer could be reimplemented as a `custom` analyzer as follows: } } ---------------------------------------------------- +// CONSOLE <1> The default stopwords can be overridden with the `stopwords` or `stopwords_path` parameters. <2> This filter should be removed unless there are words which should @@ -1489,6 +1551,7 @@ The `turkish` analyzer could be reimplemented as a `custom` analyzer as follows: [source,js] ---------------------------------------------------- +PUT /turkish_example { "settings": { "analysis": { @@ -1503,7 +1566,7 @@ The `turkish` analyzer could be reimplemented as a `custom` analyzer as follows: }, "turkish_keywords": { "type": "keyword_marker", - "keywords": [] <2> + "keywords": ["örnek"] <2> }, "turkish_stemmer": { "type": "stemmer", @@ -1526,6 +1589,7 @@ The `turkish` analyzer could be reimplemented as a `custom` analyzer as follows: } } ---------------------------------------------------- +// CONSOLE <1> The default stopwords can be overridden with the `stopwords` or `stopwords_path` parameters. <2> This filter should be removed unless there are words which should @@ -1538,6 +1602,7 @@ The `thai` analyzer could be reimplemented as a `custom` analyzer as follows: [source,js] ---------------------------------------------------- +PUT /thai_example { "settings": { "analysis": { @@ -1560,5 +1625,6 @@ The `thai` analyzer could be reimplemented as a `custom` analyzer as follows: } } ---------------------------------------------------- +// CONSOLE <1> The default stopwords can be overridden with the `stopwords` or `stopwords_path` parameters.
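As an aside that is not part of the change above, a quick way to check that a `keyword_marker` entry really protects a word from the stemmer is to call the `_analyze` API with the filters spelled out inline; this sketch reuses the Swedish `exempel` keyword from the example and can be adapted to any of the languages shown:

[source,js]
--------------------------------------------------
POST _analyze
{
  "tokenizer": "standard",
  "filter": [
    "lowercase",
    { "type": "keyword_marker", "keywords": ["exempel"] },
    { "type": "stemmer", "language": "swedish" }
  ],
  "text": "exempel exemplen"
}
--------------------------------------------------

The protected token should come back unchanged while the unprotected form is stemmed.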
diff --git a/docs/reference/analysis/analyzers/pattern-analyzer.asciidoc b/docs/reference/analysis/analyzers/pattern-analyzer.asciidoc index 7d059253e7058..64ab3999ef9a9 100644 --- a/docs/reference/analysis/analyzers/pattern-analyzer.asciidoc +++ b/docs/reference/analysis/analyzers/pattern-analyzer.asciidoc @@ -366,9 +366,8 @@ The above example produces the following terms: The regex above is easier to understand as: -[source,js] +[source,regex] -------------------------------------------------- - ([^\p{L}\d]+) # swallow non letters and numbers, | (?<=\D)(?=\d) # or non-number followed by number, | (?<=\d)(?=\D) # or number followed by non-number, diff --git a/docs/reference/analysis/charfilters/htmlstrip-charfilter.asciidoc b/docs/reference/analysis/charfilters/htmlstrip-charfilter.asciidoc index 217b618c9c227..6c1a1875d67ca 100644 --- a/docs/reference/analysis/charfilters/htmlstrip-charfilter.asciidoc +++ b/docs/reference/analysis/charfilters/htmlstrip-charfilter.asciidoc @@ -43,14 +43,14 @@ POST _analyze The above example returns the term: -[source,js] +[source,text] --------------------------- [ \nI'm so happy!\n ] --------------------------- The same example with the `standard` tokenizer would return the following terms: -[source,js] +[source,text] --------------------------- [ I'm, so, happy ] --------------------------- diff --git a/docs/reference/analysis/charfilters/pattern-replace-charfilter.asciidoc b/docs/reference/analysis/charfilters/pattern-replace-charfilter.asciidoc index 32ee14d8f552a..6f21f4521d3b6 100644 --- a/docs/reference/analysis/charfilters/pattern-replace-charfilter.asciidoc +++ b/docs/reference/analysis/charfilters/pattern-replace-charfilter.asciidoc @@ -79,7 +79,9 @@ POST my_index/_analyze } ---------------------------- // CONSOLE -// TEST[skip:Test interprets $1 as a stashed variable] +// TEST[s/\$1//] +// the test framework doesn't like the $1 so we just throw it away rather than +// try to get it to work properly. At least we are still testing the charfilter. The above example produces the following term: @@ -88,7 +90,6 @@ The above example produces the following term: [ My, credit, card, is 123_456_789 ] --------------------------- - WARNING: Using a replacement string that changes the length of the original text will work for search purposes, but will result in incorrect highlighting, as can be seen in the following example. @@ -193,7 +194,7 @@ POST my_index/_analyze The above returns the following terms: -[source,js] +[source,text] ---------------------------- [ the, foo, bar, baz, method ] ---------------------------- diff --git a/docs/reference/analysis/tokenfilters/asciifolding-tokenfilter.asciidoc b/docs/reference/analysis/tokenfilters/asciifolding-tokenfilter.asciidoc index 68891c18e2365..73d35549da8b6 100644 --- a/docs/reference/analysis/tokenfilters/asciifolding-tokenfilter.asciidoc +++ b/docs/reference/analysis/tokenfilters/asciifolding-tokenfilter.asciidoc @@ -8,17 +8,21 @@ equivalents, if one exists. 
Example: [source,js] -------------------------------------------------- -"index" : { - "analysis" : { - "analyzer" : { - "default" : { - "tokenizer" : "standard", - "filter" : ["standard", "asciifolding"] +PUT /asciifold_example +{ + "settings" : { + "analysis" : { + "analyzer" : { + "default" : { + "tokenizer" : "standard", + "filter" : ["standard", "asciifolding"] + } } } } } -------------------------------------------------- +// CONSOLE Accepts `preserve_original` setting which defaults to false but if true will keep the original token as well as emit the folded token. For @@ -26,20 +30,24 @@ example: [source,js] -------------------------------------------------- -"index" : { - "analysis" : { - "analyzer" : { - "default" : { - "tokenizer" : "standard", - "filter" : ["standard", "my_ascii_folding"] - } - }, - "filter" : { - "my_ascii_folding" : { - "type" : "asciifolding", - "preserve_original" : true +PUT /asciifold_example +{ + "settings" : { + "analysis" : { + "analyzer" : { + "default" : { + "tokenizer" : "standard", + "filter" : ["standard", "my_ascii_folding"] + } + }, + "filter" : { + "my_ascii_folding" : { + "type" : "asciifolding", + "preserve_original" : true + } } } } } -------------------------------------------------- +// CONSOLE diff --git a/docs/reference/analysis/tokenfilters/cjk-bigram-tokenfilter.asciidoc b/docs/reference/analysis/tokenfilters/cjk-bigram-tokenfilter.asciidoc index c1e278b2183b3..cc26d025f04f9 100644 --- a/docs/reference/analysis/tokenfilters/cjk-bigram-tokenfilter.asciidoc +++ b/docs/reference/analysis/tokenfilters/cjk-bigram-tokenfilter.asciidoc @@ -16,8 +16,9 @@ Bigrams are generated for characters in `han`, `hiragana`, `katakana` and [source,js] -------------------------------------------------- +PUT /cjk_bigram_example { - "index" : { + "settings" : { "analysis" : { "analyzer" : { "han_bigrams" : { @@ -40,3 +41,4 @@ Bigrams are generated for characters in `han`, `hiragana`, `katakana` and } } -------------------------------------------------- +// CONSOLE diff --git a/docs/reference/analysis/tokenfilters/common-grams-tokenfilter.asciidoc b/docs/reference/analysis/tokenfilters/common-grams-tokenfilter.asciidoc index eb1469af80344..c7d8ff660d347 100644 --- a/docs/reference/analysis/tokenfilters/common-grams-tokenfilter.asciidoc +++ b/docs/reference/analysis/tokenfilters/common-grams-tokenfilter.asciidoc @@ -41,21 +41,33 @@ Here is an example: [source,js] -------------------------------------------------- -index : - analysis : - analyzer : - index_grams : - tokenizer : whitespace - filter : [common_grams] - search_grams : - tokenizer : whitespace - filter : [common_grams_query] - filter : - common_grams : - type : common_grams - common_words: [a, an, the] - common_grams_query : - type : common_grams - query_mode: true - common_words: [a, an, the] +PUT /common_grams_example +{ + "settings": { + "analysis": { + "my_analyzer": { + "index_grams": { + "tokenizer": "whitespace", + "filter": ["common_grams"] + }, + "search_grams": { + "tokenizer": "whitespace", + "filter": ["common_grams_query"] + } + }, + "filter": { + "common_grams": { + "type": "common_grams", + "common_words": ["a", "an", "the"] + }, + "common_grams_query": { + "type": "common_grams", + "query_mode": true, + "common_words": ["a", "an", "the"] + } + } + } + } +} -------------------------------------------------- +// CONSOLE diff --git a/docs/reference/analysis/tokenfilters/compound-word-tokenfilter.asciidoc b/docs/reference/analysis/tokenfilters/compound-word-tokenfilter.asciidoc index 
1268727b2efd8..e790ed4c4b5b1 100644 --- a/docs/reference/analysis/tokenfilters/compound-word-tokenfilter.asciidoc +++ b/docs/reference/analysis/tokenfilters/compound-word-tokenfilter.asciidoc @@ -1,5 +1,5 @@ [[analysis-compound-word-tokenfilter]] -=== Compound Word Token Filter +=== Compound Word Token Filters The `hyphenation_decompounder` and `dictionary_decompounder` token filters can decompose compound words found in many German languages into word parts. @@ -26,7 +26,7 @@ output tokens is directly connected to the quality of the grammar file you use. For languages like German they are quite good. XML based hyphenation grammar files can be found in the -http://offo.sourceforge.net/hyphenation/#FOP+XML+Hyphenation+Patterns[Objects For Formatting Objects] +http://offo.sourceforge.net/#FOP+XML+Hyphenation+Patterns[Objects For Formatting Objects] (OFFO) Sourceforge project. Currently only FOP v1.2 compatible hyphenation files are supported. You can download https://sourceforge.net/projects/offo/files/offo-hyphenation/1.2/offo-hyphenation_v1.2.zip/download[offo-hyphenation_v1.2.zip] directly and look in the `offo-hyphenation/hyph/` directory. @@ -84,20 +84,31 @@ Here is an example: [source,js] -------------------------------------------------- -index : - analysis : - analyzer : - myAnalyzer2 : - type : custom - tokenizer : standard - filter : [myTokenFilter1, myTokenFilter2] - filter : - myTokenFilter1 : - type : dictionary_decompounder - word_list: [one, two, three] - myTokenFilter2 : - type : hyphenation_decompounder - word_list_path: path/to/words.txt - hyphenation_patterns_path: path/to/fop.xml - max_subword_size : 22 +PUT /compound_word_example +{ + "index": { + "analysis": { + "analyzer": { + "my_analyzer": { + "type": "custom", + "tokenizer": "standard", + "filter": ["dictionary_decompounder", "hyphenation_decompounder"] + } + }, + "filter": { + "dictionary_decompounder": { + "type": "dictionary_decompounder", + "word_list": ["one", "two", "three"] + }, + "hyphenation_decompounder": { + "type" : "hyphenation_decompounder", + "word_list_path": "analysis/example_word_list.txt", + "hyphenation_patterns_path": "analysis/hyphenation_patterns.xml", + "max_subword_size": 22 + } + } + } + } +} -------------------------------------------------- +// CONSOLE diff --git a/docs/reference/analysis/tokenfilters/elision-tokenfilter.asciidoc b/docs/reference/analysis/tokenfilters/elision-tokenfilter.asciidoc index c44ccffd51e61..956c5ad13d034 100644 --- a/docs/reference/analysis/tokenfilters/elision-tokenfilter.asciidoc +++ b/docs/reference/analysis/tokenfilters/elision-tokenfilter.asciidoc @@ -9,20 +9,24 @@ example: [source,js] -------------------------------------------------- -"index" : { - "analysis" : { - "analyzer" : { - "default" : { - "tokenizer" : "standard", - "filter" : ["standard", "elision"] - } - }, - "filter" : { - "elision" : { - "type" : "elision", - "articles" : ["l", "m", "t", "qu", "n", "s", "j"] +PUT /elision_example +{ + "settings" : { + "analysis" : { + "analyzer" : { + "default" : { + "tokenizer" : "standard", + "filter" : ["standard", "elision"] + } + }, + "filter" : { + "elision" : { + "type" : "elision", + "articles" : ["l", "m", "t", "qu", "n", "s", "j"] + } } } } } -------------------------------------------------- +// CONSOLE diff --git a/docs/reference/analysis/tokenfilters/hunspell-tokenfilter.asciidoc b/docs/reference/analysis/tokenfilters/hunspell-tokenfilter.asciidoc index 9b3f188d95145..cef687f761905 100644 --- 
a/docs/reference/analysis/tokenfilters/hunspell-tokenfilter.asciidoc +++ b/docs/reference/analysis/tokenfilters/hunspell-tokenfilter.asciidoc @@ -10,7 +10,7 @@ one or more `*.dic` files (all of which will automatically be picked up). For example, assuming the default hunspell location is used, the following directory layout will define the `en_US` dictionary: -[source,js] +[source,txt] -------------------------------------------------- - conf |-- hunspell @@ -42,24 +42,28 @@ settings: [source,js] -------------------------------------------------- +PUT /hunspell_example { - "analysis" : { - "analyzer" : { - "en" : { - "tokenizer" : "standard", - "filter" : [ "lowercase", "en_US" ] - } - }, - "filter" : { - "en_US" : { - "type" : "hunspell", - "locale" : "en_US", - "dedup" : true + "settings": { + "analysis" : { + "analyzer" : { + "en" : { + "tokenizer" : "standard", + "filter" : [ "lowercase", "en_US" ] + } + }, + "filter" : { + "en_US" : { + "type" : "hunspell", + "locale" : "en_US", + "dedup" : true + } } } } } -------------------------------------------------- +// CONSOLE The hunspell token filter accepts four options: diff --git a/docs/reference/analysis/tokenfilters/keep-types-tokenfilter.asciidoc b/docs/reference/analysis/tokenfilters/keep-types-tokenfilter.asciidoc index bb1103dff8a2d..afaf4f8fa8c46 100644 --- a/docs/reference/analysis/tokenfilters/keep-types-tokenfilter.asciidoc +++ b/docs/reference/analysis/tokenfilters/keep-types-tokenfilter.asciidoc @@ -1,7 +1,7 @@ [[analysis-keep-types-tokenfilter]] === Keep Types Token Filter -A token filter of type `keep_types` that only keeps tokens with a token type +A token filter of type `keep_types` that only keeps tokens with a token type contained in a predefined set. @@ -14,24 +14,61 @@ types:: a list of types to keep [float] === Settings example +You can set it up like: + [source,js] -------------------------------------------------- +PUT /keep_types_example { - "index" : { + "settings" : { "analysis" : { "analyzer" : { "my_analyzer" : { "tokenizer" : "standard", "filter" : ["standard", "lowercase", "extract_numbers"] - }, + } }, "filter" : { "extract_numbers" : { "type" : "keep_types", "types" : [ "<NUM>" ] - }, + } } } } } -------------------------------------------------- +// CONSOLE + +And test it like: + +[source,js] +-------------------------------------------------- +POST /keep_types_example/_analyze +{ + "analyzer" : "my_analyzer", + "text" : "this is just 1 a test" +} +-------------------------------------------------- +// CONSOLE +// TEST[continued] + +And it'd respond: + +[source,js] +-------------------------------------------------- +{ + "tokens": [ + { + "token": "1", + "start_offset": 13, + "end_offset": 14, + "type": "<NUM>", + "position": 3 + } + ] +} +-------------------------------------------------- +// TESTRESPONSE + +Note how only the `<NUM>` token is in the output.
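Not part of the diff, but if it is unclear which token types a tokenizer emits (and therefore what to list under `types`), the `_analyze` API reports them when called with `explain` enabled; the request below is only illustrative:

[source,js]
--------------------------------------------------
POST _analyze
{
  "tokenizer": "standard",
  "explain": true,
  "text": "this is just 1 a test"
}
--------------------------------------------------

The detailed output lists every token together with its `type` attribute, for example `<ALPHANUM>` or `<NUM>`.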
diff --git a/docs/reference/analysis/tokenfilters/keep-words-tokenfilter.asciidoc b/docs/reference/analysis/tokenfilters/keep-words-tokenfilter.asciidoc index e4abbeff15dea..50c74942a0101 100644 --- a/docs/reference/analysis/tokenfilters/keep-words-tokenfilter.asciidoc +++ b/docs/reference/analysis/tokenfilters/keep-words-tokenfilter.asciidoc @@ -20,17 +20,18 @@ keep_words_case:: a boolean indicating whether to lower case the words (defaults [source,js] -------------------------------------------------- +PUT /keep_words_example { - "index" : { + "settings" : { "analysis" : { "analyzer" : { - "my_analyzer" : { + "example_1" : { "tokenizer" : "standard", "filter" : ["standard", "lowercase", "words_till_three"] }, - "my_analyzer1" : { + "example_2" : { "tokenizer" : "standard", - "filter" : ["standard", "lowercase", "words_on_file"] + "filter" : ["standard", "lowercase", "words_in_file"] } }, "filter" : { @@ -38,12 +39,13 @@ keep_words_case:: a boolean indicating whether to lower case the words (defaults "type" : "keep", "keep_words" : [ "one", "two", "three"] }, - "words_on_file" : { + "words_in_file" : { "type" : "keep", - "keep_words_path" : "/path/to/word/file" + "keep_words_path" : "analysis/example_word_list.txt" } } } } } -------------------------------------------------- +// CONSOLE diff --git a/docs/reference/analysis/tokenfilters/keyword-marker-tokenfilter.asciidoc b/docs/reference/analysis/tokenfilters/keyword-marker-tokenfilter.asciidoc index e43687e29dc1c..1f1e4e655c55e 100644 --- a/docs/reference/analysis/tokenfilters/keyword-marker-tokenfilter.asciidoc +++ b/docs/reference/analysis/tokenfilters/keyword-marker-tokenfilter.asciidoc @@ -12,23 +12,131 @@ any stemming filters. |`keywords_path` |A path (either relative to `config` location, or absolute) to a list of words. +|`keywords_pattern` |A regular expression pattern to match against words +in the text. + |`ignore_case` |Set to `true` to lower case all words first. Defaults to `false`. 
|======================================================================= -Here is an example: +You can configure it like: + +[source,js] +-------------------------------------------------- +PUT /keyword_marker_example +{ + "settings": { + "analysis": { + "analyzer": { + "protect_cats": { + "type": "custom", + "tokenizer": "standard", + "filter": ["lowercase", "protect_cats", "porter_stem"] + }, + "normal": { + "type": "custom", + "tokenizer": "standard", + "filter": ["lowercase", "porter_stem"] + } + }, + "filter": { + "protect_cats": { + "type": "keyword_marker", + "keywords": ["cats"] + } + } + } + } +} +-------------------------------------------------- +// CONSOLE + +And test it with: + +[source,js] +-------------------------------------------------- +POST /keyword_marker_example/_analyze +{ + "analyzer" : "protect_cats", + "text" : "I like cats" +} +-------------------------------------------------- +// CONSOLE +// TEST[continued] + +And it'd respond: + +[source,js] +-------------------------------------------------- +{ + "tokens": [ + { + "token": "i", + "start_offset": 0, + "end_offset": 1, + "type": "", + "position": 0 + }, + { + "token": "like", + "start_offset": 2, + "end_offset": 6, + "type": "", + "position": 1 + }, + { + "token": "cats", + "start_offset": 7, + "end_offset": 11, + "type": "", + "position": 2 + } + ] +} +-------------------------------------------------- +// TESTRESPONSE + +As compared to the `normal` analyzer which has `cats` stemmed to `cat`: + +[source,js] +-------------------------------------------------- +POST /keyword_marker_example/_analyze +{ + "analyzer" : "normal", + "text" : "I like cats" +} +-------------------------------------------------- +// CONSOLE +// TEST[continued] + +Response: [source,js] -------------------------------------------------- -index : - analysis : - analyzer : - myAnalyzer : - type : custom - tokenizer : standard - filter : [lowercase, protwords, porter_stem] - filter : - protwords : - type : keyword_marker - keywords_path : analysis/protwords.txt +{ + "tokens": [ + { + "token": "i", + "start_offset": 0, + "end_offset": 1, + "type": "", + "position": 0 + }, + { + "token": "like", + "start_offset": 2, + "end_offset": 6, + "type": "", + "position": 1 + }, + { + "token": "cat", + "start_offset": 7, + "end_offset": 11, + "type": "", + "position": 2 + } + ] +} -------------------------------------------------- +// TESTRESPONSE diff --git a/docs/reference/analysis/tokenfilters/keyword-repeat-tokenfilter.asciidoc b/docs/reference/analysis/tokenfilters/keyword-repeat-tokenfilter.asciidoc index aa8c7a9b75285..044e8c1476951 100644 --- a/docs/reference/analysis/tokenfilters/keyword-repeat-tokenfilter.asciidoc +++ b/docs/reference/analysis/tokenfilters/keyword-repeat-tokenfilter.asciidoc @@ -9,18 +9,85 @@ subsequent stemmer will be indexed twice. Therefore, consider adding a `unique` filter with `only_on_same_position` set to `true` to drop unnecessary duplicates. 
-Here is an example: +Here is an example of using the `keyword_repeat` token filter to +preserve both the stemmed and unstemmed version of tokens: [source,js] -------------------------------------------------- -index : - analysis : - analyzer : - myAnalyzer : - type : custom - tokenizer : standard - filter : [lowercase, keyword_repeat, porter_stem, unique_stem] - unique_stem: - type: unique - only_on_same_position : true +PUT /keyword_repeat_example +{ + "settings": { + "analysis": { + "analyzer": { + "stemmed_and_unstemmed": { + "type": "custom", + "tokenizer": "standard", + "filter": ["lowercase", "keyword_repeat", "porter_stem", "unique_stem"] + } + }, + "filter": { + "unique_stem": { + "type": "unique", + "only_on_same_position": true + } + } + } + } +} -------------------------------------------------- +// CONSOLE + +And you can test it with: + +[source,js] +-------------------------------------------------- +POST /keyword_repeat_example/_analyze +{ + "analyzer" : "stemmed_and_unstemmed", + "text" : "I like cats" +} +-------------------------------------------------- +// CONSOLE +// TEST[continued] + +And it'd respond: + +[source,js] +-------------------------------------------------- +{ + "tokens": [ + { + "token": "i", + "start_offset": 0, + "end_offset": 1, + "type": "<ALPHANUM>", + "position": 0 + }, + { + "token": "like", + "start_offset": 2, + "end_offset": 6, + "type": "<ALPHANUM>", + "position": 1 + }, + { + "token": "cats", + "start_offset": 7, + "end_offset": 11, + "type": "<ALPHANUM>", + "position": 2 + }, + { + "token": "cat", + "start_offset": 7, + "end_offset": 11, + "type": "<ALPHANUM>", + "position": 2 + } + ] +} +-------------------------------------------------- +// TESTRESPONSE + +Which preserves both the `cat` and `cats` tokens. Compare this to the example +on the <>.
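As a usage sketch that goes beyond the change itself, the `stemmed_and_unstemmed` analyzer defined above could be attached to a `text` field so that both the exact and the stemmed form of each word end up in the index; the type and field names here are illustrative:

[source,js]
--------------------------------------------------
PUT /keyword_repeat_example/_mapping/doc
{
  "properties": {
    "body": {
      "type": "text",
      "analyzer": "stemmed_and_unstemmed"
    }
  }
}
--------------------------------------------------

A query for either `cat` or `cats` would then match a document containing `cats`.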
diff --git a/docs/reference/analysis/tokenfilters/limit-token-count-tokenfilter.asciidoc b/docs/reference/analysis/tokenfilters/limit-token-count-tokenfilter.asciidoc index a6598be6095ec..ba2018c107626 100644 --- a/docs/reference/analysis/tokenfilters/limit-token-count-tokenfilter.asciidoc +++ b/docs/reference/analysis/tokenfilters/limit-token-count-tokenfilter.asciidoc @@ -18,15 +18,25 @@ Here is an example: [source,js] -------------------------------------------------- -index : - analysis : - analyzer : - myAnalyzer : - type : custom - tokenizer : standard - filter : [lowercase, five_token_limit] - filter : - five_token_limit : - type : limit - max_token_count : 5 +PUT /limit_example +{ + "settings": { + "analysis": { + "analyzer": { + "limit_example": { + "type": "custom", + "tokenizer": "standard", + "filter": ["lowercase", "five_token_limit"] + } + }, + "filter": { + "five_token_limit": { + "type": "limit", + "max_token_count": 5 + } + } + } + } +} -------------------------------------------------- +// CONSOLE diff --git a/docs/reference/analysis/tokenfilters/lowercase-tokenfilter.asciidoc b/docs/reference/analysis/tokenfilters/lowercase-tokenfilter.asciidoc index 674dfe541c9cf..519fd77ba2afd 100644 --- a/docs/reference/analysis/tokenfilters/lowercase-tokenfilter.asciidoc +++ b/docs/reference/analysis/tokenfilters/lowercase-tokenfilter.asciidoc @@ -10,28 +10,30 @@ custom analyzer [source,js] -------------------------------------------------- -index : - analysis : - analyzer : - myAnalyzer2 : - type : custom - tokenizer : myTokenizer1 - filter : [myTokenFilter1, myGreekLowerCaseFilter] - char_filter : [my_html] - tokenizer : - myTokenizer1 : - type : standard - max_token_length : 900 - filter : - myTokenFilter1 : - type : stop - stopwords : [stop1, stop2, stop3, stop4] - myGreekLowerCaseFilter : - type : lowercase - language : greek - char_filter : - my_html : - type : html_strip - escaped_tags : [xxx, yyy] - read_ahead : 1024 +PUT /lowercase_example +{ + "settings": { + "analysis": { + "analyzer": { + "standard_lowercase_example": { + "type": "custom", + "tokenizer": "standard", + "filter": ["lowercase"] + }, + "greek_lowercase_example": { + "type": "custom", + "tokenizer": "standard", + "filter": ["greek_lowercase"] + } + }, + "filter": { + "greek_lowercase": { + "type": "lowercase", + "language": "greek" + } + } + } + } +} -------------------------------------------------- +// CONSOLE diff --git a/docs/reference/analysis/tokenfilters/pattern-capture-tokenfilter.asciidoc b/docs/reference/analysis/tokenfilters/pattern-capture-tokenfilter.asciidoc index 177c4195bbff6..4dac79b6571b1 100644 --- a/docs/reference/analysis/tokenfilters/pattern-capture-tokenfilter.asciidoc +++ b/docs/reference/analysis/tokenfilters/pattern-capture-tokenfilter.asciidoc @@ -23,14 +23,14 @@ Read more about http://www.regular-expressions.info/catastrophic.html[pathologic For instance a pattern like : -[source,js] +[source,text] -------------------------------------------------- "(([a-z]+)(\d*))" -------------------------------------------------- when matched against: -[source,js] +[source,text] -------------------------------------------------- "abc123def456" -------------------------------------------------- @@ -74,7 +74,7 @@ PUT test When used to analyze the text -[source,js] +[source,java] -------------------------------------------------- import static org.apache.commons.lang.StringEscapeUtils.escapeHtml -------------------------------------------------- @@ -117,7 +117,7 @@ PUT test When the above analyzer 
is used on an email address like: -[source,js] +[source,text] -------------------------------------------------- john-smith_123@foo-bar.com -------------------------------------------------- @@ -136,14 +136,14 @@ original token will be highlighted, not just the matching subset. For instance, querying the above email address for `"smith"` would highlight: -[source,js] +[source,html] -------------------------------------------------- john-smith_123@foo-bar.com -------------------------------------------------- not: -[source,js] +[source,html] -------------------------------------------------- john-smith_123@foo-bar.com -------------------------------------------------- diff --git a/docs/reference/analysis/tokenfilters/snowball-tokenfilter.asciidoc b/docs/reference/analysis/tokenfilters/snowball-tokenfilter.asciidoc index 6042642027cd9..93e1eed26b4b2 100644 --- a/docs/reference/analysis/tokenfilters/snowball-tokenfilter.asciidoc +++ b/docs/reference/analysis/tokenfilters/snowball-tokenfilter.asciidoc @@ -12,8 +12,9 @@ For example: [source,js] -------------------------------------------------- +PUT /my_index { - "index" : { + "settings": { "analysis" : { "analyzer" : { "my_analyzer" : { @@ -31,3 +32,4 @@ For example: } } -------------------------------------------------- +// CONSOLE diff --git a/docs/reference/analysis/tokenfilters/stemmer-override-tokenfilter.asciidoc b/docs/reference/analysis/tokenfilters/stemmer-override-tokenfilter.asciidoc index 6e010894f419b..33191805fe64e 100644 --- a/docs/reference/analysis/tokenfilters/stemmer-override-tokenfilter.asciidoc +++ b/docs/reference/analysis/tokenfilters/stemmer-override-tokenfilter.asciidoc @@ -20,15 +20,60 @@ Here is an example: [source,js] -------------------------------------------------- -index : - analysis : - analyzer : - myAnalyzer : - type : custom - tokenizer : standard - filter : [lowercase, custom_stems, porter_stem] - filter: - custom_stems: - type: stemmer_override - rules_path : analysis/custom_stems.txt +PUT /my_index +{ + "settings": { + "analysis" : { + "analyzer" : { + "my_analyzer" : { + "tokenizer" : "standard", + "filter" : ["lowercase", "custom_stems", "porter_stem"] + } + }, + "filter" : { + "custom_stems" : { + "type" : "stemmer_override", + "rules_path" : "analysis/stemmer_override.txt" + } + } + } + } +} -------------------------------------------------- +// CONSOLE + +Where the file looks like: + +[source,stemmer_override] +-------------------------------------------------- +include::{docdir}/../src/test/cluster/config/analysis/stemmer_override.txt[] +-------------------------------------------------- + +You can also define the overrides rules inline: + +[source,js] +-------------------------------------------------- +PUT /my_index +{ + "settings": { + "analysis" : { + "analyzer" : { + "my_analyzer" : { + "tokenizer" : "standard", + "filter" : ["lowercase", "custom_stems", "porter_stem"] + } + }, + "filter" : { + "custom_stems" : { + "type" : "stemmer_override", + "rules" : [ + "running => run", + "stemmer => stemmer" + ] + } + } + } + } +} +-------------------------------------------------- +// CONSOLE diff --git a/docs/reference/analysis/tokenfilters/stemmer-tokenfilter.asciidoc b/docs/reference/analysis/tokenfilters/stemmer-tokenfilter.asciidoc index 548342c521bdf..a052a4a7a5877 100644 --- a/docs/reference/analysis/tokenfilters/stemmer-tokenfilter.asciidoc +++ b/docs/reference/analysis/tokenfilters/stemmer-tokenfilter.asciidoc @@ -6,8 +6,9 @@ filters through a single unified interface. 
For example: [source,js] -------------------------------------------------- +PUT /my_index { - "index" : { + "settings": { "analysis" : { "analyzer" : { "my_analyzer" : { @@ -25,6 +26,7 @@ filters through a single unified interface. For example: } } -------------------------------------------------- +// CONSOLE The `language`/`name` parameter controls the stemmer with the following available values (the preferred filters are marked in *bold*): @@ -177,4 +179,3 @@ http://clef.isti.cnr.it/2003/WN_web/22.pdf[`light_swedish`] Turkish:: http://snowball.tartarus.org/algorithms/turkish/stemmer.html[*`turkish`*] - diff --git a/docs/reference/analysis/tokenfilters/stop-tokenfilter.asciidoc b/docs/reference/analysis/tokenfilters/stop-tokenfilter.asciidoc index f3b5a195662f3..b20f9c9418dc7 100644 --- a/docs/reference/analysis/tokenfilters/stop-tokenfilter.asciidoc +++ b/docs/reference/analysis/tokenfilters/stop-tokenfilter.asciidoc @@ -47,6 +47,7 @@ PUT /my_index } } ------------------------------------ +// CONSOLE or a predefined language-specific list: @@ -66,6 +67,7 @@ PUT /my_index } } ------------------------------------ +// CONSOLE Elasticsearch provides the following predefined list of languages: diff --git a/docs/reference/analysis/tokenfilters/synonym-graph-tokenfilter.asciidoc b/docs/reference/analysis/tokenfilters/synonym-graph-tokenfilter.asciidoc index 5ab498802d930..09707fdeb1cdb 100644 --- a/docs/reference/analysis/tokenfilters/synonym-graph-tokenfilter.asciidoc +++ b/docs/reference/analysis/tokenfilters/synonym-graph-tokenfilter.asciidoc @@ -3,7 +3,7 @@ experimental[] -The `synonym_graph` token filter allows to easily handle synonyms, +The `synonym_graph` token filter allows to easily handle synonyms, including multi-word synonyms correctly during the analysis process. In order to properly handle multi-word synonyms this token filter @@ -13,8 +13,8 @@ http://blog.mikemccandless.com/2012/04/lucenes-tokenstreams-are-actually.html[Lu ["NOTE",id="synonym-graph-index-note"] =============================== -This token filter is designed to be used as part of a search analyzer -only. If you want to apply synonyms during indexing please use the +This token filter is designed to be used as part of a search analyzer +only. If you want to apply synonyms during indexing please use the standard <>. =============================== @@ -23,30 +23,34 @@ Here is an example: [source,js] -------------------------------------------------- +PUT /test_index { - "index" : { - "analysis" : { - "analyzer" : { - "search_synonyms" : { - "tokenizer" : "whitespace", - "filter" : ["graph_synonyms"] - } - }, - "filter" : { - "graph_synonyms" : { - "type" : "synonym_graph", - "synonyms_path" : "analysis/synonym.txt" + "settings": { + "index" : { + "analysis" : { + "analyzer" : { + "search_synonyms" : { + "tokenizer" : "whitespace", + "filter" : ["graph_synonyms"] + } + }, + "filter" : { + "graph_synonyms" : { + "type" : "synonym_graph", + "synonyms_path" : "analysis/synonym.txt" + } } } } } } -------------------------------------------------- +// CONSOLE The above configures a `search_synonyms` filter, with a path of `analysis/synonym.txt` (relative to the `config` location). The -`search_synonyms` analyzer is then configured with the filter. -Additional settings are: `ignore_case` (defaults to `false`), and +`search_synonyms` analyzer is then configured with the filter. +Additional settings are: `ignore_case` (defaults to `false`), and `expand` (defaults to `true`). 
The `tokenizer` parameter controls the tokenizers that will be used to @@ -59,39 +63,9 @@ Two synonym formats are supported: Solr, WordNet. The following is a sample format of the file: -[source,js] +[source,synonyms] -------------------------------------------------- -# Blank lines and lines starting with pound are comments. - -# Explicit mappings match any token sequence on the LHS of "=>" -# and replace with all alternatives on the RHS. These types of mappings -# ignore the expand parameter in the schema. -# Examples: -i-pod, i pod => ipod, -sea biscuit, sea biscit => seabiscuit - -# Equivalent synonyms may be separated with commas and give -# no explicit mapping. In this case the mapping behavior will -# be taken from the expand parameter in the schema. This allows -# the same synonym file to be used in different synonym handling strategies. -# Examples: -ipod, i-pod, i pod -foozball , foosball -universe , cosmos -lol, laughing out loud - -# If expand==true, "ipod, i-pod, i pod" is equivalent -# to the explicit mapping: -ipod, i-pod, i pod => ipod, i-pod, i pod -# If expand==false, "ipod, i-pod, i pod" is equivalent -# to the explicit mapping: -ipod, i-pod, i pod => ipod - -# Multiple synonym mapping entries are merged. -foo => foo bar -foo => baz -# is equivalent to -foo => foo bar, baz +include::{docdir}/../src/test/cluster/config/analysis/synonym.txt[] -------------------------------------------------- You can also define synonyms for the filter directly in the @@ -99,18 +73,26 @@ configuration file (note use of `synonyms` instead of `synonyms_path`): [source,js] -------------------------------------------------- +PUT /test_index { - "filter" : { - "synonym" : { - "type" : "synonym_graph", - "synonyms" : [ - "lol, laughing out loud", - "universe, cosmos" - ] + "settings": { + "index" : { + "analysis" : { + "filter" : { + "synonym" : { + "type" : "synonym_graph", + "synonyms" : [ + "lol, laughing out loud", + "universe, cosmos" + ] + } + } + } } } } -------------------------------------------------- +// CONSOLE However, it is recommended to define large synonyms set in a file using `synonyms_path`, because specifying them inline increases cluster size unnecessarily. @@ -123,20 +105,28 @@ declared using `format`: [source,js] -------------------------------------------------- +PUT /test_index { - "filter" : { - "synonym" : { - "type" : "synonym_graph", - "format" : "wordnet", - "synonyms" : [ - "s(100000001,1,'abstain',v,1,0).", - "s(100000001,2,'refrain',v,1,0).", - "s(100000001,3,'desist',v,1,0)." - ] + "settings": { + "index" : { + "analysis" : { + "filter" : { + "synonym" : { + "type" : "synonym_graph", + "format" : "wordnet", + "synonyms" : [ + "s(100000001,1,'abstain',v,1,0).", + "s(100000001,2,'refrain',v,1,0).", + "s(100000001,3,'desist',v,1,0)." + ] + } + } + } } } } -------------------------------------------------- +// CONSOLE Using `synonyms_path` to define WordNet synonyms in a file is supported as well. 
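Because the note above restricts `synonym_graph` to search analyzers, a natural follow-up (not included in the change) is to use the `search_synonyms` analyzer only at search time and keep a plain analyzer at index time; the type and field names in this sketch are illustrative:

[source,js]
--------------------------------------------------
PUT /test_index/_mapping/doc
{
  "properties": {
    "title": {
      "type": "text",
      "analyzer": "standard",
      "search_analyzer": "search_synonyms"
    }
  }
}
--------------------------------------------------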
diff --git a/docs/reference/analysis/tokenfilters/synonym-tokenfilter.asciidoc b/docs/reference/analysis/tokenfilters/synonym-tokenfilter.asciidoc index 12da352b51cfe..c4961d1e5f9bc 100644 --- a/docs/reference/analysis/tokenfilters/synonym-tokenfilter.asciidoc +++ b/docs/reference/analysis/tokenfilters/synonym-tokenfilter.asciidoc @@ -7,25 +7,29 @@ Here is an example: [source,js] -------------------------------------------------- +PUT /test_index { - "index" : { - "analysis" : { - "analyzer" : { - "synonym" : { - "tokenizer" : "whitespace", - "filter" : ["synonym"] - } - }, - "filter" : { - "synonym" : { - "type" : "synonym", - "synonyms_path" : "analysis/synonym.txt" + "settings": { + "index" : { + "analysis" : { + "analyzer" : { + "synonym" : { + "tokenizer" : "whitespace", + "filter" : ["synonym"] + } + }, + "filter" : { + "synonym" : { + "type" : "synonym", + "synonyms_path" : "analysis/synonym.txt" + } } } } } } -------------------------------------------------- +// CONSOLE The above configures a `synonym` filter, with a path of `analysis/synonym.txt` (relative to the `config` location). The @@ -43,38 +47,9 @@ Two synonym formats are supported: Solr, WordNet. The following is a sample format of the file: -[source,js] +[source,synonyms] -------------------------------------------------- -# Blank lines and lines starting with pound are comments. - -# Explicit mappings match any token sequence on the LHS of "=>" -# and replace with all alternatives on the RHS. These types of mappings -# ignore the expand parameter in the schema. -# Examples: -i-pod, i pod => ipod, -sea biscuit, sea biscit => seabiscuit - -# Equivalent synonyms may be separated with commas and give -# no explicit mapping. In this case the mapping behavior will -# be taken from the expand parameter in the schema. This allows -# the same synonym file to be used in different synonym handling strategies. -# Examples: -ipod, i-pod, i pod -foozball , foosball -universe , cosmos - -# If expand==true, "ipod, i-pod, i pod" is equivalent -# to the explicit mapping: -ipod, i-pod, i pod => ipod, i-pod, i pod -# If expand==false, "ipod, i-pod, i pod" is equivalent -# to the explicit mapping: -ipod, i-pod, i pod => ipod - -# Multiple synonym mapping entries are merged. -foo => foo bar -foo => baz -# is equivalent to -foo => foo bar, baz +include::{docdir}/../src/test/cluster/config/analysis/synonym.txt[] -------------------------------------------------- You can also define synonyms for the filter directly in the @@ -82,18 +57,26 @@ configuration file (note use of `synonyms` instead of `synonyms_path`): [source,js] -------------------------------------------------- +PUT /test_index { - "filter" : { - "synonym" : { - "type" : "synonym", - "synonyms" : [ - "i-pod, i pod => ipod", - "universe, cosmos" - ] + "settings": { + "index" : { + "analysis" : { + "filter" : { + "synonym" : { + "type" : "synonym", + "synonyms" : [ + "i-pod, i pod => ipod", + "universe, cosmos" + ] + } + } + } } } } -------------------------------------------------- +// CONSOLE However, it is recommended to define large synonyms set in a file using `synonyms_path`, because specifying them inline increases cluster size unnecessarily. @@ -106,20 +89,28 @@ declared using `format`: [source,js] -------------------------------------------------- +PUT /test_index { - "filter" : { - "synonym" : { - "type" : "synonym", - "format" : "wordnet", - "synonyms" : [ - "s(100000001,1,'abstain',v,1,0).", - "s(100000001,2,'refrain',v,1,0).", - "s(100000001,3,'desist',v,1,0)." 
- ] + "settings": { + "index" : { + "analysis" : { + "filter" : { + "synonym" : { + "type" : "synonym", + "format" : "wordnet", + "synonyms" : [ + "s(100000001,1,'abstain',v,1,0).", + "s(100000001,2,'refrain',v,1,0).", + "s(100000001,3,'desist',v,1,0)." + ] + } + } + } } } } -------------------------------------------------- +// CONSOLE Using `synonyms_path` to define WordNet synonyms in a file is supported as well. diff --git a/docs/reference/analysis/tokenfilters/word-delimiter-graph-tokenfilter.asciidoc b/docs/reference/analysis/tokenfilters/word-delimiter-graph-tokenfilter.asciidoc index 01176fa5636c8..c221075b49f1f 100644 --- a/docs/reference/analysis/tokenfilters/word-delimiter-graph-tokenfilter.asciidoc +++ b/docs/reference/analysis/tokenfilters/word-delimiter-graph-tokenfilter.asciidoc @@ -75,7 +75,7 @@ Advance settings include: A custom type mapping table, for example (when configured using `type_table_path`): -[source,js] +[source,type_table] -------------------------------------------------- # Map the $, %, '.', and ',' characters to DIGIT # This might be useful for financial data. @@ -94,4 +94,3 @@ NOTE: Using a tokenizer like the `standard` tokenizer may interfere with the `catenate_*` and `preserve_original` parameters, as the original string may already have lost punctuation during tokenization. Instead, you may want to use the `whitespace` tokenizer. - diff --git a/docs/reference/analysis/tokenfilters/word-delimiter-tokenfilter.asciidoc b/docs/reference/analysis/tokenfilters/word-delimiter-tokenfilter.asciidoc index edb3f3b5590e6..009b027b9ef2d 100644 --- a/docs/reference/analysis/tokenfilters/word-delimiter-tokenfilter.asciidoc +++ b/docs/reference/analysis/tokenfilters/word-delimiter-tokenfilter.asciidoc @@ -64,7 +64,7 @@ Advance settings include: A custom type mapping table, for example (when configured using `type_table_path`): -[source,js] +[source,type_table] -------------------------------------------------- # Map the $, %, '.', and ',' characters to DIGIT # This might be useful for financial data. @@ -83,4 +83,3 @@ NOTE: Using a tokenizer like the `standard` tokenizer may interfere with the `catenate_*` and `preserve_original` parameters, as the original string may already have lost punctuation during tokenization. Instead, you may want to use the `whitespace` tokenizer. - diff --git a/docs/reference/cluster/allocation-explain.asciidoc b/docs/reference/cluster/allocation-explain.asciidoc index 8336cf73a7916..8749970aeb27c 100644 --- a/docs/reference/cluster/allocation-explain.asciidoc +++ b/docs/reference/cluster/allocation-explain.asciidoc @@ -5,7 +5,7 @@ The purpose of the cluster allocation explain API is to provide explanations for shard allocations in the cluster. For unassigned shards, the explain API provides an explanation for why the shard is unassigned. For assigned shards, the explain API provides an explanation for why the -shard is remaining on its current moved and has not moved or rebalanced to +shard is remaining on its current node and has not moved or rebalanced to another node. This API can be very useful when attempting to diagnose why a shard is unassigned or why a shard continues to remain on its current node when you might expect otherwise. 
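To make that diagnosis concrete, this is the shape of a typical explain request; the index name is illustrative, and the body can be omitted entirely to have Elasticsearch explain the first unassigned shard it finds:

[source,js]
--------------------------------------------------
GET /_cluster/allocation/explain
{
  "index": "my_index",
  "shard": 0,
  "primary": true
}
--------------------------------------------------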
diff --git a/docs/reference/cluster/tasks.asciidoc b/docs/reference/cluster/tasks.asciidoc index e087eebd9c7e3..f1f9a66931285 100644 --- a/docs/reference/cluster/tasks.asciidoc +++ b/docs/reference/cluster/tasks.asciidoc @@ -165,9 +165,10 @@ If a long-running task supports cancellation, it can be cancelled by the followi [source,js] -------------------------------------------------- -POST _tasks/task_id:1/_cancel +POST _tasks/node_id:task_id/_cancel -------------------------------------------------- // CONSOLE +// TEST[s/task_id/1/] The task cancellation command supports the same task selection parameters as the list tasks command, so multiple tasks can be cancelled at the same time. For example, the following command will cancel all reindex tasks running on the diff --git a/docs/reference/getting-started.asciidoc b/docs/reference/getting-started.asciidoc index 3cf371644f034..87d20b35221c7 100755 --- a/docs/reference/getting-started.asciidoc +++ b/docs/reference/getting-started.asciidoc @@ -216,8 +216,9 @@ And the response: epoch timestamp cluster status node.total node.data shards pri relo init unassign pending_tasks max_task_wait_time active_shards_percent 1475247709 17:01:49 elasticsearch green 1 1 0 0 0 0 0 0 - 100.0% -------------------------------------------------- -// TESTRESPONSE[s/0 0/0 [01]/] -// TESTRESPONSE[s/1475247709 17:01:49 elasticsearch/\\d+ \\d+:\\d+:\\d+ docs_integTestCluster/ _cat] +// TESTRESPONSE[s/1475247709 17:01:49 elasticsearch/\\d+ \\d+:\\d+:\\d+ docs_integTestCluster/] +// TESTRESPONSE[s/0 0 -/0 \\d+ -/] +// TESTRESPONSE[_cat] We can see that our cluster named "elasticsearch" is up with a green status. diff --git a/docs/reference/indices/put-mapping.asciidoc b/docs/reference/indices/put-mapping.asciidoc index d2e0bf5fd7761..6bc623b95e251 100644 --- a/docs/reference/indices/put-mapping.asciidoc +++ b/docs/reference/indices/put-mapping.asciidoc @@ -76,7 +76,6 @@ exceptions to this rule. For instance: * new <> can be added to <> fields. * new <> can be added to existing fields. -* <> can be disabled, but not enabled. * the <> parameter can be updated. For example: diff --git a/docs/reference/ingest/ingest-node.asciidoc b/docs/reference/ingest/ingest-node.asciidoc index d6acabd87f17a..970863041a9d1 100644 --- a/docs/reference/ingest/ingest-node.asciidoc +++ b/docs/reference/ingest/ingest-node.asciidoc @@ -1788,7 +1788,7 @@ Converts a string to its uppercase equivalent. Expands a field with dots into an object field. This processor allows fields with dots in the name to be accessible by other processors in the pipeline. -Otherwise these < can't be accessed by any processor. +Otherwise these <> can't be accessed by any processor. [[dot-expender-options]] .Dot Expand Options diff --git a/docs/reference/mapping/fields/parent-field.asciidoc b/docs/reference/mapping/fields/parent-field.asciidoc index 9197c7184ea69..82343b1a908b6 100644 --- a/docs/reference/mapping/fields/parent-field.asciidoc +++ b/docs/reference/mapping/fields/parent-field.asciidoc @@ -121,11 +121,17 @@ Global ordinals need to be rebuilt after any change to a shard. The more parent id values are stored in a shard, the longer it takes to rebuild the global ordinals for the `_parent` field. -Global ordinals, by default, are built lazily: the first parent-child query or -aggregation after a refresh will trigger building of global ordinals. This can -introduce a significant latency spike for your users. 
You can use -<> to shift the cost of building global -ordinals from query time to refresh time, by mapping the `_parent` field as follows: +Global ordinals, by default, are built eagerly: if the index has changed, +global ordinals for the `_parent` field will be rebuilt as part of the refresh. +This can add significant time to the refresh. However, most of the time this is the +right trade-off; otherwise global ordinals are rebuilt when the first parent-child +query or aggregation is used. This can introduce a significant latency spike for +your users, and it is usually worse because global ordinals for the `_parent` +field may be rebuilt several times within a single refresh interval when many writes +are occurring. + +When parent/child is used infrequently and writes occur frequently, it may +make sense to disable eager loading: [source,js] -------------------------------------------------- @@ -136,7 +142,7 @@ PUT my_index "my_child": { "_parent": { "type": "my_parent", - "eager_global_ordinals": true + "eager_global_ordinals": false } } } diff --git a/docs/reference/mapping/params/similarity.asciidoc b/docs/reference/mapping/params/similarity.asciidoc index 66b1d8a42cf31..0a5979c9d3272 100644 --- a/docs/reference/mapping/params/similarity.asciidoc +++ b/docs/reference/mapping/params/similarity.asciidoc @@ -3,7 +3,7 @@ Elasticsearch allows you to configure a scoring algorithm or _similarity_ per field. The `similarity` setting provides a simple way of choosing a similarity -algorithm other than the default TF/IDF, such as `BM25`. +algorithm other than the default `BM25`, such as `TF/IDF`. Similarities are mostly useful for <> fields, but can also apply to other field types. @@ -25,6 +25,11 @@ configuration are: Lucene. See {defguide}/practical-scoring-function.html[Lucene’s Practical Scoring Function] for more information. +`boolean`:: + A simple boolean similarity, which is used when full-text ranking is not needed + and the score should only be based on whether the query terms match or not. + Boolean similarity gives terms a score equal to their query boost. + The `similarity` can be set on the field level when a field is first created, as follows: @@ -42,6 +47,10 @@ PUT my_index "classic_field": { "type": "text", "similarity": "classic" <2> + }, + "boolean_sim_field": { + "type": "text", + "similarity": "boolean" <3> } } } @@ -51,3 +60,4 @@ PUT my_index // CONSOLE <1> The `default_field` uses the `BM25` similarity. <2> The `classic_field` uses the `classic` similarity (ie TF/IDF). +<3> The `boolean_sim_field` uses the `boolean` similarity.
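For illustration only (not part of the change), a search against the `boolean_sim_field` defined above shows the effect of the `boolean` similarity: the score of a matching document reflects the query boost of the matching terms rather than term frequencies or IDF:

[source,js]
--------------------------------------------------
GET my_index/_search
{
  "query": {
    "match": {
      "boolean_sim_field": {
        "query": "quick brown fox",
        "boost": 2
      }
    }
  }
}
--------------------------------------------------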
diff --git a/docs/reference/mapping/types/geo-point.asciidoc b/docs/reference/mapping/types/geo-point.asciidoc index a839117c7c5b0..ccfc93e34c58e 100644 --- a/docs/reference/mapping/types/geo-point.asciidoc +++ b/docs/reference/mapping/types/geo-point.asciidoc @@ -111,20 +111,17 @@ When accessing the value of a geo-point in a script, the value is returned as a `GeoPoint` object, which allows access to the `.lat` and `.lon` values respectively: - -[source,js] +[source,painless] -------------------------------------------------- -geopoint = doc['location'].value; -lat = geopoint.lat; -lon = geopoint.lon; +def geopoint = doc['location'].value; +def lat = geopoint.lat; +def lon = geopoint.lon; -------------------------------------------------- For performance reasons, it is better to access the lat/lon values directly: -[source,js] +[source,painless] -------------------------------------------------- -lat = doc['location'].lat; -lon = doc['location'].lon; +def lat = doc['location'].lat; +def lon = doc['location'].lon; -------------------------------------------------- - - diff --git a/docs/reference/mapping/types/geo-shape.asciidoc b/docs/reference/mapping/types/geo-shape.asciidoc index 4fe185fe463e6..18ffdbcbc6363 100644 --- a/docs/reference/mapping/types/geo-shape.asciidoc +++ b/docs/reference/mapping/types/geo-shape.asciidoc @@ -156,16 +156,23 @@ cell right next to it -- even though the shape is very close to the point. [source,js] -------------------------------------------------- +PUT /example { - "properties": { - "location": { - "type": "geo_shape", - "tree": "quadtree", - "precision": "1m" + "mappings": { + "doc": { + "properties": { + "location": { + "type": "geo_shape", + "tree": "quadtree", + "precision": "1m" + } + } } } } -------------------------------------------------- +// CONSOLE +// TESTSETUP This mapping maps the location field to the geo_shape type using the quad_tree implementation and a precision of 1m. Elasticsearch translates @@ -240,6 +247,7 @@ API. [source,js] -------------------------------------------------- +POST /example/doc { "location" : { "type" : "point", @@ -247,6 +255,7 @@ API. } } -------------------------------------------------- +// CONSOLE [float] ===== http://geojson.org/geojson-spec.html#id3[LineString] @@ -257,6 +266,7 @@ line. Specifying more than two points creates an arbitrary path. [source,js] -------------------------------------------------- +POST /example/doc { "location" : { "type" : "linestring", @@ -264,6 +274,7 @@ line. Specifying more than two points creates an arbitrary path. } } -------------------------------------------------- +// CONSOLE The above `linestring` would draw a straight line starting at the White House to the US Capitol Building. @@ -277,6 +288,7 @@ closed). [source,js] -------------------------------------------------- +POST /example/doc { "location" : { "type" : "polygon", @@ -286,12 +298,14 @@ closed). 
} } -------------------------------------------------- +// CONSOLE The first array represents the outer boundary of the polygon, the other arrays represent the interior shapes ("holes"): [source,js] -------------------------------------------------- +POST /example/doc { "location" : { "type" : "polygon", @@ -302,6 +316,8 @@ arrays represent the interior shapes ("holes"): } } -------------------------------------------------- +// CONSOLE +// TEST[skip:https://github.com/elastic/elasticsearch/issues/23836] *IMPORTANT NOTE:* GeoJSON does not mandate a specific order for vertices thus ambiguous polygons around the dateline and poles are possible. To alleviate ambiguity @@ -322,6 +338,7 @@ OGC standards to eliminate ambiguity resulting in a polygon that crosses the dat [source,js] -------------------------------------------------- +POST /example/doc { "location" : { "type" : "polygon", @@ -332,6 +349,8 @@ OGC standards to eliminate ambiguity resulting in a polygon that crosses the dat } } -------------------------------------------------- +// CONSOLE +// TEST[skip:https://github.com/elastic/elasticsearch/issues/23836] An `orientation` parameter can be defined when setting the geo_shape mapping (see <>). This will define vertex order for the coordinate list on the mapped geo_shape field. It can also be overridden on each document. The following is an example for @@ -339,6 +358,7 @@ overriding the orientation on a document: [source,js] -------------------------------------------------- +POST /example/doc { "location" : { "type" : "polygon", @@ -350,6 +370,8 @@ overriding the orientation on a document: } } -------------------------------------------------- +// CONSOLE +// TEST[skip:https://github.com/elastic/elasticsearch/issues/23836] [float] ===== http://www.geojson.org/geojson-spec.html#id5[MultiPoint] @@ -358,6 +380,7 @@ A list of geojson points. [source,js] -------------------------------------------------- +POST /example/doc { "location" : { "type" : "multipoint", @@ -367,6 +390,7 @@ A list of geojson points. } } -------------------------------------------------- +// CONSOLE [float] ===== http://www.geojson.org/geojson-spec.html#id6[MultiLineString] @@ -375,6 +399,7 @@ A list of geojson linestrings. [source,js] -------------------------------------------------- +POST /example/doc { "location" : { "type" : "multilinestring", @@ -386,6 +411,8 @@ A list of geojson linestrings. } } -------------------------------------------------- +// CONSOLE +// TEST[skip:https://github.com/elastic/elasticsearch/issues/23836] [float] ===== http://www.geojson.org/geojson-spec.html#id7[MultiPolygon] @@ -394,18 +421,20 @@ A list of geojson polygons. [source,js] -------------------------------------------------- +POST /example/doc { "location" : { "type" : "multipolygon", "coordinates" : [ [ [[102.0, 2.0], [103.0, 2.0], [103.0, 3.0], [102.0, 3.0], [102.0, 2.0]] ], - [ [[100.0, 0.0], [101.0, 0.0], [101.0, 1.0], [100.0, 1.0], [100.0, 0.0]], [[100.2, 0.2], [100.8, 0.2], [100.8, 0.8], [100.2, 0.8], [100.2, 0.2]] ] ] } } -------------------------------------------------- +// CONSOLE +// TEST[skip:https://github.com/elastic/elasticsearch/issues/23836] [float] ===== http://geojson.org/geojson-spec.html#geometrycollection[Geometry Collection] @@ -414,6 +443,7 @@ A collection of geojson geometry objects. [source,js] -------------------------------------------------- +POST /example/doc { "location" : { "type": "geometrycollection", @@ -430,7 +460,8 @@ A collection of geojson geometry objects. 
} } -------------------------------------------------- - +// CONSOLE +// TEST[skip:https://github.com/elastic/elasticsearch/issues/23836] [float] ===== Envelope @@ -441,6 +472,7 @@ bounding rectangle: [source,js] -------------------------------------------------- +POST /example/doc { "location" : { "type" : "envelope", @@ -448,6 +480,8 @@ bounding rectangle: } } -------------------------------------------------- +// CONSOLE +// TEST[skip:https://github.com/elastic/elasticsearch/issues/23836] [float] ===== Circle @@ -457,6 +491,7 @@ point with a radius: [source,js] -------------------------------------------------- +POST /example/doc { "location" : { "type" : "circle", @@ -465,6 +500,7 @@ point with a radius: } } -------------------------------------------------- +// CONSOLE Note: The inner `radius` field is required. If not specified, then the units of the `radius` will default to `METERS`. diff --git a/docs/reference/migration/migrate_6_0/java.asciidoc b/docs/reference/migration/migrate_6_0/java.asciidoc index e5f251c1c4357..991fe165fb231 100644 --- a/docs/reference/migration/migrate_6_0/java.asciidoc +++ b/docs/reference/migration/migrate_6_0/java.asciidoc @@ -7,3 +7,9 @@ Previously the `setSource` methods and other methods that accepted byte/string r an object source did not require the XContentType to be specified. The auto-detection of the content type is no longer used, so these methods now require the XContentType as an additional argument when providing the source in bytes or as a string. + +=== `DeleteByQueryRequest` requires an explicitly set query + +In previous versions of Elasticsearch, delete by query requests without an explicit query +were accepted, match_all was used as the default query and all documents were deleted +as a result. From version 6.0.0, a `DeleteByQueryRequest` requires an explicit query be set. diff --git a/docs/reference/migration/migrate_6_0/plugins.asciidoc b/docs/reference/migration/migrate_6_0/plugins.asciidoc index be650a71bd0eb..d2032d683b552 100644 --- a/docs/reference/migration/migrate_6_0/plugins.asciidoc +++ b/docs/reference/migration/migrate_6_0/plugins.asciidoc @@ -25,3 +25,11 @@ the region of the configured bucket. * The container an azure repository is configured with will no longer be created automatically. It must exist before the azure repository is created. + +* Global repositories settings you are able to set in elasticsearch config file under `repositories.azure` +name space have been removed. This includes `repositories.azure.account`, `repositories.azure.container`, +`repositories.azure.base_path`, `repositories.azure.location_mode`, `repositories.azure.chunk_size` and +`repositories.azure.compress`. +You must set those settings per repository instead. Respectively `account`, `container`, `base_path`, +`location_mode`, `chunk_size` and `compress`. +See {plugins}/repository-azure-usage.html#repository-azure-repository-settings[Azure Repository settings]. diff --git a/docs/reference/migration/migrate_6_0/rest.asciidoc b/docs/reference/migration/migrate_6_0/rest.asciidoc index 934d2c2e6477f..5ef09a15bff11 100644 --- a/docs/reference/migration/migrate_6_0/rest.asciidoc +++ b/docs/reference/migration/migrate_6_0/rest.asciidoc @@ -47,3 +47,9 @@ requests. Refresh requests that are broadcast to multiple shards that can have one or more shards fail during the request now return a 500 response instead of a 200 response in the event there is at least one failure. 
+ +=== Delete by Query API requires an explicit query + +In previous versions of Elasticsearch, delete by query requests without an explicit query +were accepted, match_all was used as the default query and all documents were deleted +as a result. From version 6.0.0, delete by query requests require an explicit query. diff --git a/docs/reference/modules/indices/request_cache.asciidoc b/docs/reference/modules/indices/request_cache.asciidoc index 22c203b48650a..e3896f718d91a 100644 --- a/docs/reference/modules/indices/request_cache.asciidoc +++ b/docs/reference/modules/indices/request_cache.asciidoc @@ -42,7 +42,7 @@ The cache can be expired manually with the < node.data: false <2> node.ingest: true <3> +search.remote.connect: false <4> ------------------- <1> Disable the `node.master` role (enabled by default). <2> Disable the `node.data` role (enabled by default). <3> The `node.ingest` role is enabled by default. +<4> Disable cross-cluster search (enabled by default). [float] [[coordinating-only-node]] @@ -235,17 +237,19 @@ acknowledgement of cluster state updates from every node! The benefit of coordinating only nodes should not be overstated -- data nodes can happily serve the same purpose. -To create a coordinating only node, set: +To create a dedicated coordinating node, set: [source,yaml] ------------------- node.master: false <1> node.data: false <2> node.ingest: false <3> +search.remote.connect: false <4> ------------------- <1> Disable the `node.master` role (enabled by default). <2> Disable the `node.data` role (enabled by default). <3> Disable the `node.ingest` role (enabled by default). +<4> Disable cross-cluster search (enabled by default). [float] == Node data path settings diff --git a/docs/reference/modules/scripting/using.asciidoc b/docs/reference/modules/scripting/using.asciidoc index 7c89201c7fc95..0a64758f5aa2a 100644 --- a/docs/reference/modules/scripting/using.asciidoc +++ b/docs/reference/modules/scripting/using.asciidoc @@ -12,6 +12,7 @@ the same pattern: "params": { ... } <3> } ------------------------------------- +// NOTCONSOLE <1> The language the script is written in, which defaults to `painless`. <2> The script itself which may be specified as `inline`, `stored`, or `file`. <3> Any named parameters that should be passed into the script. @@ -89,6 +90,7 @@ multipliers, don't hard-code the multiplier into the script: ---------------------- "inline": "doc['my_field'] * 2" ---------------------- +// NOTCONSOLE Instead, pass it in as a named parameter: @@ -99,6 +101,7 @@ Instead, pass it in as a named parameter: "multiplier": 2 } ---------------------- +// NOTCONSOLE The first version has to be recompiled every time the multiplier changes. The second version is only compiled once. 
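For illustration, a minimal sketch of a complete search request using the parameterized form above might look like the following. The index name `my_index` and the numeric field `my_field` are assumptions made for this example only; note that in Painless the named parameter is read through `params.multiplier` (the same `params.` accessor this change applies to the file-script example below).

[source,js]
----------------------
GET my_index/_search
{
  "script_fields": {
    "doubled_field": {
      "script": {
        "lang": "painless",
        "inline": "doc['my_field'].value * params.multiplier",
        "params": {
          "multiplier": 2
        }
      }
    }
  }
}
----------------------
// NOTCONSOLE

Only the value under `params` changes between requests, so the script is compiled once and then reused.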
@@ -134,7 +137,7 @@ the following example creates a Groovy script called `calculate-score`: [source,sh] -------------------------------------------------- -cat "Math.log(_score * 2) + my_modifier" > config/scripts/calculate-score.painless +cat "Math.log(_score * 2) + params.my_modifier" > config/scripts/calculate_score.painless -------------------------------------------------- This script can be used as follows: @@ -147,7 +150,7 @@ GET my_index/_search "script": { "script": { "lang": "painless", <1> - "file": "calculate-score", <2> + "file": "calculate_score", <2> "params": { "my_modifier": 2 } @@ -156,6 +159,8 @@ GET my_index/_search } } -------------------------------------------------- +// CONSOLE +// TEST[continued] <1> The language of the script, which should correspond with the script file suffix. <2> The name of the script, which should be the name of the file. @@ -206,16 +211,10 @@ delete and put requests. ==== Request Examples -The following are examples of stored script requests: - -[source,js] ------------------------------------ -/_scripts/{id} <1> ------------------------------------ -<1> The `id` is a unique identifier for the stored script. +The following are examples of using a stored script that lives at +`/_scripts/{id}`. -This example stores a Painless script called `calculate-score` in the cluster -state: +First, create the script called `calculate-score` in the cluster state: [source,js] ----------------------------------- diff --git a/docs/reference/modules/snapshots.asciidoc b/docs/reference/modules/snapshots.asciidoc index 09f7008a6fbd3..94138dbdb0f34 100644 --- a/docs/reference/modules/snapshots.asciidoc +++ b/docs/reference/modules/snapshots.asciidoc @@ -16,6 +16,19 @@ a 2.x cluster and use <> to rebuild the index in a 5.x cluster. This is as time consuming as restoring from archival copies of the original data. +Note: If a repository is connected to a 2.x cluster, and you want to connect +a 5.x cluster to the same repository, you will have to either first set the 2.x +repository to `readonly` mode (see below for details on `readonly` mode) or create +the 5.x repository in `readonly` mode. A 5.x cluster will update the repository +to conform to 5.x specific formats, which will mean that any new snapshots written +via the 2.x cluster will not be visible to the 5.x cluster, and vice versa. +In fact, as a general rule, only one cluster should connect to the same repository +location with write access; all other clusters connected to the same repository +should be set to `readonly` mode. While setting all but one repositories to +`readonly` should work with multiple clusters differing by one major version, +it is not a supported configuration. + + [float] === Repositories @@ -287,6 +300,35 @@ GET /_snapshot/my_backup/snapshot_1 // CONSOLE // TEST[continued] +This command returns basic information about the snapshot including start and end time, version of +elasticsearch that created the snapshot, the list of included indices, the current state of the +snapshot and the list of failures that occurred during the snapshot. The snapshot `state` can be + +[horizontal] +`IN_PROGRESS`:: + + The snapshot is currently running. + +`SUCCESS`:: + + The snapshot finished and all shards were stored successfully. + +`FAILED`:: + + The snapshot finished with an error and failed to store any data. + +`PARTIAL`:: + + The global cluster state was stored, but data of at least one shard wasn't stored successfully. 
+ The `failure` section in this case should contain more detailed information about shards + that were not processed correctly. + +`INCOMPATIBLE`:: + + The snapshot was created with an old version of elasticsearch and therefore is incompatible with + the current version of the cluster. + + Similar as for repositories, information about multiple snapshots can be queried in one go, supporting wildcards as well: [source,sh] diff --git a/docs/reference/query-dsl/exists-query.asciidoc b/docs/reference/query-dsl/exists-query.asciidoc index 4971219366f94..f0e76852da1e3 100644 --- a/docs/reference/query-dsl/exists-query.asciidoc +++ b/docs/reference/query-dsl/exists-query.asciidoc @@ -24,6 +24,7 @@ For instance, these documents would all match the above query: { "user": ["jane"] } { "user": ["jane", null ] } <3> -------------------------------------------------- +// NOTCONSOLE <1> An empty string is a non-`null` value. <2> Even though the `standard` analyzer would emit zero tokens, the original field is non-`null`. <3> At least one non-`null` value is required. @@ -37,6 +38,7 @@ These documents would *not* match the above query: { "user": [null] } <2> { "foo": "bar" } <3> -------------------------------------------------- +// NOTCONSOLE <1> This field has no values. <2> At least one non-`null` value is required. <3> The `user` field is missing completely. @@ -50,11 +52,21 @@ instance, if the `user` field were mapped as follows: [source,js] -------------------------------------------------- - "user": { - "type": "text", - "null_value": "_null_" +PUT /example +{ + "mappings": { + "doc": { + "properties": { + "user": { + "type": "keyword", + "null_value": "_null_" + } + } + } } +} -------------------------------------------------- +// CONSOLE then explicit `null` values would be indexed as the string `_null_`, and the following docs would match the `exists` filter: @@ -64,6 +76,7 @@ following docs would match the `exists` filter: { "user": null } { "user": [null] } -------------------------------------------------- +// NOTCONSOLE However, these docs--without explicit `null` values--would still have no values in the `user` field and thus would not match the `exists` filter: @@ -73,11 +86,12 @@ no values in the `user` field and thus would not match the `exists` filter: { "user": [] } { "foo": "bar" } -------------------------------------------------- +// NOTCONSOLE ==== `missing` query -'missing' query has been removed because it can be advantageously replaced by an `exists` query inside a must_not -clause as follows: +There isn't a `missing` query. Instead use the `exists` query inside a +`must_not` clause as follows: [source,js] -------------------------------------------------- @@ -97,4 +111,3 @@ GET /_search // CONSOLE This query returns documents that have no value in the user field. - diff --git a/docs/reference/query-dsl/geo-shape-query.asciidoc b/docs/reference/query-dsl/geo-shape-query.asciidoc index 1220306133670..00fd3b5609b54 100644 --- a/docs/reference/query-dsl/geo-shape-query.asciidoc +++ b/docs/reference/query-dsl/geo-shape-query.asciidoc @@ -6,7 +6,7 @@ Filter documents indexed using the `geo_shape` type. Requires the <>. The `geo_shape` query uses the same grid square representation as the -geo_shape mapping to find documents that have a shape that intersects +`geo_shape` mapping to find documents that have a shape that intersects with the query shape. It will also use the same PrefixTree configuration as defined for the field mapping. @@ -17,28 +17,44 @@ examples. 
==== Inline Shape Definition -Similar to the `geo_shape` type, the `geo_shape` Filter uses +Similar to the `geo_shape` type, the `geo_shape` query uses http://www.geojson.org[GeoJSON] to represent shapes. -Given a document that looks like this: +Given the following index: [source,js] -------------------------------------------------- +PUT /example { - "name": "Wind & Wetter, Berlin, Germany", - "location": { - "type": "Point", - "coordinates": [13.400544, 52.530286] + "mappings": { + "doc": { + "properties": { + "location": { + "type": "geo_shape" + } + } } + } +} + +POST /example/doc?refresh +{ + "name": "Wind & Wetter, Berlin, Germany", + "location": { + "type": "point", + "coordinates": [13.400544, 52.530286] + } } -------------------------------------------------- +// CONSOLE +// TESTSETUP The following query will find the point using the Elasticsearch's `envelope` GeoJSON extension: [source,js] -------------------------------------------------- -GET /_search +GET /example/_search { "query":{ "bool": { @@ -83,25 +99,43 @@ shape: [source,js] -------------------------------------------------- -GET /_search +PUT /shapes +{ + "mappings": { + "doc": { + "properties": { + "location": { + "type": "geo_shape" + } + } + } + } +} + +PUT /shapes/doc/deu +{ + "location": { + "type": "envelope", + "coordinates" : [[13.0, 53.0], [14.0, 52.0]] + } +} + +GET /example/_search { "query": { "bool": { - "must": { - "match_all": {} - }, - "filter": { - "geo_shape": { - "location": { - "indexed_shape": { - "id": "DEU", - "type": "countries", - "index": "shapes", - "path": "location" - } + "filter": { + "geo_shape": { + "location": { + "indexed_shape": { + "index": "shapes", + "type": "doc", + "id": "deu", + "path": "location" } } } + } } } } diff --git a/docs/reference/search/field-caps.asciidoc b/docs/reference/search/field-caps.asciidoc new file mode 100644 index 0000000000000..d327362f81c7b --- /dev/null +++ b/docs/reference/search/field-caps.asciidoc @@ -0,0 +1,126 @@ +[[search-field-caps]] +== Field Capabilities API + +experimental[] + +The field capabilities API allows to retrieve the capabilities of fields among multiple indices. + +The field capabilities api by default executes on all indices: + +[source,js] +-------------------------------------------------- +GET _field_caps?fields=rating +-------------------------------------------------- +// CONSOLE + +... but the request can also be restricted to specific indices: + +[source,js] +-------------------------------------------------- +GET twitter/_field_caps?fields=rating +-------------------------------------------------- +// CONSOLE +// TEST[setup:twitter] + +Alternatively the `fields` option can also be defined in the request body: + +[source,js] +-------------------------------------------------- +POST _field_caps +{ + "fields" : ["rating"] +} +-------------------------------------------------- +// CONSOLE + +This is equivalent to the previous request. + +Supported request options: + +[horizontal] +`fields`:: A list of fields to compute stats for. The field name supports wildcard notation. For example, using `text_*` + will cause all fields that match the expression to be returned. + +[float] +=== Field Capabilities + +The field capabilities api returns the following information per field: + +[horizontal] +`is_searchable`:: + +Whether this field is indexed for search on all indices. + +`is_aggregatable`:: + +Whether this field can be aggregated on all indices. 
+ +`indices`:: + +The list of indices where this field has the same type, +or null if all indices have the same type for the field. + +`non_searchable_indices`:: + +The list of indices where this field is not searchable, +or null if all indices have the same definition for the field. + +`non_aggregatable_indices`:: + +The list of indices where this field is not aggregatable, +or null if all indices have the same definition for the field. + + +[float] +=== Response format + +Request: + +[source,js] +-------------------------------------------------- +GET _field_caps?fields=rating,title +-------------------------------------------------- +// CONSOLE + +[source,js] +-------------------------------------------------- +{ + "fields": { + "rating": { <1> + "long": { + "is_searchable": true, + "is_aggregatable": false, + "indices": ["index1", "index2"], + "non_aggregatable_indices": ["index1"] <2> + }, + "keyword": { + "is_searchable": false, + "is_aggregatable": true, + "indices": ["index3", "index4"], + "non_searchable_indices": ["index4"] <3> + } + }, + "title": { <4> + "text": { + "is_searchable": true, + "is_aggregatable": false + + } + } + } +} +-------------------------------------------------- +// NOTCONSOLE + +<1> The field `rating` is defined as a long in `index1` and `index2` +and as a `keyword` in `index3` and `index4`. +<2> The field `rating` is not aggregatable in `index1`. +<3> The field `rating` is not searchable in `index4`. +<4> The field `title` is defined as `text` in all indices. + + + + + + + diff --git a/docs/reference/search/request/preference.asciidoc b/docs/reference/search/request/preference.asciidoc index ea9f4fea8751b..d0f60d700a82c 100644 --- a/docs/reference/search/request/preference.asciidoc +++ b/docs/reference/search/request/preference.asciidoc @@ -1,9 +1,8 @@ [[search-request-preference]] === Preference -Controls a `preference` of which shard replicas to execute the search -request on. By default, the operation is randomized between the shard -replicas. +Controls a `preference` of which shard copies on which to execute the +search. By default, the operation is randomized among the available shard copies. The `preference` is a query string parameter which can be set to: diff --git a/docs/reference/search/validate.asciidoc b/docs/reference/search/validate.asciidoc index 5c98f3f7f2d55..2b0ce48152ef3 100644 --- a/docs/reference/search/validate.asciidoc +++ b/docs/reference/search/validate.asciidoc @@ -146,24 +146,24 @@ When the query is valid, the explanation defaults to the string representation of that query. With `rewrite` set to `true`, the explanation is more detailed showing the actual Lucene query that will be executed. 
-For Fuzzy Queries: +For More Like This: [source,js] -------------------------------------------------- GET twitter/tweet/_validate/query?rewrite=true { "query": { - "match": { - "user": { - "query": "kimchy", - "fuzziness": "auto" - } + "more_like_this": { + "like": { + "_id": "2" + }, + "boost_terms": 1 } } } -------------------------------------------------- // CONSOLE -// TEST[skip:https://github.com/elastic/elasticsearch/issues/18254] +// TEST[skip:the output is randomized depending on which shard we hit] Response: @@ -180,54 +180,80 @@ Response: { "index": "twitter", "valid": true, - "explanation": "+user:kimchy +user:kimchi^0.75 #(ConstantScore(_type:tweet))^0.0" + "explanation": "((user:terminator^3.71334 plot:future^2.763601 plot:human^2.8415773 plot:sarah^3.4193945 plot:kyle^3.8244398 plot:cyborg^3.9177752 plot:connor^4.040236 plot:reese^4.7133346 ... )~6) -ConstantScore(_uid:tweet#2)) #(ConstantScore(_type:tweet))^0.0" } ] } -------------------------------------------------- // TESTRESPONSE -For More Like This: +By default, the request is executed on a single shard only, which is randomly +selected. The detailed explanation of the query may depend on which shard is +being hit, and therefore may vary from one request to another. So, in case of +query rewrite the `all_shards` parameter should be used to get response from +all available shards. + +For Fuzzy Queries: [source,js] -------------------------------------------------- -GET twitter/tweet/_validate/query?rewrite=true +GET twitter/tweet/_validate/query?rewrite=true&all_shards=true { "query": { - "more_like_this": { - "like": { - "_id": "2" - }, - "boost_terms": 1 + "match": { + "user": { + "query": "kimchy", + "fuzziness": "auto" + } } } } -------------------------------------------------- // CONSOLE -// TEST[skip:https://github.com/elastic/elasticsearch/issues/18254] Response: [source,js] -------------------------------------------------- { - "valid": true, - "_shards": { - "total": 1, - "successful": 1, - "failed": 0 - }, - "explanations": [ - { - "index": "twitter", - "valid": true, - "explanation": "((user:terminator^3.71334 plot:future^2.763601 plot:human^2.8415773 plot:sarah^3.4193945 plot:kyle^3.8244398 plot:cyborg^3.9177752 plot:connor^4.040236 plot:reese^4.7133346 ... )~6) -ConstantScore(_uid:tweet#2)) #(ConstantScore(_type:tweet))^0.0" - } - ] + "valid": true, + "_shards": { + "total": 5, + "successful": 5, + "failed": 0 + }, + "explanations": [ + { + "index": "twitter", + "shard": 0, + "valid": true, + "explanation": "+MatchNoDocsQuery(\"empty BooleanQuery\") #ConstantScore(MatchNoDocsQuery(\"empty BooleanQuery\"))" + }, + { + "index": "twitter", + "shard": 1, + "valid": true, + "explanation": "+MatchNoDocsQuery(\"empty BooleanQuery\") #ConstantScore(MatchNoDocsQuery(\"empty BooleanQuery\"))" + }, + { + "index": "twitter", + "shard": 2, + "valid": true, + "explanation": "(user:kimchi)^0.8333333" + }, + { + "index": "twitter", + "shard": 3, + "valid": true, + "explanation": "user:kimchy" + }, + { + "index": "twitter", + "shard": 4, + "valid": true, + "explanation": "+MatchNoDocsQuery(\"empty BooleanQuery\") #ConstantScore(MatchNoDocsQuery(\"empty BooleanQuery\"))" + } + ] } -------------------------------------------------- // TESTRESPONSE - -CAUTION: The request is executed on a single shard only, which is randomly -selected. The detailed explanation of the query may depend on which shard is -being hit, and therefore may vary from one request to another. 
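As a usage note for the behaviour described above: when only the overall `valid` flag matters and the rewritten Lucene explanation is not needed, the query can simply be validated without the `rewrite` parameter, which sidesteps the per-shard variability entirely. The following is an illustrative sketch only, reusing the same index and fuzzy query assumed in the examples above.

[source,js]
--------------------------------------------------
GET twitter/tweet/_validate/query
{
  "query": {
    "match": {
      "user": {
        "query": "kimchy",
        "fuzziness": "auto"
      }
    }
  }
}
--------------------------------------------------
// NOTCONSOLE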
diff --git a/docs/reference/setup/bootstrap-checks.asciidoc b/docs/reference/setup/bootstrap-checks.asciidoc index 22c3f03cd3633..2d18911beb67e 100644 --- a/docs/reference/setup/bootstrap-checks.asciidoc +++ b/docs/reference/setup/bootstrap-checks.asciidoc @@ -179,6 +179,13 @@ use the JVM flag `ExitOnOutOfMemoryError`. While this does not have the full capabilities of `OnError` nor `OnOutOfMemoryError`, arbitrary forking will not be supported with seccomp enabled. +=== Early-access check + +The OpenJDK project provides early-access snapshots of upcoming releases. These +releases are not suitable for production. The early-access check detects these +early-access snapshots. To pass this check, you must start Elasticsearch on a +release build of the JVM. + === G1GC check Early versions of the HotSpot JVM that shipped with JDK 8 are known to have diff --git a/docs/reference/setup/install/deb.asciidoc b/docs/reference/setup/install/deb.asciidoc index ca1219761450c..c73ed327e908c 100644 --- a/docs/reference/setup/install/deb.asciidoc +++ b/docs/reference/setup/install/deb.asciidoc @@ -114,7 +114,7 @@ endif::[] ifeval::["{release-state}"!="unreleased"] -The Debian package for Elastisearch v{version} can be downloaded from the website and installed as follows: +The Debian package for Elasticsearch v{version} can be downloaded from the website and installed as follows: ["source","sh",subs="attributes"] -------------------------------------------- diff --git a/docs/reference/setup/install/rpm.asciidoc b/docs/reference/setup/install/rpm.asciidoc index b737fbff12746..269eb6720e60e 100644 --- a/docs/reference/setup/install/rpm.asciidoc +++ b/docs/reference/setup/install/rpm.asciidoc @@ -99,7 +99,7 @@ endif::[] ifeval::["{release-state}"!="unreleased"] -The RPM for Elastisearch v{version} can be downloaded from the website and installed as follows: +The RPM for Elasticsearch v{version} can be downloaded from the website and installed as follows: ["source","sh",subs="attributes"] -------------------------------------------- diff --git a/docs/reference/setup/sysconfig/swap.asciidoc b/docs/reference/setup/sysconfig/swap.asciidoc index 19b6f751ee7c5..78ca7d40beeee 100644 --- a/docs/reference/setup/sysconfig/swap.asciidoc +++ b/docs/reference/setup/sysconfig/swap.asciidoc @@ -33,7 +33,7 @@ After starting Elasticsearch, you can see whether this setting was applied successfully by checking the value of `mlockall` in the output from this request: -[source,sh] +[source,js] -------------- GET _nodes?filter_path=**.mlockall -------------- diff --git a/docs/src/test/cluster/config/analysis/example_word_list.txt b/docs/src/test/cluster/config/analysis/example_word_list.txt new file mode 100644 index 0000000000000..f79aea42af203 --- /dev/null +++ b/docs/src/test/cluster/config/analysis/example_word_list.txt @@ -0,0 +1,4 @@ +test +list +of +words diff --git a/docs/src/test/cluster/config/analysis/hyphenation_patterns.xml b/docs/src/test/cluster/config/analysis/hyphenation_patterns.xml new file mode 100644 index 0000000000000..6241b3fc6cc80 --- /dev/null +++ b/docs/src/test/cluster/config/analysis/hyphenation_patterns.xml @@ -0,0 +1,21 @@ + + + + + + + + + + + +aA + + + + + + +.a2 + + diff --git a/docs/src/test/cluster/config/analysis/stemmer_override.txt b/docs/src/test/cluster/config/analysis/stemmer_override.txt new file mode 100644 index 0000000000000..6f6cd771cf572 --- /dev/null +++ b/docs/src/test/cluster/config/analysis/stemmer_override.txt @@ -0,0 +1,3 @@ +running => run + +stemmer => stemmer diff --git 
a/docs/src/test/cluster/config/analysis/synonym.txt b/docs/src/test/cluster/config/analysis/synonym.txt new file mode 100644 index 0000000000000..b51d975d8f5f2 --- /dev/null +++ b/docs/src/test/cluster/config/analysis/synonym.txt @@ -0,0 +1,31 @@ +# Blank lines and lines starting with pound are comments. + +# Explicit mappings match any token sequence on the LHS of "=>" +# and replace with all alternatives on the RHS. These types of mappings +# ignore the expand parameter in the schema. +# Examples: +i-pod, i pod => ipod, +sea biscuit, sea biscit => seabiscuit + +# Equivalent synonyms may be separated with commas and give +# no explicit mapping. In this case the mapping behavior will +# be taken from the expand parameter in the schema. This allows +# the same synonym file to be used in different synonym handling strategies. +# Examples: +ipod, i-pod, i pod +foozball , foosball +universe , cosmos +lol, laughing out loud + +# If expand==true, "ipod, i-pod, i pod" is equivalent +# to the explicit mapping: +ipod, i-pod, i pod => ipod, i-pod, i pod +# If expand==false, "ipod, i-pod, i pod" is equivalent +# to the explicit mapping: +ipod, i-pod, i pod => ipod + +# Multiple synonym mapping entries are merged. +foo => foo bar +foo => baz +# is equivalent to +foo => foo bar, baz diff --git a/docs/src/test/cluster/config/scripts/calculate_score.painless b/docs/src/test/cluster/config/scripts/calculate_score.painless new file mode 100644 index 0000000000000..0fad3fc59f950 --- /dev/null +++ b/docs/src/test/cluster/config/scripts/calculate_score.painless @@ -0,0 +1 @@ +Math.log(_score * 2) + params.my_modifier diff --git a/modules/lang-expression/licenses/lucene-expressions-6.5.0-snapshot-d00c5ca.jar.sha1 b/modules/lang-expression/licenses/lucene-expressions-6.5.0-snapshot-d00c5ca.jar.sha1 deleted file mode 100644 index f2b1d1ddfef20..0000000000000 --- a/modules/lang-expression/licenses/lucene-expressions-6.5.0-snapshot-d00c5ca.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -eb201cc666e834f5f128cea00acdf2c046fcbb87 \ No newline at end of file diff --git a/modules/lang-expression/licenses/lucene-expressions-6.5.0.jar.sha1 b/modules/lang-expression/licenses/lucene-expressions-6.5.0.jar.sha1 new file mode 100644 index 0000000000000..10514bb3d1f7e --- /dev/null +++ b/modules/lang-expression/licenses/lucene-expressions-6.5.0.jar.sha1 @@ -0,0 +1 @@ +5dfd44932fc77187a233a1cbf228c1a96ac8924f \ No newline at end of file diff --git a/modules/lang-painless/ant.xml b/modules/lang-painless/ant.xml deleted file mode 100644 index 90e66b7b1a9b9..0000000000000 --- a/modules/lang-painless/ant.xml +++ /dev/null @@ -1,157 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - diff --git a/modules/lang-painless/build.gradle b/modules/lang-painless/build.gradle index c9e013d8c6d7f..31b41261b3a77 100644 --- a/modules/lang-painless/build.gradle +++ b/modules/lang-painless/build.gradle @@ -33,20 +33,6 @@ dependencyLicenses { mapping from: /asm-.*/, to: 'asm' } -// regeneration logic, comes in via ant right now -// don't port it to gradle, it works fine. 
- -configurations { - regenerate -} - -dependencies { - regenerate 'org.antlr:antlr4:4.5.1-1' -} - -ant.references['regenerate.classpath'] = new Path(ant.project, configurations.regenerate.asPath) -ant.importBuild 'ant.xml' - integTestCluster { setting 'script.max_compilations_per_minute', '1000' } @@ -70,3 +56,95 @@ task generatePainlessApi(type: JavaExec) { classpath = sourceSets.test.runtimeClasspath args file('../../docs/reference/painless-api-reference') } + +/********************************************** + * Parser regeneration * + **********************************************/ + +configurations { + regenerate +} + +dependencies { + regenerate 'org.antlr:antlr4:4.5.1-1' +} + +String grammarPath = 'src/main/antlr' +String outputPath = 'src/main/java/org/elasticsearch/painless/antlr' + +task cleanGenerated(type: Delete) { + delete fileTree(grammarPath) { + include '*.tokens' + } + delete fileTree(outputPath) { + include 'Painless*.java' + } +} + +task regenLexer(type: JavaExec) { + dependsOn cleanGenerated + main = 'org.antlr.v4.Tool' + classpath = configurations.regenerate + systemProperty 'file.encoding', 'UTF-8' + systemProperty 'user.language', 'en' + systemProperty 'user.country', 'US' + systemProperty 'user.variant', '' + args '-Werror', + '-package', 'org.elasticsearch.painless.antlr', + '-o', outputPath, + "${file(grammarPath)}/PainlessLexer.g4" +} + +task regenParser(type: JavaExec) { + dependsOn regenLexer + main = 'org.antlr.v4.Tool' + classpath = configurations.regenerate + systemProperty 'file.encoding', 'UTF-8' + systemProperty 'user.language', 'en' + systemProperty 'user.country', 'US' + systemProperty 'user.variant', '' + args '-Werror', + '-package', 'org.elasticsearch.painless.antlr', + '-no-listener', + '-visitor', + // '-Xlog', + '-o', outputPath, + "${file(grammarPath)}/PainlessParser.g4" +} + +task regen { + dependsOn regenParser + doLast { + // moves token files to grammar directory for use with IDE's + ant.move(file: "${outputPath}/PainlessLexer.tokens", toDir: grammarPath) + ant.move(file: "${outputPath}/PainlessParser.tokens", toDir: grammarPath) + // make the generated classes package private + ant.replaceregexp(match: 'public ((interface|class) \\QPainless\\E\\w+)', + replace: '\\1', + encoding: 'UTF-8') { + fileset(dir: outputPath, includes: 'Painless*.java') + } + // make the lexer abstract + ant.replaceregexp(match: '(class \\QPainless\\ELexer)', + replace: 'abstract \\1', + encoding: 'UTF-8') { + fileset(dir: outputPath, includes: 'PainlessLexer.java') + } + // nuke timestamps/filenames in generated files + ant.replaceregexp(match: '\\Q// Generated from \\E.*', + replace: '\\/\\/ ANTLR GENERATED CODE: DO NOT EDIT', + encoding: 'UTF-8') { + fileset(dir: outputPath, includes: 'Painless*.java') + } + // remove tabs in antlr generated files + ant.replaceregexp(match: '\t', flags: 'g', replace: ' ', encoding: 'UTF-8') { + fileset(dir: outputPath, includes: 'Painless*.java') + } + // fix line endings + ant.fixcrlf(srcdir: outputPath) { + patternset(includes: 'Painless*.java') + } + } +} + + diff --git a/modules/lang-painless/src/main/antlr/PainlessLexer.g4 b/modules/lang-painless/src/main/antlr/PainlessLexer.g4 index f60d48efcc744..6ab6a86113595 100644 --- a/modules/lang-painless/src/main/antlr/PainlessLexer.g4 +++ b/modules/lang-painless/src/main/antlr/PainlessLexer.g4 @@ -120,7 +120,7 @@ INTEGER: ( '0' | [1-9] [0-9]* ) [lLfFdD]?; DECIMAL: ( '0' | [1-9] [0-9]* ) (DOT [0-9]+)? ( [eE] [+\-]? [0-9]+ )? [fFdD]?; STRING: ( '"' ( '\\"' | '\\\\' | ~[\\"] )*? 
'"' ) | ( '\'' ( '\\\'' | '\\\\' | ~[\\'] )*? '\'' ); -REGEX: '/' ( ~('/' | '\n') | '\\' ~'\n' )+ '/' [cilmsUux]* { slashIsRegex() }?; +REGEX: '/' ( '\\' ~'\n' | ~('/' | '\n') )+? '/' [cilmsUux]* { slashIsRegex() }?; TRUE: 'true'; FALSE: 'false'; diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/antlr/PainlessLexer.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/antlr/PainlessLexer.java index 44972061b5902..fd32c59b4ff03 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/antlr/PainlessLexer.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/antlr/PainlessLexer.java @@ -1,7 +1,5 @@ // ANTLR GENERATED CODE: DO NOT EDIT package org.elasticsearch.painless.antlr; - - import org.antlr.v4.runtime.Lexer; import org.antlr.v4.runtime.CharStream; import org.antlr.v4.runtime.Token; @@ -211,29 +209,29 @@ private boolean TYPE_sempred(RuleContext _localctx, int predIndex) { "\3N\3N\7N\u021a\nN\fN\16N\u021d\13N\3N\3N\3O\3O\3O\3O\3O\3P\3P\3P\3P\3"+ "P\3P\3Q\3Q\3Q\3Q\3Q\3R\3R\3R\3R\7R\u0235\nR\fR\16R\u0238\13R\3R\3R\3S"+ "\3S\7S\u023e\nS\fS\16S\u0241\13S\3T\3T\3T\7T\u0246\nT\fT\16T\u0249\13"+ - "T\5T\u024b\nT\3T\3T\3U\3U\7U\u0251\nU\fU\16U\u0254\13U\3U\3U\6\u00b9\u00c3"+ - "\u01fd\u0209\2V\4\3\6\4\b\5\n\6\f\7\16\b\20\t\22\n\24\13\26\f\30\r\32"+ - "\16\34\17\36\20 \21\"\22$\23&\24(\25*\26,\27.\30\60\31\62\32\64\33\66"+ - "\348\35:\36<\37> @!B\"D#F$H%J&L\'N(P)R*T+V,X-Z.\\/^\60`\61b\62d\63f\64"+ - "h\65j\66l\67n8p9r:t;v|?~@\u0080A\u0082B\u0084C\u0086D\u0088E\u008a"+ - "F\u008cG\u008eH\u0090I\u0092J\u0094K\u0096L\u0098M\u009aN\u009cO\u009e"+ - "P\u00a0Q\u00a2R\u00a4S\u00a6T\u00a8U\u00aaV\4\2\3\25\5\2\13\f\17\17\""+ - "\"\4\2\f\f\17\17\3\2\629\4\2NNnn\4\2ZZzz\5\2\62;CHch\3\2\63;\3\2\62;\b"+ - "\2FFHHNNffhhnn\4\2GGgg\4\2--//\6\2FFHHffhh\4\2$$^^\4\2))^^\4\2\f\f\61"+ - "\61\3\2\f\f\t\2WWeekknouuwwzz\5\2C\\aac|\6\2\62;C\\aac|\u0277\2\4\3\2"+ - "\2\2\2\6\3\2\2\2\2\b\3\2\2\2\2\n\3\2\2\2\2\f\3\2\2\2\2\16\3\2\2\2\2\20"+ - "\3\2\2\2\2\22\3\2\2\2\2\24\3\2\2\2\2\26\3\2\2\2\2\30\3\2\2\2\2\32\3\2"+ - "\2\2\2\34\3\2\2\2\2\36\3\2\2\2\2 \3\2\2\2\2\"\3\2\2\2\2$\3\2\2\2\2&\3"+ - "\2\2\2\2(\3\2\2\2\2*\3\2\2\2\2,\3\2\2\2\2.\3\2\2\2\2\60\3\2\2\2\2\62\3"+ - "\2\2\2\2\64\3\2\2\2\2\66\3\2\2\2\28\3\2\2\2\2:\3\2\2\2\2<\3\2\2\2\2>\3"+ - "\2\2\2\2@\3\2\2\2\2B\3\2\2\2\2D\3\2\2\2\2F\3\2\2\2\2H\3\2\2\2\2J\3\2\2"+ - "\2\2L\3\2\2\2\2N\3\2\2\2\2P\3\2\2\2\2R\3\2\2\2\2T\3\2\2\2\2V\3\2\2\2\2"+ - "X\3\2\2\2\2Z\3\2\2\2\2\\\3\2\2\2\2^\3\2\2\2\2`\3\2\2\2\2b\3\2\2\2\2d\3"+ - "\2\2\2\2f\3\2\2\2\2h\3\2\2\2\2j\3\2\2\2\2l\3\2\2\2\2n\3\2\2\2\2p\3\2\2"+ - "\2\2r\3\2\2\2\2t\3\2\2\2\2v\3\2\2\2\2x\3\2\2\2\2z\3\2\2\2\2|\3\2\2\2\2"+ - "~\3\2\2\2\2\u0080\3\2\2\2\2\u0082\3\2\2\2\2\u0084\3\2\2\2\2\u0086\3\2"+ - "\2\2\2\u0088\3\2\2\2\2\u008a\3\2\2\2\2\u008c\3\2\2\2\2\u008e\3\2\2\2\2"+ - "\u0090\3\2\2\2\2\u0092\3\2\2\2\2\u0094\3\2\2\2\2\u0096\3\2\2\2\2\u0098"+ + "T\5T\u024b\nT\3T\3T\3U\3U\7U\u0251\nU\fU\16U\u0254\13U\3U\3U\7\u00b9\u00c3"+ + "\u01fd\u0209\u0215\2V\4\3\6\4\b\5\n\6\f\7\16\b\20\t\22\n\24\13\26\f\30"+ + "\r\32\16\34\17\36\20 \21\"\22$\23&\24(\25*\26,\27.\30\60\31\62\32\64\33"+ + "\66\348\35:\36<\37> @!B\"D#F$H%J&L\'N(P)R*T+V,X-Z.\\/^\60`\61b\62d\63"+ + "f\64h\65j\66l\67n8p9r:t;v|?~@\u0080A\u0082B\u0084C\u0086D\u0088E"+ + "\u008aF\u008cG\u008eH\u0090I\u0092J\u0094K\u0096L\u0098M\u009aN\u009c"+ + "O\u009eP\u00a0Q\u00a2R\u00a4S\u00a6T\u00a8U\u00aaV\4\2\3\25\5\2\13\f\17"+ + "\17\"\"\4\2\f\f\17\17\3\2\629\4\2NNnn\4\2ZZzz\5\2\62;CHch\3\2\63;\3\2"+ + 
"\62;\b\2FFHHNNffhhnn\4\2GGgg\4\2--//\6\2FFHHffhh\4\2$$^^\4\2))^^\3\2\f"+ + "\f\4\2\f\f\61\61\t\2WWeekknouuwwzz\5\2C\\aac|\6\2\62;C\\aac|\u0277\2\4"+ + "\3\2\2\2\2\6\3\2\2\2\2\b\3\2\2\2\2\n\3\2\2\2\2\f\3\2\2\2\2\16\3\2\2\2"+ + "\2\20\3\2\2\2\2\22\3\2\2\2\2\24\3\2\2\2\2\26\3\2\2\2\2\30\3\2\2\2\2\32"+ + "\3\2\2\2\2\34\3\2\2\2\2\36\3\2\2\2\2 \3\2\2\2\2\"\3\2\2\2\2$\3\2\2\2\2"+ + "&\3\2\2\2\2(\3\2\2\2\2*\3\2\2\2\2,\3\2\2\2\2.\3\2\2\2\2\60\3\2\2\2\2\62"+ + "\3\2\2\2\2\64\3\2\2\2\2\66\3\2\2\2\28\3\2\2\2\2:\3\2\2\2\2<\3\2\2\2\2"+ + ">\3\2\2\2\2@\3\2\2\2\2B\3\2\2\2\2D\3\2\2\2\2F\3\2\2\2\2H\3\2\2\2\2J\3"+ + "\2\2\2\2L\3\2\2\2\2N\3\2\2\2\2P\3\2\2\2\2R\3\2\2\2\2T\3\2\2\2\2V\3\2\2"+ + "\2\2X\3\2\2\2\2Z\3\2\2\2\2\\\3\2\2\2\2^\3\2\2\2\2`\3\2\2\2\2b\3\2\2\2"+ + "\2d\3\2\2\2\2f\3\2\2\2\2h\3\2\2\2\2j\3\2\2\2\2l\3\2\2\2\2n\3\2\2\2\2p"+ + "\3\2\2\2\2r\3\2\2\2\2t\3\2\2\2\2v\3\2\2\2\2x\3\2\2\2\2z\3\2\2\2\2|\3\2"+ + "\2\2\2~\3\2\2\2\2\u0080\3\2\2\2\2\u0082\3\2\2\2\2\u0084\3\2\2\2\2\u0086"+ + "\3\2\2\2\2\u0088\3\2\2\2\2\u008a\3\2\2\2\2\u008c\3\2\2\2\2\u008e\3\2\2"+ + "\2\2\u0090\3\2\2\2\2\u0092\3\2\2\2\2\u0094\3\2\2\2\2\u0096\3\2\2\2\2\u0098"+ "\3\2\2\2\2\u009a\3\2\2\2\2\u009c\3\2\2\2\2\u009e\3\2\2\2\2\u00a0\3\2\2"+ "\2\2\u00a2\3\2\2\2\2\u00a4\3\2\2\2\2\u00a6\3\2\2\2\3\u00a8\3\2\2\2\3\u00aa"+ "\3\2\2\2\4\u00ad\3\2\2\2\6\u00c8\3\2\2\2\b\u00cc\3\2\2\2\n\u00ce\3\2\2"+ @@ -358,9 +356,9 @@ private boolean TYPE_sempred(RuleContext _localctx, int predIndex) { "\3\2\2\2\u0207\u0206\3\2\2\2\u0208\u020b\3\2\2\2\u0209\u020a\3\2\2\2\u0209"+ "\u0207\3\2\2\2\u020a\u020c\3\2\2\2\u020b\u0209\3\2\2\2\u020c\u020e\7)"+ "\2\2\u020d\u01f5\3\2\2\2\u020d\u0201\3\2\2\2\u020e\u009b\3\2\2\2\u020f"+ - "\u0213\7\61\2\2\u0210\u0214\n\20\2\2\u0211\u0212\7^\2\2\u0212\u0214\n"+ - "\21\2\2\u0213\u0210\3\2\2\2\u0213\u0211\3\2\2\2\u0214\u0215\3\2\2\2\u0215"+ - "\u0213\3\2\2\2\u0215\u0216\3\2\2\2\u0216\u0217\3\2\2\2\u0217\u021b\7\61"+ + "\u0213\7\61\2\2\u0210\u0211\7^\2\2\u0211\u0214\n\20\2\2\u0212\u0214\n"+ + "\21\2\2\u0213\u0210\3\2\2\2\u0213\u0212\3\2\2\2\u0214\u0215\3\2\2\2\u0215"+ + "\u0216\3\2\2\2\u0215\u0213\3\2\2\2\u0216\u0217\3\2\2\2\u0217\u021b\7\61"+ "\2\2\u0218\u021a\t\22\2\2\u0219\u0218\3\2\2\2\u021a\u021d\3\2\2\2\u021b"+ "\u0219\3\2\2\2\u021b\u021c\3\2\2\2\u021c\u021e\3\2\2\2\u021d\u021b\3\2"+ "\2\2\u021e\u021f\6N\3\2\u021f\u009d\3\2\2\2\u0220\u0221\7v\2\2\u0221\u0222"+ diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/ERegex.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/ERegex.java index 1d1f41948c43a..4b38868b1b1fc 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/ERegex.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/ERegex.java @@ -68,8 +68,9 @@ void analyze(Locals locals) { try { Pattern.compile(pattern, flags); - } catch (PatternSyntaxException exception) { - throw createError(exception); + } catch (PatternSyntaxException e) { + throw new Location(location.getSourceName(), location.getOffset() + 1 + e.getIndex()).createError( + new IllegalArgumentException("Error compiling regex: " + e.getDescription())); } constant = new Constant(location, Definition.PATTERN_TYPE.type, "regexAt$" + location.getOffset(), this::initializeConstant); diff --git a/modules/lang-painless/src/test/java/org/elasticsearch/painless/RegexTests.java b/modules/lang-painless/src/test/java/org/elasticsearch/painless/RegexTests.java index 1c53692ad741a..83a592b3f2632 100644 --- 
a/modules/lang-painless/src/test/java/org/elasticsearch/painless/RegexTests.java +++ b/modules/lang-painless/src/test/java/org/elasticsearch/painless/RegexTests.java @@ -20,6 +20,7 @@ package org.elasticsearch.painless; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.script.ScriptException; import java.nio.CharBuffer; import java.util.Arrays; @@ -44,8 +45,17 @@ public void testPatternAfterReturn() { assertEquals(false, exec("return 'bar' ==~ /foo/")); } - public void testSlashesEscapePattern() { - assertEquals(true, exec("return '//' ==~ /\\/\\//")); + public void testBackslashEscapesForwardSlash() { + assertEquals(true, exec("'//' ==~ /\\/\\//")); + } + + public void testBackslashEscapeBackslash() { + // Both of these are single backslashes but java escaping + Painless escaping.... + assertEquals(true, exec("'\\\\' ==~ /\\\\/")); + } + + public void testRegexIsNonGreedy() { + assertEquals(true, exec("def s = /\\\\/.split('.\\\\.'); return s[1] ==~ /\\./")); } public void testPatternAfterAssignment() { @@ -248,11 +258,14 @@ public void testCantUsePatternCompile() { } public void testBadRegexPattern() { - PatternSyntaxException e = expectScriptThrows(PatternSyntaxException.class, () -> { + ScriptException e = expectThrows(ScriptException.class, () -> { exec("/\\ujjjj/"); // Invalid unicode }); - assertThat(e.getMessage(), containsString("Illegal Unicode escape sequence near index 2")); - assertThat(e.getMessage(), containsString("\\ujjjj")); + assertEquals("Error compiling regex: Illegal Unicode escape sequence", e.getCause().getMessage()); + + // And make sure the location of the error points to the offset inside the pattern + assertEquals("/\\ujjjj/", e.getScriptStack().get(0)); + assertEquals(" ^---- HERE", e.getScriptStack().get(1)); } public void testRegexAgainstNumber() { diff --git a/modules/percolator/src/test/java/org/elasticsearch/percolator/PercolatorFieldMapperTests.java b/modules/percolator/src/test/java/org/elasticsearch/percolator/PercolatorFieldMapperTests.java index 244091595b3ca..cd702784ee514 100644 --- a/modules/percolator/src/test/java/org/elasticsearch/percolator/PercolatorFieldMapperTests.java +++ b/modules/percolator/src/test/java/org/elasticsearch/percolator/PercolatorFieldMapperTests.java @@ -44,6 +44,7 @@ import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.common.xcontent.support.XContentMapValues; import org.elasticsearch.index.IndexService; import org.elasticsearch.index.mapper.DocumentMapper; @@ -52,6 +53,7 @@ import org.elasticsearch.index.mapper.MapperService; import org.elasticsearch.index.mapper.ParseContext; import org.elasticsearch.index.mapper.ParsedDocument; +import org.elasticsearch.index.mapper.SourceToParse; import org.elasticsearch.index.query.BoolQueryBuilder; import org.elasticsearch.index.query.BoostingQueryBuilder; import org.elasticsearch.index.query.ConstantScoreQueryBuilder; @@ -244,9 +246,12 @@ private void assertTermIterator(PrefixCodedTerms.TermIterator termIterator, Stri public void testPercolatorFieldMapper() throws Exception { addQueryMapping(); QueryBuilder queryBuilder = termQuery("field", "value"); - ParsedDocument doc = mapperService.documentMapper(typeName).parse("test", typeName, "1", XContentFactory.jsonBuilder().startObject() - .field(fieldName, queryBuilder) - .endObject().bytes()); + ParsedDocument 
doc = mapperService.documentMapper(typeName).parse(SourceToParse.source("test", typeName, "1", XContentFactory + .jsonBuilder() + .startObject() + .field(fieldName, queryBuilder) + .endObject().bytes(), + XContentType.JSON)); assertThat(doc.rootDoc().getFields(fieldType.queryTermsField.name()).length, equalTo(1)); assertThat(doc.rootDoc().getFields(fieldType.queryTermsField.name())[0].binaryValue().utf8ToString(), equalTo("field\0value")); @@ -259,9 +264,12 @@ public void testPercolatorFieldMapper() throws Exception { // add an query for which we don't extract terms from queryBuilder = rangeQuery("field").from("a").to("z"); - doc = mapperService.documentMapper(typeName).parse("test", typeName, "1", XContentFactory.jsonBuilder().startObject() + doc = mapperService.documentMapper(typeName).parse(SourceToParse.source("test", typeName, "1", XContentFactory + .jsonBuilder() + .startObject() .field(fieldName, queryBuilder) - .endObject().bytes()); + .endObject().bytes(), + XContentType.JSON)); assertThat(doc.rootDoc().getFields(fieldType.extractionResultField.name()).length, equalTo(1)); assertThat(doc.rootDoc().getFields(fieldType.extractionResultField.name())[0].stringValue(), equalTo(EXTRACTION_FAILED)); @@ -282,10 +290,11 @@ public void testStoringQueries() throws Exception { // (it can't use shard data for rewriting purposes, because percolator queries run on MemoryIndex) for (QueryBuilder query : queries) { - ParsedDocument doc = mapperService.documentMapper(typeName).parse("test", typeName, "1", + ParsedDocument doc = mapperService.documentMapper(typeName).parse(SourceToParse.source("test", typeName, "1", XContentFactory.jsonBuilder().startObject() .field(fieldName, query) - .endObject().bytes()); + .endObject().bytes(), + XContentType.JSON)); BytesRef qbSource = doc.rootDoc().getFields(fieldType.queryBuilderField.name())[0].binaryValue(); assertQueryBuilder(qbSource, query); } @@ -295,9 +304,12 @@ public void testQueryWithRewrite() throws Exception { addQueryMapping(); client().prepareIndex("remote", "type", "1").setSource("field", "value").get(); QueryBuilder queryBuilder = termsLookupQuery("field", new TermsLookup("remote", "type", "1", "field")); - ParsedDocument doc = mapperService.documentMapper(typeName).parse("test", typeName, "1", XContentFactory.jsonBuilder().startObject() + ParsedDocument doc = mapperService.documentMapper(typeName).parse(SourceToParse.source("test", typeName, "1", XContentFactory + .jsonBuilder() + .startObject() .field(fieldName, queryBuilder) - .endObject().bytes()); + .endObject().bytes(), + XContentType.JSON)); BytesRef qbSource = doc.rootDoc().getFields(fieldType.queryBuilderField.name())[0].binaryValue(); assertQueryBuilder(qbSource, queryBuilder.rewrite(indexService.newQueryShardContext( randomInt(20), null, () -> { throw new UnsupportedOperationException(); }))); @@ -307,9 +319,12 @@ public void testQueryWithRewrite() throws Exception { public void testPercolatorFieldMapperUnMappedField() throws Exception { addQueryMapping(); MapperParsingException exception = expectThrows(MapperParsingException.class, () -> { - mapperService.documentMapper(typeName).parse("test", typeName, "1", XContentFactory.jsonBuilder().startObject() + mapperService.documentMapper(typeName).parse(SourceToParse.source("test", typeName, "1", XContentFactory + .jsonBuilder() + .startObject() .field(fieldName, termQuery("unmapped_field", "value")) - .endObject().bytes()); + .endObject().bytes(), + XContentType.JSON)); }); assertThat(exception.getCause(), 
instanceOf(QueryShardException.class)); assertThat(exception.getCause().getMessage(), equalTo("No field mapping can be found for the field with name [unmapped_field]")); @@ -318,14 +333,21 @@ public void testPercolatorFieldMapperUnMappedField() throws Exception { public void testPercolatorFieldMapper_noQuery() throws Exception { addQueryMapping(); - ParsedDocument doc = mapperService.documentMapper(typeName).parse("test", typeName, "1", XContentFactory.jsonBuilder().startObject() - .endObject().bytes()); + ParsedDocument doc = mapperService.documentMapper(typeName).parse(SourceToParse.source("test", typeName, "1", XContentFactory + .jsonBuilder() + .startObject() + .endObject() + .bytes(), + XContentType.JSON)); assertThat(doc.rootDoc().getFields(fieldType.queryBuilderField.name()).length, equalTo(0)); try { - mapperService.documentMapper(typeName).parse("test", typeName, "1", XContentFactory.jsonBuilder().startObject() - .nullField(fieldName) - .endObject().bytes()); + mapperService.documentMapper(typeName).parse(SourceToParse.source("test", typeName, "1", XContentFactory + .jsonBuilder() + .startObject() + .nullField(fieldName) + .endObject().bytes(), + XContentType.JSON)); } catch (MapperParsingException e) { assertThat(e.getDetailedMessage(), containsString("query malformed, must start with start_object")); } @@ -357,12 +379,12 @@ public void testMultiplePercolatorFields() throws Exception { mapperService.merge(typeName, new CompressedXContent(percolatorMapper), MapperService.MergeReason.MAPPING_UPDATE, true); QueryBuilder queryBuilder = matchQuery("field", "value"); - ParsedDocument doc = mapperService.documentMapper(typeName).parse("test", typeName, "1", + ParsedDocument doc = mapperService.documentMapper(typeName).parse(SourceToParse.source("test", typeName, "1", jsonBuilder().startObject() .field("query_field1", queryBuilder) .field("query_field2", queryBuilder) - .endObject().bytes() - ); + .endObject().bytes(), + XContentType.JSON)); assertThat(doc.rootDoc().getFields().size(), equalTo(14)); // also includes all other meta fields BytesRef queryBuilderAsBytes = doc.rootDoc().getField("query_field1.query_builder_field").binaryValue(); assertQueryBuilder(queryBuilderAsBytes, queryBuilder); @@ -388,35 +410,35 @@ public void testNestedPercolatorField() throws Exception { mapperService.merge(typeName, new CompressedXContent(percolatorMapper), MapperService.MergeReason.MAPPING_UPDATE, true); QueryBuilder queryBuilder = matchQuery("field", "value"); - ParsedDocument doc = mapperService.documentMapper(typeName).parse("test", typeName, "1", + ParsedDocument doc = mapperService.documentMapper(typeName).parse(SourceToParse.source("test", typeName, "1", jsonBuilder().startObject().startObject("object_field") .field("query_field", queryBuilder) - .endObject().endObject().bytes() - ); + .endObject().endObject().bytes(), + XContentType.JSON)); assertThat(doc.rootDoc().getFields().size(), equalTo(11)); // also includes all other meta fields BytesRef queryBuilderAsBytes = doc.rootDoc().getField("object_field.query_field.query_builder_field").binaryValue(); assertQueryBuilder(queryBuilderAsBytes, queryBuilder); - doc = mapperService.documentMapper(typeName).parse("test", typeName, "1", + doc = mapperService.documentMapper(typeName).parse(SourceToParse.source("test", typeName, "1", jsonBuilder().startObject() .startArray("object_field") .startObject().field("query_field", queryBuilder).endObject() .endArray() - .endObject().bytes() - ); + .endObject().bytes(), + XContentType.JSON)); 
assertThat(doc.rootDoc().getFields().size(), equalTo(11)); // also includes all other meta fields queryBuilderAsBytes = doc.rootDoc().getField("object_field.query_field.query_builder_field").binaryValue(); assertQueryBuilder(queryBuilderAsBytes, queryBuilder); MapperParsingException e = expectThrows(MapperParsingException.class, () -> { - mapperService.documentMapper(typeName).parse("test", typeName, "1", + mapperService.documentMapper(typeName).parse(SourceToParse.source("test", typeName, "1", jsonBuilder().startObject() .startArray("object_field") .startObject().field("query_field", queryBuilder).endObject() .startObject().field("query_field", queryBuilder).endObject() .endArray() - .endObject().bytes() - ); + .endObject().bytes(), + XContentType.JSON)); } ); assertThat(e.getCause(), instanceOf(IllegalArgumentException.class)); @@ -426,42 +448,47 @@ public void testNestedPercolatorField() throws Exception { public void testRangeQueryWithNowRangeIsForbidden() throws Exception { addQueryMapping(); MapperParsingException e = expectThrows(MapperParsingException.class, () -> { - mapperService.documentMapper(typeName).parse("test", typeName, "1", + mapperService.documentMapper(typeName).parse(SourceToParse.source("test", typeName, "1", jsonBuilder().startObject() .field(fieldName, rangeQuery("date_field").from("2016-01-01||/D").to("now")) - .endObject().bytes()); + .endObject().bytes(), + XContentType.JSON)); } ); assertThat(e.getCause(), instanceOf(IllegalArgumentException.class)); e = expectThrows(MapperParsingException.class, () -> { - mapperService.documentMapper(typeName).parse("test", typeName, "1", + mapperService.documentMapper(typeName).parse(SourceToParse.source("test", typeName, "1", jsonBuilder().startObject() .field(fieldName, rangeQuery("date_field").from("2016-01-01||/D").to("now/D")) - .endObject().bytes()); + .endObject().bytes(), + XContentType.JSON)); } ); assertThat(e.getCause(), instanceOf(IllegalArgumentException.class)); e = expectThrows(MapperParsingException.class, () -> { - mapperService.documentMapper(typeName).parse("test", typeName, "1", + mapperService.documentMapper(typeName).parse(SourceToParse.source("test", typeName, "1", jsonBuilder().startObject() .field(fieldName, rangeQuery("date_field").from("now-1d").to("now")) - .endObject().bytes()); + .endObject().bytes(), + XContentType.JSON)); } ); assertThat(e.getCause(), instanceOf(IllegalArgumentException.class)); e = expectThrows(MapperParsingException.class, () -> { - mapperService.documentMapper(typeName).parse("test", typeName, "1", + mapperService.documentMapper(typeName).parse(SourceToParse.source("test", typeName, "1", jsonBuilder().startObject() .field(fieldName, rangeQuery("date_field").from("now")) - .endObject().bytes()); + .endObject().bytes(), + XContentType.JSON)); } ); assertThat(e.getCause(), instanceOf(IllegalArgumentException.class)); e = expectThrows(MapperParsingException.class, () -> { - mapperService.documentMapper(typeName).parse("test", typeName, "1", + mapperService.documentMapper(typeName).parse(SourceToParse.source("test", typeName, "1", jsonBuilder().startObject() .field(fieldName, rangeQuery("date_field").to("now")) - .endObject().bytes()); + .endObject().bytes(), + XContentType.JSON)); } ); assertThat(e.getCause(), instanceOf(IllegalArgumentException.class)); @@ -471,31 +498,39 @@ public void testRangeQueryWithNowRangeIsForbidden() throws Exception { public void testVerifyRangeQueryWithNullBounds() throws Exception { addQueryMapping(); MapperParsingException e = 
expectThrows(MapperParsingException.class, () -> { - mapperService.documentMapper(typeName).parse("test", typeName, "1", + mapperService.documentMapper(typeName).parse(SourceToParse.source("test", typeName, "1", jsonBuilder().startObject() .field(fieldName, rangeQuery("date_field").from("now").to(null)) - .endObject().bytes()); + .endObject().bytes(), + XContentType.JSON)); + } ); assertThat(e.getCause(), instanceOf(IllegalArgumentException.class)); e = expectThrows(MapperParsingException.class, () -> { - mapperService.documentMapper(typeName).parse("test", typeName, "1", + mapperService.documentMapper(typeName).parse(SourceToParse.source("test", typeName, "1", jsonBuilder().startObject() .field(fieldName, rangeQuery("date_field").from(null).to("now")) - .endObject().bytes()); + .endObject().bytes(), + XContentType.JSON)); + } ); assertThat(e.getCause(), instanceOf(IllegalArgumentException.class)); // No validation failures: - mapperService.documentMapper(typeName).parse("test", typeName, "1", + mapperService.documentMapper(typeName).parse(SourceToParse.source("test", typeName, "1", jsonBuilder().startObject() .field(fieldName, rangeQuery("date_field").from("2016-01-01").to(null)) - .endObject().bytes()); - mapperService.documentMapper(typeName).parse("test", typeName, "1", + .endObject().bytes(), + XContentType.JSON)); + + mapperService.documentMapper(typeName).parse(SourceToParse.source("test", typeName, "1", jsonBuilder().startObject() .field(fieldName, rangeQuery("date_field").from(null).to("2016-01-01")) - .endObject().bytes()); + .endObject().bytes(), + XContentType.JSON)); + } public void testUnsupportedQueries() { @@ -570,10 +605,11 @@ public void testImplicitlySetDefaultScriptLang() throws Exception { query.endObject(); query.endObject(); - ParsedDocument doc = mapperService.documentMapper(typeName).parse("test", typeName, "1", + ParsedDocument doc = mapperService.documentMapper(typeName).parse(SourceToParse.source("test", typeName, "1", XContentFactory.jsonBuilder().startObject() .rawField(fieldName, new BytesArray(query.string()), query.contentType()) - .endObject().bytes()); + .endObject().bytes(), + XContentType.JSON)); BytesRef querySource = doc.rootDoc().getFields(fieldType.queryBuilderField.name())[0].binaryValue(); Map parsedQuery = XContentHelper.convertToMap(new BytesArray(querySource), true).v2(); assertEquals(Script.DEFAULT_SCRIPT_LANG, XContentMapValues.extractValue("script.script.lang", parsedQuery)); @@ -597,10 +633,11 @@ public void testImplicitlySetDefaultScriptLang() throws Exception { query.endObject(); query.endObject(); - doc = mapperService.documentMapper(typeName).parse("test", typeName, "1", + doc = mapperService.documentMapper(typeName).parse(SourceToParse.source("test", typeName, "1", XContentFactory.jsonBuilder().startObject() .rawField(fieldName, new BytesArray(query.string()), query.contentType()) - .endObject().bytes()); + .endObject().bytes(), + XContentType.JSON)); querySource = doc.rootDoc().getFields(fieldType.queryBuilderField.name())[0].binaryValue(); parsedQuery = XContentHelper.convertToMap(new BytesArray(querySource), true).v2(); assertEquals(Script.DEFAULT_SCRIPT_LANG, diff --git a/modules/reindex/src/main/java/org/elasticsearch/index/reindex/remote/RemoteRequestBuilders.java b/modules/reindex/src/main/java/org/elasticsearch/index/reindex/remote/RemoteRequestBuilders.java index a6afa6df39d77..4804818890e47 100644 --- a/modules/reindex/src/main/java/org/elasticsearch/index/reindex/remote/RemoteRequestBuilders.java +++ 
b/modules/reindex/src/main/java/org/elasticsearch/index/reindex/remote/RemoteRequestBuilders.java @@ -171,7 +171,11 @@ static Map scrollParams(TimeValue keepAlive) { return singletonMap("scroll", keepAlive.toString()); } - static HttpEntity scrollEntity(String scroll) { + static HttpEntity scrollEntity(String scroll, Version remoteVersion) { + if (remoteVersion.before(Version.V_2_0_0)) { + // Versions before 2.0.0 extract the plain scroll_id from the body + return new StringEntity(scroll, ContentType.TEXT_PLAIN); + } try (XContentBuilder entity = JsonXContent.contentBuilder()) { return new StringEntity(entity.startObject() .field("scroll_id", scroll) @@ -181,7 +185,11 @@ static HttpEntity scrollEntity(String scroll) { } } - static HttpEntity clearScrollEntity(String scroll) { + static HttpEntity clearScrollEntity(String scroll, Version remoteVersion) { + if (remoteVersion.before(Version.V_2_0_0)) { + // Versions before 2.0.0 extract the plain scroll_id from the body + return new StringEntity(scroll, ContentType.TEXT_PLAIN); + } try (XContentBuilder entity = JsonXContent.contentBuilder()) { return new StringEntity(entity.startObject() .array("scroll_id", scroll) diff --git a/modules/reindex/src/main/java/org/elasticsearch/index/reindex/remote/RemoteScrollableHitSource.java b/modules/reindex/src/main/java/org/elasticsearch/index/reindex/remote/RemoteScrollableHitSource.java index 796106c269e50..974fd9438d2b6 100644 --- a/modules/reindex/src/main/java/org/elasticsearch/index/reindex/remote/RemoteScrollableHitSource.java +++ b/modules/reindex/src/main/java/org/elasticsearch/index/reindex/remote/RemoteScrollableHitSource.java @@ -107,12 +107,12 @@ private void onStartResponse(Consumer onResponse, Response res @Override protected void doStartNextScroll(String scrollId, TimeValue extraKeepAlive, Consumer onResponse) { execute("POST", scrollPath(), scrollParams(timeValueNanos(searchRequest.scroll().keepAlive().nanos() + extraKeepAlive.nanos())), - scrollEntity(scrollId), RESPONSE_PARSER, onResponse); + scrollEntity(scrollId, remoteVersion), RESPONSE_PARSER, onResponse); } @Override protected void clearScroll(String scrollId, Runnable onCompletion) { - client.performRequestAsync("DELETE", scrollPath(), emptyMap(), clearScrollEntity(scrollId), new ResponseListener() { + client.performRequestAsync("DELETE", scrollPath(), emptyMap(), clearScrollEntity(scrollId, remoteVersion), new ResponseListener() { @Override public void onSuccess(org.elasticsearch.client.Response response) { logger.debug("Successfully cleared [{}]", scrollId); @@ -141,15 +141,18 @@ private void logFailure(Exception e) { } @Override - protected void cleanup() { - /* This is called on the RestClient's thread pool and attempting to close the client on its own threadpool causes it to fail to - * close. So we always shutdown the RestClient asynchronously on a thread in Elasticsearch's generic thread pool. */ + protected void cleanup(Runnable onCompletion) { + /* This is called on the RestClient's thread pool and attempting to close the client on its + * own threadpool causes it to fail to close. So we always shutdown the RestClient + * asynchronously on a thread in Elasticsearch's generic thread pool. 
*/ threadPool.generic().submit(() -> { try { client.close(); logger.debug("Shut down remote connection"); } catch (IOException e) { logger.error("Failed to shutdown the remote connection", e); + } finally { + onCompletion.run(); } }); } diff --git a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/CancelTests.java b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/CancelTests.java index 01c5977e8223d..2785d53507899 100644 --- a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/CancelTests.java +++ b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/CancelTests.java @@ -32,6 +32,7 @@ import org.elasticsearch.index.IndexModule; import org.elasticsearch.index.engine.Engine; import org.elasticsearch.index.engine.Engine.Operation.Origin; +import org.elasticsearch.index.query.QueryBuilders; import org.elasticsearch.index.shard.IndexingOperationListener; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.ingest.IngestTestPlugin; @@ -160,7 +161,14 @@ private void testCancel(String action, AbstractBulkByScrollRequestBuilder }); // And check the status of the response - BulkByScrollResponse response = future.get(); + BulkByScrollResponse response; + try { + response = future.get(30, TimeUnit.SECONDS); + } catch (Exception e) { + String tasks = client().admin().cluster().prepareListTasks().setParentTaskId(mainTask.getTaskId()) + .setDetailed(true).get().toString(); + throw new RuntimeException("Exception while waiting for the response. Running tasks: " + tasks, e); + } assertThat(response.getReasonCancelled(), equalTo("by user request")); assertThat(response.getBulkFailures(), emptyIterable()); assertThat(response.getSearchFailures(), emptyIterable()); @@ -216,9 +224,10 @@ public void testUpdateByQueryCancel() throws Exception { } public void testDeleteByQueryCancel() throws Exception { - testCancel(DeleteByQueryAction.NAME, deleteByQuery().source(INDEX), (response, total, modified) -> { - assertThat(response, matcher().deleted(modified).reasonCancelled(equalTo("by user request"))); - assertHitCount(client().prepareSearch(INDEX).setSize(0).get(), total - modified); + testCancel(DeleteByQueryAction.NAME, deleteByQuery().source(INDEX).filter(QueryBuilders.matchAllQuery()), + (response, total, modified) -> { + assertThat(response, matcher().deleted(modified).reasonCancelled(equalTo("by user request"))); + assertHitCount(client().prepareSearch(INDEX).setSize(0).get(), total - modified); }, equalTo("delete-by-query [" + INDEX + "]")); } @@ -250,9 +259,10 @@ public void testUpdateByQueryCancelWithWorkers() throws Exception { } public void testDeleteByQueryCancelWithWorkers() throws Exception { - testCancel(DeleteByQueryAction.NAME, deleteByQuery().source(INDEX).setSlices(5), (response, total, modified) -> { - assertThat(response, matcher().deleted(modified).reasonCancelled(equalTo("by user request")).slices(hasSize(5))); - assertHitCount(client().prepareSearch(INDEX).setSize(0).get(), total - modified); + testCancel(DeleteByQueryAction.NAME, deleteByQuery().source(INDEX).filter(QueryBuilders.matchAllQuery()).setSlices(5), + (response, total, modified) -> { + assertThat(response, matcher().deleted(modified).reasonCancelled(equalTo("by user request")).slices(hasSize(5))); + assertHitCount(client().prepareSearch(INDEX).setSize(0).get(), total - modified); }, equalTo("delete-by-query [" + INDEX + "]")); } diff --git a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/DeleteByQueryBasicTests.java 
b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/DeleteByQueryBasicTests.java index 4d920600a5d9a..aaab77f543f8b 100644 --- a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/DeleteByQueryBasicTests.java +++ b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/DeleteByQueryBasicTests.java @@ -56,7 +56,7 @@ public void testBasics() throws Exception { assertHitCount(client().prepareSearch("test").setTypes("test").setSize(0).get(), 5); // Deletes the two first docs with limit by size - DeleteByQueryRequestBuilder request = deleteByQuery().source("test").size(2).refresh(true); + DeleteByQueryRequestBuilder request = deleteByQuery().source("test").filter(QueryBuilders.matchAllQuery()).size(2).refresh(true); request.source().addSort("foo.keyword", SortOrder.ASC); assertThat(request.get(), matcher().deleted(2)); assertHitCount(client().prepareSearch("test").setTypes("test").setSize(0).get(), 3); @@ -66,7 +66,7 @@ public void testBasics() throws Exception { assertHitCount(client().prepareSearch("test").setTypes("test").setSize(0).get(), 3); // Deletes all remaining docs - assertThat(deleteByQuery().source("test").refresh(true).get(), matcher().deleted(3)); + assertThat(deleteByQuery().source("test").filter(QueryBuilders.matchAllQuery()).refresh(true).get(), matcher().deleted(3)); assertHitCount(client().prepareSearch("test").setTypes("test").setSize(0).get(), 0); } @@ -79,7 +79,7 @@ public void testDeleteByQueryWithOneIndex() throws Exception { } indexRandom(true, true, true, builders); - assertThat(deleteByQuery().source("t*").refresh(true).get(), matcher().deleted(docs)); + assertThat(deleteByQuery().source("t*").filter(QueryBuilders.matchAllQuery()).refresh(true).get(), matcher().deleted(docs)); assertHitCount(client().prepareSearch("test").setSize(0).get(), 0); } @@ -122,7 +122,7 @@ public void testDeleteByQueryWithMissingIndex() throws Exception { assertHitCount(client().prepareSearch().setSize(0).get(), 1); try { - deleteByQuery().source("missing").get(); + deleteByQuery().source("missing").filter(QueryBuilders.matchAllQuery()).get(); fail("should have thrown an exception because of a missing index"); } catch (IndexNotFoundException e) { // Ok @@ -151,7 +151,7 @@ public void testDeleteByQueryWithRouting() throws Exception { long expected = client().prepareSearch().setSize(0).setRouting(routing).get().getHits().getTotalHits(); logger.info("--> delete all documents with routing [{}] with a delete-by-query", routing); - DeleteByQueryRequestBuilder delete = deleteByQuery().source("test"); + DeleteByQueryRequestBuilder delete = deleteByQuery().source("test").filter(QueryBuilders.matchAllQuery()); delete.source().setRouting(routing); assertThat(delete.refresh(true).get(), matcher().deleted(expected)); @@ -202,7 +202,8 @@ public void testDeleteByQueryOnReadOnlyIndex() throws Exception { try { enableIndexBlock("test", IndexMetaData.SETTING_READ_ONLY); - assertThat(deleteByQuery().source("test").refresh(true).get(), matcher().deleted(0).failures(docs)); + assertThat(deleteByQuery().source("test").filter(QueryBuilders.matchAllQuery()).refresh(true).get(), + matcher().deleted(0).failures(docs)); } finally { disableIndexBlock("test", IndexMetaData.SETTING_READ_ONLY); } @@ -228,7 +229,8 @@ public void testWorkers() throws Exception { assertHitCount(client().prepareSearch("test").setTypes("test").setSize(0).get(), 5); // Delete remaining docs - DeleteByQueryRequestBuilder request = deleteByQuery().source("test").refresh(true).setSlices(5); + 
DeleteByQueryRequestBuilder request = deleteByQuery().source("test").filter(QueryBuilders.matchAllQuery()).refresh(true) + .setSlices(5); assertThat(request.get(), matcher().deleted(5).slices(hasSize(5))); assertHitCount(client().prepareSearch("test").setTypes("test").setSize(0).get(), 0); } diff --git a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/RethrottleTests.java b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/RethrottleTests.java index a29b7238d4ee3..91c184e16a6fd 100644 --- a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/RethrottleTests.java +++ b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/RethrottleTests.java @@ -26,6 +26,7 @@ import org.elasticsearch.action.bulk.byscroll.BulkByScrollResponse; import org.elasticsearch.action.bulk.byscroll.BulkByScrollTask; import org.elasticsearch.action.index.IndexRequestBuilder; +import org.elasticsearch.index.query.QueryBuilders; import org.elasticsearch.tasks.TaskId; import java.util.ArrayList; @@ -57,7 +58,7 @@ public void testUpdateByQuery() throws Exception { } public void testDeleteByQuery() throws Exception { - testCase(deleteByQuery().source("test"), DeleteByQueryAction.NAME); + testCase(deleteByQuery().source("test").filter(QueryBuilders.matchAllQuery()), DeleteByQueryAction.NAME); } public void testReindexWithWorkers() throws Exception { @@ -69,13 +70,13 @@ public void testUpdateByQueryWithWorkers() throws Exception { } public void testDeleteByQueryWithWorkers() throws Exception { - testCase(deleteByQuery().source("test").setSlices(between(2, 10)), DeleteByQueryAction.NAME); + testCase(deleteByQuery().source("test").filter(QueryBuilders.matchAllQuery()).setSlices(between(2, 10)), DeleteByQueryAction.NAME); } private void testCase(AbstractBulkByScrollRequestBuilder request, String actionName) throws Exception { logger.info("Starting test for [{}] with [{}] slices", actionName, request.request().getSlices()); /* Add ten documents per slice so most slices will have many documents to process, having to go to multiple batches. - * we can't rely on all of them doing so, but + * we can't rely on all of them doing so, but */ List docs = new ArrayList<>(); for (int i = 0; i < request.request().getSlices() * 10; i++) { @@ -158,7 +159,7 @@ private void testCase(AbstractBulkByScrollRequestBuilder request, String a * are rethrottled, the finished ones just keep whatever requests per second they had while they were running. But it might * also be less than newRequestsPerSecond because the newRequestsPerSecond is divided among running sub-requests and then the * requests are rethrottled. If one request finishes in between the division and the application of the new throttle then it - * won't be rethrottled, thus only contributing its lower total. */ + * won't be rethrottled, thus only contributing its lower total. 
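+ * The assertion below therefore tolerates a small relative error (0.01% of the expected total).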
*/ assertEquals(totalRequestsPerSecond, status.getRequestsPerSecond(), totalRequestsPerSecond * 0.0001f); } diff --git a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/RetryTests.java b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/RetryTests.java index e94dcfeb12206..6e8da59eee39f 100644 --- a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/RetryTests.java +++ b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/RetryTests.java @@ -35,6 +35,8 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.transport.TransportAddress; import org.elasticsearch.common.util.concurrent.EsRejectedExecutionException; +import org.elasticsearch.index.query.QueryBuilder; +import org.elasticsearch.index.query.QueryBuilders; import org.elasticsearch.index.reindex.remote.RemoteInfo; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.test.ESSingleNodeTestCase; @@ -134,8 +136,8 @@ public void testUpdateByQuery() throws Exception { } public void testDeleteByQuery() throws Exception { - testCase(DeleteByQueryAction.NAME, DeleteByQueryAction.INSTANCE.newRequestBuilder(client()).source("source"), - matcher().deleted(DOC_COUNT)); + testCase(DeleteByQueryAction.NAME, DeleteByQueryAction.INSTANCE.newRequestBuilder(client()).source("source") + .filter(QueryBuilders.matchAllQuery()), matcher().deleted(DOC_COUNT)); } private void testCase(String action, AbstractBulkByScrollRequestBuilder request, BulkIndexByScrollResponseMatcher matcher) diff --git a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/remote/RemoteRequestBuildersTests.java b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/remote/RemoteRequestBuildersTests.java index f9ab72506a84d..b77dba6e9c9b3 100644 --- a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/remote/RemoteRequestBuildersTests.java +++ b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/remote/RemoteRequestBuildersTests.java @@ -39,6 +39,7 @@ import static org.elasticsearch.index.reindex.remote.RemoteRequestBuilders.initialSearchParams; import static org.elasticsearch.index.reindex.remote.RemoteRequestBuilders.initialSearchPath; import static org.elasticsearch.index.reindex.remote.RemoteRequestBuilders.scrollEntity; +import static org.elasticsearch.index.reindex.remote.RemoteRequestBuilders.clearScrollEntity; import static org.elasticsearch.index.reindex.remote.RemoteRequestBuilders.scrollParams; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.either; @@ -185,9 +186,27 @@ public void testScrollParams() { public void testScrollEntity() throws IOException { String scroll = randomAsciiOfLength(30); - HttpEntity entity = scrollEntity(scroll); + HttpEntity entity = scrollEntity(scroll, Version.V_5_0_0); assertEquals(ContentType.APPLICATION_JSON.toString(), entity.getContentType().getValue()); assertThat(Streams.copyToString(new InputStreamReader(entity.getContent(), StandardCharsets.UTF_8)), containsString("\"" + scroll + "\"")); + + // Test with version < 2.0.0 + entity = scrollEntity(scroll, Version.fromId(1070499)); + assertEquals(ContentType.TEXT_PLAIN.toString(), entity.getContentType().getValue()); + assertEquals(scroll, Streams.copyToString(new InputStreamReader(entity.getContent(), StandardCharsets.UTF_8))); + } + + public void testClearScrollEntity() throws IOException { + String scroll = randomAsciiOfLength(30); + HttpEntity entity = clearScrollEntity(scroll, Version.V_5_0_0); + 
assertEquals(ContentType.APPLICATION_JSON.toString(), entity.getContentType().getValue()); + assertThat(Streams.copyToString(new InputStreamReader(entity.getContent(), StandardCharsets.UTF_8)), + containsString("\"" + scroll + "\"")); + + // Test with version < 2.0.0 + entity = clearScrollEntity(scroll, Version.fromId(1070499)); + assertEquals(ContentType.TEXT_PLAIN.toString(), entity.getContentType().getValue()); + assertEquals(scroll, Streams.copyToString(new InputStreamReader(entity.getContent(), StandardCharsets.UTF_8))); } } diff --git a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/remote/RemoteScrollableHitSourceTests.java b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/remote/RemoteScrollableHitSourceTests.java index 7376ed543490a..eb7abea6af5d1 100644 --- a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/remote/RemoteScrollableHitSourceTests.java +++ b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/remote/RemoteScrollableHitSourceTests.java @@ -80,7 +80,9 @@ import static org.hamcrest.Matchers.hasSize; import static org.hamcrest.Matchers.instanceOf; import static org.mockito.Matchers.any; +import static org.mockito.Mockito.doThrow; import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.verify; import static org.mockito.Mockito.when; public class RemoteScrollableHitSourceTests extends ESTestCase { @@ -478,6 +480,25 @@ public void testUnexpectedJsonThinksRemoveIsNotES() throws IOException { e.getCause().getCause().getCause().getMessage()); } + public void testCleanupSuccessful() throws Exception { + AtomicBoolean cleanupCallbackCalled = new AtomicBoolean(); + RestClient client = mock(RestClient.class); + TestRemoteScrollableHitSource hitSource = new TestRemoteScrollableHitSource(client); + hitSource.cleanup(() -> cleanupCallbackCalled.set(true)); + verify(client).close(); + assertTrue(cleanupCallbackCalled.get()); + } + + public void testCleanupFailure() throws Exception { + AtomicBoolean cleanupCallbackCalled = new AtomicBoolean(); + RestClient client = mock(RestClient.class); + doThrow(new RuntimeException("test")).when(client).close(); + TestRemoteScrollableHitSource hitSource = new TestRemoteScrollableHitSource(client); + hitSource.cleanup(() -> cleanupCallbackCalled.set(true)); + verify(client).close(); + assertTrue(cleanupCallbackCalled.get()); + } + private RemoteScrollableHitSource sourceWithMockedRemoteCall(String... 
paths) throws Exception { return sourceWithMockedRemoteCall(true, ContentType.APPLICATION_JSON, paths); } diff --git a/modules/reindex/src/test/resources/rest-api-spec/test/delete_by_query/20_validation.yaml b/modules/reindex/src/test/resources/rest-api-spec/test/delete_by_query/20_validation.yaml index 4aa63facc240d..7527db948422c 100644 --- a/modules/reindex/src/test/resources/rest-api-spec/test/delete_by_query/20_validation.yaml +++ b/modules/reindex/src/test/resources/rest-api-spec/test/delete_by_query/20_validation.yaml @@ -5,6 +5,19 @@ delete_by_query: index: _all +--- +"no query fails": + + - skip: + version: " - 5.99.99" + reason: explicit query is required since 6.0.0 + + - do: + catch: /query is missing/ + delete_by_query: + index: _all + body: {} + --- "invalid conflicts fails": - do: diff --git a/modules/transport-netty4/src/main/java/org/elasticsearch/transport/netty4/Netty4Transport.java b/modules/transport-netty4/src/main/java/org/elasticsearch/transport/netty4/Netty4Transport.java index 439827fdb71eb..4b515aab86958 100644 --- a/modules/transport-netty4/src/main/java/org/elasticsearch/transport/netty4/Netty4Transport.java +++ b/modules/transport-netty4/src/main/java/org/elasticsearch/transport/netty4/Netty4Transport.java @@ -39,6 +39,7 @@ import org.apache.logging.log4j.util.Supplier; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.ExceptionsHelper; +import org.elasticsearch.action.ActionListener; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.common.SuppressForbidden; import org.elasticsearch.common.bytes.BytesReference; @@ -73,8 +74,10 @@ import java.util.Iterator; import java.util.List; import java.util.Map; +import java.util.concurrent.ExecutionException; import java.util.concurrent.ThreadFactory; import java.util.concurrent.TimeUnit; +import java.util.function.Consumer; import static org.elasticsearch.common.settings.Setting.byteSizeSetting; import static org.elasticsearch.common.settings.Setting.intSetting; @@ -393,9 +396,20 @@ public void operationComplete(final ChannelFuture future) throws Exception { } @Override - protected void sendMessage(Channel channel, BytesReference reference, Runnable sendListener) { + protected void sendMessage(Channel channel, BytesReference reference, ActionListener listener) { final ChannelFuture future = channel.writeAndFlush(Netty4Utils.toByteBuf(reference)); - future.addListener(f -> sendListener.run()); + future.addListener(f -> { + if (f.isSuccess()) { + listener.onResponse(channel); + } else { + Throwable cause = f.cause(); + // If the Throwable is an Error something has gone very wrong and Netty4MessageChannelHandler is + // going to cause that to bubble up and kill the process. 
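+ // Only Exception causes are reported to the listener here; an Error is deliberately
+ // left alone so it can propagate.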
+ if (cause instanceof Exception) { + listener.onFailure((Exception) cause); + } + } + }); } @Override diff --git a/plugins/analysis-icu/licenses/lucene-analyzers-icu-6.5.0-snapshot-d00c5ca.jar.sha1 b/plugins/analysis-icu/licenses/lucene-analyzers-icu-6.5.0-snapshot-d00c5ca.jar.sha1 deleted file mode 100644 index 12314f6881950..0000000000000 --- a/plugins/analysis-icu/licenses/lucene-analyzers-icu-6.5.0-snapshot-d00c5ca.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -165f826617aa6cb7af67b2c3f87df3b46216a155 \ No newline at end of file diff --git a/plugins/analysis-icu/licenses/lucene-analyzers-icu-6.5.0.jar.sha1 b/plugins/analysis-icu/licenses/lucene-analyzers-icu-6.5.0.jar.sha1 new file mode 100644 index 0000000000000..95df77a75218a --- /dev/null +++ b/plugins/analysis-icu/licenses/lucene-analyzers-icu-6.5.0.jar.sha1 @@ -0,0 +1 @@ +3a71465f63887f871bc377d87a0838c29b0a857d \ No newline at end of file diff --git a/plugins/analysis-kuromoji/licenses/lucene-analyzers-kuromoji-6.5.0-snapshot-d00c5ca.jar.sha1 b/plugins/analysis-kuromoji/licenses/lucene-analyzers-kuromoji-6.5.0-snapshot-d00c5ca.jar.sha1 deleted file mode 100644 index 8e844e3ad9b58..0000000000000 --- a/plugins/analysis-kuromoji/licenses/lucene-analyzers-kuromoji-6.5.0-snapshot-d00c5ca.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -50ed8c505a120bfcd1d5a7d3fae837027153f0dd \ No newline at end of file diff --git a/plugins/analysis-kuromoji/licenses/lucene-analyzers-kuromoji-6.5.0.jar.sha1 b/plugins/analysis-kuromoji/licenses/lucene-analyzers-kuromoji-6.5.0.jar.sha1 new file mode 100644 index 0000000000000..0c928699fc67d --- /dev/null +++ b/plugins/analysis-kuromoji/licenses/lucene-analyzers-kuromoji-6.5.0.jar.sha1 @@ -0,0 +1 @@ +03353b0d030f6d5a63c4c0d5b64c770f5ba9d829 \ No newline at end of file diff --git a/plugins/analysis-phonetic/licenses/lucene-analyzers-phonetic-6.5.0-snapshot-d00c5ca.jar.sha1 b/plugins/analysis-phonetic/licenses/lucene-analyzers-phonetic-6.5.0-snapshot-d00c5ca.jar.sha1 deleted file mode 100644 index b1573a888d7af..0000000000000 --- a/plugins/analysis-phonetic/licenses/lucene-analyzers-phonetic-6.5.0-snapshot-d00c5ca.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -f4c04ecad541aa9526c4e2bd4e98aa08898ffa1c \ No newline at end of file diff --git a/plugins/analysis-phonetic/licenses/lucene-analyzers-phonetic-6.5.0.jar.sha1 b/plugins/analysis-phonetic/licenses/lucene-analyzers-phonetic-6.5.0.jar.sha1 new file mode 100644 index 0000000000000..ba2bee28476bd --- /dev/null +++ b/plugins/analysis-phonetic/licenses/lucene-analyzers-phonetic-6.5.0.jar.sha1 @@ -0,0 +1 @@ +77ce4fb8c62688d8a094f08a07685c464ec46345 \ No newline at end of file diff --git a/plugins/analysis-smartcn/licenses/lucene-analyzers-smartcn-6.5.0-snapshot-d00c5ca.jar.sha1 b/plugins/analysis-smartcn/licenses/lucene-analyzers-smartcn-6.5.0-snapshot-d00c5ca.jar.sha1 deleted file mode 100644 index 9f1ab3b052c11..0000000000000 --- a/plugins/analysis-smartcn/licenses/lucene-analyzers-smartcn-6.5.0-snapshot-d00c5ca.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -bc5ca65f0db1ec9f71481c6ad4e146bbf56df32e \ No newline at end of file diff --git a/plugins/analysis-smartcn/licenses/lucene-analyzers-smartcn-6.5.0.jar.sha1 b/plugins/analysis-smartcn/licenses/lucene-analyzers-smartcn-6.5.0.jar.sha1 new file mode 100644 index 0000000000000..0a0ae4cf4011b --- /dev/null +++ b/plugins/analysis-smartcn/licenses/lucene-analyzers-smartcn-6.5.0.jar.sha1 @@ -0,0 +1 @@ +60a780d900e48b0cead42d82fe405ad54bd658c3 \ No newline at end of file diff --git 
a/plugins/analysis-stempel/licenses/lucene-analyzers-stempel-6.5.0-snapshot-d00c5ca.jar.sha1 b/plugins/analysis-stempel/licenses/lucene-analyzers-stempel-6.5.0-snapshot-d00c5ca.jar.sha1 deleted file mode 100644 index ac599127442c6..0000000000000 --- a/plugins/analysis-stempel/licenses/lucene-analyzers-stempel-6.5.0-snapshot-d00c5ca.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -dae2a3e6b79197d4e48ee1ae8d0ef31b8b20069e \ No newline at end of file diff --git a/plugins/analysis-stempel/licenses/lucene-analyzers-stempel-6.5.0.jar.sha1 b/plugins/analysis-stempel/licenses/lucene-analyzers-stempel-6.5.0.jar.sha1 new file mode 100644 index 0000000000000..9a1387fa22fdd --- /dev/null +++ b/plugins/analysis-stempel/licenses/lucene-analyzers-stempel-6.5.0.jar.sha1 @@ -0,0 +1 @@ +894c42c011d291e72d14db660499c75281de9efd \ No newline at end of file diff --git a/plugins/analysis-ukrainian/licenses/lucene-analyzers-morfologik-6.5.0-snapshot-d00c5ca.jar.sha1 b/plugins/analysis-ukrainian/licenses/lucene-analyzers-morfologik-6.5.0-snapshot-d00c5ca.jar.sha1 deleted file mode 100644 index fc2ac0b8a2f40..0000000000000 --- a/plugins/analysis-ukrainian/licenses/lucene-analyzers-morfologik-6.5.0-snapshot-d00c5ca.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -318fcd0d1d33d45088ac3f4ab8291a4a22060078 \ No newline at end of file diff --git a/plugins/analysis-ukrainian/licenses/lucene-analyzers-morfologik-6.5.0.jar.sha1 b/plugins/analysis-ukrainian/licenses/lucene-analyzers-morfologik-6.5.0.jar.sha1 new file mode 100644 index 0000000000000..89a0283d52e7f --- /dev/null +++ b/plugins/analysis-ukrainian/licenses/lucene-analyzers-morfologik-6.5.0.jar.sha1 @@ -0,0 +1 @@ +72f0172cf947ab563a7c8166855cf7cbdfe33136 \ No newline at end of file diff --git a/plugins/discovery-gce/src/main/java/org/elasticsearch/cloud/gce/util/Access.java b/plugins/discovery-gce/src/main/java/org/elasticsearch/cloud/gce/util/Access.java index ae96dee64c436..40136f8fc0138 100644 --- a/plugins/discovery-gce/src/main/java/org/elasticsearch/cloud/gce/util/Access.java +++ b/plugins/discovery-gce/src/main/java/org/elasticsearch/cloud/gce/util/Access.java @@ -22,27 +22,29 @@ import org.elasticsearch.SpecialPermission; import java.io.IOException; +import java.net.SocketPermission; import java.security.AccessController; import java.security.PrivilegedAction; import java.security.PrivilegedActionException; import java.security.PrivilegedExceptionAction; /** - * GCE's http client changes access levels. Specifically it needs {@link RuntimePermission} accessDeclaredMembers and - * setFactory and {@link java.lang.reflect.ReflectPermission} suppressAccessChecks. For remote calls the plugin needs - * SocketPermissions for 'connect'. This class wraps the operations requiring access in + * GCE's HTTP client changes access levels. Specifically it needs {@link RuntimePermission} {@code + * accessDeclaredMembers} and {@code setFactory}, and {@link java.lang.reflect.ReflectPermission} + * {@code suppressAccessChecks}. For remote calls, the plugin needs {@link SocketPermission} for + * {@code connect}. This class wraps the operations requiring access in * {@link AccessController#doPrivileged(PrivilegedAction)} blocks. 
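+ *
+ * <p>An illustrative (hypothetical) call site wraps the privileged operation in a lambda, for example
+ * {@code Access.doPrivilegedIOException(() -> gceClient.execute(request))}, where {@code gceClient}
+ * stands in for the real GCE compute client.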
*/ public final class Access { private Access() {} - public static T doPrivileged(PrivilegedAction operation) { + public static T doPrivileged(final PrivilegedAction operation) { SpecialPermission.check(); return AccessController.doPrivileged(operation); } - public static void doPrivilegedVoid(Runnable action) { + public static void doPrivilegedVoid(final Runnable action) { SpecialPermission.check(); AccessController.doPrivileged((PrivilegedAction) () -> { action.run(); @@ -50,12 +52,14 @@ public static void doPrivilegedVoid(Runnable action) { }); } - public static T doPrivilegedIOException(PrivilegedExceptionAction operation) throws IOException { + public static T doPrivilegedIOException(final PrivilegedExceptionAction operation) + throws IOException { SpecialPermission.check(); try { return AccessController.doPrivileged(operation); - } catch (PrivilegedActionException e) { + } catch (final PrivilegedActionException e) { throw (IOException) e.getCause(); } } + } diff --git a/plugins/ingest-attachment/src/main/java/org/elasticsearch/ingest/attachment/TikaImpl.java b/plugins/ingest-attachment/src/main/java/org/elasticsearch/ingest/attachment/TikaImpl.java index c7ffe4f287f0c..3c0f3b0433c0c 100644 --- a/plugins/ingest-attachment/src/main/java/org/elasticsearch/ingest/attachment/TikaImpl.java +++ b/plugins/ingest-attachment/src/main/java/org/elasticsearch/ingest/attachment/TikaImpl.java @@ -47,7 +47,9 @@ import java.security.PrivilegedExceptionAction; import java.security.ProtectionDomain; import java.security.SecurityPermission; +import java.util.Arrays; import java.util.Collections; +import java.util.LinkedHashSet; import java.util.PropertyPermission; import java.util.Set; @@ -128,7 +130,12 @@ static PermissionCollection getRestrictedPermissions() { addReadPermissions(perms, JarHell.parseClassPath()); // plugin jars if (TikaImpl.class.getClassLoader() instanceof URLClassLoader) { - addReadPermissions(perms, ((URLClassLoader)TikaImpl.class.getClassLoader()).getURLs()); + URL[] urls = ((URLClassLoader)TikaImpl.class.getClassLoader()).getURLs(); + Set set = new LinkedHashSet<>(Arrays.asList(urls)); + if (set.size() != urls.length) { + throw new AssertionError("duplicate jars: " + Arrays.toString(urls)); + } + addReadPermissions(perms, set); } // jvm's java.io.tmpdir (needs read/write) perms.add(new FilePermission(System.getProperty("java.io.tmpdir") + System.getProperty("file.separator") + "-", @@ -145,7 +152,7 @@ static PermissionCollection getRestrictedPermissions() { // add resources to (what is typically) a jar, but might not be (e.g. 
in tests/IDE) @SuppressForbidden(reason = "adds access to jar resources") - static void addReadPermissions(Permissions perms, URL resources[]) { + static void addReadPermissions(Permissions perms, Set resources) { try { for (URL url : resources) { Path path = PathUtils.get(url.toURI()); diff --git a/plugins/mapper-murmur3/src/test/java/org/elasticsearch/index/mapper/murmur3/Murmur3FieldMapperTests.java b/plugins/mapper-murmur3/src/test/java/org/elasticsearch/index/mapper/murmur3/Murmur3FieldMapperTests.java index 325ac72671391..e16a8f05203dd 100644 --- a/plugins/mapper-murmur3/src/test/java/org/elasticsearch/index/mapper/murmur3/Murmur3FieldMapperTests.java +++ b/plugins/mapper-murmur3/src/test/java/org/elasticsearch/index/mapper/murmur3/Murmur3FieldMapperTests.java @@ -27,11 +27,13 @@ import org.elasticsearch.common.compress.CompressedXContent; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.XContentFactory; +import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.index.IndexService; import org.elasticsearch.index.mapper.DocumentMapper; import org.elasticsearch.index.mapper.DocumentMapperParser; import org.elasticsearch.index.mapper.MapperParsingException; import org.elasticsearch.index.mapper.ParsedDocument; +import org.elasticsearch.index.mapper.SourceToParse; import org.elasticsearch.index.query.QueryShardContext; import org.elasticsearch.indices.mapper.MapperRegistry; import org.elasticsearch.plugins.Plugin; @@ -78,7 +80,11 @@ public void testDefaults() throws Exception { .field("type", "murmur3") .endObject().endObject().endObject().endObject().string(); DocumentMapper mapper = parser.parse("type", new CompressedXContent(mapping)); - ParsedDocument parsedDoc = mapper.parse("test", "type", "1", XContentFactory.jsonBuilder().startObject().field("field", "value").endObject().bytes()); + ParsedDocument parsedDoc = mapper.parse(SourceToParse.source("test", "type", "1", XContentFactory.jsonBuilder() + .startObject() + .field("field", "value") + .endObject().bytes(), + XContentType.JSON)); IndexableField[] fields = parsedDoc.rootDoc().getFields("field"); assertNotNull(fields); assertEquals(Arrays.toString(fields), 1, fields.length); diff --git a/plugins/repository-azure/build.gradle b/plugins/repository-azure/build.gradle index a2c3df17d80a1..14a0c37a20028 100644 --- a/plugins/repository-azure/build.gradle +++ b/plugins/repository-azure/build.gradle @@ -23,8 +23,9 @@ esplugin { } dependencies { - compile 'com.microsoft.azure:azure-storage:4.0.0' - compile 'org.apache.commons:commons-lang3:3.3.2' + compile 'com.microsoft.azure:azure-storage:5.0.0' + compile 'com.microsoft.azure:azure-keyvault-core:0.8.0' + compile 'org.apache.commons:commons-lang3:3.4' } dependencyLicenses { diff --git a/plugins/repository-azure/licenses/azure-keyvault-core-0.8.0.jar.sha1 b/plugins/repository-azure/licenses/azure-keyvault-core-0.8.0.jar.sha1 new file mode 100644 index 0000000000000..b86c58db8423a --- /dev/null +++ b/plugins/repository-azure/licenses/azure-keyvault-core-0.8.0.jar.sha1 @@ -0,0 +1 @@ +35f7ac687462f491d0f8b0d96733dfe347493d70 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/azure-storage-4.0.0.jar.sha1 b/plugins/repository-azure/licenses/azure-storage-4.0.0.jar.sha1 deleted file mode 100644 index 9ef89531d6d81..0000000000000 --- a/plugins/repository-azure/licenses/azure-storage-4.0.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -b31504f0fb3f9c4458ad053b426357a9b0df6e08 \ No newline at end of file diff --git 
a/plugins/repository-azure/licenses/azure-storage-5.0.0.jar.sha1 b/plugins/repository-azure/licenses/azure-storage-5.0.0.jar.sha1 new file mode 100644 index 0000000000000..9882cb80204ff --- /dev/null +++ b/plugins/repository-azure/licenses/azure-storage-5.0.0.jar.sha1 @@ -0,0 +1 @@ +ba8f04bfeac08016c0f88423a202d0f3aac03aed \ No newline at end of file diff --git a/plugins/repository-azure/licenses/commons-lang3-3.3.2.jar.sha1 b/plugins/repository-azure/licenses/commons-lang3-3.3.2.jar.sha1 deleted file mode 100644 index bdd913cf2358d..0000000000000 --- a/plugins/repository-azure/licenses/commons-lang3-3.3.2.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -90a3822c38ec8c996e84c16a3477ef632cbc87a3 diff --git a/plugins/repository-azure/licenses/commons-lang3-3.4.jar.sha1 b/plugins/repository-azure/licenses/commons-lang3-3.4.jar.sha1 new file mode 100644 index 0000000000000..fdd7040377b8f --- /dev/null +++ b/plugins/repository-azure/licenses/commons-lang3-3.4.jar.sha1 @@ -0,0 +1 @@ +5fe28b9518e58819180a43a850fbc0dd24b7c050 \ No newline at end of file diff --git a/plugins/repository-azure/src/main/java/org/elasticsearch/cloud/azure/blobstore/AzureBlobStore.java b/plugins/repository-azure/src/main/java/org/elasticsearch/cloud/azure/blobstore/AzureBlobStore.java index 73b0f07835ce7..0f32180860c37 100644 --- a/plugins/repository-azure/src/main/java/org/elasticsearch/cloud/azure/blobstore/AzureBlobStore.java +++ b/plugins/repository-azure/src/main/java/org/elasticsearch/cloud/azure/blobstore/AzureBlobStore.java @@ -39,7 +39,6 @@ import java.util.Locale; import java.util.Map; -import static org.elasticsearch.cloud.azure.storage.AzureStorageSettings.getValue; import static org.elasticsearch.repositories.azure.AzureRepository.Repository; public class AzureBlobStore extends AbstractComponent implements BlobStore { @@ -55,11 +54,11 @@ public AzureBlobStore(RepositoryMetaData metadata, Settings settings, AzureStorageService client) throws URISyntaxException, StorageException { super(settings); this.client = client; - this.container = getValue(metadata.settings(), settings, Repository.CONTAINER_SETTING, Storage.CONTAINER_SETTING); + this.container = Repository.CONTAINER_SETTING.get(metadata.settings()); this.repositoryName = metadata.name(); - this.accountName = getValue(metadata.settings(), settings, Repository.ACCOUNT_SETTING, Storage.ACCOUNT_SETTING); + this.accountName = Repository.ACCOUNT_SETTING.get(metadata.settings()); - String modeStr = getValue(metadata.settings(), settings, Repository.LOCATION_MODE_SETTING, Storage.LOCATION_MODE_SETTING); + String modeStr = Repository.LOCATION_MODE_SETTING.get(metadata.settings()); if (Strings.hasLength(modeStr)) { this.locMode = LocationMode.valueOf(modeStr.toUpperCase(Locale.ROOT)); } else { diff --git a/plugins/repository-azure/src/main/java/org/elasticsearch/cloud/azure/storage/AzureStorageService.java b/plugins/repository-azure/src/main/java/org/elasticsearch/cloud/azure/storage/AzureStorageService.java index 6343541aed323..5b6575a4d1471 100644 --- a/plugins/repository-azure/src/main/java/org/elasticsearch/cloud/azure/storage/AzureStorageService.java +++ b/plugins/repository-azure/src/main/java/org/elasticsearch/cloud/azure/storage/AzureStorageService.java @@ -21,7 +21,6 @@ import com.microsoft.azure.storage.LocationMode; import com.microsoft.azure.storage.StorageException; - import org.elasticsearch.common.blobstore.BlobMetaData; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Setting.Property; @@ -35,7 +34,6 @@ import 
java.io.OutputStream; import java.net.URISyntaxException; import java.util.Map; -import java.util.function.Function; /** * Azure Storage Service interface @@ -53,18 +51,6 @@ final class Storage { public static final Setting TIMEOUT_SETTING = Setting.timeSetting("cloud.azure.storage.timeout", TimeValue.timeValueMinutes(-1), Property.NodeScope); - public static final Setting ACCOUNT_SETTING = - Setting.simpleString("repositories.azure.account", Property.NodeScope, Property.Filtered); - public static final Setting CONTAINER_SETTING = - new Setting<>("repositories.azure.container", "elasticsearch-snapshots", Function.identity(), Property.NodeScope); - public static final Setting BASE_PATH_SETTING = - Setting.simpleString("repositories.azure.base_path", Property.NodeScope); - public static final Setting LOCATION_MODE_SETTING = - Setting.simpleString("repositories.azure.location_mode", Property.NodeScope); - public static final Setting CHUNK_SIZE_SETTING = - Setting.byteSizeSetting("repositories.azure.chunk_size", MAX_CHUNK_SIZE, MIN_CHUNK_SIZE, MAX_CHUNK_SIZE, Property.NodeScope); - public static final Setting COMPRESS_SETTING = - Setting.boolSetting("repositories.azure.compress", false, Property.NodeScope); } boolean doesContainerExist(String account, LocationMode mode, String container); diff --git a/plugins/repository-azure/src/main/java/org/elasticsearch/cloud/azure/storage/AzureStorageServiceImpl.java b/plugins/repository-azure/src/main/java/org/elasticsearch/cloud/azure/storage/AzureStorageServiceImpl.java index e1eea1f57f4f2..594715b845c7c 100644 --- a/plugins/repository-azure/src/main/java/org/elasticsearch/cloud/azure/storage/AzureStorageServiceImpl.java +++ b/plugins/repository-azure/src/main/java/org/elasticsearch/cloud/azure/storage/AzureStorageServiceImpl.java @@ -21,6 +21,8 @@ import com.microsoft.azure.storage.CloudStorageAccount; import com.microsoft.azure.storage.LocationMode; +import com.microsoft.azure.storage.RetryExponentialRetry; +import com.microsoft.azure.storage.RetryPolicy; import com.microsoft.azure.storage.StorageException; import com.microsoft.azure.storage.blob.BlobProperties; import com.microsoft.azure.storage.blob.CloudBlobClient; @@ -147,6 +149,11 @@ CloudBlobClient getSelectedClient(String account, LocationMode mode) { "]. 
It can not be longer than 2,147,483,647ms."); } } + + // We define a default exponential retry policy + client.getDefaultRequestOptions().setRetryPolicyFactory( + new RetryExponentialRetry(RetryPolicy.DEFAULT_CLIENT_BACKOFF, azureStorageSettings.getMaxRetries())); + return client; } diff --git a/plugins/repository-azure/src/main/java/org/elasticsearch/cloud/azure/storage/AzureStorageSettings.java b/plugins/repository-azure/src/main/java/org/elasticsearch/cloud/azure/storage/AzureStorageSettings.java index 5e0de46f65a64..600d5fe97f879 100644 --- a/plugins/repository-azure/src/main/java/org/elasticsearch/cloud/azure/storage/AzureStorageSettings.java +++ b/plugins/repository-azure/src/main/java/org/elasticsearch/cloud/azure/storage/AzureStorageSettings.java @@ -19,20 +19,19 @@ package org.elasticsearch.cloud.azure.storage; +import com.microsoft.azure.storage.RetryPolicy; import org.elasticsearch.cloud.azure.storage.AzureStorageService.Storage; import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.settings.SettingsException; import org.elasticsearch.common.unit.TimeValue; -import org.elasticsearch.node.Node; import java.util.ArrayList; import java.util.Collections; import java.util.HashMap; import java.util.List; import java.util.Map; -import java.util.function.Function; public final class AzureStorageSettings { private static final Setting TIMEOUT_SETTING = Setting.affixKeySetting(Storage.PREFIX, "timeout", @@ -43,20 +42,27 @@ public final class AzureStorageSettings { Setting.affixKeySetting(Storage.PREFIX, "key", (key) -> Setting.simpleString(key, Setting.Property.NodeScope)); private static final Setting DEFAULT_SETTING = Setting.affixKeySetting(Storage.PREFIX, "default", (key) -> Setting.boolSetting(key, false, Setting.Property.NodeScope)); - + /** + * max_retries: Number of retries in case of Azure errors. Defaults to 3 (RetryPolicy.DEFAULT_CLIENT_RETRY_COUNT). 
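+ * For example (hypothetical account name): {@code cloud.azure.storage.my_account.max_retries: 7}.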
+ */ + private static final Setting MAX_RETRIES_SETTING = + Setting.affixKeySetting(Storage.PREFIX, "max_retries", + (key) -> Setting.intSetting(key, RetryPolicy.DEFAULT_CLIENT_RETRY_COUNT, Setting.Property.NodeScope)); private final String name; private final String account; private final String key; private final TimeValue timeout; private final boolean activeByDefault; + private final int maxRetries; - public AzureStorageSettings(String name, String account, String key, TimeValue timeout, boolean activeByDefault) { + public AzureStorageSettings(String name, String account, String key, TimeValue timeout, boolean activeByDefault, int maxRetries) { this.name = name; this.account = account; this.key = key; this.timeout = timeout; this.activeByDefault = activeByDefault; + this.maxRetries = maxRetries; } public String getName() { @@ -79,6 +85,10 @@ public boolean isActiveByDefault() { return activeByDefault; } + public int getMaxRetries() { + return maxRetries; + } + @Override public String toString() { final StringBuilder sb = new StringBuilder("AzureStorageSettings{"); @@ -87,6 +97,7 @@ public String toString() { sb.append(", key='").append(key).append('\''); sb.append(", activeByDefault='").append(activeByDefault).append('\''); sb.append(", timeout=").append(timeout); + sb.append(", maxRetries=").append(maxRetries); sb.append('}'); return sb.toString(); } @@ -112,7 +123,8 @@ private static List createStorageSettings(Settings setting getValue(settings, groupName, ACCOUNT_SETTING), getValue(settings, groupName, KEY_SETTING), getValue(settings, groupName, TIMEOUT_SETTING), - getValue(settings, groupName, DEFAULT_SETTING)) + getValue(settings, groupName, DEFAULT_SETTING), + getValue(settings, groupName, MAX_RETRIES_SETTING)) ); } return storageSettings; @@ -130,7 +142,8 @@ private static AzureStorageSettings getPrimary(List settin } else if (settings.size() == 1) { // the only storage settings belong (implicitly) to the default primary storage AzureStorageSettings storage = settings.get(0); - return new AzureStorageSettings(storage.getName(), storage.getAccount(), storage.getKey(), storage.getTimeout(), true); + return new AzureStorageSettings(storage.getName(), storage.getAccount(), storage.getKey(), storage.getTimeout(), true, + storage.getMaxRetries()); } else { AzureStorageSettings primary = null; for (AzureStorageSettings setting : settings) { @@ -161,25 +174,4 @@ private static Map getSecondaries(List T getValue(Settings repositorySettings, - Settings globalSettings, - Setting repositorySetting, - Setting repositoriesSetting) { - if (repositorySetting.exists(repositorySettings)) { - return repositorySetting.get(repositorySettings); - } else { - return repositoriesSetting.get(globalSettings); - } - } - - public static Setting getEffectiveSetting(Settings repositorySettings, - Setting repositorySetting, - Setting repositoriesSetting) { - if (repositorySetting.exists(repositorySettings)) { - return repositorySetting; - } else { - return repositoriesSetting; - } - } } diff --git a/plugins/repository-azure/src/main/java/org/elasticsearch/plugin/repository/azure/AzureRepositoryPlugin.java b/plugins/repository-azure/src/main/java/org/elasticsearch/plugin/repository/azure/AzureRepositoryPlugin.java index f895383307469..bfed1fc254d54 100644 --- a/plugins/repository-azure/src/main/java/org/elasticsearch/plugin/repository/azure/AzureRepositoryPlugin.java +++ b/plugins/repository-azure/src/main/java/org/elasticsearch/plugin/repository/azure/AzureRepositoryPlugin.java @@ -53,13 +53,7 @@ public Map 
getRepositories(Environment env, NamedXCo @Override public List> getSettings() { - return Arrays.asList(AzureStorageService.Storage.STORAGE_ACCOUNTS, - AzureStorageService.Storage.ACCOUNT_SETTING, - AzureStorageService.Storage.COMPRESS_SETTING, - AzureStorageService.Storage.CONTAINER_SETTING, - AzureStorageService.Storage.BASE_PATH_SETTING, - AzureStorageService.Storage.CHUNK_SIZE_SETTING, - AzureStorageService.Storage.LOCATION_MODE_SETTING); + return Collections.singletonList(AzureStorageService.Storage.STORAGE_ACCOUNTS); } @Override diff --git a/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureRepository.java b/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureRepository.java index 50d087e713d78..2b99e6a6f8e68 100644 --- a/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureRepository.java +++ b/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureRepository.java @@ -23,7 +23,6 @@ import com.microsoft.azure.storage.StorageException; import org.elasticsearch.cloud.azure.blobstore.AzureBlobStore; import org.elasticsearch.cloud.azure.storage.AzureStorageService; -import org.elasticsearch.cloud.azure.storage.AzureStorageService.Storage; import org.elasticsearch.cluster.metadata.MetaData; import org.elasticsearch.cluster.metadata.RepositoryMetaData; import org.elasticsearch.common.Strings; @@ -46,7 +45,6 @@ import static org.elasticsearch.cloud.azure.storage.AzureStorageService.MAX_CHUNK_SIZE; import static org.elasticsearch.cloud.azure.storage.AzureStorageService.MIN_CHUNK_SIZE; -import static org.elasticsearch.cloud.azure.storage.AzureStorageSettings.getValue; /** * Azure file system implementation of the BlobStoreRepository @@ -86,10 +84,10 @@ public AzureRepository(RepositoryMetaData metadata, Environment environment, super(metadata, environment.settings(), namedXContentRegistry); blobStore = new AzureBlobStore(metadata, environment.settings(), storageService); - String container = getValue(metadata.settings(), settings, Repository.CONTAINER_SETTING, Storage.CONTAINER_SETTING); - this.chunkSize = getValue(metadata.settings(), settings, Repository.CHUNK_SIZE_SETTING, Storage.CHUNK_SIZE_SETTING); - this.compress = getValue(metadata.settings(), settings, Repository.COMPRESS_SETTING, Storage.COMPRESS_SETTING); - String modeStr = getValue(metadata.settings(), settings, Repository.LOCATION_MODE_SETTING, Storage.LOCATION_MODE_SETTING); + String container = Repository.CONTAINER_SETTING.get(metadata.settings()); + this.chunkSize = Repository.CHUNK_SIZE_SETTING.get(metadata.settings()); + this.compress = Repository.COMPRESS_SETTING.get(metadata.settings()); + String modeStr = Repository.LOCATION_MODE_SETTING.get(metadata.settings()); Boolean forcedReadonly = metadata.settings().getAsBoolean("readonly", null); // If the user explicitly did not define a readonly value, we set it by ourselves depending on the location mode setting. 
// For secondary_only setting, the repository should be read only @@ -104,7 +102,7 @@ public AzureRepository(RepositoryMetaData metadata, Environment environment, readonly = forcedReadonly; } - String basePath = getValue(metadata.settings(), settings, Repository.BASE_PATH_SETTING, Storage.BASE_PATH_SETTING); + String basePath = Repository.BASE_PATH_SETTING.get(metadata.settings()); if (Strings.hasLength(basePath)) { // Remove starting / if any diff --git a/plugins/repository-azure/src/test/java/org/elasticsearch/cloud/azure/storage/AzureStorageServiceTests.java b/plugins/repository-azure/src/test/java/org/elasticsearch/cloud/azure/storage/AzureStorageServiceTests.java index ba377c03c47f0..0452380ceb98f 100644 --- a/plugins/repository-azure/src/test/java/org/elasticsearch/cloud/azure/storage/AzureStorageServiceTests.java +++ b/plugins/repository-azure/src/test/java/org/elasticsearch/cloud/azure/storage/AzureStorageServiceTests.java @@ -20,6 +20,7 @@ package org.elasticsearch.cloud.azure.storage; import com.microsoft.azure.storage.LocationMode; +import com.microsoft.azure.storage.RetryExponentialRetry; import com.microsoft.azure.storage.blob.CloudBlobClient; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.test.ESTestCase; @@ -28,7 +29,9 @@ import java.net.URISyntaxException; import static org.elasticsearch.cloud.azure.storage.AzureStorageServiceImpl.blobNameFromUri; +import static org.hamcrest.Matchers.instanceOf; import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.notNullValue; import static org.hamcrest.Matchers.nullValue; public class AzureStorageServiceTests extends ESTestCase { @@ -143,6 +146,31 @@ public void testGetSelectedClientNoTimeout() { assertThat(client1.getDefaultRequestOptions().getTimeoutIntervalInMs(), is(nullValue())); } + public void testGetSelectedClientBackoffPolicy() { + Settings timeoutSettings = Settings.builder() + .put("cloud.azure.storage.azure.account", "myaccount") + .put("cloud.azure.storage.azure.key", "mykey") + .build(); + + AzureStorageServiceImpl azureStorageService = new AzureStorageServiceMock(timeoutSettings); + CloudBlobClient client1 = azureStorageService.getSelectedClient("azure", LocationMode.PRIMARY_ONLY); + assertThat(client1.getDefaultRequestOptions().getRetryPolicyFactory(), is(notNullValue())); + assertThat(client1.getDefaultRequestOptions().getRetryPolicyFactory(), instanceOf(RetryExponentialRetry.class)); + } + + public void testGetSelectedClientBackoffPolicyNbRetries() { + Settings timeoutSettings = Settings.builder() + .put("cloud.azure.storage.azure.account", "myaccount") + .put("cloud.azure.storage.azure.key", "mykey") + .put("cloud.azure.storage.azure.max_retries", 7) + .build(); + + AzureStorageServiceImpl azureStorageService = new AzureStorageServiceMock(timeoutSettings); + CloudBlobClient client1 = azureStorageService.getSelectedClient("azure", LocationMode.PRIMARY_ONLY); + assertThat(client1.getDefaultRequestOptions().getRetryPolicyFactory(), is(notNullValue())); + assertThat(client1.getDefaultRequestOptions().getRetryPolicyFactory(), instanceOf(RetryExponentialRetry.class)); + } + /** * This internal class just overload createClient method which is called by AzureStorageServiceImpl.doStart() */ diff --git a/qa/backwards-5.0/build.gradle b/qa/backwards-5.0/build.gradle index 90dd2f2d8bbf6..b68573ef3897d 100644 --- a/qa/backwards-5.0/build.gradle +++ b/qa/backwards-5.0/build.gradle @@ -20,19 +20,7 @@ apply plugin: 'elasticsearch.standalone-rest-test' apply plugin: 
'elasticsearch.rest-test' -/* This project runs the core REST tests against a 2 node cluster where one of the nodes has a different minor. - * Since we don't have a version to test against we currently use the hardcoded snapshot for to basically run - * against ourselves. To test that using a different version go to distribution/zip and execute: - * gradle clean publishToMavenLocal -Dbuild.snapshot=false - * - * This installs the release-build into a local .m2 repository, then change this version here to: - * bwcVersion = "5.0.0" - * - * now you can run the bwc tests with: - * gradle check -Drepos.mavenlocal=true - * - * (-Drepos.mavenlocal=true will force gradle to look for the zip distribution in the local .m2 repository) - */ +/* This project runs the core REST tests against a 2 node cluster where one of the nodes has a different minor. */ integTest { includePackaged = true } @@ -40,12 +28,7 @@ integTest { integTestCluster { numNodes = 4 numBwcNodes = 2 - bwcVersion = "5.4.0-SNAPSHOT" + bwcVersion = project.bwcVersion setting 'logger.org.elasticsearch', 'DEBUG' } -repositories { - maven { - url "https://oss.sonatype.org/content/repositories/snapshots/" - } -} diff --git a/qa/evil-tests/src/test/java/org/elasticsearch/bootstrap/EvilJNANativesTests.java b/qa/evil-tests/src/test/java/org/elasticsearch/bootstrap/EvilJNANativesTests.java index 069f7e1aeffe6..eb679df9f6adb 100644 --- a/qa/evil-tests/src/test/java/org/elasticsearch/bootstrap/EvilJNANativesTests.java +++ b/qa/evil-tests/src/test/java/org/elasticsearch/bootstrap/EvilJNANativesTests.java @@ -33,14 +33,18 @@ public class EvilJNANativesTests extends ESTestCase { + @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/23640") public void testSetMaximumNumberOfThreads() throws IOException { if (Constants.LINUX) { final List lines = Files.readAllLines(PathUtils.get("/proc/self/limits")); if (!lines.isEmpty()) { - for (String line : lines) { + for (final String line : lines) { if (line != null && line.startsWith("Max processes")) { final String[] fields = line.split("\\s+"); - final long limit = "unlimited".equals(fields[2]) ? JNACLibrary.RLIM_INFINITY : Long.parseLong(fields[2]); + final long limit = + "unlimited".equals(fields[2]) + ? 
JNACLibrary.RLIM_INFINITY + : Long.parseLong(fields[2]); assertThat(JNANatives.MAX_NUMBER_OF_THREADS, equalTo(limit)); return; } @@ -52,22 +56,27 @@ public void testSetMaximumNumberOfThreads() throws IOException { } } + @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/23640") public void testSetMaxSizeVirtualMemory() throws IOException { if (Constants.LINUX) { final List lines = Files.readAllLines(PathUtils.get("/proc/self/limits")); if (!lines.isEmpty()) { - for (String line : lines) { + for (final String line : lines) { if (line != null && line.startsWith("Max address space")) { final String[] fields = line.split("\\s+"); final String limit = fields[3]; - assertEquals(JNANatives.rlimitToString(JNANatives.MAX_SIZE_VIRTUAL_MEMORY), limit); + assertThat( + JNANatives.rlimitToString(JNANatives.MAX_SIZE_VIRTUAL_MEMORY), + equalTo(limit)); return; } } } fail("should have read max size virtual memory from /proc/self/limits"); } else if (Constants.MAC_OS_X) { - assertThat(JNANatives.MAX_SIZE_VIRTUAL_MEMORY, anyOf(equalTo(Long.MIN_VALUE), greaterThanOrEqualTo(0L))); + assertThat( + JNANatives.MAX_SIZE_VIRTUAL_MEMORY, + anyOf(equalTo(Long.MIN_VALUE), greaterThanOrEqualTo(0L))); } else { assertThat(JNANatives.MAX_SIZE_VIRTUAL_MEMORY, equalTo(Long.MIN_VALUE)); } diff --git a/qa/evil-tests/src/test/java/org/elasticsearch/plugins/InstallPluginCommandTests.java b/qa/evil-tests/src/test/java/org/elasticsearch/plugins/InstallPluginCommandTests.java index 8d99ab8e89d42..732852ca1533b 100644 --- a/qa/evil-tests/src/test/java/org/elasticsearch/plugins/InstallPluginCommandTests.java +++ b/qa/evil-tests/src/test/java/org/elasticsearch/plugins/InstallPluginCommandTests.java @@ -595,7 +595,7 @@ public void testZipRelativeOutsideEntryName() throws Exception { stream.putNextEntry(new ZipEntry("elasticsearch/../blah")); } String pluginZip = zip.toUri().toURL().toString(); - IOException e = expectThrows(IOException.class, () -> installPlugin(pluginZip, env.v1())); + UserException e = expectThrows(UserException.class, () -> installPlugin(pluginZip, env.v1())); assertTrue(e.getMessage(), e.getMessage().contains("resolving outside of plugin directory")); } diff --git a/qa/evil-tests/src/test/java/org/elasticsearch/plugins/ListPluginsCommandTests.java b/qa/evil-tests/src/test/java/org/elasticsearch/plugins/ListPluginsCommandTests.java index 4412d19394f8a..7ebc2f0709bce 100644 --- a/qa/evil-tests/src/test/java/org/elasticsearch/plugins/ListPluginsCommandTests.java +++ b/qa/evil-tests/src/test/java/org/elasticsearch/plugins/ListPluginsCommandTests.java @@ -25,18 +25,16 @@ import java.nio.file.NoSuchFileException; import java.nio.file.Path; import java.util.Arrays; -import java.util.HashMap; -import java.util.Map; +import java.util.Locale; import java.util.stream.Collectors; import org.apache.lucene.util.LuceneTestCase; +import org.elasticsearch.Version; import org.elasticsearch.cli.ExitCodes; import org.elasticsearch.cli.MockTerminal; -import org.elasticsearch.common.inject.spi.HasDependencies; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.env.Environment; import org.elasticsearch.test.ESTestCase; -import org.elasticsearch.Version; import org.junit.Before; @LuceneTestCase.SuppressFileSystems("*") @@ -75,25 +73,39 @@ protected boolean addShutdownHook() { return terminal; } - static String buildMultiline(String... args){ - return Arrays.asList(args).stream().collect(Collectors.joining("\n", "", "\n")); + private static String buildMultiline(String... 
args){ + return Arrays.stream(args).collect(Collectors.joining("\n", "", "\n")); + } + + private static void buildFakePlugin( + final Environment env, + final String description, + final String name, + final String classname) throws IOException { + buildFakePlugin(env, description, name, classname, false); } - static void buildFakePlugin(Environment env, String description, String name, String classname) throws IOException { - PluginTestUtil.writeProperties(env.pluginsFile().resolve(name), + private static void buildFakePlugin( + final Environment env, + final String description, + final String name, + final String classname, + final boolean hasNativeController) throws IOException { + PluginTestUtil.writeProperties( + env.pluginsFile().resolve(name), "description", description, "name", name, "version", "1.0", "elasticsearch.version", Version.CURRENT.toString(), "java.version", System.getProperty("java.specification.version"), - "classname", classname); + "classname", classname, + "has.native.controller", Boolean.toString(hasNativeController)); } - public void testPluginsDirMissing() throws Exception { Files.delete(env.pluginsFile()); IOException e = expectThrows(IOException.class, () -> listPlugins(home)); - assertEquals(e.getMessage(), "Plugins directory missing: " + env.pluginsFile()); + assertEquals("Plugins directory missing: " + env.pluginsFile(), e.getMessage()); } public void testNoPlugins() throws Exception { @@ -104,22 +116,48 @@ public void testNoPlugins() throws Exception { public void testOnePlugin() throws Exception { buildFakePlugin(env, "fake desc", "fake", "org.fake"); MockTerminal terminal = listPlugins(home); - assertEquals(terminal.getOutput(), buildMultiline("fake")); + assertEquals(buildMultiline("fake"), terminal.getOutput()); } public void testTwoPlugins() throws Exception { buildFakePlugin(env, "fake desc", "fake1", "org.fake"); buildFakePlugin(env, "fake desc 2", "fake2", "org.fake"); MockTerminal terminal = listPlugins(home); - assertEquals(terminal.getOutput(), buildMultiline("fake1", "fake2")); + assertEquals(buildMultiline("fake1", "fake2"), terminal.getOutput()); } public void testPluginWithVerbose() throws Exception { buildFakePlugin(env, "fake desc", "fake_plugin", "org.fake"); String[] params = { "-v" }; MockTerminal terminal = listPlugins(home, params); - assertEquals(terminal.getOutput(), buildMultiline("Plugins directory: " + env.pluginsFile(), "fake_plugin", - "- Plugin information:", "Name: fake_plugin", "Description: fake desc", "Version: 1.0", " * Classname: org.fake")); + assertEquals( + buildMultiline( + "Plugins directory: " + env.pluginsFile(), + "fake_plugin", + "- Plugin information:", + "Name: fake_plugin", + "Description: fake desc", + "Version: 1.0", + "Native Controller: false", + " * Classname: org.fake"), + terminal.getOutput()); + } + + public void testPluginWithNativeController() throws Exception { + buildFakePlugin(env, "fake desc 1", "fake_plugin1", "org.fake", true); + String[] params = { "-v" }; + MockTerminal terminal = listPlugins(home, params); + assertEquals( + buildMultiline( + "Plugins directory: " + env.pluginsFile(), + "fake_plugin1", + "- Plugin information:", + "Name: fake_plugin1", + "Description: fake desc 1", + "Version: 1.0", + "Native Controller: true", + " * Classname: org.fake"), + terminal.getOutput()); } public void testPluginWithVerboseMultiplePlugins() throws Exception { @@ -127,10 +165,24 @@ public void testPluginWithVerboseMultiplePlugins() throws Exception { buildFakePlugin(env, "fake desc 2", "fake_plugin2", 
"org.fake2"); String[] params = { "-v" }; MockTerminal terminal = listPlugins(home, params); - assertEquals(terminal.getOutput(), buildMultiline("Plugins directory: " + env.pluginsFile(), - "fake_plugin1", "- Plugin information:", "Name: fake_plugin1", "Description: fake desc 1", "Version: 1.0", - " * Classname: org.fake", "fake_plugin2", "- Plugin information:", "Name: fake_plugin2", - "Description: fake desc 2", "Version: 1.0", " * Classname: org.fake2")); + assertEquals( + buildMultiline( + "Plugins directory: " + env.pluginsFile(), + "fake_plugin1", + "- Plugin information:", + "Name: fake_plugin1", + "Description: fake desc 1", + "Version: 1.0", + "Native Controller: false", + " * Classname: org.fake", + "fake_plugin2", + "- Plugin information:", + "Name: fake_plugin2", + "Description: fake desc 2", + "Version: 1.0", + "Native Controller: false", + " * Classname: org.fake2"), + terminal.getOutput()); } public void testPluginWithoutVerboseMultiplePlugins() throws Exception { @@ -138,21 +190,51 @@ public void testPluginWithoutVerboseMultiplePlugins() throws Exception { buildFakePlugin(env, "fake desc 2", "fake_plugin2", "org.fake2"); MockTerminal terminal = listPlugins(home, new String[0]); String output = terminal.getOutput(); - assertEquals(output, buildMultiline("fake_plugin1", "fake_plugin2")); + assertEquals(buildMultiline("fake_plugin1", "fake_plugin2"), output); } public void testPluginWithoutDescriptorFile() throws Exception{ - Files.createDirectories(env.pluginsFile().resolve("fake1")); + final Path pluginDir = env.pluginsFile().resolve("fake1"); + Files.createDirectories(pluginDir); NoSuchFileException e = expectThrows(NoSuchFileException.class, () -> listPlugins(home)); - assertEquals(e.getFile(), env.pluginsFile().resolve("fake1").resolve(PluginInfo.ES_PLUGIN_PROPERTIES).toString()); + assertEquals(pluginDir.resolve(PluginInfo.ES_PLUGIN_PROPERTIES).toString(), e.getFile()); } public void testPluginWithWrongDescriptorFile() throws Exception{ - PluginTestUtil.writeProperties(env.pluginsFile().resolve("fake1"), - "description", "fake desc"); - IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> listPlugins(home)); - assertEquals(e.getMessage(), "Property [name] is missing in [" + - env.pluginsFile().resolve("fake1").resolve(PluginInfo.ES_PLUGIN_PROPERTIES).toString() + "]"); + final Path pluginDir = env.pluginsFile().resolve("fake1"); + PluginTestUtil.writeProperties(pluginDir, "description", "fake desc"); + IllegalArgumentException e = expectThrows( + IllegalArgumentException.class, + () -> listPlugins(home)); + final Path descriptorPath = pluginDir.resolve(PluginInfo.ES_PLUGIN_PROPERTIES); + assertEquals( + "property [name] is missing in [" + descriptorPath.toString() + "]", + e.getMessage()); + } + + public void testExistingIncompatiblePlugin() throws Exception { + PluginTestUtil.writeProperties(env.pluginsFile().resolve("fake_plugin1"), + "description", "fake desc 1", + "name", "fake_plugin1", + "version", "1.0", + "elasticsearch.version", Version.fromString("1.0.0").toString(), + "java.version", System.getProperty("java.specification.version"), + "classname", "org.fake1"); + buildFakePlugin(env, "fake desc 2", "fake_plugin2", "org.fake2"); + + MockTerminal terminal = listPlugins(home); + final String message = String.format(Locale.ROOT, + "plugin [%s] is incompatible with version [%s]; was designed for version [%s]", + "fake_plugin1", + Version.CURRENT.toString(), + "1.0.0"); + assertEquals( + "fake_plugin1\n" + "WARNING: " + message + "\n" + 
"fake_plugin2\n", + terminal.getOutput()); + + String[] params = {"-s"}; + terminal = listPlugins(home, params); + assertEquals("fake_plugin1\nfake_plugin2\n", terminal.getOutput()); } } diff --git a/qa/evil-tests/src/test/java/org/elasticsearch/plugins/PluginSecurityTests.java b/qa/evil-tests/src/test/java/org/elasticsearch/plugins/PluginSecurityTests.java index 466f7d05cd1d2..77ecd12f786fb 100644 --- a/qa/evil-tests/src/test/java/org/elasticsearch/plugins/PluginSecurityTests.java +++ b/qa/evil-tests/src/test/java/org/elasticsearch/plugins/PluginSecurityTests.java @@ -19,60 +19,132 @@ package org.elasticsearch.plugins; +import org.apache.lucene.util.LuceneTestCase; +import org.elasticsearch.Version; +import org.elasticsearch.cli.MockTerminal; import org.elasticsearch.cli.Terminal; import org.elasticsearch.test.ESTestCase; +import java.io.IOException; import java.nio.file.Path; import java.security.Permission; import java.security.PermissionCollection; import java.security.Permissions; import java.util.Collections; import java.util.List; +import java.util.function.Supplier; + +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.hasToString; +import static org.hamcrest.Matchers.not; /** Tests plugin manager security check */ public class PluginSecurityTests extends ESTestCase { + private final Supplier tmpFile = LuceneTestCase::createTempDir; + + public void testHasNativeController() throws IOException { + assumeTrue( + "test cannot run with security manager enabled", + System.getSecurityManager() == null); + final PluginInfo info = + new PluginInfo("fake", "fake", Version.CURRENT.toString(), "Fake", true); + final MockTerminal terminal = new MockTerminal(); + terminal.addTextInput("y"); + terminal.addTextInput("y"); + final Path policyFile = this.getDataPath("security/simple-plugin-security.policy"); + PluginSecurity.readPolicy(info, policyFile, terminal, tmpFile, false); + final String output = terminal.getOutput(); + assertThat(output, containsString("plugin forks a native controller")); + } + + public void testDeclineNativeController() throws IOException { + assumeTrue( + "test cannot run with security manager enabled", + System.getSecurityManager() == null); + final PluginInfo info = + new PluginInfo("fake", "fake", Version.CURRENT.toString(), "Fake", true); + final MockTerminal terminal = new MockTerminal(); + terminal.addTextInput("y"); + terminal.addTextInput("n"); + final Path policyFile = this.getDataPath("security/simple-plugin-security.policy"); + RuntimeException e = expectThrows( + RuntimeException.class, + () -> PluginSecurity.readPolicy(info, policyFile, terminal, tmpFile, false)); + assertThat(e, hasToString(containsString("installation aborted by user"))); + } + + public void testDoesNotHaveNativeController() throws IOException { + assumeTrue( + "test cannot run with security manager enabled", + System.getSecurityManager() == null); + final PluginInfo info = + new PluginInfo("fake", "fake", Version.CURRENT.toString(), "Fake", false); + final MockTerminal terminal = new MockTerminal(); + terminal.addTextInput("y"); + final Path policyFile = this.getDataPath("security/simple-plugin-security.policy"); + PluginSecurity.readPolicy(info, policyFile, terminal, tmpFile, false); + final String output = terminal.getOutput(); + assertThat(output, not(containsString("plugin forks a native controller"))); + } + /** Test that we can parse the set of permissions correctly for a simple policy */ public void testParsePermissions() throws Exception { - 
assumeTrue("test cannot run with security manager enabled", System.getSecurityManager() == null); + assumeTrue( + "test cannot run with security manager enabled", + System.getSecurityManager() == null); Path scratch = createTempDir(); Path testFile = this.getDataPath("security/simple-plugin-security.policy"); Permissions expected = new Permissions(); expected.add(new RuntimePermission("queuePrintJob")); - PermissionCollection actual = PluginSecurity.parsePermissions(Terminal.DEFAULT, testFile, scratch); + PermissionCollection actual = + PluginSecurity.parsePermissions(Terminal.DEFAULT, testFile, scratch); assertEquals(expected, actual); } /** Test that we can parse the set of permissions correctly for a complex policy */ public void testParseTwoPermissions() throws Exception { - assumeTrue("test cannot run with security manager enabled", System.getSecurityManager() == null); + assumeTrue( + "test cannot run with security manager enabled", + System.getSecurityManager() == null); Path scratch = createTempDir(); Path testFile = this.getDataPath("security/complex-plugin-security.policy"); Permissions expected = new Permissions(); expected.add(new RuntimePermission("getClassLoader")); expected.add(new RuntimePermission("closeClassLoader")); - PermissionCollection actual = PluginSecurity.parsePermissions(Terminal.DEFAULT, testFile, scratch); + PermissionCollection actual = + PluginSecurity.parsePermissions(Terminal.DEFAULT, testFile, scratch); assertEquals(expected, actual); } /** Test that we can format some simple permissions properly */ public void testFormatSimplePermission() throws Exception { - assertEquals("java.lang.RuntimePermission queuePrintJob", PluginSecurity.formatPermission(new RuntimePermission("queuePrintJob"))); + assertEquals( + "java.lang.RuntimePermission queuePrintJob", + PluginSecurity.formatPermission(new RuntimePermission("queuePrintJob"))); } /** Test that we can format an unresolved permission properly */ public void testFormatUnresolvedPermission() throws Exception { - assumeTrue("test cannot run with security manager enabled", System.getSecurityManager() == null); + assumeTrue( + "test cannot run with security manager enabled", + System.getSecurityManager() == null); Path scratch = createTempDir(); Path testFile = this.getDataPath("security/unresolved-plugin-security.policy"); - PermissionCollection actual = PluginSecurity.parsePermissions(Terminal.DEFAULT, testFile, scratch); + PermissionCollection actual = + PluginSecurity.parsePermissions(Terminal.DEFAULT, testFile, scratch); List permissions = Collections.list(actual.elements()); assertEquals(1, permissions.size()); - assertEquals("org.fake.FakePermission fakeName", PluginSecurity.formatPermission(permissions.get(0))); + assertEquals( + "org.fake.FakePermission fakeName", + PluginSecurity.formatPermission(permissions.get(0))); } /** no guaranteed equals on these classes, we assert they contain the same set */ private void assertEquals(PermissionCollection expected, PermissionCollection actual) { - assertEquals(asSet(Collections.list(expected.elements())), asSet(Collections.list(actual.elements()))); + assertEquals( + asSet(Collections.list(expected.elements())), + asSet(Collections.list(actual.elements()))); } + } diff --git a/qa/evil-tests/src/test/java/org/elasticsearch/plugins/RemovePluginCommandTests.java b/qa/evil-tests/src/test/java/org/elasticsearch/plugins/RemovePluginCommandTests.java index 997fbeffadd94..a42e66fe872f7 100644 --- 
a/qa/evil-tests/src/test/java/org/elasticsearch/plugins/RemovePluginCommandTests.java +++ b/qa/evil-tests/src/test/java/org/elasticsearch/plugins/RemovePluginCommandTests.java @@ -79,7 +79,7 @@ static void assertRemoveCleaned(Environment env) throws IOException { public void testMissing() throws Exception { UserException e = expectThrows(UserException.class, () -> removePlugin("dne", home)); - assertTrue(e.getMessage(), e.getMessage().contains("plugin dne not found")); + assertTrue(e.getMessage(), e.getMessage().contains("plugin [dne] not found")); assertRemoveCleaned(env); } @@ -136,7 +136,7 @@ public void testNoConfigDirPreserved() throws Exception { public void testRemoveUninstalledPluginErrors() throws Exception { UserException e = expectThrows(UserException.class, () -> removePlugin("fake", home)); assertEquals(ExitCodes.CONFIG, e.exitCode); - assertEquals("plugin fake not found; run 'elasticsearch-plugin list' to get list of installed plugins", e.getMessage()); + assertEquals("plugin [fake] not found; run 'elasticsearch-plugin list' to get list of installed plugins", e.getMessage()); MockTerminal terminal = new MockTerminal(); new RemovePluginCommand() { @@ -146,8 +146,8 @@ protected boolean addShutdownHook() { } }.main(new String[] { "-Epath.home=" + home, "fake" }, terminal); try (BufferedReader reader = new BufferedReader(new StringReader(terminal.getOutput()))) { - assertEquals("-> Removing fake...", reader.readLine()); - assertEquals("ERROR: plugin fake not found; run 'elasticsearch-plugin list' to get list of installed plugins", + assertEquals("-> removing [fake]...", reader.readLine()); + assertEquals("ERROR: plugin [fake] not found; run 'elasticsearch-plugin list' to get list of installed plugins", reader.readLine()); assertNull(reader.readLine()); } @@ -160,7 +160,7 @@ public void testMissingPluginName() throws Exception { } private String expectedConfigDirPreservedMessage(final Path configDir) { - return "-> Preserving plugin config files [" + configDir + "] in case of upgrade, delete manually if not needed"; + return "-> preserving plugin config files [" + configDir + "] in case of upgrade; delete manually if not needed"; } } diff --git a/qa/no-bootstrap-tests/src/test/java/org/elasticsearch/bootstrap/SpawnerNoBootstrapTests.java b/qa/no-bootstrap-tests/src/test/java/org/elasticsearch/bootstrap/SpawnerNoBootstrapTests.java index 743d2408b9d49..f81bd3b2d4794 100644 --- a/qa/no-bootstrap-tests/src/test/java/org/elasticsearch/bootstrap/SpawnerNoBootstrapTests.java +++ b/qa/no-bootstrap-tests/src/test/java/org/elasticsearch/bootstrap/SpawnerNoBootstrapTests.java @@ -21,8 +21,11 @@ import org.apache.lucene.util.Constants; import org.apache.lucene.util.LuceneTestCase; +import org.elasticsearch.Version; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.env.Environment; +import org.elasticsearch.plugins.PluginTestUtil; +import org.elasticsearch.plugins.Platforms; import java.io.BufferedReader; import java.io.IOException; @@ -36,11 +39,15 @@ import java.util.Set; import java.util.concurrent.TimeUnit; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.hasSize; + /** * Create a simple "daemon controller", put it in the right place and check that it runs. * - * Extends LuceneTestCase rather than ESTestCase as ESTestCase installs a system call filter, and that prevents the Spawner class doing its - * job. Also needs to run in a separate JVM to other tests that extend ESTestCase for the same reason. 
+ * Extends LuceneTestCase rather than ESTestCase as ESTestCase installs a system call filter, and + * that prevents the Spawner class from doing its job. Also needs to run in a separate JVM to other + * tests that extend ESTestCase for the same reason. */ public class SpawnerNoBootstrapTests extends LuceneTestCase { @@ -64,10 +71,19 @@ public void testNoControllerSpawn() throws IOException, InterruptedException { // This plugin will NOT have a controller daemon Path plugin = environment.pluginsFile().resolve("a_plugin"); Files.createDirectories(plugin); + PluginTestUtil.writeProperties( + plugin, + "description", "a_plugin", + "version", Version.CURRENT.toString(), + "elasticsearch.version", Version.CURRENT.toString(), + "name", "a_plugin", + "java.version", "1.8", + "classname", "APlugin", + "has.native.controller", "false"); try (Spawner spawner = new Spawner()) { spawner.spawnNativePluginControllers(environment); - assertTrue(spawner.getProcesses().isEmpty()); + assertThat(spawner.getProcesses(), hasSize(0)); } } @@ -75,10 +91,10 @@ public void testNoControllerSpawn() throws IOException, InterruptedException { * Two plugins - one with a controller daemon and one without. */ public void testControllerSpawn() throws IOException, InterruptedException { - // On Windows you cannot directly run a batch file - you have to run cmd.exe with the batch file - // as an argument and that's out of the remit of the controller daemon process spawner. If - // you need to build on Windows, just don't run this test. The process spawner itself will work - // with native processes. + /* + * On Windows you can not directly run a batch file - you have to run cmd.exe with the batch + * file as an argument and that's out of the remit of the controller daemon process spawner. 
+ */ assumeFalse("This test does not work on Windows", Constants.WINDOWS); Path esHome = createTempDir().resolve("esHome"); @@ -88,32 +104,90 @@ public void testControllerSpawn() throws IOException, InterruptedException { Environment environment = new Environment(settings); - // This plugin WILL have a controller daemon + // this plugin will have a controller daemon Path plugin = environment.pluginsFile().resolve("test_plugin"); Files.createDirectories(plugin); - Path controllerProgram = Spawner.makeSpawnPath(plugin); + PluginTestUtil.writeProperties( + plugin, + "description", "test_plugin", + "version", Version.CURRENT.toString(), + "elasticsearch.version", Version.CURRENT.toString(), + "name", "test_plugin", + "java.version", "1.8", + "classname", "TestPlugin", + "has.native.controller", "true"); + Path controllerProgram = Platforms.nativeControllerPath(plugin); createControllerProgram(controllerProgram); - // This plugin will NOT have a controller daemon + // this plugin will not have a controller daemon Path otherPlugin = environment.pluginsFile().resolve("other_plugin"); Files.createDirectories(otherPlugin); + PluginTestUtil.writeProperties( + otherPlugin, + "description", "other_plugin", + "version", Version.CURRENT.toString(), + "elasticsearch.version", Version.CURRENT.toString(), + "name", "other_plugin", + "java.version", "1.8", + "classname", "OtherPlugin", + "has.native.controller", "false"); Spawner spawner = new Spawner(); spawner.spawnNativePluginControllers(environment); List processes = spawner.getProcesses(); - // 1 because there should only be a reference in the list for the plugin that had the controller daemon, not the other plugin - assertEquals(1, processes.size()); + /* + * As there should only be a reference in the list for the plugin that had the controller + * daemon, we expect one here. + */ + assertThat(processes, hasSize(1)); Process process = processes.get(0); - try (BufferedReader stdoutReader = new BufferedReader(new InputStreamReader(process.getInputStream(), StandardCharsets.UTF_8))) { + final InputStreamReader in = + new InputStreamReader(process.getInputStream(), StandardCharsets.UTF_8); + try (BufferedReader stdoutReader = new BufferedReader(in)) { String line = stdoutReader.readLine(); assertEquals("I am alive", line); spawner.close(); - // Fail if the process doesn't die within 1 second - usually it will be even quicker but it depends on OS scheduling + /* + * Fail if the process does not die within one second; usually it will be even quicker + * but it depends on OS scheduling. 
+ */ assertTrue(process.waitFor(1, TimeUnit.SECONDS)); } } + public void testControllerSpawnWithIncorrectDescriptor() throws IOException { + // this plugin will have a controller daemon + Path esHome = createTempDir().resolve("esHome"); + Settings.Builder settingsBuilder = Settings.builder(); + settingsBuilder.put(Environment.PATH_HOME_SETTING.getKey(), esHome.toString()); + Settings settings = settingsBuilder.build(); + + Environment environment = new Environment(settings); + + Path plugin = environment.pluginsFile().resolve("test_plugin"); + Files.createDirectories(plugin); + PluginTestUtil.writeProperties( + plugin, + "description", "test_plugin", + "version", Version.CURRENT.toString(), + "elasticsearch.version", Version.CURRENT.toString(), + "name", "test_plugin", + "java.version", "1.8", + "classname", "TestPlugin", + "has.native.controller", "false"); + Path controllerProgram = Platforms.nativeControllerPath(plugin); + createControllerProgram(controllerProgram); + + Spawner spawner = new Spawner(); + IllegalArgumentException e = expectThrows( + IllegalArgumentException.class, + () -> spawner.spawnNativePluginControllers(environment)); + assertThat( + e.getMessage(), + equalTo("plugin [test_plugin] does not have permission to fork native controller")); + } + private void createControllerProgram(Path outputFile) throws IOException { Path outputDir = outputFile.getParent(); Files.createDirectories(outputDir); @@ -128,4 +202,5 @@ private void createControllerProgram(Path outputFile) throws IOException { perms.add(PosixFilePermission.OTHERS_EXECUTE); Files.setPosixFilePermissions(outputFile, perms); } + } diff --git a/qa/rolling-upgrade/build.gradle b/qa/rolling-upgrade/build.gradle index f4cde58d75577..9b8259eecb514 100644 --- a/qa/rolling-upgrade/build.gradle +++ b/qa/rolling-upgrade/build.gradle @@ -27,7 +27,7 @@ task oldClusterTest(type: RestIntegTestTask) { oldClusterTestCluster { distribution = 'zip' - bwcVersion = '5.4.0-SNAPSHOT' // TODO: either randomize, or make this settable with sysprop + bwcVersion = project.bwcVersion // TODO: either randomize, or make this settable with sysprop numBwcNodes = 2 numNodes = 2 clusterName = 'rolling-upgrade' diff --git a/qa/smoke-test-client/src/test/java/org/elasticsearch/smoketest/SmokeTestClientIT.java b/qa/smoke-test-client/src/test/java/org/elasticsearch/smoketest/SmokeTestClientIT.java index 6380ed90e18c9..2e86ce82221d2 100644 --- a/qa/smoke-test-client/src/test/java/org/elasticsearch/smoketest/SmokeTestClientIT.java +++ b/qa/smoke-test-client/src/test/java/org/elasticsearch/smoketest/SmokeTestClientIT.java @@ -19,7 +19,6 @@ package org.elasticsearch.smoketest; -import org.apache.lucene.util.Constants; import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse; import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.client.Client; @@ -29,35 +28,29 @@ public class SmokeTestClientIT extends ESSmokeClientTestCase { - // needed to avoid the test suite from failing for having no tests - // TODO: remove when Netty 4.1.5 is upgraded to Netty 4.1.6 including https://github.com/netty/netty/pull/5778 - public void testSoThatTestsDoNotFail() { - - } - /** * Check that we are connected to a cluster named "elasticsearch". 
*/ public void testSimpleClient() { - // TODO: remove when Netty 4.1.5 is upgraded to Netty 4.1.6 including https://github.com/netty/netty/pull/5778 - assumeFalse("JDK is JDK 9", Constants.JRE_IS_MINIMUM_JAVA9); - Client client = getClient(); + final Client client = getClient(); // START SNIPPET: java-doc-admin-cluster-health - ClusterHealthResponse health = client.admin().cluster().prepareHealth().setWaitForYellowStatus().get(); - String clusterName = health.getClusterName(); - int numberOfNodes = health.getNumberOfNodes(); + final ClusterHealthResponse health = + client.admin().cluster().prepareHealth().setWaitForYellowStatus().get(); + final String clusterName = health.getClusterName(); + final int numberOfNodes = health.getNumberOfNodes(); // END SNIPPET: java-doc-admin-cluster-health - assertThat("cluster [" + clusterName + "] should have at least 1 node", numberOfNodes, greaterThan(0)); + assertThat( + "cluster [" + clusterName + "] should have at least 1 node", + numberOfNodes, + greaterThan(0)); } /** * Create an index and index some docs */ public void testPutDocument() { - // TODO: remove when Netty 4.1.5 is upgraded to Netty 4.1.6 including https://github.com/netty/netty/pull/5778 - assumeFalse("JDK is JDK 9", Constants.JRE_IS_MINIMUM_JAVA9); - Client client = getClient(); + final Client client = getClient(); // START SNIPPET: java-doc-index-doc-simple client.prepareIndex(index, "doc", "1") // Index, Type, Id @@ -71,7 +64,7 @@ public void testPutDocument() { // END SNIPPET: java-doc-admin-indices-refresh // START SNIPPET: java-doc-search-simple - SearchResponse searchResponse = client.prepareSearch(index).get(); + final SearchResponse searchResponse = client.prepareSearch(index).get(); assertThat(searchResponse.getHits().getTotalHits(), is(1L)); // END SNIPPET: java-doc-search-simple } diff --git a/qa/vagrant/src/test/resources/packaging/utils/utils.bash b/qa/vagrant/src/test/resources/packaging/utils/utils.bash index 143430a542fd2..f8abe1b526621 100644 --- a/qa/vagrant/src/test/resources/packaging/utils/utils.bash +++ b/qa/vagrant/src/test/resources/packaging/utils/utils.bash @@ -435,7 +435,7 @@ wait_for_elasticsearch_status() { if [ $? -eq 0 ]; then echo "Connected" else - echo "Unable to connect to Elastisearch" + echo "Unable to connect to Elasticsearch" false fi diff --git a/qa/vagrant/versions b/qa/vagrant/versions index 40ad2117f7d7e..6269784d34120 100644 --- a/qa/vagrant/versions +++ b/qa/vagrant/versions @@ -6,3 +6,4 @@ 5.2.0 5.2.1 5.2.2 +5.3.0 diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/delete_by_query.json b/rest-api-spec/src/main/resources/rest-api-spec/api/delete_by_query.json index b76d560bcb450..8ed3202e9af81 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/delete_by_query.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/delete_by_query.json @@ -146,7 +146,7 @@ }, "wait_for_completion": { "type" : "boolean", - "default": false, + "default": true, "description" : "Should the request should block until the delete-by-query is complete." 
}, "requests_per_second": { diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/field_caps.json b/rest-api-spec/src/main/resources/rest-api-spec/api/field_caps.json new file mode 100644 index 0000000000000..d993dc0545b74 --- /dev/null +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/field_caps.json @@ -0,0 +1,43 @@ +{ + "field_caps": { + "documentation": "http://www.elastic.co/guide/en/elasticsearch/reference/master/search-field-caps.html", + "methods": ["GET", "POST"], + "url": { + "path": "/_field_caps", + "paths": [ + "/_field_caps", + "/{index}/_field_caps" + ], + "parts": { + "index": { + "type" : "list", + "description" : "A comma-separated list of index names; use `_all` or empty string to perform the operation on all indices" + } + }, + "params": { + "fields": { + "type" : "list", + "description" : "A comma-separated list of field names" + }, + "ignore_unavailable": { + "type" : "boolean", + "description" : "Whether specified concrete indices should be ignored when unavailable (missing or closed)" + }, + "allow_no_indices": { + "type" : "boolean", + "description" : "Whether to ignore if a wildcard indices expression resolves into no concrete indices. (This includes `_all` string or when no indices have been specified)" + }, + "expand_wildcards": { + "type" : "enum", + "options" : ["open","closed","none","all"], + "default" : "open", + "description" : "Whether to expand wildcard expression to concrete indices that are open, closed or both." + } + } + }, + "body": { + "description": "Field json objects containing an array of field names", + "required": false + } + } +} \ No newline at end of file diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.clear_cache.json b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.clear_cache.json index 31a50c4a8c420..1f24199fad468 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.clear_cache.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.clear_cache.json @@ -50,6 +50,10 @@ "type" : "boolean", "description" : "Clear the recycler cache" }, + "request_cache": { + "type" : "boolean", + "description" : "Clear request cache" + }, "request": { "type" : "boolean", "description" : "Clear request cache" diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.validate_query.json b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.validate_query.json index 7a0977da194f7..3fec822f1d9c3 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.validate_query.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.validate_query.json @@ -66,6 +66,10 @@ "rewrite": { "type": "boolean", "description": "Provide a more detailed explanation showing the actual Lucene query that will be executed." + }, + "all_shards": { + "type": "boolean", + "description": "Execute validation on all shards instead of one random shard per index" } } }, diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/reindex.json b/rest-api-spec/src/main/resources/rest-api-spec/api/reindex.json index 4756361ca1651..fc701c29d6021 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/reindex.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/reindex.json @@ -22,7 +22,7 @@ }, "wait_for_completion": { "type" : "boolean", - "default": false, + "default": true, "description" : "Should the request should block until the reindex is complete." 
}, "requests_per_second": { diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/update_by_query.json b/rest-api-spec/src/main/resources/rest-api-spec/api/update_by_query.json index 8130be8a11fd0..072e950686aa2 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/update_by_query.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/update_by_query.json @@ -154,7 +154,7 @@ }, "wait_for_completion": { "type" : "boolean", - "default": false, + "default": true, "description" : "Should the request should block until the update by query operation is complete." }, "requests_per_second": { diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/bulk/20_list_of_strings.yaml b/rest-api-spec/src/main/resources/rest-api-spec/test/bulk/20_list_of_strings.yaml index def91f4280722..e25626cf3ae28 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/bulk/20_list_of_strings.yaml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/bulk/20_list_of_strings.yaml @@ -11,6 +11,8 @@ - do: count: + # we count through the primary in case there is a replica that has not yet fully recovered + preference: _primary index: test_index - match: {count: 2} diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/cat.shards/10_basic.yaml b/rest-api-spec/src/main/resources/rest-api-spec/test/cat.shards/10_basic.yaml index 579408dbc0b0e..8d72d40b10ba5 100755 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/cat.shards/10_basic.yaml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/cat.shards/10_basic.yaml @@ -277,7 +277,8 @@ h: [index, docs] s: [docs] - - match: # don't use the store here it's cached and might be stale +# don't use the store here it's cached and might be stale + - match: $body: | /^ foo \s+ 0\n bar \s+ 1\n diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/cat.templates/10_basic.yaml b/rest-api-spec/src/main/resources/rest-api-spec/test/cat.templates/10_basic.yaml index 7c7445fc67da5..7dd43e33bec03 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/cat.templates/10_basic.yaml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/cat.templates/10_basic.yaml @@ -61,7 +61,8 @@ cat.templates: {} - match: - $body: / + $body: > + / (^|\n)test \s+ \[test-\*\] \s+ 0 \s+ @@ -70,7 +71,8 @@ / - match: - $body: / + $body: > + / (^|\n)test_2 \s+ \[test-2\*\] \s+ 1 \s+ diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/cluster.put_settings/10_basic.yaml b/rest-api-spec/src/main/resources/rest-api-spec/test/cluster.put_settings/10_basic.yaml index 5031c977ccdd5..083466f94a55b 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/cluster.put_settings/10_basic.yaml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/cluster.put_settings/10_basic.yaml @@ -64,8 +64,9 @@ --- "Test get a default settings": +# this can't be bumped to 5.0.2 until snapshots are published - skip: - version: " - 5.99.99" # this can't be bumped to 5.0.2 until snapshots are published + version: " - 5.99.99" reason: Fetching default group setting was buggy until 5.0.3 - do: diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/create/30_internal_version.yaml b/rest-api-spec/src/main/resources/rest-api-spec/test/create/30_internal_version.yaml index e4cbc443055ac..e220d98816161 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/create/30_internal_version.yaml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/create/30_internal_version.yaml @@ -26,13 +26,13 @@ reason: validation 
logic only fixed from 5.1.2 onwards - do: + catch: request create: index: test type: test id: 3 body: { foo: bar } version: 5 - ignore: 400 - match: { status: 400 } - match: { error.type: action_request_validation_exception } diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/create/35_external_version.yaml b/rest-api-spec/src/main/resources/rest-api-spec/test/create/35_external_version.yaml index 96503aae8aeac..e29690fe8d03b 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/create/35_external_version.yaml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/create/35_external_version.yaml @@ -6,6 +6,7 @@ reason: validation logic only fixed from 5.1.2 onwards - do: + catch: request create: index: test type: test @@ -13,13 +14,13 @@ body: { foo: bar } version_type: external version: 0 - ignore: 400 - match: { status: 400 } - match: { error.type: action_request_validation_exception } - match: { error.reason: "Validation Failed: 1: create operations only support internal versioning. use index instead;" } - do: + catch: request create: index: test type: test @@ -27,7 +28,6 @@ body: { foo: bar } version_type: external version: 5 - ignore: 400 - match: { status: 400 } - match: { error.type: action_request_validation_exception } diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/explain/10_basic.yaml b/rest-api-spec/src/main/resources/rest-api-spec/test/explain/10_basic.yaml index 4ea921a3fa0cd..b5a9212d36b52 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/explain/10_basic.yaml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/explain/10_basic.yaml @@ -4,9 +4,9 @@ setup: index: test_1 body: aliases: - alias_1: { + alias_1: "filter" : { "term" : { "foo" : "bar"} } - } + - do: index: index: test_1 diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/field_caps/10_basic.yaml b/rest-api-spec/src/main/resources/rest-api-spec/test/field_caps/10_basic.yaml new file mode 100644 index 0000000000000..edda7b6dbf3d0 --- /dev/null +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/field_caps/10_basic.yaml @@ -0,0 +1,167 @@ +--- +setup: + - do: + indices.create: + index: test1 + body: + mappings: + t: + properties: + text: + type: text + keyword: + type: keyword + number: + type: double + geo: + type: geo_point + object: + type: object + properties: + nested1 : + type : text + index: false + nested2: + type: float + doc_values: false + - do: + indices.create: + index: test2 + body: + mappings: + t: + properties: + text: + type: text + keyword: + type: keyword + number: + type: double + geo: + type: geo_point + object: + type: object + properties: + nested1 : + type : text + index: true + nested2: + type: float + doc_values: true + - do: + indices.create: + index: test3 + body: + mappings: + t: + properties: + text: + type: text + keyword: + type: keyword + number: + type: long + geo: + type: keyword + object: + type: object + properties: + nested1 : + type : long + index: false + nested2: + type: keyword + doc_values: false + +--- +"Get simple field caps": + - skip: + version: " - 5.99.99" + reason: this uses a new API that has been added in 6.0 + + - do: + field_caps: + index: 'test1,test2,test3' + fields: [text, keyword, number, geo] + + - match: {fields.text.text.searchable: true} + - match: {fields.text.text.aggregatable: false} + - is_false: fields.text.text.indices + - is_false: fields.text.text.non_searchable_indices + - is_false: fields.text.text.non_aggregatable_indices + - match: 
{fields.keyword.keyword.searchable: true} + - match: {fields.keyword.keyword.aggregatable: true} + - is_false: fields.text.keyword.indices + - is_false: fields.text.keyword.non_searchable_indices + - is_false: fields.text.keyword.non_aggregatable_indices + - match: {fields.number.double.searchable: true} + - match: {fields.number.double.aggregatable: true} + - match: {fields.number.double.indices: ["test1", "test2"]} + - is_false: fields.number.double.non_searchable_indices + - is_false: fields.number.double.non_aggregatable_indices + - match: {fields.number.long.searchable: true} + - match: {fields.number.long.aggregatable: true} + - match: {fields.number.long.indices: ["test3"]} + - is_false: fields.number.long.non_searchable_indices + - is_false: fields.number.long.non_aggregatable_indices + - match: {fields.geo.geo_point.searchable: true} + - match: {fields.geo.geo_point.aggregatable: true} + - match: {fields.geo.geo_point.indices: ["test1", "test2"]} + - is_false: fields.geo.geo_point.non_searchable_indices + - is_false: fields.geo.geo_point.non_aggregatable_indices + - match: {fields.geo.keyword.searchable: true} + - match: {fields.geo.keyword.aggregatable: true} + - match: {fields.geo.keyword.indices: ["test3"]} + - is_false: fields.geo.keyword.non_searchable_indices + - is_false: fields.geo.keyword.on_aggregatable_indices +--- +"Get nested field caps": + - skip: + version: " - 5.99.99" + reason: this uses a new API that has been added in 6.0 + + - do: + field_caps: + index: 'test1,test2,test3' + fields: object* + + - match: {fields.object\.nested1.long.searchable: false} + - match: {fields.object\.nested1.long.aggregatable: true} + - match: {fields.object\.nested1.long.indices: ["test3"]} + - is_false: fields.object\.nested1.long.non_searchable_indices + - is_false: fields.object\.nested1.long.non_aggregatable_indices + - match: {fields.object\.nested1.text.searchable: false} + - match: {fields.object\.nested1.text.aggregatable: false} + - match: {fields.object\.nested1.text.indices: ["test1", "test2"]} + - match: {fields.object\.nested1.text.non_searchable_indices: ["test1"]} + - is_false: fields.object\.nested1.text.non_aggregatable_indices + - match: {fields.object\.nested2.float.searchable: true} + - match: {fields.object\.nested2.float.aggregatable: false} + - match: {fields.object\.nested2.float.indices: ["test1", "test2"]} + - match: {fields.object\.nested2.float.non_aggregatable_indices: ["test1"]} + - is_false: fields.object\.nested2.float.non_searchable_indices + - match: {fields.object\.nested2.keyword.searchable: true} + - match: {fields.object\.nested2.keyword.aggregatable: false} + - match: {fields.object\.nested2.keyword.indices: ["test3"]} + - is_false: fields.object\.nested2.keyword.non_aggregatable_indices + - is_false: fields.object\.nested2.keyword.non_searchable_indices +--- +"Get prefix field caps": + - skip: + version: " - 5.99.99" + reason: this uses a new API that has been added in 6.0 + + - do: + field_caps: + index: _all + fields: "n*" + - match: {fields.number.double.searchable: true} + - match: {fields.number.double.aggregatable: true} + - match: {fields.number.double.indices: ["test1", "test2"]} + - is_false: fields.number.double.non_searchable_indices + - is_false: fields.number.double.non_aggregatable_indices + - match: {fields.number.long.searchable: true} + - match: {fields.number.long.aggregatable: true} + - match: {fields.number.long.indices: ["test3"]} + - is_false: fields.number.long.non_searchable_indices + - is_false: 
fields.number.long.non_aggregatable_indices diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.clear_cache/10_basic.yaml b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.clear_cache/10_basic.yaml index 3388d06ba1748..d8db152e979b0 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.clear_cache/10_basic.yaml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.clear_cache/10_basic.yaml @@ -2,3 +2,26 @@ "clear_cache test": - do: indices.clear_cache: {} + +--- +"clear_cache with request set to false": + - skip: + version: " - 5.3.99" + reason: this name was added in 5.4 + + - do: + indices.clear_cache: + request: false + +--- +"clear_cache with request_cache set to false": + - skip: + version: " - 5.3.99" + reason: request_cache was deprecated in 5.4.0 + features: "warnings" + + - do: + warnings: + - 'Deprecated field [request_cache] used, expected [request] instead' + indices.clear_cache: + request_cache: false diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.stats/10_index.yaml b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.stats/10_index.yaml index d3f4134c0f6c5..b7724e062836e 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.stats/10_index.yaml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.stats/10_index.yaml @@ -106,9 +106,9 @@ setup: version: " - 5.0.99" reason: strict stats handling does not exist in 5.0 - do: + catch: request indices.stats: metric: [ fieldata ] - ignore: 400 - match: { status: 400 } - match: { error.type: illegal_argument_exception } diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.validate_query/10_basic.yaml b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.validate_query/10_basic.yaml index 637ebd4253e12..29b1e664d61c0 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.validate_query/10_basic.yaml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.validate_query/10_basic.yaml @@ -6,9 +6,9 @@ setup: settings: number_of_replicas: 0 aliases: - alias_1: { + alias_1: "filter" : { "match_all" : {} } - } + --- "Validate query api": diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/mget/12_non_existent_index.yaml b/rest-api-spec/src/main/resources/rest-api-spec/test/mget/12_non_existent_index.yaml index fd0100b0d635d..0623464225072 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/mget/12_non_existent_index.yaml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/mget/12_non_existent_index.yaml @@ -21,7 +21,6 @@ - do: mget: body: - index: test_2 docs: - { _index: test_1, _type: test, _id: 1} diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/nodes.stats/10_basic.yaml b/rest-api-spec/src/main/resources/rest-api-spec/test/nodes.stats/10_basic.yaml index 039a24284ea1f..62664319d8a43 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/nodes.stats/10_basic.yaml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/nodes.stats/10_basic.yaml @@ -27,9 +27,9 @@ version: " - 5.0.99" reason: strict stats handling does not exist in 5.0 - do: + catch: request nodes.stats: metric: [ transprot ] - ignore: 400 - match: { status: 400 } - match: { error.type: illegal_argument_exception } diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/20_terms.yaml b/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/20_terms.yaml index 52379390d47b8..c9ba94cf61521 
100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/20_terms.yaml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/20_terms.yaml @@ -453,9 +453,9 @@ setup: - do: search: - body: { "size" : 0, "aggs" : { "str_terms" : { "terms" : { "field" : "str", "include" : {"partition":0, "num_partitions":2 } } } } } + body: { "size" : 0, "aggs" : { "str_terms" : { "terms" : { "field" : "str", "include" : {"partition": 0, "num_partitions": 2 } } } } } - - match: { hits.total: 3 } + - match: { hits.total : 3 } - length: { aggregations.str_terms.buckets: 1 } @@ -467,7 +467,7 @@ setup: - do: search: - body: { "size" : 0, "aggs" : { "str_terms" : { "terms" : { "field" : "str", "include" : {"partition":1, "num_partitions":2 } } } } } + body: { "size" : 0, "aggs" : { "str_terms" : { "terms" : { "field" : "str", "include" : {"partition": 1, "num_partitions": 2 } } } } } - match: { hits.total: 3 } @@ -512,7 +512,7 @@ setup: - do: search: - body: { "size" : 0, "aggs" : { "str_terms" : { "terms" : { "field" : "integer", "include" : {"partition":0, "num_partitions":2 } } } } } + body: { "size" : 0, "aggs" : { "str_terms" : { "terms" : { "field" : "integer", "include" : {"partition": 0, "num_partitions": 2 } } } } } - match: { hits.total: 3 } @@ -524,7 +524,7 @@ setup: - do: search: - body: { "size" : 0, "aggs" : { "str_terms" : { "terms" : { "field" : "integer", "include" : {"partition":1, "num_partitions":2 } } } } } + body: { "size" : 0, "aggs" : { "str_terms" : { "terms" : { "field" : "integer", "include" : {"partition": 1, "num_partitions": 2 } } } } } - match: { hits.total: 3 } diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/search/70_response_filtering.yaml b/rest-api-spec/src/main/resources/rest-api-spec/test/search/70_response_filtering.yaml index bff9a169604de..71eb5665bea43 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/search/70_response_filtering.yaml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/search/70_response_filtering.yaml @@ -43,7 +43,7 @@ filter_path: "took" body: "{ \"query\": { \"match_all\": {} } }" - - is_true: took + - gte: { took: 0 } - is_false: _shards.total - is_false: hits.total - is_false: hits.hits.0._index diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/search_shards/10_basic.yaml b/rest-api-spec/src/main/resources/rest-api-spec/test/search_shards/10_basic.yaml index f94ba86d914bb..d48d50887569c 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/search_shards/10_basic.yaml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/search_shards/10_basic.yaml @@ -57,7 +57,10 @@ - length: { shards: 1 } - match: { shards.0.0.index: test_index } - - match: { indices.test_index: {aliases: [test_alias_filter_1], filter: { term : { field: { value: value1, boost: 1.0}}}}} + - match: { indices.test_index.aliases: [test_alias_filter_1] } + - match: { indices.test_index.filter.term.field.value: value1 } + - lte: { indices.test_index.filter.term.field.boost: 1.0 } + - gte: { indices.test_index.filter.term.field.boost: 1.0 } - do: search_shards: @@ -65,4 +68,14 @@ - length: { shards: 1 } - match: { shards.0.0.index: test_index } - - match: { indices.test_index: {aliases: [test_alias_filter_1, test_alias_filter_2], filter: { bool: { should : [{ term : { field: { value: value1, boost: 1.0}}}, { term : { field: { value: value2, boost: 1.0}}}], adjust_pure_negative: true, boost: 1.0, disable_coord: false }}}} + - match: { indices.test_index.aliases: 
[test_alias_filter_1, test_alias_filter_2]} + - match: { indices.test_index.filter.bool.should.0.term.field.value: value1 } + - lte: { indices.test_index.filter.bool.should.0.term.field.boost: 1.0 } + - gte: { indices.test_index.filter.bool.should.0.term.field.boost: 1.0 } + - match: { indices.test_index.filter.bool.should.1.term.field.value: value2} + - lte: { indices.test_index.filter.bool.should.1.term.field.boost: 1.0 } + - gte: { indices.test_index.filter.bool.should.1.term.field.boost: 1.0 } + - match: { indices.test_index.filter.bool.adjust_pure_negative: true} + - match: { indices.test_index.filter.bool.disable_coord: false} + - lte: { indices.test_index.filter.bool.boost: 1.0 } + - gte: { indices.test_index.filter.bool.boost: 1.0 } diff --git a/settings.gradle b/settings.gradle index c0996f86a04fc..8e6d3d80a0e0c 100644 --- a/settings.gradle +++ b/settings.gradle @@ -15,6 +15,7 @@ List projects = [ 'client:benchmark', 'benchmarks', 'distribution:integ-test-zip', + 'distribution:bwc-zip', 'distribution:zip', 'distribution:tar', 'distribution:deb', diff --git a/test/framework/src/main/java/org/elasticsearch/bootstrap/BootstrapForTesting.java b/test/framework/src/main/java/org/elasticsearch/bootstrap/BootstrapForTesting.java index fe833470ad255..c88b56abdd074 100644 --- a/test/framework/src/main/java/org/elasticsearch/bootstrap/BootstrapForTesting.java +++ b/test/framework/src/main/java/org/elasticsearch/bootstrap/BootstrapForTesting.java @@ -22,6 +22,7 @@ import com.carrotsearch.randomizedtesting.RandomizedRunner; import org.apache.lucene.util.LuceneTestCase; import org.elasticsearch.SecureSM; +import org.elasticsearch.common.Booleans; import org.elasticsearch.common.Strings; import org.elasticsearch.common.SuppressForbidden; import org.elasticsearch.common.io.FileSystemUtils; @@ -119,7 +120,9 @@ public class BootstrapForTesting { perms.add(new FilePermission(System.getProperty("tests.config"), "read,readlink")); } // jacoco coverage output file - if (Boolean.getBoolean("tests.coverage")) { + final boolean testsCoverage = + Booleans.parseBoolean(System.getProperty("tests.coverage", "false")); + if (testsCoverage) { Path coverageDir = PathUtils.get(System.getProperty("tests.coverage.dir")); perms.add(new FilePermission(coverageDir.resolve("jacoco.exec").toString(), "read,write")); // in case we get fancy and use the -integration goals later: @@ -182,7 +185,7 @@ static Map getPluginPermissions() throws Exception { } // compute classpath minus obvious places, all other jars will get the permission. 
-        Set<URL> codebases = new HashSet<>(Arrays.asList(parseClassPathWithSymlinks()));
+        Set<URL> codebases = new HashSet<>(parseClassPathWithSymlinks());
         Set<URL> excluded = new HashSet<>(Arrays.asList(
                 // es core
                 Bootstrap.class.getProtectionDomain().getCodeSource().getLocation(),
@@ -200,7 +203,7 @@ static Map<String, Policy> getPluginPermissions() throws Exception {
             // parse each policy file, with codebase substitution from the classpath
             final List<Policy> policies = new ArrayList<>();
             for (URL policyFile : pluginPolicies) {
-                policies.add(Security.readPolicy(policyFile, codebases.toArray(new URL[codebases.size()])));
+                policies.add(Security.readPolicy(policyFile, codebases));
             }

             // consult each policy file for those codebases
@@ -227,10 +230,14 @@ public boolean implies(ProtectionDomain domain, Permission permission) {
      * this is for matching the toRealPath() in the code where we have a proper plugin structure
      */
     @SuppressForbidden(reason = "does evil stuff with paths and urls because devs and jenkins do evil stuff with paths and urls")
-    static URL[] parseClassPathWithSymlinks() throws Exception {
-        URL raw[] = JarHell.parseClassPath();
-        for (int i = 0; i < raw.length; i++) {
-            raw[i] = PathUtils.get(raw[i].toURI()).toRealPath().toUri().toURL();
+    static Set<URL> parseClassPathWithSymlinks() throws Exception {
+        Set<URL> raw = JarHell.parseClassPath();
+        Set<URL> cooked = new HashSet<>(raw.size());
+        for (URL url : raw) {
+            boolean added = cooked.add(PathUtils.get(url.toURI()).toRealPath().toUri().toURL());
+            if (added == false) {
+                throw new IllegalStateException("Duplicate in classpath after resolving symlinks: " + url);
+            }
         }
         return raw;
     }
diff --git a/test/framework/src/main/java/org/elasticsearch/script/MockScriptEngine.java b/test/framework/src/main/java/org/elasticsearch/script/MockScriptEngine.java
index e036676677f9b..c2447b4504ee8 100644
--- a/test/framework/src/main/java/org/elasticsearch/script/MockScriptEngine.java
+++ b/test/framework/src/main/java/org/elasticsearch/script/MockScriptEngine.java
@@ -32,6 +32,8 @@ import java.util.Map;
 import java.util.function.Function;

+import static java.util.Collections.emptyMap;
+
 /**
  * A mocked script engine that can be used for testing purpose.
  *
@@ -215,4 +217,9 @@ public boolean needsScores() {
             return true;
         }
     }
+
+    public static Script mockInlineScript(final String script) {
+        return new Script(ScriptType.INLINE, "mock", script, emptyMap());
+    }
+
 }
diff --git a/test/framework/src/main/java/org/elasticsearch/test/ESTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/ESTestCase.java
index 1ea96e6f54808..fa659e06fb214 100644
--- a/test/framework/src/main/java/org/elasticsearch/test/ESTestCase.java
+++ b/test/framework/src/main/java/org/elasticsearch/test/ESTestCase.java
@@ -1093,9 +1093,15 @@ public static void assertEquals(StackTraceElement expected, StackTraceElement ac
     }

     protected static long spinForAtLeastOneMillisecond() {
-        long nanosecondsInMillisecond = TimeUnit.NANOSECONDS.convert(1, TimeUnit.MILLISECONDS);
-        // force at least one millisecond to elapse, but ensure the
-        // clock has enough resolution to observe the passage of time
+        return spinForAtLeastNMilliseconds(1);
+    }
+
+    protected static long spinForAtLeastNMilliseconds(final long ms) {
+        long nanosecondsInMillisecond = TimeUnit.NANOSECONDS.convert(ms, TimeUnit.MILLISECONDS);
+        /*
+         * Force at least ms milliseconds to elapse, but ensure the clock has enough resolution to
+         * observe the passage of time.
+ */ long start = System.nanoTime(); long elapsed; while ((elapsed = (System.nanoTime() - start)) < nanosecondsInMillisecond) { diff --git a/test/framework/src/main/java/org/elasticsearch/test/tasks/MockTaskManager.java b/test/framework/src/main/java/org/elasticsearch/test/tasks/MockTaskManager.java index b07e7315b88cf..bccbd537a53b4 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/tasks/MockTaskManager.java +++ b/test/framework/src/main/java/org/elasticsearch/test/tasks/MockTaskManager.java @@ -25,8 +25,8 @@ import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.tasks.Task; +import org.elasticsearch.tasks.TaskAwareRequest; import org.elasticsearch.tasks.TaskManager; -import org.elasticsearch.transport.TransportRequest; import java.util.Collection; import java.util.concurrent.CopyOnWriteArrayList; @@ -46,7 +46,7 @@ public MockTaskManager(Settings settings) { } @Override - public Task register(String type, String action, TransportRequest request) { + public Task register(String type, String action, TaskAwareRequest request) { Task task = super.register(type, action, request); if (task != null) { for (MockTaskManagerListener listener : listeners) { diff --git a/test/framework/src/main/java/org/elasticsearch/transport/MockTcpTransport.java b/test/framework/src/main/java/org/elasticsearch/transport/MockTcpTransport.java index 1b2384ba5fc77..f9e5ff8981e31 100644 --- a/test/framework/src/main/java/org/elasticsearch/transport/MockTcpTransport.java +++ b/test/framework/src/main/java/org/elasticsearch/transport/MockTcpTransport.java @@ -20,6 +20,7 @@ import org.apache.lucene.util.IOUtils; import org.elasticsearch.Version; +import org.elasticsearch.action.ActionListener; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.io.stream.BytesStreamOutput; @@ -79,7 +80,7 @@ public class MockTcpTransport extends TcpTransport private final Set openChannels = new HashSet<>(); - static { + static { ConnectionProfile.Builder builder = new ConnectionProfile.Builder(); builder.addConnections(1, TransportRequestOptions.Type.BULK, @@ -129,11 +130,7 @@ protected MockChannel bind(final String name, InetSocketAddress address) throws executor.execute(new AbstractRunnable() { @Override public void onFailure(Exception e) { - try { - onException(serverMockChannel, e); - } catch (IOException ex) { - logger.warn("failed on handling exception", ex); - } + onException(serverMockChannel, e); } @Override @@ -242,15 +239,18 @@ protected boolean isOpen(MockChannel mockChannel) { } @Override - protected void sendMessage(MockChannel mockChannel, BytesReference reference, Runnable sendListener) throws IOException { - synchronized (mockChannel) { - final Socket socket = mockChannel.activeChannel; - OutputStream outputStream = new BufferedOutputStream(socket.getOutputStream()); - reference.writeTo(outputStream); - outputStream.flush(); - } - if (sendListener != null) { - sendListener.run(); + protected void sendMessage(MockChannel mockChannel, BytesReference reference, ActionListener listener) { + try { + synchronized (mockChannel) { + final Socket socket = mockChannel.activeChannel; + OutputStream outputStream = new BufferedOutputStream(socket.getOutputStream()); + reference.writeTo(outputStream); + outputStream.flush(); + } + listener.onResponse(mockChannel); + } catch (IOException e) { + listener.onFailure(e); + onException(mockChannel, e); } } diff 
--git a/test/framework/src/test/java/org/elasticsearch/test/test/InternalTestClusterTests.java b/test/framework/src/test/java/org/elasticsearch/test/test/InternalTestClusterTests.java index fd4496a86789b..fae952c856fae 100644 --- a/test/framework/src/test/java/org/elasticsearch/test/test/InternalTestClusterTests.java +++ b/test/framework/src/test/java/org/elasticsearch/test/test/InternalTestClusterTests.java @@ -344,6 +344,7 @@ private Path[] getNodePaths(InternalTestCluster cluster, String name) { } } + @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/23728") public void testDifferentRolesMaintainPathOnRestart() throws Exception { final Path baseDir = createTempDir(); final int numNodes = 5;