diff --git a/docs/apm/images/apm-service-map-anomaly.png b/docs/apm/images/apm-service-map-anomaly.png
new file mode 100644
index 0000000000000..b661e8f09d1a1
Binary files /dev/null and b/docs/apm/images/apm-service-map-anomaly.png differ
diff --git a/docs/apm/images/green-service.png b/docs/apm/images/green-service.png
new file mode 100644
index 0000000000000..bbc00a3543b08
Binary files /dev/null and b/docs/apm/images/green-service.png differ
diff --git a/docs/apm/images/red-service.png b/docs/apm/images/red-service.png
new file mode 100644
index 0000000000000..be7a62b1774ab
Binary files /dev/null and b/docs/apm/images/red-service.png differ
diff --git a/docs/apm/images/service-maps.png b/docs/apm/images/service-maps.png
index 454ae9bb720fb..d4272e8999991 100644
Binary files a/docs/apm/images/service-maps.png and b/docs/apm/images/service-maps.png differ
diff --git a/docs/apm/images/yellow-service.png b/docs/apm/images/yellow-service.png
new file mode 100644
index 0000000000000..43afd6250be72
Binary files /dev/null and b/docs/apm/images/yellow-service.png differ
diff --git a/docs/apm/machine-learning.asciidoc b/docs/apm/machine-learning.asciidoc
index 9d347fc4f1111..03f7e13c98579 100644
--- a/docs/apm/machine-learning.asciidoc
+++ b/docs/apm/machine-learning.asciidoc
@@ -6,13 +6,20 @@
Integrate with machine learning
++++
-The Machine Learning integration will initiate a new job predefined to calculate anomaly scores on transaction response times.
-The response time graph will show the expected bounds and add an annotation when the anomaly score is 75 or above.
-Jobs can be created per transaction type, and based on the average response time.
-Manage jobs in the *Machine Learning jobs management*.
+The Machine Learning integration initiates a new job predefined to calculate anomaly scores on APM transaction durations.
+Jobs can be created per transaction type, and are based on the service's average response time.
+
+After a machine learning job is created, results are shown in two places:
+
+The transaction duration graph will show the expected bounds and add an annotation when the anomaly score is 75 or above.
+
+[role="screenshot"]
+image::apm/images/apm-ml-integration.png[Example view of anomaly scores on response times in the APM app]
+
+Service maps will display a color-coded anomaly indicator based on the detected anomaly score.
[role="screenshot"]
-image::apm/images/apm-ml-integration.png[Example view of anomaly scores on response times in APM app in Kibana]
+image::apm/images/apm-service-map-anomaly.png[Example view of anomaly scores on service maps in the APM app]
[float]
[[create-ml-integration]]
@@ -20,8 +27,10 @@ image::apm/images/apm-ml-integration.png[Example view of anomaly scores on respo
To enable machine learning anomaly detection, first choose a service to monitor.
Then, select **Integrations** > **Enable ML anomaly detection** and click **Create job**.
+
That's it! After a few minutes, the job will begin calculating results;
it might take additional time for results to appear on your graph.
+Jobs can be managed in *Machine Learning jobs management*.
APM specific anomaly detection wizards are also available for certain Agents.
See the machine learning {ml-docs}/ootb-ml-jobs-apm.html[APM anomaly detection configurations] for more information.
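A minimal sketch of how the "75 or above" annotation threshold maps onto the underlying data, for readers who want to inspect raw scores: ML anomaly results live in the standard `.ml-anomalies-*` indices, with per-anomaly records carrying `record_score`. The job id and node URL below are hypothetical.

```typescript
import { Client } from '@elastic/elasticsearch';

const client = new Client({ node: 'http://localhost:9200' });

// Fetch anomaly records at or above the score that triggers an
// annotation on the transaction duration graph.
async function findSevereAnomalies(jobId: string) {
  const { body } = await client.search({
    index: '.ml-anomalies-*',
    body: {
      query: {
        bool: {
          filter: [
            { term: { job_id: jobId } },          // the APM anomaly detection job
            { term: { result_type: 'record' } },  // per-anomaly result documents
            { range: { record_score: { gte: 75 } } },
          ],
        },
      },
    },
  });
  return body.hits.hits;
}
```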
diff --git a/docs/apm/service-maps.asciidoc b/docs/apm/service-maps.asciidoc
index be86b9d522ac5..3a6a96fca9d09 100644
--- a/docs/apm/service-maps.asciidoc
+++ b/docs/apm/service-maps.asciidoc
@@ -9,7 +9,9 @@ Please use Chrome or Firefox if available.
A service map is a real-time visual representation of the instrumented services in your application's architecture.
It shows you how these services are connected, along with high-level metrics like average transaction duration,
-requests per minute, and errors per minute, that allow you to quickly assess the status of your services.
+requests per minute, and errors per minute.
+If enabled, service maps also integrate with machine learning to provide real-time health indicators based on anomaly detection scores.
+All of these features can help you quickly and visually assess the status and health of your services.
We currently surface two types of service maps:
@@ -52,6 +54,26 @@ Additional filters are not currently available for service maps.
[role="screenshot"]
image::apm/images/service-maps-java.png[Example view of service maps with Java highlighted in the APM app in Kibana]
+[float]
+[[service-map-anomaly-detection]]
+=== Anomaly detection with machine learning
+
+Machine learning jobs can be created to calculate anomaly scores on APM transaction durations within the selected service.
+When these jobs are active, service maps will display a color-coded anomaly indicator based on the detected anomaly score:
+
+[horizontal]
+image:apm/images/green-service.png[APM green service]:: Max anomaly score **<=25**. Service is healthy.
+image:apm/images/yellow-service.png[APM yellow service]:: Max anomaly score **26-74**. Anomalous activity detected. Service may be degraded.
+image:apm/images/red-service.png[APM red service]:: Max anomaly score **>=75**. Anomalous activity detected. Service is unhealthy.
+
+[role="screenshot"]
+image::apm/images/apm-service-map-anomaly.png[Example view of anomaly scores on service maps in the APM app]
+
+If an anomaly has been detected, click *view anomalies* to view the anomaly detection metric viewer in the Machine Learning app.
+This time series analysis will display additional details on the severity and time of the detected anomalies.
+
+To learn how to create a machine learning job, see <<create-ml-integration>>.
+
[float]
[[service-maps-legend]]
=== Legend
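The indicator table above reduces to a small severity rule. A minimal sketch of that rule (names and return values are illustrative, not the APM app's code):

```typescript
type ServiceHealth = 'healthy' | 'degraded' | 'unhealthy';

// Mirrors the color coding: green <=25, yellow 26-74, red >=75.
function healthFromMaxAnomalyScore(maxScore: number): ServiceHealth {
  if (maxScore <= 25) return 'healthy';   // green service
  if (maxScore < 75) return 'degraded';   // yellow service
  return 'unhealthy';                     // red service
}
```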
diff --git a/docs/apm/spans.asciidoc b/docs/apm/spans.asciidoc
index 2eed339160fc4..c35fb115d2db4 100644
--- a/docs/apm/spans.asciidoc
+++ b/docs/apm/spans.asciidoc
@@ -1,38 +1,53 @@
[role="xpack"]
[[spans]]
-=== Span timeline
+=== Trace sample timeline
-TIP: A {apm-overview-ref-v}/transaction-spans.html[span] is the duration of a single event.
-Spans are automatically captured by APM agents, and you can also define custom spans.
-Each span has a type and is defined by a different color in the timeline/waterfall visualization.
-
-The span timeline visualization is a bird's-eye view of what your application was doing while it was trying to respond to the request that came in.
+The trace sample timeline visualization is a bird's-eye view of what your application was doing while it was trying to respond to a request.
This makes it useful for visualizing where the selected transaction spent most of its time.
[role="screenshot"]
image::apm/images/apm-transaction-sample.png[Example of distributed trace colors in the APM app in Kibana]
View a span in detail by clicking on it in the timeline waterfall.
-When you click on an SQL Select database query,
+For example, when you click on an SQL Select database query,
the information displayed includes the actual SQL that was executed, how long it took,
and the percentage of the trace's total time.
You also get a stack trace, which shows the SQL query in your code.
Finally, APM knows which files are your code and which are just modules or libraries that you've installed.
These library frames will be minimized by default in order to show you the most relevant stack trace.
+TIP: A {apm-overview-ref-v}/transaction-spans.html[span] is the duration of a single event.
+Spans are automatically captured by APM agents, and you can also define custom spans.
+Each span has a type and is defined by a different color in the timeline/waterfall visualization.
+
[role="screenshot"]
image::apm/images/apm-span-detail.png[Example view of a span detail in the APM app in Kibana]
-If your span timeline is colorful, it's indicative of a <<distributed-tracing,distributed trace>>.
+[float]
+[[distributed-tracing]]
+==== Distributed tracing
+
+If your trace sample timeline is colorful, it's indicative of a distributed trace.
Services in a distributed trace are separated by color and listed in the order they occur.
[role="screenshot"]
image::apm/images/apm-services-trace.png[Example of distributed trace colors in the APM app in Kibana]
-Don't forget; a distributed trace includes more than one transaction.
+As application architectures shift from monolithic to more distributed, service-based architectures,
+distributed tracing has become a crucial feature of modern application performance monitoring.
+It automatically traces requests through your service architecture and visualizes those traces together in a single view in the APM app.
+By following a request from the initial web request to your front-end service through to the queries made to your back-end services,
+you can find possible bottlenecks throughout your application much more easily and quickly.
+
+[role="screenshot"]
+image::apm/images/apm-distributed-tracing.png[Example view of the distributed tracing in APM app in Kibana]
+
+Don't forget: by definition, a distributed trace includes more than one transaction.
When viewing these distributed traces in the timeline waterfall, you'll see this image:apm/images/transaction-icon.png[APM icon] icon,
which indicates the next transaction in the trace.
These transactions can be expanded and viewed in detail by clicking on them.
After exploring these traces,
you can return to the full trace by clicking *View full trace*.
+
+TIP: Distributed tracing is supported by all APM agents, and there's no additional configuration needed.
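Since the waterfall stitches together transactions and spans from several services, it helps to keep the trace model in mind. A rough sketch of the fields involved, modeled on the Elastic APM data model and simplified for illustration:

```typescript
// Every item in a distributed trace shares a trace id; parent ids link
// spans to their transaction (and child transactions to the span that
// called them), which is how the waterfall is assembled.
interface WaterfallItem {
  traceId: string;     // shared by all services in the trace
  id: string;          // this transaction's or span's own id
  parentId?: string;   // absent only on the root transaction
  serviceName: string; // used to color-code services in the waterfall
  type: string;        // e.g. a database span for the SQL Select example above
  durationUs: number;  // duration in microseconds
}
```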
diff --git a/docs/apm/traces.asciidoc b/docs/apm/traces.asciidoc
index 8eef3d9bed4db..52b4b618de466 100644
--- a/docs/apm/traces.asciidoc
+++ b/docs/apm/traces.asciidoc
@@ -4,7 +4,7 @@
TIP: Traces link together related transactions to show an end-to-end performance of how a request was served
and which services were part of it.
-In addition to the Traces overview, you can view your application traces in the <<spans,span timeline>>.
+In addition to the Traces overview, you can view your application traces in the <<spans,trace sample timeline>>.
The *Traces* overview displays the entry transaction for all traces in your application.
If you're using <<distributed-tracing,distributed tracing>>, this view is key to finding the critical paths within your application.
@@ -17,25 +17,3 @@ If there's a particular endpoint you're worried about, you can click on it to vi
[role="screenshot"]
image::apm/images/apm-traces.png[Example view of the Traces overview in APM app in Kibana]
-
-[float]
-[[distributed-tracing]]
-==== Distributed tracing
-
-Elastic APM supports distributed tracing.
-Distributed tracing is a key feature of modern application performance monitoring as application architectures are shifting from monolithic to more distributed,
-service-based architectures.
-
-Distributed tracing allows APM users to automatically trace requests all the way through the service architecture,
-and visualize those traces in one single view in the APM app.
-This is accomplished by tracing all of the requests, from the initial web request to your front-end service,
-to queries made to your back-end services.
-This makes finding possible bottlenecks throughout your application much easier and faster.
-
-By definition, a distributed trace includes more than one transaction.
-You can use the <<spans,span timeline>> to view a waterfall display of all of the transactions from individual services that are connected in a trace.
-
-[role="screenshot"]
-image::apm/images/apm-distributed-tracing.png[Example view of the distributed tracing in APM app in Kibana]
-
-TIP: Distributed tracing is supported by all APM agents, and there's no additional configuration needed.
\ No newline at end of file
diff --git a/docs/apm/transactions.asciidoc b/docs/apm/transactions.asciidoc
index 2e1022e6d684c..8012c9108ca5e 100644
--- a/docs/apm/transactions.asciidoc
+++ b/docs/apm/transactions.asciidoc
@@ -95,7 +95,7 @@ It's the requests on the right, the ones taking longer than average, that we pro
When you select one of these buckets,
you're presented with up to ten trace samples.
-Each sample has a span timeline waterfall that shows what a typical request in that bucket was doing.
+Each sample has a trace timeline waterfall that shows what a typical request in that bucket was doing.
By investigating this timeline waterfall, we can hopefully determine _why_ this request was slow and then implement a fix.
[role="screenshot"]
diff --git a/docs/development/plugins/data/public/kibana-plugin-plugins-data-public.indexpattern.md b/docs/development/plugins/data/public/kibana-plugin-plugins-data-public.indexpattern.md
index 7cd7eb222fadd..589069e63e7c9 100644
--- a/docs/development/plugins/data/public/kibana-plugin-plugins-data-public.indexpattern.md
+++ b/docs/development/plugins/data/public/kibana-plugin-plugins-data-public.indexpattern.md
@@ -29,7 +29,6 @@ export declare class IndexPattern implements IIndexPattern
| [id](./kibana-plugin-plugins-data-public.indexpattern.id.md) | | <code>string</code> | |
| [intervalName](./kibana-plugin-plugins-data-public.indexpattern.intervalname.md) | | <code>string &#124; undefined &#124; null</code> | |
| [metaFields](./kibana-plugin-plugins-data-public.indexpattern.metafields.md) | | <code>string[]</code> | |
-| [routes](./kibana-plugin-plugins-data-public.indexpattern.routes.md) | | <code>{ edit: string; addField: string; indexedFields: string; scriptedFields: string; sourceFilters: string; }</code> | |
| [timeFieldName](./kibana-plugin-plugins-data-public.indexpattern.timefieldname.md) | | <code>string &#124; undefined</code> | |
| [title](./kibana-plugin-plugins-data-public.indexpattern.title.md) | | <code>string</code> | |
| [type](./kibana-plugin-plugins-data-public.indexpattern.type.md) | | <code>string</code> | |
diff --git a/docs/development/plugins/data/public/kibana-plugin-plugins-data-public.indexpattern.routes.md b/docs/development/plugins/data/public/kibana-plugin-plugins-data-public.indexpattern.routes.md
deleted file mode 100644
index 81e7abd4f9609..0000000000000
--- a/docs/development/plugins/data/public/kibana-plugin-plugins-data-public.indexpattern.routes.md
+++ /dev/null
@@ -1,17 +0,0 @@
-
-
-[Home](./index.md) > [kibana-plugin-plugins-data-public](./kibana-plugin-plugins-data-public.md) > [IndexPattern](./kibana-plugin-plugins-data-public.indexpattern.md) > [routes](./kibana-plugin-plugins-data-public.indexpattern.routes.md)
-
-## IndexPattern.routes property
-
-Signature:
-
-```typescript
-get routes(): {
- edit: string;
- addField: string;
- indexedFields: string;
- scriptedFields: string;
- sourceFilters: string;
- };
-```
diff --git a/docs/development/plugins/data/public/kibana-plugin-plugins-data-public.indexpatterns.md b/docs/development/plugins/data/public/kibana-plugin-plugins-data-public.indexpatterns.md
index fa97666a61b93..39c8b0a700c8a 100644
--- a/docs/development/plugins/data/public/kibana-plugin-plugins-data-public.indexpatterns.md
+++ b/docs/development/plugins/data/public/kibana-plugin-plugins-data-public.indexpatterns.md
@@ -18,7 +18,6 @@ indexPatterns: {
validate: typeof validateIndexPattern;
getFromSavedObject: typeof getFromSavedObject;
flattenHitWrapper: typeof flattenHitWrapper;
- getRoutes: typeof getRoutes;
formatHitProvider: typeof formatHitProvider;
}
```
diff --git a/packages/kbn-dev-utils/src/precommit_hook/cli.ts b/packages/kbn-dev-utils/src/precommit_hook/cli.ts
new file mode 100644
index 0000000000000..a83e8c2b193d9
--- /dev/null
+++ b/packages/kbn-dev-utils/src/precommit_hook/cli.ts
@@ -0,0 +1,50 @@
+/*
+ * Licensed to Elasticsearch B.V. under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch B.V. licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+import Path from 'path';
+import { chmod, writeFile } from 'fs';
+import { promisify } from 'util';
+
+import { run } from '../run';
+import { REPO_ROOT } from '../repo_root';
+import { SCRIPT_SOURCE } from './script_source';
+import { getGitDir } from './get_git_dir';
+
+const chmodAsync = promisify(chmod);
+const writeFileAsync = promisify(writeFile);
+
+run(
+ async ({ log }) => {
+ try {
+ const gitDir = await getGitDir();
+ const installPath = Path.resolve(REPO_ROOT, gitDir, 'hooks/pre-commit');
+
+ log.info(`Registering Kibana pre-commit git hook...`);
+ await writeFileAsync(installPath, SCRIPT_SOURCE);
+ await chmodAsync(installPath, 0o755);
+ log.success(`Kibana pre-commit git hook was installed successfully.`);
+ } catch (e) {
+      log.error(`Kibana pre-commit git hook was not installed because an error occurred.`);
+ throw e;
+ }
+ },
+ {
+ description: 'Register git hooks in the local repo',
+ }
+);
diff --git a/src/dev/register_git_hook/index.js b/packages/kbn-dev-utils/src/precommit_hook/get_git_dir.ts
similarity index 70%
rename from src/dev/register_git_hook/index.js
rename to packages/kbn-dev-utils/src/precommit_hook/get_git_dir.ts
index 6089256423ff6..5ca7d67d0d4ea 100644
--- a/src/dev/register_git_hook/index.js
+++ b/packages/kbn-dev-utils/src/precommit_hook/get_git_dir.ts
@@ -17,4 +17,16 @@
* under the License.
*/
-export { registerPrecommitGitHook } from './register_git_hook';
+import execa from 'execa';
+
+import { REPO_ROOT } from '../repo_root';
+
+// Retrieves the correct location for the .git dir for
+// every git setup (including git worktree)
+export async function getGitDir() {
+ return (
+ await execa('git', ['rev-parse', '--git-common-dir'], {
+ cwd: REPO_ROOT,
+ })
+ ).stdout.trim();
+}
diff --git a/packages/kbn-dev-utils/src/precommit_hook/script_source.ts b/packages/kbn-dev-utils/src/precommit_hook/script_source.ts
new file mode 100644
index 0000000000000..61b4552f6eaef
--- /dev/null
+++ b/packages/kbn-dev-utils/src/precommit_hook/script_source.ts
@@ -0,0 +1,117 @@
+/*
+ * Licensed to Elasticsearch B.V. under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch B.V. licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+import os from 'os';
+
+import normalizePath from 'normalize-path';
+
+const HOME_DIR = normalizePath(os.homedir());
+
+export const SCRIPT_SOURCE = `#!/usr/bin/env bash
+#
+# ** THIS IS AN AUTO-GENERATED FILE **
+# ** PLEASE DO NOT CHANGE IT MANUALLY **
+#
+# GENERATED BY \`node scripts/register_git_hook\`
+# IF YOU WANT TO CHANGE SOMETHING IN THIS SCRIPT
+# PLEASE RE-RUN 'yarn kbn bootstrap' or 'node scripts/register_git_hook'
+
+# pre-commit script takes zero arguments: https://git-scm.com/docs/githooks#_pre_commit
+
+set -euo pipefail
+
+# Make it possible to terminate the pre-commit hook
+# with ctrl-c so nothing else happens or gets
+# sent to the output.
+#
+# The correct exit code in that situation,
+# according to the Linux Documentation Project, is 130
+# https://www.tldp.org/LDP/abs/html/exitcodes.html
+trap "exit 130" INT
+
+has_node() {
+ command -v node >/dev/null 2>&1
+}
+
+has_nvm() {
+ command -v nvm >/dev/null 2>&1
+}
+
+try_load_node_from_nvm_paths () {
+ # If nvm is not loaded, load it
+ has_node || {
+ NVM_SH="${HOME_DIR}/.nvm/nvm.sh"
+
+ if [ "${process.platform}" == "darwin" ] && [ -s "$(brew --prefix nvm)/nvm.sh" ]; then
+ NVM_SH="$(brew --prefix nvm)/nvm.sh"
+ fi
+
+ export NVM_DIR="${HOME_DIR}/.nvm"
+
+ [ -s "$NVM_SH" ] && \. "$NVM_SH"
+
+ # If nvm has been loaded correctly, use project .nvmrc
+ has_nvm && nvm use
+ }
+}
+
+extend_user_path() {
+ if [ "${process.platform}" == "win32" ]; then
+ export PATH="$PATH:/c/Program Files/nodejs"
+ else
+ export PATH="$PATH:/usr/local/bin:/usr/local"
+ try_load_node_from_nvm_paths
+ fi
+}
+
+# Extend the path with common locations for node
+# in order to make the hook work in git GUI apps
+extend_user_path
+
+# Check if we have node js bin in path
+has_node || {
+  echo "Can't find the node bin in the PATH. Please update the PATH to proceed."
+  echo "If your PATH already has the node bin, maybe you are using some git GUI app not launched from the shell."
+  echo "In order to proceed, you need to configure the PATH used by the application that launches your git GUI app."
+ echo "If you are running macOS, you can do that using:"
+ echo "'sudo launchctl config user path /usr/local/bin:/usr/bin:/bin:/usr/sbin:/sbin'"
+
+ exit 1
+}
+
+execute_precommit_hook() {
+ node scripts/precommit_hook || return 1
+
+ PRECOMMIT_FILE="./.git/hooks/pre-commit.local"
+ if [ -x "\${PRECOMMIT_FILE}" ]; then
+ echo "Executing local precommit hook found in \${PRECOMMIT_FILE}"
+ "$PRECOMMIT_FILE" || return 1
+ fi
+}
+
+execute_precommit_hook || {
+ echo "Pre-commit hook failed (add --no-verify to bypass)";
+ echo ' For eslint failures you can try running \`node scripts/precommit_hook --fix\`';
+ exit 1;
+}
+
+exit 0
+`;
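Note that `SCRIPT_SOURCE` is a plain template literal: `${HOME_DIR}` and `${process.platform}` are interpolated once, when the module is imported, so the bash file written to the hooks directory contains concrete values, while the escaped `\${PRECOMMIT_FILE}` sequences survive as real shell variables. A small sketch for inspecting the generated script before installing it:

```typescript
import { SCRIPT_SOURCE } from './script_source';

// Print the start of the exact bash source that cli.ts will write
// to the pre-commit hook on this machine.
console.log(SCRIPT_SOURCE.split('\n').slice(0, 12).join('\n'));
```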
diff --git a/packages/kbn-optimizer/src/worker/webpack.config.ts b/packages/kbn-optimizer/src/worker/webpack.config.ts
index 928acdda28e71..369ceb5819e0e 100644
--- a/packages/kbn-optimizer/src/worker/webpack.config.ts
+++ b/packages/kbn-optimizer/src/worker/webpack.config.ts
@@ -135,9 +135,9 @@ export function getWebpackConfig(bundle: Bundle, worker: WorkerConfig) {
// or which have require() statements that should be ignored because the file is
// already bundled with all its necessary dependencies
noParse: [
- /[\///]node_modules[\///]elasticsearch-browser[\///]/,
- /[\///]node_modules[\///]lodash[\///]index\.js$/,
- /[\///]node_modules[\///]vega-lib[\///]build[\///]vega\.js$/,
+ /[\/\\]node_modules[\/\\]elasticsearch-browser[\/\\]/,
+ /[\/\\]node_modules[\/\\]lodash[\/\\]index\.js$/,
+ /[\/\\]node_modules[\/\\]vega-lib[\/\\]build[\/\\]vega\.js$/,
],
rules: [
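The corrected character class matters on Windows, where module paths use backslashes: the old class `[\///]` contained only forward slashes (an escaped `/` plus two literal ones), so the patterns never matched `\`-separated paths. A quick check of old and new patterns against both separators:

```typescript
const noParseLodash = /[\/\\]node_modules[\/\\]lodash[\/\\]index\.js$/;

console.log(noParseLodash.test('/repo/node_modules/lodash/index.js'));       // true (posix)
console.log(noParseLodash.test('C:\\repo\\node_modules\\lodash\\index.js')); // true (win32)
console.log(/[\///]node_modules/.test('C:\\repo\\node_modules'));            // false with the old class
```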
diff --git a/scripts/register_git_hook.js b/scripts/register_git_hook.js
index 8e03f17967f3f..af3f54619bcec 100644
--- a/scripts/register_git_hook.js
+++ b/scripts/register_git_hook.js
@@ -17,5 +17,5 @@
* under the License.
*/
-require('../src/setup_node_env');
-require('../src/dev/run_register_git_hook');
+require('../src/setup_node_env/prebuilt_dev_only_entry');
+require('@kbn/dev-utils/target/precommit_hook/cli');
diff --git a/src/core/server/saved_objects/service/lib/repository.test.js b/src/core/server/saved_objects/service/lib/repository.test.js
index 927171438ae99..c46fcfbc6dbd7 100644
--- a/src/core/server/saved_objects/service/lib/repository.test.js
+++ b/src/core/server/saved_objects/service/lib/repository.test.js
@@ -23,6 +23,7 @@ import { SavedObjectsErrorHelpers } from './errors';
import { SavedObjectsSerializer } from '../../serialization';
import { encodeHitVersion } from '../../version';
import { SavedObjectTypeRegistry } from '../../saved_objects_type_registry';
+import { DocumentMigrator } from '../../migrations/core/document_migrator';
jest.mock('./search_dsl/search_dsl', () => ({ getSearchDsl: jest.fn() }));
@@ -115,6 +116,7 @@ describe('SavedObjectsRepository', () => {
const createType = type => ({
name: type,
mappings: { properties: mappings.properties[type].properties },
+ migrations: { '1.1.1': doc => doc },
});
const registry = new SavedObjectTypeRegistry();
@@ -144,6 +146,13 @@ describe('SavedObjectsRepository', () => {
namespaceType: 'agnostic',
});
+ const documentMigrator = new DocumentMigrator({
+ typeRegistry: registry,
+ kibanaVersion: '2.0.0',
+ log: {},
+ validateDoc: jest.fn(),
+ });
+
const getMockGetResponse = ({ type, id, references, namespace }) => ({
// NOTE: Elasticsearch returns more fields (_index, _type) but the SavedObjectsRepository method ignores these
found: true,
@@ -207,7 +216,7 @@ describe('SavedObjectsRepository', () => {
beforeEach(() => {
callAdminCluster = jest.fn();
migrator = {
- migrateDocument: jest.fn(doc => doc),
+ migrateDocument: jest.fn().mockImplementation(documentMigrator.migrate),
runMigrations: async () => ({ status: 'skipped' }),
};
@@ -424,9 +433,17 @@ describe('SavedObjectsRepository', () => {
const getMockBulkCreateResponse = (objects, namespace) => {
return {
- items: objects.map(({ type, id }) => ({
+ items: objects.map(({ type, id, attributes, references, migrationVersion }) => ({
create: {
_id: `${namespace ? `${namespace}:` : ''}${type}:${id}`,
+ _source: {
+ [type]: attributes,
+ type,
+ namespace,
+ references,
+ ...mockTimestampFields,
+ migrationVersion: migrationVersion || { [type]: '1.1.1' },
+ },
...mockVersionProps,
},
})),
@@ -474,7 +491,7 @@ describe('SavedObjectsRepository', () => {
const expectSuccessResult = obj => ({
...obj,
- migrationVersion: undefined,
+ migrationVersion: { [obj.type]: '1.1.1' },
version: mockVersion,
...mockTimestampFields,
});
@@ -619,13 +636,16 @@ describe('SavedObjectsRepository', () => {
};
const bulkCreateError = async (obj, esError, expectedError) => {
- const objects = [obj1, obj, obj2];
- const response = getMockBulkCreateResponse(objects);
+ let response;
if (esError) {
+ response = getMockBulkCreateResponse([obj1, obj, obj2]);
response.items[1].create = { error: esError };
+ } else {
+ response = getMockBulkCreateResponse([obj1, obj2]);
}
callAdminCluster.mockResolvedValue(response); // this._writeToCluster('bulk', ...)
+ const objects = [obj1, obj, obj2];
const result = await savedObjectsRepository.bulkCreate(objects);
expectClusterCalls('bulk');
const objCall = esError ? expectObjArgs(obj) : [];
@@ -781,7 +801,7 @@ describe('SavedObjectsRepository', () => {
id: 'three',
};
const objects = [obj1, obj, obj2];
- const response = getMockBulkCreateResponse(objects);
+ const response = getMockBulkCreateResponse([obj1, obj2]);
callAdminCluster.mockResolvedValue(response); // this._writeToCluster('bulk', ...)
const result = await savedObjectsRepository.bulkCreate(objects);
expect(callAdminCluster).toHaveBeenCalledTimes(1);
@@ -789,6 +809,32 @@ describe('SavedObjectsRepository', () => {
saved_objects: [expectSuccessResult(obj1), expectError(obj), expectSuccessResult(obj2)],
});
});
+
+ it(`a deserialized saved object`, async () => {
+    // Test for the fix to https://github.com/elastic/kibana/issues/65088 where
+    // we returned raw IDs when an object without an id was created.
+ const namespace = 'myspace';
+ const response = getMockBulkCreateResponse([obj1, obj2], namespace);
+ callAdminCluster.mockResolvedValueOnce(response); // this._writeToCluster('bulk', ...)
+
+ // Bulk create one object with id unspecified, and one with id specified
+ const result = await savedObjectsRepository.bulkCreate([{ ...obj1, id: undefined }, obj2], {
+ namespace,
+ });
+
+ // Assert that both raw docs from the ES response are deserialized
+ expect(serializer.rawToSavedObject).toHaveBeenNthCalledWith(1, {
+ ...response.items[0].create,
+ _id: expect.stringMatching(/^myspace:config:[0-9a-f]{8}-([0-9a-f]{4}-){3}[0-9a-f]{12}$/),
+ });
+ expect(serializer.rawToSavedObject).toHaveBeenNthCalledWith(2, response.items[1].create);
+
+    // Assert that IDs are deserialized to remove the type and namespace
+ expect(result.saved_objects[0].id).toEqual(
+ expect.stringMatching(/^[0-9a-f]{8}-([0-9a-f]{4}-){3}[0-9a-f]{12}$/)
+ );
+ expect(result.saved_objects[1].id).toEqual(obj2.id);
+ });
});
});
@@ -1604,6 +1650,7 @@ describe('SavedObjectsRepository', () => {
version: mockVersion,
attributes,
references,
+ migrationVersion: { [type]: '1.1.1' },
});
});
});
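The switch from an identity `migrateDocument` mock to a real `DocumentMigrator` is why expected results now carry `migrationVersion: { [type]: '1.1.1' }`: the migrator stamps each document with the highest migration version registered for its type. A reduced sketch of that stamping behavior (not the actual `DocumentMigrator` implementation):

```typescript
interface SavedObjectDoc {
  type: string;
  migrationVersion?: Record<string, string>;
  [key: string]: unknown;
}

// Each type in the test registers migrations: { '1.1.1': doc => doc }.
const registeredMigrations: Record<string, (doc: SavedObjectDoc) => SavedObjectDoc> = {
  '1.1.1': (doc) => doc, // identity migration, as in the test setup
};

function migrate(doc: SavedObjectDoc): SavedObjectDoc {
  // Lexicographic sort for brevity; the real migrator compares semver.
  const latest = Object.keys(registeredMigrations).sort().pop()!;
  const migrated = registeredMigrations[latest](doc);
  // Even an identity migration updates the bookkeeping field.
  return { ...migrated, migrationVersion: { ...migrated.migrationVersion, [doc.type]: latest } };
}
```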
diff --git a/src/core/server/saved_objects/service/lib/repository.ts b/src/core/server/saved_objects/service/lib/repository.ts
index 040be6711277b..7d6b890087595 100644
--- a/src/core/server/saved_objects/service/lib/repository.ts
+++ b/src/core/server/saved_objects/service/lib/repository.ts
@@ -18,6 +18,7 @@
*/
import { omit } from 'lodash';
+import uuid from 'uuid';
import { retryCallCluster } from '../../../elasticsearch/retry_call_cluster';
import { APICaller } from '../../../elasticsearch/';
import { getRootPropertiesObjects, IndexMapping } from '../../mappings';
@@ -298,6 +299,8 @@ export class SavedObjectsRepository {
const requiresNamespacesCheck =
method === 'index' && this._registry.isMultiNamespace(object.type);
+ if (object.id == null) object.id = uuid.v1();
+
return {
tag: 'Right' as 'Right',
value: {
@@ -403,35 +406,25 @@ export class SavedObjectsRepository {
}
const { requestedId, rawMigratedDoc, esRequestIndex } = expectedResult.value;
- const response = bulkResponse.items[esRequestIndex];
- const {
- error,
- _id: responseId,
- _seq_no: seqNo,
- _primary_term: primaryTerm,
- } = Object.values(response)[0] as any;
-
- const {
- _source: { type, [type]: attributes, references = [], namespaces },
- } = rawMigratedDoc;
-
- const id = requestedId || responseId;
+ const { error, ...rawResponse } = Object.values(
+ bulkResponse.items[esRequestIndex]
+ )[0] as any;
+
if (error) {
return {
- id,
- type,
- error: getBulkOperationError(error, type, id),
+ id: requestedId,
+ type: rawMigratedDoc._source.type,
+ error: getBulkOperationError(error, rawMigratedDoc._source.type, requestedId),
};
}
- return {
- id,
- type,
- ...(namespaces && { namespaces }),
- updated_at: time,
- version: encodeVersion(seqNo, primaryTerm),
- attributes,
- references,
- };
+
+ // When method == 'index' the bulkResponse doesn't include the indexed
+ // _source so we return rawMigratedDoc but have to spread the latest
+ // _seq_no and _primary_term values from the rawResponse.
+ return this._serializer.rawToSavedObject({
+ ...rawMigratedDoc,
+ ...{ _seq_no: rawResponse._seq_no, _primary_term: rawResponse._primary_term },
+ });
}),
};
}
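Two behaviors interact in this hunk: objects without an `id` now get one generated up front with `uuid.v1()`, and results are produced by deserializing the raw migrated document, so generated ids round-trip with their `namespace:type:` prefix stripped (matching the test's `myspace:config:<uuid>` expectation). A simplified sketch of the id handling; both helpers are illustrative, as the real encoding lives in the saved objects serializer:

```typescript
import uuid from 'uuid';

// Builds the raw ES _id, e.g. 'myspace:config:<uuid>' when no id is given.
function generateRawId(namespace: string | undefined, type: string, id?: string) {
  return `${namespace ? `${namespace}:` : ''}${type}:${id ?? uuid.v1()}`;
}

// Deserializing strips the prefix again, returning just '<uuid>'.
function trimIdPrefix(namespace: string | undefined, type: string, rawId: string) {
  const prefix = `${namespace ? `${namespace}:` : ''}${type}:`;
  return rawId.startsWith(prefix) ? rawId.slice(prefix.length) : rawId;
}
```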
diff --git a/src/dev/register_git_hook/register_git_hook.js b/src/dev/register_git_hook/register_git_hook.js
deleted file mode 100644
index 8820327d3adc0..0000000000000
--- a/src/dev/register_git_hook/register_git_hook.js
+++ /dev/null
@@ -1,169 +0,0 @@
-/*
- * Licensed to Elasticsearch B.V. under one or more contributor
- * license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright
- * ownership. Elasticsearch B.V. licenses this file to you under
- * the Apache License, Version 2.0 (the "License"); you may
- * not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-import chalk from 'chalk';
-import { chmod, unlink, writeFile } from 'fs';
-import dedent from 'dedent';
-import normalizePath from 'normalize-path';
-import os from 'os';
-import { resolve } from 'path';
-import { promisify } from 'util';
-import SimpleGit from 'simple-git';
-import { REPO_ROOT } from '../constants';
-
-const simpleGit = new SimpleGit(REPO_ROOT);
-
-const chmodAsync = promisify(chmod);
-const gitRevParseAsync = promisify(simpleGit.revparse.bind(simpleGit));
-const unlinkAsync = promisify(unlink);
-const writeFileAsync = promisify(writeFile);
-
-async function getPrecommitGitHookScriptPath(rootPath) {
- // Retrieves the correct location for the .git dir for
- // every git setup (including git worktree)
- const gitDirPath = (await gitRevParseAsync(['--git-common-dir'])).trim();
-
- return resolve(rootPath, gitDirPath, 'hooks/pre-commit');
-}
-
-function getKbnPrecommitGitHookScript(rootPath, nodeHome, platform) {
- return dedent(`
- #!/usr/bin/env bash
- #
- # ** THIS IS AN AUTO-GENERATED FILE **
- # ** PLEASE DO NOT CHANGE IT MANUALLY **
- #
- # GENERATED BY ${__dirname}
- # IF YOU WANNA CHANGE SOMETHING INTO THIS SCRIPT
- # PLEASE RE-RUN 'yarn kbn bootstrap' or 'node scripts/register_git_hook' IN THE ROOT
- # OF THE CURRENT PROJECT ${rootPath}
-
- # pre-commit script takes zero arguments: https://git-scm.com/docs/githooks#_pre_commit
-
- set -euo pipefail
-
- # Make it possible to terminate pre commit hook
- # using ctrl-c so nothing else would happen or be
- # sent to the output.
- #
- # The correct exit code on that situation
- # according the linux documentation project is 130
- # https://www.tldp.org/LDP/abs/html/exitcodes.html
- trap "exit 130" INT
-
- has_node() {
- command -v node >/dev/null 2>&1
- }
-
- has_nvm() {
- command -v nvm >/dev/null 2>&1
- }
-
- try_load_node_from_nvm_paths () {
- # If nvm is not loaded, load it
- has_node || {
- NVM_SH="${nodeHome}/.nvm/nvm.sh"
-
- if [ "${platform}" == "darwin" ] && [ -s "$(brew --prefix nvm)/nvm.sh" ]; then
- NVM_SH="$(brew --prefix nvm)/nvm.sh"
- fi
-
- export NVM_DIR=${nodeHome}/.nvm
-
- [ -s "$NVM_SH" ] && \. "$NVM_SH"
-
- # If nvm has been loaded correctly, use project .nvmrc
- has_nvm && nvm use
- }
- }
-
- extend_user_path() {
- if [ "${platform}" == "win32" ]; then
- export PATH="$PATH:/c/Program Files/nodejs"
- else
- export PATH="$PATH:/usr/local/bin:/usr/local"
- try_load_node_from_nvm_paths
- fi
- }
-
- # Extend path with common path locations for node
- # in order to make the hook working on git GUI apps
- extend_user_path
-
- # Check if we have node js bin in path
- has_node || {
- echo "Can't found node bin in the PATH. Please update the PATH to proceed."
- echo "If your PATH already has the node bin, maybe you are using some git GUI app."
- echo "Can't found node bin in the PATH. Please update the PATH to proceed."
- echo "If your PATH already has the node bin, maybe you are using some git GUI app not launched from the shell."
- echo "In order to proceed, you need to config the PATH used by the application that are launching your git GUI app."
- echo "If you are running macOS, you can do that using:"
- echo "'sudo launchctl config user path /usr/local/bin:/usr/bin:/bin:/usr/sbin:/sbin'"
-
- exit 1
- }
-
- execute_precommit_hook() {
- node scripts/precommit_hook || return 1
-
- PRECOMMIT_FILE="./.git/hooks/pre-commit.local"
- if [ -x "\${PRECOMMIT_FILE}" ]; then
- echo "Executing local precommit hook found in \${PRECOMMIT_FILE}"
- "$PRECOMMIT_FILE" || return 1
- fi
- }
-
- execute_precommit_hook || {
- echo "Pre-commit hook failed (add --no-verify to bypass)";
- echo ' For eslint failures you can try running \`node scripts/precommit_hook --fix\`';
- exit 1;
- }
-
- exit 0
- `);
-}
-
-export async function registerPrecommitGitHook(log) {
- log.write(chalk.bold(`Registering Kibana pre-commit git hook...\n`));
-
- try {
- await writeGitHook(
- await getPrecommitGitHookScriptPath(REPO_ROOT),
- getKbnPrecommitGitHookScript(REPO_ROOT, normalizePath(os.homedir()), process.platform)
- );
- } catch (e) {
- log.write(
- `${chalk.red('fail')} Kibana pre-commit git hook was not installed as an error occur.\n`
- );
- throw e;
- }
-
- log.write(`${chalk.green('success')} Kibana pre-commit git hook was installed successfully.\n`);
-}
-
-async function writeGitHook(gitHookScriptPath, kbnHookScriptSource) {
- try {
- await unlinkAsync(gitHookScriptPath);
- } catch (e) {
- /* no-op */
- }
-
- await writeFileAsync(gitHookScriptPath, kbnHookScriptSource);
- await chmodAsync(gitHookScriptPath, 0o755);
-}
diff --git a/src/legacy/core_plugins/kibana/public/management/sections/index_patterns/edit_index_pattern/tabs/utils.ts b/src/legacy/core_plugins/kibana/public/management/sections/index_patterns/edit_index_pattern/tabs/utils.ts
index bdb1436c37efb..83335a6fabfeb 100644
--- a/src/legacy/core_plugins/kibana/public/management/sections/index_patterns/edit_index_pattern/tabs/utils.ts
+++ b/src/legacy/core_plugins/kibana/public/management/sections/index_patterns/edit_index_pattern/tabs/utils.ts
@@ -96,18 +96,21 @@ export function getTabs(
tabs.push({
name: getTitle('indexed', filteredCount, totalCount),
id: TAB_INDEXED_FIELDS,
+ 'data-test-subj': 'tab-indexedFields',
});
if (indexPatternListProvider.areScriptedFieldsEnabled(indexPattern)) {
tabs.push({
name: getTitle('scripted', filteredCount, totalCount),
id: TAB_SCRIPTED_FIELDS,
+ 'data-test-subj': 'tab-scriptedFields',
});
}
tabs.push({
name: getTitle('sourceFilters', filteredCount, totalCount),
id: TAB_SOURCE_FILTERS,
+ 'data-test-subj': 'tab-sourceFilters',
});
return tabs;
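The added `data-test-subj` attributes give functional tests a stable handle on each tab. A hedged sketch of how a functional test might use one; the test file and suite are hypothetical, while `testSubjects` is the standard Kibana FTR service:

```typescript
export default function ({ getService }: { getService: (name: string) => any }) {
  const testSubjects = getService('testSubjects');

  describe('index pattern tabs', () => {
    it('switches to the scripted fields tab', async () => {
      await testSubjects.click('tab-scriptedFields');
      // assertions against the scripted fields table would go here
    });
  });
}
```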
diff --git a/src/plugins/dashboard/public/application/__snapshots__/dashboard_empty_screen.test.tsx.snap b/src/plugins/dashboard/public/application/__snapshots__/dashboard_empty_screen.test.tsx.snap
index 1bc85fa110ca0..698c124d2d805 100644
--- a/src/plugins/dashboard/public/application/__snapshots__/dashboard_empty_screen.test.tsx.snap
+++ b/src/plugins/dashboard/public/application/__snapshots__/dashboard_empty_screen.test.tsx.snap
@@ -301,7 +301,7 @@ exports[`DashboardEmptyScreen renders correctly with readonly mode 1`] = `
>
@@ -995,7 +995,7 @@ exports[`DashboardEmptyScreen renders correctly without visualize paragraph 1`]
>
diff --git a/src/plugins/dashboard/public/application/dashboard_empty_screen.tsx b/src/plugins/dashboard/public/application/dashboard_empty_screen.tsx
index 8bf205b8cb507..955d5244ce190 100644
--- a/src/plugins/dashboard/public/application/dashboard_empty_screen.tsx
+++ b/src/plugins/dashboard/public/application/dashboard_empty_screen.tsx
@@ -50,8 +50,8 @@ export function DashboardEmptyScreen({
}: DashboardEmptyScreenProps) {
const IS_DARK_THEME = uiSettings.get('theme:darkMode');
const emptyStateGraphicURL = IS_DARK_THEME
- ? '/plugins/kibana/home/assets/welcome_graphic_dark_2x.png'
- : '/plugins/kibana/home/assets/welcome_graphic_light_2x.png';
+ ? '/plugins/home/assets/welcome_graphic_dark_2x.png'
+ : '/plugins/home/assets/welcome_graphic_light_2x.png';
const linkToVisualizeParagraph = (