diff --git a/.env b/.env index 68900a1144..924b9f2c45 100644 --- a/.env +++ b/.env @@ -1,4 +1,4 @@ -ELASTIC_VERSION=8.5.3 +ELASTIC_VERSION=8.11.1 ## Passwords for stack users # diff --git a/.github/ISSUE_TEMPLATE/config.yml b/.github/ISSUE_TEMPLATE/config.yml index 42d723deca..e30f9d298a 100644 --- a/.github/ISSUE_TEMPLATE/config.yml +++ b/.github/ISSUE_TEMPLATE/config.yml @@ -7,5 +7,5 @@ contact_links: url: https://forums.docker.com about: Please ask questions related to the usage of Docker products in those forums. - name: docker-elk Gitter chat room - url: https://gitter.im/deviantony/docker-elk + url: https://app.gitter.im/#/room/#deviantony_docker-elk:gitter.im about: General questions regarding this project can also be asked in the chat. diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 0e9cbb17f3..40b7f2b2e0 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -14,8 +14,11 @@ jobs: # https://docs.github.com/en/actions/using-github-hosted-runners/about-github-hosted-runners runs-on: ubuntu-22.04 + env: + COMPOSE_PROJECT_NAME: docker-elk + steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 ##################################################### # # @@ -61,22 +64,18 @@ jobs: ########################################################## - name: Run the stack - run: docker compose up -d + run: | + docker compose up setup + docker compose up -d + + # Elasticsearch's high disk watermark gets regularly exceeded on GitHub Actions runners. 
+ # https://www.elastic.co/guide/en/elasticsearch/reference/8.10/fix-watermark-errors.html + - name: Disable Elasticsearch disk allocation decider + run: .github/workflows/scripts/disable-disk-alloc-decider.sh - name: Execute core test suite run: .github/workflows/scripts/run-tests-core.sh - - name: 'debug: Display state and logs (core)' - # https://docs.github.com/en/free-pro-team@latest/actions/reference/workflow-syntax-for-github-actions#jobsjob_idif - # https://docs.github.com/en/free-pro-team@latest/actions/reference/context-and-expression-syntax-for-github-actions#job-status-check-functions - if: always() - run: | - docker compose ps - docker compose logs setup - docker compose logs elasticsearch - docker compose logs logstash - docker compose logs kibana - ############################## # # # Test supported extensions. # @@ -108,11 +107,6 @@ jobs: sed -i '/input { udp { port => 50000 codec => json } }/d' logstash/pipeline/logstash.conf docker compose restart logstash - - name: 'debug: Display state and logs (Logspout)' - if: always() - run: | - docker compose -f docker-compose.yml -f extensions/logspout/logspout-compose.yml ps - docker compose -f docker-compose.yml -f extensions/logspout/logspout-compose.yml logs logspout # next steps don't need Logstash docker compose stop logstash @@ -125,13 +119,6 @@ jobs: docker compose -f docker-compose.yml -f extensions/fleet/fleet-compose.yml -f extensions/fleet/agent-apmserver-compose.yml up --remove-orphans -d fleet-server apm-server .github/workflows/scripts/run-tests-fleet.sh - - name: 'debug: Display state and logs (Fleet)' - if: always() - run: | - docker compose -f docker-compose.yml -f extensions/fleet/fleet-compose.yml -f extensions/fleet/agent-apmserver-compose.yml ps - docker compose -f docker-compose.yml -f extensions/fleet/fleet-compose.yml -f extensions/fleet/agent-apmserver-compose.yml logs fleet-server - docker compose -f docker-compose.yml -f extensions/fleet/fleet-compose.yml -f 
extensions/fleet/agent-apmserver-compose.yml logs apm-server - # # Metricbeat # @@ -141,12 +128,6 @@ jobs: docker compose -f docker-compose.yml -f extensions/metricbeat/metricbeat-compose.yml up --remove-orphans -d metricbeat .github/workflows/scripts/run-tests-metricbeat.sh - - name: 'debug: Display state and logs (Metricbeat)' - if: always() - run: | - docker compose -f docker-compose.yml -f extensions/metricbeat/metricbeat-compose.yml ps - docker compose -f docker-compose.yml -f extensions/metricbeat/metricbeat-compose.yml logs metricbeat - # # Filebeat # @@ -156,12 +137,6 @@ jobs: docker compose -f docker-compose.yml -f extensions/filebeat/filebeat-compose.yml up --remove-orphans -d filebeat .github/workflows/scripts/run-tests-filebeat.sh - - name: 'debug: Display state and logs (Filebeat)' - if: always() - run: | - docker compose -f docker-compose.yml -f extensions/filebeat/filebeat-compose.yml ps - docker compose -f docker-compose.yml -f extensions/filebeat/filebeat-compose.yml logs filebeat - # # Heartbeat # @@ -171,12 +146,6 @@ jobs: docker compose -f docker-compose.yml -f extensions/heartbeat/heartbeat-compose.yml up --remove-orphans -d heartbeat .github/workflows/scripts/run-tests-heartbeat.sh - - name: 'debug: Display state and logs (Heartbeat)' - if: always() - run: | - docker compose -f docker-compose.yml -f extensions/heartbeat/heartbeat-compose.yml ps - docker compose -f docker-compose.yml -f extensions/heartbeat/heartbeat-compose.yml logs heartbeat - # # Enterprise Search # @@ -204,11 +173,42 @@ jobs: sed -i '/xpack.security.authc.api_key.enabled: true/d' elasticsearch/config/elasticsearch.yml docker compose restart elasticsearch - - name: 'debug: Display state and logs (Enterprise Search)' - if: always() + - name: Collect troubleshooting data + id: debug-data + if: failure() run: | - docker compose -f docker-compose.yml -f extensions/enterprise-search/enterprise-search-compose.yml ps - docker compose -f docker-compose.yml -f 
extensions/enterprise-search/enterprise-search-compose.yml logs enterprise-search + declare debug_data_dir="$(mktemp -d)" + + docker compose \ + -f docker-compose.yml \ + -f extensions/logspout/logspout-compose.yml \ + -f extensions/fleet/fleet-compose.yml \ + -f extensions/fleet/agent-apmserver-compose.yml \ + -f extensions/metricbeat/metricbeat-compose.yml \ + -f extensions/filebeat/filebeat-compose.yml \ + -f extensions/heartbeat/heartbeat-compose.yml \ + -f extensions/enterprise-search/enterprise-search-compose.yml \ + ps >"$debug_data_dir"/docker_ps.log + + docker compose \ + -f docker-compose.yml \ + -f extensions/logspout/logspout-compose.yml \ + -f extensions/fleet/fleet-compose.yml \ + -f extensions/fleet/agent-apmserver-compose.yml \ + -f extensions/metricbeat/metricbeat-compose.yml \ + -f extensions/filebeat/filebeat-compose.yml \ + -f extensions/heartbeat/heartbeat-compose.yml \ + -f extensions/enterprise-search/enterprise-search-compose.yml \ + logs >"$debug_data_dir"/docker_logs.log + + echo "path=${debug_data_dir}" >>"$GITHUB_OUTPUT" + + - name: Upload collected troubleshooting data + if: always() && steps.debug-data.outputs.path + uses: actions/upload-artifact@v3 + with: + name: debug-data + path: ${{ steps.debug-data.outputs.path }}/*.* ############## # # diff --git a/.github/workflows/docs.yml b/.github/workflows/docs.yml index 93300c3b4b..2b39074798 100644 --- a/.github/workflows/docs.yml +++ b/.github/workflows/docs.yml @@ -15,7 +15,7 @@ jobs: runs-on: ubuntu-latest steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - name: Check links uses: gaurav-nelson/github-action-markdown-link-check@v1 diff --git a/.github/workflows/scripts/disable-disk-alloc-decider.sh b/.github/workflows/scripts/disable-disk-alloc-decider.sh new file mode 100755 index 0000000000..4e7d73d484 --- /dev/null +++ b/.github/workflows/scripts/disable-disk-alloc-decider.sh @@ -0,0 +1,30 @@ +#!/usr/bin/env bash + +set -eu +set -o pipefail + + +source 
"${BASH_SOURCE[0]%/*}"/lib/testing.sh + + +cid_es="$(container_id elasticsearch)" +ip_es="$(service_ip elasticsearch)" + +grouplog 'Wait for readiness of Elasticsearch' +poll_ready "$cid_es" "http://${ip_es}:9200/" -u 'elastic:testpasswd' +endgroup + +log 'Disabling disk allocation decider' + +declare -a put_args=( '-X' 'PUT' '--fail-with-body' '-s' '-u' 'elastic:testpasswd' + '-H' 'Content-Type: application/json' + "http://${ip_es}:9200/_cluster/settings?pretty" + '-d' '{"persistent":{"cluster.routing.allocation.disk.threshold_enabled":false}}' +) +declare response +declare -i exit_code=0 + +response=$(curl "${put_args[@]}") || exit_code=$? +echo "$response" + +exit $exit_code diff --git a/.github/workflows/scripts/lib/testing.sh b/.github/workflows/scripts/lib/testing.sh index 1c77954dcf..2d809925fe 100755 --- a/.github/workflows/scripts/lib/testing.sh +++ b/.github/workflows/scripts/lib/testing.sh @@ -10,6 +10,17 @@ function err { echo -e "\n[x] $1\n" >&2 } +# Start an expandable group in the GitHub Action log. +# https://docs.github.com/en/actions/using-workflows/workflow-commands-for-github-actions#grouping-log-lines +function grouplog { + echo "::group::$1" +} + +# End the current expandable group in the GitHub Action log. +function endgroup { + echo '::endgroup::' +} + # Return the ID of the container running the given service. 
function container_id { local svc=$1 diff --git a/.github/workflows/scripts/run-tests-core.sh b/.github/workflows/scripts/run-tests-core.sh index f5cf0706bb..849e52d0bd 100755 --- a/.github/workflows/scripts/run-tests-core.sh +++ b/.github/workflows/scripts/run-tests-core.sh @@ -4,7 +4,7 @@ set -eu set -o pipefail -source "$(dirname ${BASH_SOURCE[0]})/lib/testing.sh" +source "${BASH_SOURCE[0]%/*}"/lib/testing.sh cid_es="$(container_id elasticsearch)" @@ -15,14 +15,17 @@ ip_es="$(service_ip elasticsearch)" ip_ls="$(service_ip logstash)" ip_kb="$(service_ip kibana)" -log 'Waiting for readiness of Elasticsearch' +grouplog 'Wait for readiness of Elasticsearch' poll_ready "$cid_es" "http://${ip_es}:9200/" -u 'elastic:testpasswd' +endgroup -log 'Waiting for readiness of Logstash' +grouplog 'Wait for readiness of Logstash' poll_ready "$cid_ls" "http://${ip_ls}:9600/_node/pipelines/main?pretty" +endgroup -log 'Waiting for readiness of Kibana' +grouplog 'Wait for readiness of Kibana' poll_ready "$cid_kb" "http://${ip_kb}:5601/api/status" -u 'kibana_system:testpasswd' +endgroup log 'Sending message to Logstash TCP input' @@ -43,15 +46,58 @@ if ((was_retried)); then echo >&2 fi -sleep 5 -curl -X POST "http://${ip_es}:9200/logs-generic-default/_refresh" -u elastic:testpasswd \ - -s -w '\n' +# It might take a few seconds before the indices and alias are created, so we +# need to be resilient here. 
+was_retried=0 +declare -a refresh_args=( '-X' 'POST' '-s' '-w' '%{http_code}' '-u' 'elastic:testpasswd' + "http://${ip_es}:9200/logs-generic-default/_refresh" +) + +# retry for max 10s (10*1s) +for _ in $(seq 1 10); do + output="$(curl "${refresh_args[@]}")" + if [ "${output: -3}" -eq 200 ]; then + break + fi + + was_retried=1 + echo -n 'x' >&2 + sleep 1 +done +if ((was_retried)); then + # flush stderr, important in non-interactive environments (CI) + echo >&2 +fi log 'Searching message in Elasticsearch' -response="$(curl "http://${ip_es}:9200/logs-generic-default/_search?q=message:dockerelk&pretty" -s -u elastic:testpasswd)" -echo "$response" + +# We don't know how much time it will take Logstash to create our document, so +# we need to be resilient here too. +was_retried=0 +declare -a search_args=( '-s' '-u' 'elastic:testpasswd' + "http://${ip_es}:9200/logs-generic-default/_search?q=message:dockerelk&pretty" +) declare -i count -count="$(jq -rn --argjson data "${response}" '$data.hits.total.value')" +declare response + +# retry for max 10s (10*1s) +for _ in $(seq 1 10); do + response="$(curl "${search_args[@]}")" + count="$(jq -rn --argjson data "${response}" '$data.hits.total.value')" + if (( count )); then + break + fi + + was_retried=1 + echo -n 'x' >&2 + sleep 1 +done +if ((was_retried)); then + # flush stderr, important in non-interactive environments (CI) + echo >&2 +fi + +echo "$response" if (( count != 1 )); then echo "Expected 1 document, got ${count}" exit 1 diff --git a/.github/workflows/scripts/run-tests-enterprise-search.sh b/.github/workflows/scripts/run-tests-enterprise-search.sh index 3a58afd500..92158b329c 100755 --- a/.github/workflows/scripts/run-tests-enterprise-search.sh +++ b/.github/workflows/scripts/run-tests-enterprise-search.sh @@ -4,7 +4,7 @@ set -eu set -o pipefail -source "$(dirname ${BASH_SOURCE[0]})/lib/testing.sh" +source "${BASH_SOURCE[0]%/*}"/lib/testing.sh cid_es="$(container_id elasticsearch)" @@ -13,11 +13,13 @@ 
cid_en="$(container_id enterprise-search)" ip_es="$(service_ip elasticsearch)" ip_en="$(service_ip enterprise-search)" -log 'Waiting for readiness of Elasticsearch' +grouplog 'Wait for readiness of Elasticsearch' poll_ready "$cid_es" "http://${ip_es}:9200/" -u 'elastic:testpasswd' +endgroup -log 'Waiting for readiness of Enterprise Search' +grouplog 'Wait for readiness of Enterprise Search' poll_ready "$cid_en" "http://${ip_en}:3002/api/ent/v1/internal/health" -u 'elastic:testpasswd' +endgroup log 'Ensuring that App Search API keys were created in Elasticsearch' response="$(curl "http://${ip_es}:9200/.ent-search-actastic-app_search_api_tokens_v3/_search?q=*:*&pretty" -s -u elastic:testpasswd)" diff --git a/.github/workflows/scripts/run-tests-filebeat.sh b/.github/workflows/scripts/run-tests-filebeat.sh index 567c59db26..0fe56de702 100755 --- a/.github/workflows/scripts/run-tests-filebeat.sh +++ b/.github/workflows/scripts/run-tests-filebeat.sh @@ -4,7 +4,7 @@ set -eu set -o pipefail -source "$(dirname ${BASH_SOURCE[0]})/lib/testing.sh" +source "${BASH_SOURCE[0]%/*}"/lib/testing.sh cid_es="$(container_id elasticsearch)" @@ -13,11 +13,13 @@ cid_mb="$(container_id filebeat)" ip_es="$(service_ip elasticsearch)" ip_mb="$(service_ip filebeat)" -log 'Waiting for readiness of Elasticsearch' +grouplog 'Wait for readiness of Elasticsearch' poll_ready "$cid_es" "http://${ip_es}:9200/" -u 'elastic:testpasswd' +endgroup -log 'Waiting for readiness of Filebeat' +grouplog 'Wait for readiness of Filebeat' poll_ready "$cid_mb" "http://${ip_mb}:5066/?pretty" +endgroup # We expect to find log entries for the 'elasticsearch' Compose service using # the following query: diff --git a/.github/workflows/scripts/run-tests-fleet.sh b/.github/workflows/scripts/run-tests-fleet.sh index 5d1c2a5407..aed7d58a41 100755 --- a/.github/workflows/scripts/run-tests-fleet.sh +++ b/.github/workflows/scripts/run-tests-fleet.sh @@ -4,7 +4,7 @@ set -eu set -o pipefail -source "$(dirname 
${BASH_SOURCE[0]})/lib/testing.sh" +source "${BASH_SOURCE[0]%/*}"/lib/testing.sh cid_es="$(container_id elasticsearch)" @@ -15,14 +15,17 @@ ip_es="$(service_ip elasticsearch)" ip_fl="$(service_ip fleet-server)" ip_apm="$(service_ip apm-server)" -log 'Waiting for readiness of Elasticsearch' +grouplog 'Wait for readiness of Elasticsearch' poll_ready "$cid_es" "http://${ip_es}:9200/" -u 'elastic:testpasswd' +endgroup -log 'Waiting for readiness of Fleet Server' +grouplog 'Wait for readiness of Fleet Server' poll_ready "$cid_fl" "http://${ip_fl}:8220/api/status" +endgroup -log 'Waiting for readiness of APM Server' +grouplog 'Wait for readiness of APM Server' poll_ready "$cid_apm" "http://${ip_apm}:8200/" +endgroup # We expect to find metrics entries using the following query: # diff --git a/.github/workflows/scripts/run-tests-heartbeat.sh b/.github/workflows/scripts/run-tests-heartbeat.sh index 882a9770cc..7b44a54329 100755 --- a/.github/workflows/scripts/run-tests-heartbeat.sh +++ b/.github/workflows/scripts/run-tests-heartbeat.sh @@ -4,7 +4,7 @@ set -eu set -o pipefail -source "$(dirname ${BASH_SOURCE[0]})/lib/testing.sh" +source "${BASH_SOURCE[0]%/*}"/lib/testing.sh cid_es="$(container_id elasticsearch)" @@ -13,11 +13,13 @@ cid_mb="$(container_id heartbeat)" ip_es="$(service_ip elasticsearch)" ip_mb="$(service_ip heartbeat)" -log 'Waiting for readiness of Elasticsearch' +grouplog 'Wait for readiness of Elasticsearch' poll_ready "$cid_es" "http://${ip_es}:9200/" -u 'elastic:testpasswd' +endgroup -log 'Waiting for readiness of Heartbeat' +grouplog 'Wait for readiness of Heartbeat' poll_ready "$cid_mb" "http://${ip_mb}:5066/?pretty" +endgroup # We expect to find heartbeat entries for the 'elasticsearch' HTTP service # using the following query: diff --git a/.github/workflows/scripts/run-tests-logspout.sh b/.github/workflows/scripts/run-tests-logspout.sh index 4bf0047ffa..72c15c8760 100755 --- a/.github/workflows/scripts/run-tests-logspout.sh +++ 
b/.github/workflows/scripts/run-tests-logspout.sh @@ -4,7 +4,7 @@ set -eu set -o pipefail -source "$(dirname ${BASH_SOURCE[0]})/lib/testing.sh" +source "${BASH_SOURCE[0]%/*}"/lib/testing.sh cid_es="$(container_id elasticsearch)" @@ -15,14 +15,17 @@ ip_es="$(service_ip elasticsearch)" ip_ls="$(service_ip logstash)" ip_lsp="$(service_ip logspout)" -log 'Waiting for readiness of Elasticsearch' +grouplog 'Wait for readiness of Elasticsearch' poll_ready "$cid_es" "http://${ip_es}:9200/" -u 'elastic:testpasswd' +endgroup -log 'Waiting for readiness of Logstash' +grouplog 'Wait for readiness of Logstash' poll_ready "$cid_ls" "http://${ip_ls}:9600/_node/pipelines/main?pretty" +endgroup -log 'Waiting for readiness of Logspout' +grouplog 'Wait for readiness of Logspout' poll_ready "$cid_lsp" "http://${ip_lsp}/health" +endgroup # When Logspout starts, it prints the following log line: # 2021/01/07 16:14:52 # logspout v3.2.13-custom by gliderlabs diff --git a/.github/workflows/scripts/run-tests-metricbeat.sh b/.github/workflows/scripts/run-tests-metricbeat.sh index b0e9856223..2cc9abecee 100755 --- a/.github/workflows/scripts/run-tests-metricbeat.sh +++ b/.github/workflows/scripts/run-tests-metricbeat.sh @@ -4,7 +4,7 @@ set -eu set -o pipefail -source "$(dirname ${BASH_SOURCE[0]})/lib/testing.sh" +source "${BASH_SOURCE[0]%/*}"/lib/testing.sh cid_es="$(container_id elasticsearch)" @@ -13,11 +13,13 @@ cid_mb="$(container_id metricbeat)" ip_es="$(service_ip elasticsearch)" ip_mb="$(service_ip metricbeat)" -log 'Waiting for readiness of Elasticsearch' +grouplog 'Wait for readiness of Elasticsearch' poll_ready "$cid_es" "http://${ip_es}:9200/" -u 'elastic:testpasswd' +endgroup -log 'Waiting for readiness of Metricbeat' +grouplog 'Wait for readiness of Metricbeat' poll_ready "$cid_mb" "http://${ip_mb}:5066/?pretty" +endgroup # We expect to find monitoring entries for the 'elasticsearch' Compose service # using the following query: diff --git a/.github/workflows/spam-issue-close.yml 
b/.github/workflows/spam-issue-close.yml index cb07bd80b0..bd774eb297 100644 --- a/.github/workflows/spam-issue-close.yml +++ b/.github/workflows/spam-issue-close.yml @@ -17,39 +17,26 @@ jobs: steps: - name: Close id: close - uses: actions/stale@v7.0.0 + uses: actions/stale@v8.0.0 with: days-before-issue-stale: -1 days-before-issue-close: 0 stale-issue-label: bot:close close-issue-label: insufficient information close-issue-message: >- - To avoid frequent and unnecessary back and forth in issue comments to ask for information that could have - been provided right away in the issue description, maintainers created a clear issue template with all the - information they need to be able to reproduce common issues, and ask everyone to follow it. + This description omits all, or critical parts of the information requested by maintainers to be able to + reproduce the issue: - **It appears that this issue description omits all, or critical parts the requested information.** + - the **complete** log history of your Elastic components, including `setup`. + - any change(s) performed to the docker-elk configuration. + - details about the runtime environment, for both Docker and Compose. - Maintainers of this project wish they had the superpower to read minds. Alas, they are mere mortals who - cannot understand the context in which this problem occurred without information such as: + Therefore, this issue will now be **closed**. Please open a new issue and fill in the template. It saves + everyone's efforts, and allows maintainers to provide you with a solution in as few round trips as possible. - - - your docker-elk configuration - - details about your runtime environment - - the complete log history of your Elastic components - - - Therefore, this issue will now be **closed**. Thank you for your understanding. :pray: - - - --- - - - Next time, please be respectful of maintainers' time by providing the requested information right away. 
It - saves everyone's efforts, and allows them to provide you with a solution with as few round trips as - possible. + Thank you for your understanding. :pray: # Due to eventual consistency, listing closed issues immediately after a # close does not always yield the expected results. A sleep is a simple @@ -59,7 +46,7 @@ jobs: run: sleep 5 - name: Lock - uses: dessant/lock-threads@v4 + uses: dessant/lock-threads@v5 if: fromJson(steps.close.outputs.closed-issues-prs)[0] with: process-only: issues diff --git a/.github/workflows/update-merge.yml b/.github/workflows/update-merge.yml index 0f4818a460..d9b8b7e84e 100644 --- a/.github/workflows/update-merge.yml +++ b/.github/workflows/update-merge.yml @@ -19,7 +19,7 @@ jobs: steps: - name: Impersonate update bot - uses: tibdex/github-app-token@v1 + uses: tibdex/github-app-token@v2 id: generate-token with: app_id: ${{ secrets.APP_ID }} @@ -32,7 +32,7 @@ jobs: GITHUB_TOKEN: ${{ steps.generate-token.outputs.token }} - name: Delete branch - uses: actions/github-script@v6 + uses: actions/github-script@v7 with: script: | await github.request('DELETE /repos/{owner}/{repo}/git/refs/{ref}', { diff --git a/.github/workflows/update.yml b/.github/workflows/update.yml index 8eca84fffa..978b503966 100644 --- a/.github/workflows/update.yml +++ b/.github/workflows/update.yml @@ -20,11 +20,11 @@ jobs: branch: release-7.x steps: - - uses: actions/setup-node@v3 + - uses: actions/setup-node@v4 - run: npm install semver - name: Get latest release version - uses: actions/github-script@v6 + uses: actions/github-script@v7 id: get-latest-release with: script: | @@ -54,7 +54,7 @@ jobs: return { version: latestVersion } } - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 if: steps.get-latest-release.outputs.result with: ref: ${{ matrix.branch }} @@ -85,7 +85,7 @@ jobs: fi - name: Impersonate update bot - uses: tibdex/github-app-token@v1 + uses: tibdex/github-app-token@v2 id: generate-token if: steps.update-files.outputs.has_changes with: 
@@ -94,7 +94,7 @@ jobs: - name: Send pull request to update to new version if: steps.update-files.outputs.has_changes - uses: peter-evans/create-pull-request@v4 + uses: peter-evans/create-pull-request@v5 with: token: ${{ steps.generate-token.outputs.token }} branch: update/${{ matrix.branch }} diff --git a/README.md b/README.md index 6b67ffb65a..be31b53a4b 100644 --- a/README.md +++ b/README.md @@ -1,23 +1,15 @@ # Elastic stack (ELK) on Docker -[![Elastic Stack version](https://img.shields.io/badge/Elastic%20Stack-8.5.3-00bfb3?style=flat&logo=elastic-stack)](https://www.elastic.co/blog/category/releases) +[![Elastic Stack version](https://img.shields.io/badge/Elastic%20Stack-8.11.1-00bfb3?style=flat&logo=elastic-stack)](https://www.elastic.co/blog/category/releases) [![Build Status](https://github.com/deviantony/docker-elk/workflows/CI/badge.svg?branch=main)](https://github.com/deviantony/docker-elk/actions?query=workflow%3ACI+branch%3Amain) -[![Join the chat at https://gitter.im/deviantony/docker-elk](https://badges.gitter.im/Join%20Chat.svg)](https://gitter.im/deviantony/docker-elk?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge) +[![Join the chat](https://badges.gitter.im/Join%20Chat.svg)](https://app.gitter.im/#/room/#deviantony_docker-elk:gitter.im) Run the latest version of the [Elastic stack][elk-stack] with Docker and Docker Compose. It gives you the ability to analyze any data set by using the searching/aggregation capabilities of Elasticsearch and the visualization power of Kibana. -![Animated demo](https://user-images.githubusercontent.com/3299086/155972072-0c89d6db-707a-47a1-818b-5f976565f95a.gif) - -> **Note** -> [Platinum][subscriptions] features are enabled by default for a [trial][license-mngmt] duration of **30 days**. After -> this evaluation period, you will retain access to all the free features included in the Open Basic license seamlessly, -> without manual intervention required, and without losing any data. 
Refer to the [How to disable paid -> features](#how-to-disable-paid-features) section to opt out of this behaviour. - -Based on the official Docker images from Elastic: +Based on the [official Docker images][elastic-docker] from Elastic: * [Elasticsearch](https://github.com/elastic/elasticsearch/tree/main/distribution/docker) * [Logstash](https://github.com/elastic/logstash/tree/main/docker) @@ -29,6 +21,26 @@ Other available stack variants: and Fleet * [`searchguard`](https://github.com/deviantony/docker-elk/tree/searchguard): Search Guard support +> [!IMPORTANT] +> [Platinum][subscriptions] features are enabled by default for a [trial][license-mngmt] duration of **30 days**. After +> this evaluation period, you will retain access to all the free features included in the Open Basic license seamlessly, +> without manual intervention required, and without losing any data. Refer to the [How to disable paid +> features](#how-to-disable-paid-features) section to opt out of this behaviour. + +--- + +## tl;dr + +```sh +docker-compose up setup +``` + +```sh +docker-compose up +``` + +![Animated demo](https://user-images.githubusercontent.com/3299086/155972072-0c89d6db-707a-47a1-818b-5f976565f95a.gif) + --- ## Philosophy @@ -79,15 +91,10 @@ own_. [sherifabdlnaby/elastdocker][elastdocker] is one example among others of p ### Host setup * [Docker Engine][docker-install] version **18.06.0** or newer -* [Docker Compose][compose-install] version **1.26.0** or newer (including [Compose V2][compose-v2]) +* [Docker Compose][compose-install] version **1.28.0** or newer (including [Compose V2][compose-v2]) * 1.5 GB of RAM -> **Warning** -> While Compose versions between **1.22.0** and **1.25.5** can technically run this stack as well, these versions have a -> [known issue](https://github.com/deviantony/docker-elk/pull/678#issuecomment-1055555368) which prevents them from -> parsing quoted values properly inside `.env` files. 
- -> **Note** +> [!NOTE] > Especially on Linux, make sure your user has the [required permissions][linux-postinstall] to interact with the Docker > daemon. @@ -100,7 +107,7 @@ By default, the stack exposes the following ports: * 9300: Elasticsearch TCP transport * 5601: Kibana -> **Warning** +> [!WARNING] > Elasticsearch's [bootstrap checks][bootstrap-checks] were purposely disabled to facilitate the setup of the Elastic > stack in development environments. For production setups, we recommend users to set up their host according to the > instructions from the Elasticsearch documentation: [Important System Configuration][es-sys-config]. @@ -120,20 +127,31 @@ instructions from the [documentation][mac-filesharing] to add more locations. ## Usage -> **Warning** +> [!WARNING] > You must rebuild the stack images with `docker-compose build` whenever you switch branch or update the > [version](#version-selection) of an already existing stack. ### Bringing up the stack -Clone this repository onto the Docker host that will run the stack, then start the stack's services locally using Docker -Compose: +Clone this repository onto the Docker host that will run the stack with the command below: -```console -$ docker-compose up +```sh +git clone https://github.com/deviantony/docker-elk.git +``` + +Then, initialize the Elasticsearch users and groups required by docker-elk by executing the command: + +```sh +docker-compose up setup ``` -> **Note** +If everything went well and the setup completed without error, start the other stack components: + +```sh +docker-compose up +``` + +> [!NOTE] > You can also run all services in the background (detached mode) by appending the `-d` flag to the above command. 
Give Kibana about a minute to initialize, then access the Kibana web UI by opening in a web @@ -142,7 +160,7 @@ browser and use the following (default) credentials to log in: * user: *elastic* * password: *changeme* -> **Note** +> [!NOTE] > Upon the initial startup, the `elastic`, `logstash_internal` and `kibana_system` Elasticsearch users are intialized > with the values of the passwords defined in the [`.env`](.env) file (_"changeme"_ by default). The first one is the > [built-in superuser][builtin-users], the other two are used by Kibana and Logstash respectively to communicate with @@ -153,10 +171,10 @@ browser and use the following (default) credentials to log in: #### Setting up user authentication -> **Note** +> [!NOTE] > Refer to [Security settings in Elasticsearch][es-security] to disable authentication. -> **Warning** +> [!WARNING] > Starting with Elastic v8.0.0, it is no longer possible to run Kibana using the bootstraped privileged `elastic` user. The _"changeme"_ password set by default for all aforementioned users is **unsecure**. For increased security, we will @@ -167,16 +185,16 @@ reset the passwords of all aforementioned Elasticsearch users to random secrets. The commands below reset the passwords of the `elastic`, `logstash_internal` and `kibana_system` users. Take note of them. 
- ```console - $ docker-compose exec elasticsearch bin/elasticsearch-reset-password --batch --user elastic + ```sh + docker-compose exec elasticsearch bin/elasticsearch-reset-password --batch --user elastic ``` - ```console - $ docker-compose exec elasticsearch bin/elasticsearch-reset-password --batch --user logstash_internal + ```sh + docker-compose exec elasticsearch bin/elasticsearch-reset-password --batch --user logstash_internal ``` - ```console - $ docker-compose exec elasticsearch bin/elasticsearch-reset-password --batch --user kibana_system + ```sh + docker-compose exec elasticsearch bin/elasticsearch-reset-password --batch --user kibana_system ``` If the need for it arises (e.g. if you want to [collect monitoring information][ls-monitoring] through Beats and @@ -189,7 +207,7 @@ reset the passwords of all aforementioned Elasticsearch users to random secrets. Its value isn't used by any core component, but [extensions](#how-to-enable-the-provided-extensions) use it to connect to Elasticsearch. - > **Note** + > [!NOTE] > In case you don't plan on using any of the provided [extensions](#how-to-enable-the-provided-extensions), or > prefer to create your own roles and users to authenticate these services, it is safe to remove the > `ELASTIC_PASSWORD` entry from the `.env` file altogether after the stack has been initialized. @@ -204,31 +222,33 @@ reset the passwords of all aforementioned Elasticsearch users to random secrets. 1. Restart Logstash and Kibana to re-connect to Elasticsearch using the new passwords - ```console - $ docker-compose up -d logstash kibana + ```sh + docker-compose up -d logstash kibana ``` -> **Note** +> [!NOTE] > Learn more about the security of the Elastic stack at [Secure the Elastic Stack][sec-cluster]. 
#### Injecting data -Open the Kibana web UI by opening in a web browser and use the following credentials to log in: +Launch the Kibana web UI by opening in a web browser, and use the following credentials to log +in: * user: *elastic* * password: *\* -Now that the stack is fully configured, you can go ahead and inject some log entries. The shipped Logstash configuration -allows you to send content via TCP: +Now that the stack is fully configured, you can go ahead and inject some log entries. -```console -# Using BSD netcat (Debian, Ubuntu, MacOS system, ...) -$ cat /path/to/logfile.log | nc -q0 localhost 50000 -``` +The shipped Logstash configuration allows you to send data over the TCP port 50000. For example, you can use one of the +following commands — depending on your installed version of `nc` (Netcat) — to ingest the content of the log file +`/path/to/logfile.log` in Elasticsearch, via Logstash: -```console -# Using GNU netcat (CentOS, Fedora, MacOS Homebrew, ...) -$ cat /path/to/logfile.log | nc -c localhost 50000 +```sh +# Execute `nc -h` to determine your `nc` version + +cat /path/to/logfile.log | nc -q0 localhost 50000 # BSD +cat /path/to/logfile.log | nc -c localhost 50000 # GNU +cat /path/to/logfile.log | nc --send-only localhost 50000 # nmap ``` You can also load the sample data provided by your Kibana installation. @@ -239,8 +259,8 @@ Elasticsearch data is persisted inside a volume by default. In order to entirely shutdown the stack and remove all persisted data, use the following Docker Compose command: -```console -$ docker-compose down -v +```sh +docker-compose down -v ``` ### Version selection @@ -252,7 +272,7 @@ To use a different version of the core Elastic components, simply change the ver file. If you are upgrading an existing stack, remember to rebuild all container images using the `docker-compose build` command. 
-> **Warning** +> [!IMPORTANT] > Always pay attention to the [official upgrade instructions][upgrade] for each individual component before performing a > stack upgrade. @@ -264,7 +284,7 @@ Older major versions are also supported on separate branches: ## Configuration -> **Note** +> [!IMPORTANT] > Configuration is not dynamically reloaded, you will need to restart individual components after any configuration > change. @@ -319,11 +339,15 @@ containers: [Configuring Logstash for Docker][ls-docker]. ### How to disable paid features -Switch the value of Elasticsearch's `xpack.license.self_generated.type` setting from `trial` to `basic` (see [License -settings][license-settings]). +You can cancel an ongoing trial before its expiry date — and thus revert to a basic license — either from the [License +Management][license-mngmt] panel of Kibana, or using Elasticsearch's `start_basic` [Licensing API][license-apis]. Please +note that the second option is the only way to recover access to Kibana if the license isn't either switched to `basic` +or upgraded before the trial's expiry date. -You can also cancel an ongoing trial before its expiry date — and thus revert to a basic license — either from the -[License Management][license-mngmt] panel of Kibana, or using Elasticsearch's [Licensing APIs][license-apis]. +Changing the license type by switching the value of Elasticsearch's `xpack.license.self_generated.type` setting from +`trial` to `basic` (see [License settings][license-settings]) will only work **if done prior to the initial setup.** +After a trial has been started, the loss of features from `trial` to `basic` _must_ be acknowledged using one of the two +methods described in the first paragraph. 
 ### How to scale out the Elasticsearch cluster
 
@@ -332,21 +356,10 @@ Follow the instructions from the Wiki: [Scaling out Elasticsearch](https://githu
 ### How to re-execute the setup
 
 To run the setup container again and re-initialize all users for which a password was defined inside the `.env` file,
-delete its volume and "up" the `setup` Compose service again manually:
-
-```console
-$ docker-compose rm -f setup
- ⠿ Container docker-elk-setup-1 Removed
-```
-
-```console
-$ docker volume rm docker-elk_setup
-docker-elk_setup
-```
+simply "up" the `setup` Compose service again:
 
 ```console
 $ docker-compose up setup
- ⠿ Volume "docker-elk_setup" Created
 ⠿ Container docker-elk-elasticsearch-1 Running
 ⠿ Container docker-elk-setup-1 Created
 Attaching to docker-elk-setup-1
@@ -365,8 +378,8 @@ users][builtin-users]), you can use the Elasticsearch API instead and achieve th
 
 In the example below, we reset the password of the `elastic` user (notice "/user/elastic" in the URL):
 
-```console
-$ curl -XPOST -D- 'http://localhost:9200/_security/user/elastic/_password' \
+```sh
+curl -XPOST -D- 'http://localhost:9200/_security/user/elastic/_password' \
     -H 'Content-Type: application/json' \
     -u elastic:<your current elastic password> \
     -d '{"password" : "<your new password>"}'
@@ -402,7 +415,7 @@ variable, allowing the user to adjust the amount of memory that can be used by e
 | Elasticsearch | ES_JAVA_OPTS |
 | Logstash | LS_JAVA_OPTS |
 
-To accomodate environments where memory is scarce (Docker Desktop for Mac has only 2 GB available by default), the Heap
+To accommodate environments where memory is scarce (Docker Desktop for Mac has only 2 GB available by default), the Heap
 Size allocation is capped by default in the `docker-compose.yml` file to 512 MB for Elasticsearch and 256 MB for
 Logstash. If you want to override the default JVM configuration, edit the matching environment variable(s) in the
 `docker-compose.yml` file.
@@ -447,6 +460,7 @@ See the following Wiki pages: * [Popular integrations](https://github.com/deviantony/docker-elk/wiki/Popular-integrations) [elk-stack]: https://www.elastic.co/what-is/elk-stack +[elastic-docker]: https://www.docker.elastic.co/ [subscriptions]: https://www.elastic.co/subscriptions [es-security]: https://www.elastic.co/guide/en/elasticsearch/reference/current/security-settings.html [license-settings]: https://www.elastic.co/guide/en/elasticsearch/reference/current/license-settings.html @@ -464,8 +478,8 @@ See the following Wiki pages: [es-sys-config]: https://www.elastic.co/guide/en/elasticsearch/reference/current/system-config.html [es-heap]: https://www.elastic.co/guide/en/elasticsearch/reference/current/important-settings.html#heap-size-settings -[win-filesharing]: https://docs.docker.com/desktop/windows/#file-sharing -[mac-filesharing]: https://docs.docker.com/desktop/mac/#file-sharing +[win-filesharing]: https://docs.docker.com/desktop/settings/windows/#file-sharing +[mac-filesharing]: https://docs.docker.com/desktop/settings/mac/#file-sharing [builtin-users]: https://www.elastic.co/guide/en/elasticsearch/reference/current/built-in-users.html [ls-monitoring]: https://www.elastic.co/guide/en/logstash/current/monitoring-with-metricbeat.html diff --git a/docker-compose.yml b/docker-compose.yml index 4e519e85c4..99dced1b1f 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -4,12 +4,21 @@ services: # The 'setup' service runs a one-off script which initializes users inside # Elasticsearch — such as 'logstash_internal' and 'kibana_system' — with the - # values of the passwords defined in the '.env' file. + # values of the passwords defined in the '.env' file. It also creates the + # roles required by some of these users. # - # This task is only performed during the *initial* startup of the stack. On all - # subsequent runs, the service simply returns immediately, without performing - # any modification to existing users. 
+ # This task only needs to be performed once, during the *initial* startup of + # the stack. Any subsequent run will reset the passwords of existing users to + # the values defined inside the '.env' file, and the built-in roles to their + # default permissions. + # + # By default, it is excluded from the services started by 'docker compose up' + # due to the non-default profile it belongs to. To run it, either provide the + # '--profile=setup' CLI flag to Compose commands, or "up" the service by name + # such as 'docker compose up setup'. setup: + profiles: + - setup build: context: setup/ args: @@ -17,9 +26,8 @@ services: init: true volumes: - ./setup/entrypoint.sh:/entrypoint.sh:ro,Z - - ./setup/helpers.sh:/helpers.sh:ro,Z + - ./setup/lib.sh:/lib.sh:ro,Z - ./setup/roles:/roles:ro,Z - - setup:/state:Z environment: ELASTIC_PASSWORD: ${ELASTIC_PASSWORD:-} LOGSTASH_INTERNAL_PASSWORD: ${LOGSTASH_INTERNAL_PASSWORD:-} @@ -103,5 +111,4 @@ networks: driver: bridge volumes: - setup: elasticsearch: diff --git a/extensions/curator/Dockerfile b/extensions/curator/Dockerfile index 84ff5ade18..6cb8cdc681 100644 --- a/extensions/curator/Dockerfile +++ b/extensions/curator/Dockerfile @@ -1,17 +1,9 @@ -FROM bitnami/elasticsearch-curator:5.8.1 +FROM untergeek/curator:8.0.2 USER root -RUN install_packages cron && \ - echo \ - '* * * * *' \ - root \ - LC_ALL=C.UTF-8 LANG=C.UTF-8 \ - /opt/bitnami/python/bin/curator \ - --config=/usr/share/curator/config/curator.yml \ - /usr/share/curator/config/delete_log_files_curator.yml \ - '>/proc/1/fd/1' '2>/proc/1/fd/2' \ - >>/etc/crontab +RUN >>/var/spool/cron/crontabs/nobody \ + echo '* * * * * /curator/curator /.curator/delete_log_files_curator.yml' -ENTRYPOINT ["cron"] -CMD ["-f", "-L8"] +ENTRYPOINT ["crond"] +CMD ["-f", "-d8"] diff --git a/extensions/curator/config/curator.yml b/extensions/curator/config/curator.yml index f4cf8de3a9..6777edc9cb 100644 --- a/extensions/curator/config/curator.yml +++ b/extensions/curator/config/curator.yml 
@@ -1,11 +1,12 @@ # Curator configuration # https://www.elastic.co/guide/en/elasticsearch/client/curator/current/configfile.html -client: - hosts: - - elasticsearch - port: 9200 - http_auth: 'elastic:changeme' +elasticsearch: + client: + hosts: [ http://elasticsearch:9200 ] + other_settings: + username: elastic + password: ${ELASTIC_PASSWORD} logging: loglevel: INFO diff --git a/extensions/curator/curator-compose.yml b/extensions/curator/curator-compose.yml index 488cd8036c..1a4bb17e25 100644 --- a/extensions/curator/curator-compose.yml +++ b/extensions/curator/curator-compose.yml @@ -6,8 +6,10 @@ services: context: extensions/curator/ init: true volumes: - - ./extensions/curator/config/curator.yml:/usr/share/curator/config/curator.yml:ro,Z - - ./extensions/curator/config/delete_log_files_curator.yml:/usr/share/curator/config/delete_log_files_curator.yml:ro,Z + - ./extensions/curator/config/curator.yml:/.curator/curator.yml:ro,Z + - ./extensions/curator/config/delete_log_files_curator.yml:/.curator/delete_log_files_curator.yml:ro,Z + environment: + ELASTIC_PASSWORD: ${ELASTIC_PASSWORD:-} networks: - elk depends_on: diff --git a/extensions/enterprise-search/README.md b/extensions/enterprise-search/README.md index d6391dba67..eeafc9fd53 100644 --- a/extensions/enterprise-search/README.md +++ b/extensions/enterprise-search/README.md @@ -31,7 +31,7 @@ secret_management.encryption_keys: secret_management.encryption_keys: [my_first_encryption_key, my_second_encryption_key, ...] ``` -> **Note** +> [!NOTE] > To generate a strong random encryption key, you can use the OpenSSL utility or any other online/offline tool of your > choice: > @@ -96,7 +96,7 @@ enterprise-search: ENT_SEARCH_DEFAULT_PASSWORD: {{some strong password}} ``` -> **Warning** +> [!WARNING] > The default Enterprise Search password can only be set during the initial boot. Once the password is persisted in > Elasticsearch, it can only be changed via the Elasticsearch API. 
diff --git a/extensions/fleet/README.md b/extensions/fleet/README.md index de800857ad..d1cce04b57 100644 --- a/extensions/fleet/README.md +++ b/extensions/fleet/README.md @@ -1,6 +1,6 @@ # Fleet Server -> **Warning** +> [!WARNING] > This extension currently exists for preview purposes and should be considered **EXPERIMENTAL**. Expect regular changes > to the default Fleet settings, both in the Elastic Agent and Kibana. > diff --git a/extensions/fleet/fleet-compose.yml b/extensions/fleet/fleet-compose.yml index 5f19abc9bf..e33f47b0e6 100644 --- a/extensions/fleet/fleet-compose.yml +++ b/extensions/fleet/fleet-compose.yml @@ -11,6 +11,7 @@ services: environment: FLEET_SERVER_ENABLE: '1' FLEET_SERVER_INSECURE_HTTP: '1' + FLEET_SERVER_HOST: 0.0.0.0 FLEET_SERVER_POLICY_ID: fleet-server-policy # Fleet plugin in Kibana KIBANA_FLEET_SETUP: '1' diff --git a/kibana/config/kibana.yml b/kibana/config/kibana.yml index 45cd35715c..9d4e79ab44 100644 --- a/kibana/config/kibana.yml +++ b/kibana/config/kibana.yml @@ -14,6 +14,19 @@ monitoring.ui.container.logstash.enabled: true elasticsearch.username: kibana_system elasticsearch.password: ${KIBANA_SYSTEM_PASSWORD} +## Encryption keys (optional but highly recommended) +## +## Generate with either +## $ docker container run --rm docker.elastic.co/kibana/kibana:8.6.2 bin/kibana-encryption-keys generate +## $ openssl rand -hex 32 +## +## https://www.elastic.co/guide/en/kibana/current/using-kibana-with-security.html +## https://www.elastic.co/guide/en/kibana/current/kibana-encryption-keys.html +# +#xpack.security.encryptionKey: +#xpack.encryptedSavedObjects.encryptionKey: +#xpack.reporting.encryptionKey: + ## Fleet ## https://www.elastic.co/guide/en/kibana/current/fleet-settings-kb.html # diff --git a/setup/.dockerignore b/setup/.dockerignore index 02f2244078..c5dd1c85ad 100644 --- a/setup/.dockerignore +++ b/setup/.dockerignore @@ -7,6 +7,3 @@ Dockerfile # Ignore Git files .gitignore - -# Ignore setup state -state/ diff --git 
a/setup/.gitignore b/setup/.gitignore deleted file mode 100644 index a27475ad10..0000000000 --- a/setup/.gitignore +++ /dev/null @@ -1 +0,0 @@ -/state/ diff --git a/setup/Dockerfile b/setup/Dockerfile index 5365a99d1d..1cb7538995 100644 --- a/setup/Dockerfile +++ b/setup/Dockerfile @@ -3,13 +3,4 @@ ARG ELASTIC_VERSION # https://www.docker.elastic.co/ FROM docker.elastic.co/elasticsearch/elasticsearch:${ELASTIC_VERSION} -USER root - -RUN set -eux; \ - mkdir /state; \ - chmod 0775 /state; \ - chown elasticsearch:root /state - -USER elasticsearch:root - ENTRYPOINT ["/entrypoint.sh"] diff --git a/setup/entrypoint.sh b/setup/entrypoint.sh index 370068367f..ac79321a86 100755 --- a/setup/entrypoint.sh +++ b/setup/entrypoint.sh @@ -3,7 +3,7 @@ set -eu set -o pipefail -source "${BASH_SOURCE[0]%/*}"/helpers.sh +source "${BASH_SOURCE[0]%/*}"/lib.sh # -------------------------------------------------------- @@ -43,18 +43,6 @@ roles_files=( # -------------------------------------------------------- -echo "-------- $(date --rfc-3339=seconds) --------" - -state_file="${BASH_SOURCE[0]%/*}"/state/.done -if [[ -e "$state_file" ]]; then - declare state_birthtime - state_birthtime="$(stat -c '%Y' "$state_file")" - state_birthtime="$(date --rfc-3339=seconds --date="@${state_birthtime}")" - - log "Setup has already run successfully on ${state_birthtime}. Skipping" - exit 0 -fi - log 'Waiting for availability of Elasticsearch. This can take several minutes.' declare -i exit_code=0 @@ -129,6 +117,3 @@ for user in "${!users_passwords[@]}"; do create_user "$user" "${users_passwords[$user]}" "${users_roles[$user]}" fi done - -mkdir -p "${state_file%/*}" -touch "$state_file" diff --git a/setup/helpers.sh b/setup/lib.sh similarity index 100% rename from setup/helpers.sh rename to setup/lib.sh