Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

ES package log pipelines #4033

Merged
merged 20 commits into from
Sep 1, 2022
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Original file line number Diff line number Diff line change
@@ -0,0 +1,10 @@
# Elasticsearch settings for the package's local Docker test deployment.
# NOTE(review): network.host is an empty string — presumably so the more
# specific transport.host / http.host values below apply individually;
# confirm this is intentional and not a missing value.
network.host: ""
# Node-to-node transport bound to loopback only (single-node test container).
transport.host: "127.0.0.1"
# HTTP API listens on all interfaces so the container port can be published.
http.host: "0.0.0.0"
# Allow fielddata access on the _id field.
indices.id_field_data.enabled: true
# Self-generated trial license — presumably to unlock the X-Pack security
# and audit features enabled below.
xpack.license.self_generated.type: "trial"
xpack.security.enabled: true
xpack.security.authc.api_key.enabled: true
# Plain HTTP (no TLS) keeps the test clients simple.
xpack.security.http.ssl.enabled: false
# Disable GeoIP database downloads (avoids network access during tests).
ingest.geoip.downloader.enabled: false
# Audit logging on: this deployment exists to generate audit log entries.
xpack.security.audit.enabled: true
264 changes: 264 additions & 0 deletions packages/elasticsearch/_dev/deploy/docker/config/log4j2.properties

Large diffs are not rendered by default.

29 changes: 29 additions & 0 deletions packages/elasticsearch/_dev/deploy/docker/docker-compose.yml
Original file line number Diff line number Diff line change
@@ -0,0 +1,29 @@
# Docker Compose deployment for the elasticsearch package's system tests:
# a single-node Elasticsearch plus a sidecar that continuously generates
# log events via the HTTP API.
#
# Fix: the scraped copy had all leading indentation stripped, which makes
# the file invalid YAML; the canonical compose structure is reconstructed
# here with standard 2-space indentation.
version: '2.3'
services:
  elasticsearch:
    environment:
      - "ES_JAVA_OPTS=-Xms1g -Xmx1g"
      - "ELASTIC_PASSWORD=changeme"
    image: "docker.elastic.co/elasticsearch/elasticsearch:8.5.0-SNAPSHOT"
    healthcheck:
      # Poll with authenticated curl (-f: fail on HTTP errors) until the
      # HTTP API answers; up to 300 attempts at 1s intervals.
      test: ["CMD", "curl", "-f", "-u", "elastic:changeme", "http://127.0.0.1:9200/"]
      retries: 300
      interval: 1s
    volumes:
      - ./config/elasticsearch.yml:/usr/share/elasticsearch/config/elasticsearch.yml
      - ./config/log4j2.properties:/usr/share/elasticsearch/config/log4j2.properties
      # Expose the ES log files to the test framework's service-logs dir.
      - ${SERVICE_LOGS_DIR}:/usr/share/elasticsearch/logs
    ports:
      # Published on loopback only; host port 9201 avoids clashing with a
      # locally running Elasticsearch on 9200.
      - "127.0.0.1:9201:9200"
  logs_generation:
    depends_on:
      elasticsearch:
        condition: service_healthy
    environment:
      - "ES_SERVICE_HOST=http://elasticsearch:9200"
      - "ES_SERVICE_USERNAME=elastic"
      - "ES_SERVICE_PASSWORD=changeme"
    image: "alpine/curl:latest"
    # NOTE(review): alpine/curl typically sets curl as the image ENTRYPOINT;
    # if so, this command string is passed to curl rather than executed as a
    # script. Confirm the generator actually runs — an explicit
    # `entrypoint: ["/bin/sh", "/generate-logs.sh"]` would be unambiguous.
    command: "./generate-logs.sh"
    volumes:
      # Mounted at /generate-logs.sh; the relative ./generate-logs.sh in
      # `command` relies on the container working directory being /.
      - ./scripts/generate-logs.sh:/generate-logs.sh
129 changes: 129 additions & 0 deletions packages/elasticsearch/_dev/deploy/docker/scripts/generate-logs.sh
Original file line number Diff line number Diff line change
@@ -0,0 +1,129 @@
#!/bin/sh

# Sends queries to the elasticsearch service configured in _dev/deploy in order
# to generate all existing log types. `server` and `gc` logs will be generated
# without external trigger.
#
# Fixes in this revision:
#  - `echo -n` replaced by `printf '%s'`: -n is not portable under #!/bin/sh.
#  - Removed a stray trailing line-continuation backslash (followed by a
#    blank line) after the test_2 request — fragile accidental continuation.
#  - Quoted all variable expansions passed to curl.

set -e

# Pre-compute the value for the "Authorization: Basic" header.
# NOTE(review): assumes the base64 output fits on one line (short
# credentials); busybox base64 wraps long input — confirm if creds grow.
auth=$(printf '%s' "$ES_SERVICE_USERNAME:$ES_SERVICE_PASSWORD" | base64)

# create an index that will trace every indexing/searching operations
curl --request PUT \
  --url "$ES_SERVICE_HOST/test_1" \
  --header "Authorization: Basic $auth" \
  --header 'Content-Type: application/json' \
  --header 'X-Opaque-ID: myApp1' \
  --header 'traceparent: 00-0af7651916cd43dd8448eb211c80319c-b7ad6b7169203331-01'

# set index settings: thresholds at/near zero so every query and indexing
# operation shows up in the slow logs.
curl --request PUT \
  --url "$ES_SERVICE_HOST/test_1/_settings" \
  --header "Authorization: Basic $auth" \
  --header 'Content-Type: application/json' \
  --header 'X-Opaque-ID: myApp1' \
  --header 'traceparent: 00-0af7651916cd43dd8448eb211c80319c-b7ad6b7169203331-01' \
  --data '{"settings": {
    "index.search.slowlog.threshold.query.warn": "100ms",
    "index.search.slowlog.threshold.query.info": "100ms",
    "index.search.slowlog.threshold.query.debug": "0ms",
    "index.search.slowlog.threshold.query.trace": 0,
    "index.indexing.slowlog.threshold.index.trace": 0
    }
  }'

# Loop forever: the deployment keeps producing fresh log lines for the
# duration of the test run.
while true
do
  echo Generating slowlogs, audit and deprecation

  ## INDEXING SLOW LOG
  # index document
  curl --request POST \
    --url "$ES_SERVICE_HOST/test_1/_doc" \
    --header "Authorization: Basic $auth" \
    --header 'Content-Type: application/json' \
    --header 'X-Opaque-ID: myApp1' \
    --header 'traceparent: 00-0af7651916cd43dd8448eb211c80319c-b7ad6b7169203331-01' \
    --data '{
      "a":"b"
    }'

  ## SEARCH SLOW LOG
  # search with xopaqueid and trace.id
  curl --request GET \
    --url "$ES_SERVICE_HOST/test_1/_search" \
    --header "Authorization: Basic $auth" \
    --header 'Content-Type: application/json' \
    --header 'X-Opaque-ID: myApp1' \
    --header 'traceparent: 00-0af7651916cd43dd8448eb211c80319c-b7ad6b7169203331-01'

  # search with trace.id only
  curl --request GET \
    --url "$ES_SERVICE_HOST/test_1/_search" \
    --header "Authorization: Basic $auth" \
    --header 'Content-Type: application/json' \
    --header 'traceparent: 00-0af7651916cd43dd8448eb211c80319c-b7ad6b7169203331-01'

  # search without trace.id nor xopaqueid
  curl --request GET \
    --url "$ES_SERVICE_HOST/test_1/_search" \
    --header "Authorization: Basic $auth" \
    --header 'Content-Type: application/json'


  ## AUDIT LOG
  # access granted new index with ids
  curl --request PUT \
    --url "$ES_SERVICE_HOST/test_1" \
    --header "Authorization: Basic $auth" \
    --header 'X-Opaque-ID: myApp1' \
    --header 'traceparent: 00-0af7651916cd43dd8448eb211c80319c-b7ad6b7169203331-01'

  # access granted new index no ids
  curl --request PUT \
    --url "$ES_SERVICE_HOST/test_2" \
    --header "Authorization: Basic $auth"

  # anonymous access denied
  # (no --fail flag: the expected 401/403 must not abort the script, set -e)
  curl -s --request PUT \
    --url "$ES_SERVICE_HOST/test_3" \
    --header 'X-Opaque-ID: myApp1' \
    --header 'traceparent: 00-0af7651916cd43dd8448eb211c80319c-b7ad6b7169203331-01'

  ## DEPRECATION LOGS
  # data path deprecation warning
  curl --request PUT \
    --url "$ES_SERVICE_HOST/testindex2/" \
    --header "Authorization: Basic $auth" \
    --header 'Content-Type: application/json' \
    --header 'X-Opaque-Id: myAppId' \
    --header 'traceparent: 00-0af7651916cd43dd8448eb211c80319c-b7ad6b7169203331-01' \
    --data '{
      "settings" : {
        "index" : {
          "number_of_shards" : 3,
          "number_of_replicas" : 1,
          "data_path": "/tmp/dummy"
        }
      }
    }'

  # merge at once deprecation critical
  curl --request PUT \
    --url "$ES_SERVICE_HOST/testindex2/" \
    --header "Authorization: Basic $auth" \
    --header 'Content-Type: application/json' \
    --header 'X-Opaque-Id: myAppId' \
    --header 'traceparent: 00-0af7651916cd43dd8448eb211c80319c-b7ad6b7169203331-01' \
    --data '{
      "settings" : {
        "index" : {
          "number_of_shards" : 3,
          "number_of_replicas" : 1,
          "merge.policy.max_merge_at_once_explicit": 20
        }
      }
    }'

  sleep 5
done
Original file line number Diff line number Diff line change
@@ -1,71 +1,69 @@
---
description: Pipeline for parsing elasticsearch audit logs
processors:
- set:
field: event.ingested
value: '{{_ingest.timestamp}}'
- rename:
field: '@timestamp'
target_field: event.created
- grok:
field: message
patterns:
- ^%{CHAR:first_char}
pattern_definitions:
CHAR: .
- pipeline:
if: ctx.first_char != '{'
name: '{{ IngestPipeline "pipeline-plaintext" }}'
- pipeline:
if: ctx.first_char == '{'
name: '{{ IngestPipeline "pipeline-json" }}'
- set:
field: event.kind
value: event
- set:
field: event.category
value: database
- set:
if: "ctx?.elasticsearch?.audit?.event_type != null"
field: event.type
value: access
- script:
lang: painless
source: >-
def successEvents = ['authentication_success', 'access_granted', 'run_as_granted', 'connection_granted'];
if (ctx?.elasticsearch?.audit?.event_type != null && successEvents.contains(ctx.elasticsearch.audit.event_type)) {
ctx.event.outcome = 'success';
} else {
ctx.event.outcome = 'failure';
}
if (ctx?.event.action != null && successEvents.contains(ctx.event.action)) {
ctx.event.outcome = 'success';
} else {
ctx.event.outcome = 'failure';
}

- set:
field: host.id
value: "{{elasticsearch.node.id}}"
ignore_empty_value: true
- set:
field: host.name
value: "{{elasticsearch.node.name}}"
ignore_empty_value: true
- append:
field: related.user
value: "{{user.name}}"
if: "ctx?.user?.name != null"
- append:
field: related.user
value: "{{user.effective.name}}"
if: "ctx?.user?.effective?.name != null"
- remove:
field: elasticsearch.audit.@timestamp
- remove:
field:
- first_char
- set:
field: event.ingested
value: "{{_ingest.timestamp}}"
- set:
copy_from: "@timestamp"
field: event.created
- grok:
field: message
patterns:
- ^%{CHAR:first_char}
pattern_definitions:
CHAR: .
- drop:
if: ctx.first_char != '{'
- pipeline:
if: ctx.first_char == '{'
name: '{{ IngestPipeline "pipeline-json" }}'
- set:
field: event.kind
value: event
- set:
field: event.category
value: database
- set:
if: "ctx?.elasticsearch?.audit?.event_type != null"
field: event.type
value: access
- script:
lang: painless
source: >-
def successEvents = ['authentication_success', 'access_granted', 'run_as_granted', 'connection_granted'];
if (ctx?.elasticsearch?.audit?.event_type != null && successEvents.contains(ctx.elasticsearch.audit.event_type)) {
ctx.event.outcome = 'success';
} else {
ctx.event.outcome = 'failure';
}
if (ctx?.event.action != null && successEvents.contains(ctx.event.action)) {
ctx.event.outcome = 'success';
} else {
ctx.event.outcome = 'failure';
}
- set:
field: host.id
value: "{{elasticsearch.node.id}}"
ignore_empty_value: true
- set:
field: host.name
value: "{{elasticsearch.node.name}}"
ignore_empty_value: true
- append:
field: related.user
value: "{{user.name}}"
if: "ctx?.user?.name != null"
- append:
field: related.user
value: "{{user.effective.name}}"
if: "ctx?.user?.effective?.name != null"
- remove:
field: elasticsearch.audit.@timestamp
- remove:
field:
- first_char
on_failure:
- set:
field: error.message
value: '{{ _ingest.on_failure_message }}'
- set:
field: error.message
value: "{{ _ingest.on_failure_message }}"
Original file line number Diff line number Diff line change
Expand Up @@ -4,6 +4,13 @@ processors:
- json:
field: message
target_field: elasticsearch.audit
- dot_expander:
field: event.type
path: elasticsearch.audit
- drop:
if: ctx.elasticsearch.audit.containsKey('type') && ctx.elasticsearch.audit.type != 'audit'
- drop:
if: '!ctx.elasticsearch.audit.containsKey("type") && !["rest", "transport", "ip_filter", "security_config_change"].contains(ctx.elasticsearch?.audit?.event?.type)'
- remove:
field: elasticsearch.audit.type
ignore_missing: true
Expand All @@ -26,13 +33,13 @@ processors:
- dot_expander:
field: event.action
path: elasticsearch.audit
- remove:
field: event.action
ignore_missing: true
- rename:
field: elasticsearch.audit.event.action
target_field: event.action
ignore_missing: true
- dot_expander:
field: event.type
path: elasticsearch.audit
- rename:
field: elasticsearch.audit.event.type
target_field: elasticsearch.audit.layer
Expand All @@ -46,6 +53,9 @@ processors:
- \[%{IPORHOST:source.ip}\]:%{INT:source.port:int}
- "%{IPORHOST:source.ip}:%{INT:source.port:int}"
ignore_missing: true
- remove:
field: source.address
ignore_missing: true
- rename:
field: elasticsearch.audit.origin.address
target_field: source.address
Expand Down Expand Up @@ -77,6 +87,9 @@ processors:
- dot_expander:
field: node.name
path: elasticsearch.audit
- remove:
field: elasticsearch.node
ignore_missing: true
- rename:
field: elasticsearch.audit.node
target_field: elasticsearch.node
Expand Down Expand Up @@ -157,7 +170,7 @@ processors:
path: elasticsearch.audit
- set:
field: http.request.id
copy_from: elasticsearch.audit.request.id
value: "{{{elasticsearch.audit.request.id}}}"
ignore_empty_value: true
- dot_expander:
field: cluster.name
Expand All @@ -177,12 +190,29 @@ processors:
field: elasticsearch.audit.level
target_field: log.level
ignore_missing: true
- set:
field: log.level
value: info
override: false
- dot_expander:
field: trace.id
path: elasticsearch.audit
- rename:
field: elasticsearch.audit.trace.id
target_field: trace.id
ignore_missing: true
- remove:
field: elasticsearch.audit.trace.id
ignore_missing: true
- date:
field: elasticsearch.audit.@timestamp
target_field: "@timestamp"
formats:
- ISO8601
ignore_failure: true
- set:
field: service.type
value: 'elasticsearch'
on_failure:
- set:
field: error.message
Expand Down
Loading