From 0685375478466e81811700380936a1aaab37048b Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?No=C3=A9mi=20V=C3=A1nyi?=
Date: Mon, 12 Oct 2020 10:53:59 +0200
Subject: [PATCH] Add configuration of filestream input (#21565)

(cherry picked from commit d35dfb53111a86ed0e484f8450ffcf1474be60e8)
---
 .../config/filebeat.inputs.reference.yml.tmpl | 140 ++++++++++++++++++
 .../_meta/config/filebeat.inputs.yml.tmpl     |  29 ++++
 filebeat/filebeat.reference.yml               | 140 ++++++++++++++++++
 filebeat/filebeat.yml                         |  29 ++++
 filebeat/input/filestream/config.go           |  18 +--
 filebeat/input/filestream/input.go            |  16 +-
 x-pack/filebeat/filebeat.reference.yml        | 140 ++++++++++++++++++
 x-pack/filebeat/filebeat.yml                  |  29 ++++
 8 files changed, 524 insertions(+), 17 deletions(-)

diff --git a/filebeat/_meta/config/filebeat.inputs.reference.yml.tmpl b/filebeat/_meta/config/filebeat.inputs.reference.yml.tmpl
index c920b7dbec8..7eceb559f16 100644
--- a/filebeat/_meta/config/filebeat.inputs.reference.yml.tmpl
+++ b/filebeat/_meta/config/filebeat.inputs.reference.yml.tmpl
@@ -11,6 +11,7 @@ filebeat.inputs:
 #
 # Possible options are:
 # * log: Reads every line of the log file (default)
+# * filestream: Improved version of the log input. Experimental.
 # * stdin: Reads the standard in
 
 #------------------------------ Log input --------------------------------
@@ -231,6 +232,145 @@ filebeat.inputs:
   # Defines if inputs is enabled
   #enabled: true
 
+#--------------------------- Filestream input ----------------------------
+- type: filestream
+
+  # Change to true to enable this input configuration.
+  enabled: false
+
+  # Paths that should be crawled and fetched. Glob based paths.
+  # To fetch all ".log" files from a specific level of subdirectories
+  # /var/log/*/*.log can be used.
+  # For each file found under this path, a harvester is started.
+  # Make sure no file is defined twice as this can lead to unexpected behaviour.
+  paths:
+    - /var/log/*.log
+    #- c:\programdata\elasticsearch\logs\*
+
+  # Configure the file encoding for reading files with international characters
+  # following the W3C recommendation for HTML5 (http://www.w3.org/TR/encoding).
+  # Some sample encodings:
+  #   plain, utf-8, utf-16be-bom, utf-16be, utf-16le, big5, gb18030, gbk,
+  #   hz-gb-2312, euc-kr, euc-jp, iso-2022-jp, shift-jis, ...
+  #encoding: plain
+
+
+  # Exclude lines. A list of regular expressions to match. It drops the lines that
+  # match any regular expression from the list. include_lines is called before
+  # exclude_lines. By default, no lines are dropped.
+  #exclude_lines: ['^DBG']
+
+  # Include lines. A list of regular expressions to match. It exports the lines that
+  # match any regular expression from the list. include_lines is called before
+  # exclude_lines. By default, all the lines are exported.
+  #include_lines: ['^ERR', '^WARN']
+
+  ### Prospector options
+
+  # How often the input checks for new files in the paths that are specified
+  # for harvesting. Specify 1s to scan the directory as frequently as possible
+  # without causing excessive load. Default: 10s.
+  #prospector.scanner.check_interval: 10s
+
+  # Exclude files. A list of regular expressions to match. Filebeat drops the files that
+  # match any regular expression from the list. By default, no files are dropped.
+  #prospector.scanner.exclude_files: ['.gz$']
+
+  # Expand "**" patterns into regular glob patterns.
+  #prospector.scanner.recursive_glob: true
+
+  # If symlinks is enabled, symlinks are opened and harvested. The harvester opens and
+  # reads the original file, but reports the symlink name as the source.
+  #prospector.scanner.symlinks: false
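A note on the recursive_glob option above: Go's filepath.Glob has no "**" support, so a pattern like /var/log/**/*.log has to be expanded into plain globs first, one per directory depth. A minimal Go sketch of one way such an expansion can work — the function name and the depth limit are illustrative assumptions, not code from this patch:

```go
package main

import (
	"fmt"
	"path/filepath"
	"strings"
)

// expandRecursiveGlob rewrites a pattern containing "**" into plain glob
// patterns, one per directory depth, up to maxDepth levels deep.
// "/var/log/**/*.log" becomes "/var/log/*.log", "/var/log/*/*.log", ...
func expandRecursiveGlob(pattern string, maxDepth int) []string {
	i := strings.Index(pattern, "**")
	if i < 0 {
		return []string{pattern} // nothing to expand
	}
	prefix := pattern[:i]
	suffix := strings.TrimPrefix(pattern[i+2:], "/")
	patterns := make([]string, 0, maxDepth+1)
	for depth := 0; depth <= maxDepth; depth++ {
		patterns = append(patterns, prefix+strings.Repeat("*/", depth)+suffix)
	}
	return patterns
}

func main() {
	for _, p := range expandRecursiveGlob("/var/log/**/*.log", 3) {
		matches, err := filepath.Glob(p) // each expanded pattern is a plain glob
		fmt.Println(p, matches, err)
	}
}
```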
+
+  ### State options
+
+  # If a file's modification time is older than clean_inactive, its state is
+  # removed from the registry. By default this is disabled.
+  #clean_inactive: 0
+
+  # Immediately removes the state of files which cannot be found on disk anymore
+  #clean_removed: true
+
+  # Method to determine if two files are the same or not. By default
+  # the Beat considers two files the same if their inode and device id are the same.
+  #file_identity.native: ~
+
+  # Optional additional fields. These fields can be freely picked
+  # to add additional information to the crawled log files for filtering
+  #fields:
+  #  level: debug
+  #  review: 1
+
+  # Set to true to publish fields with null values in events.
+  #keep_null: false
+
+  # By default, all events contain `host.name`. This option can be set to true
+  # to disable the addition of this field to all events. The default value is
+  # false.
+  #publisher_pipeline.disable_host: false
+
+  # Ignore files which were modified more than the defined timespan in the past.
+  # ignore_older is disabled by default (set to 0), so no files are ignored.
+  # Time strings like 2h (2 hours), 5m (5 minutes) can be used.
+  #ignore_older: 0
+
+  # Defines the buffer size every harvester uses when fetching the file
+  #harvester_buffer_size: 16384
+
+  # Maximum number of bytes a single log event can have.
+  # All bytes after message_max_bytes are discarded and not sent. The default is 10MB.
+  # This is especially useful for multiline log messages which can get large.
+  #message_max_bytes: 10485760
+
+  # Characters which separate the lines. Valid values: auto, line_feed, vertical_tab, form_feed,
+  # carriage_return, carriage_return_line_feed, next_line, line_separator, paragraph_separator.
+  #line_terminator: auto
+
+  # The Ingest Node pipeline ID associated with this input. If this is set, it
+  # overwrites the pipeline option from the Elasticsearch output.
+  #pipeline:
+
+  # Backoff values define how aggressively Filebeat checks files for updates.
+  # The default values can be used in most cases. backoff.init defines how long Filebeat
+  # waits before checking a file again after EOF is reached. The default is 1s, which means
+  # the file is checked every second for new lines. This leads to near real-time crawling.
+  # Every time a new line appears, the backoff is reset to the initial value.
+  #backoff.init: 1s
+
+  # backoff.max defines the maximum backoff time. After having backed off multiple times
+  # from checking a file, the waiting time never exceeds backoff.max, independent of the
+  # backoff factor. Setting it to 10s means that, in the worst case, a new line appended
+  # to a log file is read after at most 10s.
+  #backoff.max: 10s
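The backoff.init/backoff.max interplay described above amounts to a capped exponential wait. A small self-contained sketch of that behaviour, assuming a doubling factor (the factor is an assumption; this patch does not expose one):

```go
package main

import (
	"fmt"
	"time"
)

// backoff models the wait-after-EOF behaviour described above: start at
// init, grow on every idle check, never exceed max, reset on new data.
type backoff struct {
	init, max, current time.Duration
}

func newBackoff(init, max time.Duration) *backoff {
	return &backoff{init: init, max: max, current: init}
}

// Wait returns the current delay and grows it for the next idle check.
func (b *backoff) Wait() time.Duration {
	d := b.current
	if b.current *= 2; b.current > b.max {
		b.current = b.max
	}
	return d
}

// Reset is called whenever a new line was read.
func (b *backoff) Reset() { b.current = b.init }

func main() {
	b := newBackoff(time.Second, 10*time.Second)
	for i := 0; i < 6; i++ {
		fmt.Println(b.Wait()) // 1s 2s 4s 8s 10s 10s
	}
	b.Reset()
	fmt.Println(b.Wait()) // 1s again after new data
}
```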
+
+  ### Harvester closing options
+
+  # Close inactive closes the file handler after the predefined period.
+  # The period starts when the last line of the file was read, not from the file's ModTime.
+  # Time strings like 2h (2 hours), 5m (5 minutes) can be used.
+  #close.on_state_change.inactive: 5m
+
+  # Close renamed closes a file handler when the file is renamed or rotated.
+  # Note: Potential data loss. Make sure to read and understand the docs for this option.
+  #close.on_state_change.renamed: false
+
+  # When enabling this option, a file handler is closed immediately in case a file can't be found
+  # any more. If the file shows up again later, harvesting continues from the last
+  # known position after the next scan (prospector.scanner.check_interval).
+  #close.on_state_change.removed: true
+
+  # Closes the file handler as soon as the harvester reaches the end of the file.
+  # By default this option is disabled.
+  # Note: Potential data loss. Make sure to read and understand the docs for this option.
+  #close.reader.eof: false
+
+  # Close timeout closes the harvester after the predefined time.
+  # This happens regardless of whether the harvester has finished reading the file.
+  # By default this option is disabled.
+  # Note: Potential data loss. Make sure to read and understand the docs for this option.
+  #close.reader.after_interval: 0
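For the close.on_state_change.* options, the underlying check is essentially "does the path still point at the file I have open?". A hedged Go sketch using os.Stat and os.SameFile — the function and its result names are hypothetical, not taken from the filestream implementation:

```go
package main

import (
	"fmt"
	"os"
)

// fileStatus reports whether the file a harvester holds open has been
// removed or renamed. Illustrative only.
func fileStatus(f *os.File) (removed, renamed bool, err error) {
	openInfo, err := f.Stat() // metadata of the handle we hold open
	if err != nil {
		return false, false, err
	}
	pathInfo, err := os.Stat(f.Name()) // metadata of the original path
	if os.IsNotExist(err) {
		return true, false, nil // path is gone: treat as removed
	}
	if err != nil {
		return false, false, err
	}
	// Path still exists but points at a different file: it was rotated.
	return false, !os.SameFile(openInfo, pathInfo), nil
}

func main() {
	f, err := os.Open("/var/log/syslog")
	if err != nil {
		fmt.Println("open:", err)
		return
	}
	defer f.Close()
	removed, renamed, err := fileStatus(f)
	fmt.Println("removed:", removed, "renamed:", renamed, "err:", err)
}
```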
+
 #----------------------------- Stdin input -------------------------------
 # Configuration to use stdin input
 #- type: stdin
diff --git a/filebeat/_meta/config/filebeat.inputs.yml.tmpl b/filebeat/_meta/config/filebeat.inputs.yml.tmpl
index a7bd1b5eaa6..70d52cbb9c6 100644
--- a/filebeat/_meta/config/filebeat.inputs.yml.tmpl
+++ b/filebeat/_meta/config/filebeat.inputs.yml.tmpl
@@ -49,3 +49,32 @@ filebeat.inputs:
   # that was (not) matched before or after or as long as a pattern is not matched based on negate.
   # Note: After is the equivalent to previous and before is the equivalent to to next in Logstash
   #multiline.match: after
+
+# filestream is an experimental input. It will replace the log input in the future.
+- type: filestream
+
+  # Change to true to enable this input configuration.
+  enabled: false
+
+  # Paths that should be crawled and fetched. Glob based paths.
+  paths:
+    - /var/log/*.log
+    #- c:\programdata\elasticsearch\logs\*
+
+  # Exclude lines. A list of regular expressions to match. It drops the lines that
+  # match any regular expression from the list.
+  #exclude_lines: ['^DBG']
+
+  # Include lines. A list of regular expressions to match. It exports the lines that
+  # match any regular expression from the list.
+  #include_lines: ['^ERR', '^WARN']
+
+  # Exclude files. A list of regular expressions to match. Filebeat drops the files that
+  # match any regular expression from the list. By default, no files are dropped.
+  #prospector.scanner.exclude_files: ['.gz$']
+
+  # Optional additional fields. These fields can be freely picked
+  # to add additional information to the crawled log files for filtering
+  #fields:
+  #  level: debug
+  #  review: 1
diff --git a/filebeat/filebeat.reference.yml b/filebeat/filebeat.reference.yml
index 0218c3d46af..9a3fd9f0d26 100644
--- a/filebeat/filebeat.reference.yml
+++ b/filebeat/filebeat.reference.yml
@@ -398,6 +398,7 @@ filebeat.inputs:
 #
 # Possible options are:
 # * log: Reads every line of the log file (default)
+# * filestream: Improved version of the log input. Experimental.
 # * stdin: Reads the standard in
 
 #------------------------------ Log input --------------------------------
@@ -618,6 +619,145 @@ filebeat.inputs:
   # Defines if inputs is enabled
   #enabled: true
 
+#--------------------------- Filestream input ----------------------------
+- type: filestream
+
+  # Change to true to enable this input configuration.
+  enabled: false
+
+  # Paths that should be crawled and fetched. Glob based paths.
+  # To fetch all ".log" files from a specific level of subdirectories
+  # /var/log/*/*.log can be used.
+  # For each file found under this path, a harvester is started.
+  # Make sure no file is defined twice as this can lead to unexpected behaviour.
+  paths:
+    - /var/log/*.log
+    #- c:\programdata\elasticsearch\logs\*
+
+  # Configure the file encoding for reading files with international characters
+  # following the W3C recommendation for HTML5 (http://www.w3.org/TR/encoding).
+  # Some sample encodings:
+  #   plain, utf-8, utf-16be-bom, utf-16be, utf-16le, big5, gb18030, gbk,
+  #   hz-gb-2312, euc-kr, euc-jp, iso-2022-jp, shift-jis, ...
+  #encoding: plain
+
+
+  # Exclude lines. A list of regular expressions to match. It drops the lines that
+  # match any regular expression from the list. include_lines is called before
+  # exclude_lines. By default, no lines are dropped.
+  #exclude_lines: ['^DBG']
+
+  # Include lines. A list of regular expressions to match. It exports the lines that
+  # match any regular expression from the list. include_lines is called before
+  # exclude_lines. By default, all the lines are exported.
+  #include_lines: ['^ERR', '^WARN']
+
+  ### Prospector options
+
+  # How often the input checks for new files in the paths that are specified
+  # for harvesting. Specify 1s to scan the directory as frequently as possible
+  # without causing excessive load. Default: 10s.
+  #prospector.scanner.check_interval: 10s
+
+  # Exclude files. A list of regular expressions to match. Filebeat drops the files that
+  # match any regular expression from the list. By default, no files are dropped.
+  #prospector.scanner.exclude_files: ['.gz$']
+
+  # Expand "**" patterns into regular glob patterns.
+  #prospector.scanner.recursive_glob: true
+
+  # If symlinks is enabled, symlinks are opened and harvested. The harvester opens and
+  # reads the original file, but reports the symlink name as the source.
+  #prospector.scanner.symlinks: false
+
+  ### State options
+
+  # If a file's modification time is older than clean_inactive, its state is
+  # removed from the registry. By default this is disabled.
+  #clean_inactive: 0
+
+  # Immediately removes the state of files which cannot be found on disk anymore
+  #clean_removed: true
+
+  # Method to determine if two files are the same or not. By default
+  # the Beat considers two files the same if their inode and device id are the same.
+  #file_identity.native: ~
+
+  # Optional additional fields. These fields can be freely picked
+  # to add additional information to the crawled log files for filtering
+  #fields:
+  #  level: debug
+  #  review: 1
+
+  # Set to true to publish fields with null values in events.
+  #keep_null: false
+
+  # By default, all events contain `host.name`. This option can be set to true
+  # to disable the addition of this field to all events. The default value is
+  # false.
+  #publisher_pipeline.disable_host: false
+
+  # Ignore files which were modified more than the defined timespan in the past.
+  # ignore_older is disabled by default (set to 0), so no files are ignored.
+  # Time strings like 2h (2 hours), 5m (5 minutes) can be used.
+  #ignore_older: 0
+
+  # Defines the buffer size every harvester uses when fetching the file
+  #harvester_buffer_size: 16384
+
+  # Maximum number of bytes a single log event can have.
+  # All bytes after message_max_bytes are discarded and not sent. The default is 10MB.
+  # This is especially useful for multiline log messages which can get large.
+  #message_max_bytes: 10485760
+
+  # Characters which separate the lines. Valid values: auto, line_feed, vertical_tab, form_feed,
+  # carriage_return, carriage_return_line_feed, next_line, line_separator, paragraph_separator.
+  #line_terminator: auto
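Because line_terminator accepts terminators other than a plain newline, a reader has to split on whatever the configuration names. A minimal bufio.SplitFunc sketch for the carriage_return_line_feed case — illustrative only, not filestream's actual reader:

```go
package main

import (
	"bufio"
	"bytes"
	"fmt"
	"strings"
)

// splitCRLF treats "\r\n" as the line terminator, roughly what a
// carriage_return_line_feed setting implies.
func splitCRLF(data []byte, atEOF bool) (advance int, token []byte, err error) {
	if i := bytes.Index(data, []byte("\r\n")); i >= 0 {
		return i + 2, data[:i], nil
	}
	if atEOF && len(data) > 0 {
		return len(data), data, nil // trailing data without a terminator
	}
	return 0, nil, nil // request more data
}

func main() {
	sc := bufio.NewScanner(strings.NewReader("one\r\ntwo\r\nthree"))
	sc.Split(splitCRLF)
	for sc.Scan() {
		fmt.Printf("%q\n", sc.Text())
	}
}
```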
+
+  # The Ingest Node pipeline ID associated with this input. If this is set, it
+  # overwrites the pipeline option from the Elasticsearch output.
+  #pipeline:
+
+  # Backoff values define how aggressively Filebeat checks files for updates.
+  # The default values can be used in most cases. backoff.init defines how long Filebeat
+  # waits before checking a file again after EOF is reached. The default is 1s, which means
+  # the file is checked every second for new lines. This leads to near real-time crawling.
+  # Every time a new line appears, the backoff is reset to the initial value.
+  #backoff.init: 1s
+
+  # backoff.max defines the maximum backoff time. After having backed off multiple times
+  # from checking a file, the waiting time never exceeds backoff.max, independent of the
+  # backoff factor. Setting it to 10s means that, in the worst case, a new line appended
+  # to a log file is read after at most 10s.
+  #backoff.max: 10s
+
+  ### Harvester closing options
+
+  # Close inactive closes the file handler after the predefined period.
+  # The period starts when the last line of the file was read, not from the file's ModTime.
+  # Time strings like 2h (2 hours), 5m (5 minutes) can be used.
+  #close.on_state_change.inactive: 5m
+
+  # Close renamed closes a file handler when the file is renamed or rotated.
+  # Note: Potential data loss. Make sure to read and understand the docs for this option.
+  #close.on_state_change.renamed: false
+
+  # When enabling this option, a file handler is closed immediately in case a file can't be found
+  # any more. If the file shows up again later, harvesting continues from the last
+  # known position after the next scan (prospector.scanner.check_interval).
+  #close.on_state_change.removed: true
+
+  # Closes the file handler as soon as the harvester reaches the end of the file.
+  # By default this option is disabled.
+  # Note: Potential data loss. Make sure to read and understand the docs for this option.
+  #close.reader.eof: false
+
+  # Close timeout closes the harvester after the predefined time.
+  # This happens regardless of whether the harvester has finished reading the file.
+  # By default this option is disabled.
+  # Note: Potential data loss. Make sure to read and understand the docs for this option.
+  #close.reader.after_interval: 0
+
 #----------------------------- Stdin input -------------------------------
 # Configuration to use stdin input
 #- type: stdin
diff --git a/filebeat/filebeat.yml b/filebeat/filebeat.yml
index 2a3e678c542..9a29bc76962 100644
--- a/filebeat/filebeat.yml
+++ b/filebeat/filebeat.yml
@@ -62,6 +62,35 @@ filebeat.inputs:
   # Note: After is the equivalent to previous and before is the equivalent to to next in Logstash
   #multiline.match: after
 
+# filestream is an experimental input. It will replace the log input in the future.
+- type: filestream
+
+  # Change to true to enable this input configuration.
+  enabled: false
+
+  # Paths that should be crawled and fetched. Glob based paths.
+  paths:
+    - /var/log/*.log
+    #- c:\programdata\elasticsearch\logs\*
+
+  # Exclude lines. A list of regular expressions to match. It drops the lines that
+  # match any regular expression from the list.
+  #exclude_lines: ['^DBG']
+
+  # Include lines. A list of regular expressions to match. It exports the lines that
+  # match any regular expression from the list.
+  #include_lines: ['^ERR', '^WARN']
+
+  # Exclude files. A list of regular expressions to match. Filebeat drops the files that
+  # match any regular expression from the list. By default, no files are dropped.
+  #prospector.scanner.exclude_files: ['.gz$']
+
+  # Optional additional fields. These fields can be freely picked
+  # to add additional information to the crawled log files for filtering
+  #fields:
+  #  level: debug
+  #  review: 1
 
 # ============================== Filebeat modules ==============================
 
 filebeat.config.modules:
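The short config above repeats the include_lines/exclude_lines contract spelled out in the reference file: include_lines is applied before exclude_lines. A compact Go sketch of that ordering — the helper name is made up for illustration:

```go
package main

import (
	"fmt"
	"regexp"
)

// keepLine applies include_lines before exclude_lines: a line must match
// one include pattern (if any are set) and must not match any exclude
// pattern.
func keepLine(line string, include, exclude []*regexp.Regexp) bool {
	if len(include) > 0 {
		ok := false
		for _, re := range include {
			if re.MatchString(line) {
				ok = true
				break
			}
		}
		if !ok {
			return false
		}
	}
	for _, re := range exclude {
		if re.MatchString(line) {
			return false
		}
	}
	return true
}

func main() {
	include := []*regexp.Regexp{regexp.MustCompile(`^ERR`), regexp.MustCompile(`^WARN`)}
	exclude := []*regexp.Regexp{regexp.MustCompile(`^DBG`)}
	for _, l := range []string{"ERR disk full", "DBG noise", "INFO started"} {
		fmt.Println(l, "->", keepLine(l, include, exclude))
	}
}
```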
diff --git a/filebeat/input/filestream/config.go b/filebeat/input/filestream/config.go
index 3ec076196f0..c2b1e838ee5 100644
--- a/filebeat/input/filestream/config.go
+++ b/filebeat/input/filestream/config.go
@@ -30,10 +30,11 @@ import (
 
 // Config stores the options of a file stream.
 type config struct {
+	readerConfig
+
 	Paths         []string                `config:"paths"`
 	Close         closerConfig            `config:"close"`
 	FileWatcher   *common.ConfigNamespace `config:"file_watcher"`
-	Reader        readerConfig            `config:"readers"`
 	FileIdentity  *common.ConfigNamespace `config:"file_identity"`
 	CleanInactive time.Duration           `config:"clean_inactive" validate:"min=0"`
 	CleanRemoved  bool                    `config:"clean_removed"`
@@ -47,18 +48,17 @@ type closerConfig struct {
 }
 
 type readerCloserConfig struct {
-	AfterInterval time.Duration
-	OnEOF         bool
+	AfterInterval time.Duration `config:"after_interval"`
+	OnEOF         bool          `config:"on_eof"`
 }
 
 type stateChangeCloserConfig struct {
-	CheckInterval time.Duration
-	Inactive      time.Duration
-	Removed       bool
-	Renamed       bool
+	CheckInterval time.Duration `config:"check_interval" validate:"nonzero"`
+	Inactive      time.Duration `config:"inactive"`
+	Removed       bool          `config:"removed"`
+	Renamed       bool          `config:"renamed"`
 }
 
-// TODO should this be inline?
 type readerConfig struct {
 	Backoff       backoffConfig   `config:"backoff"`
 	BufferSize    int             `config:"buffer_size"`
@@ -79,9 +79,9 @@ type backoffConfig struct {
 
 func defaultConfig() config {
 	return config{
+		readerConfig:   defaultReaderConfig(),
 		Paths:          []string{},
 		Close:          defaultCloserConfig(),
-		Reader:         defaultReaderConfig(),
 		CleanInactive:  0,
 		CleanRemoved:   true,
 		HarvesterLimit: 0,
diff --git a/filebeat/input/filestream/input.go b/filebeat/input/filestream/input.go
index b6c6598c50b..9f715d1183e 100644
--- a/filebeat/input/filestream/input.go
+++ b/filebeat/input/filestream/input.go
@@ -94,19 +94,19 @@ func configure(cfg *common.Config) (loginp.Prospector, loginp.Harvester, error)
 		return nil, nil, err
 	}
 
-	encodingFactory, ok := encoding.FindEncoding(config.Reader.Encoding)
+	encodingFactory, ok := encoding.FindEncoding(config.Encoding)
 	if !ok || encodingFactory == nil {
-		return nil, nil, fmt.Errorf("unknown encoding('%v')", config.Reader.Encoding)
+		return nil, nil, fmt.Errorf("unknown encoding('%v')", config.Encoding)
 	}
 
 	return prospector, &filestream{
-		readerConfig:    config.Reader,
-		bufferSize:      config.Reader.BufferSize,
+		readerConfig:    config.readerConfig,
+		bufferSize:      config.BufferSize,
 		encodingFactory: encodingFactory,
-		lineTerminator:  config.Reader.LineTerminator,
-		excludeLines:    config.Reader.ExcludeLines,
-		includeLines:    config.Reader.IncludeLines,
-		maxBytes:        config.Reader.MaxBytes,
+		lineTerminator:  config.LineTerminator,
+		excludeLines:    config.ExcludeLines,
+		includeLines:    config.IncludeLines,
+		maxBytes:        config.MaxBytes,
 		closerConfig:    config.Close,
 	}, nil
 }
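The config.go change above swaps the named Reader field (which nested options under a "readers" namespace) for an embedded readerConfig, promoting the reader options to the input's top level — which is why input.go can now write config.Encoding instead of config.Reader.Encoding. encoding/json promotes embedded fields the same way, so it can serve as a stand-in to illustrate the effect; treat the parallel to go-ucfg as an assumption consistent with what input.go relies on, not as documentation of that library:

```go
package main

import (
	"encoding/json"
	"fmt"
)

// readerConfig mirrors the shape of the embedded struct in the patch.
type readerConfig struct {
	Encoding   string `json:"encoding"`
	BufferSize int    `json:"harvester_buffer_size"`
}

// config embeds readerConfig, so its fields are promoted to the top level:
// "encoding" no longer needs to live under a nested "readers" object.
type config struct {
	readerConfig
	Paths []string `json:"paths"`
}

func main() {
	raw := []byte(`{"paths": ["/var/log/*.log"], "encoding": "plain"}`)
	var c config
	if err := json.Unmarshal(raw, &c); err != nil {
		panic(err)
	}
	// The embedded field is reachable directly, as config.Encoding is in
	// the updated input.go.
	fmt.Println(c.Paths, c.Encoding)
}
```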
diff --git a/x-pack/filebeat/filebeat.reference.yml b/x-pack/filebeat/filebeat.reference.yml
index f4ee32be630..4973a0ae915 100644
--- a/x-pack/filebeat/filebeat.reference.yml
+++ b/x-pack/filebeat/filebeat.reference.yml
@@ -1783,6 +1783,7 @@ filebeat.inputs:
 #
 # Possible options are:
 # * log: Reads every line of the log file (default)
+# * filestream: Improved version of the log input. Experimental.
 # * stdin: Reads the standard in
 
 #------------------------------ Log input --------------------------------
@@ -2003,6 +2004,145 @@ filebeat.inputs:
   # Defines if inputs is enabled
   #enabled: true
 
+#--------------------------- Filestream input ----------------------------
+- type: filestream
+
+  # Change to true to enable this input configuration.
+  enabled: false
+
+  # Paths that should be crawled and fetched. Glob based paths.
+  # To fetch all ".log" files from a specific level of subdirectories
+  # /var/log/*/*.log can be used.
+  # For each file found under this path, a harvester is started.
+  # Make sure no file is defined twice as this can lead to unexpected behaviour.
+  paths:
+    - /var/log/*.log
+    #- c:\programdata\elasticsearch\logs\*
+
+  # Configure the file encoding for reading files with international characters
+  # following the W3C recommendation for HTML5 (http://www.w3.org/TR/encoding).
+  # Some sample encodings:
+  #   plain, utf-8, utf-16be-bom, utf-16be, utf-16le, big5, gb18030, gbk,
+  #   hz-gb-2312, euc-kr, euc-jp, iso-2022-jp, shift-jis, ...
+  #encoding: plain
+
+
+  # Exclude lines. A list of regular expressions to match. It drops the lines that
+  # match any regular expression from the list. include_lines is called before
+  # exclude_lines. By default, no lines are dropped.
+  #exclude_lines: ['^DBG']
+
+  # Include lines. A list of regular expressions to match. It exports the lines that
+  # match any regular expression from the list. include_lines is called before
+  # exclude_lines. By default, all the lines are exported.
+  #include_lines: ['^ERR', '^WARN']
+
+  ### Prospector options
+
+  # How often the input checks for new files in the paths that are specified
+  # for harvesting. Specify 1s to scan the directory as frequently as possible
+  # without causing excessive load. Default: 10s.
+  #prospector.scanner.check_interval: 10s
+
+  # Exclude files. A list of regular expressions to match. Filebeat drops the files that
+  # match any regular expression from the list. By default, no files are dropped.
+  #prospector.scanner.exclude_files: ['.gz$']
+
+  # Expand "**" patterns into regular glob patterns.
+  #prospector.scanner.recursive_glob: true
+
+  # If symlinks is enabled, symlinks are opened and harvested. The harvester opens and
+  # reads the original file, but reports the symlink name as the source.
+  #prospector.scanner.symlinks: false
+
+  ### State options
+
+  # If a file's modification time is older than clean_inactive, its state is
+  # removed from the registry. By default this is disabled.
+  #clean_inactive: 0
+
+  # Immediately removes the state of files which cannot be found on disk anymore
+  #clean_removed: true
+
+  # Method to determine if two files are the same or not. By default
+  # the Beat considers two files the same if their inode and device id are the same.
+  #file_identity.native: ~
+
+  # Optional additional fields. These fields can be freely picked
+  # to add additional information to the crawled log files for filtering
+  #fields:
+  #  level: debug
+  #  review: 1
+
+  # Set to true to publish fields with null values in events.
+  #keep_null: false
+
+  # By default, all events contain `host.name`. This option can be set to true
+  # to disable the addition of this field to all events. The default value is
+  # false.
+  #publisher_pipeline.disable_host: false
+
+  # Ignore files which were modified more than the defined timespan in the past.
+  # ignore_older is disabled by default (set to 0), so no files are ignored.
+  # Time strings like 2h (2 hours), 5m (5 minutes) can be used.
+  #ignore_older: 0
+
+  # Defines the buffer size every harvester uses when fetching the file
+  #harvester_buffer_size: 16384
+
+  # Maximum number of bytes a single log event can have.
+  # All bytes after message_max_bytes are discarded and not sent. The default is 10MB.
+  # This is especially useful for multiline log messages which can get large.
+  #message_max_bytes: 10485760
+
+  # Characters which separate the lines. Valid values: auto, line_feed, vertical_tab, form_feed,
+  # carriage_return, carriage_return_line_feed, next_line, line_separator, paragraph_separator.
+  #line_terminator: auto
+
+  # The Ingest Node pipeline ID associated with this input. If this is set, it
+  # overwrites the pipeline option from the Elasticsearch output.
+  #pipeline:
+
+  # Backoff values define how aggressively Filebeat checks files for updates.
+  # The default values can be used in most cases. backoff.init defines how long Filebeat
+  # waits before checking a file again after EOF is reached. The default is 1s, which means
+  # the file is checked every second for new lines. This leads to near real-time crawling.
+  # Every time a new line appears, the backoff is reset to the initial value.
+  #backoff.init: 1s
+
+  # backoff.max defines the maximum backoff time. After having backed off multiple times
+  # from checking a file, the waiting time never exceeds backoff.max, independent of the
+  # backoff factor. Setting it to 10s means that, in the worst case, a new line appended
+  # to a log file is read after at most 10s.
+  #backoff.max: 10s
+
+  ### Harvester closing options
+
+  # Close inactive closes the file handler after the predefined period.
+  # The period starts when the last line of the file was read, not from the file's ModTime.
+  # Time strings like 2h (2 hours), 5m (5 minutes) can be used.
+  #close.on_state_change.inactive: 5m
+
+  # Close renamed closes a file handler when the file is renamed or rotated.
+  # Note: Potential data loss. Make sure to read and understand the docs for this option.
+  #close.on_state_change.renamed: false
+
+  # When enabling this option, a file handler is closed immediately in case a file can't be found
+  # any more. If the file shows up again later, harvesting continues from the last
+  # known position after the next scan (prospector.scanner.check_interval).
+  #close.on_state_change.removed: true
+
+  # Closes the file handler as soon as the harvester reaches the end of the file.
+  # By default this option is disabled.
+  # Note: Potential data loss. Make sure to read and understand the docs for this option.
+  #close.reader.eof: false
+
+  # Close timeout closes the harvester after the predefined time.
+  # This happens regardless of whether the harvester has finished reading the file.
+  # By default this option is disabled.
+  # Note: Potential data loss. Make sure to read and understand the docs for this option.
+  #close.reader.after_interval: 0
+
 #----------------------------- Stdin input -------------------------------
 # Configuration to use stdin input
 #- type: stdin
diff --git a/x-pack/filebeat/filebeat.yml b/x-pack/filebeat/filebeat.yml
index 2a3e678c542..9a29bc76962 100644
--- a/x-pack/filebeat/filebeat.yml
+++ b/x-pack/filebeat/filebeat.yml
@@ -62,6 +62,35 @@ filebeat.inputs:
   # Note: After is the equivalent to previous and before is the equivalent to to next in Logstash
   #multiline.match: after
 
+# filestream is an experimental input. It will replace the log input in the future.
+- type: filestream
+
+  # Change to true to enable this input configuration.
+  enabled: false
+
+  # Paths that should be crawled and fetched. Glob based paths.
+  paths:
+    - /var/log/*.log
+    #- c:\programdata\elasticsearch\logs\*
+
+  # Exclude lines. A list of regular expressions to match. It drops the lines that
+  # match any regular expression from the list.
+  #exclude_lines: ['^DBG']
+
+  # Include lines. A list of regular expressions to match. It exports the lines that
+  # match any regular expression from the list.
+  #include_lines: ['^ERR', '^WARN']
+
+  # Exclude files. A list of regular expressions to match. Filebeat drops the files that
+  # match any regular expression from the list. By default, no files are dropped.
+  #prospector.scanner.exclude_files: ['.gz$']
+
+  # Optional additional fields. These fields can be freely picked
+  # to add additional information to the crawled log files for filtering
+  #fields:
+  #  level: debug
+  #  review: 1
 
 # ============================== Filebeat modules ==============================
 
 filebeat.config.modules: