Filebeat Logging

The Filebeat configuration below ships the CommandBox server log to Elasticsearch, keeping only warning-level and above messages and folding multi-line stack traces into single events.
output.elasticsearch:
  # Array of hosts to connect to.
  hosts: ["127.0.0.1:9200"]
  # Protocol - either `http` (default) or `https`.
  protocol: "http"
  # Authentication credentials - either API key or username/password.
  username: "filebeat"
  password: "[ your filebeat password ]"
  # Ensure older versions of Elasticsearch are supported.
  allow_older_versions: true
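  # Alternative (sketch): authenticate with an API key instead of basic auth.
  # The value is the "id:api_key" pair generated by Elasticsearch; the value
  # below is a placeholder, not a working key.
  # api_key: "TiNAGG4BaaMdaH1tRfuU:KnR6yE41RrSowb0kQ0HWoA"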
filebeat.inputs:
# Each - is an input. Most options can be set at the input level, so
# you can use different inputs for various configurations.
# Below are the input-specific configurations.
# filestream is an input for collecting log messages from files.
- type: filestream
  # Unique ID among all inputs; an ID is required.
  id: commandbox.logs
  # Change to true to enable this input configuration.
  enabled: true
  # Paths that should be crawled and fetched. Glob-based paths.
  paths:
    - /usr/local/lib/serverHome/logs/server.out.txt
  # Exclude lines. A list of regular expressions to match. Lines matching any
  # regular expression in the list are dropped.
  exclude_lines: ['^\[DEBUG', '^\[INFO']
  # Include lines. A list of regular expressions to match. Only lines matching
  # a regular expression in the list are exported.
  include_lines: ['^\[ERR', '^\[WARN', '^\[FATAL']
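  # Note: when both options are set, Filebeat applies include_lines first and
  # then exclude_lines, so with the patterns above a line such as
  #   [WARN ] 2024-01-01 12:00:00 connection pool exhausted   (hypothetical)
  # is kept, while [DEBUG and [INFO lines never reach the output.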
  # Handle multi-line errors and stack traces.
  parsers:
    - multiline:
        type: pattern
        pattern: '^[(\[]'
        negate: true
        match: after
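  # How the parser above behaves on a hypothetical stack trace: lines that do
  # not begin with '(' or '[' fail the pattern, and because negate is true they
  # are appended (match: after) to the preceding matching line, so the whole
  # trace is shipped as one event:
  #   [ERROR] request failed                       <- matches, starts a new event
  #       at com.example.Service(Service.java:42)  <- appended to the event above
  #       caused by: java.io.IOException           <- appended as well
  #   [INFO ] next request                         <- matches, starts the next event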
  # Exclude files. A list of regular expressions to match. Filebeat drops the
  # files that match any regular expression in the list. By default, no files
  # are dropped.
  prospector.scanner.exclude_files: ['.gz$']
  # Optional additional fields. These fields can be freely picked
  # to add additional information to the crawled log files for filtering.
  fields:
    application: my-application-name
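By default, Filebeat nests custom fields under a top-level `fields` key, so the value above is indexed as `fields.application`. If you want it at the root of the event instead, a minimal addition to the input (sketch):

  fields:
    application: my-application-name
  # Promote custom fields to the top level of the event
  fields_under_root: true

Once the file is saved as filebeat.yml, `filebeat test config` validates the syntax and `filebeat test output` verifies the Elasticsearch connection before you start the service.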