Showing 8 changed files with 537 additions and 77 deletions.
@@ -1,69 +1,235 @@
bind-address = "0.0.0.0"
port = 8086
### Welcome to the InfluxDB configuration file.

# Once every 24 hours InfluxDB will report anonymous data to m.influxdb.com
# The data includes raft id (random 8 bytes), os, arch, version, and metadata.
# We don't track ip addresses of servers reporting. This is only used
# to track the number of instances running and the versions, which
# is very helpful for us.
# Change this option to true to disable reporting.
reporting-disabled = false

[meta]
dir = "/var/opt/influxdb/meta"
###
### [meta]
###
### Controls the parameters for the Raft consensus group that stores metadata
### about the InfluxDB cluster.
###

[initialization]
join-urls = ""
[meta]
dir = "/data/meta"
hostname = "localhost"
bind-address = ":8088"
retention-autocreate = true
election-timeout = "1s"
heartbeat-timeout = "1s"
leader-lease-timeout = "500ms"
commit-timeout = "50ms"
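
# For orientation: the durations in this config appear to follow Go's duration
# syntax (e.g. "50ms", "1s", "10m0s", "168h"). As set here the Raft timers are
# ordered commit-timeout (50ms) < leader-lease-timeout (500ms)
# < heartbeat-timeout (1s) = election-timeout (1s).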

###
### [data]
###
### Controls where the actual shard data for InfluxDB lives and how it is
### flushed from the WAL. "dir" may need to be changed to a suitable place
### for your system, but the WAL settings are an advanced configuration. The
### defaults should work for most systems.
###

[authentication]
enabled = false
[data]
dir = "/data/db"

# The following WAL settings are for the b1 storage engine used in 0.9.2. They won't
# apply to any new shards created after upgrading to a version > 0.9.3.
max-wal-size = 104857600 # Maximum size the WAL can reach before a flush. Defaults to 100MB.
wal-flush-interval = "10m0s" # Maximum time data can sit in WAL before a flush.
wal-partition-flush-delay = "2s" # The delay time between each WAL partition being flushed.

# These are the WAL settings for the storage engine >= 0.9.3
wal-dir = "/data/wal"
wal-enable-logging = true

# When a series in the WAL in-memory cache reaches this size in bytes it is marked as ready to
# flush to the index
# wal-ready-series-size = 25600

# Flush and compact a partition once this ratio of series are over the ready size
# wal-compaction-threshold = 0.6

# Force a flush and compaction if any series in a partition gets above this size in bytes
# wal-max-series-size = 2097152

# Force a flush of all series and full compaction if there have been no writes in this
# amount of time. This is useful for ensuring that shards that are cold for writes don't
# keep a bunch of data cached in memory and in the WAL.
# wal-flush-cold-interval = "10m"

# Force a partition to flush its largest series if it reaches this approximate size in
# bytes. Remember there are 5 partitions so you'll need at least 5x this amount of memory.
# The more memory you have, the bigger this can be.
# wal-partition-size-threshold = 20971520
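
# Rough arithmetic for the sizes above: max-wal-size 104857600 bytes is 100 MiB,
# and wal-partition-size-threshold 20971520 bytes is 20 MiB per partition, so
# with the 5 partitions mentioned above the WAL cache can hold on the order of
# 100 MiB before the largest series are forced to flush.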

###
### [cluster]
###
### Controls non-Raft cluster behavior, which generally includes how data is
### shared across shards.
###
[cluster]
write-timeout = "5s" # The time within which a write operation must complete on the cluster.
shard-writer-timeout = "5s" # The time within which a shard must respond to write.

###
### [retention]
###
### Controls the enforcement of retention policies for evicting old data.
###
[retention]
enabled = true
check-interval = "10m0s"

###
### [admin]
###
### Controls the availability of the built-in, web-based admin interface. If HTTPS is
### enabled for the admin interface, HTTPS must also be enabled on the [http] service.
###
[admin]
enabled = true
port = 8083
assets = "/opt/influxdb/current/admin"

[api]
bind-address = "0.0.0.0"
port = 8086

bind-address = ":8083"
https-enabled = false
https-certificate = "/etc/ssl/influxdb.pem"
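
# With enabled = true and bind-address = ":8083" as above, the admin UI should
# be reachable in a browser at http://<your-host>:8083 (hostname left as a
# placeholder).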

###
### [http]
###
### Controls how the HTTP endpoints are configured. These are the primary
### mechanism for getting data into and out of InfluxDB.
###
[http]
enabled = true
bind-address = ":8086"
auth-enabled = false
log-enabled = true
write-tracing = false
pprof-enabled = false
https-enabled = false
https-certificate = "/etc/ssl/influxdb.pem"
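
# The [http] service above is the main write/query API. A minimal write,
# sketched against these defaults (assumes a database named "mydb" already
# exists; the measurement and tag names are illustrative):
#
#   curl -XPOST 'http://localhost:8086/write?db=mydb' \
#        --data-binary 'cpu,host=server01 value=0.64'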

###
### [[graphite]]
###
### Controls one or many listeners for Graphite data.
###
[[graphite]]
enabled = false
bind-address = ":2003"
protocol = "tcp"
consistency-level = "one"
separator = "."
database = "graphitedb"

# These next lines control how batching works. You should have this enabled
# otherwise you could get dropped metrics or poor performance. Batching
# will buffer points in memory if you have many coming in.

# batch-size = 1000 # will flush if this many points get buffered
# batch-timeout = "1s" # will flush at least this often even if we haven't hit buffer limit
batch-size = 1000
batch-timeout = "1s"
templates = [
  # filter + template
  #"*.app env.service.resource.measurement",

  # filter + template + extra tag
  #"stats.* .host.measurement* region=us-west,agent=sensu",

  # default template. Ignore the first graphite component "servers"
  "instance.profile.measurement*"
]
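
# A sketch of how the default template above applies (one reading of the
# template syntax; the metric itself is made up): with separator = "." a
# Graphite line like
#
#   host01.cpu.load.shortterm 0.45 1444000000
#
# would be stored as measurement "load.shortterm" with tags instance=host01
# and profile=cpu, since "measurement*" greedily takes the remaining fields.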

###
### [collectd]
###
### Controls the listener for collectd data.
###
[collectd]
enabled = false

# bind-address = ":25826"
# database = "collectd"
# retention-policy = ""
# typesdb = "/usr/share/collectd/types.db"

# These next lines control how batching works. You should have this enabled
# otherwise you could get dropped metrics or poor performance. Batching
# will buffer points in memory if you have many coming in.

# batch-size = 1000 # will flush if this many points get buffered
# batch-timeout = "1s" # will flush at least this often even if we haven't hit buffer limit

###
### [opentsdb]
###
### Controls the listener for OpenTSDB data.
###
[opentsdb]
enabled = false

[udp]
# bind-address = ":4242"
# database = "opentsdb"
# retention-policy = ""
# consistency-level = "one"

###
### [[udp]]
###
### Controls the listeners for InfluxDB line protocol data via UDP.
###

[[udp]]
enabled = false
bind-address = ":4444"
database = "udpdb"
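
# The listener above accepts InfluxDB line protocol over UDP. A quick smoke
# test once it is enabled (a sketch; netcat flags vary between platforms):
#
#   echo "cpu_load,host=server01 value=0.64" | nc -u -w1 localhost 4444
#
# Points written this way land in the "udpdb" database configured above.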

[broker]
dir = "/data/broker"
enabled = true
election-timeout = "0"

[data]
dir = "/data/db"
enabled = true
retention-auto-create = true
retention-check-enabled = true
retention-check-period = "10m0s"
retention-create-period = "45m0s"

[snapshot]
enabled = false
# These next lines control how batching works. You should have this enabled
# otherwise you could get dropped metrics or poor performance. Batching
# will buffer points in memory if you have many coming in.

[logging]
write-tracing = false
raft-tracing = false
level = "info"
file = "/opt/influxdb/shared/log.txt"
# batch-size = 1000 # will flush if this many points get buffered
# batch-timeout = "1s" # will flush at least this often even if we haven't hit buffer limit

###
### [monitoring]
###
### Send anonymous usage statistics to m.influxdb.com?
###
[monitoring]
enabled = false
write-interval = "24h"

[debugging]
pprof-enabled = false
###
### [continuous_queries]
###
### Controls how continuous queries are run within InfluxDB.
###

[continuous_queries]
log-enabled = true
enabled = true
recompute-previous-n = 2
recompute-no-older-than = "10m0s"
compute-runs-per-interval = 10
compute-no-more-than = "2m0s"
disabled = false
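
# For context, continuous queries in this release are defined in InfluxQL along
# these lines (database, measurement, and field names are illustrative only):
#
#   CREATE CONTINUOUS QUERY cq_cpu_1m ON mydb BEGIN
#     SELECT mean(value) INTO cpu_1m FROM cpu GROUP BY time(1m)
#   END
#
# The recompute-* and compute-* settings above bound how often and how far back
# such queries are re-evaluated.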

###
### [hinted-handoff]
###
### Controls the hinted handoff feature, which allows nodes to temporarily
### store queued data when one node of a cluster is down for a short period
### of time.
###

[hinted-handoff]
enabled = true
dir = "/var/opt/influxdb/hh"
dir = "/data/hh"
max-size = 1073741824
max-age = "168h"
retry-rate-limit = 0
retry-interval = "1s"
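
# Units for the limits above: max-size 1073741824 bytes is 1 GiB of queued
# writes, and max-age "168h" is 7 days before hinted data is dropped.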