# Telegraf Configuration
#
# Telegraf is entirely plugin driven. All metrics are gathered from the
# declared inputs, and sent to the declared outputs.
#
# Plugins must be declared in here to be active.
# To deactivate a plugin, comment out the name and any variables.
#
# Use 'telegraf -config telegraf.conf -test' to see what metrics a config
# file would generate.
#
# Environment variables can be used anywhere in this config file, simply prepend
# them with $. For strings the variable must be within quotes (ie, "$STR_VAR"),
# for numbers and booleans they should be plain (ie, $INT_VAR, $BOOL_VAR)


# Global tags can be specified here in key="value" format.
[global_tags]
  # dc = "us-east-1" # will tag all metrics with dc=us-east-1
  # rack = "1a"
  ## Environment variables can be used as tags, and throughout the config file
  # user = "$USER"


# Configuration for telegraf agent
[agent]
  ## Default data collection interval for all inputs
  interval = "10s"
  ## Rounds collection interval to 'interval'
  ## ie, if interval="10s" then always collect on :00, :10, :20, etc.
  round_interval = true

  ## Telegraf will send metrics to outputs in batches of at
  ## most metric_batch_size metrics.
  metric_batch_size = 1000
  ## For failed writes, telegraf will cache metric_buffer_limit metrics for each
  ## output, and will flush this buffer on a successful write. Oldest metrics
  ## are dropped first when this buffer fills.
  metric_buffer_limit = 10000

  ## Collection jitter is used to jitter the collection by a random amount.
  ## Each plugin will sleep for a random time within jitter before collecting.
  ## This can be used to avoid many plugins querying things like sysfs at the
  ## same time, which can have a measurable effect on the system.
  collection_jitter = "0s"

  ## Default flushing interval for all outputs. You shouldn't set this below
  ## interval. Maximum flush_interval will be flush_interval + flush_jitter
  flush_interval = "10s"
  ## Jitter the flush interval by a random amount. This is primarily to avoid
  ## large write spikes for users running a large number of telegraf instances.
  ## ie, a jitter of 5s and interval 10s means flushes will happen every 10-15s
  flush_jitter = "0s"

  ## Run telegraf in debug mode
  debug = false
  ## Run telegraf in quiet mode
  quiet = false
  ## Override default hostname, if empty use os.Hostname()
  hostname = ""
  ## If set to true, do not set the "host" tag in the telegraf agent.
  omit_hostname = false


###############################################################################
#                            OUTPUT PLUGINS                                   #
###############################################################################

# Configuration for influxdb server to send metrics to
[[outputs.influxdb]]
  ## The full HTTP or UDP endpoint URL for your InfluxDB instance.
  ## Multiple urls can be specified as part of the same cluster,
  ## this means that only ONE of the urls will be written to each interval.
  # urls = ["udp://localhost:8089"] # UDP endpoint example
  urls = ["http://192.168.0.217:8586"] # required
  ## The target database for metrics (telegraf will create it if it does not exist).
  database = "telegraf" # required

  ## Precision of writes, valid values are "ns", "us" (or "µs"), "ms", "s", "m", "h".
  ## note: using "s" precision greatly improves InfluxDB compression.
  precision = "s"

  ## Retention policy to write to.
  retention_policy = "telegraf_01"
  ## Write consistency (clusters only), can be: "any", "one", "quorum", "all"
  write_consistency = "any"

  ## Write timeout (for the InfluxDB client), formatted as a string.
  ## If not provided, will default to 5s.
  ## 0s means no timeout (not recommended).
  timeout = "5s"
  # username = "telegraf"
  # password = "metricsmetricsmetricsmetrics"
  ## Set the user agent for HTTP POSTs (can be useful for log differentiation)
  user_agent = "telegraf"
  ## Set UDP payload size, defaults to InfluxDB UDP Client default (512 bytes)
  # udp_payload = 512

  ## Optional SSL Config
  # ssl_ca = "/etc/telegraf/ca.pem"
  # ssl_cert = "/etc/telegraf/cert.pem"
  # ssl_key = "/etc/telegraf/key.pem"
  ## Use SSL but skip chain & host verification
  # insecure_skip_verify = false


# # Configuration for Amon Server to send metrics to.
# [[outputs.amon]]
#   ## Amon Server Key
#   server_key = "my-server-key" # required.
#
#   ## Amon Instance URL
#   amon_instance = "https://youramoninstance" # required
#
#   ## Connection timeout.
#   # timeout = "5s"


# # Configuration for the AMQP server to send metrics to
# [[outputs.amqp]]
#   ## AMQP url
#   url = "amqp://localhost:5672/influxdb"
#   ## AMQP exchange
#   exchange = "telegraf"
#   ## Auth method. PLAIN and EXTERNAL are supported
#   # auth_method = "PLAIN"
#   ## Telegraf tag to use as a routing key
#   ## ie, if this tag exists, its value will be used as the routing key
#   routing_tag = "host"
#
#   ## InfluxDB retention policy
#   # retention_policy = "default"
#   ## InfluxDB database
#   # database = "telegraf"
#   ## InfluxDB precision
#   # precision = "s"
#
#   ## Optional SSL Config
#   # ssl_ca = "/etc/telegraf/ca.pem"
#   # ssl_cert = "/etc/telegraf/cert.pem"
#   # ssl_key = "/etc/telegraf/key.pem"
#   ## Use SSL but skip chain & host verification
#   # insecure_skip_verify = false
#
#   ## Data format to output.
#   ## Each data format has its own unique set of configuration options, read
#   ## more about them here:
#   ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
#   data_format = "influx"


# # Configuration for AWS CloudWatch output.
# [[outputs.cloudwatch]]
#   ## Amazon REGION
#   region = 'us-east-1'
#
#   ## Amazon Credentials
#   ## Credentials are loaded in the following order
#   ## 1) explicit credentials from 'access_key' and 'secret_key'
#   ## 2) environment variables
#   ## 3) shared credentials file
#   ## 4) EC2 Instance Profile
#   #access_key = ""
#   #secret_key = ""
#
#   ## Namespace for the CloudWatch MetricDatums
#   namespace = 'InfluxData/Telegraf'


# # Configuration for DataDog API to send metrics to.
# [[outputs.datadog]]
#   ## Datadog API key
#   apikey = "my-secret-key" # required.
#
#   ## Connection timeout.
#   # timeout = "5s"


# # Send telegraf metrics to file(s)
# [[outputs.file]]
#   ## Files to write to, "stdout" is a specially handled file.
#   files = ["stdout", "/tmp/metrics.out"]
#
#   ## Data format to output.
#   ## Each data format has its own unique set of configuration options, read
#   ## more about them here:
#   ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
#   data_format = "influx"


# # Configuration for Graphite server to send metrics to
# [[outputs.graphite]]
#   ## TCP endpoint for your graphite instance.
#   servers = ["localhost:2003"]
#   ## Prefix metrics name
#   prefix = ""
#   ## Graphite output template
#   ## see https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
#   template = "host.tags.measurement.field"
#   ## timeout in seconds for the write connection to graphite
#   timeout = 2


# # Configuration for sending metrics to an Instrumental project
# [[outputs.instrumental]]
#   ## Project API Token (required)
#   api_token = "API Token" # required
#   ## Prefix the metrics with a given name
#   prefix = ""
#   ## Stats output template (Graphite formatting)
#   ## see https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md#graphite
#   template = "host.tags.measurement.field"
#   ## Timeout in seconds to connect
#   timeout = "2s"
#   ## Display Communication to Instrumental
#   debug = false


# # Configuration for the Kafka server to send metrics to
# [[outputs.kafka]]
#   ## URLs of kafka brokers
#   brokers = ["localhost:9092"]
#   ## Kafka topic for producer messages
#   topic = "telegraf"
#   ## Telegraf tag to use as a routing key
#   ## ie, if this tag exists, its value will be used as the routing key
#   routing_tag = "host"
#
#   ## CompressionCodec represents the various compression codecs recognized by
#   ## Kafka in messages.
#   ##  0 : No compression
#   ##  1 : Gzip compression
#   ##  2 : Snappy compression
#   compression_codec = 0
#
#   ## RequiredAcks is used in Produce Requests to tell the broker how many
#   ## replica acknowledgements it must see before responding
#   ##  0 : the producer never waits for an acknowledgement from the broker.
#   ##      This option provides the lowest latency but the weakest durability
#   ##      guarantees (some data will be lost when a server fails).
#   ##  1 : the producer gets an acknowledgement after the leader replica has
#   ##      received the data. This option provides better durability as the
#   ##      client waits until the server acknowledges the request as successful
#   ##      (only messages that were written to the now-dead leader but not yet
#   ##      replicated will be lost).
#   ## -1 : the producer gets an acknowledgement after all in-sync replicas have
#   ##      received the data. This option provides the best durability, we
#   ##      guarantee that no messages will be lost as long as at least one in
#   ##      sync replica remains.
#   required_acks = -1
#
#   ## The total number of times to retry sending a message
#   max_retry = 3
#
#   ## Optional SSL Config
#   # ssl_ca = "/etc/telegraf/ca.pem"
#   # ssl_cert = "/etc/telegraf/cert.pem"
#   # ssl_key = "/etc/telegraf/key.pem"
#   ## Use SSL but skip chain & host verification
#   # insecure_skip_verify = false
#
#   ## Data format to output.
#   ## Each data format has its own unique set of configuration options, read
#   ## more about them here:
#   ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
#   data_format = "influx"


# # Configuration for the AWS Kinesis output.
# [[outputs.kinesis]]
#   ## Amazon REGION of kinesis endpoint.
#   region = "ap-southeast-2"
#
#   ## Amazon Credentials
#   ## Credentials are loaded in the following order
#   ## 1) explicit credentials from 'access_key' and 'secret_key'
#   ## 2) environment variables
#   ## 3) shared credentials file
#   ## 4) EC2 Instance Profile
#   #access_key = ""
#   #secret_key = ""
#
#   ## Kinesis StreamName must exist prior to starting telegraf.
#   streamname = "StreamName"
#   ## PartitionKey as used for sharding data.
#   partitionkey = "PartitionKey"
#   ## format of the Data payload in the kinesis PutRecord, supported:
#   ## String and Custom.
#   format = "string"
#   ## debug will show upstream aws messages.
#   debug = false


# # Configuration for Librato API to send metrics to.
# [[outputs.librato]]
#   ## Librato API Docs
#   ## http://dev.librato.com/v1/metrics-authentication
#   ## Librato API user
#   api_user = "telegraf@influxdb.com" # required.
#   ## Librato API token
#   api_token = "my-secret-token" # required.
#   ## Debug
#   # debug = false
#   ## Tag Field to populate source attribute (optional)
#   ## This is typically the _hostname_ from which the metric was obtained.
#   source_tag = "host"
#   ## Connection timeout.
#   # timeout = "5s"
#   ## Output Name Template (same as graphite buckets)
#   ## see https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md#graphite
#   template = "host.tags.measurement.field"


# # Configuration for MQTT server to send metrics to
# [[outputs.mqtt]]
#   servers = ["localhost:1883"] # required.
#
#   ## MQTT outputs send metrics to this topic format
#   ##    "<topic_prefix>/<hostname>/<pluginname>/"
#   ## ex: prefix/web01.example.com/mem
#   topic_prefix = "telegraf"
#
#   ## username and password to connect to the MQTT server.
#   # username = "telegraf"
#   # password = "metricsmetricsmetricsmetrics"
#
#   ## Optional SSL Config
#   # ssl_ca = "/etc/telegraf/ca.pem"
#   # ssl_cert = "/etc/telegraf/cert.pem"
#   # ssl_key = "/etc/telegraf/key.pem"
#   ## Use SSL but skip chain & host verification
#   # insecure_skip_verify = false
#
#   ## Data format to output.
#   ## Each data format has its own unique set of configuration options, read
#   ## more about them here:
#   ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
#   data_format = "influx"


# # Send telegraf measurements to NSQD
# [[outputs.nsq]]
#   ## Location of nsqd instance listening on TCP
#   server = "localhost:4150"
#   ## NSQ topic for producer messages
#   topic = "telegraf"
#
#   ## Data format to output.
#   ## Each data format has its own unique set of configuration options, read
#   ## more about them here:
#   ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
#   data_format = "influx"


# # Configuration for OpenTSDB server to send metrics to
# [[outputs.opentsdb]]
#   ## prefix for metrics keys
#   prefix = "my.specific.prefix."
#
#   ## Telnet Mode ##
#   ## DNS name of the OpenTSDB server in telnet mode
#   host = "opentsdb.example.com"
#
#   ## Port of the OpenTSDB server in telnet mode
#   port = 4242
#
#   ## Debug true - Prints OpenTSDB communication
#   debug = false


# # Configuration for the Prometheus client to spawn
# [[outputs.prometheus_client]]
#   ## Address to listen on
#   # listen = ":9126"


# # Configuration for the Riemann server to send metrics to
# [[outputs.riemann]]
#   ## URL of server
#   url = "localhost:5555"
#   ## transport protocol to use either tcp or udp
#   transport = "tcp"
#   ## separator to use between input name and field name in Riemann service name
#   separator = " "



###############################################################################
#                            INPUT PLUGINS                                    #
###############################################################################

# Read metrics about cpu usage
[[inputs.cpu]]
  ## Whether to report per-cpu stats or not
  percpu = true
  ## Whether to report total system cpu stats or not
  totalcpu = true
  ## Comment this line if you want the raw CPU time metrics
  fielddrop = ["time_*"]


# Read metrics about disk usage by mount point
[[inputs.disk]]
  ## By default, telegraf gathers stats for all mountpoints.
  ## Setting mountpoints will restrict the stats to the specified mountpoints.
  # mount_points = ["/"]

  ## Ignore some mountpoints by filesystem type. For example (dev)tmpfs (usually
  ## present on /run, /var/run, /dev/shm or /dev).
  ignore_fs = ["tmpfs", "devtmpfs"]


# Read metrics about disk IO by device
[[inputs.diskio]]
  ## By default, telegraf will gather stats for all devices including
  ## disk partitions.
  ## Setting devices will restrict the stats to the specified devices.
  # devices = ["sda", "sdb"]
  ## Uncomment the following line if you do not need disk serial numbers.
  # skip_serial_number = true


# Get kernel statistics from /proc/stat
[[inputs.kernel]]
  # no configuration


# Read metrics about memory usage
[[inputs.mem]]
  # no configuration


# Get the number of processes and group them by status
[[inputs.processes]]
  # no configuration


# Read metrics about swap memory usage
[[inputs.swap]]
  # no configuration


# Read metrics about system load & uptime
[[inputs.system]]
  # no configuration


# # Read stats from an aerospike server
# [[inputs.aerospike]]
#   ## Aerospike servers to connect to (with port)
#   ## This plugin will query all namespaces the aerospike
#   ## server has configured and get stats for them.
#   servers = ["localhost:3000"]


# # Read Apache status information (mod_status)
# [[inputs.apache]]
#   ## An array of Apache status URI to gather stats.
#   urls = ["http://localhost/server-status?auto"]


# # Read metrics of bcache from stats_total and dirty_data
# [[inputs.bcache]]
#   ## Bcache sets path
#   ## If not specified, then default is:
#   bcachePath = "/sys/fs/bcache"
#
#   ## By default, telegraf gathers stats for all bcache devices
#   ## Setting devices will restrict the stats to the specified
#   ## bcache devices.
#   bcacheDevs = ["bcache0"]


# # Read Cassandra metrics through Jolokia
# [[inputs.cassandra]]
#   # This is the context root used to compose the jolokia url
#   context = "/jolokia/read"
#   ## List of cassandra servers exposing jolokia read service
#   servers = ["myuser:mypassword@10.10.10.1:8778","10.10.10.2:8778",":8778"]
#   ## List of metrics collected on above servers
#   ## Each metric consists of a jmx path.
#   ## This will collect all heap memory usage metrics from the jvm and
#   ## ReadLatency metrics for all keyspaces and tables.
#   ## "type=Table" in the query works with Cassandra3.0. Older versions might
#   ## need to use "type=ColumnFamily"
#   metrics = [
#     "/java.lang:type=Memory/HeapMemoryUsage",
#     "/org.apache.cassandra.metrics:type=Table,keyspace=*,scope=*,name=ReadLatency"
#   ]


# # Collects performance metrics from the MON and OSD nodes in a Ceph storage cluster.
# [[inputs.ceph]]
#   ## All configuration values are optional, defaults are shown below
#
#   ## location of ceph binary
#   ceph_binary = "/usr/bin/ceph"
#
#   ## directory in which to look for socket files
#   socket_dir = "/var/run/ceph"
#
#   ## prefix of MON and OSD socket files, used to determine socket type
#   mon_prefix = "ceph-mon"
#   osd_prefix = "ceph-osd"
#
#   ## suffix used to identify socket files
#   socket_suffix = "asok"


# # Pull Metric Statistics from Amazon CloudWatch
# [[inputs.cloudwatch]]
#   ## Amazon Region
#   region = 'us-east-1'
#
#   ## Amazon Credentials
#   ## Credentials are loaded in the following order
#   ## 1) explicit credentials from 'access_key' and 'secret_key'
#   ## 2) environment variables
#   ## 3) shared credentials file
#   ## 4) EC2 Instance Profile
#   #access_key = ""
#   #secret_key = ""
#
#   ## Requested CloudWatch aggregation Period (required - must be a multiple of 60s)
#   period = '1m'
#
#   ## Collection Delay (required - must account for metrics availability via CloudWatch API)
#   delay = '1m'
#
#   ## Recommended: use metric 'interval' that is a multiple of 'period' to avoid
#   ## gaps or overlap in pulled data
#   interval = '1m'
#
#   ## Metric Statistic Namespace (required)
#   namespace = 'AWS/ELB'
#
#   ## Metrics to Pull (optional)
#   ## Defaults to all Metrics in Namespace if nothing is provided
#   ## Refreshes Namespace available metrics every 1h
#   #[[inputs.cloudwatch.metrics]]
#   #  names = ['Latency', 'RequestCount']
#   #
#   #  ## Dimension filters for Metric (optional)
#   #  [[inputs.cloudwatch.metrics.dimensions]]
#   #    name = 'LoadBalancerName'
#   #    value = 'p-example'


# # Read metrics from one or many couchbase clusters
# [[inputs.couchbase]]
#   ## specify servers via a url matching:
#   ##  [protocol://][:password]@address[:port]
#   ##  e.g.
#   ##    http://couchbase-0.example.com/
#   ##    http://admin:secret@couchbase-0.example.com:8091/
#   ##
#   ## If no servers are specified, then localhost is used as the host.
#   ## If no protocol is specified, HTTP is used.
#   ## If no port is specified, 8091 is used.
#   servers = ["http://localhost:8091"]


# # Read CouchDB Stats from one or more servers
# [[inputs.couchdb]]
#   ## Works with CouchDB stats endpoints out of the box
#   ## Multiple HOSTs from which to read CouchDB stats:
#   hosts = ["http://localhost:8086/_stats"]


# # Read metrics from one or many disque servers
# [[inputs.disque]]
#   ## An array of URI to gather stats about. Specify an ip or hostname
#   ## with optional port and password.
#   ## ie disque://localhost, disque://10.10.3.33:18832, 10.0.0.1:10000, etc.
#   ## If no servers are specified, then localhost is used as the host.
#   servers = ["localhost"]


# # Query given DNS server and gives statistics
# [[inputs.dns_query]]
#   ## servers to query
#   servers = ["8.8.8.8"] # required
#
#   ## Domains or subdomains to query. "."(root) is default
#   domains = ["."] # optional
#
#   ## Query record type. Default is "A"
#   ## Possible values: A, AAAA, CNAME, MX, NS, PTR, TXT, SOA, SPF, SRV.
#   record_type = "A" # optional
#
#   ## Dns server port. 53 is default
#   port = 53 # optional
#
#   ## Query timeout in seconds.
#   ## Default is 2 seconds.
#   timeout = 2 # optional


# # Read metrics about docker containers
# [[inputs.docker]]
#   ## Docker Endpoint
#   ##   To use TCP, set endpoint = "tcp://[ip]:[port]"
#   ##   To use environment variables (ie, docker-machine), set endpoint = "ENV"
#   endpoint = "unix:///var/run/docker.sock"
#   ## Only collect metrics for these containers, collect all if empty
#   container_names = []
#   ## Timeout for docker list, info, and stats commands
#   timeout = "5s"


# # Read statistics from one or many dovecot servers
# [[inputs.dovecot]]
#   ## specify dovecot servers via an address:port list
#   ##  e.g.
#   ##    localhost:24242
#   ##
#   ## If no servers are specified, then localhost is used as the host.
#   servers = ["localhost:24242"]
#   ## Type is one of "user", "domain", "ip", or "global"
#   type = "global"
#   ## Wildcard matches like "*.com". An empty string "" is same as "*"
#   ## If type = "ip", filters should be an IP address or network
#   filters = [""]


# # Read stats from one or more Elasticsearch servers or clusters
# [[inputs.elasticsearch]]
#   ## specify a list of one or more Elasticsearch servers
#   servers = ["http://localhost:9200"]
#
#   ## set local to false when you want to read the indices stats from all nodes
#   ## within the cluster
#   local = true
#
#   ## set cluster_health to true when you want to also obtain cluster level stats
#   cluster_health = false


# # Read metrics from one or more commands that can output to stdout
# [[inputs.exec]]
#   ## Commands array
#   commands = ["/tmp/test.sh", "/usr/bin/mycollector --foo=bar"]
#
#   ## Timeout for each command to complete.
#   timeout = "5s"
#
#   ## measurement name suffix (for separating different commands)
#   name_suffix = "_mycollector"
#
#   ## Data format to consume.
#   ## Each data format has its own unique set of configuration options, read
#   ## more about them here:
#   ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
#   data_format = "influx"

[[inputs.exec]]
  commands = ["/opt/shell/telegraf/appcheck.sh"]
  name_override = "servercheck"
  data_format = "influx"
  interval = "1s"
  timeout = "1s"
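  ## With data_format = "influx", the script above is expected to print
  ## InfluxDB line protocol to stdout, one metric per line. A hypothetical
  ## example of such output (field and tag names here are illustrative only,
  ## not taken from appcheck.sh):
  ##   appcheck,app=gateway status=1i
  ## name_override then renames the resulting measurement to "servercheck".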
# # Read stats about given file(s)
# [[inputs.filestat]]
#   ## Files to gather stats about.
#   ## These accept standard unix glob matching rules, but with the addition of
#   ## ** as a "super asterisk". ie:
#   ##   "/var/log/**.log"  -> recursively find all .log files in /var/log
#   ##   "/var/log/*/*.log" -> find all .log files with a parent dir in /var/log
#   ##   "/var/log/apache.log" -> just tail the apache log file
#   ##
#   ## See https://github.com/gobwas/glob for more examples
#   ##
#   files = ["/var/log/**.log"]
#   ## If true, read the entire file and calculate an md5 checksum.
#   md5 = false


# # Read metrics of haproxy, via socket or csv stats page
# [[inputs.haproxy]]
#   ## An array of address to gather stats about. Specify an ip or hostname
#   ## with optional port. ie localhost, 10.10.3.33:1936, etc.
#
#   ## If no servers are specified, then default to 127.0.0.1:1936
#   servers = ["http://myhaproxy.com:1936", "http://anotherhaproxy.com:1936"]
#   ## Or you can also use local socket
#   ## servers = ["socket:/run/haproxy/admin.sock"]


# HTTP/HTTPS request given an address, a method and a timeout
[[inputs.http_response]]
  address = "http://192.168.100.218:8097/watch"
  response_timeout = "5s"
  method = "GET"

[[inputs.http_response]]
  address = "http://192.168.100.218:8082/watch"
  response_timeout = "5s"
  method = "GET"

#[[inputs.http_response]]
#  address = "http://192.168.100.218:8098/watch"
#  response_timeout = "5s"
#  method = "GET"

# ## Server address (default http://localhost)
# ## Set response_timeout (default 5 seconds)
# ## HTTP Request Method
# ## Whether to follow redirects from the server (defaults to false)
# follow_redirects = true
# ## HTTP Request Headers (all values must be strings)
# # [inputs.http_response.headers]
# #   Host = "github.com"
# ## Optional HTTP Request Body
# # body = '''
# # {'fake':'data'}
# # '''


# # Read flattened metrics from one or more JSON HTTP endpoints
# [[inputs.httpjson]]
#   ## NOTE This plugin only reads numerical measurements, strings and booleans
#   ## will be ignored.
#
#   ## a name for the service being polled
#   name = "webserver_stats"
#
#   ## URL of each server in the service's cluster
#   servers = [
#     "http://localhost:9999/stats/",
#     "http://localhost:9998/stats/",
#   ]
#
#   ## HTTP method to use: GET or POST (case-sensitive)
#   method = "GET"
#
#   ## List of tag names to extract from top-level of JSON server response
#   # tag_keys = [
#   #   "my_tag_1",
#   #   "my_tag_2"
#   # ]
#
#   ## HTTP parameters (all values must be strings)
#   [inputs.httpjson.parameters]
#     event_type = "cpu_spike"
#     threshold = "0.75"
#
#   ## HTTP Header parameters (all values must be strings)
#   # [inputs.httpjson.headers]
#   #   X-Auth-Token = "my-xauth-token"
#   #   apiVersion = "v1"
#
#   ## Optional SSL Config
#   # ssl_ca = "/etc/telegraf/ca.pem"
#   # ssl_cert = "/etc/telegraf/cert.pem"
#   # ssl_key = "/etc/telegraf/key.pem"
#   ## Use SSL but skip chain & host verification
#   # insecure_skip_verify = false


# # Read InfluxDB-formatted JSON metrics from one or more HTTP endpoints
# [[inputs.influxdb]]
#   ## Works with InfluxDB debug endpoints out of the box,
#   ## but other services can use this format too.
#   ## See the influxdb plugin's README for more details.
#
#   ## Multiple URLs from which to read InfluxDB-formatted JSON
#   urls = [
#     "http://localhost:8086/debug/vars"
#   ]


# # Read metrics from one or many bare metal servers
# [[inputs.ipmi_sensor]]
#   ## specify servers via a url matching:
#   ##  [username[:password]@][protocol[(address)]]
#   ##  e.g.
#   ##    root:passwd@lan(127.0.0.1)
#   ##
#   servers = ["USERID:PASSW0RD@lan(192.168.1.1)"]


# # Read JMX metrics through Jolokia
# [[inputs.jolokia]]
#   ## This is the context root used to compose the jolokia url
#   context = "/jolokia"
#
#   ## This specifies the mode used
#   # mode = "proxy"
#
#   ## When in proxy mode this section is used to specify further
#   ## proxy address configurations.
#   ## Remember to change host address to fit your environment.
#   # [inputs.jolokia.proxy]
#   #   host = "127.0.0.1"
#   #   port = "8080"
#
#   ## List of servers exposing jolokia read service
#   [[inputs.jolokia.servers]]
#     name = "as-server-01"
#     host = "127.0.0.1"
#     port = "8080"
#     # username = "myuser"
#     # password = "mypassword"
#
#   ## List of metrics collected on above servers
#   ## Each metric consists of a name, a jmx path and either
#   ## a pass or drop slice attribute.
#   ## This collects all heap memory usage metrics.
#   [[inputs.jolokia.metrics]]
#     name = "heap_memory_usage"
#     mbean = "java.lang:type=Memory"
#     attribute = "HeapMemoryUsage"
#
#   ## This collects thread count metrics.
#   [[inputs.jolokia.metrics]]
#     name = "thread_count"
#     mbean = "java.lang:type=Threading"
#     attribute = "TotalStartedThreadCount,ThreadCount,DaemonThreadCount,PeakThreadCount"
#
#   ## This collects loaded/unloaded class count metrics.
#   [[inputs.jolokia.metrics]]
#     name = "class_count"
#     mbean = "java.lang:type=ClassLoading"
#     attribute = "LoadedClassCount,UnloadedClassCount,TotalLoadedClassCount"


# # Read metrics from a LeoFS Server via SNMP
# [[inputs.leofs]]
#   ## An array of URI to gather stats about LeoFS.
#   ## Specify an ip or hostname with port. ie 127.0.0.1:4020
#   servers = ["127.0.0.1:4021"]


# # Read metrics from local Lustre service on OST, MDS
# [[inputs.lustre2]]
#   ## An array of /proc globs to search for Lustre stats
#   ## If not specified, the default will work on Lustre 2.5.x
#   ##
#   # ost_procfiles = [
#   #   "/proc/fs/lustre/obdfilter/*/stats",
#   #   "/proc/fs/lustre/osd-ldiskfs/*/stats",
#   #   "/proc/fs/lustre/obdfilter/*/job_stats",
#   # ]
#   # mds_procfiles = [
#   #   "/proc/fs/lustre/mdt/*/md_stats",
#   #   "/proc/fs/lustre/mdt/*/job_stats",
#   # ]


# # Gathers metrics from the /3.0/reports MailChimp API
# [[inputs.mailchimp]]
#   ## MailChimp API key
#   ## get from https://admin.mailchimp.com/account/api/
#   api_key = "" # required
#   ## Reports for campaigns sent more than days_old ago will not be collected.
#   ## 0 means collect all.
#   days_old = 0
#   ## Campaign ID to get, if empty gets all campaigns, this option overrides days_old
#   # campaign_id = ""


# # Read metrics from one or many memcached servers
# [[inputs.memcached]]
#   ## An array of address to gather stats about. Specify an ip or hostname
#   ## with optional port. ie localhost, 10.0.0.1:11211, etc.
#   servers = ["localhost:11211"]
#   # unix_sockets = ["/var/run/memcached.sock"]


# # Telegraf plugin for gathering metrics from N Mesos masters
# [[inputs.mesos]]
#   # Timeout, in ms.
#   timeout = 100
#   # A list of Mesos masters, default value is localhost:5050.
#   masters = ["localhost:5050"]
#   # Metrics groups to be collected, by default, all enabled.
#   master_collections = [
#     "resources",
#     "master",
#     "system",
#     "slaves",
#     "frameworks",
#     "messages",
#     "evqueue",
#     "registrar",
#   ]


# Read metrics from one or many MongoDB servers
#[[inputs.mongodb]]
#   ## An array of URI to gather stats about. Specify an ip or hostname
#   ## with optional port and password. ie,
#   ##   mongodb://user:auth_key@10.10.3.30:27017,
#   ##   mongodb://10.10.3.33:18832,
#   ##   10.0.0.1:10000, etc.
#   servers = ["127.0.0.1:27017"]


# Read metrics from one or many mysql servers
#[[inputs.mysql]]
#   ## specify servers via a url matching:
#   ##  [username[:password]@][protocol[(address)]]/[?tls=[true|false|skip-verify]]
#   ##  see https://github.com/go-sql-driver/mysql#dsn-data-source-name
#   ##  e.g.
#   ##    db_user:passwd@tcp(127.0.0.1:3306)/?tls=false
#   ##    db_user@tcp(127.0.0.1:3306)/?tls=false
#   #
#   ## If no servers are specified, then localhost is used as the host.
#   servers = ["root@tcp(192.168.100.222:3306)/?tls=false"]
#   ## the limits for metrics from perf_events_statements
#   perf_events_statements_digest_text_limit = 120
#   perf_events_statements_limit = 250
#   perf_events_statements_time_limit = 86400
#   #
#   ## if the list is empty, then metrics are gathered from all database tables
#   table_schema_databases = []
#   #
#   ## gather metrics from INFORMATION_SCHEMA.TABLES for the databases in the list above
#   gather_table_schema = false
#   #
#   ## gather thread state counts from INFORMATION_SCHEMA.PROCESSLIST
#   gather_process_list = true
#   #
#   ## gather auto_increment columns and max values from information schema
#   gather_info_schema_auto_inc = true
#   #
#   ## gather metrics from SHOW SLAVE STATUS command output
#   gather_slave_status = true
#   #
#   ## gather metrics from SHOW BINARY LOGS command output
#   gather_binary_logs = false
#   #
#   ## gather metrics from PERFORMANCE_SCHEMA.TABLE_IO_WAITS_SUMMARY_BY_TABLE
#   gather_table_io_waits = false
#   #
#   ## gather metrics from PERFORMANCE_SCHEMA.TABLE_LOCK_WAITS
#   gather_table_lock_waits = false
#   #
#   ## gather metrics from PERFORMANCE_SCHEMA.TABLE_IO_WAITS_SUMMARY_BY_INDEX_USAGE
#   gather_index_io_waits = false
#   #
#   ## gather metrics from PERFORMANCE_SCHEMA.EVENT_WAITS
#   gather_event_waits = false
#   #
#   ## gather metrics from PERFORMANCE_SCHEMA.FILE_SUMMARY_BY_EVENT_NAME
#   gather_file_events_stats = false
#   #
#   ## gather metrics from PERFORMANCE_SCHEMA.EVENTS_STATEMENTS_SUMMARY_BY_DIGEST
#   gather_perf_events_statements = false
#   #
#   ## Some queries we may want to run less often (such as SHOW GLOBAL VARIABLES)
#   interval_slow = "30m"


# Read metrics about network interface usage
[[inputs.net]]
  interfaces = ["eth2"]

[[inputs.net]]
  interfaces = ["eth3"]

  # ## By default, telegraf gathers stats from any up interface (excluding loopback)
  # ## Setting interfaces will tell it to gather these explicit interfaces,
  # ## regardless of status.
  # ##


# # TCP or UDP 'ping' given url and collect response time in seconds
# [[inputs.net_response]]
#   ## Protocol, must be "tcp" or "udp"
#   protocol = "tcp"
#   ## Server address (default localhost)
#   address = "github.com:80"
#   ## Set timeout
#   timeout = "1s"
#
#   ## Optional string sent to the server
#   # send = "ssh"
#   ## Optional expected string in answer
#   # expect = "ssh"
#   ## Set read timeout (only used if expecting a response)
#   read_timeout = "1s"

[[inputs.net_response]]
  protocol = "tcp"
  address = ":80"

[[inputs.net_response]]
  protocol = "tcp"
  address = ":8101"

[[inputs.net_response]]
  protocol = "tcp"
  address = ":8102"

[[inputs.net_response]]
  protocol = "tcp"
  address = ":8103"

[[inputs.net_response]]
  protocol = "tcp"
  address = ":8083"

[[inputs.net_response]]
  protocol = "tcp"
  address = ":8084"

[[inputs.net_response]]
  protocol = "tcp"
  address = ":8197"

[[inputs.net_response]]
  protocol = "tcp"
  address = ":8086"

[[inputs.net_response]]
  protocol = "tcp"
  address = ":8082"

[[inputs.net_response]]
  protocol = "tcp"
  address = ":18902"

[[inputs.net_response]]
  protocol = "tcp"
  address = ":18905"

[[inputs.net_response]]
  protocol = "tcp"
  address = ":18901"

[[inputs.net_response]]
  protocol = "tcp"
  address = ":18903"

[[inputs.net_response]]
  protocol = "tcp"
  address = ":18904"

[[inputs.net_response]]
  protocol = "tcp"
  address = ":8099"

[[inputs.net_response]]
  protocol = "tcp"
  address = ":10045"

[[inputs.net_response]]
  protocol = "tcp"
  address = ":8081"
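  ## Note: an address with an empty host part (e.g. ":80") is dialed on the
  ## local system, following Go's address handling; the checks above are
  ## assumed to target services listening on this host.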
# Read TCP metrics such as established, time wait and sockets counts.
[[inputs.netstat]]
  # no configuration


# # Read Nginx's basic status information (ngx_http_stub_status_module)
#[[inputs.nginx]]
#   ## An array of Nginx stub_status URI to gather stats.
#   urls = ["http://127.0.0.1/ngx_status"]


# # Read NSQ topic and channel statistics.
# [[inputs.nsq]]
#   ## An array of NSQD HTTP API endpoints
#   endpoints = ["http://localhost:4151"]


# # Collect kernel snmp counters and network interface statistics
# [[inputs.nstat]]
#   ## file paths for proc files. If empty default paths will be used:
#   ##    /proc/net/netstat, /proc/net/snmp, /proc/net/snmp6
#   ## These can also be overridden with env variables, see README.
#   proc_net_netstat = ""
#   proc_net_snmp = ""
#   proc_net_snmp6 = ""
#   ## dump metrics with 0 values too
#   dump_zeros = true


# # Get standard NTP query metrics, requires ntpq executable.
# [[inputs.ntpq]]
#   ## If false, set the -n ntpq flag. Can reduce metric gather time.
#   dns_lookup = true


# # Read metrics of passenger using passenger-status
# [[inputs.passenger]]
#   ## Path of passenger-status.
#   ##
#   ## The plugin gathers metrics by parsing the XML output of passenger-status
#   ## More information about the tool:
#   ##   https://www.phusionpassenger.com/library/admin/apache/overall_status_report.html
#   ##
#   ## If no path is specified, then the plugin simply executes passenger-status,
#   ## hoping it can be found in your PATH
#   command = "passenger-status -v --show=xml"


# Read metrics of phpfpm, via HTTP status page or socket
[[inputs.phpfpm]]
  urls = ["http://php01.goonbaby.com/status"]
  # urls = ["/dev/shm/php-cgi.sock"]
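  ## Note: gathering over HTTP as configured above assumes the php-fpm pool
  ## exposes a status page at /status (e.g. pm.status_path = /status in the
  ## pool configuration); adjust the URL if the pool uses a different path.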
# ## An array of addresses to gather stats about. Specify an ip or hostname
# ## with optional port and path
# ##
# ## Plugin can be configured in three modes (either can be used):
# ##   - http: the URL must start with http:// or https://, ie:
# ##       "http://localhost/status"
# ##       "http://192.168.130.1/status?full"
# ##
# ##   - unixsocket: path to fpm socket, ie:
# ##       "/var/run/php5-fpm.sock"
# ##      or using a custom fpm status path:
# ##       "/var/run/php5-fpm.sock:fpm-custom-status-path"
# ##
# ##   - fcgi: the URL must start with fcgi:// or cgi://, and port must be present, ie:
# ##       "fcgi://10.0.0.12:9000/status"
# ##       "cgi://10.0.10.12:9001/status"
# ##
# ## Example of multiple gathering from local socket and remote host
# ## urls = ["http://192.168.1.20/status", "/tmp/fpm.sock"]


# # Ping given url(s) and return statistics
# [[inputs.ping]]
#   ## NOTE: this plugin forks the ping command. You may need to set capabilities
#   ## via setcap cap_net_raw+p /bin/ping
#
#   ## urls to ping
#   urls = ["www.google.com"] # required
#   ## number of pings to send per collection (ping -c <count>)
#   count = 1 # required
#   ## interval, in s, at which to ping. 0 == default (ping -i <interval>)
#   ping_interval = 0.0
#   ## ping timeout, in s. 0 == no timeout (ping -W <timeout>)
#   timeout = 1.0
#   ## interface to send ping from (ping -I <interface>)
#   interface = ""


# # Read metrics from one or many postgresql servers
# [[inputs.postgresql]]
#   ## specify address via a url matching:
#   ##   postgres://[pqgotest[:password]]@localhost[/dbname]\
#   ##       ?sslmode=[disable|verify-ca|verify-full]
#   ## or a simple string:
#   ##   host=localhost user=pqotest password=... sslmode=... dbname=app_production
#   ##
#   ## All connection parameters are optional.
#   ##
#   ## Without the dbname parameter, the driver will default to a database
#   ## with the same name as the user. This dbname is just for instantiating a
#   ## connection with the server and doesn't restrict the databases we are trying
#   ## to grab metrics for.
#   ##
#   address = "host=localhost user=postgres sslmode=disable"
#
#   ## A list of databases to pull metrics about. If not specified, metrics for all
#   ## databases are gathered.
#   # databases = ["app_production", "testing"]


# # Read metrics from one or many postgresql servers
# [[inputs.postgresql_extensible]]
#   ## specify address via a url matching:
#   ##   postgres://[pqgotest[:password]]@localhost[/dbname]\
#   ##       ?sslmode=[disable|verify-ca|verify-full]
#   ## or a simple string:
#   ##   host=localhost user=pqotest password=... sslmode=... dbname=app_production
#   #
#   ## All connection parameters are optional.
#   ## Without the dbname parameter, the driver will default to a database
#   ## with the same name as the user. This dbname is just for instantiating a
#   ## connection with the server and doesn't restrict the databases we are trying
#   ## to grab metrics for.
#   #
#   address = "host=localhost user=postgres sslmode=disable"
#   ## A list of databases to pull metrics about. If not specified, metrics for all
#   ## databases are gathered.
#   ## databases = ["app_production", "testing"]
#   #
#   # outputaddress = "db01"
#   ## A custom name for the database that will be used as the "server" tag in the
#   ## measurement output. If not specified, a default one generated from
#   ## the connection address is used.
#   #
#   ## Define the toml config where the sql queries are stored.
#   ## New queries can be added. If withdbname is set to true and there are no
#   ## databases defined in the 'databases' field, the sql query is ended by an
#   ## 'is not null' in order to make the query succeed.
#   ## Example :
#   ## The sqlquery "SELECT * FROM pg_stat_database where datname" becomes
#   ## "SELECT * FROM pg_stat_database where datname IN ('postgres', 'pgbench')"
#   ## because the databases variable was set to ['postgres', 'pgbench'] and
#   ## withdbname was true. Be careful: if withdbname is set to false you
#   ## don't have to define the where clause (aka with the dbname); the tagvalue
#   ## field is used to define custom tags (separated by commas).
#   ## The optional "measurement" value can be used to override the default
#   ## output measurement name ("postgresql").
#   #
#   ## Structure :
#   ## [[inputs.postgresql_extensible.query]]
#   ##   sqlquery string
#   ##   version string
#   ##   withdbname boolean
#   ##   tagvalue string (comma separated)
#   ##   measurement string
#   [[inputs.postgresql_extensible.query]]
#     sqlquery="SELECT * FROM pg_stat_database"
#     version=901
#     withdbname=false
#     tagvalue=""
#     measurement=""
#   [[inputs.postgresql_extensible.query]]
#     sqlquery="SELECT * FROM pg_stat_bgwriter"
#     version=901
#     withdbname=false
#     tagvalue="postgresql.stats"


# # Read metrics from one or many PowerDNS servers
# [[inputs.powerdns]]
#   ## An array of sockets to gather stats about.
#   ## Specify a path to unix socket.
#   unix_sockets = ["/var/run/pdns.controlsocket"]


[[inputs.procstat]]
  user = "digit"
  pattern = "alipayMobile/alipay_server.jar"
  fielddrop = ["cpu_time_*"]

[[inputs.procstat]]
  user = "digit"
  pattern = "count_1.0.jar"
  fielddrop = ["cpu_time_*"]

[[inputs.procstat]]
  user = "digit"
  pattern = "wechat_server.jar"
  fielddrop = ["cpu_time_*"]

[[inputs.procstat]]
  user = "digit"
  pattern = "weixin_1.0.jar"
  fielddrop = ["cpu_time_*"]

[[inputs.procstat]]
  user = "digit"
  pattern = "interface_1.0.jar"
  fielddrop = ["cpu_time_*"]

[[inputs.procstat]]
  user = "digit"
  pattern = "wechat_qy_server.jar"
  fielddrop = ["cpu_time_*"]

[[inputs.procstat]]
  user = "digit"
  pattern = "alipay_server.jar"
  fielddrop = ["cpu_time_*"]

[[inputs.procstat]]
  user = "digit"
  pattern = "tomcat-server"
  fielddrop = ["cpu_time_*"]

[[inputs.procstat]]
  user = "digit"
  pattern = "babyShow_1.0.jar"
  fielddrop = ["cpu_time_*"]

[[inputs.procstat]]
  user = "digit"
  pattern = "gateway_server.jar"
  fielddrop = ["cpu_time_*"]

[[inputs.procstat]]
  user = "digit"
  pattern = "attence/attence_1.0.jar"
  fielddrop = ["cpu_time_*"]

[[inputs.procstat]]
  user = "digit"
  pattern = "attence_8102/attence_1.0.jar"
  fielddrop = ["cpu_time_*"]

[[inputs.procstat]]
  user = "digit"
  pattern = "attence_8103/attence_1.0.jar"
  fielddrop = ["cpu_time_*"]

[[inputs.procstat]]
  user = "digit"
  pattern = "exception_upload.jar"
  fielddrop = ["cpu_time_*"]

[[inputs.procstat]]
  user = "digit"
  pattern = "SendSms.jar"
  fielddrop = ["cpu_time_*"]

[[inputs.procstat]]
  user = "digit"
  pattern = "my_center_1.0.jar"
  fielddrop = ["cpu_time_*"]

[[inputs.procstat]]
  user = "root"
  pattern = "nginx"
  fielddrop = ["cpu_time_*"]

[[inputs.procstat]]
  user = "root"
  pattern = "php-fpm"
  fielddrop = ["cpu_time_*"]


# # Monitor process cpu and memory usage
# [[inputs.procstat]]
#   ## Must specify one of: pid_file, exe, or pattern
#   ## PID file to monitor process
#   pid_file = "/var/run/nginx.pid"
#   ## executable name (ie, pgrep <exe>)
#   # exe = "nginx"
#   ## pattern as argument for pgrep (ie, pgrep -f <pattern>)
#   # pattern = "nginx"
#   ## user as argument for pgrep (ie, pgrep -u <user>)
#   # user = "nginx"
#
#   ## override for process_name
#   ## This is optional; default is sourced from /proc/<pid>/status
#   # process_name = "bar"
#   ## Field name prefix
#   prefix = ""
#   ## comment this out if you want raw cpu_time stats
#   fielddrop = ["cpu_time_*"]


# # Read metrics from one or many prometheus clients
# [[inputs.prometheus]]
#   ## An array of urls to scrape metrics from.
#   urls = ["http://localhost:9100/metrics"]
#
#   ## Use SSL but skip chain & host verification
#   # insecure_skip_verify = false
#   ## Use bearer token for authorization
#   # bearer_token = /path/to/bearer/token


# # Reads last_run_summary.yaml file and converts to measurements
# [[inputs.puppetagent]]
#   ## Location of puppet last run summary file
#   location = "/var/lib/puppet/state/last_run_summary.yaml"


# # Read metrics from one or many RabbitMQ servers via the management API
# [[inputs.rabbitmq]]
#   url = "http://localhost:15672" # required
#   # name = "rmq-server-1" # optional tag
#   # username = "guest"
#   # password = "guest"
#
#   ## A list of nodes to pull metrics about. If not specified, metrics for
#   ## all nodes are gathered.
#   # nodes = ["rabbit@node1", "rabbit@node2"]


# # Read raindrops stats (raindrops - real-time stats for preforking Rack servers)
# [[inputs.raindrops]]
#   ## An array of raindrops middleware URI to gather stats.
#   urls = ["http://localhost:8080/_raindrops"]


# # Read metrics from one or many redis servers
# [[inputs.redis]]
#   ## specify servers via a url matching:
#   ##  [protocol://][:password]@address[:port]
#   ##  e.g.
#   ##    tcp://localhost:6379
#   ##    tcp://:password@192.168.99.100
#   ##
#   ## If no servers are specified, then localhost is used as the host.
#   ## If no port is specified, 6379 is used
#   servers = ["tcp://localhost:6379"]


# # Read metrics from one or many RethinkDB servers
# [[inputs.rethinkdb]]
#   ## An array of URI to gather stats about. Specify an ip or hostname
#   ## with optional port and password. ie,
#   ##   rethinkdb://user:auth_key@10.10.3.30:28105,
#   ##   rethinkdb://10.10.3.33:18832,
#   ##   10.0.0.1:10000, etc.
#   servers = ["127.0.0.1:28015"]


# # Read metrics from one or many Riak servers
# [[inputs.riak]]
#   # Specify a list of one or more riak http servers
#   servers = ["http://localhost:8098"]


# # Reads oid values from one or many snmp agents
# [[inputs.snmp]]
#   ## Use 'oids.txt' file to translate oids to names
#   ## To generate 'oids.txt' you need to run:
#   ##   snmptranslate -m all -Tz -On | sed -e 's/"//g' > /tmp/oids.txt
#   ## Or if you have another MIB folder with custom MIBs
#   ##   snmptranslate -M /mycustommibfolder -Tz -On -m all | sed -e 's/"//g' > oids.txt
#   snmptranslate_file = "/tmp/oids.txt"
#   [[inputs.snmp.host]]
#     address = "192.168.2.2:161"
#     # SNMP community
#     community = "public" # default public
#     # SNMP version (1, 2 or 3)
#     # Version 3 not supported yet
#     version = 2 # default 2
#     # SNMP response timeout
#     timeout = 2.0 # default 2.0
#     # SNMP request retries
#     retries = 2 # default 2
#     # Which get/bulk do you want to collect for this host
#     collect = ["mybulk", "sysservices", "sysdescr"]
#     # Simple list of OIDs to get, in addition to "collect"
#     get_oids = []
#
#   [[inputs.snmp.host]]
#     address = "192.168.2.3:161"
#     community = "public"
#     version = 2
#     timeout = 2.0
#     retries = 2
#     collect = ["mybulk"]
#     get_oids = [
#       "ifNumber",
#       ".1.3.6.1.2.1.1.3.0",
#     ]
#
#   [[inputs.snmp.get]]
#     name = "ifnumber"
#     oid = "ifNumber"
#
#   [[inputs.snmp.get]]
#     name = "interface_speed"
#     oid = "ifSpeed"
#     instance = "0"
#
#   [[inputs.snmp.get]]
#     name = "sysuptime"
#     oid = ".1.3.6.1.2.1.1.3.0"
#     unit = "second"
#
#   [[inputs.snmp.bulk]]
#     name = "mybulk"
#     max_repetition = 127
#     oid = ".1.3.6.1.2.1.1"
#
#   [[inputs.snmp.bulk]]
#     name = "ifoutoctets"
#     max_repetition = 127
#     oid = "ifOutOctets"
#
#   [[inputs.snmp.host]]
#     address = "192.168.2.13:161"
#     #address = "127.0.0.1:161"
#     community = "public"
#     version = 2
#     timeout = 2.0
#     retries = 2
#     #collect = ["mybulk", "sysservices", "sysdescr", "systype"]
#     collect = ["sysuptime"]
#     [[inputs.snmp.host.table]]
#       name = "iftable3"
#       include_instances = ["enp5s0", "eth1"]
#
#   # SNMP TABLEs
#   # table without mapping nor subtables
#   [[inputs.snmp.table]]
#     name = "iftable1"
#     oid = ".1.3.6.1.2.1.31.1.1.1"
#
#   # table without mapping but with subtables
#   [[inputs.snmp.table]]
#     name = "iftable2"
#     oid = ".1.3.6.1.2.1.31.1.1.1"
#     sub_tables = [".1.3.6.1.2.1.2.2.1.13"]
#
#   # table with mapping but without subtables
#   [[inputs.snmp.table]]
#     name = "iftable3"
#     oid = ".1.3.6.1.2.1.31.1.1.1"
#     # if empty, get all instances
#     mapping_table = ".1.3.6.1.2.1.31.1.1.1.1"
#     # if empty, get all subtables
#
#   # table with both mapping and subtables
#   [[inputs.snmp.table]]
#     name = "iftable4"
#     oid = ".1.3.6.1.2.1.31.1.1.1"
#     # if empty, get all instances
#     mapping_table = ".1.3.6.1.2.1.31.1.1.1.1"
#     # if empty, get all subtables
#     # sub_tables could be not "real subtables"
#     sub_tables = [".1.3.6.1.2.1.2.2.1.13", "bytes_recv", "bytes_send"]


# # Read metrics from Microsoft SQL Server
# [[inputs.sqlserver]]
#   ## Specify instances to monitor with a list of connection strings.
#   ## All connection parameters are optional.
#   ## By default, the host is localhost, listening on default port, TCP 1433.
#   ##   for Windows, the user is the currently running AD user (SSO).
#   ## See https://github.com/denisenkom/go-mssqldb for detailed connection
#   ## parameters.
#   # servers = [
#   #   "Server=192.168.1.10;Port=1433;User Id=;Password=;app name=telegraf;log=1;",
#   # ]


# # Inserts sine and cosine waves for demonstration purposes
# [[inputs.trig]]
#   ## Set the amplitude
#   amplitude = 10.0


# # Read Twemproxy stats data
# [[inputs.twemproxy]]
#   ## Twemproxy stats address and port (no scheme)
#   addr = "localhost:22222"
#   ## Monitor pool name
#   pools = ["redis_pool", "mc_pool"]


# # A plugin to collect stats from Varnish HTTP Cache
# [[inputs.varnish]]
#   ## The default location of the varnishstat binary can be overridden with:
#   binary = "/usr/bin/varnishstat"
#
#   ## By default, telegraf gathers stats for 3 metric points.
#   ## Setting stats will override the defaults shown below.
#   ## stats may also be set to ["all"], which will collect all stats
#   stats = ["MAIN.cache_hit", "MAIN.cache_miss", "MAIN.uptime"]


# # Read metrics of ZFS from arcstats, zfetchstats and vdev_cache_stats
# [[inputs.zfs]]
#   ## ZFS kstat path
#   ## If not specified, then default is:
#   kstatPath = "/proc/spl/kstat/zfs"
#
#   ## By default, telegraf gathers all zfs stats
#   ## If not specified, then default is:
#   kstatMetrics = ["arcstats", "zfetchstats", "vdev_cache_stats"]
#
#   ## By default, don't gather zpool stats
#   poolMetrics = false


# # Reads 'mntr' stats from one or many zookeeper servers
# [[inputs.zookeeper]]
#   ## An array of address to gather stats about. Specify an ip or hostname
#   ## with port. ie localhost:2181, 10.0.0.1:2181, etc.
#
#   ## If no servers are specified, then localhost is used as the host.
#   ## If no port is specified, 2181 is used
#   servers = [":2181"]



###############################################################################
#                            SERVICE INPUT PLUGINS                            #
###############################################################################

# # A Github Webhook Event collector
# [[inputs.github_webhooks]]
#   ## Address and port to host Webhook listener on
#   service_address = ":1618"


# # Read metrics from Kafka topic(s)
# [[inputs.kafka_consumer]]
#   ## topic(s) to consume
#   topics = ["telegraf"]
#   ## an array of Zookeeper connection strings
#   zookeeper_peers = ["localhost:2181"]
#   ## Zookeeper Chroot
#   zookeeper_chroot = "/"
#   ## the name of the consumer group
#   consumer_group = "telegraf_metrics_consumers"
#   ## Offset (must be either "oldest" or "newest")
#   offset = "oldest"
#
#   ## Data format to consume.
#   ## Each data format has its own unique set of configuration options, read
#   ## more about them here:
#   ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
#   data_format = "influx"


# # Read metrics from MQTT topic(s)
# [[inputs.mqtt_consumer]]
#   servers = ["localhost:1883"]
#   ## MQTT QoS, must be 0, 1, or 2
#   qos = 0
#
#   ## Topics to subscribe to
#   topics = [
#     "telegraf/host01/cpu",
#     "telegraf/+/mem",
#     "sensors/#",
#   ]
#
#   # if true, messages that can't be delivered while the subscriber is offline
#   # will be delivered when it comes back (such as on service restart).
#   # NOTE: if true, client_id MUST be set
#   persistent_session = false
#   # If empty, a random client ID will be generated.
#   client_id = ""
#
#   ## username and password to connect to the MQTT server.
#   # username = "telegraf"
#   # password = "metricsmetricsmetricsmetrics"
#
#   ## Optional SSL Config
#   # ssl_ca = "/etc/telegraf/ca.pem"
#   # ssl_cert = "/etc/telegraf/cert.pem"
#   # ssl_key = "/etc/telegraf/key.pem"
#   ## Use SSL but skip chain & host verification
#   # insecure_skip_verify = false
#
#   ## Data format to consume.
#   ## Each data format has its own unique set of configuration options, read
#   ## more about them here:
#   ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
#   data_format = "influx"


# # Read metrics from NATS subject(s)
# [[inputs.nats_consumer]]
#   ## urls of NATS servers
#   servers = ["nats://localhost:4222"]
#   ## Use Transport Layer Security
#   secure = false
#   ## subject(s) to consume
#   subjects = ["telegraf"]
#   ## name a queue group
#   queue_group = "telegraf_consumers"
#
#   ## Data format to consume.
#   ## Each data format has its own unique set of configuration options, read
#   ## more about them here:
#   ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
#   data_format = "influx"


# # Statsd Server
# [[inputs.statsd]]
#   ## Address and port to host UDP listener on
#   service_address = ":8125"
#   ## Delete gauges every interval (default=false)
#   delete_gauges = false
#   ## Delete counters every interval (default=false)
#   delete_counters = false
#   ## Delete sets every interval (default=false)
#   delete_sets = false
#   ## Delete timings & histograms every interval (default=true)
#   delete_timings = true
#   ## Percentiles to calculate for timing & histogram stats
#   percentiles = [90]
#
#   ## separator to use between elements of a statsd metric
#   metric_separator = "_"
#
#   ## Parses tags in the datadog statsd format
#   ## http://docs.datadoghq.com/guides/dogstatsd/
#   parse_data_dog_tags = false
#
#   ## Statsd data translation templates, more info can be read here:
#   ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md#graphite
#   # templates = [
#   #     "cpu.* measurement*"
#   # ]
#
#   ## Number of UDP messages allowed to queue up, once filled,
#   ## the statsd server will start dropping packets
#   allowed_pending_messages = 10000
#
#   ## Number of timing/histogram values to track per-measurement in the
#   ## calculation of percentiles. Raising this limit increases the accuracy
#   ## of percentiles but also increases the memory usage and cpu time.
#   percentile_limit = 1000


# # Stream a log file, like the tail -f command
# [[inputs.tail]]
#   ## files to tail.
#   ## These accept standard unix glob matching rules, but with the addition of
#   ## ** as a "super asterisk".
#   ## ie:
#   ##   "/var/log/**.log"  -> recursively find all .log files in /var/log
#   ##   "/var/log/*/*.log" -> find all .log files with a parent dir in /var/log
#   ##   "/var/log/apache.log" -> just tail the apache log file
#   ##
#   ## See https://github.com/gobwas/glob for more examples
#   ##
#   files = ["/var/mymetrics.out"]
#   ## Read file from beginning.
#   from_beginning = false
#
#   ## Data format to consume.
#   ## Each data format has its own unique set of configuration options, read
#   ## more about them here:
#   ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
#   data_format = "influx"


# # Generic TCP listener
# [[inputs.tcp_listener]]
#   ## Address and port to host TCP listener on
#   service_address = ":8094"
#
#   ## Number of TCP messages allowed to queue up. Once filled, the
#   ## TCP listener will start dropping packets.
#   allowed_pending_messages = 10000
#
#   ## Maximum number of concurrent TCP connections to allow
#   max_tcp_connections = 250
#
#   ## Data format to consume.
#   ## Each data format has its own unique set of configuration options, read
#   ## more about them here:
#   ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
#   data_format = "influx"


# # Generic UDP listener
# [[inputs.udp_listener]]
#   ## Address and port to host UDP listener on
#   service_address = ":8092"
#
#   ## Number of UDP messages allowed to queue up. Once filled, the
#   ## UDP listener will start dropping packets.
#   allowed_pending_messages = 10000
#
#   ## Data format to consume.
#   ## Each data format has its own unique set of configuration options, read
#   ## more about them here:
#   ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
#   data_format = "influx"