From 1043101d00b35c848d03871a07a356935c1e5199 Mon Sep 17 00:00:00 2001
From: John Shahid
Date: Mon, 24 Mar 2014 11:33:28 -0400
Subject: [PATCH] modify the configuration to match this example
 https://github.com/influxdb/influxdb/pull/293#issuecomment-37389463

---
 config.toml.sample                      | 12 +++---
 src/configuration/config.toml           | 18 ++++++---
 src/configuration/configuration.go      | 50 +++++++++++++------------
 src/configuration/configuration_test.go |  4 ++
 src/integration/test_config1.toml       | 18 ++++++---
 src/integration/test_config2.toml       | 10 ++---
 src/integration/test_config3.toml       | 10 ++---
 7 files changed, 74 insertions(+), 48 deletions(-)

diff --git a/config.toml.sample b/config.toml.sample
index 5d16aaa4e6b..a2ae77610e6 100644
--- a/config.toml.sample
+++ b/config.toml.sample
@@ -23,11 +23,13 @@ port = 8086 # binding is disabled if the port isn't set
 # ssl-port = 8084 # Ssl support is enabled if you set a port and cert
 # ssl-cert = /path/to/cert.pem
 
-[graphite]
-# optionally enable a graphite (carbon) compatible ingestion
-enabled = false
-port = 2003
-database = "" # store graphite data in this database
+[input_plugins]
+
+  # Configure the graphite api
+  [input_plugins.graphite]
+  enabled = false
+  # port = 2003
+  # database = "" # store graphite data in this database
 
 # Raft configuration
 [raft]
diff --git a/src/configuration/config.toml b/src/configuration/config.toml
index 88df40bbb23..9126d514971 100644
--- a/src/configuration/config.toml
+++ b/src/configuration/config.toml
@@ -1,7 +1,7 @@
 # Welcome to the InfluxDB configuration file.
 
-# If hostname (on the OS) doesn't return a name that can be resolved by the other 
-# systems in the cluster, you'll have to set the hostname to an IP or something 
+# If hostname (on the OS) doesn't return a name that can be resolved by the other
+# systems in the cluster, you'll have to set the hostname to an IP or something
 # that can be resovled here.
 # hostname = ""
 
@@ -20,6 +20,14 @@ assets = "./admin"
 ssl-port = 8087 # Ssl support is enabled if you set a port and cert
 ssl-cert = "../cert.pem"
 
+[input_plugins]
+
+  # Configure the graphite api
+  [input_plugins.graphite]
+  enabled = false
+  port = 2003
+  database = "" # store graphite data in this database
+
 # Raft configuration
 [raft]
 # The raft port should be open between all servers in a cluster.
@@ -76,12 +84,12 @@ lru-cache-size = "200m"
 # files. max-open-files is per shard so this * that will be max.
 # max-open-shards = 0
 
-# These options specify how data is sharded across the cluster. There are two 
+# These options specify how data is sharded across the cluster. There are two
 # shard configurations that have the same knobs: short term and long term.
 # Any series that begins with a capital letter like Exceptions will be written
 # into the long term storage. Any series beginning with a lower case letter
 # like exceptions will be written into short term. The idea being that you
-# can write high precision data into short term and drop it after a couple 
+# can write high precision data into short term and drop it after a couple
 # of days. Meanwhile, continuous queries can run downsampling on the short term
 # data and write into the long term area.
 [sharding]
@@ -96,7 +104,7 @@ lru-cache-size = "200m"
   # over the network when doing a query.
   duration = "7d"
 
-  # split will determine how many shards to split each duration into. For example, 
+  # split will determine how many shards to split each duration into. For example,
   # if we created a shard for 2014-02-10 and split was set to 2. Then two shards
   # would be created that have the data for 2014-02-10. By default, data will
   # be split into those two shards deterministically by hashing the (database, serise)
diff --git a/src/configuration/configuration.go b/src/configuration/configuration.go
index 5ed9b60bb6a..a2bb52e30a6 100644
--- a/src/configuration/configuration.go
+++ b/src/configuration/configuration.go
@@ -93,10 +93,10 @@ type LoggingConfig struct {
 }
 
 type LevelDbConfiguration struct {
-	MaxOpenFiles   int  `toml:"max-open-files"`
-	LruCacheSize   size `toml:"lru-cache-size"`
-	MaxOpenShards  int `toml:"max-open-shards"`
-	PointBatchSize int `toml:"point-batch-size"`
+	MaxOpenFiles   int  `toml:"max-open-files"`
+	LruCacheSize   size `toml:"lru-cache-size"`
+	MaxOpenShards  int  `toml:"max-open-shards"`
+	PointBatchSize int  `toml:"point-batch-size"`
 }
 
 type ShardingDefinition struct {
@@ -160,19 +160,23 @@ type WalConfig struct {
 	RequestsPerLogFile int `toml:"requests-per-log-file"`
 }
 
+type InputPlugins struct {
+	Graphite GraphiteConfig `toml:"graphite"`
+}
+
 type TomlConfiguration struct {
-	Admin       AdminConfig
-	Api         ApiConfig
-	Graphite    GraphiteConfig
-	Raft        RaftConfig
-	Storage     StorageConfig
-	Cluster     ClusterConfig
-	Logging     LoggingConfig
-	LevelDb     LevelDbConfiguration
-	Hostname    string
-	BindAddress string             `toml:"bind-address"`
-	Sharding    ShardingDefinition `toml:"sharding"`
-	WalConfig   WalConfig          `toml:"wal"`
+	Admin        AdminConfig
+	HttpApi      ApiConfig          `toml:"api"`
+	InputPlugins InputPlugins       `toml:"input_plugins"`
+	Raft         RaftConfig
+	Storage      StorageConfig
+	Cluster      ClusterConfig
+	Logging      LoggingConfig
+	LevelDb      LevelDbConfiguration
+	Hostname     string
+	BindAddress  string             `toml:"bind-address"`
+	Sharding     ShardingDefinition `toml:"sharding"`
+	WalConfig    WalConfig          `toml:"wal"`
 }
 
 type Configuration struct {
@@ -198,7 +202,7 @@ type Configuration struct {
 	LevelDbMaxOpenFiles   int
 	LevelDbLruCacheSize   int
 	LevelDbMaxOpenShards  int
-	LevelDbPointBatchSize int 
+	LevelDbPointBatchSize int
 	ShortTermShard        *ShardConfiguration
 	LongTermShard         *ShardConfiguration
 	ReplicationFactor     int
@@ -256,12 +260,12 @@ func parseTomlConfiguration(filename string) (*Configuration, error) {
 	config := &Configuration{
 		AdminHttpPort:    tomlConfiguration.Admin.Port,
 		AdminAssetsDir:   tomlConfiguration.Admin.Assets,
-		ApiHttpPort:      tomlConfiguration.Api.Port,
-		ApiHttpCertPath:  tomlConfiguration.Api.SslCertPath,
-		ApiHttpSslPort:   tomlConfiguration.Api.SslPort,
-		GraphiteEnabled:  tomlConfiguration.Graphite.Enabled,
-		GraphitePort:     tomlConfiguration.Graphite.Port,
-		GraphiteDatabase: tomlConfiguration.Graphite.Database,
+		ApiHttpPort:      tomlConfiguration.HttpApi.Port,
+		ApiHttpCertPath:  tomlConfiguration.HttpApi.SslCertPath,
+		ApiHttpSslPort:   tomlConfiguration.HttpApi.SslPort,
+		GraphiteEnabled:  tomlConfiguration.InputPlugins.Graphite.Enabled,
+		GraphitePort:     tomlConfiguration.InputPlugins.Graphite.Port,
+		GraphiteDatabase: tomlConfiguration.InputPlugins.Graphite.Database,
 		RaftServerPort:   tomlConfiguration.Raft.Port,
 		RaftDir:          tomlConfiguration.Raft.Dir,
 		ProtobufPort:     tomlConfiguration.Cluster.ProtobufPort,
diff --git a/src/configuration/configuration_test.go b/src/configuration/configuration_test.go
index 08748d10715..2adcbece0c2 100644
--- a/src/configuration/configuration_test.go
+++ b/src/configuration/configuration_test.go
@@ -34,6 +34,10 @@ func (self *LoadConfigurationSuite) TestConfig(c *C) {
 	c.Assert(config.ApiHttpCertPath, Equals, "../cert.pem")
 	c.Assert(config.ApiHttpPortString(), Equals, "")
 
+	c.Assert(config.GraphiteEnabled, Equals, false)
+	c.Assert(config.GraphitePort, Equals, 2003)
+	c.Assert(config.GraphiteDatabase, Equals, "")
+
 	c.Assert(config.RaftDir, Equals, "/tmp/influxdb/development/raft")
 	c.Assert(config.RaftServerPort, Equals, 8090)
diff --git a/src/integration/test_config1.toml b/src/integration/test_config1.toml
index 39fc7ae86f5..6970b7cff6c 100644
--- a/src/integration/test_config1.toml
+++ b/src/integration/test_config1.toml
@@ -1,7 +1,7 @@
 # Welcome to the InfluxDB configuration file.
 
-# If hostname (on the OS) doesn't return a name that can be resolved by the other 
-# systems in the cluster, you'll have to set the hostname to an IP or something 
+# If hostname (on the OS) doesn't return a name that can be resolved by the other
+# systems in the cluster, you'll have to set the hostname to an IP or something
 # that can be resovled here.
 # hostname = ""
 
@@ -21,6 +21,14 @@ port = 60500
 ssl-port = 60503
 ssl-cert = "./cert.pem"
 
+[input_plugins]
+
+  # Configure the graphite api
+  [input_plugins.graphite]
+  enabled = true
+  port = 60513
+  database = "graphite_db" # store graphite data in this database
+
 # Raft configuration
 [raft]
 # The raft port should be open between all servers in a cluster.
@@ -62,12 +70,12 @@ write-buffer-size = 1000
 # This setting determines how many responses can be buffered in memory per shard before data starts gettind dropped.
 query-shard-buffer-size = 500
 
-# These options specify how data is sharded across the cluster. There are two 
+# These options specify how data is sharded across the cluster. There are two
 # shard configurations that have the same knobs: short term and long term.
 # Any series that begins with a capital letter like Exceptions will be written
 # into the long term storage. Any series beginning with a lower case letter
 # like exceptions will be written into short term. The idea being that you
-# can write high precision data into short term and drop it after a couple 
+# can write high precision data into short term and drop it after a couple
 # of days. Meanwhile, continuous queries can run downsampling on the short term
 # data and write into the long term area.
 [sharding]
@@ -82,7 +90,7 @@ query-shard-buffer-size = 500
   # over the network when doing a query.
   duration = "1h"
 
-  # split will determine how many shards to split each duration into. For example, 
+  # split will determine how many shards to split each duration into. For example,
   # if we created a shard for 2014-02-10 and split was set to 2. Then two shards
   # would be created that have the data for 2014-02-10. By default, data will
   # be split into those two shards deterministically by hashing the (database, serise)
diff --git a/src/integration/test_config2.toml b/src/integration/test_config2.toml
index 59d0052fa7b..143d63e2501 100644
--- a/src/integration/test_config2.toml
+++ b/src/integration/test_config2.toml
@@ -1,7 +1,7 @@
 # Welcome to the InfluxDB configuration file.
 
-# If hostname (on the OS) doesn't return a name that can be resolved by the other 
-# systems in the cluster, you'll have to set the hostname to an IP or something 
+# If hostname (on the OS) doesn't return a name that can be resolved by the other
+# systems in the cluster, you'll have to set the hostname to an IP or something
 # that can be resovled here.
 # hostname = ""
 
@@ -60,12 +60,12 @@ write-buffer-size = 1000
 # This setting determines how many responses can be buffered in memory per shard before data starts gettind dropped.
 query-shard-buffer-size = 500
 
-# These options specify how data is sharded across the cluster. There are two 
+# These options specify how data is sharded across the cluster. There are two
 # shard configurations that have the same knobs: short term and long term.
 # Any series that begins with a capital letter like Exceptions will be written
 # into the long term storage. Any series beginning with a lower case letter
 # like exceptions will be written into short term. The idea being that you
-# can write high precision data into short term and drop it after a couple 
+# can write high precision data into short term and drop it after a couple
 # of days. Meanwhile, continuous queries can run downsampling on the short term
 # data and write into the long term area.
 [sharding]
@@ -80,7 +80,7 @@ query-shard-buffer-size = 500
   # over the network when doing a query.
   duration = "1h"
 
-  # split will determine how many shards to split each duration into. For example, 
+  # split will determine how many shards to split each duration into. For example,
   # if we created a shard for 2014-02-10 and split was set to 2. Then two shards
   # would be created that have the data for 2014-02-10. By default, data will
   # be split into those two shards deterministically by hashing the (database, serise)
diff --git a/src/integration/test_config3.toml b/src/integration/test_config3.toml
index 4f7fd822f2d..551cc910d3a 100644
--- a/src/integration/test_config3.toml
+++ b/src/integration/test_config3.toml
@@ -1,7 +1,7 @@
 # Welcome to the InfluxDB configuration file.
 
-# If hostname (on the OS) doesn't return a name that can be resolved by the other 
-# systems in the cluster, you'll have to set the hostname to an IP or something 
+# If hostname (on the OS) doesn't return a name that can be resolved by the other
+# systems in the cluster, you'll have to set the hostname to an IP or something
 # that can be resovled here.
 # hostname = ""
 
@@ -60,12 +60,12 @@ write-buffer-size = 1000
 # This setting determines how many responses can be buffered in memory per shard before data starts gettind dropped.
 query-shard-buffer-size = 500
 
-# These options specify how data is sharded across the cluster. There are two 
+# These options specify how data is sharded across the cluster. There are two
 # shard configurations that have the same knobs: short term and long term.
 # Any series that begins with a capital letter like Exceptions will be written
 # into the long term storage. Any series beginning with a lower case letter
 # like exceptions will be written into short term. The idea being that you
-# can write high precision data into short term and drop it after a couple 
+# can write high precision data into short term and drop it after a couple
 # of days. Meanwhile, continuous queries can run downsampling on the short term
 # data and write into the long term area.
 [sharding]
@@ -80,7 +80,7 @@ query-shard-buffer-size = 500
   # over the network when doing a query.
   duration = "1h"
 
-  # split will determine how many shards to split each duration into. For example, 
+  # split will determine how many shards to split each duration into. For example,
   # if we created a shard for 2014-02-10 and split was set to 2. Then two shards
   # would be created that have the data for 2014-02-10. By default, data will
   # be split into those two shards deterministically by hashing the (database, serise)
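
Note (not part of the patch): below is a minimal, self-contained sketch of how the new nested [input_plugins.graphite] table decodes into the Go structs this patch introduces. It assumes a BurntSushi-style TOML decoder (github.com/BurntSushi/toml); InputPlugins and TomlConfiguration match the patch, but GraphiteConfig's definition is outside this diff, so its fields and `toml` tags are inferred from the parseTomlConfiguration changes.

package main

import (
	"fmt"

	"github.com/BurntSushi/toml"
)

// GraphiteConfig mirrors the [input_plugins.graphite] table. Its real
// definition is not shown in the diff; these tags are assumptions.
type GraphiteConfig struct {
	Enabled  bool   `toml:"enabled"`
	Port     int    `toml:"port"`
	Database string `toml:"database"`
}

// InputPlugins and TomlConfiguration match the structs added in the patch.
type InputPlugins struct {
	Graphite GraphiteConfig `toml:"graphite"`
}

type TomlConfiguration struct {
	InputPlugins InputPlugins `toml:"input_plugins"`
}

func main() {
	// The same table shape used in src/integration/test_config1.toml.
	doc := `
[input_plugins]

  # Configure the graphite api
  [input_plugins.graphite]
  enabled = true
  port = 60513
  database = "graphite_db"
`
	var config TomlConfiguration
	if _, err := toml.Decode(doc, &config); err != nil {
		panic(err)
	}
	g := config.InputPlugins.Graphite
	fmt.Println(g.Enabled, g.Port, g.Database) // prints: true 60513 graphite_db
}

Because TOML sub-tables nest under their parent, grouping inputs under [input_plugins] lets future ingestion protocols be added as sibling sub-tables (input_plugins.<name>) without introducing new top-level keys, which matches the layout proposed in the PR #293 comment linked in the subject.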