diff --git a/.dockerignore b/.dockerignore
index 3f852bd..6f192c1 100644
--- a/.dockerignore
+++ b/.dockerignore
@@ -14,4 +14,6 @@ files
 config.yml
 ./gondx
 ./indexed.db
-.scripts
\ No newline at end of file
+.scripts
+grafana
+prom_data
\ No newline at end of file
diff --git a/.gitignore b/.gitignore
index 0058eff..857fa7b 100644
--- a/.gitignore
+++ b/.gitignore
@@ -10,3 +10,5 @@ release/
 *.db
 db_backups/
 
+
+prom_data/
diff --git a/cmd/gondx/main.go b/cmd/gondx/main.go
index 4bcbf66..50ff387 100644
--- a/cmd/gondx/main.go
+++ b/cmd/gondx/main.go
@@ -13,8 +13,10 @@ import (
 	"time"
 
 	"github.com/bonedaddy/go-indexed/bclient"
+	"github.com/bonedaddy/go-indexed/dashboard"
 	"github.com/bonedaddy/go-indexed/db"
 	"github.com/bonedaddy/go-indexed/discord"
+	"github.com/bonedaddy/go-indexed/utils"
 	"github.com/ethereum/go-ethereum/common"
 	"github.com/urfave/cli/v2"
 )
@@ -82,6 +84,59 @@ func main() {
 		return err
 	}
 	app.Commands = cli.Commands{
+		&cli.Command{
+			Name:  "prometheus",
+			Usage: "serves the prometheus metric endpoint",
+			Action: func(c *cli.Context) error {
+				ctx, cancel := context.WithCancel(c.Context)
+				defer cancel()
+				cfg, err := discord.LoadConfig(c.String("config"))
+				if err != nil {
+					return err
+				}
+				if cfg.InfuraAPIKey != "" {
+					bc, err = bclient.NewInfuraClient(cfg.InfuraAPIKey, cfg.InfuraWSEnabled)
+				} else {
+					bc, err = bclient.NewClient(cfg.ETHRPCEndpoint)
+				}
+				if err != nil {
+					return err
+				}
+				defer bc.Close()
+				database, err := db.New(&db.Opts{
+					Type:           cfg.Database.Type,
+					Host:           cfg.Database.Host,
+					Port:           cfg.Database.Port,
+					User:           cfg.Database.User,
+					Password:       cfg.Database.Pass,
+					DBName:         cfg.Database.DBName,
+					SSLModeDisable: cfg.Database.SSLModeDisable,
+				})
+				if err != nil {
+					return err
+				}
+				defer database.Close()
+				if err := database.AutoMigrate(); err != nil {
+					return err
+				}
+				// serve prometheus metrics
+				go dashboard.ServePrometheusMetrics(ctx, c.String("listen.address"))
+				// update metrics information
+				go dashboard.UpdateMetrics(ctx, database, bc)
+				sc := make(chan os.Signal, 1)
+				signal.Notify(sc, syscall.SIGINT, syscall.SIGQUIT, syscall.SIGTERM, os.Interrupt, os.Kill)
+				<-sc
+				cancel()
+				return nil
+			},
+			Flags: []cli.Flag{
+				&cli.StringFlag{
+					Name:  "listen.address",
+					Usage: "the address we will expose the metrics listener on",
+					Value: "0.0.0.0:9123",
+				},
+			},
+		},
 		&cli.Command{
 			Name:  "discord",
 			Usage: "discord bot management",
@@ -275,7 +330,7 @@ func main() {
 }
 
 func dbPriceUpdateLoop(ctx context.Context, bc *bclient.Client, db *db.Database) {
-	ticker := time.NewTicker(time.Second * 30)
+	ticker := time.NewTicker(time.Second * 60) // update every 1m
 	defer ticker.Stop()
 	// this will tick less frequently as it's an extremely expensive RPC to calculate
 	// a single price update likely requires 100 -> 150 RPC calls
@@ -384,6 +439,51 @@ func dbPriceUpdateLoop(ctx context.Context, bc *bclient.Client, db *db.Database)
 					log.Println("failed to update ndx dai price: ", err)
 				}
 			}
+			// update defi5 total supply
+			ip, err := bc.DEFI5()
+			if err != nil {
+				log.Println("failed to get defi5 contract: ", err)
+			} else {
+				supply, err := ip.TotalSupply(nil)
+				if err != nil {
+					log.Println("failed to get total supply: ", err)
+				} else {
+					supplyF, _ := utils.ToDecimal(supply, 18).Float64()
+					if err := db.RecordTotalSupply("defi5", supplyF); err != nil {
+						log.Println("failed to update defi5 total supply: ", err)
+					}
+				}
+			}
+			// update cc10 total supply
+			ip, err = bc.CC10()
+			if err != nil {
+				log.Println("failed to get cc10 contract: ", err)
+			} else {
+				supply, err := ip.TotalSupply(nil)
+				if err != nil {
+					log.Println("failed to get total supply: ", err)
+				} else {
+					supplyF, _ := utils.ToDecimal(supply, 18).Float64()
+					if err := db.RecordTotalSupply("cc10", supplyF); err != nil {
+						log.Println("failed to update cc10 total supply: ", err)
+					}
+				}
+			}
+			// update ndx total supply
+			erc, err := bc.NDX()
+			if err != nil {
+				log.Println("failed to get ndx contract: ", err)
+			} else {
+				supply, err := erc.TotalSupply(nil)
+				if err != nil {
+					log.Println("failed to get total supply: ", err)
+				} else {
+					supplyF, _ := utils.ToDecimal(supply, 18).Float64()
+					if err := db.RecordTotalSupply("ndx", supplyF); err != nil {
+						log.Println("failed to update ndx total supply: ", err)
+					}
+				}
+			}
 		}
 	}
 }
diff --git a/dashboard/dashboard.go b/dashboard/dashboard.go
new file mode 100644
index 0000000..31a4c7e
--- /dev/null
+++ b/dashboard/dashboard.go
@@ -0,0 +1,134 @@
+package dashboard
+
+import (
+	"context"
+	"log"
+	"net/http"
+	"time"
+
+	"github.com/bonedaddy/go-indexed/bclient"
+	"github.com/bonedaddy/go-indexed/db"
+	"github.com/bonedaddy/go-indexed/utils"
+	"github.com/prometheus/client_golang/prometheus"
+	"github.com/prometheus/client_golang/prometheus/promhttp"
+)
+
+var (
+	// CC10
+	cc10TVL = prometheus.NewGauge(prometheus.GaugeOpts{
+		Namespace: "indexed",
+		Subsystem: "pool",
+		Name:      "cc10_tvl",
+	})
+	cc10TotalSupply = prometheus.NewGauge(prometheus.GaugeOpts{
+		Namespace: "indexed",
+		Subsystem: "pool",
+		Name:      "cc10_total_supply",
+	})
+	cc10DaiPrice = prometheus.NewGauge(prometheus.GaugeOpts{
+		Namespace: "indexed",
+		Subsystem: "pool",
+		Name:      "cc10_dai_price",
+	})
+	// DEFI5
+	defi5TVL = prometheus.NewGauge(prometheus.GaugeOpts{
+		Namespace: "indexed",
+		Subsystem: "pool",
+		Name:      "defi5_tvl",
+	})
+	defi5TotalSupply = prometheus.NewGauge(prometheus.GaugeOpts{
+		Namespace: "indexed",
+		Subsystem: "pool",
+		Name:      "defi5_total_supply",
+	})
+	defi5DaiPrice = prometheus.NewGauge(prometheus.GaugeOpts{
+		Namespace: "indexed",
+		Subsystem: "pool",
+		Name:      "defi5_dai_price",
+	})
+	// NDX
+	ndxTotalSupply = prometheus.NewGauge(prometheus.GaugeOpts{
+		Namespace: "indexed",
+		Subsystem: "governance",
+		Name:      "ndx_total_supply",
+	})
+	ndxDaiPrice = prometheus.NewGauge(prometheus.GaugeOpts{
+		Namespace: "indexed",
+		Subsystem: "governance",
+		Name:      "ndx_dai_price",
+	})
+)
+
+func init() {
+	prometheus.MustRegister(cc10TVL, cc10TotalSupply, cc10DaiPrice, defi5TVL, defi5TotalSupply, defi5DaiPrice, ndxTotalSupply, ndxDaiPrice)
+}
+
+// ServePrometheusMetrics starts an HTTP server to expose Prometheus metrics
+func ServePrometheusMetrics(ctx context.Context, listenAddr string) error {
+	handler := http.NewServeMux()
+	handler.Handle("/metrics", promhttp.Handler())
+	srv := &http.Server{
+		Handler: handler,
+		Addr:    listenAddr,
+	}
+	go func() {
+		<-ctx.Done()
+		srv.Close()
+	}()
+	return srv.ListenAndServe()
+}
+
+// UpdateMetrics is used to update the Prometheus metrics
+func UpdateMetrics(ctx context.Context, database *db.Database, bc *bclient.Client) {
+	ticker := time.NewTicker(time.Minute)
+	defer ticker.Stop()
+	for {
+		select {
+		case <-ctx.Done():
+			return
+		case <-ticker.C:
+			// set price information
+			if price, err := database.LastPrice("ndx"); err == nil {
+				ndxDaiPrice.Set(price)
+			}
+			if price, err := database.LastPrice("defi5"); err == nil {
+				defi5DaiPrice.Set(price)
+			}
+			if price, err := database.LastPrice("cc10"); err == nil {
+				cc10DaiPrice.Set(price)
+			}
+			// set tvl information
+			if tvl, err := database.LastValueLocked("cc10"); err == nil {
+				cc10TVL.Set(tvl)
+			}
+			if tvl, err := database.LastValueLocked("defi5"); err == nil {
+				defi5TVL.Set(tvl)
+			}
+			// set total supply information
+			if erc, err := bc.NDX(); err == nil {
+				if supply, err := erc.TotalSupply(nil); err == nil {
+					supplyF, _ := utils.ToDecimal(supply, 18).Float64()
+					ndxTotalSupply.Set(supplyF)
+				} else {
+					log.Println("failed to get total supply: ", err)
+				}
+			}
+			if erc, err := bc.DEFI5(); err == nil {
+				if supply, err := erc.TotalSupply(nil); err == nil {
+					supplyF, _ := utils.ToDecimal(supply, 18).Float64()
+					defi5TotalSupply.Set(supplyF)
+				} else {
+					log.Println("failed to get total supply: ", err)
+				}
+			}
+			if erc, err := bc.CC10(); err == nil {
+				if supply, err := erc.TotalSupply(nil); err == nil {
+					supplyF, _ := utils.ToDecimal(supply, 18).Float64()
+					cc10TotalSupply.Set(supplyF)
+				} else {
+					log.Println("failed to get total supply: ", err)
+				}
+			}
+		}
+	}
+}
diff --git a/dashboard/dashboard_test.go b/dashboard/dashboard_test.go
new file mode 100644
index 0000000..f77e99b
--- /dev/null
+++ b/dashboard/dashboard_test.go
@@ -0,0 +1,17 @@
+package dashboard
+
+import (
+	"context"
+	"net/http"
+	"testing"
+	"time"
+)
+
+func TestServePrometheusMetrics(t *testing.T) {
+	ctx, cancel := context.WithTimeout(context.Background(), time.Second*5)
+	defer cancel()
+	err := ServePrometheusMetrics(ctx, ":12345")
+	if err != nil && err != http.ErrServerClosed {
+		t.Fatal(err)
+	}
+}
diff --git a/dashboard/doc.go b/dashboard/doc.go
new file mode 100644
index 0000000..52b7b5d
--- /dev/null
+++ b/dashboard/doc.go
@@ -0,0 +1,2 @@
+// Package dashboard provides utilities for rendering tracked information through Grafana
+package dashboard
diff --git a/db/db.go b/db/db.go
index 57b89d1..ff3d366 100644
--- a/db/db.go
+++ b/db/db.go
@@ -57,7 +57,7 @@ func New(opts *Opts) (*Database, error) {
 	if err != nil {
 		return nil, err
 	}
-	db, err := gorm.Open(dialector, &gorm.Config{})
+	db, err := gorm.Open(dialector, &gorm.Config{PrepareStmt: true})
 	if err != nil {
 		return nil, err
 	}
@@ -67,7 +67,7 @@
 // AutoMigrate is used to automatically migrate database tables
 func (d *Database) AutoMigrate() error {
 	var tables []interface{}
-	tables = append(tables, &Price{}, &TotalValueLocked{})
+	tables = append(tables, &Price{}, &TotalValueLocked{}, &TotalSupply{})
 	for _, table := range tables {
 		if err := d.db.AutoMigrate(table); err != nil {
 			return err
diff --git a/db/price.go b/db/price.go
index 798a6e4..81d3368 100644
--- a/db/price.go
+++ b/db/price.go
@@ -4,13 +4,11 @@ import (
 	"errors"
 	"math"
 	"time"
-
-	"gorm.io/gorm"
 )
 
 // Price is a given price entry for an asset
 type Price struct {
-	gorm.Model
+	Model
 	Type     Asset `gorm:"varchar(255)"`
 	USDPrice float64
 }
diff --git a/db/total_supply.go b/db/total_supply.go
new file mode 100644
index 0000000..ef108ab
--- /dev/null
+++ b/db/total_supply.go
@@ -0,0 +1,23 @@
+package db
+
+// TotalSupply is used to track the total supply of a given asset
+type TotalSupply struct {
+	Model
+	Type   string
+	Supply float64
+}
+
+// RecordTotalSupply is used to record the supply for an asset
+func (d *Database) RecordTotalSupply(asset string, supply float64) error {
+	return d.db.Create(&TotalSupply{Type: asset, Supply: supply}).Error
+}
+
+// LastTotalSupply returns the last recorded total supply for an asset
+func (d *Database) LastTotalSupply(asset string) (float64, error) {
+	var ts TotalSupply
+	if err := d.db.Model(&TotalSupply{}).Where(
+		"type = ?", asset).Last(&ts).Error; err != nil {
+		return 0, err
+	}
+	return ts.Supply, nil
+}
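For context on the new db API above: the round trip through RecordTotalSupply/LastTotalSupply looks roughly like the sketch below. The Opts values (notably the "sqlite" Type string and the DBName) are illustrative assumptions, not taken from this patch; match them to your config.yml.

	// Hypothetical usage sketch for the TotalSupply helpers added above.
	package main

	import (
		"log"

		"github.com/bonedaddy/go-indexed/db"
	)

	func main() {
		// assumed sqlite options for a local smoke test; adjust to your setup
		database, err := db.New(&db.Opts{Type: "sqlite", DBName: "indexed.db"})
		if err != nil {
			log.Fatal(err)
		}
		defer database.Close()
		// make sure the new total_supplies table exists
		if err := database.AutoMigrate(); err != nil {
			log.Fatal(err)
		}
		// writers append snapshots; readers take the newest row
		if err := database.RecordTotalSupply("defi5", 123.45); err != nil {
			log.Fatal(err)
		}
		supply, err := database.LastTotalSupply("defi5")
		if err != nil {
			log.Fatal(err)
		}
		log.Printf("latest defi5 total supply: %0.2f", supply)
	}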
diff --git a/db/total_supply_test.go b/db/total_supply_test.go
new file mode 100644
index 0000000..6b6859e
--- /dev/null
+++ b/db/total_supply_test.go
@@ -0,0 +1,39 @@
+package db
+
+import (
+	"os"
+	"testing"
+
+	"github.com/stretchr/testify/require"
+)
+
+func TestTotalSupply(t *testing.T) {
+	t.Cleanup(func() {
+		os.Remove("indexed.db")
+	})
+	db := newTestDB(t)
+
+	type args struct {
+		pool       string
+		supply     float64
+		wantSupply float64
+	}
+	tests := []struct {
+		name string
+		args args
+	}{
+		{"DEFI5-5", args{"defi5", 5, 5}},
+		{"DEFI5-10", args{"defi5", 10, 10}},
+		{"CC10-15", args{"cc10", 15, 15}},
+		{"CC10-20", args{"cc10", 20, 20}},
+	}
+
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			require.NoError(t, db.RecordTotalSupply(tt.args.pool, tt.args.supply))
+			supply, err := db.LastTotalSupply(tt.args.pool)
+			require.NoError(t, err)
+			require.Equal(t, tt.args.wantSupply, supply)
+		})
+	}
+}
diff --git a/db/tvl.go b/db/tvl.go
index 35f49f6..eb96400 100644
--- a/db/tvl.go
+++ b/db/tvl.go
@@ -1,10 +1,8 @@
 package db
 
-import "gorm.io/gorm"
-
 // TotalValueLocked records the USD value of funds locked within an index pool
 type TotalValueLocked struct {
-	gorm.Model
+	Model
 	PoolName    string
 	ValueLocked float64
 }
diff --git a/db/utils.go b/db/utils.go
new file mode 100644
index 0000000..63ce373
--- /dev/null
+++ b/db/utils.go
@@ -0,0 +1,15 @@
+package db
+
+import (
+	"time"
+
+	"gorm.io/gorm"
+)
+
+// Model is a copy of the gorm model type but with slightly different configuration
+type Model struct {
+	ID        uint `gorm:"primarykey"`
+	CreatedAt time.Time // the `gorm:"index"` tag can be re-added here to index creation timestamps
+	UpdatedAt time.Time
+	DeletedAt gorm.DeletedAt `gorm:"index"`
+}
diff --git a/discord/discord.go b/discord/discord.go
index 5adb143..e69aee0 100644
--- a/discord/discord.go
+++ b/discord/discord.go
@@ -153,10 +153,6 @@ func NewClient(ctx context.Context, cfg *Config, bc *bclient.Client, db *db.Data
 					Example:    " pool total-supply cc10",
 					IgnoreCase: true,
 					Handler:    client.poolTotalSupply,
-					// We want the user to be able to execute this command once in 60 seconds and the cleanup interval shpuld be one second
-					RateLimiter: dgc.NewRateLimiter(60*time.Second, 1*time.Second, func(ctx *dgc.Ctx) {
-						ctx.RespondText(rateLimitMsg)
-					}),
 				},
 			},
 			Usage: " pool ",
@@ -253,9 +249,6 @@ func NewClient(ctx context.Context, cfg *Config, bc *bclient.Client, db *db.Data
 				Description: "returns the total supply of the governance token, NDX",
 				IgnoreCase:  true,
 				Handler:     client.governanceTokenTotalSupply,
-				RateLimiter: dgc.NewRateLimiter(60*time.Second, 1*time.Second, func(ctx *dgc.Ctx) {
-					ctx.RespondText(rateLimitMsg)
-				}),
 			},
 		},
 		Handler: func(ctx *dgc.Ctx) {
@@ -413,13 +406,13 @@ func launchSingleWatcherBot(ctx context.Context, bot *discordgo.Session, bc *bcl
 		case "cc10":
 			price, err = database.LastPrice("cc10")
 			if err != nil {
-				log.Println("failed to get defi5 dai price error: ", err)
+				log.Println("failed to get cc10 dai price error: ", err)
 				continue
 			}
 		case "ndx":
 			price, err = database.LastPrice("ndx")
 			if err != nil {
-				log.Println("failed to get defi5 dai price error: ", err)
+				log.Println("failed to get ndx dai price error: ", err)
 				continue
 			}
 		default:
diff --git a/discord/governance.go b/discord/governance.go
index 2011352..92f802e 100644
--- a/discord/governance.go
+++ b/discord/governance.go
@@ -147,19 +147,10 @@ func (c *Client) governanceProposalInfoHandler(ctx *dgc.Ctx) {
 }
 
 func (c *Client) governanceTokenTotalSupply(ctx *dgc.Ctx) {
-	if !ctx.Command.RateLimiter.NotifyExecution(ctx) {
-		return
-	}
-	ndx, err := c.bc.NDX()
-	if err != nil {
-		ctx.RespondText("failed to get ndx contract binding")
-		return
-	}
-	supply, err := ndx.TotalSupply(nil)
+	supply, err := c.db.LastTotalSupply("ndx")
 	if err != nil {
 		ctx.RespondText("failed to get total supply")
 		return
 	}
-	supplyF, _ := utils.ToDecimal(supply, 18).Float64()
-	ctx.RespondText(printer.Sprintf("ndx token total supply: %0.2f", supplyF))
+	ctx.RespondText(printer.Sprintf("ndx token total supply: %0.2f", supply))
 }
diff --git a/discord/pool.go b/discord/pool.go
index d94e04d..f3bb1dd 100644
--- a/discord/pool.go
+++ b/discord/pool.go
@@ -7,7 +7,6 @@ import (
 
 	"github.com/bonedaddy/dgc"
 	"github.com/bonedaddy/go-indexed/bclient"
-	"github.com/bonedaddy/go-indexed/utils"
 	"github.com/bwmarrin/discordgo"
 	"github.com/ethereum/go-ethereum/common"
 )
@@ -76,21 +75,12 @@ func (c *Client) poolTotalValueLocked(ctx *dgc.Ctx) {
 }
 
 func (c *Client) poolTotalSupply(ctx *dgc.Ctx) {
-	if !ctx.Command.RateLimiter.NotifyExecution(ctx) {
-		return
-	}
 	arguments := ctx.Arguments
 	poolName := arguments.Get(0).Raw()
-	ip, err := c.getIndexPool(poolName)
+	supply, err := c.db.LastTotalSupply(poolName)
 	if err != nil {
 		ctx.RespondText("invalid pool")
 		return
 	}
-	supply, err := ip.TotalSupply(nil)
-	if err != nil {
-		ctx.RespondText("failed to get total supply")
-		return
-	}
-	supplyF, _ := utils.ToDecimal(supply, 18).Float64()
-	ctx.RespondText(printer.Sprintf("total supply for %s: %0.2f", poolName, supplyF))
+	ctx.RespondText(printer.Sprintf("total supply for %s: %0.2f", poolName, supply))
 }
diff --git a/docker-compose.yml b/docker-compose.yml
index e10d634..c6beac0 100644
--- a/docker-compose.yml
+++ b/docker-compose.yml
@@ -1,7 +1,7 @@
 version: "3.5"
 services:
   ndx-bot:
-    image: bonedaddy/gondx:latest
+    image: bonedaddy/gondx:v0.0.17-rc1-2-gfe8d0e5
     command: "--startup.sleep discord ndx-bot"
     depends_on:
       - chain-updater
@@ -12,7 +12,7 @@ services:
     volumes:
      - ./config.yml:/config.yml
  chain-updater:
-    image: bonedaddy/gondx:latest
+    image: bonedaddy/gondx:v0.0.17-rc1-2-gfe8d0e5
     command: "--startup.sleep discord db chain-updater"
     depends_on:
      - postgres
@@ -23,6 +23,7 @@ services:
      - ./config.yml:/config.yml
  postgres:
    image: postgres:10.12
+   shm_size: 2gb
    networks:
      privnet:
        ipv4_address: 172.16.238.4
@@ -29,7 +30,20 @@ services:
    environment:
      POSTGRES_DB: "indexed"
      POSTGRES_USER: "postgres"
      POSTGRES_PASSWORD: "password123"
+   volumes:
+     - ./my-postgres.conf:/etc/postgresql/postgresql.conf
+  grafana:
+    image: grafana/grafana
+    depends_on:
+      - postgres
+    networks:
+      privnet:
+        ipv4_address: 172.16.238.6
+    ports:
+      - 3001:3000
+    volumes:
+      - ./grafana:/var/lib/grafana
 networks:
   privnet:
     driver: bridge
diff --git a/go.mod b/go.mod
index 65265f8..1faa8d1 100644
--- a/go.mod
+++ b/go.mod
@@ -7,6 +7,7 @@ require (
 	github.com/bwmarrin/discordgo v0.22.1-0.20201217190221-8d6815dde7ed
 	github.com/ethereum/go-ethereum v1.9.25
 	github.com/pkg/errors v0.8.1
+	github.com/prometheus/client_golang v0.9.1
 	github.com/shopspring/decimal v1.2.0
 	github.com/stretchr/testify v1.5.1
 	github.com/urfave/cli/v2 v2.3.0
diff --git a/go.sum b/go.sum
index 1045aad..8c2e465 100644
--- a/go.sum
+++ b/go.sum
@@ -25,6 +25,7 @@ github.com/allegro/bigcache v1.2.1-0.20190218064605-e24eb225f156/go.mod h1:Cb/ax
 github.com/aristanetworks/goarista v0.0.0-20170210015632-ea17b1a17847 h1:rtI0fD4oG/8eVokGVPYJEW1F88p1ZNgXiEIs9thEE4A=
 github.com/aristanetworks/goarista v0.0.0-20170210015632-ea17b1a17847/go.mod h1:D/tb0zPVXnP7fmsLZjtdUhSsumbK/ij54UXjjVgMGxQ=
 github.com/aws/aws-sdk-go v1.25.48/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo=
+github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973 h1:xJ4a3vCFaGF/jqvzLMYoU8P317H5OQ+Via4RmuPwCS0=
 github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
 github.com/bonedaddy/dgc v0.0.2-0.20210101001021-5e0add7c4088 h1:t4Z+bNaMXXxk0dFO5M/1iUaXDpJGbr4gO0hY8oCOKII=
 github.com/bonedaddy/dgc v0.0.2-0.20210101001021-5e0add7c4088/go.mod h1:MB/odTlKS7hlW8CC/5LkaPf16QGByIIPjIWkT6solH0=
@@ -216,6 +217,7 @@ github.com/mattn/go-runewidth v0.0.4 h1:2BvfKmzob6Bmd4YsL0zygOqfdFnK7GR4QL06Do4/
 github.com/mattn/go-runewidth v0.0.4/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU=
 github.com/mattn/go-sqlite3 v1.14.5 h1:1IdxlwTNazvbKJQSxoJ5/9ECbEeaTTyeU7sEAZ5KKTQ=
 github.com/mattn/go-sqlite3 v1.14.5/go.mod h1:WVKg1VTActs4Qso6iwGbiFih2UIHo0ENGwNd0Lj+XmI=
+github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU=
 github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
 github.com/naoina/go-stringutil v0.1.0/go.mod h1:XJ2SJL9jCtBh+P9q5btrd/Ylo8XwT/h1USek5+NqSA0=
 github.com/naoina/toml v0.1.2-0.20170918210437-9fafd6967416/go.mod h1:NBIhNtsFMo3G2szEBne+bO4gS192HuIYRqfvOWb4i1E=
@@ -242,9 +244,13 @@ github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I=
 github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
 github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
 github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
+github.com/prometheus/client_golang v0.9.1 h1:K47Rk0v/fkEfwfQet2KWhscE0cJzjgCCDBG2KHZoVno=
 github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
+github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910 h1:idejC8f05m9MGOsuEi1ATq9shN03HrxNkD/luQvxCv8=
 github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
+github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce h1:X0jFYGnHemYDIW6jlc+fSI8f9Cg+jqCnClYP2WgZT/A=
 github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
+github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d h1:GoAlyOgbOEIFdaDqxJVlbOQ1DtGmZWs/Qau0hIlk+WQ=
 github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
 github.com/prometheus/tsdb v0.6.2-0.20190402121629-4f204dcbc150 h1:ZeU+auZj1iNzN8iVhff6M38Mfu73FQiJve/GEXYJBjE=
 github.com/prometheus/tsdb v0.6.2-0.20190402121629-4f204dcbc150/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU=
diff --git a/my-postgres.conf b/my-postgres.conf
new file mode 100644
index 0000000..3423a5b
--- /dev/null
+++ b/my-postgres.conf
@@ -0,0 +1,663 @@
+# -----------------------------
+# PostgreSQL configuration file
+# -----------------------------
+#
+# This file consists of lines of the form:
+#
+#   name = value
+#
+# (The "=" is optional.)  Whitespace may be used.  Comments are introduced with
+# "#" anywhere on a line.  The complete list of parameter names and allowed
+# values can be found in the PostgreSQL documentation.
+#
+# The commented-out settings shown in this file represent the default values.
+# Re-commenting a setting is NOT sufficient to revert it to the default value;
+# you need to reload the server.
+#
+# This file is read on server startup and when the server receives a SIGHUP
+# signal.  If you edit the file on a running system, you have to SIGHUP the
+# server for the changes to take effect, run "pg_ctl reload", or execute
+# "SELECT pg_reload_conf()".  Some parameters, which are marked below,
+# require a server shutdown and restart to take effect.
+#
+# Any parameter can also be given as a command-line option to the server, e.g.,
+# "postgres -c log_connections=on".  Some parameters can be changed at run time
+# with the "SET" SQL command.
+#
+# Memory units:  kB = kilobytes        Time units:  ms  = milliseconds
+#                MB = megabytes                     s   = seconds
+#                GB = gigabytes                     min = minutes
+#                TB = terabytes                     h   = hours
+#                                                   d   = days
+
+
+#------------------------------------------------------------------------------
+# FILE LOCATIONS
+#------------------------------------------------------------------------------
+
+# The default values of these variables are driven from the -D command-line
+# option or PGDATA environment variable, represented here as ConfigDir.
+
+#data_directory = 'ConfigDir'		# use data in another directory
+					# (change requires restart)
+#hba_file = 'ConfigDir/pg_hba.conf'	# host-based authentication file
+					# (change requires restart)
+#ident_file = 'ConfigDir/pg_ident.conf'	# ident configuration file
+					# (change requires restart)
+
+# If external_pid_file is not explicitly set, no extra PID file is written.
+#external_pid_file = ''			# write an extra PID file
+					# (change requires restart)
+
+
+#------------------------------------------------------------------------------
+# CONNECTIONS AND AUTHENTICATION
+#------------------------------------------------------------------------------
+
+# - Connection Settings -
+
+listen_addresses = '*'
+					# comma-separated list of addresses;
+					# defaults to 'localhost'; use '*' for all
+					# (change requires restart)
+#port = 5432				# (change requires restart)
+#max_connections = 100			# (change requires restart)
+#superuser_reserved_connections = 3	# (change requires restart)
+#unix_socket_directories = '/tmp'	# comma-separated list of directories
+					# (change requires restart)
+#unix_socket_group = ''			# (change requires restart)
+#unix_socket_permissions = 0777		# begin with 0 to use octal notation
+					# (change requires restart)
+#bonjour = off				# advertise server via Bonjour
+					# (change requires restart)
+#bonjour_name = ''			# defaults to the computer name
+					# (change requires restart)
+
+# - Security and Authentication -
+
+#authentication_timeout = 1min		# 1s-600s
+#ssl = off
+#ssl_ciphers = 'HIGH:MEDIUM:+3DES:!aNULL' # allowed SSL ciphers
+#ssl_prefer_server_ciphers = on
+#ssl_ecdh_curve = 'prime256v1'
+#ssl_dh_params_file = ''
+#ssl_cert_file = 'server.crt'
+#ssl_key_file = 'server.key'
+#ssl_ca_file = ''
+#ssl_crl_file = ''
+#password_encryption = md5		# md5 or scram-sha-256
+#db_user_namespace = off
+#row_security = on
+
+# GSSAPI using Kerberos
+#krb_server_keyfile = ''
+#krb_caseins_users = off
+
+# - TCP Keepalives -
+# see "man 7 tcp" for details
+
+#tcp_keepalives_idle = 0		# TCP_KEEPIDLE, in seconds;
+					# 0 selects the system default
+#tcp_keepalives_interval = 0		# TCP_KEEPINTVL, in seconds;
+					# 0 selects the system default
+#tcp_keepalives_count = 0		# TCP_KEEPCNT;
+					# 0 selects the system default
+
+
+#------------------------------------------------------------------------------
+# RESOURCE USAGE (except WAL)
+#------------------------------------------------------------------------------
+
+# - Memory -
+
+# increased from 32MB
+shared_buffers = 128MB			# min 128kB
+					# (change requires restart)
+#huge_pages = try			# on, off, or try
+					# (change requires restart)
+#temp_buffers = 8MB			# min 800kB
+#max_prepared_transactions = 0		# zero disables the feature
+					# (change requires restart)
+# Caution: it is not advisable to set max_prepared_transactions nonzero unless
+# you actively intend to use prepared transactions.
+# increased from 4MB
+work_mem = 64MB				# min 64kB
+#maintenance_work_mem = 64MB		# min 1MB
+#replacement_sort_tuples = 150000	# limits use of replacement selection sort
+#autovacuum_work_mem = -1		# min 1MB, or -1 to use maintenance_work_mem
+#max_stack_depth = 2MB			# min 100kB
+#dynamic_shared_memory_type = posix	# the default is the first option
+					# supported by the operating system:
+					#   posix
+					#   sysv
+					#   windows
+					#   mmap
+					# use none to disable dynamic shared memory
+					# (change requires restart)
+
+# - Disk -
+
+#temp_file_limit = -1			# limits per-process temp file space
+					# in kB, or -1 for no limit
+
+# - Kernel Resource Usage -
+
+#max_files_per_process = 1000		# min 25
+					# (change requires restart)
+#shared_preload_libraries = ''		# (change requires restart)
+
+# - Cost-Based Vacuum Delay -
+
+#vacuum_cost_delay = 0			# 0-100 milliseconds
+#vacuum_cost_page_hit = 1		# 0-10000 credits
+#vacuum_cost_page_miss = 10		# 0-10000 credits
+#vacuum_cost_page_dirty = 20		# 0-10000 credits
+#vacuum_cost_limit = 200		# 1-10000 credits
+
+# - Background Writer -
+
+#bgwriter_delay = 200ms			# 10-10000ms between rounds
+#bgwriter_lru_maxpages = 100		# 0-1000 max buffers written/round
+#bgwriter_lru_multiplier = 2.0		# 0-10.0 multiplier on buffers scanned/round
+#bgwriter_flush_after = 0		# measured in pages, 0 disables
+
+# - Asynchronous Behavior -
+
+#effective_io_concurrency = 1		# 1-1000; 0 disables prefetching
+#max_worker_processes = 8		# (change requires restart)
+#max_parallel_workers_per_gather = 2	# taken from max_parallel_workers
+#max_parallel_workers = 8		# maximum number of max_worker_processes that
+					# can be used in parallel queries
+#old_snapshot_threshold = -1		# 1min-60d; -1 disables; 0 is immediate
+					# (change requires restart)
+#backend_flush_after = 0		# measured in pages, 0 disables
+
+
+#------------------------------------------------------------------------------
+# WRITE AHEAD LOG
+#------------------------------------------------------------------------------
+
+# - Settings -
+
+#wal_level = replica			# minimal, replica, or logical
+					# (change requires restart)
+#fsync = on				# flush data to disk for crash safety
+					# (turning this off can cause
+					# unrecoverable data corruption)
+#synchronous_commit = on		# synchronization level;
+					# off, local, remote_write, remote_apply, or on
+#wal_sync_method = fsync		# the default is the first option
+					# supported by the operating system:
+					#   open_datasync
+					#   fdatasync (default on Linux)
+					#   fsync
+					#   fsync_writethrough
+					#   open_sync
+#full_page_writes = on			# recover from partial page writes
+#wal_compression = off			# enable compression of full-page writes
+#wal_log_hints = off			# also do full page writes of non-critical updates
+					# (change requires restart)
+#wal_buffers = -1			# min 32kB, -1 sets based on shared_buffers
+					# (change requires restart)
+#wal_writer_delay = 200ms		# 1-10000 milliseconds
+#wal_writer_flush_after = 1MB		# measured in pages, 0 disables
+
+#commit_delay = 0			# range 0-100000, in microseconds
+#commit_siblings = 5			# range 1-1000
+
+# - Checkpoints -
+
+#checkpoint_timeout = 5min		# range 30s-1d
+#max_wal_size = 1GB
+#min_wal_size = 80MB
+#checkpoint_completion_target = 0.5	# checkpoint target duration, 0.0 - 1.0
+#checkpoint_flush_after = 0		# measured in pages, 0 disables
+#checkpoint_warning = 30s		# 0 disables
+
+# - Archiving -
+
+#archive_mode = off		# enables archiving; off, on, or always
+				# (change requires restart)
+#archive_command = ''		# command to use to archive a logfile segment
+				# placeholders: %p = path of file to archive
+				#               %f = file name only
+				# e.g. 'test ! -f /mnt/server/archivedir/%f && cp %p /mnt/server/archivedir/%f'
+#archive_timeout = 0		# force a logfile segment switch after this
+				# number of seconds; 0 disables
+
+
+#------------------------------------------------------------------------------
+# REPLICATION
+#------------------------------------------------------------------------------
+
+# - Sending Server(s) -
+
+# Set these on the master and on any standby that will send replication data.
+
+#max_wal_senders = 10		# max number of walsender processes
+				# (change requires restart)
+#wal_keep_segments = 0		# in logfile segments, 16MB each; 0 disables
+#wal_sender_timeout = 60s	# in milliseconds; 0 disables
+
+#max_replication_slots = 10	# max number of replication slots
+				# (change requires restart)
+#track_commit_timestamp = off	# collect timestamp of transaction commit
+				# (change requires restart)
+
+# - Master Server -
+
+# These settings are ignored on a standby server.
+
+#synchronous_standby_names = ''	# standby servers that provide sync rep
+				# method to choose sync standbys, number of sync standbys,
+				# and comma-separated list of application_name
+				# from standby(s); '*' = all
+#vacuum_defer_cleanup_age = 0	# number of xacts by which cleanup is delayed
+
+# - Standby Servers -
+
+# These settings are ignored on a master server.
+
+#hot_standby = on			# "off" disallows queries during recovery
+					# (change requires restart)
+#max_standby_archive_delay = 30s	# max delay before canceling queries
+					# when reading WAL from archive;
+					# -1 allows indefinite delay
+#max_standby_streaming_delay = 30s	# max delay before canceling queries
+					# when reading streaming WAL;
+					# -1 allows indefinite delay
+#wal_receiver_status_interval = 10s	# send replies at least this often
+					# 0 disables
+#hot_standby_feedback = off		# send info from standby to prevent
+					# query conflicts
+#wal_receiver_timeout = 60s		# time that receiver waits for
+					# communication from master
+					# in milliseconds; 0 disables
+#wal_retrieve_retry_interval = 5s	# time to wait before retrying to
+					# retrieve WAL after a failed attempt
+
+# - Subscribers -
+
+# These settings are ignored on a publisher.
+
+#max_logical_replication_workers = 4	# taken from max_worker_processes
+					# (change requires restart)
+#max_sync_workers_per_subscription = 2	# taken from max_logical_replication_workers
+
+
+#------------------------------------------------------------------------------
+# QUERY TUNING
+#------------------------------------------------------------------------------
+
+# - Planner Method Configuration -
+
+#enable_bitmapscan = on
+#enable_hashagg = on
+#enable_hashjoin = on
+#enable_indexscan = on
+#enable_indexonlyscan = on
+#enable_material = on
+#enable_mergejoin = on
+#enable_nestloop = on
+#enable_seqscan = on
+#enable_sort = on
+#enable_tidscan = on
+
+# - Planner Cost Constants -
+
+#seq_page_cost = 1.0			# measured on an arbitrary scale
+#random_page_cost = 4.0			# same scale as above
+#cpu_tuple_cost = 0.01			# same scale as above
+#cpu_index_tuple_cost = 0.005		# same scale as above
+#cpu_operator_cost = 0.0025		# same scale as above
+#parallel_tuple_cost = 0.1		# same scale as above
+#parallel_setup_cost = 1000.0		# same scale as above
+#min_parallel_table_scan_size = 8MB
+#min_parallel_index_scan_size = 512kB
+#effective_cache_size = 4GB
+
+# - Genetic Query Optimizer -
+
+#geqo = on
+#geqo_threshold = 12
+#geqo_effort = 5			# range 1-10
+#geqo_pool_size = 0			# selects default based on effort
+#geqo_generations = 0			# selects default based on effort
+#geqo_selection_bias = 2.0		# range 1.5-2.0
+#geqo_seed = 0.0			# range 0.0-1.0
+
+# - Other Planner Options -
+
+#default_statistics_target = 100	# range 1-10000
+#constraint_exclusion = partition	# on, off, or partition
+#cursor_tuple_fraction = 0.1		# range 0.0-1.0
+#from_collapse_limit = 8
+#join_collapse_limit = 8		# 1 disables collapsing of explicit
+					# JOIN clauses
+#force_parallel_mode = off
+
+
+#------------------------------------------------------------------------------
+# ERROR REPORTING AND LOGGING
+#------------------------------------------------------------------------------
+
+# - Where to Log -
+
+#log_destination = 'stderr'		# Valid values are combinations of
+					# stderr, csvlog, syslog, and eventlog,
+					# depending on platform.  csvlog
+					# requires logging_collector to be on.
+
+# This is used when logging to stderr:
+#logging_collector = off		# Enable capturing of stderr and csvlog
+					# into log files. Required to be on for
+					# csvlogs.
+					# (change requires restart)
+
+# These are only used if logging_collector is on:
+#log_directory = 'log'			# directory where log files are written,
+					# can be absolute or relative to PGDATA
+#log_filename = 'postgresql-%Y-%m-%d_%H%M%S.log'	# log file name pattern,
+					# can include strftime() escapes
+#log_file_mode = 0600			# creation mode for log files,
+					# begin with 0 to use octal notation
+#log_truncate_on_rotation = off		# If on, an existing log file with the
+					# same name as the new log file will be
+					# truncated rather than appended to.
+					# But such truncation only occurs on
+					# time-driven rotation, not on restarts
+					# or size-driven rotation.  Default is
+					# off, meaning append to existing files
+					# in all cases.
+#log_rotation_age = 1d			# Automatic rotation of logfiles will
+					# happen after that time.  0 disables.
+#log_rotation_size = 10MB		# Automatic rotation of logfiles will
+					# happen after that much log output.
+					# 0 disables.
+
+# These are relevant when logging to syslog:
+#syslog_facility = 'LOCAL0'
+#syslog_ident = 'postgres'
+#syslog_sequence_numbers = on
+#syslog_split_messages = on
+
+# This is only relevant when logging to eventlog (win32):
+# (change requires restart)
+#event_source = 'PostgreSQL'
+
+# - When to Log -
+
+#log_min_messages = warning		# values in order of decreasing detail:
+					#   debug5
+					#   debug4
+					#   debug3
+					#   debug2
+					#   debug1
+					#   info
+					#   notice
+					#   warning
+					#   error
+					#   log
+					#   fatal
+					#   panic
+
+#log_min_error_statement = error	# values in order of decreasing detail:
+					#   debug5
+					#   debug4
+					#   debug3
+					#   debug2
+					#   debug1
+					#   info
+					#   notice
+					#   warning
+					#   error
+					#   log
+					#   fatal
+					#   panic (effectively off)
+
+#log_min_duration_statement = -1	# -1 is disabled, 0 logs all statements
+					# and their durations, > 0 logs only
+					# statements running at least this number
+					# of milliseconds
+
+
+# - What to Log -
+
+#debug_print_parse = off
+#debug_print_rewritten = off
+#debug_print_plan = off
+#debug_pretty_print = on
+#log_checkpoints = off
+#log_connections = off
+#log_disconnections = off
+#log_duration = off
+#log_error_verbosity = default		# terse, default, or verbose messages
+#log_hostname = off
+#log_line_prefix = '%m [%p] '		# special values:
+					#   %a = application name
+					#   %u = user name
+					#   %d = database name
+					#   %r = remote host and port
+					#   %h = remote host
+					#   %p = process ID
+					#   %t = timestamp without milliseconds
+					#   %m = timestamp with milliseconds
+					#   %n = timestamp with milliseconds (as a Unix epoch)
+					#   %i = command tag
+					#   %e = SQL state
+					#   %c = session ID
+					#   %l = session line number
+					#   %s = session start timestamp
+					#   %v = virtual transaction ID
+					#   %x = transaction ID (0 if none)
+					#   %q = stop here in non-session
+					#        processes
+					#   %% = '%'
+					# e.g. '<%u%%%d> '
+#log_lock_waits = off			# log lock waits >= deadlock_timeout
+#log_statement = 'none'			# none, ddl, mod, all
+#log_replication_commands = off
+#log_temp_files = -1			# log temporary files equal or larger
+					# than the specified size in kilobytes;
+					# -1 disables, 0 logs all temp files
+#log_timezone = 'GMT'
+
+
+# - Process Title -
+
+#cluster_name = ''			# added to process titles if nonempty
+					# (change requires restart)
+#update_process_title = on
+
+
+#------------------------------------------------------------------------------
+# RUNTIME STATISTICS
+#------------------------------------------------------------------------------
+
+# - Query/Index Statistics Collector -
+
+#track_activities = on
+#track_counts = on
+#track_io_timing = off
+#track_functions = none			# none, pl, all
+#track_activity_query_size = 1024	# (change requires restart)
+#stats_temp_directory = 'pg_stat_tmp'
+
+
+# - Statistics Monitoring -
+
+#log_parser_stats = off
+#log_planner_stats = off
+#log_executor_stats = off
+#log_statement_stats = off
+
+
+#------------------------------------------------------------------------------
+# AUTOVACUUM PARAMETERS
+#------------------------------------------------------------------------------
+
+#autovacuum = on			# Enable autovacuum subprocess?  'on'
+					# requires track_counts to also be on.
+#log_autovacuum_min_duration = -1	# -1 disables, 0 logs all actions and
+					# their durations, > 0 logs only
+					# actions running at least this number
+					# of milliseconds.
+#autovacuum_max_workers = 3		# max number of autovacuum subprocesses
+					# (change requires restart)
+#autovacuum_naptime = 1min		# time between autovacuum runs
+#autovacuum_vacuum_threshold = 50	# min number of row updates before
+					# vacuum
+#autovacuum_analyze_threshold = 50	# min number of row updates before
+					# analyze
+#autovacuum_vacuum_scale_factor = 0.2	# fraction of table size before vacuum
+#autovacuum_analyze_scale_factor = 0.1	# fraction of table size before analyze
+#autovacuum_freeze_max_age = 200000000	# maximum XID age before forced vacuum
+					# (change requires restart)
+#autovacuum_multixact_freeze_max_age = 400000000	# maximum multixact age
+					# before forced vacuum
+					# (change requires restart)
+#autovacuum_vacuum_cost_delay = 20ms	# default vacuum cost delay for
+					# autovacuum, in milliseconds;
+					# -1 means use vacuum_cost_delay
+#autovacuum_vacuum_cost_limit = -1	# default vacuum cost limit for
+					# autovacuum, -1 means use
+					# vacuum_cost_limit
+
+
+#------------------------------------------------------------------------------
+# CLIENT CONNECTION DEFAULTS
+#------------------------------------------------------------------------------
+
+# - Statement Behavior -
+
+#client_min_messages = notice		# values in order of decreasing detail:
+					#   debug5
+					#   debug4
+					#   debug3
+					#   debug2
+					#   debug1
+					#   log
+					#   notice
+					#   warning
+					#   error
+#search_path = '"$user", public'	# schema names
+#default_tablespace = ''		# a tablespace name, '' uses the default
+#temp_tablespaces = ''			# a list of tablespace names, '' uses
+					# only default tablespace
+#check_function_bodies = on
+#default_transaction_isolation = 'read committed'
+#default_transaction_read_only = off
+#default_transaction_deferrable = off
+#session_replication_role = 'origin'
+#statement_timeout = 0			# in milliseconds, 0 is disabled
+#lock_timeout = 0			# in milliseconds, 0 is disabled
+#idle_in_transaction_session_timeout = 0	# in milliseconds, 0 is disabled
+#vacuum_freeze_min_age = 50000000
+#vacuum_freeze_table_age = 150000000
+#vacuum_multixact_freeze_min_age = 5000000
+#vacuum_multixact_freeze_table_age = 150000000
+#bytea_output = 'hex'			# hex, escape
+#xmlbinary = 'base64'
+#xmloption = 'content'
+#gin_fuzzy_search_limit = 0
+#gin_pending_list_limit = 4MB
+
+# - Locale and Formatting -
+
+#datestyle = 'iso, mdy'
+#intervalstyle = 'postgres'
+#timezone = 'GMT'
+#timezone_abbreviations = 'Default'	# Select the set of available time zone
+					# abbreviations.  Currently, there are
+					#   Default
+					#   Australia (historical usage)
+					#   India
+					# You can create your own file in
+					# share/timezonesets/.
+#extra_float_digits = 0			# min -15, max 3
+#client_encoding = sql_ascii		# actually, defaults to database
+					# encoding
+
+# These settings are initialized by initdb, but they can be changed.
+#lc_messages = 'C'			# locale for system error message
+					# strings
+#lc_monetary = 'C'			# locale for monetary formatting
+#lc_numeric = 'C'			# locale for number formatting
+#lc_time = 'C'				# locale for time formatting
+
+# default configuration for text search
+#default_text_search_config = 'pg_catalog.simple'
+
+# - Other Defaults -
+
+#dynamic_library_path = '$libdir'
+#local_preload_libraries = ''
+#session_preload_libraries = ''
+
+
+#------------------------------------------------------------------------------
+# LOCK MANAGEMENT
+#------------------------------------------------------------------------------
+
+#deadlock_timeout = 1s
+#max_locks_per_transaction = 64		# min 10
+					# (change requires restart)
+#max_pred_locks_per_transaction = 64	# min 10
+					# (change requires restart)
+#max_pred_locks_per_relation = -2	# negative values mean
+					# (max_pred_locks_per_transaction
+					#  / -max_pred_locks_per_relation) - 1
+#max_pred_locks_per_page = 2		# min 0
+
+
+#------------------------------------------------------------------------------
+# VERSION/PLATFORM COMPATIBILITY
+#------------------------------------------------------------------------------
+
+# - Previous PostgreSQL Versions -
+
+#array_nulls = on
+#backslash_quote = safe_encoding	# on, off, or safe_encoding
+#default_with_oids = off
+#escape_string_warning = on
+#lo_compat_privileges = off
+#operator_precedence_warning = off
+#quote_all_identifiers = off
+#standard_conforming_strings = on
+#synchronize_seqscans = on
+
+# - Other Platforms and Clients -
+
+#transform_null_equals = off
+
+
+#------------------------------------------------------------------------------
+# ERROR HANDLING
+#------------------------------------------------------------------------------
+
+#exit_on_error = off			# terminate session on any error?
+#restart_after_crash = on		# reinitialize after backend crash?
+#data_sync_retry = off			# retry or panic on failure to fsync
+					# data?
+					# (change requires restart)
+
+
+#------------------------------------------------------------------------------
+# CONFIG FILE INCLUDES
+#------------------------------------------------------------------------------
+
+# These options allow settings to be loaded from files other than the
+# default postgresql.conf.  Note that these are directives, not variable
+# assignments, so they can usefully be given more than once.
+
+#include_dir = '...'			# include files ending in '.conf' from
+					# a directory, e.g., 'conf.d'
+#include_if_exists = '...'		# include file only if it exists
+#include = '...'			# include file
+
+
+#------------------------------------------------------------------------------
+# CUSTOMIZED OPTIONS
+#------------------------------------------------------------------------------
+
+# Add settings for extensions here
diff --git a/prometheus.yml b/prometheus.yml
new file mode 100644
index 0000000..0d4ddc2
--- /dev/null
+++ b/prometheus.yml
@@ -0,0 +1,32 @@
+# my global config
+global:
+  scrape_interval: 60s # Set the scrape interval to every 60 seconds. The default is every 1 minute.
+  evaluation_interval: 60s # Evaluate rules every 60 seconds. The default is every 1 minute.
+  # scrape_timeout is set to the global default (10s).
+
+# Alertmanager configuration
+alerting:
+  alertmanagers:
+  - static_configs:
+    - targets:
+      # - alertmanager:9093
+
+# Load rules once and periodically evaluate them according to the global 'evaluation_interval'.
+rule_files:
+  # - "first_rules.yml"
+  # - "second_rules.yml"
+
+# A scrape configuration containing exactly one endpoint to scrape:
+# Here it's Prometheus itself.
+scrape_configs:
+  # The job name is added as a label `job=<job_name>` to any timeseries scraped from this config.
+  - job_name: 'prometheus'
+
+    # metrics_path defaults to '/metrics'
+    # scheme defaults to 'http'.
+
+    static_configs:
+    - targets: ['localhost:9090']
+
+  - job_name: 'nebula'
+    static_configs:
+    - targets: ['172.16.238.7:9123']
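Once the prometheus subcommand is running, the new gauges can be spot-checked straight off the /metrics endpoint. A minimal sketch, assuming the default listen address from the flag above (0.0.0.0:9123) and Go 1.16+ for io.ReadAll:

	// Hypothetical check: print every series exported under the "indexed" namespace.
	package main

	import (
		"fmt"
		"io"
		"net/http"
		"strings"
	)

	func main() {
		resp, err := http.Get("http://localhost:9123/metrics")
		if err != nil {
			panic(err)
		}
		defer resp.Body.Close()
		body, _ := io.ReadAll(resp.Body)
		for _, line := range strings.Split(string(body), "\n") {
			// dashboard.go registers every gauge under the "indexed" namespace,
			// so the exported names all start with "indexed_"
			if strings.HasPrefix(line, "indexed_") {
				fmt.Println(line)
			}
		}
	}

This is the same target the 'nebula' scrape job above points Prometheus at (172.16.238.7:9123 on the compose network).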