diff --git a/collector/exporter.go b/collector/exporter.go
index 0587f1d98..d272c4d78 100644
--- a/collector/exporter.go
+++ b/collector/exporter.go
@@ -50,17 +50,23 @@ var (
 	versionRE = regexp.MustCompile(`^\d+\.\d+`)
 )

-// Tunable flags.
-var (
-	exporterLockTimeout = kingpin.Flag(
+// Config holds configuration options for the exporter.
+type Config struct {
+	LockTimeout   int
+	SlowLogFilter bool
+}
+
+// RegisterFlags adds flags to configure the exporter.
+func (c *Config) RegisterFlags(application *kingpin.Application) {
+	application.Flag(
 		"exporter.lock_wait_timeout",
 		"Set a lock_wait_timeout (in seconds) on the connection to avoid long metadata locking.",
-	).Default("2").Int()
-	slowLogFilter = kingpin.Flag(
+	).Default("2").IntVar(&c.LockTimeout)
+	application.Flag(
 		"exporter.log_slow_filter",
 		"Add a log_slow_filter to avoid slow query logging of scrapes. NOTE: Not supported by Oracle MySQL.",
-	).Default("false").Bool()
-)
+	).Default("false").BoolVar(&c.SlowLogFilter)
+}

 // metric definition
 var (
@@ -95,11 +101,11 @@ type Exporter struct {
 }

 // New returns a new MySQL exporter for the provided DSN.
-func New(ctx context.Context, dsn string, scrapers []Scraper, logger log.Logger) *Exporter {
+func New(ctx context.Context, dsn string, scrapers []Scraper, logger log.Logger, cfg Config) *Exporter {
 	// Setup extra params for the DSN, default to having a lock timeout.
-	dsnParams := []string{fmt.Sprintf(timeoutParam, *exporterLockTimeout)}
+	dsnParams := []string{fmt.Sprintf(timeoutParam, cfg.LockTimeout)}

-	if *slowLogFilter {
+	if cfg.SlowLogFilter {
 		dsnParams = append(dsnParams, sessionSettingsParam)
 	}
diff --git a/collector/exporter_test.go b/collector/exporter_test.go
index ff6bcf744..b60ef8413 100644
--- a/collector/exporter_test.go
+++ b/collector/exporter_test.go
@@ -19,6 +19,7 @@ import (
 	"os"
 	"testing"

+	"github.com/alecthomas/kingpin/v2"
 	"github.com/go-kit/log"
 	"github.com/go-kit/log/level"
 	"github.com/prometheus/client_golang/prometheus"
@@ -33,6 +34,14 @@ func TestExporter(t *testing.T) {
 		t.Skip("-short is passed, skipping test")
 	}

+	var exporterConfig Config
+	kingpinApp := kingpin.New("TestExporter", "")
+	exporterConfig.RegisterFlags(kingpinApp)
+	_, err := kingpinApp.Parse([]string{})
+	if err != nil {
+		t.Fatal(err)
+	}
+
 	exporter := New(
 		context.Background(),
 		dsn,
@@ -40,6 +49,7 @@ func TestExporter(t *testing.T) {
 			ScrapeGlobalStatus{},
 		},
 		log.NewNopLogger(),
+		exporterConfig,
 	)

 	convey.Convey("Metrics describing", t, func() {
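The pattern above replaces package-level flag globals with an explicit `Config` threaded into `New`. A minimal sketch of how a caller outside the test suite might wire this up, assuming the patched API; the DSN and application name are illustrative placeholders, not part of this patch:

```go
package main

import (
	"context"
	"os"

	"github.com/alecthomas/kingpin/v2"
	"github.com/go-kit/log"

	"github.com/prometheus/mysqld_exporter/collector"
)

func main() {
	// Register the exporter's flags on a dedicated kingpin application
	// rather than the global kingpin.CommandLine.
	var cfg collector.Config
	app := kingpin.New("example", "")
	cfg.RegisterFlags(app)
	kingpin.MustParse(app.Parse(os.Args[1:]))

	// cfg now carries the parsed values (LockTimeout defaults to 2).
	exporter := collector.New(
		context.Background(),
		"user:password@(localhost:3306)/", // illustrative DSN
		[]collector.Scraper{collector.ScrapeGlobalStatus{}},
		log.NewNopLogger(),
		cfg,
	)
	_ = exporter // register with a prometheus.Registry in real use
}
```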
diff --git a/collector/heartbeat.go b/collector/heartbeat.go
index 7bc5fc588..71f843203 100644
--- a/collector/heartbeat.go
+++ b/collector/heartbeat.go
@@ -36,21 +36,6 @@ const (
 	heartbeatQuery = "SELECT UNIX_TIMESTAMP(ts), UNIX_TIMESTAMP(%s), server_id from `%s`.`%s`"
 )

-var (
-	collectHeartbeatDatabase = kingpin.Flag(
-		"collect.heartbeat.database",
-		"Database from where to collect heartbeat data",
-	).Default("heartbeat").String()
-	collectHeartbeatTable = kingpin.Flag(
-		"collect.heartbeat.table",
-		"Table from where to collect heartbeat data",
-	).Default("heartbeat").String()
-	collectHeartbeatUtc = kingpin.Flag(
-		"collect.heartbeat.utc",
-		"Use UTC for timestamps of the current server (`pt-heartbeat` is called with `--utc`)",
-	).Bool()
-)
-
 // Metric descriptors.
 var (
 	HeartbeatStoredDesc = prometheus.NewDesc(
@@ -74,7 +59,11 @@ var (
 //	server_id int unsigned NOT NULL PRIMARY KEY,
 //
 // );
-type ScrapeHeartbeat struct{}
+type ScrapeHeartbeat struct {
+	Database string
+	Table    string
+	UTC      bool
+}

 // Name of the Scraper. Should be unique.
 func (ScrapeHeartbeat) Name() string {
@@ -91,17 +80,33 @@ func (ScrapeHeartbeat) Version() float64 {
 	return 5.1
 }

+// RegisterFlags adds flags to configure the Scraper.
+func (s *ScrapeHeartbeat) RegisterFlags(application *kingpin.Application) {
+	application.Flag(
+		"collect.heartbeat.database",
+		"Database from where to collect heartbeat data",
+	).Default("heartbeat").StringVar(&s.Database)
+	application.Flag(
+		"collect.heartbeat.table",
+		"Table from where to collect heartbeat data",
+	).Default("heartbeat").StringVar(&s.Table)
+	application.Flag(
+		"collect.heartbeat.utc",
+		"Use UTC for timestamps of the current server (`pt-heartbeat` is called with `--utc`)",
+	).BoolVar(&s.UTC)
+}
+
 // nowExpr returns a current timestamp expression.
-func nowExpr() string {
-	if *collectHeartbeatUtc {
+func (s ScrapeHeartbeat) nowExpr() string {
+	if s.UTC {
 		return "UTC_TIMESTAMP(6)"
 	}
 	return "NOW(6)"
 }

 // Scrape collects data from database connection and sends it over channel as prometheus metric.
-func (ScrapeHeartbeat) Scrape(ctx context.Context, db *sql.DB, ch chan<- prometheus.Metric, logger log.Logger) error {
-	query := fmt.Sprintf(heartbeatQuery, nowExpr(), *collectHeartbeatDatabase, *collectHeartbeatTable)
+func (s ScrapeHeartbeat) Scrape(ctx context.Context, db *sql.DB, ch chan<- prometheus.Metric, logger log.Logger) error {
+	query := fmt.Sprintf(heartbeatQuery, s.nowExpr(), s.Database, s.Table)
 	heartbeatRows, err := db.QueryContext(ctx, query)
 	if err != nil {
 		return err
diff --git a/collector/heartbeat_test.go b/collector/heartbeat_test.go
index 48e35925c..b2b3d7ae8 100644
--- a/collector/heartbeat_test.go
+++ b/collector/heartbeat_test.go
@@ -55,7 +55,12 @@ var ScrapeHeartbeatTestCases = []ScrapeHeartbeatTestCase{
 func TestScrapeHeartbeat(t *testing.T) {
 	for _, tt := range ScrapeHeartbeatTestCases {
 		t.Run(fmt.Sprint(tt.Args), func(t *testing.T) {
-			_, err := kingpin.CommandLine.Parse(tt.Args)
+			scraper := ScrapeHeartbeat{}
+
+			app := kingpin.New("TestScrapeHeartbeat", "")
+			scraper.RegisterFlags(app)
+
+			_, err := app.Parse(tt.Args)
 			if err != nil {
 				t.Fatal(err)
 			}
@@ -72,7 +77,7 @@ func TestScrapeHeartbeat(t *testing.T) {

 			ch := make(chan prometheus.Metric)
 			go func() {
-				if err = (ScrapeHeartbeat{}).Scrape(context.Background(), db, ch, log.NewNopLogger()); err != nil {
+				if err = scraper.Scrape(context.Background(), db, ch, log.NewNopLogger()); err != nil {
 					t.Errorf("error calling function on test: %s", err)
 				}
 				close(ch)
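Because the heartbeat settings now live on the struct, the scraper can also be configured in code with no kingpin involvement at all. A hypothetical sketch under the patched API; the field values and the `example` package name are illustrative:

```go
package example

import (
	"context"
	"database/sql"

	"github.com/go-kit/log"
	"github.com/prometheus/client_golang/prometheus"

	"github.com/prometheus/mysqld_exporter/collector"
)

// scrapeHeartbeatOnce configures ScrapeHeartbeat directly through its
// fields; no flag parsing is required.
func scrapeHeartbeatOnce(ctx context.Context, db *sql.DB, ch chan<- prometheus.Metric) error {
	s := collector.ScrapeHeartbeat{
		Database: "percona", // illustrative; the flag default is "heartbeat"
		Table:    "heartbeat",
		UTC:      true, // nowExpr() will emit UTC_TIMESTAMP(6)
	}
	return s.Scrape(ctx, db, ch, log.NewNopLogger())
}
```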
diff --git a/collector/info_schema_processlist.go b/collector/info_schema_processlist.go
index 0d97161c3..52606ec24 100755
--- a/collector/info_schema_processlist.go
+++ b/collector/info_schema_processlist.go
@@ -42,22 +42,6 @@ const infoSchemaProcesslistQuery = `
 	  GROUP BY user, SUBSTRING_INDEX(host, ':', 1), command, state
 	`

-// Tunable flags.
-var (
-	processlistMinTime = kingpin.Flag(
-		"collect.info_schema.processlist.min_time",
-		"Minimum time a thread must be in each state to be counted",
-	).Default("0").Int()
-	processesByUserFlag = kingpin.Flag(
-		"collect.info_schema.processlist.processes_by_user",
-		"Enable collecting the number of processes by user",
-	).Default("true").Bool()
-	processesByHostFlag = kingpin.Flag(
-		"collect.info_schema.processlist.processes_by_host",
-		"Enable collecting the number of processes by host",
-	).Default("true").Bool()
-)
-
 // Metric descriptors.
 var (
 	processlistCountDesc = prometheus.NewDesc(
@@ -79,7 +63,11 @@ var (
 )

 // ScrapeProcesslist collects from `information_schema.processlist`.
-type ScrapeProcesslist struct{}
+type ScrapeProcesslist struct {
+	ProcessListMinTime  int
+	ProcessesByUserFlag bool
+	ProcessesByHostFlag bool
+}

 // Name of the Scraper. Should be unique.
 func (ScrapeProcesslist) Name() string {
@@ -96,11 +84,27 @@ func (ScrapeProcesslist) Version() float64 {
 	return 5.1
 }

+// RegisterFlags adds flags to configure the Scraper.
+func (s *ScrapeProcesslist) RegisterFlags(application *kingpin.Application) {
+	application.Flag(
+		"collect.info_schema.processlist.min_time",
+		"Minimum time a thread must be in each state to be counted",
+	).Default("0").IntVar(&s.ProcessListMinTime)
+	application.Flag(
+		"collect.info_schema.processlist.processes_by_user",
+		"Enable collecting the number of processes by user",
+	).Default("true").BoolVar(&s.ProcessesByUserFlag)
+	application.Flag(
+		"collect.info_schema.processlist.processes_by_host",
+		"Enable collecting the number of processes by host",
+	).Default("true").BoolVar(&s.ProcessesByHostFlag)
+}
+
 // Scrape collects data from database connection and sends it over channel as prometheus metric.
-func (ScrapeProcesslist) Scrape(ctx context.Context, db *sql.DB, ch chan<- prometheus.Metric, logger log.Logger) error {
+func (s ScrapeProcesslist) Scrape(ctx context.Context, db *sql.DB, ch chan<- prometheus.Metric, logger log.Logger) error {
 	processQuery := fmt.Sprintf(
 		infoSchemaProcesslistQuery,
-		*processlistMinTime,
+		s.ProcessListMinTime,
 	)
 	processlistRows, err := db.QueryContext(ctx, processQuery)
 	if err != nil {
@@ -162,12 +166,13 @@ func (ScrapeProcesslist) Scrape(ctx context.Context, db *sql.DB, ch chan<- prome
 		}
 	}

-	if *processesByHostFlag {
+	if s.ProcessesByHostFlag {
 		for _, host := range sortedMapKeys(stateHostCounts) {
 			ch <- prometheus.MustNewConstMetric(processesByHostDesc, prometheus.GaugeValue, float64(stateHostCounts[host]), host)
 		}
 	}
-	if *processesByUserFlag {
+
+	if s.ProcessesByUserFlag {
 		for _, user := range sortedMapKeys(stateUserCounts) {
 			ch <- prometheus.MustNewConstMetric(processesByUserDesc, prometheus.GaugeValue, float64(stateUserCounts[user]), user)
 		}
diff --git a/collector/info_schema_processlist_test.go b/collector/info_schema_processlist_test.go
index 6bcd0108d..c4d37ae90 100644
--- a/collector/info_schema_processlist_test.go
+++ b/collector/info_schema_processlist_test.go
@@ -27,7 +27,11 @@ import (
 )

 func TestScrapeProcesslist(t *testing.T) {
-	_, err := kingpin.CommandLine.Parse([]string{
+	scraper := ScrapeProcesslist{}
+	app := kingpin.New("TestScrapeProcesslist", "")
+	scraper.RegisterFlags(app)
+
+	_, err := app.Parse([]string{
 		"--collect.info_schema.processlist.processes_by_user",
 		"--collect.info_schema.processlist.processes_by_host",
 	})
@@ -56,7 +60,7 @@ func TestScrapeProcesslist(t *testing.T) {

 	ch := make(chan prometheus.Metric)
 	go func() {
-		if err = (ScrapeProcesslist{}).Scrape(context.Background(), db, ch, log.NewNopLogger()); err != nil {
+		if err = scraper.Scrape(context.Background(), db, ch, log.NewNopLogger()); err != nil {
 			t.Errorf("error calling function on test: %s", err)
 		}
 		close(ch)
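A side effect of this test change worth noting: each test now builds its own kingpin application, so parsed flag values can no longer leak between tests through the shared `kingpin.CommandLine`. A sketch of overriding a default on such an isolated application; the `"5"` is an arbitrary example value:

```go
package main

import (
	"fmt"

	"github.com/alecthomas/kingpin/v2"

	"github.com/prometheus/mysqld_exporter/collector"
)

func main() {
	// A private kingpin application keeps flag state out of the
	// process-wide kingpin.CommandLine.
	scraper := collector.ScrapeProcesslist{}
	app := kingpin.New("example", "")
	scraper.RegisterFlags(app)

	// Override the "0" default for min_time; "5" is illustrative.
	kingpin.MustParse(app.Parse([]string{
		"--collect.info_schema.processlist.min_time", "5",
	}))

	fmt.Println(scraper.ProcessListMinTime)  // 5
	fmt.Println(scraper.ProcessesByUserFlag) // true (flag default applied)
}
```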
diff --git a/collector/info_schema_tables.go b/collector/info_schema_tables.go
index f526f4d6d..c96b05b89 100644
--- a/collector/info_schema_tables.go
+++ b/collector/info_schema_tables.go
@@ -51,14 +51,6 @@ const (
 	`
 )

-// Tunable flags.
-var (
-	tableSchemaDatabases = kingpin.Flag(
-		"collect.info_schema.tables.databases",
-		"The list of databases to collect table stats for, or '*' for all",
-	).Default("*").String()
-)
-
 // Metric descriptors.
 var (
 	infoSchemaTablesVersionDesc = prometheus.NewDesc(
@@ -79,7 +71,9 @@ var (
 )

 // ScrapeTableSchema collects from `information_schema.tables`.
-type ScrapeTableSchema struct{}
+type ScrapeTableSchema struct {
+	Databases string
+}

 // Name of the Scraper. Should be unique.
 func (ScrapeTableSchema) Name() string {
@@ -96,10 +90,18 @@ func (ScrapeTableSchema) Version() float64 {
 	return 5.1
 }

+// RegisterFlags adds flags to configure the Scraper.
+func (s *ScrapeTableSchema) RegisterFlags(application *kingpin.Application) {
+	application.Flag(
+		"collect.info_schema.tables.databases",
+		"The list of databases to collect table stats for, or '*' for all",
+	).Default("*").StringVar(&s.Databases)
+}
+
 // Scrape collects data from database connection and sends it over channel as prometheus metric.
-func (ScrapeTableSchema) Scrape(ctx context.Context, db *sql.DB, ch chan<- prometheus.Metric, logger log.Logger) error {
+func (s ScrapeTableSchema) Scrape(ctx context.Context, db *sql.DB, ch chan<- prometheus.Metric, logger log.Logger) error {
 	var dbList []string
-	if *tableSchemaDatabases == "*" {
+	if s.Databases == "*" {
 		dbListRows, err := db.QueryContext(ctx, dbListQuery)
 		if err != nil {
 			return err
@@ -117,7 +119,7 @@ func (ScrapeTableSchema) Scrape(ctx context.Context, db *sql.DB, ch chan<- prome
 			dbList = append(dbList, database)
 		}
 	} else {
-		dbList = strings.Split(*tableSchemaDatabases, ",")
+		dbList = strings.Split(s.Databases, ",")
 	}

 	for _, database := range dbList {
diff --git a/collector/mysql_user.go b/collector/mysql_user.go
index 50891a32b..35272cc8a 100644
--- a/collector/mysql_user.go
+++ b/collector/mysql_user.go
@@ -69,14 +69,6 @@ const mysqlUserQuery = `
 	  FROM mysql.user
 	`

-// Tunable flags.
-var (
-	userPrivilegesFlag = kingpin.Flag(
-		"collect.mysql.user.privileges",
-		"Enable collecting user privileges from mysql.user",
-	).Default("false").Bool()
-)
-
 var (
 	labelNames = []string{"mysql_user", "hostmask"}
 )
@@ -102,7 +94,9 @@ var (
 )

 // ScrapeUser collects from `information_schema.processlist`.
-type ScrapeUser struct{}
+type ScrapeUser struct {
+	Privileges bool
+}

 // Name of the Scraper. Should be unique.
 func (ScrapeUser) Name() string {
@@ -119,8 +113,16 @@ func (ScrapeUser) Version() float64 {
 	return 5.1
 }

+// RegisterFlags adds flags to configure the Scraper.
+func (s *ScrapeUser) RegisterFlags(application *kingpin.Application) {
+	application.Flag(
+		"collect.mysql.user.privileges",
+		"Enable collecting user privileges from mysql.user",
+	).Default("false").BoolVar(&s.Privileges)
+}
+
 // Scrape collects data from database connection and sends it over channel as prometheus metric.
-func (ScrapeUser) Scrape(ctx context.Context, db *sql.DB, ch chan<- prometheus.Metric, logger log.Logger) error {
+func (s ScrapeUser) Scrape(ctx context.Context, db *sql.DB, ch chan<- prometheus.Metric, logger log.Logger) error {
 	var (
 		userRows *sql.Rows
 		err      error
@@ -213,7 +215,7 @@ func (ScrapeUser) Scrape(ctx context.Context, db *sql.DB, ch chan<- prometheus.M
 		return err
 	}

-	if *userPrivilegesFlag {
+	if s.Privileges {
 		userCols, err := userRows.Columns()
 		if err != nil {
 			return err
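The `Databases` field keeps the flag's existing semantics: `"*"` (the default) means discover every database, anything else is treated as a comma-separated list via `strings.Split`. A small illustration, assuming the patched API; the database names are hypothetical:

```go
package main

import (
	"fmt"
	"strings"

	"github.com/alecthomas/kingpin/v2"

	"github.com/prometheus/mysqld_exporter/collector"
)

func main() {
	scraper := collector.ScrapeTableSchema{}
	app := kingpin.New("example", "")
	scraper.RegisterFlags(app)

	// "db1,db2" is a hypothetical value; the "*" default would instead
	// make Scrape discover all databases via information_schema.
	kingpin.MustParse(app.Parse([]string{
		"--collect.info_schema.tables.databases", "db1,db2",
	}))

	fmt.Println(strings.Split(scraper.Databases, ",")) // [db1 db2]
}
```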
diff --git a/collector/perf_schema_events_statements.go b/collector/perf_schema_events_statements.go
index 568bde7c8..8c8c8e306 100644
--- a/collector/perf_schema_events_statements.go
+++ b/collector/perf_schema_events_statements.go
@@ -69,22 +69,6 @@ const perfEventsStatementsQuery = `
 	  LIMIT %d
 	`

-// Tunable flags.
-var (
-	perfEventsStatementsLimit = kingpin.Flag(
-		"collect.perf_schema.eventsstatements.limit",
-		"Limit the number of events statements digests by response time",
-	).Default("250").Int()
-	perfEventsStatementsTimeLimit = kingpin.Flag(
-		"collect.perf_schema.eventsstatements.timelimit",
-		"Limit how old the 'last_seen' events statements can be, in seconds",
-	).Default("86400").Int()
-	perfEventsStatementsDigestTextLimit = kingpin.Flag(
-		"collect.perf_schema.eventsstatements.digest_text_limit",
-		"Maximum length of the normalized statement text",
-	).Default("120").Int()
-)
-
 // Metric descriptors.
 var (
 	performanceSchemaEventsStatementsDesc = prometheus.NewDesc(
@@ -150,7 +134,11 @@ var (
 )

 // ScrapePerfEventsStatements collects from `performance_schema.events_statements_summary_by_digest`.
-type ScrapePerfEventsStatements struct{}
+type ScrapePerfEventsStatements struct {
+	Limit           int
+	TimeLimit       int
+	DigestTextLimit int
+}

 // Name of the Scraper. Should be unique.
 func (ScrapePerfEventsStatements) Name() string {
@@ -167,13 +155,29 @@ func (ScrapePerfEventsStatements) Version() float64 {
 	return 5.6
 }

+// RegisterFlags adds flags to configure the Scraper.
+func (c *ScrapePerfEventsStatements) RegisterFlags(application *kingpin.Application) {
+	application.Flag(
+		"collect.perf_schema.eventsstatements.limit",
+		"Limit the number of events statements digests by response time",
+	).Default("250").IntVar(&c.Limit)
+	application.Flag(
+		"collect.perf_schema.eventsstatements.timelimit",
+		"Limit how old the 'last_seen' events statements can be, in seconds",
+	).Default("86400").IntVar(&c.TimeLimit)
+	application.Flag(
+		"collect.perf_schema.eventsstatements.digest_text_limit",
+		"Maximum length of the normalized statement text",
+	).Default("120").IntVar(&c.DigestTextLimit)
+}
+
 // Scrape collects data from database connection and sends it over channel as prometheus metric.
-func (ScrapePerfEventsStatements) Scrape(ctx context.Context, db *sql.DB, ch chan<- prometheus.Metric, logger log.Logger) error {
+func (c ScrapePerfEventsStatements) Scrape(ctx context.Context, db *sql.DB, ch chan<- prometheus.Metric, logger log.Logger) error {
 	perfQuery := fmt.Sprintf(
 		perfEventsStatementsQuery,
-		*perfEventsStatementsDigestTextLimit,
-		*perfEventsStatementsTimeLimit,
-		*perfEventsStatementsLimit,
+		c.DigestTextLimit,
+		c.TimeLimit,
+		c.Limit,
 	)
 	// Timers here are returned in picoseconds.
 	perfSchemaEventsStatementsRows, err := db.QueryContext(ctx, perfQuery)
diff --git a/collector/perf_schema_file_instances.go b/collector/perf_schema_file_instances.go
index 4443a62ec..7958b1ad6 100644
--- a/collector/perf_schema_file_instances.go
+++ b/collector/perf_schema_file_instances.go
@@ -34,19 +34,6 @@ const perfFileInstancesQuery = `
 	  where FILE_NAME REGEXP ?
 	`

-// Tunable flags.
-var (
-	performanceSchemaFileInstancesFilter = kingpin.Flag(
-		"collect.perf_schema.file_instances.filter",
-		"RegEx file_name filter for performance_schema.file_summary_by_instance",
-	).Default(".*").String()
-
-	performanceSchemaFileInstancesRemovePrefix = kingpin.Flag(
-		"collect.perf_schema.file_instances.remove_prefix",
-		"Remove path prefix in performance_schema.file_summary_by_instance",
-	).Default("/var/lib/mysql/").String()
-)
-
 // Metric descriptors.
 var (
 	performanceSchemaFileInstancesBytesDesc = prometheus.NewDesc(
@@ -62,7 +49,10 @@ var (
 )

 // ScrapePerfFileInstances collects from `performance_schema.file_summary_by_instance`.
-type ScrapePerfFileInstances struct{}
+type ScrapePerfFileInstances struct {
+	Filter       string
+	RemovePrefix string
+}

 // Name of the Scraper. Should be unique.
 func (ScrapePerfFileInstances) Name() string {
@@ -79,10 +69,22 @@ func (ScrapePerfFileInstances) Version() float64 {
 	return 5.5
 }

+// RegisterFlags adds flags to configure the Scraper.
+func (s *ScrapePerfFileInstances) RegisterFlags(application *kingpin.Application) {
+	application.Flag(
+		"collect.perf_schema.file_instances.filter",
+		"RegEx file_name filter for performance_schema.file_summary_by_instance",
+	).Default(".*").StringVar(&s.Filter)
+	application.Flag(
+		"collect.perf_schema.file_instances.remove_prefix",
+		"Remove path prefix in performance_schema.file_summary_by_instance",
+	).Default("/var/lib/mysql/").StringVar(&s.RemovePrefix)
+}
+
 // Scrape collects data from database connection and sends it over channel as prometheus metric.
-func (ScrapePerfFileInstances) Scrape(ctx context.Context, db *sql.DB, ch chan<- prometheus.Metric, logger log.Logger) error {
+func (s ScrapePerfFileInstances) Scrape(ctx context.Context, db *sql.DB, ch chan<- prometheus.Metric, logger log.Logger) error {
 	// Timers here are returned in picoseconds.
-	perfSchemaFileInstancesRows, err := db.QueryContext(ctx, perfFileInstancesQuery, *performanceSchemaFileInstancesFilter)
+	perfSchemaFileInstancesRows, err := db.QueryContext(ctx, perfFileInstancesQuery, s.Filter)
 	if err != nil {
 		return err
 	}
@@ -103,7 +105,7 @@ func (ScrapePerfFileInstances) Scrape(ctx context.Context, db *sql.DB, ch chan<-
 			return err
 		}

-		fileName = strings.TrimPrefix(fileName, *performanceSchemaFileInstancesRemovePrefix)
+		fileName = strings.TrimPrefix(fileName, s.RemovePrefix)
 		ch <- prometheus.MustNewConstMetric(
 			performanceSchemaFileInstancesCountDesc, prometheus.CounterValue, float64(countRead),
 			fileName, eventName, "read",
diff --git a/collector/perf_schema_file_instances_test.go b/collector/perf_schema_file_instances_test.go
index c1485e5b7..474179888 100644
--- a/collector/perf_schema_file_instances_test.go
+++ b/collector/perf_schema_file_instances_test.go
@@ -27,7 +27,11 @@ import (
 )

 func TestScrapePerfFileInstances(t *testing.T) {
-	_, err := kingpin.CommandLine.Parse([]string{"--collect.perf_schema.file_instances.filter", ""})
+	scraper := ScrapePerfFileInstances{}
+
+	app := kingpin.New("TestScrapePerfFileInstances", "")
+	scraper.RegisterFlags(app)
+	_, err := app.Parse([]string{"--collect.perf_schema.file_instances.filter", ""})
 	if err != nil {
 		t.Fatal(err)
 	}
@@ -48,7 +52,7 @@ func TestScrapePerfFileInstances(t *testing.T) {

 	ch := make(chan prometheus.Metric)
 	go func() {
-		if err = (ScrapePerfFileInstances{}).Scrape(context.Background(), db, ch, log.NewNopLogger()); err != nil {
+		if err = scraper.Scrape(context.Background(), db, ch, log.NewNopLogger()); err != nil {
 			panic(fmt.Sprintf("error calling function on test: %s", err))
 		}
 		close(ch)
diff --git a/collector/perf_schema_memory_events.go b/collector/perf_schema_memory_events.go
index 288ecdd65..959be5762 100644
--- a/collector/perf_schema_memory_events.go
+++ b/collector/perf_schema_memory_events.go
@@ -33,14 +33,6 @@ const perfMemoryEventsQuery = `
 	  where COUNT_ALLOC > 0;
 	`

-// Tunable flags.
-var (
-	performanceSchemaMemoryEventsRemovePrefix = kingpin.Flag(
-		"collect.perf_schema.memory_events.remove_prefix",
-		"Remove instrument prefix in performance_schema.memory_summary_global_by_event_name",
-	).Default("memory/").String()
-)
-
 // Metric descriptors.
 var (
 	performanceSchemaMemoryBytesAllocDesc = prometheus.NewDesc(
@@ -61,7 +53,9 @@ var (
 )

 // ScrapePerfMemoryEvents collects from `performance_schema.memory_summary_global_by_event_name`.
-type ScrapePerfMemoryEvents struct{}
+type ScrapePerfMemoryEvents struct {
+	RemovePrefix string
+}

 // Name of the Scraper. Should be unique.
 func (ScrapePerfMemoryEvents) Name() string {
@@ -78,8 +72,16 @@ func (ScrapePerfMemoryEvents) Version() float64 {
 	return 5.7
 }

+// RegisterFlags adds flags to configure the Scraper.
+func (s *ScrapePerfMemoryEvents) RegisterFlags(application *kingpin.Application) {
+	application.Flag(
+		"collect.perf_schema.memory_events.remove_prefix",
+		"Remove instrument prefix in performance_schema.memory_summary_global_by_event_name",
+	).Default("memory/").StringVar(&s.RemovePrefix)
+}
+
 // Scrape collects data from database connection and sends it over channel as prometheus metric.
-func (ScrapePerfMemoryEvents) Scrape(ctx context.Context, db *sql.DB, ch chan<- prometheus.Metric, logger log.Logger) error {
+func (s ScrapePerfMemoryEvents) Scrape(ctx context.Context, db *sql.DB, ch chan<- prometheus.Metric, logger log.Logger) error {
 	perfSchemaMemoryEventsRows, err := db.QueryContext(ctx, perfMemoryEventsQuery)
 	if err != nil {
 		return err
@@ -100,7 +102,7 @@ func (ScrapePerfMemoryEvents) Scrape(ctx context.Context, db *sql.DB, ch chan<-
 			return err
 		}

-		eventName := strings.TrimPrefix(eventName, *performanceSchemaMemoryEventsRemovePrefix)
+		eventName := strings.TrimPrefix(eventName, s.RemovePrefix)
 		ch <- prometheus.MustNewConstMetric(
 			performanceSchemaMemoryBytesAllocDesc, prometheus.CounterValue, float64(bytesAlloc), eventName,
 		)
diff --git a/collector/perf_schema_memory_events_test.go b/collector/perf_schema_memory_events_test.go
index 1baa0ead7..6b0ea5e52 100644
--- a/collector/perf_schema_memory_events_test.go
+++ b/collector/perf_schema_memory_events_test.go
@@ -27,7 +27,11 @@ import (
 )

 func TestScrapePerfMemoryEvents(t *testing.T) {
-	_, err := kingpin.CommandLine.Parse([]string{})
+	scraper := ScrapePerfMemoryEvents{}
+
+	app := kingpin.New("TestScrapePerfMemoryEvents", "")
+	scraper.RegisterFlags(app)
+	_, err := app.Parse([]string{})
 	if err != nil {
 		t.Fatal(err)
 	}
@@ -54,7 +58,7 @@ func TestScrapePerfMemoryEvents(t *testing.T) {

 	ch := make(chan prometheus.Metric)
 	go func() {
-		if err = (ScrapePerfMemoryEvents{}).Scrape(context.Background(), db, ch, log.NewNopLogger()); err != nil {
+		if err = scraper.Scrape(context.Background(), db, ch, log.NewNopLogger()); err != nil {
 			panic(fmt.Sprintf("error calling function on test: %s", err))
 		}
 		close(ch)
diff --git a/collector/scraper.go b/collector/scraper.go
index e77fd8a35..e38bdb893 100644
--- a/collector/scraper.go
+++ b/collector/scraper.go
@@ -17,6 +17,7 @@ import (
 	"context"
 	"database/sql"

+	"github.com/alecthomas/kingpin/v2"
 	"github.com/go-kit/log"
 	_ "github.com/go-sql-driver/mysql"
 	"github.com/prometheus/client_golang/prometheus"
@@ -37,3 +38,11 @@ type Scraper interface {
 	// Scrape collects data from database connection and sends it over channel as prometheus metric.
 	Scrape(ctx context.Context, db *sql.DB, ch chan<- prometheus.Metric, logger log.Logger) error
 }
+
+// ConfigurableScraper extends the Scraper interface by allowing it to be configured with flags.
+type ConfigurableScraper interface {
+	Scraper
+
+	// Register flags to configure a scraper against a given kingpin Application.
+	RegisterFlags(application *kingpin.Application)
+}
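Note that every `RegisterFlags` above is declared on a pointer receiver, so only the pointer types satisfy `ConfigurableScraper`; the value types still satisfy plain `Scraper`. That is why the `scrapers` map in the next file switches to `&collector.X{}` values. A compile-time check, as a sketch, makes the assumption explicit:

```go
package main

import "github.com/prometheus/mysqld_exporter/collector"

// Compile-time assertions: RegisterFlags has a pointer receiver, so only
// *ScrapeHeartbeat implements ConfigurableScraper, while the value type
// still implements the plain Scraper interface.
var (
	_ collector.ConfigurableScraper = &collector.ScrapeHeartbeat{}
	_ collector.Scraper             = collector.ScrapeHeartbeat{}
)

func main() {}
```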
diff --git a/mysqld_exporter.go b/mysqld_exporter.go
index 76fb107bf..fa906a46f 100644
--- a/mysqld_exporter.go
+++ b/mysqld_exporter.go
@@ -68,41 +68,39 @@ var (
 // scrapers lists all possible collection methods and if they should be enabled by default.
 var scrapers = map[collector.Scraper]bool{
-	collector.ScrapeGlobalStatus{}:                        true,
-	collector.ScrapeGlobalVariables{}:                     true,
-	collector.ScrapeSlaveStatus{}:                         true,
-	collector.ScrapeProcesslist{}:                         false,
-	collector.ScrapeUser{}:                                false,
-	collector.ScrapeTableSchema{}:                         false,
-	collector.ScrapeInfoSchemaInnodbTablespaces{}:         false,
-	collector.ScrapeInnodbMetrics{}:                       false,
-	collector.ScrapeAutoIncrementColumns{}:                false,
-	collector.ScrapeBinlogSize{}:                          false,
-	collector.ScrapePerfTableIOWaits{}:                    false,
-	collector.ScrapePerfIndexIOWaits{}:                    false,
-	collector.ScrapePerfTableLockWaits{}:                  false,
-	collector.ScrapePerfEventsStatements{}:                false,
-	collector.ScrapePerfEventsStatementsSum{}:             false,
-	collector.ScrapePerfEventsWaits{}:                     false,
-	collector.ScrapePerfFileEvents{}:                      false,
-	collector.ScrapePerfFileInstances{}:                   false,
-	collector.ScrapePerfMemoryEvents{}:                    false,
-	collector.ScrapePerfReplicationGroupMembers{}:         false,
-	collector.ScrapePerfReplicationGroupMemberStats{}:     false,
-	collector.ScrapePerfReplicationApplierStatsByWorker{}: false,
-	collector.ScrapeSysUserSummary{}:                      false,
-	collector.ScrapeUserStat{}:                            false,
-	collector.ScrapeClientStat{}:                          false,
-	collector.ScrapeTableStat{}:                           false,
-	collector.ScrapeSchemaStat{}:                          false,
-	collector.ScrapeInnodbCmp{}:                           true,
-	collector.ScrapeInnodbCmpMem{}:                        true,
-	collector.ScrapeQueryResponseTime{}:                   true,
-	collector.ScrapeEngineTokudbStatus{}:                  false,
-	collector.ScrapeEngineInnodbStatus{}:                  false,
-	collector.ScrapeHeartbeat{}:                           false,
-	collector.ScrapeSlaveHosts{}:                          false,
-	collector.ScrapeReplicaHost{}:                         false,
+	&collector.ScrapeGlobalStatus{}:                        true,
+	&collector.ScrapeGlobalVariables{}:                     true,
+	&collector.ScrapeSlaveStatus{}:                         true,
+	&collector.ScrapeProcesslist{}:                         false,
+	&collector.ScrapeUser{}:                                false,
+	&collector.ScrapeTableSchema{}:                         false,
+	&collector.ScrapeInfoSchemaInnodbTablespaces{}:         false,
+	&collector.ScrapeInnodbMetrics{}:                       false,
+	&collector.ScrapeAutoIncrementColumns{}:                false,
+	&collector.ScrapeBinlogSize{}:                          false,
+	&collector.ScrapePerfTableIOWaits{}:                    false,
+	&collector.ScrapePerfIndexIOWaits{}:                    false,
+	&collector.ScrapePerfTableLockWaits{}:                  false,
+	&collector.ScrapePerfEventsStatements{}:                false,
+	&collector.ScrapePerfEventsStatementsSum{}:             false,
+	&collector.ScrapePerfEventsWaits{}:                     false,
+	&collector.ScrapePerfFileEvents{}:                      false,
+	&collector.ScrapePerfFileInstances{}:                   false,
+	&collector.ScrapePerfMemoryEvents{}:                    false,
+	&collector.ScrapePerfReplicationGroupMembers{}:         false,
+	&collector.ScrapePerfReplicationGroupMemberStats{}:     false,
+	&collector.ScrapePerfReplicationApplierStatsByWorker{}: false,
+	&collector.ScrapeUserStat{}:                            false,
+	&collector.ScrapeClientStat{}:                          false,
+	&collector.ScrapeTableStat{}:                           false,
+	&collector.ScrapeSchemaStat{}:                          false,
+	&collector.ScrapeInnodbCmp{}:                           true,
+	&collector.ScrapeInnodbCmpMem{}:                        true,
+	&collector.ScrapeQueryResponseTime{}:                   true,
+	&collector.ScrapeEngineTokudbStatus{}:                  false,
+	&collector.ScrapeEngineInnodbStatus{}:                  false,
+	&collector.ScrapeHeartbeat{}:                           false,
+	&collector.ScrapeSlaveHosts{}:                          false,
+	&collector.ScrapeReplicaHost{}:                         false,
 }

 func filterScrapers(scrapers []collector.Scraper, collectParams []string) []collector.Scraper {
@@ -131,7 +129,7 @@ func init() {
 	prometheus.MustRegister(version.NewCollector("mysqld_exporter"))
 }

-func newHandler(scrapers []collector.Scraper, logger log.Logger) http.HandlerFunc {
+func newHandler(scrapers []collector.Scraper, logger log.Logger, config collector.Config) http.HandlerFunc {
 	return func(w http.ResponseWriter, r *http.Request) {
 		var dsn string
 		var err error
@@ -179,8 +177,7 @@ func newHandler(scrapers []collector.Scraper, logger log.Logger) http.HandlerFun
 		filteredScrapers := filterScrapers(scrapers, collect)

 		registry := prometheus.NewRegistry()
-
-		registry.MustRegister(collector.New(ctx, dsn, filteredScrapers, logger))
+		registry.MustRegister(collector.New(ctx, dsn, filteredScrapers, logger, config))

 		gatherers := prometheus.Gatherers{
 			prometheus.DefaultGatherer,
@@ -206,10 +203,16 @@ func main() {
 			scraper.Help(),
 		).Default(defaultOn).Bool()

+		if cfgScraper, ok := scraper.(collector.ConfigurableScraper); ok {
+			cfgScraper.RegisterFlags(kingpin.CommandLine)
+		}
+
 		scraperFlags[scraper] = f
 	}

 	// Parse flags.
+	var collectorConfig collector.Config
+	collectorConfig.RegisterFlags(kingpin.CommandLine)
 	promlogConfig := &promlog.Config{}
 	flag.AddFlags(kingpin.CommandLine, promlogConfig)
 	kingpin.Version(version.Print("mysqld_exporter"))
@@ -234,7 +237,7 @@ func main() {
 			enabledScrapers = append(enabledScrapers, scraper)
 		}
 	}
-	handlerFunc := newHandler(enabledScrapers, logger)
+	handlerFunc := newHandler(enabledScrapers, logger, collectorConfig)
 	http.Handle(*metricsPath, promhttp.InstrumentMetricHandler(prometheus.DefaultRegisterer, handlerFunc))
 	if *metricsPath != "/" && *metricsPath != "" {
 		landingConfig := web.LandingConfig{
@@ -255,7 +258,7 @@ func main() {
 		}
 		http.Handle("/", landingPage)
 	}
-	http.HandleFunc("/probe", handleProbe(enabledScrapers, logger))
+	http.HandleFunc("/probe", handleProbe(enabledScrapers, logger, collectorConfig))
 	http.HandleFunc("/-/reload", func(w http.ResponseWriter, r *http.Request) {
 		if err = c.ReloadConfig(*configMycnf, *mysqldAddress, *mysqldUser, *tlsInsecureSkipVerify, logger); err != nil {
 			level.Warn(logger).Log("msg", "Error reloading host config", "file", *configMycnf, "error", err)
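The registration loop in `main` only calls `RegisterFlags` on scrapers that pass the `ConfigurableScraper` type assertion, which succeeds precisely because the map now stores pointers. The same pattern in isolation, as a sketch with a hypothetical subset of the scrapers:

```go
package main

import (
	"fmt"

	"github.com/alecthomas/kingpin/v2"

	"github.com/prometheus/mysqld_exporter/collector"
)

func main() {
	// Hypothetical subset of the scrapers map; pointers are required for
	// the ConfigurableScraper assertion to succeed, since RegisterFlags
	// has a pointer receiver.
	scrapers := []collector.Scraper{
		&collector.ScrapeHeartbeat{},    // has RegisterFlags
		&collector.ScrapeGlobalStatus{}, // no flags; assertion fails, skipped
	}
	for _, s := range scrapers {
		if cfgScraper, ok := s.(collector.ConfigurableScraper); ok {
			cfgScraper.RegisterFlags(kingpin.CommandLine)
			fmt.Printf("registered flags for %s\n", s.Name())
		}
	}
}
```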
diff --git a/probe.go b/probe.go
index a7c6606a5..e222bdc8f 100644
--- a/probe.go
+++ b/probe.go
@@ -24,7 +24,7 @@ import (
 	"github.com/prometheus/mysqld_exporter/collector"
 )

-func handleProbe(scrapers []collector.Scraper, logger log.Logger) http.HandlerFunc {
+func handleProbe(scrapers []collector.Scraper, logger log.Logger, config collector.Config) http.HandlerFunc {
 	return func(w http.ResponseWriter, r *http.Request) {
 		ctx := r.Context()
 		params := r.URL.Query()
@@ -57,7 +57,7 @@ func handleProbe(scrapers []collector.Scraper, logger log.Logger) http.HandlerFu
 		filteredScrapers := filterScrapers(scrapers, collectParams)

 		registry := prometheus.NewRegistry()
-		registry.MustRegister(collector.New(ctx, dsn, filteredScrapers, logger))
+		registry.MustRegister(collector.New(ctx, dsn, filteredScrapers, logger, config))

 		h := promhttp.HandlerFor(registry, promhttp.HandlerOpts{})
 		h.ServeHTTP(w, r)