diff --git a/collector/exporter.go b/collector/exporter.go
index 4e98ea25..5a4e5822 100644
--- a/collector/exporter.go
+++ b/collector/exporter.go
@@ -40,17 +40,23 @@ const (
 	timeoutParam = `lock_wait_timeout=%d`
 )
 
-// Tunable flags.
-var (
-	exporterLockTimeout = kingpin.Flag(
+// Config holds configuration options for the exporter.
+type Config struct {
+	LockTimeout   int
+	SlowLogFilter bool
+}
+
+// RegisterFlags adds flags to configure the exporter.
+func (c *Config) RegisterFlags(application *kingpin.Application) {
+	application.Flag(
 		"exporter.lock_wait_timeout",
 		"Set a lock_wait_timeout (in seconds) on the connection to avoid long metadata locking.",
-	).Default("2").Int()
-	slowLogFilter = kingpin.Flag(
+	).Default("2").IntVar(&c.LockTimeout)
+	application.Flag(
 		"exporter.log_slow_filter",
 		"Add a log_slow_filter to avoid slow query logging of scrapes. NOTE: Not supported by Oracle MySQL.",
-	).Default("false").Bool()
-)
+	).Default("false").BoolVar(&c.SlowLogFilter)
+}
 
 // metric definition
 var (
@@ -86,11 +92,11 @@ type Exporter struct {
 }
 
 // New returns a new MySQL exporter for the provided DSN.
-func New(ctx context.Context, dsn string, scrapers []Scraper, logger *slog.Logger) *Exporter {
+func New(ctx context.Context, dsn string, scrapers []Scraper, logger *slog.Logger, cfg Config) *Exporter {
 	// Setup extra params for the DSN, default to having a lock timeout.
-	dsnParams := []string{fmt.Sprintf(timeoutParam, *exporterLockTimeout)}
+	dsnParams := []string{fmt.Sprintf(timeoutParam, cfg.LockTimeout)}
 
-	if *slowLogFilter {
+	if cfg.SlowLogFilter {
 		dsnParams = append(dsnParams, sessionSettingsParam)
 	}
 
diff --git a/collector/exporter_test.go b/collector/exporter_test.go
index db447bfe..fa63a558 100644
--- a/collector/exporter_test.go
+++ b/collector/exporter_test.go
@@ -17,6 +17,7 @@ import (
 	"context"
 	"testing"
 
+	"github.com/alecthomas/kingpin/v2"
 	"github.com/prometheus/client_golang/prometheus"
 	"github.com/prometheus/common/model"
 	"github.com/prometheus/common/promslog"
@@ -30,6 +31,14 @@ func TestExporter(t *testing.T) {
 		t.Skip("-short is passed, skipping test")
 	}
 
+	var exporterConfig Config
+	kingpinApp := kingpin.New("TestExporter", "")
+	exporterConfig.RegisterFlags(kingpinApp)
+	_, err := kingpinApp.Parse([]string{})
+	if err != nil {
+		t.Fatal(err)
+	}
+
 	exporter := New(
 		context.Background(),
 		dsn,
@@ -37,6 +46,7 @@ func TestExporter(t *testing.T) {
 			ScrapeGlobalStatus{},
 		},
 		promslog.NewNopLogger(),
+		exporterConfig,
 	)
 
 	convey.Convey("Metrics describing", t, func() {
diff --git a/collector/heartbeat.go b/collector/heartbeat.go
index 68845d91..b429f3f4 100644
--- a/collector/heartbeat.go
+++ b/collector/heartbeat.go
@@ -36,21 +36,6 @@ const (
 	heartbeatQuery = "SELECT UNIX_TIMESTAMP(ts), UNIX_TIMESTAMP(%s), server_id from `%s`.`%s`"
 )
 
-var (
-	collectHeartbeatDatabase = kingpin.Flag(
-		"collect.heartbeat.database",
-		"Database from where to collect heartbeat data",
-	).Default("heartbeat").String()
-	collectHeartbeatTable = kingpin.Flag(
-		"collect.heartbeat.table",
-		"Table from where to collect heartbeat data",
-	).Default("heartbeat").String()
-	collectHeartbeatUtc = kingpin.Flag(
-		"collect.heartbeat.utc",
-		"Use UTC for timestamps of the current server (`pt-heartbeat` is called with `--utc`)",
-	).Bool()
-)
-
 // Metric descriptors.
 var (
 	HeartbeatStoredDesc = prometheus.NewDesc(
@@ -74,7 +59,11 @@ var (
 //  server_id int unsigned NOT NULL PRIMARY KEY,
 //
 // );
-type ScrapeHeartbeat struct{}
+type ScrapeHeartbeat struct {
+	Database string
+	Table    string
+	UTC      bool
+}
 
 // Name of the Scraper. Should be unique.
 func (ScrapeHeartbeat) Name() string {
@@ -91,18 +80,34 @@ func (ScrapeHeartbeat) Version() float64 {
 	return 5.1
 }
 
+// RegisterFlags adds flags to configure the Scraper.
+func (s *ScrapeHeartbeat) RegisterFlags(application *kingpin.Application) {
+	application.Flag(
+		"collect.heartbeat.database",
+		"Database from where to collect heartbeat data",
+	).Default("heartbeat").StringVar(&s.Database)
+	application.Flag(
+		"collect.heartbeat.table",
+		"Table from where to collect heartbeat data",
+	).Default("heartbeat").StringVar(&s.Table)
+	application.Flag(
+		"collect.heartbeat.utc",
+		"Use UTC for timestamps of the current server (`pt-heartbeat` is called with `--utc`)",
+	).BoolVar(&s.UTC)
+}
+
 // nowExpr returns a current timestamp expression.
-func nowExpr() string {
-	if *collectHeartbeatUtc {
+func (s ScrapeHeartbeat) nowExpr() string {
+	if s.UTC {
 		return "UTC_TIMESTAMP(6)"
 	}
 	return "NOW(6)"
 }
 
 // Scrape collects data from database connection and sends it over channel as prometheus metric.
-func (ScrapeHeartbeat) Scrape(ctx context.Context, instance *instance, ch chan<- prometheus.Metric, logger *slog.Logger) error {
+func (s ScrapeHeartbeat) Scrape(ctx context.Context, instance *instance, ch chan<- prometheus.Metric, logger *slog.Logger) error {
 	db := instance.getDB()
-	query := fmt.Sprintf(heartbeatQuery, nowExpr(), *collectHeartbeatDatabase, *collectHeartbeatTable)
+	query := fmt.Sprintf(heartbeatQuery, s.nowExpr(), s.Database, s.Table)
 	heartbeatRows, err := db.QueryContext(ctx, query)
 	if err != nil {
 		return err
diff --git a/collector/heartbeat_test.go b/collector/heartbeat_test.go
index 2a734138..c2a9888d 100644
--- a/collector/heartbeat_test.go
+++ b/collector/heartbeat_test.go
@@ -55,7 +55,12 @@ var ScrapeHeartbeatTestCases = []ScrapeHeartbeatTestCase{
 func TestScrapeHeartbeat(t *testing.T) {
 	for _, tt := range ScrapeHeartbeatTestCases {
 		t.Run(fmt.Sprint(tt.Args), func(t *testing.T) {
-			_, err := kingpin.CommandLine.Parse(tt.Args)
+			scraper := ScrapeHeartbeat{}
+
+			app := kingpin.New("TestScrapeHeartbeat", "")
+			scraper.RegisterFlags(app)
+
+			_, err := app.Parse(tt.Args)
 			if err != nil {
 				t.Fatal(err)
 			}
@@ -73,7 +78,7 @@ func TestScrapeHeartbeat(t *testing.T) {
 
 			ch := make(chan prometheus.Metric)
 			go func() {
-				if err = (ScrapeHeartbeat{}).Scrape(context.Background(), inst, ch, promslog.NewNopLogger()); err != nil {
+				if err = scraper.Scrape(context.Background(), inst, ch, promslog.NewNopLogger()); err != nil {
 					t.Errorf("error calling function on test: %s", err)
 				}
 				close(ch)
diff --git a/collector/info_schema_processlist.go b/collector/info_schema_processlist.go
index 3dd5d5e2..d5c08213 100755
--- a/collector/info_schema_processlist.go
+++ b/collector/info_schema_processlist.go
@@ -41,22 +41,6 @@ const infoSchemaProcesslistQuery = `
 	GROUP BY user, host, command, state
 `
 
-// Tunable flags.
-var (
-	processlistMinTime = kingpin.Flag(
-		"collect.info_schema.processlist.min_time",
-		"Minimum time a thread must be in each state to be counted",
-	).Default("0").Int()
-	processesByUserFlag = kingpin.Flag(
-		"collect.info_schema.processlist.processes_by_user",
-		"Enable collecting the number of processes by user",
-	).Default("true").Bool()
-	processesByHostFlag = kingpin.Flag(
-		"collect.info_schema.processlist.processes_by_host",
-		"Enable collecting the number of processes by host",
-	).Default("true").Bool()
-)
-
 // Metric descriptors.
 var (
 	processlistCountDesc = prometheus.NewDesc(
@@ -78,7 +62,11 @@ var (
 )
 
 // ScrapeProcesslist collects from `information_schema.processlist`.
-type ScrapeProcesslist struct{}
+type ScrapeProcesslist struct {
+	ProcessListMinTime  int
+	ProcessesByUserFlag bool
+	ProcessesByHostFlag bool
+}
 
 // Name of the Scraper. Should be unique.
 func (ScrapeProcesslist) Name() string {
@@ -95,11 +83,27 @@ func (ScrapeProcesslist) Version() float64 {
 	return 5.1
 }
 
+// RegisterFlags adds flags to configure the Scraper.
+func (s *ScrapeProcesslist) RegisterFlags(application *kingpin.Application) {
+	application.Flag(
+		"collect.info_schema.processlist.min_time",
+		"Minimum time a thread must be in each state to be counted",
+	).Default("0").IntVar(&s.ProcessListMinTime)
+	application.Flag(
+		"collect.info_schema.processlist.processes_by_user",
+		"Enable collecting the number of processes by user",
+	).Default("true").BoolVar(&s.ProcessesByUserFlag)
+	application.Flag(
+		"collect.info_schema.processlist.processes_by_host",
+		"Enable collecting the number of processes by host",
+	).Default("true").BoolVar(&s.ProcessesByHostFlag)
+}
+
 // Scrape collects data from database connection and sends it over channel as prometheus metric.
-func (ScrapeProcesslist) Scrape(ctx context.Context, instance *instance, ch chan<- prometheus.Metric, logger *slog.Logger) error {
+func (s ScrapeProcesslist) Scrape(ctx context.Context, instance *instance, ch chan<- prometheus.Metric, logger *slog.Logger) error {
 	processQuery := fmt.Sprintf(
 		infoSchemaProcesslistQuery,
-		*processlistMinTime,
+		s.ProcessListMinTime,
 	)
 	db := instance.getDB()
 	processlistRows, err := db.QueryContext(ctx, processQuery)
@@ -162,12 +166,13 @@ func (ScrapeProcesslist) Scrape(ctx context.Context, instance *instance, ch chan
 		}
 	}
 
-	if *processesByHostFlag {
+	if s.ProcessesByHostFlag {
 		for _, host := range sortedMapKeys(stateHostCounts) {
 			ch <- prometheus.MustNewConstMetric(processesByHostDesc, prometheus.GaugeValue, float64(stateHostCounts[host]), host)
 		}
 	}
-	if *processesByUserFlag {
+
+	if s.ProcessesByUserFlag {
 		for _, user := range sortedMapKeys(stateUserCounts) {
 			ch <- prometheus.MustNewConstMetric(processesByUserDesc, prometheus.GaugeValue, float64(stateUserCounts[user]), user)
 		}
diff --git a/collector/info_schema_processlist_test.go b/collector/info_schema_processlist_test.go
index 356f1fa7..4d582c56 100644
--- a/collector/info_schema_processlist_test.go
+++ b/collector/info_schema_processlist_test.go
@@ -27,7 +27,11 @@ import (
 )
 
 func TestScrapeProcesslist(t *testing.T) {
-	_, err := kingpin.CommandLine.Parse([]string{
+	scraper := ScrapeProcesslist{}
+	app := kingpin.New("TestScrapeProcesslist", "")
+	scraper.RegisterFlags(app)
+
+	_, err := app.Parse([]string{
 		"--collect.info_schema.processlist.processes_by_user",
 		"--collect.info_schema.processlist.processes_by_host",
 	})
@@ -57,7 +61,7 @@ func TestScrapeProcesslist(t *testing.T) {
 
 	ch := make(chan prometheus.Metric)
 	go func() {
-		if err = (ScrapeProcesslist{}).Scrape(context.Background(), inst, ch, promslog.NewNopLogger()); err != nil {
+		if err = scraper.Scrape(context.Background(), inst, ch, promslog.NewNopLogger()); err != nil {
 			t.Errorf("error calling function on test: %s", err)
 		}
 		close(ch)
diff --git a/collector/info_schema_tables.go b/collector/info_schema_tables.go
index 9818cc6e..01b09d24 100644
--- a/collector/info_schema_tables.go
+++ b/collector/info_schema_tables.go
@@ -50,14 +50,6 @@ const (
 	`
 )
 
-// Tunable flags.
-var (
-	tableSchemaDatabases = kingpin.Flag(
-		"collect.info_schema.tables.databases",
-		"The list of databases to collect table stats for, or '*' for all",
-	).Default("*").String()
-)
-
 // Metric descriptors.
 var (
 	infoSchemaTablesVersionDesc = prometheus.NewDesc(
@@ -78,7 +70,9 @@ var (
 )
 
 // ScrapeTableSchema collects from `information_schema.tables`.
-type ScrapeTableSchema struct{}
+type ScrapeTableSchema struct {
+	Databases string
+}
 
 // Name of the Scraper. Should be unique.
 func (ScrapeTableSchema) Name() string {
@@ -95,11 +89,19 @@ func (ScrapeTableSchema) Version() float64 {
 	return 5.1
 }
 
+// RegisterFlags adds flags to configure the Scraper.
+func (s *ScrapeTableSchema) RegisterFlags(application *kingpin.Application) {
+	application.Flag(
+		"collect.info_schema.tables.databases",
+		"The list of databases to collect table stats for, or '*' for all",
+	).Default("*").StringVar(&s.Databases)
+}
+
 // Scrape collects data from database connection and sends it over channel as prometheus metric.
-func (ScrapeTableSchema) Scrape(ctx context.Context, instance *instance, ch chan<- prometheus.Metric, logger *slog.Logger) error {
+func (s ScrapeTableSchema) Scrape(ctx context.Context, instance *instance, ch chan<- prometheus.Metric, logger *slog.Logger) error {
 	var dbList []string
 	db := instance.getDB()
-	if *tableSchemaDatabases == "*" {
+	if s.Databases == "*" {
 		dbListRows, err := db.QueryContext(ctx, dbListQuery)
 		if err != nil {
 			return err
@@ -117,7 +119,7 @@ func (ScrapeTableSchema) Scrape(ctx context.Context, instance *instance, ch chan
 			dbList = append(dbList, database)
 		}
 	} else {
-		dbList = strings.Split(*tableSchemaDatabases, ",")
+		dbList = strings.Split(s.Databases, ",")
 	}
 
 	for _, database := range dbList {
diff --git a/collector/mysql_user.go b/collector/mysql_user.go
index 6648c958..a00b6f59 100644
--- a/collector/mysql_user.go
+++ b/collector/mysql_user.go
@@ -69,14 +69,6 @@ const mysqlUserQuery = `
 	FROM mysql.user
 `
 
-// Tunable flags.
-var (
-	userPrivilegesFlag = kingpin.Flag(
-		"collect.mysql.user.privileges",
-		"Enable collecting user privileges from mysql.user",
-	).Default("false").Bool()
-)
-
 var (
 	labelNames = []string{"mysql_user", "hostmask"}
 )
@@ -102,7 +94,9 @@ var (
 )
 
 // ScrapeUser collects from `information_schema.processlist`.
-type ScrapeUser struct{}
+type ScrapeUser struct {
+	Privileges bool
+}
 
 // Name of the Scraper. Should be unique.
 func (ScrapeUser) Name() string {
@@ -119,8 +113,16 @@ func (ScrapeUser) Version() float64 {
 	return 5.1
 }
 
+// RegisterFlags adds flags to configure the Scraper.
+func (s *ScrapeUser) RegisterFlags(application *kingpin.Application) {
+	application.Flag(
+		"collect.mysql.user.privileges",
+		"Enable collecting user privileges from mysql.user",
+	).Default("false").BoolVar(&s.Privileges)
+}
+
 // Scrape collects data from database connection and sends it over channel as prometheus metric.
-func (ScrapeUser) Scrape(ctx context.Context, instance *instance, ch chan<- prometheus.Metric, logger *slog.Logger) error {
+func (s ScrapeUser) Scrape(ctx context.Context, instance *instance, ch chan<- prometheus.Metric, logger *slog.Logger) error {
 	db := instance.getDB()
 	var (
 		userRows *sql.Rows
@@ -214,7 +216,7 @@ func (ScrapeUser) Scrape(ctx context.Context, instance *instance, ch chan<- prom
 		return err
 	}
 
-	if *userPrivilegesFlag {
+	if s.Privileges {
 		userCols, err := userRows.Columns()
 		if err != nil {
 			return err
diff --git a/collector/perf_schema_events_statements.go b/collector/perf_schema_events_statements.go
index 0dc74210..5563a94c 100644
--- a/collector/perf_schema_events_statements.go
+++ b/collector/perf_schema_events_statements.go
@@ -72,22 +72,6 @@ const perfEventsStatementsQuery = `
 	LIMIT %d
 `
 
-// Tunable flags.
-var (
-	perfEventsStatementsLimit = kingpin.Flag(
-		"collect.perf_schema.eventsstatements.limit",
-		"Limit the number of events statements digests by response time",
-	).Default("250").Int()
-	perfEventsStatementsTimeLimit = kingpin.Flag(
-		"collect.perf_schema.eventsstatements.timelimit",
-		"Limit how old the 'last_seen' events statements can be, in seconds",
-	).Default("86400").Int()
-	perfEventsStatementsDigestTextLimit = kingpin.Flag(
-		"collect.perf_schema.eventsstatements.digest_text_limit",
-		"Maximum length of the normalized statement text",
-	).Default("120").Int()
-)
-
 // Metric descriptors.
 var (
 	performanceSchemaEventsStatementsDesc = prometheus.NewDesc(
@@ -163,7 +147,11 @@ var (
 )
 
 // ScrapePerfEventsStatements collects from `performance_schema.events_statements_summary_by_digest`.
-type ScrapePerfEventsStatements struct{}
+type ScrapePerfEventsStatements struct {
+	Limit           int
+	TimeLimit       int
+	DigestTextLimit int
+}
 
 // Name of the Scraper. Should be unique.
 func (ScrapePerfEventsStatements) Name() string {
@@ -180,13 +168,29 @@ func (ScrapePerfEventsStatements) Version() float64 {
 	return 5.6
 }
 
+// RegisterFlags adds flags to configure the Scraper.
+func (c *ScrapePerfEventsStatements) RegisterFlags(application *kingpin.Application) {
+	application.Flag(
+		"collect.perf_schema.eventsstatements.limit",
+		"Limit the number of events statements digests by response time",
+	).Default("250").IntVar(&c.Limit)
+	application.Flag(
+		"collect.perf_schema.eventsstatements.timelimit",
+		"Limit how old the 'last_seen' events statements can be, in seconds",
+	).Default("86400").IntVar(&c.TimeLimit)
+	application.Flag(
+		"collect.perf_schema.eventsstatements.digest_text_limit",
+		"Maximum length of the normalized statement text",
+	).Default("120").IntVar(&c.DigestTextLimit)
+}
+
 // Scrape collects data from database connection and sends it over channel as prometheus metric.
-func (ScrapePerfEventsStatements) Scrape(ctx context.Context, instance *instance, ch chan<- prometheus.Metric, logger *slog.Logger) error {
+func (c ScrapePerfEventsStatements) Scrape(ctx context.Context, instance *instance, ch chan<- prometheus.Metric, logger *slog.Logger) error {
 	perfQuery := fmt.Sprintf(
 		perfEventsStatementsQuery,
-		*perfEventsStatementsDigestTextLimit,
-		*perfEventsStatementsTimeLimit,
-		*perfEventsStatementsLimit,
+		c.DigestTextLimit,
+		c.TimeLimit,
+		c.Limit,
 	)
 	db := instance.getDB()
 	// Timers here are returned in picoseconds.
diff --git a/collector/perf_schema_file_instances.go b/collector/perf_schema_file_instances.go
index f4c44b98..64f28939 100644
--- a/collector/perf_schema_file_instances.go
+++ b/collector/perf_schema_file_instances.go
@@ -33,19 +33,6 @@ const perfFileInstancesQuery = `
 	where FILE_NAME REGEXP ?
 	`
 
-// Tunable flags.
-var (
-	performanceSchemaFileInstancesFilter = kingpin.Flag(
-		"collect.perf_schema.file_instances.filter",
-		"RegEx file_name filter for performance_schema.file_summary_by_instance",
-	).Default(".*").String()
-
-	performanceSchemaFileInstancesRemovePrefix = kingpin.Flag(
-		"collect.perf_schema.file_instances.remove_prefix",
-		"Remove path prefix in performance_schema.file_summary_by_instance",
-	).Default("/var/lib/mysql/").String()
-)
-
 // Metric descriptors.
 var (
 	performanceSchemaFileInstancesBytesDesc = prometheus.NewDesc(
@@ -61,7 +48,10 @@ var (
 )
 
 // ScrapePerfFileInstances collects from `performance_schema.file_summary_by_instance`.
-type ScrapePerfFileInstances struct{}
+type ScrapePerfFileInstances struct {
+	Filter       string
+	RemovePrefix string
+}
 
 // Name of the Scraper. Should be unique.
 func (ScrapePerfFileInstances) Name() string {
@@ -78,11 +68,23 @@ func (ScrapePerfFileInstances) Version() float64 {
 	return 5.5
 }
 
+// RegisterFlags adds flags to configure the Scraper.
+func (s *ScrapePerfFileInstances) RegisterFlags(application *kingpin.Application) {
+	application.Flag(
+		"collect.perf_schema.file_instances.filter",
+		"RegEx file_name filter for performance_schema.file_summary_by_instance",
+	).Default(".*").StringVar(&s.Filter)
+	application.Flag(
+		"collect.perf_schema.file_instances.remove_prefix",
+		"Remove path prefix in performance_schema.file_summary_by_instance",
+	).Default("/var/lib/mysql/").StringVar(&s.RemovePrefix)
+}
+
 // Scrape collects data from database connection and sends it over channel as prometheus metric.
-func (ScrapePerfFileInstances) Scrape(ctx context.Context, instance *instance, ch chan<- prometheus.Metric, logger *slog.Logger) error {
+func (s ScrapePerfFileInstances) Scrape(ctx context.Context, instance *instance, ch chan<- prometheus.Metric, logger *slog.Logger) error {
 	db := instance.getDB()
 	// Timers here are returned in picoseconds.
-	perfSchemaFileInstancesRows, err := db.QueryContext(ctx, perfFileInstancesQuery, *performanceSchemaFileInstancesFilter)
+	perfSchemaFileInstancesRows, err := db.QueryContext(ctx, perfFileInstancesQuery, s.Filter)
 	if err != nil {
 		return err
 	}
@@ -103,7 +105,7 @@ func (ScrapePerfFileInstances) Scrape(ctx context.Context, instance *instance, c
 			return err
 		}
 
-		fileName = strings.TrimPrefix(fileName, *performanceSchemaFileInstancesRemovePrefix)
+		fileName = strings.TrimPrefix(fileName, s.RemovePrefix)
 		ch <- prometheus.MustNewConstMetric(
 			performanceSchemaFileInstancesCountDesc, prometheus.CounterValue, float64(countRead), fileName, eventName, "read",
diff --git a/collector/perf_schema_file_instances_test.go b/collector/perf_schema_file_instances_test.go
index ac0e6e89..ec337da8 100644
--- a/collector/perf_schema_file_instances_test.go
+++ b/collector/perf_schema_file_instances_test.go
@@ -27,7 +27,11 @@ import (
 )
 
 func TestScrapePerfFileInstances(t *testing.T) {
-	_, err := kingpin.CommandLine.Parse([]string{"--collect.perf_schema.file_instances.filter", ""})
+	scraper := ScrapePerfFileInstances{}
+
+	app := kingpin.New("TestScrapePerfFileInstances", "")
+	scraper.RegisterFlags(app)
+	_, err := app.Parse([]string{"--collect.perf_schema.file_instances.filter", ""})
 	if err != nil {
 		t.Fatal(err)
 	}
@@ -49,7 +53,7 @@ func TestScrapePerfFileInstances(t *testing.T) {
 
 	ch := make(chan prometheus.Metric)
 	go func() {
-		if err = (ScrapePerfFileInstances{}).Scrape(context.Background(), inst, ch, promslog.NewNopLogger()); err != nil {
+		if err = scraper.Scrape(context.Background(), inst, ch, promslog.NewNopLogger()); err != nil {
 			panic(fmt.Sprintf("error calling function on test: %s", err))
 		}
 		close(ch)
diff --git a/collector/perf_schema_memory_events.go b/collector/perf_schema_memory_events.go
index a4b1856e..6fc44482 100644
--- a/collector/perf_schema_memory_events.go
+++ b/collector/perf_schema_memory_events.go
@@ -32,14 +32,6 @@ const perfMemoryEventsQuery = `
 	where COUNT_ALLOC > 0;
 `
 
-// Tunable flags.
-var (
-	performanceSchemaMemoryEventsRemovePrefix = kingpin.Flag(
-		"collect.perf_schema.memory_events.remove_prefix",
-		"Remove instrument prefix in performance_schema.memory_summary_global_by_event_name",
-	).Default("memory/").String()
-)
-
 // Metric descriptors.
 var (
 	performanceSchemaMemoryBytesAllocDesc = prometheus.NewDesc(
@@ -60,7 +52,9 @@ var (
 )
 
 // ScrapePerfMemoryEvents collects from `performance_schema.memory_summary_global_by_event_name`.
-type ScrapePerfMemoryEvents struct{}
+type ScrapePerfMemoryEvents struct {
+	RemovePrefix string
+}
 
 // Name of the Scraper. Should be unique.
 func (ScrapePerfMemoryEvents) Name() string {
@@ -77,8 +71,16 @@ func (ScrapePerfMemoryEvents) Version() float64 {
 	return 5.7
 }
 
+// RegisterFlags adds flags to configure the Scraper.
+func (s *ScrapePerfMemoryEvents) RegisterFlags(application *kingpin.Application) {
+	application.Flag(
+		"collect.perf_schema.memory_events.remove_prefix",
+		"Remove instrument prefix in performance_schema.memory_summary_global_by_event_name",
+	).Default("memory/").StringVar(&s.RemovePrefix)
+}
+
 // Scrape collects data from database connection and sends it over channel as prometheus metric.
-func (ScrapePerfMemoryEvents) Scrape(ctx context.Context, instance *instance, ch chan<- prometheus.Metric, logger *slog.Logger) error {
+func (s ScrapePerfMemoryEvents) Scrape(ctx context.Context, instance *instance, ch chan<- prometheus.Metric, logger *slog.Logger) error {
 	db := instance.getDB()
 	perfSchemaMemoryEventsRows, err := db.QueryContext(ctx, perfMemoryEventsQuery)
 	if err != nil {
@@ -100,7 +102,7 @@ func (ScrapePerfMemoryEvents) Scrape(ctx context.Context, instance *instance, ch
 			return err
 		}
 
-		eventName := strings.TrimPrefix(eventName, *performanceSchemaMemoryEventsRemovePrefix)
+		eventName := strings.TrimPrefix(eventName, s.RemovePrefix)
 		ch <- prometheus.MustNewConstMetric(
 			performanceSchemaMemoryBytesAllocDesc, prometheus.CounterValue, float64(bytesAlloc), eventName,
 		)
diff --git a/collector/perf_schema_memory_events_test.go b/collector/perf_schema_memory_events_test.go
index af5a43da..384153b3 100644
--- a/collector/perf_schema_memory_events_test.go
+++ b/collector/perf_schema_memory_events_test.go
@@ -27,7 +27,11 @@ import (
 )
 
 func TestScrapePerfMemoryEvents(t *testing.T) {
-	_, err := kingpin.CommandLine.Parse([]string{})
+	scraper := ScrapePerfMemoryEvents{}
+
+	app := kingpin.New("TestScrapePerfMemoryEvents", "")
+	scraper.RegisterFlags(app)
+	_, err := app.Parse([]string{})
 	if err != nil {
 		t.Fatal(err)
 	}
@@ -55,7 +59,7 @@ func TestScrapePerfMemoryEvents(t *testing.T) {
 
 	ch := make(chan prometheus.Metric)
 	go func() {
-		if err = (ScrapePerfMemoryEvents{}).Scrape(context.Background(), inst, ch, promslog.NewNopLogger()); err != nil {
+		if err = scraper.Scrape(context.Background(), inst, ch, promslog.NewNopLogger()); err != nil {
 			panic(fmt.Sprintf("error calling function on test: %s", err))
 		}
 		close(ch)
diff --git a/collector/scraper.go b/collector/scraper.go
index 81486de7..49cddac6 100644
--- a/collector/scraper.go
+++ b/collector/scraper.go
@@ -17,6 +17,7 @@ import (
 	"context"
 	"log/slog"
 
+	"github.com/alecthomas/kingpin/v2"
 	_ "github.com/go-sql-driver/mysql"
 	"github.com/prometheus/client_golang/prometheus"
 )
@@ -36,3 +37,11 @@ type Scraper interface {
 	// Scrape collects data from database connection and sends it over channel as prometheus metric.
 	Scrape(ctx context.Context, instance *instance, ch chan<- prometheus.Metric, logger *slog.Logger) error
 }
+
+// ConfigurableScraper extends the Scraper interface by allowing it to be configured with flags.
+type ConfigurableScraper interface {
+	Scraper
+
+	// Register flags to configure a scraper against a given kingpin Application.
+	RegisterFlags(application *kingpin.Application)
+}
diff --git a/mysqld_exporter.go b/mysqld_exporter.go
index d50bb60e..7f84bf08 100644
--- a/mysqld_exporter.go
+++ b/mysqld_exporter.go
@@ -69,41 +69,41 @@ var (
 // scrapers lists all possible collection methods and if they should be enabled by default.
 var scrapers = map[collector.Scraper]bool{
-	collector.ScrapeGlobalStatus{}:                        true,
-	collector.ScrapeGlobalVariables{}:                     true,
-	collector.ScrapeSlaveStatus{}:                         true,
-	collector.ScrapeProcesslist{}:                         false,
-	collector.ScrapeUser{}:                                false,
-	collector.ScrapeTableSchema{}:                         false,
-	collector.ScrapeInfoSchemaInnodbTablespaces{}:         false,
-	collector.ScrapeInnodbMetrics{}:                       false,
-	collector.ScrapeAutoIncrementColumns{}:                false,
-	collector.ScrapeBinlogSize{}:                          false,
-	collector.ScrapePerfTableIOWaits{}:                    false,
-	collector.ScrapePerfIndexIOWaits{}:                    false,
-	collector.ScrapePerfTableLockWaits{}:                  false,
-	collector.ScrapePerfEventsStatements{}:                false,
-	collector.ScrapePerfEventsStatementsSum{}:             false,
-	collector.ScrapePerfEventsWaits{}:                     false,
-	collector.ScrapePerfFileEvents{}:                      false,
-	collector.ScrapePerfFileInstances{}:                   false,
-	collector.ScrapePerfMemoryEvents{}:                    false,
-	collector.ScrapePerfReplicationGroupMembers{}:         false,
-	collector.ScrapePerfReplicationGroupMemberStats{}:     false,
-	collector.ScrapePerfReplicationApplierStatsByWorker{}: false,
-	collector.ScrapeSysUserSummary{}:                      false,
-	collector.ScrapeUserStat{}:                            false,
-	collector.ScrapeClientStat{}:                          false,
-	collector.ScrapeTableStat{}:                           false,
-	collector.ScrapeSchemaStat{}:                          false,
-	collector.ScrapeInnodbCmp{}:                           true,
-	collector.ScrapeInnodbCmpMem{}:                        true,
-	collector.ScrapeQueryResponseTime{}:                   true,
-	collector.ScrapeEngineTokudbStatus{}:                  false,
-	collector.ScrapeEngineInnodbStatus{}:                  false,
-	collector.ScrapeHeartbeat{}:                           false,
-	collector.ScrapeSlaveHosts{}:                          false,
-	collector.ScrapeReplicaHost{}:                         false,
+	&collector.ScrapeGlobalStatus{}:                        true,
+	&collector.ScrapeGlobalVariables{}:                     true,
+	&collector.ScrapeSlaveStatus{}:                         true,
+	&collector.ScrapeProcesslist{}:                         false,
+	&collector.ScrapeUser{}:                                false,
+	&collector.ScrapeTableSchema{}:                         false,
+	&collector.ScrapeInfoSchemaInnodbTablespaces{}:         false,
+	&collector.ScrapeInnodbMetrics{}:                       false,
+	&collector.ScrapeAutoIncrementColumns{}:                false,
+	&collector.ScrapeBinlogSize{}:                          false,
+	&collector.ScrapePerfTableIOWaits{}:                    false,
+	&collector.ScrapePerfIndexIOWaits{}:                    false,
+	&collector.ScrapePerfTableLockWaits{}:                  false,
+	&collector.ScrapePerfEventsStatements{}:                false,
+	&collector.ScrapePerfEventsStatementsSum{}:             false,
+	&collector.ScrapePerfEventsWaits{}:                     false,
+	&collector.ScrapePerfFileEvents{}:                      false,
+	&collector.ScrapePerfFileInstances{}:                   false,
+	&collector.ScrapePerfMemoryEvents{}:                    false,
+	&collector.ScrapePerfReplicationGroupMembers{}:         false,
+	&collector.ScrapePerfReplicationGroupMemberStats{}:     false,
+	&collector.ScrapePerfReplicationApplierStatsByWorker{}: false,
+	&collector.ScrapeSysUserSummary{}:                      false,
+	&collector.ScrapeUserStat{}:                            false,
+	&collector.ScrapeClientStat{}:                          false,
+	&collector.ScrapeTableStat{}:                           false,
+	&collector.ScrapeSchemaStat{}:                          false,
+	&collector.ScrapeInnodbCmp{}:                           true,
+	&collector.ScrapeInnodbCmpMem{}:                        true,
+	&collector.ScrapeQueryResponseTime{}:                   true,
+	&collector.ScrapeEngineTokudbStatus{}:                  false,
+	&collector.ScrapeEngineInnodbStatus{}:                  false,
+	&collector.ScrapeHeartbeat{}:                           false,
+	&collector.ScrapeSlaveHosts{}:                          false,
+	&collector.ScrapeReplicaHost{}:                         false,
 }
 
 func filterScrapers(scrapers []collector.Scraper, collectParams []string) []collector.Scraper {
@@ -158,7 +158,7 @@ func init() {
 	prometheus.MustRegister(versioncollector.NewCollector("mysqld_exporter"))
 }
 
-func newHandler(scrapers []collector.Scraper, logger *slog.Logger) http.HandlerFunc {
+func newHandler(scrapers []collector.Scraper, logger *slog.Logger, config collector.Config) http.HandlerFunc {
 	return func(w http.ResponseWriter, r *http.Request) {
 		var dsn string
 		var err error
@@ -198,8 +198,7 @@ func newHandler(scrapers []collector.Scraper, logger *slog.Logger) http.HandlerF
 		filteredScrapers := filterScrapers(scrapers, collect)
 
 		registry := prometheus.NewRegistry()
-
-		registry.MustRegister(collector.New(ctx, dsn, filteredScrapers, logger))
+		registry.MustRegister(collector.New(ctx, dsn, filteredScrapers, logger, config))
 
 		gatherers := prometheus.Gatherers{
 			prometheus.DefaultGatherer,
@@ -225,10 +224,16 @@ func main() {
 			scraper.Help(),
 		).Default(defaultOn).Bool()
 
+		if cfgScraper, ok := scraper.(collector.ConfigurableScraper); ok {
+			cfgScraper.RegisterFlags(kingpin.CommandLine)
+		}
+
 		scraperFlags[scraper] = f
 	}
 
 	// Parse flags.
+	var collectorConfig collector.Config
+	collectorConfig.RegisterFlags(kingpin.CommandLine)
 	promslogConfig := &promslog.Config{}
 	flag.AddFlags(kingpin.CommandLine, promslogConfig)
 	kingpin.Version(version.Print("mysqld_exporter"))
@@ -253,7 +258,7 @@ func main() {
 			enabledScrapers = append(enabledScrapers, scraper)
 		}
 	}
-	handlerFunc := newHandler(enabledScrapers, logger)
+	handlerFunc := newHandler(enabledScrapers, logger, collectorConfig)
 	http.Handle(*metricsPath, promhttp.InstrumentMetricHandler(prometheus.DefaultRegisterer, handlerFunc))
 	if *metricsPath != "/" && *metricsPath != "" {
 		landingConfig := web.LandingConfig{
@@ -274,7 +279,7 @@ func main() {
 		}
 		http.Handle("/", landingPage)
 	}
-	http.HandleFunc("/probe", handleProbe(enabledScrapers, logger))
+	http.HandleFunc("/probe", handleProbe(enabledScrapers, logger, collectorConfig))
 	http.HandleFunc("/-/reload", func(w http.ResponseWriter, r *http.Request) {
 		if err = c.ReloadConfig(*configMycnf, *mysqldAddress, *mysqldUser, *tlsInsecureSkipVerify, logger); err != nil {
 			logger.Warn("Error reloading host config", "file", *configMycnf, "error", err)
diff --git a/probe.go b/probe.go
index a6027fb6..3b8d3c2d 100644
--- a/probe.go
+++ b/probe.go
@@ -25,7 +25,7 @@ import (
 	"github.com/prometheus/mysqld_exporter/collector"
 )
 
-func handleProbe(scrapers []collector.Scraper, logger *slog.Logger) http.HandlerFunc {
+func handleProbe(scrapers []collector.Scraper, logger *slog.Logger, config collector.Config) http.HandlerFunc {
 	return func(w http.ResponseWriter, r *http.Request) {
 		ctx := r.Context()
 		params := r.URL.Query()
@@ -72,7 +72,7 @@ func handleProbe(scrapers []collector.Scraper, logger *slog.Logger) http.Handler
 		filteredScrapers := filterScrapers(scrapers, collectParams)
 
 		registry := prometheus.NewRegistry()
-		registry.MustRegister(collector.New(ctx, dsn, filteredScrapers, logger))
+		registry.MustRegister(collector.New(ctx, dsn, filteredScrapers, logger, config))
 
 		h := promhttp.HandlerFor(registry, promhttp.HandlerOpts{})
 		h.ServeHTTP(w, r)
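
For reviewers following along, a minimal sketch of the scraper shape this change converges on. The `ScrapeExample` type, its flag name, and its default are hypothetical, invented purely for illustration; a real scraper has to live in `package collector`, since `Scrape` receives the unexported `*instance` type:

// Hypothetical scraper (not part of this diff) showing the post-migration
// pattern: tunables live as struct fields instead of package-level kingpin
// flag pointers.
package collector

import (
	"context"
	"log/slog"

	"github.com/alecthomas/kingpin/v2"
	"github.com/prometheus/client_golang/prometheus"
)

// ScrapeExample holds its tunables as fields.
type ScrapeExample struct {
	Limit int
}

func (ScrapeExample) Name() string     { return "example" }
func (ScrapeExample) Help() string     { return "Collect example data." }
func (ScrapeExample) Version() float64 { return 5.1 }

// RegisterFlags binds the tunables to the given application, satisfying
// ConfigurableScraper. The pointer receiver matters: parsed values must land
// in the same instance that is later scraped, which is why the scrapers map
// in mysqld_exporter.go now stores pointers (&collector.ScrapeExample{}).
func (s *ScrapeExample) RegisterFlags(application *kingpin.Application) {
	application.Flag(
		"collect.example.limit",
		"Limit the number of example rows collected",
	).Default("100").IntVar(&s.Limit)
}

// Scrape reads the configured value from the receiver instead of
// dereferencing a global flag pointer.
func (s ScrapeExample) Scrape(ctx context.Context, instance *instance, ch chan<- prometheus.Metric, logger *slog.Logger) error {
	_ = s.Limit // would parameterize the collection query here
	return nil
}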
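And a sketch of the call-site wiring, mirroring what the updated tests and `main` do. The application name, the `DATA_SOURCE_NAME` environment variable, and the logger setup are assumptions for the sake of a self-contained example, not part of this change:

// Hypothetical embedding binary: register exporter- and scraper-level flags
// on a private kingpin application, parse, then hand the already-configured
// instances to collector.New.
package main

import (
	"context"
	"log"
	"log/slog"
	"os"

	"github.com/alecthomas/kingpin/v2"
	"github.com/prometheus/mysqld_exporter/collector"
)

func main() {
	app := kingpin.New("example", "")

	// Exporter-level options (lock timeout, slow-log filter).
	var cfg collector.Config
	cfg.RegisterFlags(app)

	// Scraper-level options; registration mutates the instance in place,
	// so the same pointer must be passed on to New below.
	heartbeat := &collector.ScrapeHeartbeat{}
	heartbeat.RegisterFlags(app)

	if _, err := app.Parse(os.Args[1:]); err != nil {
		log.Fatal(err)
	}

	logger := slog.New(slog.NewTextHandler(os.Stderr, nil))
	dsn := os.Getenv("DATA_SOURCE_NAME") // assumption: DSN supplied via environment

	exporter := collector.New(context.Background(), dsn, []collector.Scraper{heartbeat}, logger, cfg)
	_ = exporter // a real program would register this with a prometheus.Registry
}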