From 85da9de6a0c629e7f7a71c845202cb19249c2878 Mon Sep 17 00:00:00 2001 From: Dmitry Date: Wed, 23 Mar 2022 11:59:30 -0700 Subject: [PATCH] [Metrics builder] Move resource creation to the generated code This change updates the metrics builder to fully control creation of the metrics object. Scraper authors don't need to create a pdata.Metrics object and pass it to the MetricsBuilder.Emit function. This change also moves the definition of resource attributes to metadata.yaml, which is used to generate attribute helpers that can be used in the scraper. --- CHANGELOG.md | 5 + cmd/mdatagen/documentation.tmpl | 13 +- cmd/mdatagen/loader.go | 55 ++++++++ cmd/mdatagen/metric-metadata.yaml | 8 ++ cmd/mdatagen/metrics_v2.tmpl | 71 ++++++++--- receiver/apachereceiver/documentation.md | 2 +- .../internal/metadata/generated_metrics_v2.go | 72 +++++++---- receiver/apachereceiver/scraper.go | 4 +- receiver/couchbasereceiver/documentation.md | 2 +- .../internal/metadata/generated_metrics_v2.go | 62 ++++++--- receiver/couchdbreceiver/documentation.md | 9 +- .../internal/metadata/generated_metrics_v2.go | 86 +++++++++---- receiver/couchdbreceiver/metadata.yaml | 5 +- receiver/couchdbreceiver/scraper.go | 20 +-- .../elasticsearchreceiver/documentation.md | 2 +- .../internal/metadata/generated_metrics_v2.go | 118 +++++++++++------- .../scraper/cpuscraper/cpu_scraper.go | 10 +- .../scraper/cpuscraper/cpu_scraper_test.go | 6 +- .../scraper/cpuscraper/documentation.md | 2 +- .../internal/metadata/generated_metrics_v2.go | 64 +++++++--- .../diskscraper/disk_scraper_others.go | 9 +- .../scraper/diskscraper/disk_scraper_test.go | 4 + .../diskscraper/disk_scraper_windows.go | 13 +- .../scraper/diskscraper/documentation.md | 2 +- .../internal/metadata/generated_metrics_v2.go | 74 +++++++---- .../filesystemscraper/documentation.md | 2 +- .../filesystemscraper/filesystem_scraper.go | 9 +- .../internal/metadata/generated_metrics_v2.go | 66 +++++++--- .../scraper/loadscraper/documentation.md | 2 +- .../internal/metadata/generated_metrics_v2.go | 66 +++++++--- .../scraper/loadscraper/load_scraper.go | 11 +- .../scraper/memoryscraper/documentation.md | 2 +- .../internal/metadata/generated_metrics_v2.go | 64 +++++++--- .../scraper/memoryscraper/memory_scraper.go | 13 +- .../scraper/networkscraper/documentation.md | 2 +- .../internal/metadata/generated_metrics_v2.go | 70 ++++++++--- .../scraper/networkscraper/network_scraper.go | 19 +-- .../scraper/pagingscraper/documentation.md | 2 +- .../internal/metadata/generated_metrics_v2.go | 68 +++++++--- .../pagingscraper/paging_scraper_others.go | 6 +- .../pagingscraper/paging_scraper_windows.go | 6 +- .../scraper/processesscraper/documentation.md | 2 +- .../scraper/processscraper/documentation.md | 13 +- .../internal/metadata/generated_metrics_v2.go | 110 +++++++++++++--- .../scraper/processscraper/metadata.yaml | 33 +++++ .../scraper/processscraper/process.go | 26 ++-- .../scraper/processscraper/process_scraper.go | 17 +-- .../processscraper/process_scraper_test.go | 19 ++- .../kafkametricsreceiver/documentation.md | 2 +- .../kubeletstatsreceiver/documentation.md | 2 +- receiver/memcachedreceiver/documentation.md | 2 +- .../mongodbatlasreceiver/documentation.md | 2 +- receiver/mongodbreceiver/documentation.md | 2 +- .../internal/metadata/generated_metrics_v2.go | 84 +++++++++---- receiver/mysqlreceiver/documentation.md | 2 +- .../internal/metadata/generated_metrics_v2.go | 94 +++++++++----- receiver/mysqlreceiver/scraper.go | 5 +- receiver/nginxreceiver/documentation.md | 2 +-
.../internal/metadata/generated_metrics_v2.go | 68 +++++++--- receiver/nginxreceiver/scraper.go | 4 +- receiver/postgresqlreceiver/documentation.md | 2 +- .../internal/metadata/generated_metrics_v2.go | 74 +++++++---- receiver/postgresqlreceiver/scraper.go | 5 +- receiver/rabbitmqreceiver/documentation.md | 13 +- .../internal/metadata/generated_metrics_v2.go | 102 ++++++++++----- receiver/rabbitmqreceiver/metadata.yaml | 7 +- receiver/rabbitmqreceiver/scraper.go | 30 ++--- receiver/redisreceiver/documentation.md | 2 +- .../internal/metadata/generated_metrics_v2.go | 118 +++++++++++------- receiver/redisreceiver/redis_scraper.go | 6 +- receiver/zookeeperreceiver/documentation.md | 11 +- .../internal/metadata/generated_metrics_v2.go | 114 +++++++++++------ receiver/zookeeperreceiver/metadata.yaml | 6 +- receiver/zookeeperreceiver/scraper.go | 10 +- 74 files changed, 1426 insertions(+), 689 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 29f3d5c4532a..65958796eb0c 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -17,6 +17,11 @@ - `resourcedetectionprocessor`: Add attribute allowlist (#8547) +### 💡 Enhancements 💡 + +- `cmd/mdatagen`: Add resource attributes definition to metadata.yaml and move `pdata.Metrics` creation to the + generated code (#5270) + ## v0.47.0 ### 💡 Enhancements 💡 diff --git a/cmd/mdatagen/documentation.tmpl b/cmd/mdatagen/documentation.tmpl index 81d2bf6fa8c0..370a0ec184b5 100644 --- a/cmd/mdatagen/documentation.tmpl +++ b/cmd/mdatagen/documentation.tmpl @@ -25,7 +25,18 @@ metrics: ``` {{- end }} -## Attributes +{{- if .ResourceAttributes }} + +## Resource attributes + +| Name | Description | Type | +| ---- | ----------- | ---- | +{{- range $attributeName, $attributeInfo := .ResourceAttributes }} +| {{ $attributeName }} | {{ $attributeInfo.Description }} | {{ $attributeInfo.Type }} | +{{- end }} +{{- end }} + +## Metric attributes | Name | Description | | ---- | ----------- | diff --git a/cmd/mdatagen/loader.go b/cmd/mdatagen/loader.go index 5eccc9b83b63..91539d727888 100644 --- a/cmd/mdatagen/loader.go +++ b/cmd/mdatagen/loader.go @@ -26,6 +26,7 @@ import ( "github.com/go-playground/validator/v10/non-standard/validators" en_translations "github.com/go-playground/validator/v10/translations/en" "go.opentelemetry.io/collector/config/mapprovider/filemapprovider" + "go.opentelemetry.io/collector/model/pdata" ) type metricName string @@ -48,6 +49,56 @@ func (mn attributeName) RenderUnexported() (string, error) { return formatIdentifier(string(mn), false) } +// ValueType defines an attribute value type. +type ValueType struct { + // ValueType is the attribute value type, one of "string", "int", "double", "bool", "bytes". + ValueType pdata.ValueType +} + +// UnmarshalText implements the encoding.TextUnmarshaler interface. +func (mvt *ValueType) UnmarshalText(text []byte) error { + switch vtStr := string(text); vtStr { + case "": + mvt.ValueType = pdata.ValueTypeEmpty + case "string": + mvt.ValueType = pdata.ValueTypeString + case "int": + mvt.ValueType = pdata.ValueTypeInt + case "double": + mvt.ValueType = pdata.ValueTypeDouble + case "bool": + mvt.ValueType = pdata.ValueTypeBool + case "bytes": + mvt.ValueType = pdata.ValueTypeBytes + default: + return fmt.Errorf("invalid type: %q", vtStr) + } + return nil +} + +// String returns capitalized name of the ValueType. +func (mvt ValueType) String() string { + return strings.Title(strings.ToLower(mvt.ValueType.String())) +} + +// Primitive returns name of primitive type for the ValueType.
+func (mvt ValueType) Primitive() string { + switch mvt.ValueType { + case pdata.ValueTypeString: + return "string" + case pdata.ValueTypeInt: + return "int64" + case pdata.ValueTypeDouble: + return "float64" + case pdata.ValueTypeBool: + return "bool" + case pdata.ValueTypeBytes: + return "[]byte" + default: + return "" + } +} + type metric struct { // Enabled defines whether the metric is enabled by default. Enabled *bool `yaml:"enabled" validate:"required"` @@ -99,11 +150,15 @@ type attribute struct { Value string // Enum can optionally describe the set of values to which the attribute can belong. Enum []string + // Type is an attribute type. + Type ValueType `mapstructure:"type"` } type metadata struct { // Name of the component. Name string `validate:"notblank"` + // ResourceAttributes that can be emitted by the component. + ResourceAttributes map[attributeName]attribute `mapstructure:"resource_attributes" validate:"dive"` // Attributes emitted by one or more metrics. Attributes map[attributeName]attribute `validate:"dive"` // Metrics that can be emitted by the component. diff --git a/cmd/mdatagen/metric-metadata.yaml b/cmd/mdatagen/metric-metadata.yaml index ac3c467d0885..5e2bb7c05bf5 100644 --- a/cmd/mdatagen/metric-metadata.yaml +++ b/cmd/mdatagen/metric-metadata.yaml @@ -1,6 +1,14 @@ # Required: name of the receiver. name: +# Optional: map of resource attribute definitions with the key being the attribute name. +resource_attributes: + : + # Required: description of the attribute. + description: + # Required: attribute type. + type: + # Optional: map of attribute definitions with the key being the attribute name and value # being described below. attributes: diff --git a/cmd/mdatagen/metrics_v2.tmpl b/cmd/mdatagen/metrics_v2.tmpl index be3916d53be8..0c35c2416f63 100644 --- a/cmd/mdatagen/metrics_v2.tmpl +++ b/cmd/mdatagen/metrics_v2.tmpl @@ -101,7 +101,10 @@ func newMetric{{ $name.Render }}(settings MetricSettings) metric{{ $name.Render // MetricsBuilder provides an interface for scrapers to report metrics while taking care of all the transformations // required to produce metric representation defined in metadata and user settings. type MetricsBuilder struct { - startTime pdata.Timestamp + startTime pdata.Timestamp // start time that will be applied to all recorded data points. + metricsCapacity int // maximum observed number of metrics per resource. + resourceCapacity int // maximum observed number of resource attributes. + metricsBuffer pdata.Metrics // accumulates metrics data before emitting. {{- range $name, $metric := .Metrics }} metric{{ $name.Render }} metric{{ $name.Render }} {{- end }} @@ -120,6 +123,7 @@ func WithStartTime(startTime pdata.Timestamp) metricBuilderOption { func NewMetricsBuilder(settings MetricsSettings, options ...metricBuilderOption) *MetricsBuilder { mb := &MetricsBuilder{ startTime: pdata.NewTimestampFromTime(time.Now()), + metricsBuffer: pdata.NewMetrics(), {{- range $name, $metric := .Metrics }} metric{{ $name.Render }}: newMetric{{ $name.Render }}(settings.{{ $name.Render }}), {{- end }} @@ -130,13 +134,58 @@ func NewMetricsBuilder(settings MetricsSettings, options ...metricBuilderOption) return mb } -// Emit appends generated metrics to a pdata.MetricsSlice and updates the internal state to be ready for recording -// another set of data points. This function will be doing all transformations required to produce metric representation -// defined in metadata and user settings, e.g. delta/cumulative translation. 
-func (mb *MetricsBuilder) Emit(metrics pdata.MetricSlice) { +// updateCapacity updates max length of metrics and resource attributes that will be used for the slice capacity. +func (mb *MetricsBuilder) updateCapacity(rm pdata.ResourceMetrics) { + if mb.metricsCapacity < rm.InstrumentationLibraryMetrics().At(0).Metrics().Len() { + mb.metricsCapacity = rm.InstrumentationLibraryMetrics().At(0).Metrics().Len() + } + if mb.resourceCapacity < rm.Resource().Attributes().Len() { + mb.resourceCapacity = rm.Resource().Attributes().Len() + } +} + +// ResourceOption applies changes to provided resource. +type ResourceOption func(pdata.Resource) + +{{- range $name, $attr := .ResourceAttributes }} +// With{{ $name.Render }} sets provided value as "{{ $name }}" attribute for current resource. +func With{{ $name.Render }}(val {{ $attr.Type.Primitive }}) ResourceOption { + return func(r pdata.Resource) { + r.Attributes().Upsert{{ $attr.Type }}("{{ $name }}", val) + } +} +{{ end }} + +// EmitForResource saves all the generated metrics under a new resource and updates the internal state to be ready for +// recording another set of data points as part of another resource. This function can be helpful when one scraper +// needs to emit metrics from several resources. Otherwise calling this function is not required, +// just `Emit` function can be called instead. Resource attributes should be provided as ResourceOption arguments. +func (mb *MetricsBuilder) EmitForResource(ro ...ResourceOption) { + rm := pdata.NewResourceMetrics() + rm.Resource().Attributes().EnsureCapacity(mb.resourceCapacity) + for _, op := range ro { + op(rm.Resource()) + } + ils := rm.InstrumentationLibraryMetrics().AppendEmpty() + ils.InstrumentationLibrary().SetName("otelcol/{{ .Name }}") + ils.Metrics().EnsureCapacity(mb.metricsCapacity) {{- range $name, $metric := .Metrics }} - mb.metric{{- $name.Render }}.emit(metrics) + mb.metric{{- $name.Render }}.emit(ils.Metrics()) {{- end }} + if ils.Metrics().Len() > 0 { + mb.updateCapacity(rm) + rm.MoveTo(mb.metricsBuffer.ResourceMetrics().AppendEmpty()) + } +} + +// Emit returns all the metrics accumulated by the metrics builder and updates the internal state to be ready for +// recording another set of metrics. This function will be responsible for applying all the transformations required to +// produce metric representation defined in metadata and user settings, e.g. delta or cumulative. +func (mb *MetricsBuilder) Emit(ro ...ResourceOption) pdata.Metrics { + mb.EmitForResource(ro...) + metrics := pdata.NewMetrics() + mb.metricsBuffer.MoveTo(metrics) + return metrics } {{ range $name, $metric := .Metrics -}} @@ -159,16 +208,6 @@ func (mb *MetricsBuilder) Reset(options ...metricBuilderOption) { } } -// NewMetricData creates new pdata.Metrics and sets the InstrumentationLibrary -// name on the ResourceMetrics. -func (mb *MetricsBuilder) NewMetricData() pdata.Metrics { - md := pdata.NewMetrics() - rm := md.ResourceMetrics().AppendEmpty() - ilm := rm.InstrumentationLibraryMetrics().AppendEmpty() - ilm.InstrumentationLibrary().SetName("otelcol/{{ .Name }}") - return md -} - // Attributes contains the possible metric attributes that can be used. 
var Attributes = struct { {{- range $name, $info := .Attributes }} diff --git a/receiver/apachereceiver/documentation.md b/receiver/apachereceiver/documentation.md index 025bd8a47337..392e54d0a24f 100644 --- a/receiver/apachereceiver/documentation.md +++ b/receiver/apachereceiver/documentation.md @@ -24,7 +24,7 @@ metrics: enabled: ``` -## Attributes +## Metric attributes | Name | Description | | ---- | ----------- | diff --git a/receiver/apachereceiver/internal/metadata/generated_metrics_v2.go b/receiver/apachereceiver/internal/metadata/generated_metrics_v2.go index 6d0d2a8acb9d..1a98c9e05cb8 100644 --- a/receiver/apachereceiver/internal/metadata/generated_metrics_v2.go +++ b/receiver/apachereceiver/internal/metadata/generated_metrics_v2.go @@ -369,7 +369,10 @@ func newMetricApacheWorkers(settings MetricSettings) metricApacheWorkers { // MetricsBuilder provides an interface for scrapers to report metrics while taking care of all the transformations // required to produce metric representation defined in metadata and user settings. type MetricsBuilder struct { - startTime pdata.Timestamp + startTime pdata.Timestamp // start time that will be applied to all recorded data points. + metricsCapacity int // maximum observed number of metrics per resource. + resourceCapacity int // maximum observed number of resource attributes. + metricsBuffer pdata.Metrics // accumulates metrics data before emitting. metricApacheCurrentConnections metricApacheCurrentConnections metricApacheRequests metricApacheRequests metricApacheScoreboard metricApacheScoreboard @@ -391,6 +394,7 @@ func WithStartTime(startTime pdata.Timestamp) metricBuilderOption { func NewMetricsBuilder(settings MetricsSettings, options ...metricBuilderOption) *MetricsBuilder { mb := &MetricsBuilder{ startTime: pdata.NewTimestampFromTime(time.Now()), + metricsBuffer: pdata.NewMetrics(), metricApacheCurrentConnections: newMetricApacheCurrentConnections(settings.ApacheCurrentConnections), metricApacheRequests: newMetricApacheRequests(settings.ApacheRequests), metricApacheScoreboard: newMetricApacheScoreboard(settings.ApacheScoreboard), @@ -404,16 +408,52 @@ func NewMetricsBuilder(settings MetricsSettings, options ...metricBuilderOption) return mb } -// Emit appends generated metrics to a pdata.MetricsSlice and updates the internal state to be ready for recording -// another set of data points. This function will be doing all transformations required to produce metric representation -// defined in metadata and user settings, e.g. delta/cumulative translation. -func (mb *MetricsBuilder) Emit(metrics pdata.MetricSlice) { - mb.metricApacheCurrentConnections.emit(metrics) - mb.metricApacheRequests.emit(metrics) - mb.metricApacheScoreboard.emit(metrics) - mb.metricApacheTraffic.emit(metrics) - mb.metricApacheUptime.emit(metrics) - mb.metricApacheWorkers.emit(metrics) +// updateCapacity updates max length of metrics and resource attributes that will be used for the slice capacity. +func (mb *MetricsBuilder) updateCapacity(rm pdata.ResourceMetrics) { + if mb.metricsCapacity < rm.InstrumentationLibraryMetrics().At(0).Metrics().Len() { + mb.metricsCapacity = rm.InstrumentationLibraryMetrics().At(0).Metrics().Len() + } + if mb.resourceCapacity < rm.Resource().Attributes().Len() { + mb.resourceCapacity = rm.Resource().Attributes().Len() + } +} + +// ResourceOption applies changes to provided resource. 
+type ResourceOption func(pdata.Resource) + +// EmitForResource saves all the generated metrics under a new resource and updates the internal state to be ready for +// recording another set of data points as part of another resource. This function can be helpful when one scraper +// needs to emit metrics from several resources. Otherwise calling this function is not required, +// just `Emit` function can be called instead. Resource attributes should be provided as ResourceOption arguments. +func (mb *MetricsBuilder) EmitForResource(ro ...ResourceOption) { + rm := pdata.NewResourceMetrics() + rm.Resource().Attributes().EnsureCapacity(mb.resourceCapacity) + for _, op := range ro { + op(rm.Resource()) + } + ils := rm.InstrumentationLibraryMetrics().AppendEmpty() + ils.InstrumentationLibrary().SetName("otelcol/apachereceiver") + ils.Metrics().EnsureCapacity(mb.metricsCapacity) + mb.metricApacheCurrentConnections.emit(ils.Metrics()) + mb.metricApacheRequests.emit(ils.Metrics()) + mb.metricApacheScoreboard.emit(ils.Metrics()) + mb.metricApacheTraffic.emit(ils.Metrics()) + mb.metricApacheUptime.emit(ils.Metrics()) + mb.metricApacheWorkers.emit(ils.Metrics()) + if ils.Metrics().Len() > 0 { + mb.updateCapacity(rm) + rm.MoveTo(mb.metricsBuffer.ResourceMetrics().AppendEmpty()) + } +} + +// Emit returns all the metrics accumulated by the metrics builder and updates the internal state to be ready for +// recording another set of metrics. This function will be responsible for applying all the transformations required to +// produce metric representation defined in metadata and user settings, e.g. delta or cumulative. +func (mb *MetricsBuilder) Emit(ro ...ResourceOption) pdata.Metrics { + mb.EmitForResource(ro...) + metrics := pdata.NewMetrics() + mb.metricsBuffer.MoveTo(metrics) + return metrics } // RecordApacheCurrentConnectionsDataPoint adds a data point to apache.current_connections metric. @@ -455,16 +495,6 @@ func (mb *MetricsBuilder) Reset(options ...metricBuilderOption) { } } -// NewMetricData creates new pdata.Metrics and sets the InstrumentationLibrary -// name on the ResourceMetrics. -func (mb *MetricsBuilder) NewMetricData() pdata.Metrics { - md := pdata.NewMetrics() - rm := md.ResourceMetrics().AppendEmpty() - ilm := rm.InstrumentationLibraryMetrics().AppendEmpty() - ilm.InstrumentationLibrary().SetName("otelcol/apachereceiver") - return md -} - // Attributes contains the possible metric attributes that can be used. var Attributes = struct { // ScoreboardState (The state of a connection.) diff --git a/receiver/apachereceiver/scraper.go b/receiver/apachereceiver/scraper.go index 5bf38c0c22c1..b65af119a5ae 100644 --- a/receiver/apachereceiver/scraper.go +++ b/receiver/apachereceiver/scraper.go @@ -103,9 +103,7 @@ func (r *apacheScraper) scrape(context.Context) (pdata.Metrics, error) { } } - md := r.mb.NewMetricData() - r.mb.Emit(md.ResourceMetrics().At(0).InstrumentationLibraryMetrics().At(0).Metrics()) - return md, nil + return r.mb.Emit(), nil } // GetStats collects metric stats by making a get request at an endpoint. 
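To illustrate the scraper-side effect of this change, here is a minimal sketch of a scrape function written against the generated builder API shown above (`NewMetricsBuilder`, `DefaultMetricsSettings`, `Emit`). The receiver name, its import path, and the `RecordExampleRequestsDataPoint` helper are placeholders standing in for a receiver's own generated `internal/metadata` package; they are not code from this patch.

```go
package examplereceiver // hypothetical receiver, for illustration only

import (
	"context"
	"time"

	"go.opentelemetry.io/collector/model/pdata"

	// Hypothetical generated package; a real receiver imports its own internal/metadata.
	"github.com/open-telemetry/opentelemetry-collector-contrib/receiver/examplereceiver/internal/metadata"
)

type exampleScraper struct {
	mb *metadata.MetricsBuilder
}

func newExampleScraper() *exampleScraper {
	// The builder now owns the pdata.Metrics object; the scraper only records data points.
	return &exampleScraper{mb: metadata.NewMetricsBuilder(metadata.DefaultMetricsSettings())}
}

func (s *exampleScraper) scrape(context.Context) (pdata.Metrics, error) {
	now := pdata.NewTimestampFromTime(time.Now())
	// RecordExampleRequestsDataPoint stands in for the per-metric helpers that mdatagen
	// generates for each receiver; the value is illustrative.
	s.mb.RecordExampleRequestsDataPoint(now, 42)
	// Emit wraps the recorded data points in ResourceMetrics/InstrumentationLibraryMetrics,
	// replacing the removed NewMetricData() + Emit(pdata.MetricSlice) pair.
	return s.mb.Emit(), nil
}
```

Compare with the apachereceiver scraper change above, which now simply returns `r.mb.Emit(), nil`.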
diff --git a/receiver/couchbasereceiver/documentation.md b/receiver/couchbasereceiver/documentation.md index ae0708565c74..a9d8ebe8492e 100644 --- a/receiver/couchbasereceiver/documentation.md +++ b/receiver/couchbasereceiver/documentation.md @@ -18,7 +18,7 @@ metrics: enabled: ``` -## Attributes +## Metric attributes | Name | Description | | ---- | ----------- | diff --git a/receiver/couchbasereceiver/internal/metadata/generated_metrics_v2.go b/receiver/couchbasereceiver/internal/metadata/generated_metrics_v2.go index 69d9cf0de392..ed0ea617497c 100644 --- a/receiver/couchbasereceiver/internal/metadata/generated_metrics_v2.go +++ b/receiver/couchbasereceiver/internal/metadata/generated_metrics_v2.go @@ -24,7 +24,10 @@ func DefaultMetricsSettings() MetricsSettings { // MetricsBuilder provides an interface for scrapers to report metrics while taking care of all the transformations // required to produce metric representation defined in metadata and user settings. type MetricsBuilder struct { - startTime pdata.Timestamp + startTime pdata.Timestamp // start time that will be applied to all recorded data points. + metricsCapacity int // maximum observed number of metrics per resource. + resourceCapacity int // maximum observed number of resource attributes. + metricsBuffer pdata.Metrics // accumulates metrics data before emitting. } // metricBuilderOption applies changes to default metrics builder. @@ -39,7 +42,8 @@ func WithStartTime(startTime pdata.Timestamp) metricBuilderOption { func NewMetricsBuilder(settings MetricsSettings, options ...metricBuilderOption) *MetricsBuilder { mb := &MetricsBuilder{ - startTime: pdata.NewTimestampFromTime(time.Now()), + startTime: pdata.NewTimestampFromTime(time.Now()), + metricsBuffer: pdata.NewMetrics(), } for _, op := range options { op(mb) @@ -47,10 +51,46 @@ func NewMetricsBuilder(settings MetricsSettings, options ...metricBuilderOption) return mb } -// Emit appends generated metrics to a pdata.MetricsSlice and updates the internal state to be ready for recording -// another set of data points. This function will be doing all transformations required to produce metric representation -// defined in metadata and user settings, e.g. delta/cumulative translation. -func (mb *MetricsBuilder) Emit(metrics pdata.MetricSlice) { +// updateCapacity updates max length of metrics and resource attributes that will be used for the slice capacity. +func (mb *MetricsBuilder) updateCapacity(rm pdata.ResourceMetrics) { + if mb.metricsCapacity < rm.InstrumentationLibraryMetrics().At(0).Metrics().Len() { + mb.metricsCapacity = rm.InstrumentationLibraryMetrics().At(0).Metrics().Len() + } + if mb.resourceCapacity < rm.Resource().Attributes().Len() { + mb.resourceCapacity = rm.Resource().Attributes().Len() + } +} + +// ResourceOption applies changes to provided resource. +type ResourceOption func(pdata.Resource) + +// EmitForResource saves all the generated metrics under a new resource and updates the internal state to be ready for +// recording another set of data points as part of another resource. This function can be helpful when one scraper +// needs to emit metrics from several resources. Otherwise calling this function is not required, +// just `Emit` function can be called instead. Resource attributes should be provided as ResourceOption arguments. 
+func (mb *MetricsBuilder) EmitForResource(ro ...ResourceOption) { + rm := pdata.NewResourceMetrics() + rm.Resource().Attributes().EnsureCapacity(mb.resourceCapacity) + for _, op := range ro { + op(rm.Resource()) + } + ils := rm.InstrumentationLibraryMetrics().AppendEmpty() + ils.InstrumentationLibrary().SetName("otelcol/couchbasereceiver") + ils.Metrics().EnsureCapacity(mb.metricsCapacity) + if ils.Metrics().Len() > 0 { + mb.updateCapacity(rm) + rm.MoveTo(mb.metricsBuffer.ResourceMetrics().AppendEmpty()) + } +} + +// Emit returns all the metrics accumulated by the metrics builder and updates the internal state to be ready for +// recording another set of metrics. This function will be responsible for applying all the transformations required to +// produce metric representation defined in metadata and user settings, e.g. delta or cumulative. +func (mb *MetricsBuilder) Emit(ro ...ResourceOption) pdata.Metrics { + mb.EmitForResource(ro...) + metrics := pdata.NewMetrics() + mb.metricsBuffer.MoveTo(metrics) + return metrics } // Reset resets metrics builder to its initial state. It should be used when external metrics source is restarted, @@ -62,16 +102,6 @@ func (mb *MetricsBuilder) Reset(options ...metricBuilderOption) { } } -// NewMetricData creates new pdata.Metrics and sets the InstrumentationLibrary -// name on the ResourceMetrics. -func (mb *MetricsBuilder) NewMetricData() pdata.Metrics { - md := pdata.NewMetrics() - rm := md.ResourceMetrics().AppendEmpty() - ilm := rm.InstrumentationLibraryMetrics().AppendEmpty() - ilm.InstrumentationLibrary().SetName("otelcol/couchbasereceiver") - return md -} - // Attributes contains the possible metric attributes that can be used. var Attributes = struct { }{} diff --git a/receiver/couchdbreceiver/documentation.md b/receiver/couchdbreceiver/documentation.md index fbe548507c9a..59a07961182a 100644 --- a/receiver/couchdbreceiver/documentation.md +++ b/receiver/couchdbreceiver/documentation.md @@ -26,11 +26,16 @@ metrics: enabled: ``` -## Attributes +## Resource attributes + +| Name | Description | Type | +| ---- | ----------- | ---- | +| couchdb.node.name | The name of the node. | String | + +## Metric attributes | Name | Description | | ---- | ----------- | -| couchdb.node.name | The name of the node. | | http.method | An HTTP request method. | | http.status_code | An HTTP status code. | | operation | The operation type. | diff --git a/receiver/couchdbreceiver/internal/metadata/generated_metrics_v2.go b/receiver/couchdbreceiver/internal/metadata/generated_metrics_v2.go index 1e88a0a865c7..28b451a03df2 100644 --- a/receiver/couchdbreceiver/internal/metadata/generated_metrics_v2.go +++ b/receiver/couchdbreceiver/internal/metadata/generated_metrics_v2.go @@ -471,7 +471,10 @@ func newMetricCouchdbHttpdViews(settings MetricSettings) metricCouchdbHttpdViews // MetricsBuilder provides an interface for scrapers to report metrics while taking care of all the transformations // required to produce metric representation defined in metadata and user settings. type MetricsBuilder struct { - startTime pdata.Timestamp + startTime pdata.Timestamp // start time that will be applied to all recorded data points. + metricsCapacity int // maximum observed number of metrics per resource. + resourceCapacity int // maximum observed number of resource attributes. + metricsBuffer pdata.Metrics // accumulates metrics data before emitting. 
metricCouchdbAverageRequestTime metricCouchdbAverageRequestTime metricCouchdbDatabaseOpen metricCouchdbDatabaseOpen metricCouchdbDatabaseOperations metricCouchdbDatabaseOperations @@ -495,6 +498,7 @@ func WithStartTime(startTime pdata.Timestamp) metricBuilderOption { func NewMetricsBuilder(settings MetricsSettings, options ...metricBuilderOption) *MetricsBuilder { mb := &MetricsBuilder{ startTime: pdata.NewTimestampFromTime(time.Now()), + metricsBuffer: pdata.NewMetrics(), metricCouchdbAverageRequestTime: newMetricCouchdbAverageRequestTime(settings.CouchdbAverageRequestTime), metricCouchdbDatabaseOpen: newMetricCouchdbDatabaseOpen(settings.CouchdbDatabaseOpen), metricCouchdbDatabaseOperations: newMetricCouchdbDatabaseOperations(settings.CouchdbDatabaseOperations), @@ -510,18 +514,61 @@ func NewMetricsBuilder(settings MetricsSettings, options ...metricBuilderOption) return mb } -// Emit appends generated metrics to a pdata.MetricsSlice and updates the internal state to be ready for recording -// another set of data points. This function will be doing all transformations required to produce metric representation -// defined in metadata and user settings, e.g. delta/cumulative translation. -func (mb *MetricsBuilder) Emit(metrics pdata.MetricSlice) { - mb.metricCouchdbAverageRequestTime.emit(metrics) - mb.metricCouchdbDatabaseOpen.emit(metrics) - mb.metricCouchdbDatabaseOperations.emit(metrics) - mb.metricCouchdbFileDescriptorOpen.emit(metrics) - mb.metricCouchdbHttpdBulkRequests.emit(metrics) - mb.metricCouchdbHttpdRequests.emit(metrics) - mb.metricCouchdbHttpdResponses.emit(metrics) - mb.metricCouchdbHttpdViews.emit(metrics) +// updateCapacity updates max length of metrics and resource attributes that will be used for the slice capacity. +func (mb *MetricsBuilder) updateCapacity(rm pdata.ResourceMetrics) { + if mb.metricsCapacity < rm.InstrumentationLibraryMetrics().At(0).Metrics().Len() { + mb.metricsCapacity = rm.InstrumentationLibraryMetrics().At(0).Metrics().Len() + } + if mb.resourceCapacity < rm.Resource().Attributes().Len() { + mb.resourceCapacity = rm.Resource().Attributes().Len() + } +} + +// ResourceOption applies changes to provided resource. +type ResourceOption func(pdata.Resource) + +// WithCouchdbNodeName sets provided value as "couchdb.node.name" attribute for current resource. +func WithCouchdbNodeName(val string) ResourceOption { + return func(r pdata.Resource) { + r.Attributes().UpsertString("couchdb.node.name", val) + } +} + +// EmitForResource saves all the generated metrics under a new resource and updates the internal state to be ready for +// recording another set of data points as part of another resource. This function can be helpful when one scraper +// needs to emit metrics from several resources. Otherwise calling this function is not required, +// just `Emit` function can be called instead. Resource attributes should be provided as ResourceOption arguments. 
+func (mb *MetricsBuilder) EmitForResource(ro ...ResourceOption) { + rm := pdata.NewResourceMetrics() + rm.Resource().Attributes().EnsureCapacity(mb.resourceCapacity) + for _, op := range ro { + op(rm.Resource()) + } + ils := rm.InstrumentationLibraryMetrics().AppendEmpty() + ils.InstrumentationLibrary().SetName("otelcol/couchdbreceiver") + ils.Metrics().EnsureCapacity(mb.metricsCapacity) + mb.metricCouchdbAverageRequestTime.emit(ils.Metrics()) + mb.metricCouchdbDatabaseOpen.emit(ils.Metrics()) + mb.metricCouchdbDatabaseOperations.emit(ils.Metrics()) + mb.metricCouchdbFileDescriptorOpen.emit(ils.Metrics()) + mb.metricCouchdbHttpdBulkRequests.emit(ils.Metrics()) + mb.metricCouchdbHttpdRequests.emit(ils.Metrics()) + mb.metricCouchdbHttpdResponses.emit(ils.Metrics()) + mb.metricCouchdbHttpdViews.emit(ils.Metrics()) + if ils.Metrics().Len() > 0 { + mb.updateCapacity(rm) + rm.MoveTo(mb.metricsBuffer.ResourceMetrics().AppendEmpty()) + } +} + +// Emit returns all the metrics accumulated by the metrics builder and updates the internal state to be ready for +// recording another set of metrics. This function will be responsible for applying all the transformations required to +// produce metric representation defined in metadata and user settings, e.g. delta or cumulative. +func (mb *MetricsBuilder) Emit(ro ...ResourceOption) pdata.Metrics { + mb.EmitForResource(ro...) + metrics := pdata.NewMetrics() + mb.metricsBuffer.MoveTo(metrics) + return metrics } // RecordCouchdbAverageRequestTimeDataPoint adds a data point to couchdb.average_request_time metric. @@ -573,20 +620,8 @@ func (mb *MetricsBuilder) Reset(options ...metricBuilderOption) { } } -// NewMetricData creates new pdata.Metrics and sets the InstrumentationLibrary -// name on the ResourceMetrics. -func (mb *MetricsBuilder) NewMetricData() pdata.Metrics { - md := pdata.NewMetrics() - rm := md.ResourceMetrics().AppendEmpty() - ilm := rm.InstrumentationLibraryMetrics().AppendEmpty() - ilm.InstrumentationLibrary().SetName("otelcol/couchdbreceiver") - return md -} - // Attributes contains the possible metric attributes that can be used. var Attributes = struct { - // CouchdbNodeName (The name of the node.) - CouchdbNodeName string // HTTPMethod (An HTTP request method.) HTTPMethod string // HTTPStatusCode (An HTTP status code.) @@ -596,7 +631,6 @@ var Attributes = struct { // View (The view type.) View string }{ - "couchdb.node.name", "http.method", "http.status_code", "operation", diff --git a/receiver/couchdbreceiver/metadata.yaml b/receiver/couchdbreceiver/metadata.yaml index 7914c1e9a7e5..4873ce54203d 100644 --- a/receiver/couchdbreceiver/metadata.yaml +++ b/receiver/couchdbreceiver/metadata.yaml @@ -1,8 +1,11 @@ name: couchdbreceiver -attributes: +resource_attributes: couchdb.node.name: description: The name of the node. + type: string + +attributes: http.method: description: An HTTP request method. 
enum: [ COPY, DELETE, GET, HEAD, OPTIONS, POST, PUT ] diff --git a/receiver/couchdbreceiver/scraper.go b/receiver/couchdbreceiver/scraper.go index 26476bbf4aa1..b19b20e41c6f 100644 --- a/receiver/couchdbreceiver/scraper.go +++ b/receiver/couchdbreceiver/scraper.go @@ -57,10 +57,6 @@ func (c *couchdbScraper) scrape(context.Context) (pdata.Metrics, error) { return pdata.NewMetrics(), errors.New("no client available") } - return c.getResourceMetrics() -} - -func (c *couchdbScraper) getResourceMetrics() (pdata.Metrics, error) { localNode := "_local" stats, err := c.client.GetStats(localNode) if err != nil { @@ -71,16 +67,7 @@ func (c *couchdbScraper) getResourceMetrics() (pdata.Metrics, error) { return pdata.NewMetrics(), err } - md := pdata.NewMetrics() - err = c.appendMetrics(stats, md.ResourceMetrics()) - return md, err -} - -func (c *couchdbScraper) appendMetrics(stats map[string]interface{}, rms pdata.ResourceMetricsSlice) error { now := pdata.NewTimestampFromTime(time.Now()) - md := c.mb.NewMetricData() - - md.ResourceMetrics().At(0).Resource().Attributes().UpsertString(metadata.A.CouchdbNodeName, c.config.Endpoint) var errors scrapererror.ScrapeErrors c.recordCouchdbAverageRequestTimeDataPoint(now, stats, errors) @@ -92,10 +79,5 @@ func (c *couchdbScraper) appendMetrics(stats map[string]interface{}, rms pdata.R c.recordCouchdbFileDescriptorOpenDataPoint(now, stats, errors) c.recordCouchdbDatabaseOperationsDataPoint(now, stats, errors) - c.mb.Emit(md.ResourceMetrics().At(0).InstrumentationLibraryMetrics().At(0).Metrics()) - if md.ResourceMetrics().At(0).InstrumentationLibraryMetrics().At(0).Metrics().Len() > 0 { - md.ResourceMetrics().At(0).CopyTo(rms.AppendEmpty()) - } - - return errors.Combine() + return c.mb.Emit(metadata.WithCouchdbNodeName(c.config.Endpoint)), errors.Combine() } diff --git a/receiver/elasticsearchreceiver/documentation.md b/receiver/elasticsearchreceiver/documentation.md index 4aa6e44fab10..2ee889d55a39 100644 --- a/receiver/elasticsearchreceiver/documentation.md +++ b/receiver/elasticsearchreceiver/documentation.md @@ -47,7 +47,7 @@ metrics: enabled: ``` -## Attributes +## Metric attributes | Name | Description | | ---- | ----------- | diff --git a/receiver/elasticsearchreceiver/internal/metadata/generated_metrics_v2.go b/receiver/elasticsearchreceiver/internal/metadata/generated_metrics_v2.go index 6a68ea1aac12..a9964bfcd514 100644 --- a/receiver/elasticsearchreceiver/internal/metadata/generated_metrics_v2.go +++ b/receiver/elasticsearchreceiver/internal/metadata/generated_metrics_v2.go @@ -1634,7 +1634,10 @@ func newMetricJvmThreadsCount(settings MetricSettings) metricJvmThreadsCount { // MetricsBuilder provides an interface for scrapers to report metrics while taking care of all the transformations // required to produce metric representation defined in metadata and user settings. type MetricsBuilder struct { - startTime pdata.Timestamp + startTime pdata.Timestamp // start time that will be applied to all recorded data points. + metricsCapacity int // maximum observed number of metrics per resource. + resourceCapacity int // maximum observed number of resource attributes. + metricsBuffer pdata.Metrics // accumulates metrics data before emitting. 
metricElasticsearchClusterDataNodes metricElasticsearchClusterDataNodes metricElasticsearchClusterHealth metricElasticsearchClusterHealth metricElasticsearchClusterNodes metricElasticsearchClusterNodes @@ -1679,6 +1682,7 @@ func WithStartTime(startTime pdata.Timestamp) metricBuilderOption { func NewMetricsBuilder(settings MetricsSettings, options ...metricBuilderOption) *MetricsBuilder { mb := &MetricsBuilder{ startTime: pdata.NewTimestampFromTime(time.Now()), + metricsBuffer: pdata.NewMetrics(), metricElasticsearchClusterDataNodes: newMetricElasticsearchClusterDataNodes(settings.ElasticsearchClusterDataNodes), metricElasticsearchClusterHealth: newMetricElasticsearchClusterHealth(settings.ElasticsearchClusterHealth), metricElasticsearchClusterNodes: newMetricElasticsearchClusterNodes(settings.ElasticsearchClusterNodes), @@ -1715,39 +1719,75 @@ func NewMetricsBuilder(settings MetricsSettings, options ...metricBuilderOption) return mb } -// Emit appends generated metrics to a pdata.MetricsSlice and updates the internal state to be ready for recording -// another set of data points. This function will be doing all transformations required to produce metric representation -// defined in metadata and user settings, e.g. delta/cumulative translation. -func (mb *MetricsBuilder) Emit(metrics pdata.MetricSlice) { - mb.metricElasticsearchClusterDataNodes.emit(metrics) - mb.metricElasticsearchClusterHealth.emit(metrics) - mb.metricElasticsearchClusterNodes.emit(metrics) - mb.metricElasticsearchClusterShards.emit(metrics) - mb.metricElasticsearchNodeCacheEvictions.emit(metrics) - mb.metricElasticsearchNodeCacheMemoryUsage.emit(metrics) - mb.metricElasticsearchNodeClusterConnections.emit(metrics) - mb.metricElasticsearchNodeClusterIo.emit(metrics) - mb.metricElasticsearchNodeDocuments.emit(metrics) - mb.metricElasticsearchNodeFsDiskAvailable.emit(metrics) - mb.metricElasticsearchNodeHTTPConnections.emit(metrics) - mb.metricElasticsearchNodeOpenFiles.emit(metrics) - mb.metricElasticsearchNodeOperationsCompleted.emit(metrics) - mb.metricElasticsearchNodeOperationsTime.emit(metrics) - mb.metricElasticsearchNodeShardsSize.emit(metrics) - mb.metricElasticsearchNodeThreadPoolTasksFinished.emit(metrics) - mb.metricElasticsearchNodeThreadPoolTasksQueued.emit(metrics) - mb.metricElasticsearchNodeThreadPoolThreads.emit(metrics) - mb.metricJvmClassesLoaded.emit(metrics) - mb.metricJvmGcCollectionsCount.emit(metrics) - mb.metricJvmGcCollectionsElapsed.emit(metrics) - mb.metricJvmMemoryHeapCommitted.emit(metrics) - mb.metricJvmMemoryHeapMax.emit(metrics) - mb.metricJvmMemoryHeapUsed.emit(metrics) - mb.metricJvmMemoryNonheapCommitted.emit(metrics) - mb.metricJvmMemoryNonheapUsed.emit(metrics) - mb.metricJvmMemoryPoolMax.emit(metrics) - mb.metricJvmMemoryPoolUsed.emit(metrics) - mb.metricJvmThreadsCount.emit(metrics) +// updateCapacity updates max length of metrics and resource attributes that will be used for the slice capacity. +func (mb *MetricsBuilder) updateCapacity(rm pdata.ResourceMetrics) { + if mb.metricsCapacity < rm.InstrumentationLibraryMetrics().At(0).Metrics().Len() { + mb.metricsCapacity = rm.InstrumentationLibraryMetrics().At(0).Metrics().Len() + } + if mb.resourceCapacity < rm.Resource().Attributes().Len() { + mb.resourceCapacity = rm.Resource().Attributes().Len() + } +} + +// ResourceOption applies changes to provided resource. 
+type ResourceOption func(pdata.Resource) + +// EmitForResource saves all the generated metrics under a new resource and updates the internal state to be ready for +// recording another set of data points as part of another resource. This function can be helpful when one scraper +// needs to emit metrics from several resources. Otherwise calling this function is not required, +// just `Emit` function can be called instead. Resource attributes should be provided as ResourceOption arguments. +func (mb *MetricsBuilder) EmitForResource(ro ...ResourceOption) { + rm := pdata.NewResourceMetrics() + rm.Resource().Attributes().EnsureCapacity(mb.resourceCapacity) + for _, op := range ro { + op(rm.Resource()) + } + ils := rm.InstrumentationLibraryMetrics().AppendEmpty() + ils.InstrumentationLibrary().SetName("otelcol/elasticsearchreceiver") + ils.Metrics().EnsureCapacity(mb.metricsCapacity) + mb.metricElasticsearchClusterDataNodes.emit(ils.Metrics()) + mb.metricElasticsearchClusterHealth.emit(ils.Metrics()) + mb.metricElasticsearchClusterNodes.emit(ils.Metrics()) + mb.metricElasticsearchClusterShards.emit(ils.Metrics()) + mb.metricElasticsearchNodeCacheEvictions.emit(ils.Metrics()) + mb.metricElasticsearchNodeCacheMemoryUsage.emit(ils.Metrics()) + mb.metricElasticsearchNodeClusterConnections.emit(ils.Metrics()) + mb.metricElasticsearchNodeClusterIo.emit(ils.Metrics()) + mb.metricElasticsearchNodeDocuments.emit(ils.Metrics()) + mb.metricElasticsearchNodeFsDiskAvailable.emit(ils.Metrics()) + mb.metricElasticsearchNodeHTTPConnections.emit(ils.Metrics()) + mb.metricElasticsearchNodeOpenFiles.emit(ils.Metrics()) + mb.metricElasticsearchNodeOperationsCompleted.emit(ils.Metrics()) + mb.metricElasticsearchNodeOperationsTime.emit(ils.Metrics()) + mb.metricElasticsearchNodeShardsSize.emit(ils.Metrics()) + mb.metricElasticsearchNodeThreadPoolTasksFinished.emit(ils.Metrics()) + mb.metricElasticsearchNodeThreadPoolTasksQueued.emit(ils.Metrics()) + mb.metricElasticsearchNodeThreadPoolThreads.emit(ils.Metrics()) + mb.metricJvmClassesLoaded.emit(ils.Metrics()) + mb.metricJvmGcCollectionsCount.emit(ils.Metrics()) + mb.metricJvmGcCollectionsElapsed.emit(ils.Metrics()) + mb.metricJvmMemoryHeapCommitted.emit(ils.Metrics()) + mb.metricJvmMemoryHeapMax.emit(ils.Metrics()) + mb.metricJvmMemoryHeapUsed.emit(ils.Metrics()) + mb.metricJvmMemoryNonheapCommitted.emit(ils.Metrics()) + mb.metricJvmMemoryNonheapUsed.emit(ils.Metrics()) + mb.metricJvmMemoryPoolMax.emit(ils.Metrics()) + mb.metricJvmMemoryPoolUsed.emit(ils.Metrics()) + mb.metricJvmThreadsCount.emit(ils.Metrics()) + if ils.Metrics().Len() > 0 { + mb.updateCapacity(rm) + rm.MoveTo(mb.metricsBuffer.ResourceMetrics().AppendEmpty()) + } +} + +// Emit returns all the metrics accumulated by the metrics builder and updates the internal state to be ready for +// recording another set of metrics. This function will be responsible for applying all the transformations required to +// produce metric representation defined in metadata and user settings, e.g. delta or cumulative. +func (mb *MetricsBuilder) Emit(ro ...ResourceOption) pdata.Metrics { + mb.EmitForResource(ro...) + metrics := pdata.NewMetrics() + mb.metricsBuffer.MoveTo(metrics) + return metrics } // RecordElasticsearchClusterDataNodesDataPoint adds a data point to elasticsearch.cluster.data_nodes metric. @@ -1904,16 +1944,6 @@ func (mb *MetricsBuilder) Reset(options ...metricBuilderOption) { } } -// NewMetricData creates new pdata.Metrics and sets the InstrumentationLibrary -// name on the ResourceMetrics. 
-func (mb *MetricsBuilder) NewMetricData() pdata.Metrics { - md := pdata.NewMetrics() - rm := md.ResourceMetrics().AppendEmpty() - ilm := rm.InstrumentationLibraryMetrics().AppendEmpty() - ilm.InstrumentationLibrary().SetName("otelcol/elasticsearchreceiver") - return md -} - // Attributes contains the possible metric attributes that can be used. var Attributes = struct { // CacheName (The name of cache.) diff --git a/receiver/hostmetricsreceiver/internal/scraper/cpuscraper/cpu_scraper.go b/receiver/hostmetricsreceiver/internal/scraper/cpuscraper/cpu_scraper.go index 031998ddbca2..0d3bd9b287b6 100644 --- a/receiver/hostmetricsreceiver/internal/scraper/cpuscraper/cpu_scraper.go +++ b/receiver/hostmetricsreceiver/internal/scraper/cpuscraper/cpu_scraper.go @@ -57,13 +57,10 @@ func (s *scraper) start(context.Context, component.Host) error { } func (s *scraper) scrape(_ context.Context) (pdata.Metrics, error) { - md := pdata.NewMetrics() - metrics := md.ResourceMetrics().AppendEmpty().InstrumentationLibraryMetrics().AppendEmpty().Metrics() - now := pdata.NewTimestampFromTime(s.now()) cpuTimes, err := s.times( /*percpu=*/ true) if err != nil { - return md, scrapererror.NewPartialScrapeError(err, metricsLen) + return pdata.NewMetrics(), scrapererror.NewPartialScrapeError(err, metricsLen) } for _, cpuTime := range cpuTimes { @@ -72,9 +69,8 @@ func (s *scraper) scrape(_ context.Context) (pdata.Metrics, error) { err = s.ucal.CalculateAndRecord(now, cpuTimes, s.recordCPUUtilization) if err != nil { - return md, scrapererror.NewPartialScrapeError(err, metricsLen) + return pdata.NewMetrics(), scrapererror.NewPartialScrapeError(err, metricsLen) } - s.mb.Emit(metrics) - return md, nil + return s.mb.Emit(), nil } diff --git a/receiver/hostmetricsreceiver/internal/scraper/cpuscraper/cpu_scraper_test.go b/receiver/hostmetricsreceiver/internal/scraper/cpuscraper/cpu_scraper_test.go index 775beb4d593c..196b86882860 100644 --- a/receiver/hostmetricsreceiver/internal/scraper/cpuscraper/cpu_scraper_test.go +++ b/receiver/hostmetricsreceiver/internal/scraper/cpuscraper/cpu_scraper_test.go @@ -197,6 +197,10 @@ func TestScrape_CpuUtilization(t *testing.T) { require.NoError(t, err, "Failed to scrape metrics: %v", err) assert.Equal(t, test.expectedMetricCount, md.MetricCount()) + if md.ResourceMetrics().Len() == 0 { + return + } + metrics := md.ResourceMetrics().At(0).InstrumentationLibraryMetrics().At(0).Metrics() internal.AssertSameTimeStampForAllMetrics(t, metrics) if test.times { @@ -303,7 +307,7 @@ func TestScrape_CpuUtilizationStandard(t *testing.T) { require.NoError(t, err) //no metrics in the first scrape if len(scrapeData.expectedDps) == 0 { - assert.Equal(t, 0, md.ResourceMetrics().At(0).InstrumentationLibraryMetrics().At(0).Metrics().Len()) + assert.Equal(t, 0, md.ResourceMetrics().Len()) continue } diff --git a/receiver/hostmetricsreceiver/internal/scraper/cpuscraper/documentation.md b/receiver/hostmetricsreceiver/internal/scraper/cpuscraper/documentation.md index bb13312ca201..a1359673dc08 100644 --- a/receiver/hostmetricsreceiver/internal/scraper/cpuscraper/documentation.md +++ b/receiver/hostmetricsreceiver/internal/scraper/cpuscraper/documentation.md @@ -20,7 +20,7 @@ metrics: enabled: ``` -## Attributes +## Metric attributes | Name | Description | | ---- | ----------- | diff --git a/receiver/hostmetricsreceiver/internal/scraper/cpuscraper/internal/metadata/generated_metrics_v2.go b/receiver/hostmetricsreceiver/internal/scraper/cpuscraper/internal/metadata/generated_metrics_v2.go index 
378a02ba77da..941206c54829 100644 --- a/receiver/hostmetricsreceiver/internal/scraper/cpuscraper/internal/metadata/generated_metrics_v2.go +++ b/receiver/hostmetricsreceiver/internal/scraper/cpuscraper/internal/metadata/generated_metrics_v2.go @@ -139,7 +139,10 @@ func newMetricSystemCPUUtilization(settings MetricSettings) metricSystemCPUUtili // MetricsBuilder provides an interface for scrapers to report metrics while taking care of all the transformations // required to produce metric representation defined in metadata and user settings. type MetricsBuilder struct { - startTime pdata.Timestamp + startTime pdata.Timestamp // start time that will be applied to all recorded data points. + metricsCapacity int // maximum observed number of metrics per resource. + resourceCapacity int // maximum observed number of resource attributes. + metricsBuffer pdata.Metrics // accumulates metrics data before emitting. metricSystemCPUTime metricSystemCPUTime metricSystemCPUUtilization metricSystemCPUUtilization } @@ -157,6 +160,7 @@ func WithStartTime(startTime pdata.Timestamp) metricBuilderOption { func NewMetricsBuilder(settings MetricsSettings, options ...metricBuilderOption) *MetricsBuilder { mb := &MetricsBuilder{ startTime: pdata.NewTimestampFromTime(time.Now()), + metricsBuffer: pdata.NewMetrics(), metricSystemCPUTime: newMetricSystemCPUTime(settings.SystemCPUTime), metricSystemCPUUtilization: newMetricSystemCPUUtilization(settings.SystemCPUUtilization), } @@ -166,12 +170,48 @@ func NewMetricsBuilder(settings MetricsSettings, options ...metricBuilderOption) return mb } -// Emit appends generated metrics to a pdata.MetricsSlice and updates the internal state to be ready for recording -// another set of data points. This function will be doing all transformations required to produce metric representation -// defined in metadata and user settings, e.g. delta/cumulative translation. -func (mb *MetricsBuilder) Emit(metrics pdata.MetricSlice) { - mb.metricSystemCPUTime.emit(metrics) - mb.metricSystemCPUUtilization.emit(metrics) +// updateCapacity updates max length of metrics and resource attributes that will be used for the slice capacity. +func (mb *MetricsBuilder) updateCapacity(rm pdata.ResourceMetrics) { + if mb.metricsCapacity < rm.InstrumentationLibraryMetrics().At(0).Metrics().Len() { + mb.metricsCapacity = rm.InstrumentationLibraryMetrics().At(0).Metrics().Len() + } + if mb.resourceCapacity < rm.Resource().Attributes().Len() { + mb.resourceCapacity = rm.Resource().Attributes().Len() + } +} + +// ResourceOption applies changes to provided resource. +type ResourceOption func(pdata.Resource) + +// EmitForResource saves all the generated metrics under a new resource and updates the internal state to be ready for +// recording another set of data points as part of another resource. This function can be helpful when one scraper +// needs to emit metrics from several resources. Otherwise calling this function is not required, +// just `Emit` function can be called instead. Resource attributes should be provided as ResourceOption arguments. 
+func (mb *MetricsBuilder) EmitForResource(ro ...ResourceOption) { + rm := pdata.NewResourceMetrics() + rm.Resource().Attributes().EnsureCapacity(mb.resourceCapacity) + for _, op := range ro { + op(rm.Resource()) + } + ils := rm.InstrumentationLibraryMetrics().AppendEmpty() + ils.InstrumentationLibrary().SetName("otelcol/hostmetricsreceiver/cpu") + ils.Metrics().EnsureCapacity(mb.metricsCapacity) + mb.metricSystemCPUTime.emit(ils.Metrics()) + mb.metricSystemCPUUtilization.emit(ils.Metrics()) + if ils.Metrics().Len() > 0 { + mb.updateCapacity(rm) + rm.MoveTo(mb.metricsBuffer.ResourceMetrics().AppendEmpty()) + } +} + +// Emit returns all the metrics accumulated by the metrics builder and updates the internal state to be ready for +// recording another set of metrics. This function will be responsible for applying all the transformations required to +// produce metric representation defined in metadata and user settings, e.g. delta or cumulative. +func (mb *MetricsBuilder) Emit(ro ...ResourceOption) pdata.Metrics { + mb.EmitForResource(ro...) + metrics := pdata.NewMetrics() + mb.metricsBuffer.MoveTo(metrics) + return metrics } // RecordSystemCPUTimeDataPoint adds a data point to system.cpu.time metric. @@ -193,16 +233,6 @@ func (mb *MetricsBuilder) Reset(options ...metricBuilderOption) { } } -// NewMetricData creates new pdata.Metrics and sets the InstrumentationLibrary -// name on the ResourceMetrics. -func (mb *MetricsBuilder) NewMetricData() pdata.Metrics { - md := pdata.NewMetrics() - rm := md.ResourceMetrics().AppendEmpty() - ilm := rm.InstrumentationLibraryMetrics().AppendEmpty() - ilm.InstrumentationLibrary().SetName("otelcol/hostmetricsreceiver/cpu") - return md -} - // Attributes contains the possible metric attributes that can be used. var Attributes = struct { // Cpu (CPU number starting at 0.) 
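For scrapers that report several resources, the generated `EmitForResource` can be called once per resource before the final `Emit`. Continuing the hypothetical `exampleScraper` sketch from earlier (the `WithExampleNodeName` option and `RecordExampleRequestsDataPoint` helper are placeholders; the `EmitForResource`/`Emit` flow matches the generated builders in this patch):

```go
// scrapeAllNodes shows the multi-resource flow. Each EmitForResource call moves the data
// points recorded so far into the builder's internal buffer under a resource carrying the
// attributes set by the ResourceOption arguments (for real receivers, generated helpers
// such as WithCouchdbNodeName in the couchdbreceiver above).
func (s *exampleScraper) scrapeAllNodes(nodes []string) pdata.Metrics {
	now := pdata.NewTimestampFromTime(time.Now())
	for _, node := range nodes {
		s.mb.RecordExampleRequestsDataPoint(now, 1) // record this node's data points
		s.mb.EmitForResource(metadata.WithExampleNodeName(node))
	}
	// Emit drains the buffer into a single pdata.Metrics with one ResourceMetrics per node;
	// the extra EmitForResource it performs internally is a no-op when nothing was recorded.
	return s.mb.Emit()
}
```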
diff --git a/receiver/hostmetricsreceiver/internal/scraper/diskscraper/disk_scraper_others.go b/receiver/hostmetricsreceiver/internal/scraper/diskscraper/disk_scraper_others.go index 0c5694ccba35..951891ee7777 100644 --- a/receiver/hostmetricsreceiver/internal/scraper/diskscraper/disk_scraper_others.go +++ b/receiver/hostmetricsreceiver/internal/scraper/diskscraper/disk_scraper_others.go @@ -85,30 +85,25 @@ func (s *scraper) start(context.Context, component.Host) error { } func (s *scraper) scrape(_ context.Context) (pdata.Metrics, error) { - md := pdata.NewMetrics() - metrics := md.ResourceMetrics().AppendEmpty().InstrumentationLibraryMetrics().AppendEmpty().Metrics() - now := pdata.NewTimestampFromTime(time.Now()) ioCounters, err := s.ioCounters() if err != nil { - return md, scrapererror.NewPartialScrapeError(err, metricsLen) + return pdata.NewMetrics(), scrapererror.NewPartialScrapeError(err, metricsLen) } // filter devices by name ioCounters = s.filterByDevice(ioCounters) if len(ioCounters) > 0 { - metrics.EnsureCapacity(metricsLen) s.recordDiskIOMetric(now, ioCounters) s.recordDiskOperationsMetric(now, ioCounters) s.recordDiskIOTimeMetric(now, ioCounters) s.recordDiskOperationTimeMetric(now, ioCounters) s.recordDiskPendingOperationsMetric(now, ioCounters) s.recordSystemSpecificDataPoints(now, ioCounters) - s.mb.Emit(metrics) } - return md, nil + return s.mb.Emit(), nil } func (s *scraper) recordDiskIOMetric(now pdata.Timestamp, ioCounters map[string]disk.IOCountersStat) { diff --git a/receiver/hostmetricsreceiver/internal/scraper/diskscraper/disk_scraper_test.go b/receiver/hostmetricsreceiver/internal/scraper/diskscraper/disk_scraper_test.go index 7f18995a8479..c328181a0180 100644 --- a/receiver/hostmetricsreceiver/internal/scraper/diskscraper/disk_scraper_test.go +++ b/receiver/hostmetricsreceiver/internal/scraper/diskscraper/disk_scraper_test.go @@ -120,6 +120,10 @@ func TestScrape(t *testing.T) { require.NoError(t, err, "Failed to scrape metrics: %v", err) assert.Equal(t, test.expectMetrics, md.MetricCount()) + if md.ResourceMetrics().Len() == 0 { + return + } + metrics := md.ResourceMetrics().At(0).InstrumentationLibraryMetrics().At(0).Metrics() assert.Equal(t, test.expectMetrics, metrics.Len()) diff --git a/receiver/hostmetricsreceiver/internal/scraper/diskscraper/disk_scraper_windows.go b/receiver/hostmetricsreceiver/internal/scraper/diskscraper/disk_scraper_windows.go index 167c31aa46b8..1179983fa565 100644 --- a/receiver/hostmetricsreceiver/internal/scraper/diskscraper/disk_scraper_windows.go +++ b/receiver/hostmetricsreceiver/internal/scraper/diskscraper/disk_scraper_windows.go @@ -98,19 +98,16 @@ func (s *scraper) start(context.Context, component.Host) error { } func (s *scraper) scrape(ctx context.Context) (pdata.Metrics, error) { - md := pdata.NewMetrics() - metrics := md.ResourceMetrics().AppendEmpty().InstrumentationLibraryMetrics().AppendEmpty().Metrics() - now := pdata.NewTimestampFromTime(time.Now()) counters, err := s.perfCounterScraper.Scrape() if err != nil { - return md, scrapererror.NewPartialScrapeError(err, metricsLen) + return pdata.NewMetrics(), scrapererror.NewPartialScrapeError(err, metricsLen) } logicalDiskObject, err := counters.GetObject(logicalDisk) if err != nil { - return md, scrapererror.NewPartialScrapeError(err, metricsLen) + return pdata.NewMetrics(), scrapererror.NewPartialScrapeError(err, metricsLen) } // filter devices by name @@ -118,20 +115,18 @@ func (s *scraper) scrape(ctx context.Context) (pdata.Metrics, error) { logicalDiskCounterValues, 
err := logicalDiskObject.GetValues(readsPerSec, writesPerSec, readBytesPerSec, writeBytesPerSec, idleTime, avgDiskSecsPerRead, avgDiskSecsPerWrite, queueLength) if err != nil { - return md, scrapererror.NewPartialScrapeError(err, metricsLen) + return pdata.NewMetrics(), scrapererror.NewPartialScrapeError(err, metricsLen) } if len(logicalDiskCounterValues) > 0 { - metrics.EnsureCapacity(metricsLen) s.recordDiskIOMetric(now, logicalDiskCounterValues) s.recordDiskOperationsMetric(now, logicalDiskCounterValues) s.recordDiskIOTimeMetric(now, logicalDiskCounterValues) s.recordDiskOperationTimeMetric(now, logicalDiskCounterValues) s.recordDiskPendingOperationsMetric(now, logicalDiskCounterValues) - s.mb.Emit(metrics) } - return md, nil + return s.mb.Emit(), nil } func (s *scraper) recordDiskIOMetric(now pdata.Timestamp, logicalDiskCounterValues []*perfcounters.CounterValues) { diff --git a/receiver/hostmetricsreceiver/internal/scraper/diskscraper/documentation.md b/receiver/hostmetricsreceiver/internal/scraper/diskscraper/documentation.md index 15f7af17e5ba..3a1143ca09d4 100644 --- a/receiver/hostmetricsreceiver/internal/scraper/diskscraper/documentation.md +++ b/receiver/hostmetricsreceiver/internal/scraper/diskscraper/documentation.md @@ -25,7 +25,7 @@ metrics: enabled: ``` -## Attributes +## Metric attributes | Name | Description | | ---- | ----------- | diff --git a/receiver/hostmetricsreceiver/internal/scraper/diskscraper/internal/metadata/generated_metrics_v2.go b/receiver/hostmetricsreceiver/internal/scraper/diskscraper/internal/metadata/generated_metrics_v2.go index d7dfb1366712..6eea6d4f9202 100644 --- a/receiver/hostmetricsreceiver/internal/scraper/diskscraper/internal/metadata/generated_metrics_v2.go +++ b/receiver/hostmetricsreceiver/internal/scraper/diskscraper/internal/metadata/generated_metrics_v2.go @@ -428,7 +428,10 @@ func newMetricSystemDiskWeightedIoTime(settings MetricSettings) metricSystemDisk // MetricsBuilder provides an interface for scrapers to report metrics while taking care of all the transformations // required to produce metric representation defined in metadata and user settings. type MetricsBuilder struct { - startTime pdata.Timestamp + startTime pdata.Timestamp // start time that will be applied to all recorded data points. + metricsCapacity int // maximum observed number of metrics per resource. + resourceCapacity int // maximum observed number of resource attributes. + metricsBuffer pdata.Metrics // accumulates metrics data before emitting. metricSystemDiskIo metricSystemDiskIo metricSystemDiskIoTime metricSystemDiskIoTime metricSystemDiskMerged metricSystemDiskMerged @@ -451,6 +454,7 @@ func WithStartTime(startTime pdata.Timestamp) metricBuilderOption { func NewMetricsBuilder(settings MetricsSettings, options ...metricBuilderOption) *MetricsBuilder { mb := &MetricsBuilder{ startTime: pdata.NewTimestampFromTime(time.Now()), + metricsBuffer: pdata.NewMetrics(), metricSystemDiskIo: newMetricSystemDiskIo(settings.SystemDiskIo), metricSystemDiskIoTime: newMetricSystemDiskIoTime(settings.SystemDiskIoTime), metricSystemDiskMerged: newMetricSystemDiskMerged(settings.SystemDiskMerged), @@ -465,17 +469,53 @@ func NewMetricsBuilder(settings MetricsSettings, options ...metricBuilderOption) return mb } -// Emit appends generated metrics to a pdata.MetricsSlice and updates the internal state to be ready for recording -// another set of data points. 
This function will be doing all transformations required to produce metric representation -// defined in metadata and user settings, e.g. delta/cumulative translation. -func (mb *MetricsBuilder) Emit(metrics pdata.MetricSlice) { - mb.metricSystemDiskIo.emit(metrics) - mb.metricSystemDiskIoTime.emit(metrics) - mb.metricSystemDiskMerged.emit(metrics) - mb.metricSystemDiskOperationTime.emit(metrics) - mb.metricSystemDiskOperations.emit(metrics) - mb.metricSystemDiskPendingOperations.emit(metrics) - mb.metricSystemDiskWeightedIoTime.emit(metrics) +// updateCapacity updates max length of metrics and resource attributes that will be used for the slice capacity. +func (mb *MetricsBuilder) updateCapacity(rm pdata.ResourceMetrics) { + if mb.metricsCapacity < rm.InstrumentationLibraryMetrics().At(0).Metrics().Len() { + mb.metricsCapacity = rm.InstrumentationLibraryMetrics().At(0).Metrics().Len() + } + if mb.resourceCapacity < rm.Resource().Attributes().Len() { + mb.resourceCapacity = rm.Resource().Attributes().Len() + } +} + +// ResourceOption applies changes to provided resource. +type ResourceOption func(pdata.Resource) + +// EmitForResource saves all the generated metrics under a new resource and updates the internal state to be ready for +// recording another set of data points as part of another resource. This function can be helpful when one scraper +// needs to emit metrics from several resources. Otherwise calling this function is not required, +// just `Emit` function can be called instead. Resource attributes should be provided as ResourceOption arguments. +func (mb *MetricsBuilder) EmitForResource(ro ...ResourceOption) { + rm := pdata.NewResourceMetrics() + rm.Resource().Attributes().EnsureCapacity(mb.resourceCapacity) + for _, op := range ro { + op(rm.Resource()) + } + ils := rm.InstrumentationLibraryMetrics().AppendEmpty() + ils.InstrumentationLibrary().SetName("otelcol/hostmetricsreceiver/disk") + ils.Metrics().EnsureCapacity(mb.metricsCapacity) + mb.metricSystemDiskIo.emit(ils.Metrics()) + mb.metricSystemDiskIoTime.emit(ils.Metrics()) + mb.metricSystemDiskMerged.emit(ils.Metrics()) + mb.metricSystemDiskOperationTime.emit(ils.Metrics()) + mb.metricSystemDiskOperations.emit(ils.Metrics()) + mb.metricSystemDiskPendingOperations.emit(ils.Metrics()) + mb.metricSystemDiskWeightedIoTime.emit(ils.Metrics()) + if ils.Metrics().Len() > 0 { + mb.updateCapacity(rm) + rm.MoveTo(mb.metricsBuffer.ResourceMetrics().AppendEmpty()) + } +} + +// Emit returns all the metrics accumulated by the metrics builder and updates the internal state to be ready for +// recording another set of metrics. This function will be responsible for applying all the transformations required to +// produce metric representation defined in metadata and user settings, e.g. delta or cumulative. +func (mb *MetricsBuilder) Emit(ro ...ResourceOption) pdata.Metrics { + mb.EmitForResource(ro...) + metrics := pdata.NewMetrics() + mb.metricsBuffer.MoveTo(metrics) + return metrics } // RecordSystemDiskIoDataPoint adds a data point to system.disk.io metric. @@ -522,16 +562,6 @@ func (mb *MetricsBuilder) Reset(options ...metricBuilderOption) { } } -// NewMetricData creates new pdata.Metrics and sets the InstrumentationLibrary -// name on the ResourceMetrics. 
-func (mb *MetricsBuilder) NewMetricData() pdata.Metrics { - md := pdata.NewMetrics() - rm := md.ResourceMetrics().AppendEmpty() - ilm := rm.InstrumentationLibraryMetrics().AppendEmpty() - ilm.InstrumentationLibrary().SetName("otelcol/hostmetricsreceiver/disk") - return md -} - // Attributes contains the possible metric attributes that can be used. var Attributes = struct { // Device (Name of the disk.) diff --git a/receiver/hostmetricsreceiver/internal/scraper/filesystemscraper/documentation.md b/receiver/hostmetricsreceiver/internal/scraper/filesystemscraper/documentation.md index 369cbf2e877a..3983180d9775 100644 --- a/receiver/hostmetricsreceiver/internal/scraper/filesystemscraper/documentation.md +++ b/receiver/hostmetricsreceiver/internal/scraper/filesystemscraper/documentation.md @@ -21,7 +21,7 @@ metrics: enabled: ``` -## Attributes +## Metric attributes | Name | Description | | ---- | ----------- | diff --git a/receiver/hostmetricsreceiver/internal/scraper/filesystemscraper/filesystem_scraper.go b/receiver/hostmetricsreceiver/internal/scraper/filesystemscraper/filesystem_scraper.go index 5a6eb091c231..736c2a17bfbf 100644 --- a/receiver/hostmetricsreceiver/internal/scraper/filesystemscraper/filesystem_scraper.go +++ b/receiver/hostmetricsreceiver/internal/scraper/filesystemscraper/filesystem_scraper.go @@ -72,15 +72,12 @@ func (s *scraper) start(context.Context, component.Host) error { } func (s *scraper) scrape(_ context.Context) (pdata.Metrics, error) { - md := pdata.NewMetrics() - metrics := md.ResourceMetrics().AppendEmpty().InstrumentationLibraryMetrics().AppendEmpty().Metrics() - now := pdata.NewTimestampFromTime(time.Now()) // omit logical (virtual) filesystems (not relevant for windows) partitions, err := s.partitions( /*all=*/ false) if err != nil { - return md, scrapererror.NewPartialScrapeError(err, metricsLen) + return pdata.NewMetrics(), scrapererror.NewPartialScrapeError(err, metricsLen) } var errors scrapererror.ScrapeErrors @@ -99,10 +96,8 @@ func (s *scraper) scrape(_ context.Context) (pdata.Metrics, error) { } if len(usages) > 0 { - metrics.EnsureCapacity(metricsLen) s.recordFileSystemUsageMetric(now, usages) s.recordSystemSpecificMetrics(now, usages) - s.mb.Emit(metrics) } err = errors.Combine() @@ -110,7 +105,7 @@ func (s *scraper) scrape(_ context.Context) (pdata.Metrics, error) { err = scrapererror.NewPartialScrapeError(err, metricsLen) } - return md, err + return s.mb.Emit(), err } func getMountMode(opts []string) string { diff --git a/receiver/hostmetricsreceiver/internal/scraper/filesystemscraper/internal/metadata/generated_metrics_v2.go b/receiver/hostmetricsreceiver/internal/scraper/filesystemscraper/internal/metadata/generated_metrics_v2.go index e71f9dc8eb1f..976aef981d34 100644 --- a/receiver/hostmetricsreceiver/internal/scraper/filesystemscraper/internal/metadata/generated_metrics_v2.go +++ b/receiver/hostmetricsreceiver/internal/scraper/filesystemscraper/internal/metadata/generated_metrics_v2.go @@ -205,7 +205,10 @@ func newMetricSystemFilesystemUtilization(settings MetricSettings) metricSystemF // MetricsBuilder provides an interface for scrapers to report metrics while taking care of all the transformations // required to produce metric representation defined in metadata and user settings. type MetricsBuilder struct { - startTime pdata.Timestamp + startTime pdata.Timestamp // start time that will be applied to all recorded data points. + metricsCapacity int // maximum observed number of metrics per resource. 
+ resourceCapacity int // maximum observed number of resource attributes. + metricsBuffer pdata.Metrics // accumulates metrics data before emitting. metricSystemFilesystemInodesUsage metricSystemFilesystemInodesUsage metricSystemFilesystemUsage metricSystemFilesystemUsage metricSystemFilesystemUtilization metricSystemFilesystemUtilization @@ -224,6 +227,7 @@ func WithStartTime(startTime pdata.Timestamp) metricBuilderOption { func NewMetricsBuilder(settings MetricsSettings, options ...metricBuilderOption) *MetricsBuilder { mb := &MetricsBuilder{ startTime: pdata.NewTimestampFromTime(time.Now()), + metricsBuffer: pdata.NewMetrics(), metricSystemFilesystemInodesUsage: newMetricSystemFilesystemInodesUsage(settings.SystemFilesystemInodesUsage), metricSystemFilesystemUsage: newMetricSystemFilesystemUsage(settings.SystemFilesystemUsage), metricSystemFilesystemUtilization: newMetricSystemFilesystemUtilization(settings.SystemFilesystemUtilization), @@ -234,13 +238,49 @@ func NewMetricsBuilder(settings MetricsSettings, options ...metricBuilderOption) return mb } -// Emit appends generated metrics to a pdata.MetricsSlice and updates the internal state to be ready for recording -// another set of data points. This function will be doing all transformations required to produce metric representation -// defined in metadata and user settings, e.g. delta/cumulative translation. -func (mb *MetricsBuilder) Emit(metrics pdata.MetricSlice) { - mb.metricSystemFilesystemInodesUsage.emit(metrics) - mb.metricSystemFilesystemUsage.emit(metrics) - mb.metricSystemFilesystemUtilization.emit(metrics) +// updateCapacity updates max length of metrics and resource attributes that will be used for the slice capacity. +func (mb *MetricsBuilder) updateCapacity(rm pdata.ResourceMetrics) { + if mb.metricsCapacity < rm.InstrumentationLibraryMetrics().At(0).Metrics().Len() { + mb.metricsCapacity = rm.InstrumentationLibraryMetrics().At(0).Metrics().Len() + } + if mb.resourceCapacity < rm.Resource().Attributes().Len() { + mb.resourceCapacity = rm.Resource().Attributes().Len() + } +} + +// ResourceOption applies changes to provided resource. +type ResourceOption func(pdata.Resource) + +// EmitForResource saves all the generated metrics under a new resource and updates the internal state to be ready for +// recording another set of data points as part of another resource. This function can be helpful when one scraper +// needs to emit metrics from several resources. Otherwise calling this function is not required, +// just `Emit` function can be called instead. Resource attributes should be provided as ResourceOption arguments. +func (mb *MetricsBuilder) EmitForResource(ro ...ResourceOption) { + rm := pdata.NewResourceMetrics() + rm.Resource().Attributes().EnsureCapacity(mb.resourceCapacity) + for _, op := range ro { + op(rm.Resource()) + } + ils := rm.InstrumentationLibraryMetrics().AppendEmpty() + ils.InstrumentationLibrary().SetName("otelcol/hostmetricsreceiver/filesystem") + ils.Metrics().EnsureCapacity(mb.metricsCapacity) + mb.metricSystemFilesystemInodesUsage.emit(ils.Metrics()) + mb.metricSystemFilesystemUsage.emit(ils.Metrics()) + mb.metricSystemFilesystemUtilization.emit(ils.Metrics()) + if ils.Metrics().Len() > 0 { + mb.updateCapacity(rm) + rm.MoveTo(mb.metricsBuffer.ResourceMetrics().AppendEmpty()) + } +} + +// Emit returns all the metrics accumulated by the metrics builder and updates the internal state to be ready for +// recording another set of metrics. 
This function will be responsible for applying all the transformations required to +// produce metric representation defined in metadata and user settings, e.g. delta or cumulative. +func (mb *MetricsBuilder) Emit(ro ...ResourceOption) pdata.Metrics { + mb.EmitForResource(ro...) + metrics := pdata.NewMetrics() + mb.metricsBuffer.MoveTo(metrics) + return metrics } // RecordSystemFilesystemInodesUsageDataPoint adds a data point to system.filesystem.inodes.usage metric. @@ -267,16 +307,6 @@ func (mb *MetricsBuilder) Reset(options ...metricBuilderOption) { } } -// NewMetricData creates new pdata.Metrics and sets the InstrumentationLibrary -// name on the ResourceMetrics. -func (mb *MetricsBuilder) NewMetricData() pdata.Metrics { - md := pdata.NewMetrics() - rm := md.ResourceMetrics().AppendEmpty() - ilm := rm.InstrumentationLibraryMetrics().AppendEmpty() - ilm.InstrumentationLibrary().SetName("otelcol/hostmetricsreceiver/filesystem") - return md -} - // Attributes contains the possible metric attributes that can be used. var Attributes = struct { // Device (Identifier of the filesystem.) diff --git a/receiver/hostmetricsreceiver/internal/scraper/loadscraper/documentation.md b/receiver/hostmetricsreceiver/internal/scraper/loadscraper/documentation.md index 23fad6e4d767..a445bc4a12f7 100644 --- a/receiver/hostmetricsreceiver/internal/scraper/loadscraper/documentation.md +++ b/receiver/hostmetricsreceiver/internal/scraper/loadscraper/documentation.md @@ -21,7 +21,7 @@ metrics: enabled: ``` -## Attributes +## Metric attributes | Name | Description | | ---- | ----------- | diff --git a/receiver/hostmetricsreceiver/internal/scraper/loadscraper/internal/metadata/generated_metrics_v2.go b/receiver/hostmetricsreceiver/internal/scraper/loadscraper/internal/metadata/generated_metrics_v2.go index 7a25e179b159..8b65ce8a1153 100644 --- a/receiver/hostmetricsreceiver/internal/scraper/loadscraper/internal/metadata/generated_metrics_v2.go +++ b/receiver/hostmetricsreceiver/internal/scraper/loadscraper/internal/metadata/generated_metrics_v2.go @@ -184,7 +184,10 @@ func newMetricSystemCPULoadAverage5m(settings MetricSettings) metricSystemCPULoa // MetricsBuilder provides an interface for scrapers to report metrics while taking care of all the transformations // required to produce metric representation defined in metadata and user settings. type MetricsBuilder struct { - startTime pdata.Timestamp + startTime pdata.Timestamp // start time that will be applied to all recorded data points. + metricsCapacity int // maximum observed number of metrics per resource. + resourceCapacity int // maximum observed number of resource attributes. + metricsBuffer pdata.Metrics // accumulates metrics data before emitting. 
metricSystemCPULoadAverage15m metricSystemCPULoadAverage15m metricSystemCPULoadAverage1m metricSystemCPULoadAverage1m metricSystemCPULoadAverage5m metricSystemCPULoadAverage5m @@ -203,6 +206,7 @@ func WithStartTime(startTime pdata.Timestamp) metricBuilderOption { func NewMetricsBuilder(settings MetricsSettings, options ...metricBuilderOption) *MetricsBuilder { mb := &MetricsBuilder{ startTime: pdata.NewTimestampFromTime(time.Now()), + metricsBuffer: pdata.NewMetrics(), metricSystemCPULoadAverage15m: newMetricSystemCPULoadAverage15m(settings.SystemCPULoadAverage15m), metricSystemCPULoadAverage1m: newMetricSystemCPULoadAverage1m(settings.SystemCPULoadAverage1m), metricSystemCPULoadAverage5m: newMetricSystemCPULoadAverage5m(settings.SystemCPULoadAverage5m), @@ -213,13 +217,49 @@ func NewMetricsBuilder(settings MetricsSettings, options ...metricBuilderOption) return mb } -// Emit appends generated metrics to a pdata.MetricsSlice and updates the internal state to be ready for recording -// another set of data points. This function will be doing all transformations required to produce metric representation -// defined in metadata and user settings, e.g. delta/cumulative translation. -func (mb *MetricsBuilder) Emit(metrics pdata.MetricSlice) { - mb.metricSystemCPULoadAverage15m.emit(metrics) - mb.metricSystemCPULoadAverage1m.emit(metrics) - mb.metricSystemCPULoadAverage5m.emit(metrics) +// updateCapacity updates max length of metrics and resource attributes that will be used for the slice capacity. +func (mb *MetricsBuilder) updateCapacity(rm pdata.ResourceMetrics) { + if mb.metricsCapacity < rm.InstrumentationLibraryMetrics().At(0).Metrics().Len() { + mb.metricsCapacity = rm.InstrumentationLibraryMetrics().At(0).Metrics().Len() + } + if mb.resourceCapacity < rm.Resource().Attributes().Len() { + mb.resourceCapacity = rm.Resource().Attributes().Len() + } +} + +// ResourceOption applies changes to provided resource. +type ResourceOption func(pdata.Resource) + +// EmitForResource saves all the generated metrics under a new resource and updates the internal state to be ready for +// recording another set of data points as part of another resource. This function can be helpful when one scraper +// needs to emit metrics from several resources. Otherwise calling this function is not required, +// just `Emit` function can be called instead. Resource attributes should be provided as ResourceOption arguments. +func (mb *MetricsBuilder) EmitForResource(ro ...ResourceOption) { + rm := pdata.NewResourceMetrics() + rm.Resource().Attributes().EnsureCapacity(mb.resourceCapacity) + for _, op := range ro { + op(rm.Resource()) + } + ils := rm.InstrumentationLibraryMetrics().AppendEmpty() + ils.InstrumentationLibrary().SetName("otelcol/hostmetricsreceiver/load") + ils.Metrics().EnsureCapacity(mb.metricsCapacity) + mb.metricSystemCPULoadAverage15m.emit(ils.Metrics()) + mb.metricSystemCPULoadAverage1m.emit(ils.Metrics()) + mb.metricSystemCPULoadAverage5m.emit(ils.Metrics()) + if ils.Metrics().Len() > 0 { + mb.updateCapacity(rm) + rm.MoveTo(mb.metricsBuffer.ResourceMetrics().AppendEmpty()) + } +} + +// Emit returns all the metrics accumulated by the metrics builder and updates the internal state to be ready for +// recording another set of metrics. This function will be responsible for applying all the transformations required to +// produce metric representation defined in metadata and user settings, e.g. delta or cumulative. 
+func (mb *MetricsBuilder) Emit(ro ...ResourceOption) pdata.Metrics { + mb.EmitForResource(ro...) + metrics := pdata.NewMetrics() + mb.metricsBuffer.MoveTo(metrics) + return metrics } // RecordSystemCPULoadAverage15mDataPoint adds a data point to system.cpu.load_average.15m metric. @@ -246,16 +286,6 @@ func (mb *MetricsBuilder) Reset(options ...metricBuilderOption) { } } -// NewMetricData creates new pdata.Metrics and sets the InstrumentationLibrary -// name on the ResourceMetrics. -func (mb *MetricsBuilder) NewMetricData() pdata.Metrics { - md := pdata.NewMetrics() - rm := md.ResourceMetrics().AppendEmpty() - ilm := rm.InstrumentationLibraryMetrics().AppendEmpty() - ilm.InstrumentationLibrary().SetName("otelcol/hostmetricsreceiver/load") - return md -} - // Attributes contains the possible metric attributes that can be used. var Attributes = struct { }{} diff --git a/receiver/hostmetricsreceiver/internal/scraper/loadscraper/load_scraper.go b/receiver/hostmetricsreceiver/internal/scraper/loadscraper/load_scraper.go index 1d7eaa151f1a..d73ffd1271c6 100644 --- a/receiver/hostmetricsreceiver/internal/scraper/loadscraper/load_scraper.go +++ b/receiver/hostmetricsreceiver/internal/scraper/loadscraper/load_scraper.go @@ -65,13 +65,10 @@ func (s *scraper) shutdown(ctx context.Context) error { // scrape func (s *scraper) scrape(_ context.Context) (pdata.Metrics, error) { - md := pdata.NewMetrics() - metrics := md.ResourceMetrics().AppendEmpty().InstrumentationLibraryMetrics().AppendEmpty().Metrics() - now := pdata.NewTimestampFromTime(time.Now()) avgLoadValues, err := s.load() if err != nil { - return md, scrapererror.NewPartialScrapeError(err, metricsLen) + return pdata.NewMetrics(), scrapererror.NewPartialScrapeError(err, metricsLen) } if s.config.CPUAverage { @@ -81,11 +78,9 @@ func (s *scraper) scrape(_ context.Context) (pdata.Metrics, error) { avgLoadValues.Load15 = avgLoadValues.Load1 / divisor } - metrics.EnsureCapacity(metricsLen) - s.mb.RecordSystemCPULoadAverage1mDataPoint(now, avgLoadValues.Load1) s.mb.RecordSystemCPULoadAverage5mDataPoint(now, avgLoadValues.Load5) s.mb.RecordSystemCPULoadAverage15mDataPoint(now, avgLoadValues.Load15) - s.mb.Emit(metrics) - return md, nil + + return s.mb.Emit(), nil } diff --git a/receiver/hostmetricsreceiver/internal/scraper/memoryscraper/documentation.md b/receiver/hostmetricsreceiver/internal/scraper/memoryscraper/documentation.md index 82b5deb8e38e..81cf3f554928 100644 --- a/receiver/hostmetricsreceiver/internal/scraper/memoryscraper/documentation.md +++ b/receiver/hostmetricsreceiver/internal/scraper/memoryscraper/documentation.md @@ -20,7 +20,7 @@ metrics: enabled: ``` -## Attributes +## Metric attributes | Name | Description | | ---- | ----------- | diff --git a/receiver/hostmetricsreceiver/internal/scraper/memoryscraper/internal/metadata/generated_metrics_v2.go b/receiver/hostmetricsreceiver/internal/scraper/memoryscraper/internal/metadata/generated_metrics_v2.go index 420880a3b819..87d7fa16a81a 100644 --- a/receiver/hostmetricsreceiver/internal/scraper/memoryscraper/internal/metadata/generated_metrics_v2.go +++ b/receiver/hostmetricsreceiver/internal/scraper/memoryscraper/internal/metadata/generated_metrics_v2.go @@ -137,7 +137,10 @@ func newMetricSystemMemoryUtilization(settings MetricSettings) metricSystemMemor // MetricsBuilder provides an interface for scrapers to report metrics while taking care of all the transformations // required to produce metric representation defined in metadata and user settings. 
type MetricsBuilder struct { - startTime pdata.Timestamp + startTime pdata.Timestamp // start time that will be applied to all recorded data points. + metricsCapacity int // maximum observed number of metrics per resource. + resourceCapacity int // maximum observed number of resource attributes. + metricsBuffer pdata.Metrics // accumulates metrics data before emitting. metricSystemMemoryUsage metricSystemMemoryUsage metricSystemMemoryUtilization metricSystemMemoryUtilization } @@ -155,6 +158,7 @@ func WithStartTime(startTime pdata.Timestamp) metricBuilderOption { func NewMetricsBuilder(settings MetricsSettings, options ...metricBuilderOption) *MetricsBuilder { mb := &MetricsBuilder{ startTime: pdata.NewTimestampFromTime(time.Now()), + metricsBuffer: pdata.NewMetrics(), metricSystemMemoryUsage: newMetricSystemMemoryUsage(settings.SystemMemoryUsage), metricSystemMemoryUtilization: newMetricSystemMemoryUtilization(settings.SystemMemoryUtilization), } @@ -164,12 +168,48 @@ func NewMetricsBuilder(settings MetricsSettings, options ...metricBuilderOption) return mb } -// Emit appends generated metrics to a pdata.MetricsSlice and updates the internal state to be ready for recording -// another set of data points. This function will be doing all transformations required to produce metric representation -// defined in metadata and user settings, e.g. delta/cumulative translation. -func (mb *MetricsBuilder) Emit(metrics pdata.MetricSlice) { - mb.metricSystemMemoryUsage.emit(metrics) - mb.metricSystemMemoryUtilization.emit(metrics) +// updateCapacity updates max length of metrics and resource attributes that will be used for the slice capacity. +func (mb *MetricsBuilder) updateCapacity(rm pdata.ResourceMetrics) { + if mb.metricsCapacity < rm.InstrumentationLibraryMetrics().At(0).Metrics().Len() { + mb.metricsCapacity = rm.InstrumentationLibraryMetrics().At(0).Metrics().Len() + } + if mb.resourceCapacity < rm.Resource().Attributes().Len() { + mb.resourceCapacity = rm.Resource().Attributes().Len() + } +} + +// ResourceOption applies changes to provided resource. +type ResourceOption func(pdata.Resource) + +// EmitForResource saves all the generated metrics under a new resource and updates the internal state to be ready for +// recording another set of data points as part of another resource. This function can be helpful when one scraper +// needs to emit metrics from several resources. Otherwise calling this function is not required, +// just `Emit` function can be called instead. Resource attributes should be provided as ResourceOption arguments. +func (mb *MetricsBuilder) EmitForResource(ro ...ResourceOption) { + rm := pdata.NewResourceMetrics() + rm.Resource().Attributes().EnsureCapacity(mb.resourceCapacity) + for _, op := range ro { + op(rm.Resource()) + } + ils := rm.InstrumentationLibraryMetrics().AppendEmpty() + ils.InstrumentationLibrary().SetName("otelcol/hostmetricsreceiver/memory") + ils.Metrics().EnsureCapacity(mb.metricsCapacity) + mb.metricSystemMemoryUsage.emit(ils.Metrics()) + mb.metricSystemMemoryUtilization.emit(ils.Metrics()) + if ils.Metrics().Len() > 0 { + mb.updateCapacity(rm) + rm.MoveTo(mb.metricsBuffer.ResourceMetrics().AppendEmpty()) + } +} + +// Emit returns all the metrics accumulated by the metrics builder and updates the internal state to be ready for +// recording another set of metrics. This function will be responsible for applying all the transformations required to +// produce metric representation defined in metadata and user settings, e.g. delta or cumulative. 
+func (mb *MetricsBuilder) Emit(ro ...ResourceOption) pdata.Metrics { + mb.EmitForResource(ro...) + metrics := pdata.NewMetrics() + mb.metricsBuffer.MoveTo(metrics) + return metrics } // RecordSystemMemoryUsageDataPoint adds a data point to system.memory.usage metric. @@ -191,16 +231,6 @@ func (mb *MetricsBuilder) Reset(options ...metricBuilderOption) { } } -// NewMetricData creates new pdata.Metrics and sets the InstrumentationLibrary -// name on the ResourceMetrics. -func (mb *MetricsBuilder) NewMetricData() pdata.Metrics { - md := pdata.NewMetrics() - rm := md.ResourceMetrics().AppendEmpty() - ilm := rm.InstrumentationLibraryMetrics().AppendEmpty() - ilm.InstrumentationLibrary().SetName("otelcol/hostmetricsreceiver/memory") - return md -} - // Attributes contains the possible metric attributes that can be used. var Attributes = struct { // State (Breakdown of memory usage by type.) diff --git a/receiver/hostmetricsreceiver/internal/scraper/memoryscraper/memory_scraper.go b/receiver/hostmetricsreceiver/internal/scraper/memoryscraper/memory_scraper.go index 0b522c26c33b..52edd5131105 100644 --- a/receiver/hostmetricsreceiver/internal/scraper/memoryscraper/memory_scraper.go +++ b/receiver/hostmetricsreceiver/internal/scraper/memoryscraper/memory_scraper.go @@ -59,23 +59,20 @@ func (s *scraper) start(context.Context, component.Host) error { } func (s *scraper) scrape(_ context.Context) (pdata.Metrics, error) { - md := pdata.NewMetrics() - metrics := md.ResourceMetrics().AppendEmpty().InstrumentationLibraryMetrics().AppendEmpty().Metrics() - now := pdata.NewTimestampFromTime(time.Now()) memInfo, err := s.virtualMemory() if err != nil { - return md, scrapererror.NewPartialScrapeError(err, metricsLen) + return pdata.NewMetrics(), scrapererror.NewPartialScrapeError(err, metricsLen) } if memInfo != nil { - metrics.EnsureCapacity(metricsLen) s.recordMemoryUsageMetric(now, memInfo) if memInfo.Total <= 0 { - return md, scrapererror.NewPartialScrapeError(fmt.Errorf("%w: %d", ErrInvalidTotalMem, memInfo.Total), metricsLen) + return pdata.NewMetrics(), scrapererror.NewPartialScrapeError(fmt.Errorf("%w: %d", ErrInvalidTotalMem, + memInfo.Total), metricsLen) } s.recordMemoryUtilizationMetric(now, memInfo) } - s.mb.Emit(metrics) - return md, nil + + return s.mb.Emit(), nil } diff --git a/receiver/hostmetricsreceiver/internal/scraper/networkscraper/documentation.md b/receiver/hostmetricsreceiver/internal/scraper/networkscraper/documentation.md index 18474d66d9ac..dedf15002e2a 100644 --- a/receiver/hostmetricsreceiver/internal/scraper/networkscraper/documentation.md +++ b/receiver/hostmetricsreceiver/internal/scraper/networkscraper/documentation.md @@ -23,7 +23,7 @@ metrics: enabled: ``` -## Attributes +## Metric attributes | Name | Description | | ---- | ----------- | diff --git a/receiver/hostmetricsreceiver/internal/scraper/networkscraper/internal/metadata/generated_metrics_v2.go b/receiver/hostmetricsreceiver/internal/scraper/networkscraper/internal/metadata/generated_metrics_v2.go index ed5102dc42bf..8b3fd16efca9 100644 --- a/receiver/hostmetricsreceiver/internal/scraper/networkscraper/internal/metadata/generated_metrics_v2.go +++ b/receiver/hostmetricsreceiver/internal/scraper/networkscraper/internal/metadata/generated_metrics_v2.go @@ -315,7 +315,10 @@ func newMetricSystemNetworkPackets(settings MetricSettings) metricSystemNetworkP // MetricsBuilder provides an interface for scrapers to report metrics while taking care of all the transformations // required to produce metric representation defined 
in metadata and user settings. type MetricsBuilder struct { - startTime pdata.Timestamp + startTime pdata.Timestamp // start time that will be applied to all recorded data points. + metricsCapacity int // maximum observed number of metrics per resource. + resourceCapacity int // maximum observed number of resource attributes. + metricsBuffer pdata.Metrics // accumulates metrics data before emitting. metricSystemNetworkConnections metricSystemNetworkConnections metricSystemNetworkDropped metricSystemNetworkDropped metricSystemNetworkErrors metricSystemNetworkErrors @@ -336,6 +339,7 @@ func WithStartTime(startTime pdata.Timestamp) metricBuilderOption { func NewMetricsBuilder(settings MetricsSettings, options ...metricBuilderOption) *MetricsBuilder { mb := &MetricsBuilder{ startTime: pdata.NewTimestampFromTime(time.Now()), + metricsBuffer: pdata.NewMetrics(), metricSystemNetworkConnections: newMetricSystemNetworkConnections(settings.SystemNetworkConnections), metricSystemNetworkDropped: newMetricSystemNetworkDropped(settings.SystemNetworkDropped), metricSystemNetworkErrors: newMetricSystemNetworkErrors(settings.SystemNetworkErrors), @@ -348,15 +352,51 @@ func NewMetricsBuilder(settings MetricsSettings, options ...metricBuilderOption) return mb } -// Emit appends generated metrics to a pdata.MetricsSlice and updates the internal state to be ready for recording -// another set of data points. This function will be doing all transformations required to produce metric representation -// defined in metadata and user settings, e.g. delta/cumulative translation. -func (mb *MetricsBuilder) Emit(metrics pdata.MetricSlice) { - mb.metricSystemNetworkConnections.emit(metrics) - mb.metricSystemNetworkDropped.emit(metrics) - mb.metricSystemNetworkErrors.emit(metrics) - mb.metricSystemNetworkIo.emit(metrics) - mb.metricSystemNetworkPackets.emit(metrics) +// updateCapacity updates max length of metrics and resource attributes that will be used for the slice capacity. +func (mb *MetricsBuilder) updateCapacity(rm pdata.ResourceMetrics) { + if mb.metricsCapacity < rm.InstrumentationLibraryMetrics().At(0).Metrics().Len() { + mb.metricsCapacity = rm.InstrumentationLibraryMetrics().At(0).Metrics().Len() + } + if mb.resourceCapacity < rm.Resource().Attributes().Len() { + mb.resourceCapacity = rm.Resource().Attributes().Len() + } +} + +// ResourceOption applies changes to provided resource. +type ResourceOption func(pdata.Resource) + +// EmitForResource saves all the generated metrics under a new resource and updates the internal state to be ready for +// recording another set of data points as part of another resource. This function can be helpful when one scraper +// needs to emit metrics from several resources. Otherwise calling this function is not required, +// just `Emit` function can be called instead. Resource attributes should be provided as ResourceOption arguments. 
+func (mb *MetricsBuilder) EmitForResource(ro ...ResourceOption) { + rm := pdata.NewResourceMetrics() + rm.Resource().Attributes().EnsureCapacity(mb.resourceCapacity) + for _, op := range ro { + op(rm.Resource()) + } + ils := rm.InstrumentationLibraryMetrics().AppendEmpty() + ils.InstrumentationLibrary().SetName("otelcol/hostmetricsreceiver/network") + ils.Metrics().EnsureCapacity(mb.metricsCapacity) + mb.metricSystemNetworkConnections.emit(ils.Metrics()) + mb.metricSystemNetworkDropped.emit(ils.Metrics()) + mb.metricSystemNetworkErrors.emit(ils.Metrics()) + mb.metricSystemNetworkIo.emit(ils.Metrics()) + mb.metricSystemNetworkPackets.emit(ils.Metrics()) + if ils.Metrics().Len() > 0 { + mb.updateCapacity(rm) + rm.MoveTo(mb.metricsBuffer.ResourceMetrics().AppendEmpty()) + } +} + +// Emit returns all the metrics accumulated by the metrics builder and updates the internal state to be ready for +// recording another set of metrics. This function will be responsible for applying all the transformations required to +// produce metric representation defined in metadata and user settings, e.g. delta or cumulative. +func (mb *MetricsBuilder) Emit(ro ...ResourceOption) pdata.Metrics { + mb.EmitForResource(ro...) + metrics := pdata.NewMetrics() + mb.metricsBuffer.MoveTo(metrics) + return metrics } // RecordSystemNetworkConnectionsDataPoint adds a data point to system.network.connections metric. @@ -393,16 +433,6 @@ func (mb *MetricsBuilder) Reset(options ...metricBuilderOption) { } } -// NewMetricData creates new pdata.Metrics and sets the InstrumentationLibrary -// name on the ResourceMetrics. -func (mb *MetricsBuilder) NewMetricData() pdata.Metrics { - md := pdata.NewMetrics() - rm := md.ResourceMetrics().AppendEmpty() - ilm := rm.InstrumentationLibraryMetrics().AppendEmpty() - ilm.InstrumentationLibrary().SetName("otelcol/hostmetricsreceiver/network") - return md -} - // Attributes contains the possible metric attributes that can be used. var Attributes = struct { // Device (Name of the network interface.) 
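For orientation, a minimal sketch of the scrape path after this change, written against the networkscraper's generated `metadata` package. The `scraper` struct, `newScraper` constructor, and the elided data-point recording are simplified stand-ins, not code from this patch; only `NewMetricsBuilder`, `Emit`, and the `Record*DataPoint` helpers referenced in the comment come from the generated API.

```go
package networkscraper

import (
	"context"

	"go.opentelemetry.io/collector/model/pdata"

	"github.com/open-telemetry/opentelemetry-collector-contrib/receiver/hostmetricsreceiver/internal/scraper/networkscraper/internal/metadata"
)

// scraper is a simplified stand-in for the real network scraper; it only holds
// the generated builder, which now owns the pdata.Metrics object.
type scraper struct {
	mb *metadata.MetricsBuilder
}

// newScraper is a hypothetical constructor shown for completeness.
func newScraper(settings metadata.MetricsSettings) *scraper {
	return &scraper{mb: metadata.NewMetricsBuilder(settings)}
}

func (s *scraper) scrape(context.Context) (pdata.Metrics, error) {
	// Data points would be recorded here through the generated
	// Record*DataPoint helpers before emitting.
	//
	// Emit moves everything buffered so far into a fresh pdata.Metrics and
	// resets the builder for the next cycle, so the scraper never assembles
	// ResourceMetrics or InstrumentationLibraryMetrics itself.
	return s.mb.Emit(), nil
}
```

With this shape, the instrumentation library name and the resource/metric slice capacities are decided entirely inside the generated code, which is the point of moving `pdata.Metrics` creation out of the scrapers.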
diff --git a/receiver/hostmetricsreceiver/internal/scraper/networkscraper/network_scraper.go b/receiver/hostmetricsreceiver/internal/scraper/networkscraper/network_scraper.go index a4051ff93582..b5701ce62e7e 100644 --- a/receiver/hostmetricsreceiver/internal/scraper/networkscraper/network_scraper.go +++ b/receiver/hostmetricsreceiver/internal/scraper/networkscraper/network_scraper.go @@ -83,24 +83,22 @@ func (s *scraper) start(context.Context, component.Host) error { } func (s *scraper) scrape(_ context.Context) (pdata.Metrics, error) { - md := pdata.NewMetrics() - metrics := md.ResourceMetrics().AppendEmpty().InstrumentationLibraryMetrics().AppendEmpty().Metrics() var errors scrapererror.ScrapeErrors - err := s.recordNetworkCounterMetrics(metrics) + err := s.recordNetworkCounterMetrics() if err != nil { errors.AddPartial(networkMetricsLen, err) } - err = s.recordNetworkConnectionsMetrics(metrics) + err = s.recordNetworkConnectionsMetrics() if err != nil { errors.AddPartial(connectionsMetricsLen, err) } - s.mb.Emit(metrics) - return md, errors.Combine() + + return s.mb.Emit(), errors.Combine() } -func (s *scraper) recordNetworkCounterMetrics(metrics pdata.MetricSlice) error { +func (s *scraper) recordNetworkCounterMetrics() error { now := pdata.NewTimestampFromTime(time.Now()) // get total stats only @@ -113,9 +111,6 @@ func (s *scraper) recordNetworkCounterMetrics(metrics pdata.MetricSlice) error { ioCounters = s.filterByInterface(ioCounters) if len(ioCounters) > 0 { - startIdx := metrics.Len() - metrics.EnsureCapacity(startIdx + networkMetricsLen) - s.recordNetworkPacketsMetric(now, ioCounters) s.recordNetworkDroppedPacketsMetric(now, ioCounters) s.recordNetworkErrorPacketsMetric(now, ioCounters) @@ -153,7 +148,7 @@ func (s *scraper) recordNetworkIOMetric(now pdata.Timestamp, ioCountersSlice []n } } -func (s *scraper) recordNetworkConnectionsMetrics(metrics pdata.MetricSlice) error { +func (s *scraper) recordNetworkConnectionsMetrics() error { now := pdata.NewTimestampFromTime(time.Now()) connections, err := s.connections("tcp") @@ -163,8 +158,6 @@ func (s *scraper) recordNetworkConnectionsMetrics(metrics pdata.MetricSlice) err tcpConnectionStatusCounts := getTCPConnectionStatusCounts(connections) - startIdx := metrics.Len() - metrics.EnsureCapacity(startIdx + connectionsMetricsLen) s.recordNetworkConnectionsMetric(now, tcpConnectionStatusCounts) return nil } diff --git a/receiver/hostmetricsreceiver/internal/scraper/pagingscraper/documentation.md b/receiver/hostmetricsreceiver/internal/scraper/pagingscraper/documentation.md index 1933b98af984..b2c2f4b6c3e6 100644 --- a/receiver/hostmetricsreceiver/internal/scraper/pagingscraper/documentation.md +++ b/receiver/hostmetricsreceiver/internal/scraper/pagingscraper/documentation.md @@ -22,7 +22,7 @@ metrics: enabled: ``` -## Attributes +## Metric attributes | Name | Description | | ---- | ----------- | diff --git a/receiver/hostmetricsreceiver/internal/scraper/pagingscraper/internal/metadata/generated_metrics_v2.go b/receiver/hostmetricsreceiver/internal/scraper/pagingscraper/internal/metadata/generated_metrics_v2.go index 3f4679291d4d..c7c9ef3f2172 100644 --- a/receiver/hostmetricsreceiver/internal/scraper/pagingscraper/internal/metadata/generated_metrics_v2.go +++ b/receiver/hostmetricsreceiver/internal/scraper/pagingscraper/internal/metadata/generated_metrics_v2.go @@ -254,7 +254,10 @@ func newMetricSystemPagingUtilization(settings MetricSettings) metricSystemPagin // MetricsBuilder provides an interface for scrapers to report metrics 
while taking care of all the transformations // required to produce metric representation defined in metadata and user settings. type MetricsBuilder struct { - startTime pdata.Timestamp + startTime pdata.Timestamp // start time that will be applied to all recorded data points. + metricsCapacity int // maximum observed number of metrics per resource. + resourceCapacity int // maximum observed number of resource attributes. + metricsBuffer pdata.Metrics // accumulates metrics data before emitting. metricSystemPagingFaults metricSystemPagingFaults metricSystemPagingOperations metricSystemPagingOperations metricSystemPagingUsage metricSystemPagingUsage @@ -274,6 +277,7 @@ func WithStartTime(startTime pdata.Timestamp) metricBuilderOption { func NewMetricsBuilder(settings MetricsSettings, options ...metricBuilderOption) *MetricsBuilder { mb := &MetricsBuilder{ startTime: pdata.NewTimestampFromTime(time.Now()), + metricsBuffer: pdata.NewMetrics(), metricSystemPagingFaults: newMetricSystemPagingFaults(settings.SystemPagingFaults), metricSystemPagingOperations: newMetricSystemPagingOperations(settings.SystemPagingOperations), metricSystemPagingUsage: newMetricSystemPagingUsage(settings.SystemPagingUsage), @@ -285,14 +289,50 @@ func NewMetricsBuilder(settings MetricsSettings, options ...metricBuilderOption) return mb } -// Emit appends generated metrics to a pdata.MetricsSlice and updates the internal state to be ready for recording -// another set of data points. This function will be doing all transformations required to produce metric representation -// defined in metadata and user settings, e.g. delta/cumulative translation. -func (mb *MetricsBuilder) Emit(metrics pdata.MetricSlice) { - mb.metricSystemPagingFaults.emit(metrics) - mb.metricSystemPagingOperations.emit(metrics) - mb.metricSystemPagingUsage.emit(metrics) - mb.metricSystemPagingUtilization.emit(metrics) +// updateCapacity updates max length of metrics and resource attributes that will be used for the slice capacity. +func (mb *MetricsBuilder) updateCapacity(rm pdata.ResourceMetrics) { + if mb.metricsCapacity < rm.InstrumentationLibraryMetrics().At(0).Metrics().Len() { + mb.metricsCapacity = rm.InstrumentationLibraryMetrics().At(0).Metrics().Len() + } + if mb.resourceCapacity < rm.Resource().Attributes().Len() { + mb.resourceCapacity = rm.Resource().Attributes().Len() + } +} + +// ResourceOption applies changes to provided resource. +type ResourceOption func(pdata.Resource) + +// EmitForResource saves all the generated metrics under a new resource and updates the internal state to be ready for +// recording another set of data points as part of another resource. This function can be helpful when one scraper +// needs to emit metrics from several resources. Otherwise calling this function is not required, +// just `Emit` function can be called instead. Resource attributes should be provided as ResourceOption arguments. 
+func (mb *MetricsBuilder) EmitForResource(ro ...ResourceOption) { + rm := pdata.NewResourceMetrics() + rm.Resource().Attributes().EnsureCapacity(mb.resourceCapacity) + for _, op := range ro { + op(rm.Resource()) + } + ils := rm.InstrumentationLibraryMetrics().AppendEmpty() + ils.InstrumentationLibrary().SetName("otelcol/hostmetricsreceiver/paging") + ils.Metrics().EnsureCapacity(mb.metricsCapacity) + mb.metricSystemPagingFaults.emit(ils.Metrics()) + mb.metricSystemPagingOperations.emit(ils.Metrics()) + mb.metricSystemPagingUsage.emit(ils.Metrics()) + mb.metricSystemPagingUtilization.emit(ils.Metrics()) + if ils.Metrics().Len() > 0 { + mb.updateCapacity(rm) + rm.MoveTo(mb.metricsBuffer.ResourceMetrics().AppendEmpty()) + } +} + +// Emit returns all the metrics accumulated by the metrics builder and updates the internal state to be ready for +// recording another set of metrics. This function will be responsible for applying all the transformations required to +// produce metric representation defined in metadata and user settings, e.g. delta or cumulative. +func (mb *MetricsBuilder) Emit(ro ...ResourceOption) pdata.Metrics { + mb.EmitForResource(ro...) + metrics := pdata.NewMetrics() + mb.metricsBuffer.MoveTo(metrics) + return metrics } // RecordSystemPagingFaultsDataPoint adds a data point to system.paging.faults metric. @@ -324,16 +364,6 @@ func (mb *MetricsBuilder) Reset(options ...metricBuilderOption) { } } -// NewMetricData creates new pdata.Metrics and sets the InstrumentationLibrary -// name on the ResourceMetrics. -func (mb *MetricsBuilder) NewMetricData() pdata.Metrics { - md := pdata.NewMetrics() - rm := md.ResourceMetrics().AppendEmpty() - ilm := rm.InstrumentationLibraryMetrics().AppendEmpty() - ilm.InstrumentationLibrary().SetName("otelcol/hostmetricsreceiver/paging") - return md -} - // Attributes contains the possible metric attributes that can be used. var Attributes = struct { // Device (Name of the page file.) 
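The same generated surface also covers scrapers that report several resources: `EmitForResource` flushes the recorded data points under a new resource, and a single final `Emit` returns everything accumulated. Below is a sketch of that two-step flow; the `withHostName` helper, the `host.name` attribute, and `emitPerResource` are illustrative assumptions only, since generated `With*` options exist solely for attributes declared under `resource_attributes` in metadata.yaml (the pagingscraper declares none).

```go
package pagingscraper

import (
	"go.opentelemetry.io/collector/model/pdata"

	"github.com/open-telemetry/opentelemetry-collector-contrib/receiver/hostmetricsreceiver/internal/scraper/pagingscraper/internal/metadata"
)

// withHostName builds a hand-written ResourceOption; ResourceOption is just
// func(pdata.Resource), so custom attributes can be set with a closure.
func withHostName(name string) metadata.ResourceOption {
	return func(r pdata.Resource) {
		r.Attributes().UpsertString("host.name", name)
	}
}

// emitPerResource shows the two-step flow: EmitForResource once per logical
// resource, then a single Emit to hand the accumulated pdata.Metrics back.
func emitPerResource(mb *metadata.MetricsBuilder, hosts []string) pdata.Metrics {
	for _, h := range hosts {
		// data points for host h would be recorded here via Record*DataPoint helpers
		mb.EmitForResource(withHostName(h))
	}
	return mb.Emit()
}
```

This mirrors how the processscraper changes later in this patch use `EmitForResource` per process and `Emit` once at the end of the scrape.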
diff --git a/receiver/hostmetricsreceiver/internal/scraper/pagingscraper/paging_scraper_others.go b/receiver/hostmetricsreceiver/internal/scraper/pagingscraper/paging_scraper_others.go index 176b83c87315..c32239d87fa9 100644 --- a/receiver/hostmetricsreceiver/internal/scraper/pagingscraper/paging_scraper_others.go +++ b/receiver/hostmetricsreceiver/internal/scraper/pagingscraper/paging_scraper_others.go @@ -63,9 +63,6 @@ func (s *scraper) start(context.Context, component.Host) error { } func (s *scraper) scrape(_ context.Context) (pdata.Metrics, error) { - md := pdata.NewMetrics() - metrics := md.ResourceMetrics().AppendEmpty().InstrumentationLibraryMetrics().AppendEmpty().Metrics() - var errors scrapererror.ScrapeErrors err := s.scrapePagingUsageMetric() @@ -78,8 +75,7 @@ func (s *scraper) scrape(_ context.Context) (pdata.Metrics, error) { errors.AddPartial(pagingMetricsLen, err) } - s.mb.Emit(metrics) - return md, errors.Combine() + return s.mb.Emit(), errors.Combine() } func (s *scraper) scrapePagingUsageMetric() error { diff --git a/receiver/hostmetricsreceiver/internal/scraper/pagingscraper/paging_scraper_windows.go b/receiver/hostmetricsreceiver/internal/scraper/pagingscraper/paging_scraper_windows.go index ea1c7eada1f2..01ff74784e08 100644 --- a/receiver/hostmetricsreceiver/internal/scraper/pagingscraper/paging_scraper_windows.go +++ b/receiver/hostmetricsreceiver/internal/scraper/pagingscraper/paging_scraper_windows.go @@ -70,9 +70,6 @@ func (s *scraper) start(context.Context, component.Host) error { } func (s *scraper) scrape(context.Context) (pdata.Metrics, error) { - md := pdata.NewMetrics() - metrics := md.ResourceMetrics().AppendEmpty().InstrumentationLibraryMetrics().AppendEmpty().Metrics() - var errors scrapererror.ScrapeErrors err := s.scrapePagingUsageMetric() @@ -85,8 +82,7 @@ func (s *scraper) scrape(context.Context) (pdata.Metrics, error) { errors.AddPartial(pagingMetricsLen, err) } - s.mb.Emit(metrics) - return md, errors.Combine() + return s.mb.Emit(), errors.Combine() } func (s *scraper) scrapePagingUsageMetric() error { diff --git a/receiver/hostmetricsreceiver/internal/scraper/processesscraper/documentation.md b/receiver/hostmetricsreceiver/internal/scraper/processesscraper/documentation.md index 1de21adc3495..a2d27dcef307 100644 --- a/receiver/hostmetricsreceiver/internal/scraper/processesscraper/documentation.md +++ b/receiver/hostmetricsreceiver/internal/scraper/processesscraper/documentation.md @@ -13,7 +13,7 @@ These are the metrics available for this scraper. **Highlighted metrics** are emitted by default. -## Attributes +## Metric attributes | Name | Description | | ---- | ----------- | diff --git a/receiver/hostmetricsreceiver/internal/scraper/processscraper/documentation.md b/receiver/hostmetricsreceiver/internal/scraper/processscraper/documentation.md index 17ae6f5dcef9..6bc364703c16 100644 --- a/receiver/hostmetricsreceiver/internal/scraper/processscraper/documentation.md +++ b/receiver/hostmetricsreceiver/internal/scraper/processscraper/documentation.md @@ -22,7 +22,18 @@ metrics: enabled: ``` -## Attributes +## Resource attributes + +| Name | Description | Type | +| ---- | ----------- | ---- | +| process.command | The command used to launch the process (i.e. the command name). On Linux based systems, can be set to the zeroth string in proc/[pid]/cmdline. On Windows, can be set to the first parameter extracted from GetCommandLineW. 
| String | +| process.command_line | The full command used to launch the process as a single string representing the full command. On Windows, can be set to the result of GetCommandLineW. Do not set this if you have to assemble it just for monitoring; use process.command_args instead. | String | +| process.executable.name | The name of the process executable. On Linux based systems, can be set to the Name in proc/[pid]/status. On Windows, can be set to the base name of GetProcessImageFileNameW. | String | +| process.executable.path | The full path to the process executable. On Linux based systems, can be set to the target of proc/[pid]/exe. On Windows, can be set to the result of GetProcessImageFileNameW. | String | +| process.owner | The username of the user that owns the process. | String | +| process.pid | Process identifier (PID). | Int | + +## Metric attributes | Name | Description | | ---- | ----------- | diff --git a/receiver/hostmetricsreceiver/internal/scraper/processscraper/internal/metadata/generated_metrics_v2.go b/receiver/hostmetricsreceiver/internal/scraper/processscraper/internal/metadata/generated_metrics_v2.go index aaa0b87111d1..204d8101a5e4 100644 --- a/receiver/hostmetricsreceiver/internal/scraper/processscraper/internal/metadata/generated_metrics_v2.go +++ b/receiver/hostmetricsreceiver/internal/scraper/processscraper/internal/metadata/generated_metrics_v2.go @@ -249,7 +249,10 @@ func newMetricProcessMemoryVirtualUsage(settings MetricSettings) metricProcessMe // MetricsBuilder provides an interface for scrapers to report metrics while taking care of all the transformations // required to produce metric representation defined in metadata and user settings. type MetricsBuilder struct { - startTime pdata.Timestamp + startTime pdata.Timestamp // start time that will be applied to all recorded data points. + metricsCapacity int // maximum observed number of metrics per resource. + resourceCapacity int // maximum observed number of resource attributes. + metricsBuffer pdata.Metrics // accumulates metrics data before emitting. metricProcessCPUTime metricProcessCPUTime metricProcessDiskIo metricProcessDiskIo metricProcessMemoryPhysicalUsage metricProcessMemoryPhysicalUsage @@ -269,6 +272,7 @@ func WithStartTime(startTime pdata.Timestamp) metricBuilderOption { func NewMetricsBuilder(settings MetricsSettings, options ...metricBuilderOption) *MetricsBuilder { mb := &MetricsBuilder{ startTime: pdata.NewTimestampFromTime(time.Now()), + metricsBuffer: pdata.NewMetrics(), metricProcessCPUTime: newMetricProcessCPUTime(settings.ProcessCPUTime), metricProcessDiskIo: newMetricProcessDiskIo(settings.ProcessDiskIo), metricProcessMemoryPhysicalUsage: newMetricProcessMemoryPhysicalUsage(settings.ProcessMemoryPhysicalUsage), @@ -280,14 +284,92 @@ func NewMetricsBuilder(settings MetricsSettings, options ...metricBuilderOption) return mb } -// Emit appends generated metrics to a pdata.MetricsSlice and updates the internal state to be ready for recording -// another set of data points. This function will be doing all transformations required to produce metric representation -// defined in metadata and user settings, e.g. delta/cumulative translation. 
-func (mb *MetricsBuilder) Emit(metrics pdata.MetricSlice) { - mb.metricProcessCPUTime.emit(metrics) - mb.metricProcessDiskIo.emit(metrics) - mb.metricProcessMemoryPhysicalUsage.emit(metrics) - mb.metricProcessMemoryVirtualUsage.emit(metrics) +// updateCapacity updates max length of metrics and resource attributes that will be used for the slice capacity. +func (mb *MetricsBuilder) updateCapacity(rm pdata.ResourceMetrics) { + if mb.metricsCapacity < rm.InstrumentationLibraryMetrics().At(0).Metrics().Len() { + mb.metricsCapacity = rm.InstrumentationLibraryMetrics().At(0).Metrics().Len() + } + if mb.resourceCapacity < rm.Resource().Attributes().Len() { + mb.resourceCapacity = rm.Resource().Attributes().Len() + } +} + +// ResourceOption applies changes to provided resource. +type ResourceOption func(pdata.Resource) + +// WithProcessCommand sets provided value as "process.command" attribute for current resource. +func WithProcessCommand(val string) ResourceOption { + return func(r pdata.Resource) { + r.Attributes().UpsertString("process.command", val) + } +} + +// WithProcessCommandLine sets provided value as "process.command_line" attribute for current resource. +func WithProcessCommandLine(val string) ResourceOption { + return func(r pdata.Resource) { + r.Attributes().UpsertString("process.command_line", val) + } +} + +// WithProcessExecutableName sets provided value as "process.executable.name" attribute for current resource. +func WithProcessExecutableName(val string) ResourceOption { + return func(r pdata.Resource) { + r.Attributes().UpsertString("process.executable.name", val) + } +} + +// WithProcessExecutablePath sets provided value as "process.executable.path" attribute for current resource. +func WithProcessExecutablePath(val string) ResourceOption { + return func(r pdata.Resource) { + r.Attributes().UpsertString("process.executable.path", val) + } +} + +// WithProcessOwner sets provided value as "process.owner" attribute for current resource. +func WithProcessOwner(val string) ResourceOption { + return func(r pdata.Resource) { + r.Attributes().UpsertString("process.owner", val) + } +} + +// WithProcessPid sets provided value as "process.pid" attribute for current resource. +func WithProcessPid(val int64) ResourceOption { + return func(r pdata.Resource) { + r.Attributes().UpsertInt("process.pid", val) + } +} + +// EmitForResource saves all the generated metrics under a new resource and updates the internal state to be ready for +// recording another set of data points as part of another resource. This function can be helpful when one scraper +// needs to emit metrics from several resources. Otherwise calling this function is not required, +// just `Emit` function can be called instead. Resource attributes should be provided as ResourceOption arguments. 
+func (mb *MetricsBuilder) EmitForResource(ro ...ResourceOption) { + rm := pdata.NewResourceMetrics() + rm.Resource().Attributes().EnsureCapacity(mb.resourceCapacity) + for _, op := range ro { + op(rm.Resource()) + } + ils := rm.InstrumentationLibraryMetrics().AppendEmpty() + ils.InstrumentationLibrary().SetName("otelcol/hostmetricsreceiver/process") + ils.Metrics().EnsureCapacity(mb.metricsCapacity) + mb.metricProcessCPUTime.emit(ils.Metrics()) + mb.metricProcessDiskIo.emit(ils.Metrics()) + mb.metricProcessMemoryPhysicalUsage.emit(ils.Metrics()) + mb.metricProcessMemoryVirtualUsage.emit(ils.Metrics()) + if ils.Metrics().Len() > 0 { + mb.updateCapacity(rm) + rm.MoveTo(mb.metricsBuffer.ResourceMetrics().AppendEmpty()) + } +} + +// Emit returns all the metrics accumulated by the metrics builder and updates the internal state to be ready for +// recording another set of metrics. This function will be responsible for applying all the transformations required to +// produce metric representation defined in metadata and user settings, e.g. delta or cumulative. +func (mb *MetricsBuilder) Emit(ro ...ResourceOption) pdata.Metrics { + mb.EmitForResource(ro...) + metrics := pdata.NewMetrics() + mb.metricsBuffer.MoveTo(metrics) + return metrics } // RecordProcessCPUTimeDataPoint adds a data point to process.cpu.time metric. @@ -319,16 +401,6 @@ func (mb *MetricsBuilder) Reset(options ...metricBuilderOption) { } } -// NewMetricData creates new pdata.Metrics and sets the InstrumentationLibrary -// name on the ResourceMetrics. -func (mb *MetricsBuilder) NewMetricData() pdata.Metrics { - md := pdata.NewMetrics() - rm := md.ResourceMetrics().AppendEmpty() - ilm := rm.InstrumentationLibraryMetrics().AppendEmpty() - ilm.InstrumentationLibrary().SetName("otelcol/hostmetricsreceiver/process") - return md -} - // Attributes contains the possible metric attributes that can be used. var Attributes = struct { // Direction (Direction of flow of bytes (read or write).) diff --git a/receiver/hostmetricsreceiver/internal/scraper/processscraper/metadata.yaml b/receiver/hostmetricsreceiver/internal/scraper/processscraper/metadata.yaml index 979958883520..a18ff1002c0b 100644 --- a/receiver/hostmetricsreceiver/internal/scraper/processscraper/metadata.yaml +++ b/receiver/hostmetricsreceiver/internal/scraper/processscraper/metadata.yaml @@ -1,5 +1,38 @@ name: hostmetricsreceiver/process +resource_attributes: + process.pid: + description: Process identifier (PID). + type: int + process.executable.name: + description: >- + The name of the process executable. On Linux based systems, can be set to the + Name in proc/[pid]/status. On Windows, can be set to the base name of + GetProcessImageFileNameW. + type: string + process.executable.path: + description: >- + The full path to the process executable. On Linux based systems, can be set to + the target of proc/[pid]/exe. On Windows, can be set to the result of + GetProcessImageFileNameW. + type: string + process.command: + description: >- + The command used to launch the process (i.e. the command name). On Linux based + systems, can be set to the zeroth string in proc/[pid]/cmdline. On Windows, can + be set to the first parameter extracted from GetCommandLineW. + type: string + process.command_line: + description: >- + The full command used to launch the process as a single string representing the + full command. On Windows, can be set to the result of GetCommandLineW. Do not + set this if you have to assemble it just for monitoring; use + process.command_args instead. 
+ type: string + process.owner: + description: The username of the user that owns the process. + type: string + attributes: direction: description: Direction of flow of bytes (read or write). diff --git a/receiver/hostmetricsreceiver/internal/scraper/processscraper/process.go b/receiver/hostmetricsreceiver/internal/scraper/processscraper/process.go index 68cb2841a4b2..7078c787aa08 100644 --- a/receiver/hostmetricsreceiver/internal/scraper/processscraper/process.go +++ b/receiver/hostmetricsreceiver/internal/scraper/processscraper/process.go @@ -19,8 +19,8 @@ import ( "github.com/shirou/gopsutil/v3/cpu" "github.com/shirou/gopsutil/v3/process" - "go.opentelemetry.io/collector/model/pdata" - conventions "go.opentelemetry.io/collector/model/semconv/v1.6.1" + + "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/hostmetricsreceiver/internal/scraper/processscraper/internal/metadata" ) // processMetadata stores process related metadata along @@ -46,25 +46,27 @@ type commandMetadata struct { commandLineSlice []string } -func (m *processMetadata) initializeResource(resource pdata.Resource) { - attr := resource.Attributes() - attr.EnsureCapacity(6) - attr.InsertInt(conventions.AttributeProcessPID, int64(m.pid)) - attr.InsertString(conventions.AttributeProcessExecutableName, m.executable.name) - attr.InsertString(conventions.AttributeProcessExecutablePath, m.executable.path) +func (m *processMetadata) resourceOptions() []metadata.ResourceOption { + opts := make([]metadata.ResourceOption, 0, 6) + opts = append(opts, + metadata.WithProcessPid(int64(m.pid)), + metadata.WithProcessExecutableName(m.executable.name), + metadata.WithProcessExecutablePath(m.executable.path), + ) if m.command != nil { - attr.InsertString(conventions.AttributeProcessCommand, m.command.command) + opts = append(opts, metadata.WithProcessCommand(m.command.command)) if m.command.commandLineSlice != nil { // TODO insert slice here once this is supported by the data model // (see https://github.com/open-telemetry/opentelemetry-collector/pull/1142) - attr.InsertString(conventions.AttributeProcessCommandLine, strings.Join(m.command.commandLineSlice, " ")) + opts = append(opts, metadata.WithProcessCommandLine(strings.Join(m.command.commandLineSlice, " "))) } else { - attr.InsertString(conventions.AttributeProcessCommandLine, m.command.commandLine) + opts = append(opts, metadata.WithProcessCommandLine(m.command.commandLine)) } } if m.username != "" { - attr.InsertString(conventions.AttributeProcessOwner, m.username) + opts = append(opts, metadata.WithProcessOwner(m.username)) } + return opts } // processHandles provides a wrapper around []*process.Process diff --git a/receiver/hostmetricsreceiver/internal/scraper/processscraper/process_scraper.go b/receiver/hostmetricsreceiver/internal/scraper/processscraper/process_scraper.go index b283e1c3272b..c669d9fdb2e2 100644 --- a/receiver/hostmetricsreceiver/internal/scraper/processscraper/process_scraper.go +++ b/receiver/hostmetricsreceiver/internal/scraper/processscraper/process_scraper.go @@ -22,7 +22,6 @@ import ( "github.com/shirou/gopsutil/v3/host" "go.opentelemetry.io/collector/component" "go.opentelemetry.io/collector/model/pdata" - conventions "go.opentelemetry.io/collector/model/semconv/v1.6.1" "go.opentelemetry.io/collector/receiver/scrapererror" "github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal/processor/filterset" @@ -83,28 +82,19 @@ func (s *scraper) start(context.Context, component.Host) error { } func (s *scraper) scrape(_ 
context.Context) (pdata.Metrics, error) { - md := pdata.NewMetrics() - rms := md.ResourceMetrics() - var errs scrapererror.ScrapeErrors metadata, err := s.getProcessMetadata() if err != nil { partialErr, isPartial := err.(scrapererror.PartialScrapeError) if !isPartial { - return md, err + return pdata.NewMetrics(), err } errs.AddPartial(partialErr.Failed, partialErr) } - rms.EnsureCapacity(len(metadata)) for _, md := range metadata { - rm := rms.AppendEmpty() - rm.SetSchemaUrl(conventions.SchemaURL) - md.initializeResource(rm.Resource()) - metrics := rm.InstrumentationLibraryMetrics().AppendEmpty().Metrics() - now := pdata.NewTimestampFromTime(time.Now()) if err = s.scrapeAndAppendCPUTimeMetric(now, md.handle); err != nil { @@ -118,10 +108,11 @@ func (s *scraper) scrape(_ context.Context) (pdata.Metrics, error) { if err = s.scrapeAndAppendDiskIOMetric(now, md.handle); err != nil { errs.AddPartial(diskMetricsLen, fmt.Errorf("error reading disk usage for process %q (pid %v): %w", md.executable.name, md.pid, err)) } - s.mb.Emit(metrics) + + s.mb.EmitForResource(md.resourceOptions()...) } - return md, errs.Combine() + return s.mb.Emit(), errs.Combine() } // getProcessMetadata returns a slice of processMetadata, including handles, diff --git a/receiver/hostmetricsreceiver/internal/scraper/processscraper/process_scraper_test.go b/receiver/hostmetricsreceiver/internal/scraper/processscraper/process_scraper_test.go index 2b0112874c7c..3e85ef3300ba 100644 --- a/receiver/hostmetricsreceiver/internal/scraper/processscraper/process_scraper_test.go +++ b/receiver/hostmetricsreceiver/internal/scraper/processscraper/process_scraper_test.go @@ -70,7 +70,6 @@ func TestScrape(t *testing.T) { } require.Greater(t, md.ResourceMetrics().Len(), 1) - assertSchemaIsSet(t, md.ResourceMetrics()) assertProcessResourceAttributesExist(t, md.ResourceMetrics()) assertCPUTimeMetricValid(t, md.ResourceMetrics(), expectedStartTime) assertMemoryUsageMetricValid(t, md.ResourceMetrics(), expectedStartTime) @@ -78,13 +77,6 @@ func TestScrape(t *testing.T) { assertSameTimeStampForAllMetricsWithinResource(t, md.ResourceMetrics()) } -func assertSchemaIsSet(t *testing.T, resourceMetrics pdata.ResourceMetricsSlice) { - for i := 0; i < resourceMetrics.Len(); i++ { - rm := resourceMetrics.At(0) - assert.EqualValues(t, conventions.SchemaURL, rm.SchemaUrl()) - } -} - func assertProcessResourceAttributesExist(t *testing.T, resourceMetrics pdata.ResourceMetricsSlice) { for i := 0; i < resourceMetrics.Len(); i++ { attr := resourceMetrics.At(0).Resource().Attributes() @@ -486,15 +478,18 @@ func getExpectedLengthOfReturnedMetrics(nameError, exeError, timeError, memError if diskError == nil { expectedLen += diskMetricsLen } + + if expectedLen == 0 { + return 0, 0 + } return 1, expectedLen } func getExpectedScrapeFailures(nameError, exeError, timeError, memError, diskError error) int { - expectedResourceMetricsLen, expectedMetricsLen := getExpectedLengthOfReturnedMetrics(nameError, exeError, timeError, memError, diskError) - if expectedResourceMetricsLen == 0 { + if nameError != nil || exeError != nil { return 1 } - + _, expectedMetricsLen := getExpectedLengthOfReturnedMetrics(nameError, exeError, timeError, memError, diskError) return metricsLen - expectedMetricsLen } @@ -535,6 +530,8 @@ func TestScrapeMetrics_MuteProcessNameError(t *testing.T) { } scraper, err := newProcessScraper(config) require.NoError(t, err, "Failed to create process scraper: %v", err) + err = scraper.start(context.Background(), componenttest.NewNopHost()) + 
require.NoError(t, err, "Failed to initialize process scraper: %v", err) handleMock := &processHandleMock{} handleMock.On("Name").Return("test", processNameError) diff --git a/receiver/kafkametricsreceiver/documentation.md b/receiver/kafkametricsreceiver/documentation.md index 9a7b3e9f839e..bc6984c4642c 100644 --- a/receiver/kafkametricsreceiver/documentation.md +++ b/receiver/kafkametricsreceiver/documentation.md @@ -22,7 +22,7 @@ These are the metrics available for this scraper. **Highlighted metrics** are emitted by default. -## Attributes +## Metric attributes | Name | Description | | ---- | ----------- | diff --git a/receiver/kubeletstatsreceiver/documentation.md b/receiver/kubeletstatsreceiver/documentation.md index 8a332bbe0a88..940ca6053ace 100644 --- a/receiver/kubeletstatsreceiver/documentation.md +++ b/receiver/kubeletstatsreceiver/documentation.md @@ -29,7 +29,7 @@ These are the metrics available for this scraper. **Highlighted metrics** are emitted by default. -## Attributes +## Metric attributes | Name | Description | | ---- | ----------- | diff --git a/receiver/memcachedreceiver/documentation.md b/receiver/memcachedreceiver/documentation.md index 4cbc7bfd0b11..2deb8e17ba9e 100644 --- a/receiver/memcachedreceiver/documentation.md +++ b/receiver/memcachedreceiver/documentation.md @@ -22,7 +22,7 @@ These are the metrics available for this scraper. **Highlighted metrics** are emitted by default. -## Attributes +## Metric attributes | Name | Description | | ---- | ----------- | diff --git a/receiver/mongodbatlasreceiver/documentation.md b/receiver/mongodbatlasreceiver/documentation.md index 02b951d1f213..ab74c04e4cfd 100644 --- a/receiver/mongodbatlasreceiver/documentation.md +++ b/receiver/mongodbatlasreceiver/documentation.md @@ -74,7 +74,7 @@ These are the metrics available for this scraper. **Highlighted metrics** are emitted by default. -## Attributes +## Metric attributes | Name | Description | | ---- | ----------- | diff --git a/receiver/mongodbreceiver/documentation.md b/receiver/mongodbreceiver/documentation.md index 686eed801ad1..a0327bd6807a 100644 --- a/receiver/mongodbreceiver/documentation.md +++ b/receiver/mongodbreceiver/documentation.md @@ -30,7 +30,7 @@ metrics: enabled: ``` -## Attributes +## Metric attributes | Name | Description | | ---- | ----------- | diff --git a/receiver/mongodbreceiver/internal/metadata/generated_metrics_v2.go b/receiver/mongodbreceiver/internal/metadata/generated_metrics_v2.go index ee8d75c2a085..fd5dc86c0913 100644 --- a/receiver/mongodbreceiver/internal/metadata/generated_metrics_v2.go +++ b/receiver/mongodbreceiver/internal/metadata/generated_metrics_v2.go @@ -709,7 +709,10 @@ func newMetricMongodbStorageSize(settings MetricSettings) metricMongodbStorageSi // MetricsBuilder provides an interface for scrapers to report metrics while taking care of all the transformations // required to produce metric representation defined in metadata and user settings. type MetricsBuilder struct { - startTime pdata.Timestamp + startTime pdata.Timestamp // start time that will be applied to all recorded data points. + metricsCapacity int // maximum observed number of metrics per resource. + resourceCapacity int // maximum observed number of resource attributes. + metricsBuffer pdata.Metrics // accumulates metrics data before emitting. 
metricMongodbCacheOperations metricMongodbCacheOperations metricMongodbCollectionCount metricMongodbCollectionCount metricMongodbConnectionCount metricMongodbConnectionCount @@ -737,6 +740,7 @@ func WithStartTime(startTime pdata.Timestamp) metricBuilderOption { func NewMetricsBuilder(settings MetricsSettings, options ...metricBuilderOption) *MetricsBuilder { mb := &MetricsBuilder{ startTime: pdata.NewTimestampFromTime(time.Now()), + metricsBuffer: pdata.NewMetrics(), metricMongodbCacheOperations: newMetricMongodbCacheOperations(settings.MongodbCacheOperations), metricMongodbCollectionCount: newMetricMongodbCollectionCount(settings.MongodbCollectionCount), metricMongodbConnectionCount: newMetricMongodbConnectionCount(settings.MongodbConnectionCount), @@ -756,22 +760,58 @@ func NewMetricsBuilder(settings MetricsSettings, options ...metricBuilderOption) return mb } -// Emit appends generated metrics to a pdata.MetricsSlice and updates the internal state to be ready for recording -// another set of data points. This function will be doing all transformations required to produce metric representation -// defined in metadata and user settings, e.g. delta/cumulative translation. -func (mb *MetricsBuilder) Emit(metrics pdata.MetricSlice) { - mb.metricMongodbCacheOperations.emit(metrics) - mb.metricMongodbCollectionCount.emit(metrics) - mb.metricMongodbConnectionCount.emit(metrics) - mb.metricMongodbDataSize.emit(metrics) - mb.metricMongodbExtentCount.emit(metrics) - mb.metricMongodbGlobalLockTime.emit(metrics) - mb.metricMongodbIndexCount.emit(metrics) - mb.metricMongodbIndexSize.emit(metrics) - mb.metricMongodbMemoryUsage.emit(metrics) - mb.metricMongodbObjectCount.emit(metrics) - mb.metricMongodbOperationCount.emit(metrics) - mb.metricMongodbStorageSize.emit(metrics) +// updateCapacity updates max length of metrics and resource attributes that will be used for the slice capacity. +func (mb *MetricsBuilder) updateCapacity(rm pdata.ResourceMetrics) { + if mb.metricsCapacity < rm.InstrumentationLibraryMetrics().At(0).Metrics().Len() { + mb.metricsCapacity = rm.InstrumentationLibraryMetrics().At(0).Metrics().Len() + } + if mb.resourceCapacity < rm.Resource().Attributes().Len() { + mb.resourceCapacity = rm.Resource().Attributes().Len() + } +} + +// ResourceOption applies changes to provided resource. +type ResourceOption func(pdata.Resource) + +// EmitForResource saves all the generated metrics under a new resource and updates the internal state to be ready for +// recording another set of data points as part of another resource. This function can be helpful when one scraper +// needs to emit metrics from several resources. Otherwise calling this function is not required, +// just `Emit` function can be called instead. Resource attributes should be provided as ResourceOption arguments. 
+func (mb *MetricsBuilder) EmitForResource(ro ...ResourceOption) { + rm := pdata.NewResourceMetrics() + rm.Resource().Attributes().EnsureCapacity(mb.resourceCapacity) + for _, op := range ro { + op(rm.Resource()) + } + ils := rm.InstrumentationLibraryMetrics().AppendEmpty() + ils.InstrumentationLibrary().SetName("otelcol/mongodbreceiver") + ils.Metrics().EnsureCapacity(mb.metricsCapacity) + mb.metricMongodbCacheOperations.emit(ils.Metrics()) + mb.metricMongodbCollectionCount.emit(ils.Metrics()) + mb.metricMongodbConnectionCount.emit(ils.Metrics()) + mb.metricMongodbDataSize.emit(ils.Metrics()) + mb.metricMongodbExtentCount.emit(ils.Metrics()) + mb.metricMongodbGlobalLockTime.emit(ils.Metrics()) + mb.metricMongodbIndexCount.emit(ils.Metrics()) + mb.metricMongodbIndexSize.emit(ils.Metrics()) + mb.metricMongodbMemoryUsage.emit(ils.Metrics()) + mb.metricMongodbObjectCount.emit(ils.Metrics()) + mb.metricMongodbOperationCount.emit(ils.Metrics()) + mb.metricMongodbStorageSize.emit(ils.Metrics()) + if ils.Metrics().Len() > 0 { + mb.updateCapacity(rm) + rm.MoveTo(mb.metricsBuffer.ResourceMetrics().AppendEmpty()) + } +} + +// Emit returns all the metrics accumulated by the metrics builder and updates the internal state to be ready for +// recording another set of metrics. This function will be responsible for applying all the transformations required to +// produce metric representation defined in metadata and user settings, e.g. delta or cumulative. +func (mb *MetricsBuilder) Emit(ro ...ResourceOption) pdata.Metrics { + mb.EmitForResource(ro...) + metrics := pdata.NewMetrics() + mb.metricsBuffer.MoveTo(metrics) + return metrics } // RecordMongodbCacheOperationsDataPoint adds a data point to mongodb.cache.operations metric. @@ -843,16 +883,6 @@ func (mb *MetricsBuilder) Reset(options ...metricBuilderOption) { } } -// NewMetricData creates new pdata.Metrics and sets the InstrumentationLibrary -// name on the ResourceMetrics. -func (mb *MetricsBuilder) NewMetricData() pdata.Metrics { - md := pdata.NewMetrics() - rm := md.ResourceMetrics().AppendEmpty() - ilm := rm.InstrumentationLibraryMetrics().AppendEmpty() - ilm.InstrumentationLibrary().SetName("otelcol/mongodbreceiver") - return md -} - // Attributes contains the possible metric attributes that can be used. var Attributes = struct { // ConnectionType (The status of the connection.) diff --git a/receiver/mysqlreceiver/documentation.md b/receiver/mysqlreceiver/documentation.md index 608d6614fb27..4035cad7bad1 100644 --- a/receiver/mysqlreceiver/documentation.md +++ b/receiver/mysqlreceiver/documentation.md @@ -35,7 +35,7 @@ metrics: enabled: ``` -## Attributes +## Metric attributes | Name | Description | | ---- | ----------- | diff --git a/receiver/mysqlreceiver/internal/metadata/generated_metrics_v2.go b/receiver/mysqlreceiver/internal/metadata/generated_metrics_v2.go index 6758e2f8c523..14dae04bd6a2 100644 --- a/receiver/mysqlreceiver/internal/metadata/generated_metrics_v2.go +++ b/receiver/mysqlreceiver/internal/metadata/generated_metrics_v2.go @@ -990,7 +990,10 @@ func newMetricMysqlThreads(settings MetricSettings) metricMysqlThreads { // MetricsBuilder provides an interface for scrapers to report metrics while taking care of all the transformations // required to produce metric representation defined in metadata and user settings. type MetricsBuilder struct { - startTime pdata.Timestamp + startTime pdata.Timestamp // start time that will be applied to all recorded data points. 
+ metricsCapacity int // maximum observed number of metrics per resource. + resourceCapacity int // maximum observed number of resource attributes. + metricsBuffer pdata.Metrics // accumulates metrics data before emitting. metricMysqlBufferPoolDataPages metricMysqlBufferPoolDataPages metricMysqlBufferPoolLimit metricMysqlBufferPoolLimit metricMysqlBufferPoolOperations metricMysqlBufferPoolOperations @@ -1023,6 +1026,7 @@ func WithStartTime(startTime pdata.Timestamp) metricBuilderOption { func NewMetricsBuilder(settings MetricsSettings, options ...metricBuilderOption) *MetricsBuilder { mb := &MetricsBuilder{ startTime: pdata.NewTimestampFromTime(time.Now()), + metricsBuffer: pdata.NewMetrics(), metricMysqlBufferPoolDataPages: newMetricMysqlBufferPoolDataPages(settings.MysqlBufferPoolDataPages), metricMysqlBufferPoolLimit: newMetricMysqlBufferPoolLimit(settings.MysqlBufferPoolLimit), metricMysqlBufferPoolOperations: newMetricMysqlBufferPoolOperations(settings.MysqlBufferPoolOperations), @@ -1047,27 +1051,63 @@ func NewMetricsBuilder(settings MetricsSettings, options ...metricBuilderOption) return mb } -// Emit appends generated metrics to a pdata.MetricsSlice and updates the internal state to be ready for recording -// another set of data points. This function will be doing all transformations required to produce metric representation -// defined in metadata and user settings, e.g. delta/cumulative translation. -func (mb *MetricsBuilder) Emit(metrics pdata.MetricSlice) { - mb.metricMysqlBufferPoolDataPages.emit(metrics) - mb.metricMysqlBufferPoolLimit.emit(metrics) - mb.metricMysqlBufferPoolOperations.emit(metrics) - mb.metricMysqlBufferPoolPageFlushes.emit(metrics) - mb.metricMysqlBufferPoolPages.emit(metrics) - mb.metricMysqlBufferPoolUsage.emit(metrics) - mb.metricMysqlCommands.emit(metrics) - mb.metricMysqlDoubleWrites.emit(metrics) - mb.metricMysqlHandlers.emit(metrics) - mb.metricMysqlLocks.emit(metrics) - mb.metricMysqlLogOperations.emit(metrics) - mb.metricMysqlOperations.emit(metrics) - mb.metricMysqlPageOperations.emit(metrics) - mb.metricMysqlRowLocks.emit(metrics) - mb.metricMysqlRowOperations.emit(metrics) - mb.metricMysqlSorts.emit(metrics) - mb.metricMysqlThreads.emit(metrics) +// updateCapacity updates max length of metrics and resource attributes that will be used for the slice capacity. +func (mb *MetricsBuilder) updateCapacity(rm pdata.ResourceMetrics) { + if mb.metricsCapacity < rm.InstrumentationLibraryMetrics().At(0).Metrics().Len() { + mb.metricsCapacity = rm.InstrumentationLibraryMetrics().At(0).Metrics().Len() + } + if mb.resourceCapacity < rm.Resource().Attributes().Len() { + mb.resourceCapacity = rm.Resource().Attributes().Len() + } +} + +// ResourceOption applies changes to provided resource. +type ResourceOption func(pdata.Resource) + +// EmitForResource saves all the generated metrics under a new resource and updates the internal state to be ready for +// recording another set of data points as part of another resource. This function can be helpful when one scraper +// needs to emit metrics from several resources. Otherwise calling this function is not required, +// just `Emit` function can be called instead. Resource attributes should be provided as ResourceOption arguments. 
+func (mb *MetricsBuilder) EmitForResource(ro ...ResourceOption) { + rm := pdata.NewResourceMetrics() + rm.Resource().Attributes().EnsureCapacity(mb.resourceCapacity) + for _, op := range ro { + op(rm.Resource()) + } + ils := rm.InstrumentationLibraryMetrics().AppendEmpty() + ils.InstrumentationLibrary().SetName("otelcol/mysqlreceiver") + ils.Metrics().EnsureCapacity(mb.metricsCapacity) + mb.metricMysqlBufferPoolDataPages.emit(ils.Metrics()) + mb.metricMysqlBufferPoolLimit.emit(ils.Metrics()) + mb.metricMysqlBufferPoolOperations.emit(ils.Metrics()) + mb.metricMysqlBufferPoolPageFlushes.emit(ils.Metrics()) + mb.metricMysqlBufferPoolPages.emit(ils.Metrics()) + mb.metricMysqlBufferPoolUsage.emit(ils.Metrics()) + mb.metricMysqlCommands.emit(ils.Metrics()) + mb.metricMysqlDoubleWrites.emit(ils.Metrics()) + mb.metricMysqlHandlers.emit(ils.Metrics()) + mb.metricMysqlLocks.emit(ils.Metrics()) + mb.metricMysqlLogOperations.emit(ils.Metrics()) + mb.metricMysqlOperations.emit(ils.Metrics()) + mb.metricMysqlPageOperations.emit(ils.Metrics()) + mb.metricMysqlRowLocks.emit(ils.Metrics()) + mb.metricMysqlRowOperations.emit(ils.Metrics()) + mb.metricMysqlSorts.emit(ils.Metrics()) + mb.metricMysqlThreads.emit(ils.Metrics()) + if ils.Metrics().Len() > 0 { + mb.updateCapacity(rm) + rm.MoveTo(mb.metricsBuffer.ResourceMetrics().AppendEmpty()) + } +} + +// Emit returns all the metrics accumulated by the metrics builder and updates the internal state to be ready for +// recording another set of metrics. This function will be responsible for applying all the transformations required to +// produce metric representation defined in metadata and user settings, e.g. delta or cumulative. +func (mb *MetricsBuilder) Emit(ro ...ResourceOption) pdata.Metrics { + mb.EmitForResource(ro...) + metrics := pdata.NewMetrics() + mb.metricsBuffer.MoveTo(metrics) + return metrics } // RecordMysqlBufferPoolDataPagesDataPoint adds a data point to mysql.buffer_pool.data_pages metric. @@ -1164,16 +1204,6 @@ func (mb *MetricsBuilder) Reset(options ...metricBuilderOption) { } } -// NewMetricData creates new pdata.Metrics and sets the InstrumentationLibrary -// name on the ResourceMetrics. -func (mb *MetricsBuilder) NewMetricData() pdata.Metrics { - md := pdata.NewMetrics() - rm := md.ResourceMetrics().AppendEmpty() - ilm := rm.InstrumentationLibraryMetrics().AppendEmpty() - ilm.InstrumentationLibrary().SetName("otelcol/mysqlreceiver") - return md -} - // Attributes contains the possible metric attributes that can be used. var Attributes = struct { // BufferPoolData (The status of buffer pool data.) diff --git a/receiver/mysqlreceiver/scraper.go b/receiver/mysqlreceiver/scraper.go index 9460e8aa6843..2b8ffa7eefee 100644 --- a/receiver/mysqlreceiver/scraper.go +++ b/receiver/mysqlreceiver/scraper.go @@ -73,8 +73,6 @@ func (m *mySQLScraper) scrape(context.Context) (pdata.Metrics, error) { return pdata.Metrics{}, errors.New("failed to connect to http client") } - // metric initialization - md := m.mb.NewMetricData() now := pdata.NewTimestampFromTime(time.Now()) // collect innodb metrics. 
@@ -510,8 +508,7 @@ func (m *mySQLScraper) scrape(context.Context) (pdata.Metrics, error) { } } - m.mb.Emit(md.ResourceMetrics().At(0).InstrumentationLibraryMetrics().At(0).Metrics()) - return md, errors.Combine() + return m.mb.Emit(), errors.Combine() } func (m *mySQLScraper) recordDataPages(now pdata.Timestamp, globalStats map[string]string, errors scrapererror.ScrapeErrors) { diff --git a/receiver/nginxreceiver/documentation.md b/receiver/nginxreceiver/documentation.md index d4b979947129..2d1b2a7a8f91 100644 --- a/receiver/nginxreceiver/documentation.md +++ b/receiver/nginxreceiver/documentation.md @@ -22,7 +22,7 @@ metrics: enabled: ``` -## Attributes +## Metric attributes | Name | Description | | ---- | ----------- | diff --git a/receiver/nginxreceiver/internal/metadata/generated_metrics_v2.go b/receiver/nginxreceiver/internal/metadata/generated_metrics_v2.go index bb58d048d767..a12b3db9d662 100644 --- a/receiver/nginxreceiver/internal/metadata/generated_metrics_v2.go +++ b/receiver/nginxreceiver/internal/metadata/generated_metrics_v2.go @@ -245,7 +245,10 @@ func newMetricNginxRequests(settings MetricSettings) metricNginxRequests { // MetricsBuilder provides an interface for scrapers to report metrics while taking care of all the transformations // required to produce metric representation defined in metadata and user settings. type MetricsBuilder struct { - startTime pdata.Timestamp + startTime pdata.Timestamp // start time that will be applied to all recorded data points. + metricsCapacity int // maximum observed number of metrics per resource. + resourceCapacity int // maximum observed number of resource attributes. + metricsBuffer pdata.Metrics // accumulates metrics data before emitting. metricNginxConnectionsAccepted metricNginxConnectionsAccepted metricNginxConnectionsCurrent metricNginxConnectionsCurrent metricNginxConnectionsHandled metricNginxConnectionsHandled @@ -265,6 +268,7 @@ func WithStartTime(startTime pdata.Timestamp) metricBuilderOption { func NewMetricsBuilder(settings MetricsSettings, options ...metricBuilderOption) *MetricsBuilder { mb := &MetricsBuilder{ startTime: pdata.NewTimestampFromTime(time.Now()), + metricsBuffer: pdata.NewMetrics(), metricNginxConnectionsAccepted: newMetricNginxConnectionsAccepted(settings.NginxConnectionsAccepted), metricNginxConnectionsCurrent: newMetricNginxConnectionsCurrent(settings.NginxConnectionsCurrent), metricNginxConnectionsHandled: newMetricNginxConnectionsHandled(settings.NginxConnectionsHandled), @@ -276,14 +280,50 @@ func NewMetricsBuilder(settings MetricsSettings, options ...metricBuilderOption) return mb } -// Emit appends generated metrics to a pdata.MetricsSlice and updates the internal state to be ready for recording -// another set of data points. This function will be doing all transformations required to produce metric representation -// defined in metadata and user settings, e.g. delta/cumulative translation. -func (mb *MetricsBuilder) Emit(metrics pdata.MetricSlice) { - mb.metricNginxConnectionsAccepted.emit(metrics) - mb.metricNginxConnectionsCurrent.emit(metrics) - mb.metricNginxConnectionsHandled.emit(metrics) - mb.metricNginxRequests.emit(metrics) +// updateCapacity updates max length of metrics and resource attributes that will be used for the slice capacity. 
+func (mb *MetricsBuilder) updateCapacity(rm pdata.ResourceMetrics) { + if mb.metricsCapacity < rm.InstrumentationLibraryMetrics().At(0).Metrics().Len() { + mb.metricsCapacity = rm.InstrumentationLibraryMetrics().At(0).Metrics().Len() + } + if mb.resourceCapacity < rm.Resource().Attributes().Len() { + mb.resourceCapacity = rm.Resource().Attributes().Len() + } +} + +// ResourceOption applies changes to provided resource. +type ResourceOption func(pdata.Resource) + +// EmitForResource saves all the generated metrics under a new resource and updates the internal state to be ready for +// recording another set of data points as part of another resource. This function can be helpful when one scraper +// needs to emit metrics from several resources. Otherwise calling this function is not required, +// just `Emit` function can be called instead. Resource attributes should be provided as ResourceOption arguments. +func (mb *MetricsBuilder) EmitForResource(ro ...ResourceOption) { + rm := pdata.NewResourceMetrics() + rm.Resource().Attributes().EnsureCapacity(mb.resourceCapacity) + for _, op := range ro { + op(rm.Resource()) + } + ils := rm.InstrumentationLibraryMetrics().AppendEmpty() + ils.InstrumentationLibrary().SetName("otelcol/nginxreceiver") + ils.Metrics().EnsureCapacity(mb.metricsCapacity) + mb.metricNginxConnectionsAccepted.emit(ils.Metrics()) + mb.metricNginxConnectionsCurrent.emit(ils.Metrics()) + mb.metricNginxConnectionsHandled.emit(ils.Metrics()) + mb.metricNginxRequests.emit(ils.Metrics()) + if ils.Metrics().Len() > 0 { + mb.updateCapacity(rm) + rm.MoveTo(mb.metricsBuffer.ResourceMetrics().AppendEmpty()) + } +} + +// Emit returns all the metrics accumulated by the metrics builder and updates the internal state to be ready for +// recording another set of metrics. This function will be responsible for applying all the transformations required to +// produce metric representation defined in metadata and user settings, e.g. delta or cumulative. +func (mb *MetricsBuilder) Emit(ro ...ResourceOption) pdata.Metrics { + mb.EmitForResource(ro...) + metrics := pdata.NewMetrics() + mb.metricsBuffer.MoveTo(metrics) + return metrics } // RecordNginxConnectionsAcceptedDataPoint adds a data point to nginx.connections_accepted metric. @@ -315,16 +355,6 @@ func (mb *MetricsBuilder) Reset(options ...metricBuilderOption) { } } -// NewMetricData creates new pdata.Metrics and sets the InstrumentationLibrary -// name on the ResourceMetrics. -func (mb *MetricsBuilder) NewMetricData() pdata.Metrics { - md := pdata.NewMetrics() - rm := md.ResourceMetrics().AppendEmpty() - ilm := rm.InstrumentationLibraryMetrics().AppendEmpty() - ilm.InstrumentationLibrary().SetName("otelcol/nginxreceiver") - return md -} - // Attributes contains the possible metric attributes that can be used. 
var Attributes = struct { // State (The state of a connection) diff --git a/receiver/nginxreceiver/scraper.go b/receiver/nginxreceiver/scraper.go index 4e2ac75ddc17..26ff768eaefd 100644 --- a/receiver/nginxreceiver/scraper.go +++ b/receiver/nginxreceiver/scraper.go @@ -75,7 +75,6 @@ func (r *nginxScraper) scrape(context.Context) (pdata.Metrics, error) { } now := pdata.NewTimestampFromTime(time.Now()) - md := r.mb.NewMetricData() r.mb.RecordNginxRequestsDataPoint(now, stats.Requests) r.mb.RecordNginxConnectionsAcceptedDataPoint(now, stats.Connections.Accepted) @@ -85,6 +84,5 @@ func (r *nginxScraper) scrape(context.Context) (pdata.Metrics, error) { r.mb.RecordNginxConnectionsCurrentDataPoint(now, stats.Connections.Writing, metadata.AttributeState.Writing) r.mb.RecordNginxConnectionsCurrentDataPoint(now, stats.Connections.Waiting, metadata.AttributeState.Waiting) - r.mb.Emit(md.ResourceMetrics().At(0).InstrumentationLibraryMetrics().At(0).Metrics()) - return md, nil + return r.mb.Emit(), nil } diff --git a/receiver/postgresqlreceiver/documentation.md b/receiver/postgresqlreceiver/documentation.md index 3087824663af..8efdf5abcbf1 100644 --- a/receiver/postgresqlreceiver/documentation.md +++ b/receiver/postgresqlreceiver/documentation.md @@ -25,7 +25,7 @@ metrics: enabled: ``` -## Attributes +## Metric attributes | Name | Description | | ---- | ----------- | diff --git a/receiver/postgresqlreceiver/internal/metadata/generated_metrics_v2.go b/receiver/postgresqlreceiver/internal/metadata/generated_metrics_v2.go index a6cb0f2a8273..09f64334aa4f 100644 --- a/receiver/postgresqlreceiver/internal/metadata/generated_metrics_v2.go +++ b/receiver/postgresqlreceiver/internal/metadata/generated_metrics_v2.go @@ -430,7 +430,10 @@ func newMetricPostgresqlRows(settings MetricSettings) metricPostgresqlRows { // MetricsBuilder provides an interface for scrapers to report metrics while taking care of all the transformations // required to produce metric representation defined in metadata and user settings. type MetricsBuilder struct { - startTime pdata.Timestamp + startTime pdata.Timestamp // start time that will be applied to all recorded data points. + metricsCapacity int // maximum observed number of metrics per resource. + resourceCapacity int // maximum observed number of resource attributes. + metricsBuffer pdata.Metrics // accumulates metrics data before emitting. metricPostgresqlBackends metricPostgresqlBackends metricPostgresqlBlocksRead metricPostgresqlBlocksRead metricPostgresqlCommits metricPostgresqlCommits @@ -453,6 +456,7 @@ func WithStartTime(startTime pdata.Timestamp) metricBuilderOption { func NewMetricsBuilder(settings MetricsSettings, options ...metricBuilderOption) *MetricsBuilder { mb := &MetricsBuilder{ startTime: pdata.NewTimestampFromTime(time.Now()), + metricsBuffer: pdata.NewMetrics(), metricPostgresqlBackends: newMetricPostgresqlBackends(settings.PostgresqlBackends), metricPostgresqlBlocksRead: newMetricPostgresqlBlocksRead(settings.PostgresqlBlocksRead), metricPostgresqlCommits: newMetricPostgresqlCommits(settings.PostgresqlCommits), @@ -467,17 +471,53 @@ func NewMetricsBuilder(settings MetricsSettings, options ...metricBuilderOption) return mb } -// Emit appends generated metrics to a pdata.MetricsSlice and updates the internal state to be ready for recording -// another set of data points. This function will be doing all transformations required to produce metric representation -// defined in metadata and user settings, e.g. delta/cumulative translation. 
-func (mb *MetricsBuilder) Emit(metrics pdata.MetricSlice) { - mb.metricPostgresqlBackends.emit(metrics) - mb.metricPostgresqlBlocksRead.emit(metrics) - mb.metricPostgresqlCommits.emit(metrics) - mb.metricPostgresqlDbSize.emit(metrics) - mb.metricPostgresqlOperations.emit(metrics) - mb.metricPostgresqlRollbacks.emit(metrics) - mb.metricPostgresqlRows.emit(metrics) +// updateCapacity updates max length of metrics and resource attributes that will be used for the slice capacity. +func (mb *MetricsBuilder) updateCapacity(rm pdata.ResourceMetrics) { + if mb.metricsCapacity < rm.InstrumentationLibraryMetrics().At(0).Metrics().Len() { + mb.metricsCapacity = rm.InstrumentationLibraryMetrics().At(0).Metrics().Len() + } + if mb.resourceCapacity < rm.Resource().Attributes().Len() { + mb.resourceCapacity = rm.Resource().Attributes().Len() + } +} + +// ResourceOption applies changes to provided resource. +type ResourceOption func(pdata.Resource) + +// EmitForResource saves all the generated metrics under a new resource and updates the internal state to be ready for +// recording another set of data points as part of another resource. This function can be helpful when one scraper +// needs to emit metrics from several resources. Otherwise calling this function is not required, +// just `Emit` function can be called instead. Resource attributes should be provided as ResourceOption arguments. +func (mb *MetricsBuilder) EmitForResource(ro ...ResourceOption) { + rm := pdata.NewResourceMetrics() + rm.Resource().Attributes().EnsureCapacity(mb.resourceCapacity) + for _, op := range ro { + op(rm.Resource()) + } + ils := rm.InstrumentationLibraryMetrics().AppendEmpty() + ils.InstrumentationLibrary().SetName("otelcol/postgresqlreceiver") + ils.Metrics().EnsureCapacity(mb.metricsCapacity) + mb.metricPostgresqlBackends.emit(ils.Metrics()) + mb.metricPostgresqlBlocksRead.emit(ils.Metrics()) + mb.metricPostgresqlCommits.emit(ils.Metrics()) + mb.metricPostgresqlDbSize.emit(ils.Metrics()) + mb.metricPostgresqlOperations.emit(ils.Metrics()) + mb.metricPostgresqlRollbacks.emit(ils.Metrics()) + mb.metricPostgresqlRows.emit(ils.Metrics()) + if ils.Metrics().Len() > 0 { + mb.updateCapacity(rm) + rm.MoveTo(mb.metricsBuffer.ResourceMetrics().AppendEmpty()) + } +} + +// Emit returns all the metrics accumulated by the metrics builder and updates the internal state to be ready for +// recording another set of metrics. This function will be responsible for applying all the transformations required to +// produce metric representation defined in metadata and user settings, e.g. delta or cumulative. +func (mb *MetricsBuilder) Emit(ro ...ResourceOption) pdata.Metrics { + mb.EmitForResource(ro...) + metrics := pdata.NewMetrics() + mb.metricsBuffer.MoveTo(metrics) + return metrics } // RecordPostgresqlBackendsDataPoint adds a data point to postgresql.backends metric. @@ -524,16 +564,6 @@ func (mb *MetricsBuilder) Reset(options ...metricBuilderOption) { } } -// NewMetricData creates new pdata.Metrics and sets the InstrumentationLibrary -// name on the ResourceMetrics. -func (mb *MetricsBuilder) NewMetricData() pdata.Metrics { - md := pdata.NewMetrics() - rm := md.ResourceMetrics().AppendEmpty() - ilm := rm.InstrumentationLibraryMetrics().AppendEmpty() - ilm.InstrumentationLibrary().SetName("otelcol/postgresqlreceiver") - return md -} - // Attributes contains the possible metric attributes that can be used. var Attributes = struct { // Database (The name of the database.) 
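Note on usage: for receivers that report a single resource, the new builder removes the need to pre-create pdata.Metrics. The postgresql scraper change below follows this shape; here is a minimal sketch of the flow, assuming it sits inside the receiver package so the internal metadata import resolves, with a hypothetical scraper type and the Record*DataPoint calls left as comments because their exact signatures are receiver-specific.

package postgresqlreceiver // illustrative sketch, not part of the patch

import (
	"context"

	"go.opentelemetry.io/collector/model/pdata"
	"go.opentelemetry.io/collector/receiver/scrapererror"

	"github.com/open-telemetry/opentelemetry-collector-contrib/receiver/postgresqlreceiver/internal/metadata"
)

// singleResourceScraper is a hypothetical scraper type; the only state that matters
// here is the generated MetricsBuilder.
type singleResourceScraper struct {
	mb *metadata.MetricsBuilder
}

// scrape sketches the single-resource flow after this change: no pdata.Metrics is
// created up front; data points are recorded and Emit assembles the final payload.
func (s *singleResourceScraper) scrape(context.Context) (pdata.Metrics, error) {
	var errs scrapererror.ScrapeErrors

	// Record data points with the generated helpers, sharing one timestamp, e.g.:
	//   now := pdata.NewTimestampFromTime(time.Now())
	//   s.mb.RecordPostgresqlBackendsDataPoint(now, ...)

	// Emit sets the instrumentation library name, moves the accumulated metrics into
	// a fresh pdata.Metrics, and resets the builder for the next scrape.
	return s.mb.Emit(), errs.Combine()
}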
diff --git a/receiver/postgresqlreceiver/scraper.go b/receiver/postgresqlreceiver/scraper.go index 0aea107f5c71..3ca50aa3b57d 100644 --- a/receiver/postgresqlreceiver/scraper.go +++ b/receiver/postgresqlreceiver/scraper.go @@ -81,8 +81,6 @@ func (p *postgreSQLScraper) scrape(ctx context.Context) (pdata.Metrics, error) { databases = dbList } - // metric initialization - md := p.mb.NewMetricData() now := pdata.NewTimestampFromTime(time.Now()) var errors scrapererror.ScrapeErrors @@ -104,8 +102,7 @@ func (p *postgreSQLScraper) scrape(ctx context.Context) (pdata.Metrics, error) { p.collectDatabaseTableMetrics(ctx, now, dbClient, errors) } - p.mb.Emit(md.ResourceMetrics().At(0).InstrumentationLibraryMetrics().At(0).Metrics()) - return md, errors.Combine() + return p.mb.Emit(), errors.Combine() } func (p *postgreSQLScraper) collectBlockReads( diff --git a/receiver/rabbitmqreceiver/documentation.md b/receiver/rabbitmqreceiver/documentation.md index da35d5cc690e..ee7a7a76e025 100644 --- a/receiver/rabbitmqreceiver/documentation.md +++ b/receiver/rabbitmqreceiver/documentation.md @@ -24,11 +24,16 @@ metrics: enabled: ``` -## Attributes +## Resource attributes + +| Name | Description | Type | +| ---- | ----------- | ---- | +| rabbitmq.node.name | The name of the RabbitMQ node. | String | +| rabbitmq.queue.name | The name of the RabbitMQ queue. | String | +| rabbitmq.vhost.name | The name of the RabbitMQ vHost. | String | + +## Metric attributes | Name | Description | | ---- | ----------- | | message.state | The state of messages in a queue. | -| rabbitmq.node.name | The name of the RabbitMQ node. | -| rabbitmq.queue.name | The name of the RabbitMQ queue. | -| rabbitmq.vhost.name | The name of the RabbitMQ vHost. | diff --git a/receiver/rabbitmqreceiver/internal/metadata/generated_metrics_v2.go b/receiver/rabbitmqreceiver/internal/metadata/generated_metrics_v2.go index e5c6b33355c6..573ecbf08d8f 100644 --- a/receiver/rabbitmqreceiver/internal/metadata/generated_metrics_v2.go +++ b/receiver/rabbitmqreceiver/internal/metadata/generated_metrics_v2.go @@ -357,7 +357,10 @@ func newMetricRabbitmqMessagePublished(settings MetricSettings) metricRabbitmqMe // MetricsBuilder provides an interface for scrapers to report metrics while taking care of all the transformations // required to produce metric representation defined in metadata and user settings. type MetricsBuilder struct { - startTime pdata.Timestamp + startTime pdata.Timestamp // start time that will be applied to all recorded data points. + metricsCapacity int // maximum observed number of metrics per resource. + resourceCapacity int // maximum observed number of resource attributes. + metricsBuffer pdata.Metrics // accumulates metrics data before emitting. 
metricRabbitmqConsumerCount metricRabbitmqConsumerCount metricRabbitmqMessageAcknowledged metricRabbitmqMessageAcknowledged metricRabbitmqMessageCurrent metricRabbitmqMessageCurrent @@ -379,6 +382,7 @@ func WithStartTime(startTime pdata.Timestamp) metricBuilderOption { func NewMetricsBuilder(settings MetricsSettings, options ...metricBuilderOption) *MetricsBuilder { mb := &MetricsBuilder{ startTime: pdata.NewTimestampFromTime(time.Now()), + metricsBuffer: pdata.NewMetrics(), metricRabbitmqConsumerCount: newMetricRabbitmqConsumerCount(settings.RabbitmqConsumerCount), metricRabbitmqMessageAcknowledged: newMetricRabbitmqMessageAcknowledged(settings.RabbitmqMessageAcknowledged), metricRabbitmqMessageCurrent: newMetricRabbitmqMessageCurrent(settings.RabbitmqMessageCurrent), @@ -392,16 +396,73 @@ func NewMetricsBuilder(settings MetricsSettings, options ...metricBuilderOption) return mb } -// Emit appends generated metrics to a pdata.MetricsSlice and updates the internal state to be ready for recording -// another set of data points. This function will be doing all transformations required to produce metric representation -// defined in metadata and user settings, e.g. delta/cumulative translation. -func (mb *MetricsBuilder) Emit(metrics pdata.MetricSlice) { - mb.metricRabbitmqConsumerCount.emit(metrics) - mb.metricRabbitmqMessageAcknowledged.emit(metrics) - mb.metricRabbitmqMessageCurrent.emit(metrics) - mb.metricRabbitmqMessageDelivered.emit(metrics) - mb.metricRabbitmqMessageDropped.emit(metrics) - mb.metricRabbitmqMessagePublished.emit(metrics) +// updateCapacity updates max length of metrics and resource attributes that will be used for the slice capacity. +func (mb *MetricsBuilder) updateCapacity(rm pdata.ResourceMetrics) { + if mb.metricsCapacity < rm.InstrumentationLibraryMetrics().At(0).Metrics().Len() { + mb.metricsCapacity = rm.InstrumentationLibraryMetrics().At(0).Metrics().Len() + } + if mb.resourceCapacity < rm.Resource().Attributes().Len() { + mb.resourceCapacity = rm.Resource().Attributes().Len() + } +} + +// ResourceOption applies changes to provided resource. +type ResourceOption func(pdata.Resource) + +// WithRabbitmqNodeName sets provided value as "rabbitmq.node.name" attribute for current resource. +func WithRabbitmqNodeName(val string) ResourceOption { + return func(r pdata.Resource) { + r.Attributes().UpsertString("rabbitmq.node.name", val) + } +} + +// WithRabbitmqQueueName sets provided value as "rabbitmq.queue.name" attribute for current resource. +func WithRabbitmqQueueName(val string) ResourceOption { + return func(r pdata.Resource) { + r.Attributes().UpsertString("rabbitmq.queue.name", val) + } +} + +// WithRabbitmqVhostName sets provided value as "rabbitmq.vhost.name" attribute for current resource. +func WithRabbitmqVhostName(val string) ResourceOption { + return func(r pdata.Resource) { + r.Attributes().UpsertString("rabbitmq.vhost.name", val) + } +} + +// EmitForResource saves all the generated metrics under a new resource and updates the internal state to be ready for +// recording another set of data points as part of another resource. This function can be helpful when one scraper +// needs to emit metrics from several resources. Otherwise calling this function is not required, +// just `Emit` function can be called instead. Resource attributes should be provided as ResourceOption arguments. 
+func (mb *MetricsBuilder) EmitForResource(ro ...ResourceOption) { + rm := pdata.NewResourceMetrics() + rm.Resource().Attributes().EnsureCapacity(mb.resourceCapacity) + for _, op := range ro { + op(rm.Resource()) + } + ils := rm.InstrumentationLibraryMetrics().AppendEmpty() + ils.InstrumentationLibrary().SetName("otelcol/rabbitmqreceiver") + ils.Metrics().EnsureCapacity(mb.metricsCapacity) + mb.metricRabbitmqConsumerCount.emit(ils.Metrics()) + mb.metricRabbitmqMessageAcknowledged.emit(ils.Metrics()) + mb.metricRabbitmqMessageCurrent.emit(ils.Metrics()) + mb.metricRabbitmqMessageDelivered.emit(ils.Metrics()) + mb.metricRabbitmqMessageDropped.emit(ils.Metrics()) + mb.metricRabbitmqMessagePublished.emit(ils.Metrics()) + if ils.Metrics().Len() > 0 { + mb.updateCapacity(rm) + rm.MoveTo(mb.metricsBuffer.ResourceMetrics().AppendEmpty()) + } +} + +// Emit returns all the metrics accumulated by the metrics builder and updates the internal state to be ready for +// recording another set of metrics. This function will be responsible for applying all the transformations required to +// produce metric representation defined in metadata and user settings, e.g. delta or cumulative. +func (mb *MetricsBuilder) Emit(ro ...ResourceOption) pdata.Metrics { + mb.EmitForResource(ro...) + metrics := pdata.NewMetrics() + mb.metricsBuffer.MoveTo(metrics) + return metrics } // RecordRabbitmqConsumerCountDataPoint adds a data point to rabbitmq.consumer.count metric. @@ -443,31 +504,12 @@ func (mb *MetricsBuilder) Reset(options ...metricBuilderOption) { } } -// NewMetricData creates new pdata.Metrics and sets the InstrumentationLibrary -// name on the ResourceMetrics. -func (mb *MetricsBuilder) NewMetricData() pdata.Metrics { - md := pdata.NewMetrics() - rm := md.ResourceMetrics().AppendEmpty() - ilm := rm.InstrumentationLibraryMetrics().AppendEmpty() - ilm.InstrumentationLibrary().SetName("otelcol/rabbitmqreceiver") - return md -} - // Attributes contains the possible metric attributes that can be used. var Attributes = struct { // MessageState (The state of messages in a queue.) MessageState string - // RabbitmqNodeName (The name of the RabbitMQ node.) - RabbitmqNodeName string - // RabbitmqQueueName (The name of the RabbitMQ queue.) - RabbitmqQueueName string - // RabbitmqVhostName (The name of the RabbitMQ vHost.) - RabbitmqVhostName string }{ "state", - "rabbitmq.node.name", - "rabbitmq.queue.name", - "rabbitmq.vhost.name", } // A is an alias for Attributes. diff --git a/receiver/rabbitmqreceiver/metadata.yaml b/receiver/rabbitmqreceiver/metadata.yaml index e6828a76d32e..e7ffa4bb60ed 100644 --- a/receiver/rabbitmqreceiver/metadata.yaml +++ b/receiver/rabbitmqreceiver/metadata.yaml @@ -1,12 +1,17 @@ name: rabbitmqreceiver -attributes: +resource_attributes: rabbitmq.queue.name: description: The name of the RabbitMQ queue. + type: string rabbitmq.node.name: description: The name of the RabbitMQ node. + type: string rabbitmq.vhost.name: description: The name of the RabbitMQ vHost. + type: string + +attributes: message.state: value: state description: The state of messages in a queue. 
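Note on usage: receivers that emit one resource per scraped entity now combine the generated ResourceOption helpers with EmitForResource. The scraper.go change below follows this shape; a condensed sketch, assuming it lives in the receiver package, with queueSample standing in for models.Queue (the int64 value type for Consumers is an assumption of this sketch).

package rabbitmqreceiver // illustrative sketch, not part of the patch

import (
	"go.opentelemetry.io/collector/model/pdata"

	"github.com/open-telemetry/opentelemetry-collector-contrib/receiver/rabbitmqreceiver/internal/metadata"
)

// queueSample is a stand-in for the real models.Queue type.
type queueSample struct {
	Name, Node, VHost string
	Consumers         int64
}

// emitPerQueue records one queue's data points and flushes them under a dedicated
// resource, then returns everything accumulated across all queues as one payload.
func emitPerQueue(mb *metadata.MetricsBuilder, now pdata.Timestamp, queues []queueSample) pdata.Metrics {
	for _, q := range queues {
		mb.RecordRabbitmqConsumerCountDataPoint(now, q.Consumers)
		// Attach the per-queue resource attributes via the generated ResourceOption helpers.
		mb.EmitForResource(
			metadata.WithRabbitmqQueueName(q.Name),
			metadata.WithRabbitmqNodeName(q.Node),
			metadata.WithRabbitmqVhostName(q.VHost),
		)
	}
	// Emit moves the buffered per-queue ResourceMetrics into a single pdata.Metrics.
	return mb.Emit()
}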
diff --git a/receiver/rabbitmqreceiver/scraper.go b/receiver/rabbitmqreceiver/scraper.go index fb78f08f5af6..3ca16edcd452 100644 --- a/receiver/rabbitmqreceiver/scraper.go +++ b/receiver/rabbitmqreceiver/scraper.go @@ -27,8 +27,6 @@ import ( "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/rabbitmqreceiver/internal/models" ) -const instrumentationLibraryName = "otelcol/rabbitmqreceiver" - var errClientNotInit = errors.New("client not initialized") // Names of metrics in message_stats @@ -74,41 +72,30 @@ func (r *rabbitmqScraper) start(ctx context.Context, host component.Host) (err e // scrape collects metrics from the RabbitMQ API func (r *rabbitmqScraper) scrape(ctx context.Context) (pdata.Metrics, error) { - metrics := pdata.NewMetrics() now := pdata.NewTimestampFromTime(time.Now()) - rms := metrics.ResourceMetrics() // Validate we don't attempt to scrape without initializing the client if r.client == nil { - return metrics, errClientNotInit + return pdata.NewMetrics(), errClientNotInit } // Get queues for processing queues, err := r.client.GetQueues(ctx) if err != nil { - return metrics, err + return pdata.NewMetrics(), err } // Collect metrics for each queue for _, queue := range queues { - r.collectQueue(queue, now, rms) + r.collectQueue(queue, now) } - return metrics, nil + return r.mb.Emit(), nil } // collectQueue collects metrics -func (r *rabbitmqScraper) collectQueue(queue *models.Queue, now pdata.Timestamp, rms pdata.ResourceMetricsSlice) { - resourceMetric := rms.AppendEmpty() - resourceAttrs := resourceMetric.Resource().Attributes() - resourceAttrs.InsertString(metadata.A.RabbitmqQueueName, queue.Name) - resourceAttrs.InsertString(metadata.A.RabbitmqNodeName, queue.Node) - resourceAttrs.InsertString(metadata.A.RabbitmqVhostName, queue.VHost) - - ilms := resourceMetric.InstrumentationLibraryMetrics().AppendEmpty() - ilms.InstrumentationLibrary().SetName(instrumentationLibraryName) - +func (r *rabbitmqScraper) collectQueue(queue *models.Queue, now pdata.Timestamp) { r.mb.RecordRabbitmqConsumerCountDataPoint(now, queue.Consumers) r.mb.RecordRabbitmqMessageCurrentDataPoint(now, queue.UnacknowledgedMessages, metadata.AttributeMessageState.Unacknowledged) r.mb.RecordRabbitmqMessageCurrentDataPoint(now, queue.ReadyMessages, metadata.AttributeMessageState.Ready) @@ -139,10 +126,13 @@ func (r *rabbitmqScraper) collectQueue(queue *models.Queue, now pdata.Timestamp, r.mb.RecordRabbitmqMessageAcknowledgedDataPoint(now, val64) case dropUnroutableStat: r.mb.RecordRabbitmqMessageDroppedDataPoint(now, val64) - } } - r.mb.Emit(ilms.Metrics()) + r.mb.EmitForResource( + metadata.WithRabbitmqQueueName(queue.Name), + metadata.WithRabbitmqNodeName(queue.Node), + metadata.WithRabbitmqVhostName(queue.VHost), + ) } // convertValToInt64 values from message state unmarshal as float64s but should be int64. 
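Note on behavior: the `ils.Metrics().Len() > 0` guard in the generated EmitForResource means a resource for which nothing was recorded is not buffered at all, so a scrape that produces no data points yields an empty pdata.Metrics rather than a ResourceMetrics entry carrying only attributes. A test-style sketch of that behavior, assuming the zero value of MetricsSettings leaves every metric disabled and that the sketch lives next to the receiver's generated metadata package.

package rabbitmqreceiver // illustrative sketch, not part of the patch

import (
	"testing"

	"github.com/stretchr/testify/assert"

	"github.com/open-telemetry/opentelemetry-collector-contrib/receiver/rabbitmqreceiver/internal/metadata"
)

// TestEmitForResourceSkipsEmptyResource illustrates the Len() > 0 guard in the
// generated EmitForResource: with nothing recorded, no ResourceMetrics entry is
// buffered even though resource attributes were supplied.
func TestEmitForResourceSkipsEmptyResource(t *testing.T) {
	// Zero-value MetricsSettings is assumed to disable all metrics for this sketch.
	mb := metadata.NewMetricsBuilder(metadata.MetricsSettings{})

	mb.EmitForResource(metadata.WithRabbitmqQueueName("unused-queue"))

	assert.Equal(t, 0, mb.Emit().ResourceMetrics().Len())
}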
diff --git a/receiver/redisreceiver/documentation.md b/receiver/redisreceiver/documentation.md index f4eb26203638..a453165955d8 100644 --- a/receiver/redisreceiver/documentation.md +++ b/receiver/redisreceiver/documentation.md @@ -47,7 +47,7 @@ metrics: enabled: ``` -## Attributes +## Metric attributes | Name | Description | | ---- | ----------- | diff --git a/receiver/redisreceiver/internal/metadata/generated_metrics_v2.go b/receiver/redisreceiver/internal/metadata/generated_metrics_v2.go index 0e98e9d36e77..2091f2e9961e 100644 --- a/receiver/redisreceiver/internal/metadata/generated_metrics_v2.go +++ b/receiver/redisreceiver/internal/metadata/generated_metrics_v2.go @@ -1600,7 +1600,10 @@ func newMetricRedisUptime(settings MetricSettings) metricRedisUptime { // MetricsBuilder provides an interface for scrapers to report metrics while taking care of all the transformations // required to produce metric representation defined in metadata and user settings. type MetricsBuilder struct { - startTime pdata.Timestamp + startTime pdata.Timestamp // start time that will be applied to all recorded data points. + metricsCapacity int // maximum observed number of metrics per resource. + resourceCapacity int // maximum observed number of resource attributes. + metricsBuffer pdata.Metrics // accumulates metrics data before emitting. metricRedisClientsBlocked metricRedisClientsBlocked metricRedisClientsConnected metricRedisClientsConnected metricRedisClientsMaxInputBuffer metricRedisClientsMaxInputBuffer @@ -1645,6 +1648,7 @@ func WithStartTime(startTime pdata.Timestamp) metricBuilderOption { func NewMetricsBuilder(settings MetricsSettings, options ...metricBuilderOption) *MetricsBuilder { mb := &MetricsBuilder{ startTime: pdata.NewTimestampFromTime(time.Now()), + metricsBuffer: pdata.NewMetrics(), metricRedisClientsBlocked: newMetricRedisClientsBlocked(settings.RedisClientsBlocked), metricRedisClientsConnected: newMetricRedisClientsConnected(settings.RedisClientsConnected), metricRedisClientsMaxInputBuffer: newMetricRedisClientsMaxInputBuffer(settings.RedisClientsMaxInputBuffer), @@ -1681,39 +1685,75 @@ func NewMetricsBuilder(settings MetricsSettings, options ...metricBuilderOption) return mb } -// Emit appends generated metrics to a pdata.MetricsSlice and updates the internal state to be ready for recording -// another set of data points. This function will be doing all transformations required to produce metric representation -// defined in metadata and user settings, e.g. delta/cumulative translation. 
-func (mb *MetricsBuilder) Emit(metrics pdata.MetricSlice) { - mb.metricRedisClientsBlocked.emit(metrics) - mb.metricRedisClientsConnected.emit(metrics) - mb.metricRedisClientsMaxInputBuffer.emit(metrics) - mb.metricRedisClientsMaxOutputBuffer.emit(metrics) - mb.metricRedisCommands.emit(metrics) - mb.metricRedisCommandsProcessed.emit(metrics) - mb.metricRedisConnectionsReceived.emit(metrics) - mb.metricRedisConnectionsRejected.emit(metrics) - mb.metricRedisCPUTime.emit(metrics) - mb.metricRedisDbAvgTTL.emit(metrics) - mb.metricRedisDbExpires.emit(metrics) - mb.metricRedisDbKeys.emit(metrics) - mb.metricRedisKeysEvicted.emit(metrics) - mb.metricRedisKeysExpired.emit(metrics) - mb.metricRedisKeyspaceHits.emit(metrics) - mb.metricRedisKeyspaceMisses.emit(metrics) - mb.metricRedisLatestFork.emit(metrics) - mb.metricRedisMemoryFragmentationRatio.emit(metrics) - mb.metricRedisMemoryLua.emit(metrics) - mb.metricRedisMemoryPeak.emit(metrics) - mb.metricRedisMemoryRss.emit(metrics) - mb.metricRedisMemoryUsed.emit(metrics) - mb.metricRedisNetInput.emit(metrics) - mb.metricRedisNetOutput.emit(metrics) - mb.metricRedisRdbChangesSinceLastSave.emit(metrics) - mb.metricRedisReplicationBacklogFirstByteOffset.emit(metrics) - mb.metricRedisReplicationOffset.emit(metrics) - mb.metricRedisSlavesConnected.emit(metrics) - mb.metricRedisUptime.emit(metrics) +// updateCapacity updates max length of metrics and resource attributes that will be used for the slice capacity. +func (mb *MetricsBuilder) updateCapacity(rm pdata.ResourceMetrics) { + if mb.metricsCapacity < rm.InstrumentationLibraryMetrics().At(0).Metrics().Len() { + mb.metricsCapacity = rm.InstrumentationLibraryMetrics().At(0).Metrics().Len() + } + if mb.resourceCapacity < rm.Resource().Attributes().Len() { + mb.resourceCapacity = rm.Resource().Attributes().Len() + } +} + +// ResourceOption applies changes to provided resource. +type ResourceOption func(pdata.Resource) + +// EmitForResource saves all the generated metrics under a new resource and updates the internal state to be ready for +// recording another set of data points as part of another resource. This function can be helpful when one scraper +// needs to emit metrics from several resources. Otherwise calling this function is not required, +// just `Emit` function can be called instead. Resource attributes should be provided as ResourceOption arguments. 
+func (mb *MetricsBuilder) EmitForResource(ro ...ResourceOption) { + rm := pdata.NewResourceMetrics() + rm.Resource().Attributes().EnsureCapacity(mb.resourceCapacity) + for _, op := range ro { + op(rm.Resource()) + } + ils := rm.InstrumentationLibraryMetrics().AppendEmpty() + ils.InstrumentationLibrary().SetName("otelcol/redisreceiver") + ils.Metrics().EnsureCapacity(mb.metricsCapacity) + mb.metricRedisClientsBlocked.emit(ils.Metrics()) + mb.metricRedisClientsConnected.emit(ils.Metrics()) + mb.metricRedisClientsMaxInputBuffer.emit(ils.Metrics()) + mb.metricRedisClientsMaxOutputBuffer.emit(ils.Metrics()) + mb.metricRedisCommands.emit(ils.Metrics()) + mb.metricRedisCommandsProcessed.emit(ils.Metrics()) + mb.metricRedisConnectionsReceived.emit(ils.Metrics()) + mb.metricRedisConnectionsRejected.emit(ils.Metrics()) + mb.metricRedisCPUTime.emit(ils.Metrics()) + mb.metricRedisDbAvgTTL.emit(ils.Metrics()) + mb.metricRedisDbExpires.emit(ils.Metrics()) + mb.metricRedisDbKeys.emit(ils.Metrics()) + mb.metricRedisKeysEvicted.emit(ils.Metrics()) + mb.metricRedisKeysExpired.emit(ils.Metrics()) + mb.metricRedisKeyspaceHits.emit(ils.Metrics()) + mb.metricRedisKeyspaceMisses.emit(ils.Metrics()) + mb.metricRedisLatestFork.emit(ils.Metrics()) + mb.metricRedisMemoryFragmentationRatio.emit(ils.Metrics()) + mb.metricRedisMemoryLua.emit(ils.Metrics()) + mb.metricRedisMemoryPeak.emit(ils.Metrics()) + mb.metricRedisMemoryRss.emit(ils.Metrics()) + mb.metricRedisMemoryUsed.emit(ils.Metrics()) + mb.metricRedisNetInput.emit(ils.Metrics()) + mb.metricRedisNetOutput.emit(ils.Metrics()) + mb.metricRedisRdbChangesSinceLastSave.emit(ils.Metrics()) + mb.metricRedisReplicationBacklogFirstByteOffset.emit(ils.Metrics()) + mb.metricRedisReplicationOffset.emit(ils.Metrics()) + mb.metricRedisSlavesConnected.emit(ils.Metrics()) + mb.metricRedisUptime.emit(ils.Metrics()) + if ils.Metrics().Len() > 0 { + mb.updateCapacity(rm) + rm.MoveTo(mb.metricsBuffer.ResourceMetrics().AppendEmpty()) + } +} + +// Emit returns all the metrics accumulated by the metrics builder and updates the internal state to be ready for +// recording another set of metrics. This function will be responsible for applying all the transformations required to +// produce metric representation defined in metadata and user settings, e.g. delta or cumulative. +func (mb *MetricsBuilder) Emit(ro ...ResourceOption) pdata.Metrics { + mb.EmitForResource(ro...) + metrics := pdata.NewMetrics() + mb.metricsBuffer.MoveTo(metrics) + return metrics } // RecordRedisClientsBlockedDataPoint adds a data point to redis.clients.blocked metric. @@ -1870,16 +1910,6 @@ func (mb *MetricsBuilder) Reset(options ...metricBuilderOption) { } } -// NewMetricData creates new pdata.Metrics and sets the InstrumentationLibrary -// name on the ResourceMetrics. -func (mb *MetricsBuilder) NewMetricData() pdata.Metrics { - md := pdata.NewMetrics() - rm := md.ResourceMetrics().AppendEmpty() - ilm := rm.InstrumentationLibraryMetrics().AppendEmpty() - ilm.InstrumentationLibrary().SetName("otelcol/redisreceiver") - return md -} - // Attributes contains the possible metric attributes that can be used. 
var Attributes = struct { // Db (Redis database identifier) diff --git a/receiver/redisreceiver/redis_scraper.go b/receiver/redisreceiver/redis_scraper.go index 6692ec80cc68..0ff84bb72073 100644 --- a/receiver/redisreceiver/redis_scraper.go +++ b/receiver/redisreceiver/redis_scraper.go @@ -84,14 +84,10 @@ func (rs *redisScraper) Scrape(context.Context) (pdata.Metrics, error) { } rs.uptime = currentUptime - md := rs.mb.NewMetricData() - rs.recordCommonMetrics(now, inf) rs.recordKeyspaceMetrics(now, inf) - rs.mb.Emit(md.ResourceMetrics().At(0).InstrumentationLibraryMetrics().At(0).Metrics()) - - return md, nil + return rs.mb.Emit(), nil } // recordCommonMetrics records metrics from Redis info key-value pairs. diff --git a/receiver/zookeeperreceiver/documentation.md b/receiver/zookeeperreceiver/documentation.md index 08079ab4e047..316aee43a3fe 100644 --- a/receiver/zookeeperreceiver/documentation.md +++ b/receiver/zookeeperreceiver/documentation.md @@ -33,11 +33,16 @@ metrics: enabled: ``` -## Attributes +## Resource attributes + +| Name | Description | Type | +| ---- | ----------- | ---- | +| server.state | State of the Zookeeper server (leader, standalone or follower). | String | +| zk.version | Zookeeper version of the instance. | String | + +## Metric attributes | Name | Description | | ---- | ----------- | | direction | State of a packet based on io direction. | -| server.state | State of the Zookeeper server (leader, standalone or follower). | | state | State of followers | -| zk.version | Zookeeper version of the instance. | diff --git a/receiver/zookeeperreceiver/internal/metadata/generated_metrics_v2.go b/receiver/zookeeperreceiver/internal/metadata/generated_metrics_v2.go index a1569941fdf6..be272e46dc99 100644 --- a/receiver/zookeeperreceiver/internal/metadata/generated_metrics_v2.go +++ b/receiver/zookeeperreceiver/internal/metadata/generated_metrics_v2.go @@ -846,7 +846,10 @@ func newMetricZookeeperZnodeCount(settings MetricSettings) metricZookeeperZnodeC // MetricsBuilder provides an interface for scrapers to report metrics while taking care of all the transformations // required to produce metric representation defined in metadata and user settings. type MetricsBuilder struct { - startTime pdata.Timestamp + startTime pdata.Timestamp // start time that will be applied to all recorded data points. + metricsCapacity int // maximum observed number of metrics per resource. + resourceCapacity int // maximum observed number of resource attributes. + metricsBuffer pdata.Metrics // accumulates metrics data before emitting. 
metricZookeeperConnectionActive metricZookeeperConnectionActive metricZookeeperDataTreeEphemeralNodeCount metricZookeeperDataTreeEphemeralNodeCount metricZookeeperDataTreeSize metricZookeeperDataTreeSize @@ -876,8 +879,9 @@ func WithStartTime(startTime pdata.Timestamp) metricBuilderOption { func NewMetricsBuilder(settings MetricsSettings, options ...metricBuilderOption) *MetricsBuilder { mb := &MetricsBuilder{ - startTime: pdata.NewTimestampFromTime(time.Now()), - metricZookeeperConnectionActive: newMetricZookeeperConnectionActive(settings.ZookeeperConnectionActive), + startTime: pdata.NewTimestampFromTime(time.Now()), + metricsBuffer: pdata.NewMetrics(), + metricZookeeperConnectionActive: newMetricZookeeperConnectionActive(settings.ZookeeperConnectionActive), metricZookeeperDataTreeEphemeralNodeCount: newMetricZookeeperDataTreeEphemeralNodeCount(settings.ZookeeperDataTreeEphemeralNodeCount), metricZookeeperDataTreeSize: newMetricZookeeperDataTreeSize(settings.ZookeeperDataTreeSize), metricZookeeperFileDescriptorLimit: newMetricZookeeperFileDescriptorLimit(settings.ZookeeperFileDescriptorLimit), @@ -899,25 +903,75 @@ func NewMetricsBuilder(settings MetricsSettings, options ...metricBuilderOption) return mb } -// Emit appends generated metrics to a pdata.MetricsSlice and updates the internal state to be ready for recording -// another set of data points. This function will be doing all transformations required to produce metric representation -// defined in metadata and user settings, e.g. delta/cumulative translation. -func (mb *MetricsBuilder) Emit(metrics pdata.MetricSlice) { - mb.metricZookeeperConnectionActive.emit(metrics) - mb.metricZookeeperDataTreeEphemeralNodeCount.emit(metrics) - mb.metricZookeeperDataTreeSize.emit(metrics) - mb.metricZookeeperFileDescriptorLimit.emit(metrics) - mb.metricZookeeperFileDescriptorOpen.emit(metrics) - mb.metricZookeeperFollowerCount.emit(metrics) - mb.metricZookeeperFsyncExceededThresholdCount.emit(metrics) - mb.metricZookeeperLatencyAvg.emit(metrics) - mb.metricZookeeperLatencyMax.emit(metrics) - mb.metricZookeeperLatencyMin.emit(metrics) - mb.metricZookeeperPacketCount.emit(metrics) - mb.metricZookeeperRequestActive.emit(metrics) - mb.metricZookeeperSyncPending.emit(metrics) - mb.metricZookeeperWatchCount.emit(metrics) - mb.metricZookeeperZnodeCount.emit(metrics) +// updateCapacity updates max length of metrics and resource attributes that will be used for the slice capacity. +func (mb *MetricsBuilder) updateCapacity(rm pdata.ResourceMetrics) { + if mb.metricsCapacity < rm.InstrumentationLibraryMetrics().At(0).Metrics().Len() { + mb.metricsCapacity = rm.InstrumentationLibraryMetrics().At(0).Metrics().Len() + } + if mb.resourceCapacity < rm.Resource().Attributes().Len() { + mb.resourceCapacity = rm.Resource().Attributes().Len() + } +} + +// ResourceOption applies changes to provided resource. +type ResourceOption func(pdata.Resource) + +// WithServerState sets provided value as "server.state" attribute for current resource. +func WithServerState(val string) ResourceOption { + return func(r pdata.Resource) { + r.Attributes().UpsertString("server.state", val) + } +} + +// WithZkVersion sets provided value as "zk.version" attribute for current resource. 
+func WithZkVersion(val string) ResourceOption {
+	return func(r pdata.Resource) {
+		r.Attributes().UpsertString("zk.version", val)
+	}
+}
+
+// EmitForResource saves all the generated metrics under a new resource and updates the internal state to be ready for
+// recording another set of data points as part of another resource. This function can be helpful when one scraper
+// needs to emit metrics from several resources. Otherwise calling this function is not required,
+// just `Emit` function can be called instead. Resource attributes should be provided as ResourceOption arguments.
+func (mb *MetricsBuilder) EmitForResource(ro ...ResourceOption) {
+	rm := pdata.NewResourceMetrics()
+	rm.Resource().Attributes().EnsureCapacity(mb.resourceCapacity)
+	for _, op := range ro {
+		op(rm.Resource())
+	}
+	ils := rm.InstrumentationLibraryMetrics().AppendEmpty()
+	ils.InstrumentationLibrary().SetName("otelcol/zookeeperreceiver")
+	ils.Metrics().EnsureCapacity(mb.metricsCapacity)
+	mb.metricZookeeperConnectionActive.emit(ils.Metrics())
+	mb.metricZookeeperDataTreeEphemeralNodeCount.emit(ils.Metrics())
+	mb.metricZookeeperDataTreeSize.emit(ils.Metrics())
+	mb.metricZookeeperFileDescriptorLimit.emit(ils.Metrics())
+	mb.metricZookeeperFileDescriptorOpen.emit(ils.Metrics())
+	mb.metricZookeeperFollowerCount.emit(ils.Metrics())
+	mb.metricZookeeperFsyncExceededThresholdCount.emit(ils.Metrics())
+	mb.metricZookeeperLatencyAvg.emit(ils.Metrics())
+	mb.metricZookeeperLatencyMax.emit(ils.Metrics())
+	mb.metricZookeeperLatencyMin.emit(ils.Metrics())
+	mb.metricZookeeperPacketCount.emit(ils.Metrics())
+	mb.metricZookeeperRequestActive.emit(ils.Metrics())
+	mb.metricZookeeperSyncPending.emit(ils.Metrics())
+	mb.metricZookeeperWatchCount.emit(ils.Metrics())
+	mb.metricZookeeperZnodeCount.emit(ils.Metrics())
+	if ils.Metrics().Len() > 0 {
+		mb.updateCapacity(rm)
+		rm.MoveTo(mb.metricsBuffer.ResourceMetrics().AppendEmpty())
+	}
+}
+
+// Emit returns all the metrics accumulated by the metrics builder and updates the internal state to be ready for
+// recording another set of metrics. This function will be responsible for applying all the transformations required to
+// produce metric representation defined in metadata and user settings, e.g. delta or cumulative.
+func (mb *MetricsBuilder) Emit(ro ...ResourceOption) pdata.Metrics {
+	mb.EmitForResource(ro...)
+	metrics := pdata.NewMetrics()
+	mb.metricsBuffer.MoveTo(metrics)
+	return metrics
 }
 
 // RecordZookeeperConnectionActiveDataPoint adds a data point to zookeeper.connection.active metric.
@@ -1004,31 +1058,15 @@ func (mb *MetricsBuilder) Reset(options ...metricBuilderOption) {
 	}
 }
 
-// NewMetricData creates new pdata.Metrics and sets the InstrumentationLibrary
-// name on the ResourceMetrics.
-func (mb *MetricsBuilder) NewMetricData() pdata.Metrics {
-	md := pdata.NewMetrics()
-	rm := md.ResourceMetrics().AppendEmpty()
-	ilm := rm.InstrumentationLibraryMetrics().AppendEmpty()
-	ilm.InstrumentationLibrary().SetName("otelcol/zookeeperreceiver")
-	return md
-}
-
 // Attributes contains the possible metric attributes that can be used.
 var Attributes = struct {
 	// Direction (State of a packet based on io direction.)
 	Direction string
-	// ServerState (State of the Zookeeper server (leader, standalone or follower).)
-	ServerState string
 	// State (State of followers)
 	State string
-	// ZkVersion (Zookeeper version of the instance.)
-	ZkVersion string
 }{
 	"direction",
-	"server.state",
 	"state",
-	"zk.version",
 }
 
 // A is an alias for Attributes.
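For orientation, a minimal sketch (not part of this patch) of how code inside the zookeeperreceiver package could consume the regenerated builder: data points are recorded on the builder as before, and a single `Emit` call now attaches the resource attributes and returns the assembled `pdata.Metrics`, so scrapers no longer touch `NewMetricData` or the `ResourceMetrics` slice directly. The helper name and parameter values are invented for illustration; only `Emit`, `WithServerState`, and `WithZkVersion` come from the generated code above.

```go
package zookeeperreceiver // placement assumed; internal/metadata is only importable from within the receiver

import (
	"go.opentelemetry.io/collector/model/pdata"

	"github.com/open-telemetry/opentelemetry-collector-contrib/receiver/zookeeperreceiver/internal/metadata"
)

// emitWithResource is a hypothetical helper: the scraper records data points on mb
// elsewhere, then hands the resource attributes to Emit as options and gets back
// the fully assembled pdata.Metrics.
func emitWithResource(mb *metadata.MetricsBuilder, state, version string) pdata.Metrics {
	return mb.Emit(
		metadata.WithServerState(state), // generated from resource_attributes in metadata.yaml (see below)
		metadata.WithZkVersion(version),
	)
}
```

Passing resource attributes as functional options keeps ownership of the `pdata.Resource` layout in the generated code rather than in each scraper.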
diff --git a/receiver/zookeeperreceiver/metadata.yaml b/receiver/zookeeperreceiver/metadata.yaml
index 4cd0cd53864c..3778c6614287 100644
--- a/receiver/zookeeperreceiver/metadata.yaml
+++ b/receiver/zookeeperreceiver/metadata.yaml
@@ -1,10 +1,14 @@
 name: zookeeperreceiver
 
-attributes:
+resource_attributes:
   server.state:
     description: State of the Zookeeper server (leader, standalone or follower).
+    type: string
   zk.version:
     description: Zookeeper version of the instance.
+    type: string
+
+attributes:
   state:
     description: State of followers
     enum:
diff --git a/receiver/zookeeperreceiver/scraper.go b/receiver/zookeeperreceiver/scraper.go
index e522f7705e6c..2a05bc81e1a0 100644
--- a/receiver/zookeeperreceiver/scraper.go
+++ b/receiver/zookeeperreceiver/scraper.go
@@ -118,10 +118,9 @@ func (z *zookeeperMetricsScraper) getResourceMetrics(conn net.Conn) (pdata.Metri
 		return pdata.NewMetrics(), err
 	}
 
-	md := z.mb.NewMetricData()
 	creator := newMetricCreator(z.mb)
 	now := pdata.NewTimestampFromTime(time.Now())
-	rm := md.ResourceMetrics().At(0)
+	resourceOpts := make([]metadata.ResourceOption, 0, 2)
 	for scanner.Scan() {
 		line := scanner.Text()
 		parts := zookeeperFormatRE.FindStringSubmatch(line)
@@ -137,10 +136,10 @@ func (z *zookeeperMetricsScraper) getResourceMetrics(conn net.Conn) (pdata.Metri
 		metricValue := parts[2]
 		switch metricKey {
 		case zkVersionKey:
-			rm.Resource().Attributes().UpsertString(metadata.Attributes.ZkVersion, metricValue)
+			resourceOpts = append(resourceOpts, metadata.WithZkVersion(metricValue))
 			continue
 		case serverStateKey:
-			rm.Resource().Attributes().UpsertString(metadata.Attributes.ServerState, metricValue)
+			resourceOpts = append(resourceOpts, metadata.WithServerState(metricValue))
 			continue
 		default:
 			// Skip metric if there is no descriptor associated with it.
@@ -164,8 +163,7 @@ func (z *zookeeperMetricsScraper) getResourceMetrics(conn net.Conn) (pdata.Metri
 	// Generate computed metrics
 	creator.generateComputedMetrics(z.logger, now)
 
-	z.mb.Emit(rm.InstrumentationLibraryMetrics().At(0).Metrics())
-	return md, nil
+	return z.mb.Emit(resourceOpts...), nil
 }
 
 func closeConnection(conn net.Conn) error {
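The zookeeper scraper above reports a single resource, so it simply forwards its accumulated options to `Emit`. For a receiver that observes several resources in one scrape, the generated `EmitForResource` shown earlier can close out each resource separately before the final `Emit`. The sketch below is hypothetical: the `observedServer` type, the loop, and the elided `Record*DataPoint` calls are illustration only, layered on the builder API introduced in this patch.

```go
package zookeeperreceiver // placement assumed, as in the previous sketch

import (
	"go.opentelemetry.io/collector/model/pdata"

	"github.com/open-telemetry/opentelemetry-collector-contrib/receiver/zookeeperreceiver/internal/metadata"
)

// observedServer is an invented per-server snapshot used only for this example.
type observedServer struct {
	state   string
	version string
}

// scrapeAll closes out one resource per observed server and returns everything
// accumulated in the builder as a single pdata.Metrics.
func scrapeAll(mb *metadata.MetricsBuilder, servers []observedServer) pdata.Metrics {
	for _, srv := range servers {
		// ... mb.Record*DataPoint calls for this server would go here ...
		mb.EmitForResource(
			metadata.WithServerState(srv.state),
			metadata.WithZkVersion(srv.version),
		)
	}
	// Emit moves the buffered ResourceMetrics into a fresh pdata.Metrics,
	// leaving the internal buffer empty for the next scrape.
	return mb.Emit()
}
```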