From 8dba9f54d6c0b581ea67c67b224630a4e1bc83bd Mon Sep 17 00:00:00 2001 From: Povilas Versockas Date: Fri, 15 Sep 2023 16:38:18 +0300 Subject: [PATCH] [k8sclusterreceiver] refactor metric units to follow Otel conventions --- .chloggen/k8sclusterreceiver-fix-units.yaml | 27 ++++++++++++ receiver/k8sclusterreceiver/documentation.md | 44 +++++++++---------- .../internal/cronjob/testdata/expected.yaml | 2 +- .../internal/demonset/testdata/expected.yaml | 8 ++-- .../internal/jobs/testdata/expected.yaml | 10 ++--- .../jobs/testdata/expected_empty.yaml | 6 +-- .../internal/metadata/generated_metrics.go | 44 +++++++++---------- .../metadata/generated_metrics_test.go | 44 +++++++++---------- .../replicaset/testdata/expected.yaml | 4 +- .../testdata/expected.yaml | 4 +- receiver/k8sclusterreceiver/metadata.yaml | 44 +++++++++---------- .../testdata/e2e/expected.yaml | 28 ++++++------ 12 files changed, 146 insertions(+), 119 deletions(-) create mode 100755 .chloggen/k8sclusterreceiver-fix-units.yaml diff --git a/.chloggen/k8sclusterreceiver-fix-units.yaml b/.chloggen/k8sclusterreceiver-fix-units.yaml new file mode 100755 index 000000000000..1ad7fd8dfc1a --- /dev/null +++ b/.chloggen/k8sclusterreceiver-fix-units.yaml @@ -0,0 +1,27 @@ +# Use this changelog template to create an entry for release notes. + +# One of 'breaking', 'deprecation', 'new_component', 'enhancement', 'bug_fix' +change_type: 'bug_fix' + +# The name of the component, or a single word describing the area of concern, (e.g. filelogreceiver) +component: 'k8sclusterreceiver' + +# A brief description of the change. Surround your text with quotes ("") if it needs to start with a backtick (`). +note: "Change k8scluster receiver metric units to follow otel semantic conventions" + +# Mandatory: One or more tracking issues related to the change. You can use the PR number here if no issue exists. +issues: [10553] + +# (Optional) One or more lines of additional information to render under the primary note. +# These lines will be padded with 2 spaces and then inserted directly into the document. +# Use pipe (|) for multiline entries. +subtext: + +# If your change doesn't affect end users or the exported elements of any package, +# you should instead start your pull request title with [chore] or use the "Skip Changelog" label. +# Optional: The change log or logs in which this entry should be included. +# e.g. '[user]' or '[user, api]' +# Include 'user' if the change is relevant to end users. +# Include 'api' if there is a change to a library API. 
+# Default: '[user]' +change_logs: [user] diff --git a/receiver/k8sclusterreceiver/documentation.md b/receiver/k8sclusterreceiver/documentation.md index b783d8ef8cc8..63588e48fabd 100644 --- a/receiver/k8sclusterreceiver/documentation.md +++ b/receiver/k8sclusterreceiver/documentation.md @@ -98,7 +98,7 @@ The number of actively running jobs for a cronjob | Unit | Metric Type | Value Type | | ---- | ----------- | ---------- | -| 1 | Gauge | Int | +| {job} | Gauge | Int | ### k8s.daemonset.current_scheduled_nodes @@ -106,7 +106,7 @@ Number of nodes that are running at least 1 daemon pod and are supposed to run t | Unit | Metric Type | Value Type | | ---- | ----------- | ---------- | -| 1 | Gauge | Int | +| {node} | Gauge | Int | ### k8s.daemonset.desired_scheduled_nodes @@ -114,7 +114,7 @@ Number of nodes that should be running the daemon pod (including nodes currently | Unit | Metric Type | Value Type | | ---- | ----------- | ---------- | -| 1 | Gauge | Int | +| {node} | Gauge | Int | ### k8s.daemonset.misscheduled_nodes @@ -122,7 +122,7 @@ Number of nodes that are running the daemon pod, but are not supposed to run the | Unit | Metric Type | Value Type | | ---- | ----------- | ---------- | -| 1 | Gauge | Int | +| {node} | Gauge | Int | ### k8s.daemonset.ready_nodes @@ -130,7 +130,7 @@ Number of nodes that should be running the daemon pod and have one or more of th | Unit | Metric Type | Value Type | | ---- | ----------- | ---------- | -| 1 | Gauge | Int | +| {node} | Gauge | Int | ### k8s.deployment.available @@ -154,7 +154,7 @@ Current number of pod replicas managed by this autoscaler. | Unit | Metric Type | Value Type | | ---- | ----------- | ---------- | -| 1 | Gauge | Int | +| {pod} | Gauge | Int | ### k8s.hpa.desired_replicas @@ -162,7 +162,7 @@ Desired number of pod replicas managed by this autoscaler. | Unit | Metric Type | Value Type | | ---- | ----------- | ---------- | -| 1 | Gauge | Int | +| {pod} | Gauge | Int | ### k8s.hpa.max_replicas @@ -170,7 +170,7 @@ Maximum number of replicas to which the autoscaler can scale up. | Unit | Metric Type | Value Type | | ---- | ----------- | ---------- | -| 1 | Gauge | Int | +| {pod} | Gauge | Int | ### k8s.hpa.min_replicas @@ -178,7 +178,7 @@ Minimum number of replicas to which the autoscaler can scale up. 
| Unit | Metric Type | Value Type | | ---- | ----------- | ---------- | -| 1 | Gauge | Int | +| {pod} | Gauge | Int | ### k8s.job.active_pods @@ -186,7 +186,7 @@ The number of actively running pods for a job | Unit | Metric Type | Value Type | | ---- | ----------- | ---------- | -| 1 | Gauge | Int | +| {pod} | Gauge | Int | ### k8s.job.desired_successful_pods @@ -194,7 +194,7 @@ The desired number of successfully finished pods the job should be run with | Unit | Metric Type | Value Type | | ---- | ----------- | ---------- | -| 1 | Gauge | Int | +| {pod} | Gauge | Int | ### k8s.job.failed_pods @@ -202,7 +202,7 @@ The number of pods which reached phase Failed for a job | Unit | Metric Type | Value Type | | ---- | ----------- | ---------- | -| 1 | Gauge | Int | +| {pod} | Gauge | Int | ### k8s.job.max_parallel_pods @@ -210,7 +210,7 @@ The max desired number of pods the job should run at any given time | Unit | Metric Type | Value Type | | ---- | ----------- | ---------- | -| 1 | Gauge | Int | +| {pod} | Gauge | Int | ### k8s.job.successful_pods @@ -218,7 +218,7 @@ The number of pods which reached phase Succeeded for a job | Unit | Metric Type | Value Type | | ---- | ----------- | ---------- | -| 1 | Gauge | Int | +| {pod} | Gauge | Int | ### k8s.namespace.phase @@ -242,7 +242,7 @@ Total number of available pods (ready for at least minReadySeconds) targeted by | Unit | Metric Type | Value Type | | ---- | ----------- | ---------- | -| 1 | Gauge | Int | +| {pod} | Gauge | Int | ### k8s.replicaset.desired @@ -250,7 +250,7 @@ Number of desired pods in this replicaset | Unit | Metric Type | Value Type | | ---- | ----------- | ---------- | -| 1 | Gauge | Int | +| {pod} | Gauge | Int | ### k8s.replication_controller.available @@ -258,7 +258,7 @@ Total number of available pods (ready for at least minReadySeconds) targeted by | Unit | Metric Type | Value Type | | ---- | ----------- | ---------- | -| 1 | Gauge | Int | +| {pod} | Gauge | Int | ### k8s.replication_controller.desired @@ -266,7 +266,7 @@ Number of desired pods in this replication_controller | Unit | Metric Type | Value Type | | ---- | ----------- | ---------- | -| 1 | Gauge | Int | +| {pod} | Gauge | Int | ### k8s.resource_quota.hard_limit @@ -302,7 +302,7 @@ The number of pods created by the StatefulSet controller from the StatefulSet ve | Unit | Metric Type | Value Type | | ---- | ----------- | ---------- | -| 1 | Gauge | Int | +| {pod} | Gauge | Int | ### k8s.statefulset.desired_pods @@ -310,7 +310,7 @@ Number of desired pods in the stateful set (the `spec.replicas` field) | Unit | Metric Type | Value Type | | ---- | ----------- | ---------- | -| 1 | Gauge | Int | +| {pod} | Gauge | Int | ### k8s.statefulset.ready_pods @@ -318,7 +318,7 @@ Number of pods created by the stateful set that have the `Ready` condition | Unit | Metric Type | Value Type | | ---- | ----------- | ---------- | -| 1 | Gauge | Int | +| {pod} | Gauge | Int | ### k8s.statefulset.updated_pods @@ -326,7 +326,7 @@ Number of pods created by the StatefulSet controller from the StatefulSet versio | Unit | Metric Type | Value Type | | ---- | ----------- | ---------- | -| 1 | Gauge | Int | +| {pod} | Gauge | Int | ### openshift.appliedclusterquota.limit diff --git a/receiver/k8sclusterreceiver/internal/cronjob/testdata/expected.yaml b/receiver/k8sclusterreceiver/internal/cronjob/testdata/expected.yaml index cae91467060e..708e2e6c75df 100644 --- a/receiver/k8sclusterreceiver/internal/cronjob/testdata/expected.yaml +++ 
b/receiver/k8sclusterreceiver/internal/cronjob/testdata/expected.yaml @@ -21,7 +21,7 @@ resourceMetrics: dataPoints: - asInt: "2" name: k8s.cronjob.active_jobs - unit: "1" + unit: "{job}" scope: name: otelcol/k8sclusterreceiver version: latest diff --git a/receiver/k8sclusterreceiver/internal/demonset/testdata/expected.yaml b/receiver/k8sclusterreceiver/internal/demonset/testdata/expected.yaml index c5dafb3a7824..a13a8a4eae4e 100644 --- a/receiver/k8sclusterreceiver/internal/demonset/testdata/expected.yaml +++ b/receiver/k8sclusterreceiver/internal/demonset/testdata/expected.yaml @@ -21,25 +21,25 @@ resourceMetrics: dataPoints: - asInt: "3" name: k8s.daemonset.current_scheduled_nodes - unit: "1" + unit: "{node}" - description: Number of nodes that should be running the daemon pod (including nodes currently running the daemon pod) gauge: dataPoints: - asInt: "5" name: k8s.daemonset.desired_scheduled_nodes - unit: "1" + unit: "{node}" - description: Number of nodes that are running the daemon pod, but are not supposed to run the daemon pod gauge: dataPoints: - asInt: "1" name: k8s.daemonset.misscheduled_nodes - unit: "1" + unit: "{node}" - description: Number of nodes that should be running the daemon pod and have one or more of the daemon pod running and ready gauge: dataPoints: - asInt: "2" name: k8s.daemonset.ready_nodes - unit: "1" + unit: "{node}" scope: name: otelcol/k8sclusterreceiver version: latest diff --git a/receiver/k8sclusterreceiver/internal/jobs/testdata/expected.yaml b/receiver/k8sclusterreceiver/internal/jobs/testdata/expected.yaml index dfdbef482724..e3403cf2950c 100644 --- a/receiver/k8sclusterreceiver/internal/jobs/testdata/expected.yaml +++ b/receiver/k8sclusterreceiver/internal/jobs/testdata/expected.yaml @@ -21,31 +21,31 @@ resourceMetrics: dataPoints: - asInt: "2" name: k8s.job.active_pods - unit: "1" + unit: "{pod}" - description: The number of pods which reached phase Failed for a job gauge: dataPoints: - asInt: "0" name: k8s.job.failed_pods - unit: "1" + unit: "{pod}" - description: The number of pods which reached phase Succeeded for a job gauge: dataPoints: - asInt: "3" name: k8s.job.successful_pods - unit: "1" + unit: "{pod}" - description: The desired number of successfully finished pods the job should be run with gauge: dataPoints: - asInt: "10" name: k8s.job.desired_successful_pods - unit: "1" + unit: "{pod}" - description: The max desired number of pods the job should run at any given time gauge: dataPoints: - asInt: "2" name: k8s.job.max_parallel_pods - unit: "1" + unit: "{pod}" scope: name: otelcol/k8sclusterreceiver version: latest diff --git a/receiver/k8sclusterreceiver/internal/jobs/testdata/expected_empty.yaml b/receiver/k8sclusterreceiver/internal/jobs/testdata/expected_empty.yaml index 129093763cfb..199a875229ef 100644 --- a/receiver/k8sclusterreceiver/internal/jobs/testdata/expected_empty.yaml +++ b/receiver/k8sclusterreceiver/internal/jobs/testdata/expected_empty.yaml @@ -21,19 +21,19 @@ resourceMetrics: dataPoints: - asInt: "2" name: k8s.job.active_pods - unit: "1" + unit: "{pod}" - description: The number of pods which reached phase Failed for a job gauge: dataPoints: - asInt: "0" name: k8s.job.failed_pods - unit: "1" + unit: "{pod}" - description: The number of pods which reached phase Succeeded for a job gauge: dataPoints: - asInt: "3" name: k8s.job.successful_pods - unit: "1" + unit: "{pod}" scope: name: otelcol/k8sclusterreceiver version: latest diff --git a/receiver/k8sclusterreceiver/internal/metadata/generated_metrics.go 
b/receiver/k8sclusterreceiver/internal/metadata/generated_metrics.go index 3a1485a963a0..5e3b6173755f 100644 --- a/receiver/k8sclusterreceiver/internal/metadata/generated_metrics.go +++ b/receiver/k8sclusterreceiver/internal/metadata/generated_metrics.go @@ -512,7 +512,7 @@ type metricK8sCronjobActiveJobs struct { func (m *metricK8sCronjobActiveJobs) init() { m.data.SetName("k8s.cronjob.active_jobs") m.data.SetDescription("The number of actively running jobs for a cronjob") - m.data.SetUnit("1") + m.data.SetUnit("{job}") m.data.SetEmptyGauge() } @@ -561,7 +561,7 @@ type metricK8sDaemonsetCurrentScheduledNodes struct { func (m *metricK8sDaemonsetCurrentScheduledNodes) init() { m.data.SetName("k8s.daemonset.current_scheduled_nodes") m.data.SetDescription("Number of nodes that are running at least 1 daemon pod and are supposed to run the daemon pod") - m.data.SetUnit("1") + m.data.SetUnit("{node}") m.data.SetEmptyGauge() } @@ -610,7 +610,7 @@ type metricK8sDaemonsetDesiredScheduledNodes struct { func (m *metricK8sDaemonsetDesiredScheduledNodes) init() { m.data.SetName("k8s.daemonset.desired_scheduled_nodes") m.data.SetDescription("Number of nodes that should be running the daemon pod (including nodes currently running the daemon pod)") - m.data.SetUnit("1") + m.data.SetUnit("{node}") m.data.SetEmptyGauge() } @@ -659,7 +659,7 @@ type metricK8sDaemonsetMisscheduledNodes struct { func (m *metricK8sDaemonsetMisscheduledNodes) init() { m.data.SetName("k8s.daemonset.misscheduled_nodes") m.data.SetDescription("Number of nodes that are running the daemon pod, but are not supposed to run the daemon pod") - m.data.SetUnit("1") + m.data.SetUnit("{node}") m.data.SetEmptyGauge() } @@ -708,7 +708,7 @@ type metricK8sDaemonsetReadyNodes struct { func (m *metricK8sDaemonsetReadyNodes) init() { m.data.SetName("k8s.daemonset.ready_nodes") m.data.SetDescription("Number of nodes that should be running the daemon pod and have one or more of the daemon pod running and ready") - m.data.SetUnit("1") + m.data.SetUnit("{node}") m.data.SetEmptyGauge() } @@ -855,7 +855,7 @@ type metricK8sHpaCurrentReplicas struct { func (m *metricK8sHpaCurrentReplicas) init() { m.data.SetName("k8s.hpa.current_replicas") m.data.SetDescription("Current number of pod replicas managed by this autoscaler.") - m.data.SetUnit("1") + m.data.SetUnit("{pod}") m.data.SetEmptyGauge() } @@ -904,7 +904,7 @@ type metricK8sHpaDesiredReplicas struct { func (m *metricK8sHpaDesiredReplicas) init() { m.data.SetName("k8s.hpa.desired_replicas") m.data.SetDescription("Desired number of pod replicas managed by this autoscaler.") - m.data.SetUnit("1") + m.data.SetUnit("{pod}") m.data.SetEmptyGauge() } @@ -953,7 +953,7 @@ type metricK8sHpaMaxReplicas struct { func (m *metricK8sHpaMaxReplicas) init() { m.data.SetName("k8s.hpa.max_replicas") m.data.SetDescription("Maximum number of replicas to which the autoscaler can scale up.") - m.data.SetUnit("1") + m.data.SetUnit("{pod}") m.data.SetEmptyGauge() } @@ -1002,7 +1002,7 @@ type metricK8sHpaMinReplicas struct { func (m *metricK8sHpaMinReplicas) init() { m.data.SetName("k8s.hpa.min_replicas") m.data.SetDescription("Minimum number of replicas to which the autoscaler can scale up.") - m.data.SetUnit("1") + m.data.SetUnit("{pod}") m.data.SetEmptyGauge() } @@ -1051,7 +1051,7 @@ type metricK8sJobActivePods struct { func (m *metricK8sJobActivePods) init() { m.data.SetName("k8s.job.active_pods") m.data.SetDescription("The number of actively running pods for a job") - m.data.SetUnit("1") + m.data.SetUnit("{pod}") 
m.data.SetEmptyGauge() } @@ -1100,7 +1100,7 @@ type metricK8sJobDesiredSuccessfulPods struct { func (m *metricK8sJobDesiredSuccessfulPods) init() { m.data.SetName("k8s.job.desired_successful_pods") m.data.SetDescription("The desired number of successfully finished pods the job should be run with") - m.data.SetUnit("1") + m.data.SetUnit("{pod}") m.data.SetEmptyGauge() } @@ -1149,7 +1149,7 @@ type metricK8sJobFailedPods struct { func (m *metricK8sJobFailedPods) init() { m.data.SetName("k8s.job.failed_pods") m.data.SetDescription("The number of pods which reached phase Failed for a job") - m.data.SetUnit("1") + m.data.SetUnit("{pod}") m.data.SetEmptyGauge() } @@ -1198,7 +1198,7 @@ type metricK8sJobMaxParallelPods struct { func (m *metricK8sJobMaxParallelPods) init() { m.data.SetName("k8s.job.max_parallel_pods") m.data.SetDescription("The max desired number of pods the job should run at any given time") - m.data.SetUnit("1") + m.data.SetUnit("{pod}") m.data.SetEmptyGauge() } @@ -1247,7 +1247,7 @@ type metricK8sJobSuccessfulPods struct { func (m *metricK8sJobSuccessfulPods) init() { m.data.SetName("k8s.job.successful_pods") m.data.SetDescription("The number of pods which reached phase Succeeded for a job") - m.data.SetUnit("1") + m.data.SetUnit("{pod}") m.data.SetEmptyGauge() } @@ -1443,7 +1443,7 @@ type metricK8sReplicasetAvailable struct { func (m *metricK8sReplicasetAvailable) init() { m.data.SetName("k8s.replicaset.available") m.data.SetDescription("Total number of available pods (ready for at least minReadySeconds) targeted by this replicaset") - m.data.SetUnit("1") + m.data.SetUnit("{pod}") m.data.SetEmptyGauge() } @@ -1492,7 +1492,7 @@ type metricK8sReplicasetDesired struct { func (m *metricK8sReplicasetDesired) init() { m.data.SetName("k8s.replicaset.desired") m.data.SetDescription("Number of desired pods in this replicaset") - m.data.SetUnit("1") + m.data.SetUnit("{pod}") m.data.SetEmptyGauge() } @@ -1541,7 +1541,7 @@ type metricK8sReplicationControllerAvailable struct { func (m *metricK8sReplicationControllerAvailable) init() { m.data.SetName("k8s.replication_controller.available") m.data.SetDescription("Total number of available pods (ready for at least minReadySeconds) targeted by this replication_controller") - m.data.SetUnit("1") + m.data.SetUnit("{pod}") m.data.SetEmptyGauge() } @@ -1590,7 +1590,7 @@ type metricK8sReplicationControllerDesired struct { func (m *metricK8sReplicationControllerDesired) init() { m.data.SetName("k8s.replication_controller.desired") m.data.SetDescription("Number of desired pods in this replication_controller") - m.data.SetUnit("1") + m.data.SetUnit("{pod}") m.data.SetEmptyGauge() } @@ -1741,7 +1741,7 @@ type metricK8sStatefulsetCurrentPods struct { func (m *metricK8sStatefulsetCurrentPods) init() { m.data.SetName("k8s.statefulset.current_pods") m.data.SetDescription("The number of pods created by the StatefulSet controller from the StatefulSet version") - m.data.SetUnit("1") + m.data.SetUnit("{pod}") m.data.SetEmptyGauge() } @@ -1790,7 +1790,7 @@ type metricK8sStatefulsetDesiredPods struct { func (m *metricK8sStatefulsetDesiredPods) init() { m.data.SetName("k8s.statefulset.desired_pods") m.data.SetDescription("Number of desired pods in the stateful set (the `spec.replicas` field)") - m.data.SetUnit("1") + m.data.SetUnit("{pod}") m.data.SetEmptyGauge() } @@ -1839,7 +1839,7 @@ type metricK8sStatefulsetReadyPods struct { func (m *metricK8sStatefulsetReadyPods) init() { m.data.SetName("k8s.statefulset.ready_pods") m.data.SetDescription("Number of pods 
created by the stateful set that have the `Ready` condition") - m.data.SetUnit("1") + m.data.SetUnit("{pod}") m.data.SetEmptyGauge() } @@ -1888,7 +1888,7 @@ type metricK8sStatefulsetUpdatedPods struct { func (m *metricK8sStatefulsetUpdatedPods) init() { m.data.SetName("k8s.statefulset.updated_pods") m.data.SetDescription("Number of pods created by the StatefulSet controller from the StatefulSet version") - m.data.SetUnit("1") + m.data.SetUnit("{pod}") m.data.SetEmptyGauge() } diff --git a/receiver/k8sclusterreceiver/internal/metadata/generated_metrics_test.go b/receiver/k8sclusterreceiver/internal/metadata/generated_metrics_test.go index a441e679f16a..415736675923 100644 --- a/receiver/k8sclusterreceiver/internal/metadata/generated_metrics_test.go +++ b/receiver/k8sclusterreceiver/internal/metadata/generated_metrics_test.go @@ -407,7 +407,7 @@ func TestMetricsBuilder(t *testing.T) { assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) assert.Equal(t, "The number of actively running jobs for a cronjob", ms.At(i).Description()) - assert.Equal(t, "1", ms.At(i).Unit()) + assert.Equal(t, "{job}", ms.At(i).Unit()) dp := ms.At(i).Gauge().DataPoints().At(0) assert.Equal(t, start, dp.StartTimestamp()) assert.Equal(t, ts, dp.Timestamp()) @@ -419,7 +419,7 @@ func TestMetricsBuilder(t *testing.T) { assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) assert.Equal(t, "Number of nodes that are running at least 1 daemon pod and are supposed to run the daemon pod", ms.At(i).Description()) - assert.Equal(t, "1", ms.At(i).Unit()) + assert.Equal(t, "{node}", ms.At(i).Unit()) dp := ms.At(i).Gauge().DataPoints().At(0) assert.Equal(t, start, dp.StartTimestamp()) assert.Equal(t, ts, dp.Timestamp()) @@ -431,7 +431,7 @@ func TestMetricsBuilder(t *testing.T) { assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) assert.Equal(t, "Number of nodes that should be running the daemon pod (including nodes currently running the daemon pod)", ms.At(i).Description()) - assert.Equal(t, "1", ms.At(i).Unit()) + assert.Equal(t, "{node}", ms.At(i).Unit()) dp := ms.At(i).Gauge().DataPoints().At(0) assert.Equal(t, start, dp.StartTimestamp()) assert.Equal(t, ts, dp.Timestamp()) @@ -443,7 +443,7 @@ func TestMetricsBuilder(t *testing.T) { assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) assert.Equal(t, "Number of nodes that are running the daemon pod, but are not supposed to run the daemon pod", ms.At(i).Description()) - assert.Equal(t, "1", ms.At(i).Unit()) + assert.Equal(t, "{node}", ms.At(i).Unit()) dp := ms.At(i).Gauge().DataPoints().At(0) assert.Equal(t, start, dp.StartTimestamp()) assert.Equal(t, ts, dp.Timestamp()) @@ -455,7 +455,7 @@ func TestMetricsBuilder(t *testing.T) { assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) assert.Equal(t, "Number of nodes that should be running the daemon pod and have one or more of the daemon pod running and ready", ms.At(i).Description()) - assert.Equal(t, "1", ms.At(i).Unit()) + assert.Equal(t, "{node}", ms.At(i).Unit()) dp := ms.At(i).Gauge().DataPoints().At(0) assert.Equal(t, start, dp.StartTimestamp()) assert.Equal(t, ts, dp.Timestamp()) @@ -491,7 +491,7 @@ func TestMetricsBuilder(t *testing.T) { assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) assert.Equal(t, 1, 
ms.At(i).Gauge().DataPoints().Len()) assert.Equal(t, "Current number of pod replicas managed by this autoscaler.", ms.At(i).Description()) - assert.Equal(t, "1", ms.At(i).Unit()) + assert.Equal(t, "{pod}", ms.At(i).Unit()) dp := ms.At(i).Gauge().DataPoints().At(0) assert.Equal(t, start, dp.StartTimestamp()) assert.Equal(t, ts, dp.Timestamp()) @@ -503,7 +503,7 @@ func TestMetricsBuilder(t *testing.T) { assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) assert.Equal(t, "Desired number of pod replicas managed by this autoscaler.", ms.At(i).Description()) - assert.Equal(t, "1", ms.At(i).Unit()) + assert.Equal(t, "{pod}", ms.At(i).Unit()) dp := ms.At(i).Gauge().DataPoints().At(0) assert.Equal(t, start, dp.StartTimestamp()) assert.Equal(t, ts, dp.Timestamp()) @@ -515,7 +515,7 @@ func TestMetricsBuilder(t *testing.T) { assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) assert.Equal(t, "Maximum number of replicas to which the autoscaler can scale up.", ms.At(i).Description()) - assert.Equal(t, "1", ms.At(i).Unit()) + assert.Equal(t, "{pod}", ms.At(i).Unit()) dp := ms.At(i).Gauge().DataPoints().At(0) assert.Equal(t, start, dp.StartTimestamp()) assert.Equal(t, ts, dp.Timestamp()) @@ -527,7 +527,7 @@ func TestMetricsBuilder(t *testing.T) { assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) assert.Equal(t, "Minimum number of replicas to which the autoscaler can scale up.", ms.At(i).Description()) - assert.Equal(t, "1", ms.At(i).Unit()) + assert.Equal(t, "{pod}", ms.At(i).Unit()) dp := ms.At(i).Gauge().DataPoints().At(0) assert.Equal(t, start, dp.StartTimestamp()) assert.Equal(t, ts, dp.Timestamp()) @@ -539,7 +539,7 @@ func TestMetricsBuilder(t *testing.T) { assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) assert.Equal(t, "The number of actively running pods for a job", ms.At(i).Description()) - assert.Equal(t, "1", ms.At(i).Unit()) + assert.Equal(t, "{pod}", ms.At(i).Unit()) dp := ms.At(i).Gauge().DataPoints().At(0) assert.Equal(t, start, dp.StartTimestamp()) assert.Equal(t, ts, dp.Timestamp()) @@ -551,7 +551,7 @@ func TestMetricsBuilder(t *testing.T) { assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) assert.Equal(t, "The desired number of successfully finished pods the job should be run with", ms.At(i).Description()) - assert.Equal(t, "1", ms.At(i).Unit()) + assert.Equal(t, "{pod}", ms.At(i).Unit()) dp := ms.At(i).Gauge().DataPoints().At(0) assert.Equal(t, start, dp.StartTimestamp()) assert.Equal(t, ts, dp.Timestamp()) @@ -563,7 +563,7 @@ func TestMetricsBuilder(t *testing.T) { assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) assert.Equal(t, "The number of pods which reached phase Failed for a job", ms.At(i).Description()) - assert.Equal(t, "1", ms.At(i).Unit()) + assert.Equal(t, "{pod}", ms.At(i).Unit()) dp := ms.At(i).Gauge().DataPoints().At(0) assert.Equal(t, start, dp.StartTimestamp()) assert.Equal(t, ts, dp.Timestamp()) @@ -575,7 +575,7 @@ func TestMetricsBuilder(t *testing.T) { assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) assert.Equal(t, "The max desired number of pods the job should run at any given time", ms.At(i).Description()) - assert.Equal(t, 
"1", ms.At(i).Unit()) + assert.Equal(t, "{pod}", ms.At(i).Unit()) dp := ms.At(i).Gauge().DataPoints().At(0) assert.Equal(t, start, dp.StartTimestamp()) assert.Equal(t, ts, dp.Timestamp()) @@ -587,7 +587,7 @@ func TestMetricsBuilder(t *testing.T) { assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) assert.Equal(t, "The number of pods which reached phase Succeeded for a job", ms.At(i).Description()) - assert.Equal(t, "1", ms.At(i).Unit()) + assert.Equal(t, "{pod}", ms.At(i).Unit()) dp := ms.At(i).Gauge().DataPoints().At(0) assert.Equal(t, start, dp.StartTimestamp()) assert.Equal(t, ts, dp.Timestamp()) @@ -635,7 +635,7 @@ func TestMetricsBuilder(t *testing.T) { assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) assert.Equal(t, "Total number of available pods (ready for at least minReadySeconds) targeted by this replicaset", ms.At(i).Description()) - assert.Equal(t, "1", ms.At(i).Unit()) + assert.Equal(t, "{pod}", ms.At(i).Unit()) dp := ms.At(i).Gauge().DataPoints().At(0) assert.Equal(t, start, dp.StartTimestamp()) assert.Equal(t, ts, dp.Timestamp()) @@ -647,7 +647,7 @@ func TestMetricsBuilder(t *testing.T) { assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) assert.Equal(t, "Number of desired pods in this replicaset", ms.At(i).Description()) - assert.Equal(t, "1", ms.At(i).Unit()) + assert.Equal(t, "{pod}", ms.At(i).Unit()) dp := ms.At(i).Gauge().DataPoints().At(0) assert.Equal(t, start, dp.StartTimestamp()) assert.Equal(t, ts, dp.Timestamp()) @@ -659,7 +659,7 @@ func TestMetricsBuilder(t *testing.T) { assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) assert.Equal(t, "Total number of available pods (ready for at least minReadySeconds) targeted by this replication_controller", ms.At(i).Description()) - assert.Equal(t, "1", ms.At(i).Unit()) + assert.Equal(t, "{pod}", ms.At(i).Unit()) dp := ms.At(i).Gauge().DataPoints().At(0) assert.Equal(t, start, dp.StartTimestamp()) assert.Equal(t, ts, dp.Timestamp()) @@ -671,7 +671,7 @@ func TestMetricsBuilder(t *testing.T) { assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) assert.Equal(t, "Number of desired pods in this replication_controller", ms.At(i).Description()) - assert.Equal(t, "1", ms.At(i).Unit()) + assert.Equal(t, "{pod}", ms.At(i).Unit()) dp := ms.At(i).Gauge().DataPoints().At(0) assert.Equal(t, start, dp.StartTimestamp()) assert.Equal(t, ts, dp.Timestamp()) @@ -713,7 +713,7 @@ func TestMetricsBuilder(t *testing.T) { assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) assert.Equal(t, "The number of pods created by the StatefulSet controller from the StatefulSet version", ms.At(i).Description()) - assert.Equal(t, "1", ms.At(i).Unit()) + assert.Equal(t, "{pod}", ms.At(i).Unit()) dp := ms.At(i).Gauge().DataPoints().At(0) assert.Equal(t, start, dp.StartTimestamp()) assert.Equal(t, ts, dp.Timestamp()) @@ -725,7 +725,7 @@ func TestMetricsBuilder(t *testing.T) { assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) assert.Equal(t, "Number of desired pods in the stateful set (the `spec.replicas` field)", ms.At(i).Description()) - assert.Equal(t, "1", ms.At(i).Unit()) + assert.Equal(t, "{pod}", ms.At(i).Unit()) dp := 
ms.At(i).Gauge().DataPoints().At(0) assert.Equal(t, start, dp.StartTimestamp()) assert.Equal(t, ts, dp.Timestamp()) @@ -737,7 +737,7 @@ func TestMetricsBuilder(t *testing.T) { assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) assert.Equal(t, "Number of pods created by the stateful set that have the `Ready` condition", ms.At(i).Description()) - assert.Equal(t, "1", ms.At(i).Unit()) + assert.Equal(t, "{pod}", ms.At(i).Unit()) dp := ms.At(i).Gauge().DataPoints().At(0) assert.Equal(t, start, dp.StartTimestamp()) assert.Equal(t, ts, dp.Timestamp()) @@ -749,7 +749,7 @@ func TestMetricsBuilder(t *testing.T) { assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) assert.Equal(t, "Number of pods created by the StatefulSet controller from the StatefulSet version", ms.At(i).Description()) - assert.Equal(t, "1", ms.At(i).Unit()) + assert.Equal(t, "{pod}", ms.At(i).Unit()) dp := ms.At(i).Gauge().DataPoints().At(0) assert.Equal(t, start, dp.StartTimestamp()) assert.Equal(t, ts, dp.Timestamp()) diff --git a/receiver/k8sclusterreceiver/internal/replicaset/testdata/expected.yaml b/receiver/k8sclusterreceiver/internal/replicaset/testdata/expected.yaml index aeac1a0fa9e7..0ff430e54e17 100644 --- a/receiver/k8sclusterreceiver/internal/replicaset/testdata/expected.yaml +++ b/receiver/k8sclusterreceiver/internal/replicaset/testdata/expected.yaml @@ -21,13 +21,13 @@ resourceMetrics: dataPoints: - asInt: "3" name: k8s.replicaset.desired - unit: "1" + unit: "{pod}" - description: Total number of available pods (ready for at least minReadySeconds) targeted by this replicaset gauge: dataPoints: - asInt: "2" name: k8s.replicaset.available - unit: "1" + unit: "{pod}" scope: name: otelcol/k8sclusterreceiver diff --git a/receiver/k8sclusterreceiver/internal/replicationcontroller/testdata/expected.yaml b/receiver/k8sclusterreceiver/internal/replicationcontroller/testdata/expected.yaml index 64df9eb4d7cf..4d3058a8a645 100644 --- a/receiver/k8sclusterreceiver/internal/replicationcontroller/testdata/expected.yaml +++ b/receiver/k8sclusterreceiver/internal/replicationcontroller/testdata/expected.yaml @@ -21,13 +21,13 @@ resourceMetrics: dataPoints: - asInt: "2" name: k8s.replication_controller.available - unit: "1" + unit: "{pod}" - description: Number of desired pods in this replication_controller gauge: dataPoints: - asInt: "1" name: k8s.replication_controller.desired - unit: "1" + unit: "{pod}" scope: name: otelcol/k8sclusterreceiver version: latest diff --git a/receiver/k8sclusterreceiver/metadata.yaml b/receiver/k8sclusterreceiver/metadata.yaml index a8351fa3757d..51878a47f627 100644 --- a/receiver/k8sclusterreceiver/metadata.yaml +++ b/receiver/k8sclusterreceiver/metadata.yaml @@ -278,91 +278,91 @@ metrics: k8s.cronjob.active_jobs: enabled: true description: The number of actively running jobs for a cronjob - unit: "1" + unit: "{job}" gauge: value_type: int k8s.daemonset.current_scheduled_nodes: enabled: true description: Number of nodes that are running at least 1 daemon pod and are supposed to run the daemon pod - unit: "1" + unit: "{node}" gauge: value_type: int k8s.daemonset.desired_scheduled_nodes: enabled: true description: Number of nodes that should be running the daemon pod (including nodes currently running the daemon pod) - unit: "1" + unit: "{node}" gauge: value_type: int k8s.daemonset.misscheduled_nodes: enabled: true description: Number of nodes that are running the daemon pod, but 
are not supposed to run the daemon pod - unit: "1" + unit: "{node}" gauge: value_type: int k8s.daemonset.ready_nodes: enabled: true description: Number of nodes that should be running the daemon pod and have one or more of the daemon pod running and ready - unit: "1" + unit: "{node}" gauge: value_type: int k8s.hpa.max_replicas: enabled: true description: Maximum number of replicas to which the autoscaler can scale up. - unit: 1 + unit: "{pod}" gauge: value_type: int k8s.hpa.min_replicas: enabled: true description: Minimum number of replicas to which the autoscaler can scale up. - unit: 1 + unit: "{pod}" gauge: value_type: int k8s.hpa.current_replicas: enabled: true description: Current number of pod replicas managed by this autoscaler. - unit: 1 + unit: "{pod}" gauge: value_type: int k8s.hpa.desired_replicas: enabled: true description: Desired number of pod replicas managed by this autoscaler. - unit: 1 + unit: "{pod}" gauge: value_type: int k8s.job.active_pods: enabled: true description: The number of actively running pods for a job - unit: "1" + unit: "{pod}" gauge: value_type: int k8s.job.desired_successful_pods: enabled: true description: The desired number of successfully finished pods the job should be run with - unit: "1" + unit: "{pod}" gauge: value_type: int k8s.job.failed_pods: enabled: true description: The number of pods which reached phase Failed for a job - unit: "1" + unit: "{pod}" gauge: value_type: int k8s.job.max_parallel_pods: enabled: true description: The max desired number of pods the job should run at any given time - unit: "1" + unit: "{pod}" gauge: value_type: int k8s.job.successful_pods: enabled: true description: The number of pods which reached phase Succeeded for a job - unit: "1" + unit: "{pod}" gauge: value_type: int @@ -376,26 +376,26 @@ metrics: k8s.replicaset.desired: enabled: true description: Number of desired pods in this replicaset - unit: "1" + unit: "{pod}" gauge: value_type: int k8s.replicaset.available: enabled: true description: Total number of available pods (ready for at least minReadySeconds) targeted by this replicaset - unit: "1" + unit: "{pod}" gauge: value_type: int k8s.replication_controller.desired: enabled: true description: Number of desired pods in this replication_controller - unit: 1 + unit: "{pod}" gauge: value_type: int k8s.replication_controller.available: enabled: true description: Total number of available pods (ready for at least minReadySeconds) targeted by this replication_controller - unit: 1 + unit: "{pod}" gauge: value_type: int @@ -419,28 +419,28 @@ metrics: k8s.statefulset.desired_pods: enabled: true description: Number of desired pods in the stateful set (the `spec.replicas` field) - unit: 1 + unit: "{pod}" gauge: value_type: int k8s.statefulset.ready_pods: enabled: true description: Number of pods created by the stateful set that have the `Ready` condition - unit: 1 + unit: "{pod}" gauge: value_type: int k8s.statefulset.current_pods: enabled: true description: The number of pods created by the StatefulSet controller from the StatefulSet version - unit: 1 + unit: "{pod}" gauge: value_type: int k8s.statefulset.updated_pods: enabled: true description: Number of pods created by the StatefulSet controller from the StatefulSet version - unit: 1 + unit: "{pod}" gauge: value_type: int diff --git a/receiver/k8sclusterreceiver/testdata/e2e/expected.yaml b/receiver/k8sclusterreceiver/testdata/e2e/expected.yaml index 4fa2ea72aa6b..9a84f2ccf4bd 100644 --- a/receiver/k8sclusterreceiver/testdata/e2e/expected.yaml +++ 
b/receiver/k8sclusterreceiver/testdata/e2e/expected.yaml @@ -166,28 +166,28 @@ resourceMetrics: - asInt: "1" timeUnixNano: "1686772769034865545" name: k8s.daemonset.current_scheduled_nodes - unit: "1" + unit: "{node}" - description: Number of nodes that should be running the daemon pod (including nodes currently running the daemon pod) gauge: dataPoints: - asInt: "1" timeUnixNano: "1686772769034865545" name: k8s.daemonset.desired_scheduled_nodes - unit: "1" + unit: "{node}" - description: Number of nodes that are running the daemon pod, but are not supposed to run the daemon pod gauge: dataPoints: - asInt: "0" timeUnixNano: "1686772769034865545" name: k8s.daemonset.misscheduled_nodes - unit: "1" + unit: "{node}" - description: Number of nodes that should be running the daemon pod and have one or more of the daemon pod running and ready gauge: dataPoints: - asInt: "1" timeUnixNano: "1686772769034865545" name: k8s.daemonset.ready_nodes - unit: "1" + unit: "{node}" scope: name: otelcol/k8sclusterreceiver version: latest @@ -214,28 +214,28 @@ resourceMetrics: - asInt: "1" timeUnixNano: "1686772769034865545" name: k8s.daemonset.current_scheduled_nodes - unit: "1" + unit: "{node}" - description: Number of nodes that should be running the daemon pod (including nodes currently running the daemon pod) gauge: dataPoints: - asInt: "1" timeUnixNano: "1686772769034865545" name: k8s.daemonset.desired_scheduled_nodes - unit: "1" + unit: "{node}" - description: Number of nodes that are running the daemon pod, but are not supposed to run the daemon pod gauge: dataPoints: - asInt: "0" timeUnixNano: "1686772769034865545" name: k8s.daemonset.misscheduled_nodes - unit: "1" + unit: "{node}" - description: Number of nodes that should be running the daemon pod and have one or more of the daemon pod running and ready gauge: dataPoints: - asInt: "1" timeUnixNano: "1686772769034865545" name: k8s.daemonset.ready_nodes - unit: "1" + unit: "{node}" scope: name: otelcol/k8sclusterreceiver version: latest @@ -364,14 +364,14 @@ resourceMetrics: - asInt: "1" timeUnixNano: "1686772769034865545" name: k8s.replicaset.desired - unit: "1" + unit: "{pod}" - description: Total number of available pods (ready for at least minReadySeconds) targeted by this replicaset gauge: dataPoints: - asInt: "1" timeUnixNano: "1686772769034865545" name: k8s.replicaset.available - unit: "1" + unit: "{pod}" scope: name: otelcol/k8sclusterreceiver version: latest @@ -398,14 +398,14 @@ resourceMetrics: - asInt: "2" timeUnixNano: "1686772769034865545" name: k8s.replicaset.desired - unit: "1" + unit: "{pod}" - description: Total number of available pods (ready for at least minReadySeconds) targeted by this replicaset gauge: dataPoints: - asInt: "2" timeUnixNano: "1686772769034865545" name: k8s.replicaset.available - unit: "1" + unit: "{pod}" scope: name: otelcol/k8sclusterreceiver version: latest @@ -432,14 +432,14 @@ resourceMetrics: - asInt: "1" timeUnixNano: "1686772769034865545" name: k8s.replicaset.desired - unit: "1" + unit: "{pod}" - description: Total number of available pods (ready for at least minReadySeconds) targeted by this replicaset gauge: dataPoints: - asInt: "1" timeUnixNano: "1686772769034865545" name: k8s.replicaset.available - unit: "1" + unit: "{pod}" scope: name: otelcol/k8sclusterreceiver version: latest
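
For context on the change above: the `unit` strings are declared in the component's `metadata.yaml` and flow into the mdatagen-generated `generated_metrics.go`, where they are attached to each exported metric through the pdata API. The following is a minimal, illustrative sketch of that end result (it reuses `k8s.replicaset.desired` from this patch but is not part of the patch itself):

```go
package main

import (
	"fmt"

	"go.opentelemetry.io/collector/pdata/pmetric"
)

func main() {
	// Build a gauge the same way the generated receiver code does,
	// using the annotated count unit "{pod}" rather than the bare "1"
	// that the OTel semantic conventions discourage for entity counts.
	m := pmetric.NewMetric()
	m.SetName("k8s.replicaset.desired")
	m.SetDescription("Number of desired pods in this replicaset")
	m.SetUnit("{pod}")
	m.SetEmptyGauge()
	m.Gauge().DataPoints().AppendEmpty().SetIntValue(3)

	fmt.Printf("%s %s\n", m.Name(), m.Unit()) // k8s.replicaset.desired {pod}
}
```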