From 6d9669b0e7449c770433424b7f8818512a5d0e8f Mon Sep 17 00:00:00 2001
From: shalper2 <99686388+shalper2@users.noreply.github.com>
Date: Sun, 11 Dec 2022 23:10:33 -0600
Subject: [PATCH] [receiver/snowflake] add metrics (#15296)

Defined metrics in metadata.yaml and ran mdatagen to create the metadata
package for use in the receiver component. Metrics are designed for parity
with the existing smartagent. Dimensions have been translated into
attributes with the same associated column values. The smartagent
definition does not include data types, so care was taken to apply an
applicable type to each metric and attribute field.
---
 .chloggen/snowflake_add_metrics.yaml          |   16 +
 receiver/snowflakereceiver/documentation.md   |  719 +++++
 receiver/snowflakereceiver/go.mod             |   39 +-
 receiver/snowflakereceiver/go.sum             |  447 +++
 .../internal/metadata/generated_metrics.go    | 2831 +++++++++++++++++
 .../metadata/generated_metrics_test.go        | 1330 ++++++++
 receiver/snowflakereceiver/metadata.yaml      |  305 ++
 7 files changed, 5685 insertions(+), 2 deletions(-)
 create mode 100755 .chloggen/snowflake_add_metrics.yaml
 create mode 100644 receiver/snowflakereceiver/documentation.md
 create mode 100644 receiver/snowflakereceiver/go.sum
 create mode 100644 receiver/snowflakereceiver/internal/metadata/generated_metrics.go
 create mode 100644 receiver/snowflakereceiver/internal/metadata/generated_metrics_test.go

diff --git a/.chloggen/snowflake_add_metrics.yaml b/.chloggen/snowflake_add_metrics.yaml
new file mode 100755
index 000000000000..1c3617e18c34
--- /dev/null
+++ b/.chloggen/snowflake_add_metrics.yaml
@@ -0,0 +1,16 @@
+# One of 'breaking', 'deprecation', 'new_component', 'enhancement', 'bug_fix'
+change_type: enhancement
+
+# The name of the component, or a single word describing the area of concern, (e.g. filelogreceiver)
+component: snowflakereceiver
+
+# A brief description of the change. Surround your text with quotes ("") if it needs to start with a backtick (`).
+note: added metrics to snowflakereceiver
+
+# One or more tracking issues related to the change
+issues: [14754]
+
+# (Optional) One or more lines of additional information to render under the primary note.
+# These lines will be padded with 2 spaces and then inserted directly into the document.
+# Use pipe (|) for multiline entries.
+subtext: generated w/ mdatagen
diff --git a/receiver/snowflakereceiver/documentation.md b/receiver/snowflakereceiver/documentation.md
new file mode 100644
index 000000000000..622d59600c9c
--- /dev/null
+++ b/receiver/snowflakereceiver/documentation.md
@@ -0,0 +1,719 @@
+[comment]: <> (Code generated by mdatagen. DO NOT EDIT.)
+
+# snowflakereceiver
+
+## Default Metrics
+
+The following metrics are emitted by default. Each of them can be disabled by applying the following configuration:
+
+```yaml
+metrics:
+  <metric_name>:
+    enabled: false
+```
+
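Concretely, this toggle sits under the receiver's `metrics` block in a collector configuration. A minimal sketch, assuming the receiver is wired in under the `snowflake` key and using one of the default metrics documented below:

```yaml
receivers:
  snowflake:
    metrics:
      # disable a single default metric; all others keep their defaults
      snowflake.database.query.count:
        enabled: false
```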
+### snowflake.database.bytes_scanned.avg
+
+Average bytes scanned in a database.
+
+| Unit | Metric Type | Value Type |
+| ---- | ----------- | ---------- |
+| By | Gauge | Double |
+
+#### Attributes
+
+| Name | Description | Values |
+| ---- | ----------- | ------ |
+| schema_name | Name of schema associated with query result. | Any Str |
+| execution_status | Execution status of query being reported. | Any Str |
+| error_message | Error message reported by query if present | Any Str |
+| query_type | Type of query performed. | Any Str |
+| warehouse_name | Name of warehouse in query being reported on. | Any Str |
+| database_name | Name of database being queried (default is snowflake). | Any Str |
+| warehouse_size | Size of warehouse being reported on. | Any Str |
+
+### snowflake.database.query.count
+
+Total query count for database.
+
+| Unit | Metric Type | Value Type |
+| ---- | ----------- | ---------- |
+| 1 | Gauge | Int |
+
+#### Attributes
+
+| Name | Description | Values |
+| ---- | ----------- | ------ |
+| schema_name | Name of schema associated with query result. | Any Str |
+| execution_status | Execution status of query being reported. | Any Str |
+| error_message | Error message reported by query if present | Any Str |
+| query_type | Type of query performed. | Any Str |
+| warehouse_name | Name of warehouse in query being reported on. | Any Str |
+| database_name | Name of database being queried (default is snowflake). | Any Str |
+| warehouse_size | Size of warehouse being reported on. | Any Str |
+
+### snowflake.query.blocked
+
+Blocked query count for warehouse.
+
+| Unit | Metric Type | Value Type |
+| ---- | ----------- | ---------- |
+| 1 | Gauge | Double |
+
+#### Attributes
+
+| Name | Description | Values |
+| ---- | ----------- | ------ |
+| warehouse_name | Name of warehouse in query being reported on. | Any Str |
+
+### snowflake.query.bytes_deleted.total
+
+Total bytes deleted in database.
+
+| Unit | Metric Type | Value Type |
+| ---- | ----------- | ---------- |
+| By | Gauge | Int |
+
+#### Attributes
+
+| Name | Description | Values |
+| ---- | ----------- | ------ |
+| schema_name | Name of schema associated with query result. | Any Str |
+| execution_status | Execution status of query being reported. | Any Str |
+| error_message | Error message reported by query if present | Any Str |
+| query_type | Type of query performed. | Any Str |
+| warehouse_name | Name of warehouse in query being reported on. | Any Str |
+| database_name | Name of database being queried (default is snowflake). | Any Str |
+| warehouse_size | Size of warehouse being reported on. | Any Str |
+
+### snowflake.query.bytes_written.total
+
+Total bytes written by database.
+
+| Unit | Metric Type | Value Type |
+| ---- | ----------- | ---------- |
+| By | Gauge | Int |
+
+#### Attributes
+
+| Name | Description | Values |
+| ---- | ----------- | ------ |
+| schema_name | Name of schema associated with query result. | Any Str |
+| execution_status | Execution status of query being reported. | Any Str |
+| error_message | Error message reported by query if present | Any Str |
+| query_type | Type of query performed. | Any Str |
+| warehouse_name | Name of warehouse in query being reported on. | Any Str |
+| database_name | Name of database being queried (default is snowflake). | Any Str |
+| warehouse_size | Size of warehouse being reported on. | Any Str |
+
+### snowflake.query.compilation_time.total
+
+Total time taken to compile query.
+
+| Unit | Metric Type | Value Type |
+| ---- | ----------- | ---------- |
+| s | Gauge | Double |
+
+#### Attributes
+
+| Name | Description | Values |
+| ---- | ----------- | ------ |
+| schema_name | Name of schema associated with query result. | Any Str |
+| execution_status | Execution status of query being reported. | Any Str |
+| error_message | Error message reported by query if present | Any Str |
+| query_type | Type of query performed. | Any Str |
+| warehouse_name | Name of warehouse in query being reported on. | Any Str |
+| database_name | Name of database being queried (default is snowflake). | Any Str |
+| warehouse_size | Size of warehouse being reported on.
| Any Str | + +### snowflake.query.executed + +Executed query count for warehouse. + +| Unit | Metric Type | Value Type | +| ---- | ----------- | ---------- | +| 1 | Gauge | Double | + +#### Attributes + +| Name | Description | Values | +| ---- | ----------- | ------ | +| warehouse_name | Name of warehouse in query being reported on. | Any Str | + +### snowflake.query.execution_time.total + +Total time spent executing queries in database. + +| Unit | Metric Type | Value Type | +| ---- | ----------- | ---------- | +| s | Gauge | Double | + +#### Attributes + +| Name | Description | Values | +| ---- | ----------- | ------ | +| schema_name | Name of schema associated with query result. | Any Str | +| execution_status | Execution status of query being reported. | Any Str | +| error_message | Error message reported by query if present | Any Str | +| query_type | Type of query performed. | Any Str | +| warehouse_name | Name of warehouse in query being reported on. | Any Str | +| database_name | Name of database being queried (default is snowflake). | Any Str | +| warehouse_size | Size of warehouse being reported on. | Any Str | + +### snowflake.query.queued_overload + +Overloaded query count for warehouse. + +| Unit | Metric Type | Value Type | +| ---- | ----------- | ---------- | +| 1 | Gauge | Double | + +#### Attributes + +| Name | Description | Values | +| ---- | ----------- | ------ | +| warehouse_name | Name of warehouse in query being reported on. | Any Str | + +### snowflake.query.queued_provision + +Number of compute resources queued for provisioning. + +| Unit | Metric Type | Value Type | +| ---- | ----------- | ---------- | +| 1 | Gauge | Double | + +#### Attributes + +| Name | Description | Values | +| ---- | ----------- | ------ | +| warehouse_name | Name of warehouse in query being reported on. | Any Str | + +### snowflake.queued_overload_time.avg + +Average time spent in warehouse queue due to warehouse being overloaded. + +| Unit | Metric Type | Value Type | +| ---- | ----------- | ---------- | +| s | Gauge | Double | + +#### Attributes + +| Name | Description | Values | +| ---- | ----------- | ------ | +| schema_name | Name of schema associated with query result. | Any Str | +| execution_status | Execution status of query being reported. | Any Str | +| error_message | Error message reported by query if present | Any Str | +| query_type | Type of query performed. | Any Str | +| warehouse_name | Name of warehouse in query being reported on. | Any Str | +| database_name | Name of database being queried (default is snowflake). | Any Str | +| warehouse_size | Size of warehouse being reported on. | Any Str | + +### snowflake.queued_provisioning_time.avg + +Average time spent in warehouse queue waiting for resources to provision. + +| Unit | Metric Type | Value Type | +| ---- | ----------- | ---------- | +| s | Gauge | Double | + +#### Attributes + +| Name | Description | Values | +| ---- | ----------- | ------ | +| schema_name | Name of schema associated with query result. | Any Str | +| execution_status | Execution status of query being reported. | Any Str | +| error_message | Error message reported by query if present | Any Str | +| query_type | Type of query performed. | Any Str | +| warehouse_name | Name of warehouse in query being reported on. | Any Str | +| database_name | Name of database being queried (default is snowflake). | Any Str | +| warehouse_size | Size of warehouse being reported on. 
| Any Str |
+
+### snowflake.queued_repair_time.avg
+
+Average time spent in warehouse queue waiting for compute resources to be repaired.
+
+| Unit | Metric Type | Value Type |
+| ---- | ----------- | ---------- |
+| s | Gauge | Double |
+
+#### Attributes
+
+| Name | Description | Values |
+| ---- | ----------- | ------ |
+| schema_name | Name of schema associated with query result. | Any Str |
+| execution_status | Execution status of query being reported. | Any Str |
+| error_message | Error message reported by query if present | Any Str |
+| query_type | Type of query performed. | Any Str |
+| warehouse_name | Name of warehouse in query being reported on. | Any Str |
+| database_name | Name of database being queried (default is snowflake). | Any Str |
+| warehouse_size | Size of warehouse being reported on. | Any Str |
+
+### snowflake.storage.stage_bytes.total
+
+Number of bytes of stage storage used by files in all internal stages (named, table, user).
+
+| Unit | Metric Type | Value Type |
+| ---- | ----------- | ---------- |
+| By | Gauge | Int |
+
+### snowflake.storage.storage_bytes.total
+
+Number of bytes of table storage used, including bytes for data currently in Time Travel.
+
+| Unit | Metric Type | Value Type |
+| ---- | ----------- | ---------- |
+| By | Gauge | Int |
+
+### snowflake.total_elapsed_time.avg
+
+Average elapsed time.
+
+| Unit | Metric Type | Value Type |
+| ---- | ----------- | ---------- |
+| s | Gauge | Double |
+
+#### Attributes
+
+| Name | Description | Values |
+| ---- | ----------- | ------ |
+| schema_name | Name of schema associated with query result. | Any Str |
+| execution_status | Execution status of query being reported. | Any Str |
+| error_message | Error message reported by query if present | Any Str |
+| query_type | Type of query performed. | Any Str |
+| warehouse_name | Name of warehouse in query being reported on. | Any Str |
+| database_name | Name of database being queried (default is snowflake). | Any Str |
+| warehouse_size | Size of warehouse being reported on. | Any Str |
+
+## Optional Metrics
+
+The following metrics are not emitted by default. Each of them can be enabled by applying the following configuration:
+
+```yaml
+metrics:
+  <metric_name>:
+    enabled: true
+```
+
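Mirroring the earlier sketch, and under the same assumption that the receiver is registered under the `snowflake` key, opting in to one of the optional metrics documented below would look like:

```yaml
receivers:
  snowflake:
    metrics:
      # opt in to an optional metric; it stays off unless enabled here
      snowflake.logins.total:
        enabled: true
```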
+### snowflake.billing.cloud_service.total
+
+Reported total credits used in the cloud service.
+
+| Unit | Metric Type | Value Type |
+| ---- | ----------- | ---------- |
+| {credits} | Gauge | Double |
+
+#### Attributes
+
+| Name | Description | Values |
+| ---- | ----------- | ------ |
+| service_type | Service type associated with metric query | Any Str |
+
+### snowflake.billing.total_credit.total
+
+Reported total credits used across account.
+
+| Unit | Metric Type | Value Type |
+| ---- | ----------- | ---------- |
+| {credits} | Gauge | Double |
+
+#### Attributes
+
+| Name | Description | Values |
+| ---- | ----------- | ------ |
+| service_type | Service type associated with metric query | Any Str |
+
+### snowflake.billing.virtual_warehouse.total
+
+Reported total credits used by virtual warehouse service.
+
+| Unit | Metric Type | Value Type |
+| ---- | ----------- | ---------- |
+| {credits} | Gauge | Double |
+
+#### Attributes
+
+| Name | Description | Values |
+| ---- | ----------- | ------ |
+| service_type | Service type associated with metric query | Any Str |
+
+### snowflake.billing.warehouse.cloud_service.total
+
+Credits used across cloud service for given warehouse.
+
+| Unit | Metric Type | Value Type |
+| ---- | ----------- | ---------- |
+| {credits} | Gauge | Double |
+
+#### Attributes
+
+| Name | Description | Values |
+| ---- | ----------- | ------ |
+| warehouse_name | Name of warehouse in query being reported on. | Any Str |
+
+### snowflake.billing.warehouse.total_credit.total
+
+Total credits used associated with given warehouse.
+
+| Unit | Metric Type | Value Type |
+| ---- | ----------- | ---------- |
+| {credits} | Gauge | Double |
+
+#### Attributes
+
+| Name | Description | Values |
+| ---- | ----------- | ------ |
+| warehouse_name | Name of warehouse in query being reported on. | Any Str |
+
+### snowflake.billing.warehouse.virtual_warehouse.total
+
+Total credits used by virtual warehouse service for given warehouse.
+
+| Unit | Metric Type | Value Type |
+| ---- | ----------- | ---------- |
+| {credits} | Gauge | Double |
+
+#### Attributes
+
+| Name | Description | Values |
+| ---- | ----------- | ------ |
+| warehouse_name | Name of warehouse in query being reported on. | Any Str |
+
+### snowflake.logins.total
+
+Total login attempts for account.
+
+| Unit | Metric Type | Value Type |
+| ---- | ----------- | ---------- |
+| 1 | Gauge | Int |
+
+#### Attributes
+
+| Name | Description | Values |
+| ---- | ----------- | ------ |
+| error_message | Error message reported by query if present | Any Str |
+| reported_client_type | Client type used for attempt | Any Str |
+| is_success | Login status (success or failure). | Any Str |
+
+### snowflake.pipe.credits_used.total
+
+Total Snowpipe credits used.
+
+| Unit | Metric Type | Value Type |
+| ---- | ----------- | ---------- |
+| {credits} | Gauge | Double |
+
+#### Attributes
+
+| Name | Description | Values |
+| ---- | ----------- | ------ |
+| pipe_name | Name of snowpipe. | Any Str |
+
+### snowflake.query.bytes_scanned.total
+
+Total bytes scanned in database.
+
+| Unit | Metric Type | Value Type |
+| ---- | ----------- | ---------- |
+| By | Gauge | Int |
+
+#### Attributes
+
+| Name | Description | Values |
+| ---- | ----------- | ------ |
+| schema_name | Name of schema associated with query result. | Any Str |
+| execution_status | Execution status of query being reported. | Any Str |
+| error_message | Error message reported by query if present | Any Str |
+| query_type | Type of query performed. | Any Str |
+| warehouse_name | Name of warehouse in query being reported on. | Any Str |
+| database_name | Name of database being queried (default is snowflake). | Any Str |
+| warehouse_size | Size of warehouse being reported on. | Any Str |
+
+### snowflake.query.bytes_spilled.local.total
+
+Total bytes spilled (intermediate results do not fit in memory) by local storage.
+
+| Unit | Metric Type | Value Type |
+| ---- | ----------- | ---------- |
+| By | Gauge | Int |
+
+#### Attributes
+
+| Name | Description | Values |
+| ---- | ----------- | ------ |
+| schema_name | Name of schema associated with query result. | Any Str |
+| execution_status | Execution status of query being reported. | Any Str |
+| error_message | Error message reported by query if present | Any Str |
+| query_type | Type of query performed. | Any Str |
+| warehouse_name | Name of warehouse in query being reported on. | Any Str |
+| database_name | Name of database being queried (default is snowflake). | Any Str |
+| warehouse_size | Size of warehouse being reported on.
| Any Str | + +### snowflake.query.bytes_spilled.remote.total + +Total bytes spilled (intermediate results do not fit in memory) by remote storage. + +| Unit | Metric Type | Value Type | +| ---- | ----------- | ---------- | +| By | Gauge | Int | + +#### Attributes + +| Name | Description | Values | +| ---- | ----------- | ------ | +| schema_name | Name of schema associated with query result. | Any Str | +| execution_status | Execution status of query being reported. | Any Str | +| error_message | Error message reported by query if present | Any Str | +| query_type | Type of query performed. | Any Str | +| warehouse_name | Name of warehouse in query being reported on. | Any Str | +| database_name | Name of database being queried (default is snowflake). | Any Str | +| warehouse_size | Size of warehouse being reported on. | Any Str | + +### snowflake.query.data_scanned_cache.avg + +Average percentage of data scanned from cache. + +| Unit | Metric Type | Value Type | +| ---- | ----------- | ---------- | +| 1 | Gauge | Double | + +#### Attributes + +| Name | Description | Values | +| ---- | ----------- | ------ | +| schema_name | Name of schema associated with query result. | Any Str | +| execution_status | Execution status of query being reported. | Any Str | +| error_message | Error message reported by query if present | Any Str | +| query_type | Type of query performed. | Any Str | +| warehouse_name | Name of warehouse in query being reported on. | Any Str | +| database_name | Name of database being queried (default is snowflake). | Any Str | +| warehouse_size | Size of warehouse being reported on. | Any Str | + +### snowflake.query.partitions_scanned.total + +Number of partitions scanned during query so far. + +| Unit | Metric Type | Value Type | +| ---- | ----------- | ---------- | +| 1 | Gauge | Int | + +#### Attributes + +| Name | Description | Values | +| ---- | ----------- | ------ | +| schema_name | Name of schema associated with query result. | Any Str | +| execution_status | Execution status of query being reported. | Any Str | +| error_message | Error message reported by query if present | Any Str | +| query_type | Type of query performed. | Any Str | +| warehouse_name | Name of warehouse in query being reported on. | Any Str | +| database_name | Name of database being queried (default is snowflake). | Any Str | +| warehouse_size | Size of warehouse being reported on. | Any Str | + +### snowflake.queued_overload_time.total + +Total time spent in warehouse queue due to warehouse being overloaded. + +| Unit | Metric Type | Value Type | +| ---- | ----------- | ---------- | +| s | Gauge | Int | + +#### Attributes + +| Name | Description | Values | +| ---- | ----------- | ------ | +| schema_name | Name of schema associated with query result. | Any Str | +| execution_status | Execution status of query being reported. | Any Str | +| error_message | Error message reported by query if present | Any Str | +| query_type | Type of query performed. | Any Str | +| warehouse_name | Name of warehouse in query being reported on. | Any Str | +| database_name | Name of database being queried (default is snowflake). | Any Str | +| warehouse_size | Size of warehouse being reported on. | Any Str | + +### snowflake.queued_provisioning_time.total + +Total time spent in warehouse queue waiting for resources to provision. 
+ +| Unit | Metric Type | Value Type | +| ---- | ----------- | ---------- | +| s | Gauge | Int | + +#### Attributes + +| Name | Description | Values | +| ---- | ----------- | ------ | +| schema_name | Name of schema associated with query result. | Any Str | +| execution_status | Execution status of query being reported. | Any Str | +| error_message | Error message reported by query if present | Any Str | +| query_type | Type of query performed. | Any Str | +| warehouse_name | Name of warehouse in query being reported on. | Any Str | +| database_name | Name of database being queried (default is snowflake). | Any Str | +| warehouse_size | Size of warehouse being reported on. | Any Str | + +### snowflake.queued_repair_time.total + +Total time spent in warehouse queue waiting for compute resources to be repaired. + +| Unit | Metric Type | Value Type | +| ---- | ----------- | ---------- | +| s | Gauge | Int | + +#### Attributes + +| Name | Description | Values | +| ---- | ----------- | ------ | +| schema_name | Name of schema associated with query result. | Any Str | +| execution_status | Execution status of query being reported. | Any Str | +| error_message | Error message reported by query if present | Any Str | +| query_type | Type of query performed. | Any Str | +| warehouse_name | Name of warehouse in query being reported on. | Any Str | +| database_name | Name of database being queried (default is snowflake). | Any Str | +| warehouse_size | Size of warehouse being reported on. | Any Str | + +### snowflake.rows_deleted.total + +Number of rows deleted from a table (or tables). + +| Unit | Metric Type | Value Type | +| ---- | ----------- | ---------- | +| {rows} | Gauge | Int | + +#### Attributes + +| Name | Description | Values | +| ---- | ----------- | ------ | +| schema_name | Name of schema associated with query result. | Any Str | +| execution_status | Execution status of query being reported. | Any Str | +| error_message | Error message reported by query if present | Any Str | +| query_type | Type of query performed. | Any Str | +| warehouse_name | Name of warehouse in query being reported on. | Any Str | +| database_name | Name of database being queried (default is snowflake). | Any Str | +| warehouse_size | Size of warehouse being reported on. | Any Str | + +### snowflake.rows_inserted.total + +Number of rows inserted into a table (or tables). + +| Unit | Metric Type | Value Type | +| ---- | ----------- | ---------- | +| {rows} | Gauge | Int | + +#### Attributes + +| Name | Description | Values | +| ---- | ----------- | ------ | +| schema_name | Name of schema associated with query result. | Any Str | +| execution_status | Execution status of query being reported. | Any Str | +| error_message | Error message reported by query if present | Any Str | +| query_type | Type of query performed. | Any Str | +| warehouse_name | Name of warehouse in query being reported on. | Any Str | +| database_name | Name of database being queried (default is snowflake). | Any Str | +| warehouse_size | Size of warehouse being reported on. | Any Str | + +### snowflake.rows_produced.total + +Total number of rows produced by statement. + +| Unit | Metric Type | Value Type | +| ---- | ----------- | ---------- | +| {rows} | Gauge | Int | + +#### Attributes + +| Name | Description | Values | +| ---- | ----------- | ------ | +| schema_name | Name of schema associated with query result. | Any Str | +| execution_status | Execution status of query being reported. 
| Any Str |
+| error_message | Error message reported by query if present | Any Str |
+| query_type | Type of query performed. | Any Str |
+| warehouse_name | Name of warehouse in query being reported on. | Any Str |
+| database_name | Name of database being queried (default is snowflake). | Any Str |
+| warehouse_size | Size of warehouse being reported on. | Any Str |
+
+### snowflake.rows_unloaded.total
+
+Total number of rows unloaded during data export.
+
+| Unit | Metric Type | Value Type |
+| ---- | ----------- | ---------- |
+| {rows} | Gauge | Int |
+
+#### Attributes
+
+| Name | Description | Values |
+| ---- | ----------- | ------ |
+| schema_name | Name of schema associated with query result. | Any Str |
+| execution_status | Execution status of query being reported. | Any Str |
+| error_message | Error message reported by query if present | Any Str |
+| query_type | Type of query performed. | Any Str |
+| warehouse_name | Name of warehouse in query being reported on. | Any Str |
+| database_name | Name of database being queried (default is snowflake). | Any Str |
+| warehouse_size | Size of warehouse being reported on. | Any Str |
+
+### snowflake.rows_updated.total
+
+Total number of rows updated in a table.
+
+| Unit | Metric Type | Value Type |
+| ---- | ----------- | ---------- |
+| {rows} | Gauge | Int |
+
+#### Attributes
+
+| Name | Description | Values |
+| ---- | ----------- | ------ |
+| schema_name | Name of schema associated with query result. | Any Str |
+| execution_status | Execution status of query being reported. | Any Str |
+| error_message | Error message reported by query if present | Any Str |
+| query_type | Type of query performed. | Any Str |
+| warehouse_name | Name of warehouse in query being reported on. | Any Str |
+| database_name | Name of database being queried (default is snowflake). | Any Str |
+| warehouse_size | Size of warehouse being reported on. | Any Str |
+
+### snowflake.session_id.count
+
+Distinct session IDs associated with snowflake username.
+
+| Unit | Metric Type | Value Type |
+| ---- | ----------- | ---------- |
+| 1 | Gauge | Int |
+
+#### Attributes
+
+| Name | Description | Values |
+| ---- | ----------- | ------ |
+| user_name | Username in query being reported. | Any Str |
+
+### snowflake.storage.failsafe_bytes.total
+
+Number of bytes of data in Fail-safe.
+
+| Unit | Metric Type | Value Type |
+| ---- | ----------- | ---------- |
+| By | Gauge | Int |
+
+### snowflake.total_elapsed_time.total
+
+Total elapsed time.
+
+| Unit | Metric Type | Value Type |
+| ---- | ----------- | ---------- |
+| s | Gauge | Int |
+
+#### Attributes
+
+| Name | Description | Values |
+| ---- | ----------- | ------ |
+| schema_name | Name of schema associated with query result. | Any Str |
+| execution_status | Execution status of query being reported. | Any Str |
+| error_message | Error message reported by query if present | Any Str |
+| query_type | Type of query performed. | Any Str |
+| warehouse_name | Name of warehouse in query being reported on. | Any Str |
+| database_name | Name of database being queried (default is snowflake). | Any Str |
+| warehouse_size | Size of warehouse being reported on. | Any Str |
+
+## Resource Attributes
+
+| Name | Description | Values |
+| ---- | ----------- | ------ |
+| snowflake.account.name | Snowflake account being used by receiver. | Any Str |
+| snowflake.username | The name of the snowflake user account being used by receiver. | Any Str |
+| snowflake.warehouse.name | The name of the warehouse being used by receiver. | Any Str |
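Per the commit message, everything above is driven by receiver/snowflakereceiver/metadata.yaml, which this patch also adds (305 lines, not reproduced here). A minimal sketch of the kind of entry that yields one of the documented metrics, assuming the mdatagen schema in use at this collector version; the field names are schema assumptions, the values are copied from the documentation above:

```yaml
# Illustrative fragment only, not the actual metadata.yaml from this change.
attributes:
  warehouse_name:
    description: Name of warehouse in query being reported on.
    type: string

metrics:
  snowflake.query.executed:
    enabled: true
    description: Executed query count for warehouse.
    unit: 1
    gauge:
      value_type: double
    attributes: [warehouse_name]
```

Rerunning mdatagen against this file (in contrib, typically via a `//go:generate mdatagen metadata.yaml` directive in the component) regenerates the metadata package and documentation.md.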
diff --git a/receiver/snowflakereceiver/go.mod b/receiver/snowflakereceiver/go.mod
index 21c71ccdfb4b..587033508be5 100644
--- a/receiver/snowflakereceiver/go.mod
+++ b/receiver/snowflakereceiver/go.mod
@@ -1,5 +1,40 @@
 module github.com/open-telemetry/opentelemetry-collector-contrib/receiver/snowflakereceiver
 
-go 1.18
+go 1.19
 
-retract v0.65.0
+require (
+	github.com/stretchr/testify v1.8.1
+	go.opentelemetry.io/collector/component v0.66.1-0.20221202005155-1c54042beb70
+	go.opentelemetry.io/collector/confmap v0.0.0-20221201172708-2bdff61fa52a
+	go.opentelemetry.io/collector/pdata v0.66.1-0.20221202005155-1c54042beb70
+)
+
+require (
+	github.com/davecgh/go-spew v1.1.1 // indirect
+	github.com/gogo/protobuf v1.3.2 // indirect
+	github.com/golang/protobuf v1.5.2 // indirect
+	github.com/json-iterator/go v1.1.12 // indirect
+	github.com/knadh/koanf v1.4.4 // indirect
+	github.com/mitchellh/copystructure v1.2.0 // indirect
+	github.com/mitchellh/mapstructure v1.5.0 // indirect
+	github.com/mitchellh/reflectwalk v1.0.2 // indirect
+	github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
+	github.com/modern-go/reflect2 v1.0.2 // indirect
+	github.com/pmezard/go-difflib v1.0.0 // indirect
+	go.opentelemetry.io/collector v0.66.1-0.20221202005155-1c54042beb70 // indirect
+	go.opentelemetry.io/collector/consumer v0.66.1-0.20221202005155-1c54042beb70 // indirect
+	go.opentelemetry.io/collector/featuregate v0.66.1-0.20221202005155-1c54042beb70 // indirect
+	go.opentelemetry.io/otel v1.11.1 // indirect
+	go.opentelemetry.io/otel/metric v0.33.0 // indirect
+	go.opentelemetry.io/otel/trace v1.11.1 // indirect
+	go.uber.org/atomic v1.10.0 // indirect
+	go.uber.org/multierr v1.8.0 // indirect
+	go.uber.org/zap v1.24.0 // indirect
+	golang.org/x/net v0.0.0-20220722155237-a158d28d115b // indirect
+	golang.org/x/sys v0.2.0 // indirect
+	golang.org/x/text v0.4.0 // indirect
+	google.golang.org/genproto v0.0.0-20211208223120-3a66f561d7aa // indirect
+	google.golang.org/grpc v1.51.0 // indirect
+	google.golang.org/protobuf v1.28.1 // indirect
+	gopkg.in/yaml.v3 v3.0.1 // indirect
+)
diff --git a/receiver/snowflakereceiver/go.sum b/receiver/snowflakereceiver/go.sum
new file mode 100644
index 000000000000..1324c1ca4278
--- /dev/null
+++ b/receiver/snowflakereceiver/go.sum
@@ -0,0 +1,447 @@
+cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
+cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
+github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
+github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU=
+github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
+github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
+github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
+github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
+github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho=
+github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY=
+github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod
h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o= +github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY= +github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= +github.com/armon/go-radix v1.0.0/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= +github.com/aws/aws-sdk-go-v2 v1.9.2/go.mod h1:cK/D0BBs0b/oWPIcX/Z/obahJK1TT7IPVjy53i/mX/4= +github.com/aws/aws-sdk-go-v2/config v1.8.3/go.mod h1:4AEiLtAb8kLs7vgw2ZV3p2VZ1+hBavOc84hqxVNpCyw= +github.com/aws/aws-sdk-go-v2/credentials v1.4.3/go.mod h1:FNNC6nQZQUuyhq5aE5c7ata8o9e4ECGmS4lAXC7o1mQ= +github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.6.0/go.mod h1:gqlclDEZp4aqJOancXK6TN24aKhT0W0Ae9MHk3wzTMM= +github.com/aws/aws-sdk-go-v2/internal/ini v1.2.4/go.mod h1:ZcBrrI3zBKlhGFNYWvju0I3TR93I7YIgAfy82Fh4lcQ= +github.com/aws/aws-sdk-go-v2/service/appconfig v1.4.2/go.mod h1:FZ3HkCe+b10uFZZkFdvf98LHW21k49W8o8J366lqVKY= +github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.3.2/go.mod h1:72HRZDLMtmVQiLG2tLfQcaWLCssELvGl+Zf2WVxMmR8= +github.com/aws/aws-sdk-go-v2/service/sso v1.4.2/go.mod h1:NBvT9R1MEF+Ud6ApJKM0G+IkPchKS7p7c2YPKwHmBOk= +github.com/aws/aws-sdk-go-v2/service/sts v1.7.2/go.mod h1:8EzeIqfWt2wWT4rJVu3f21TfrhJ8AEMzVybRNSb/b4g= +github.com/aws/smithy-go v1.8.0/go.mod h1:SObp3lf9smib00L/v3U2eAKG8FyQ7iLrJnQiAmR5n+E= +github.com/benbjohnson/clock v1.3.0 h1:ip6w0uFQkncKQ979AypyG0ER7mqUSBdKLOgAle/AT8A= +github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= +github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= +github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= +github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= +github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= +github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= +github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= +github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= +github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= +github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= +github.com/coreos/go-systemd/v22 v22.3.2/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= +github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= +github.com/envoyproxy/go-control-plane 
v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= +github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= +github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ= +github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= +github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= +github.com/fatih/color v1.9.0/go.mod h1:eQcE1qtQxscV5RaZvpXrrb8Drkc3/DdQ+uUYCNjL+zU= +github.com/fatih/structs v1.1.0/go.mod h1:9NiDSp5zOcgEDl+j00MP/WkGVPOlPRLejGD8Ga6PJ7M= +github.com/fsnotify/fsnotify v1.4.9 h1:hsms1Qyu0jgnwNXIxa+/V/PDsU6CfLf6CNO8H7IWoS4= +github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= +github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= +github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= +github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= +github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY= +github.com/go-ldap/ldap v3.0.2+incompatible/go.mod h1:qfd9rJvER9Q0/D/Sqn1DfHRoBp40uXYvFoEVrNEPqRc= +github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= +github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= +github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A= +github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= +github.com/go-test/deep v1.0.2-0.20181118220953-042da051cf31/go.mod h1:wGDj63lr65AM2AQyKZd/NYHGb0R+1RLqB8NKt3aSFNA= +github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= +github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= +github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= +github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= +github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= +github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= +github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= +github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= +github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= +github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= +github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= +github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/golang/protobuf 
v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= +github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw= +github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= +github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= +github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE= +github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= +github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= +github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= +github.com/hashicorp/consul/api v1.13.0/go.mod h1:ZlVrynguJKcYr54zGaDbaL3fOvKC9m72FhPvA8T35KQ= +github.com/hashicorp/consul/sdk v0.8.0/go.mod h1:GBvyrGALthsZObzUGsfgHZQDXjg4lOjagTIwIR1vPms= +github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= +github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= +github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= +github.com/hashicorp/go-hclog v0.0.0-20180709165350-ff2cf002a8dd/go.mod h1:9bjs9uLqI8l75knNv3lV1kA55veR+WUPSiKIWcQHudI= +github.com/hashicorp/go-hclog v0.8.0/go.mod h1:5CU+agLiy3J7N7QjHK5d05KxGsuXiQLrjA0H7acj2lQ= +github.com/hashicorp/go-hclog v0.12.0/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ= +github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= +github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM= +github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk= +github.com/hashicorp/go-multierror v1.1.0/go.mod h1:spPvp8C1qA32ftKqdAHm4hHTbPw+vmowP0z+KUhOZdA= +github.com/hashicorp/go-plugin v1.0.1/go.mod h1:++UyYGoz3o5w9ZzAdZxtQKrWWP+iqPBn3cQptSMzBuY= +github.com/hashicorp/go-retryablehttp v0.5.4/go.mod h1:9B5zBasrRhHXnJnui7y6sL7es7NDiJgTc6Er0maI1Xs= +github.com/hashicorp/go-rootcerts v1.0.1/go.mod h1:pqUvnprVnM5bf7AOirdbb01K4ccR319Vf4pU3K5EGc8= +github.com/hashicorp/go-rootcerts v1.0.2/go.mod h1:pqUvnprVnM5bf7AOirdbb01K4ccR319Vf4pU3K5EGc8= +github.com/hashicorp/go-sockaddr v1.0.0/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU= +github.com/hashicorp/go-sockaddr v1.0.2/go.mod h1:rB4wwRAUzs07qva3c5SdrY/NEtAUjGlgmH/UkBUC97A= +github.com/hashicorp/go-syslog v1.0.0/go.mod h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdvsLplgctolz4= 
+github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= +github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= +github.com/hashicorp/go-version v1.1.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= +github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4= +github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= +github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64= +github.com/hashicorp/mdns v1.0.4/go.mod h1:mtBihi+LeNXGtG8L9dX59gAEa12BDtBQSp4v/YAJqrc= +github.com/hashicorp/memberlist v0.3.0/go.mod h1:MS2lj3INKhZjWNqd3N0m3J+Jxf3DAOnAH9VT3Sh9MUE= +github.com/hashicorp/serf v0.9.6/go.mod h1:TXZNMjZQijwlDvp+r0b63xZ45H7JmCmgg4gpTwn9UV4= +github.com/hashicorp/vault/api v1.0.4/go.mod h1:gDcqh3WGcR1cpF5AJz/B1UFheUEneMoIospckxBxk6Q= +github.com/hashicorp/vault/sdk v0.1.13/go.mod h1:B+hVj7TpuQY1Y/GPbCpffmgd+tSEwvhkWnjtSYCaS2M= +github.com/hashicorp/yamux v0.0.0-20180604194846-3520598351bb/go.mod h1:+NfK9FKeTrX5uv1uIXGdwYDTeHna2qgaIlx54MXqjAM= +github.com/hashicorp/yamux v0.0.0-20181012175058-2f1d1f20f75d/go.mod h1:+NfK9FKeTrX5uv1uIXGdwYDTeHna2qgaIlx54MXqjAM= +github.com/hjson/hjson-go/v4 v4.0.0 h1:wlm6IYYqHjOdXH1gHev4VoXCaW20HdQAGCxdOEEg2cs= +github.com/hjson/hjson-go/v4 v4.0.0/go.mod h1:KaYt3bTw3zhBjYqnXkYywcYctk0A2nxeEFTse3rH13E= +github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo= +github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U= +github.com/joho/godotenv v1.3.0 h1:Zjp+RcGpHhGlrMbJzXTrZZPrWj+1vfm90La1wgB6Bhc= +github.com/joho/godotenv v1.3.0/go.mod h1:7hK45KPybAkOC6peb+G5yklZfMxEjkZhHbwpqxOKXbg= +github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4= +github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= +github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= +github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= +github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= +github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM= +github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= +github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/knadh/koanf v1.4.4 h1:d2jY5nCCeoaiqvEKSBW9rEc93EfNy/XWgWsSB3j7JEA= +github.com/knadh/koanf v1.4.4/go.mod h1:Hgyjp4y8v44hpZtPzs7JZfRAW5AhN7KfZcwv1RYggDs= +github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= +github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pretty v0.2.0 
h1:s5hAObm+yFO5uHYt5dYjxi2rXrsnmRpJx4OYvIWUaQs= +github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= +github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= +github.com/mattn/go-colorable v0.1.6/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= +github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= +github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= +github.com/mattn/go-isatty v0.0.10/go.mod h1:qgIWMr58cqv1PHHyhnkY9lrL7etaEgOFcMEpPG5Rm84= +github.com/mattn/go-isatty v0.0.11/go.mod h1:PhnuNfih5lzO57/f3n+odYbM4JtupLOxQOAqxQCu2WE= +github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= +github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= +github.com/miekg/dns v1.1.26/go.mod h1:bPDLeHnStXmXAq1m/Ch/hvfNHr14JKNPMBo3VZKjuso= +github.com/miekg/dns v1.1.41/go.mod h1:p6aan82bvRIyn+zDIv9xYNUpwa73JcSh9BKwknJysuI= +github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc= +github.com/mitchellh/cli v1.1.0/go.mod h1:xcISNoH86gajksDmfB23e/pu+B+GeFRMYmoHXxx3xhI= +github.com/mitchellh/copystructure v1.0.0/go.mod h1:SNtv71yrdKgLRyLFxmLdkAbkKEFWgYaq1OVrnRcwhnw= +github.com/mitchellh/copystructure v1.2.0 h1:vpKXTN4ewci03Vljg/q9QvCGUDttBOGBIa15WveJJGw= +github.com/mitchellh/copystructure v1.2.0/go.mod h1:qLl+cE2AmVv+CoeAwDPye/v+N2HKCj9FbZEVFJRxO9s= +github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= +github.com/mitchellh/go-testing-interface v0.0.0-20171004221916-a61a99592b77/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI= +github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI= +github.com/mitchellh/go-wordwrap v1.0.0/go.mod h1:ZXFpozHsX6DPmq2I0TCekCxypsnAUbP2oI0UX1GXzOo= +github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= +github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= +github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY= +github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= +github.com/mitchellh/reflectwalk v1.0.0/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= +github.com/mitchellh/reflectwalk v1.0.2 h1:G2LzWKi524PWgd3mLHV8Y5k7s6XUvT0Gef6zxSIeXaQ= +github.com/mitchellh/reflectwalk v1.0.2/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= +github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= 
+github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= +github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= +github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= +github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= +github.com/npillmayer/nestext v0.1.3/go.mod h1:h2lrijH8jpicr25dFY+oAJLyzlya6jhnuG+zWp9L0Uk= +github.com/oklog/run v1.0.0/go.mod h1:dlhp/R75TPv97u0XWUtDeV/lRKWPKSdTuV0TZvrmrQA= +github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= +github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= +github.com/pelletier/go-toml v1.7.0 h1:7utD74fnzVc/cpcyy8sjrlFr5vYpypUixARcHIMIGuI= +github.com/pelletier/go-toml v1.7.0/go.mod h1:vwGMzjaWMwyfHwgIBhI2YUM4fB6nL6lVAvS1LBMMhTE= +github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= +github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= +github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI= +github.com/posener/complete v1.2.3/go.mod h1:WZIdtGGp+qx0sLrYKtIRAruyNpv6hFCicSgv7Sy7s/s= +github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= +github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= +github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M= +github.com/prometheus/client_golang v1.11.1/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0= +github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= +github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= +github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo= +github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc= +github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= +github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= +github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= +github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= +github.com/rhnvrm/simples3 v0.6.1/go.mod h1:Y+3vYm2V7Y4VijFoJHHTrja6OgPrJ2cBti8dPGkC3sA= +github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= +github.com/ryanuber/columnize 
v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= +github.com/ryanuber/columnize v2.1.0+incompatible/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= +github.com/ryanuber/go-glob v1.0.0/go.mod h1:807d1WSdnB0XRJzKNil9Om6lcp/3a0v4qIHxIXzX/Yc= +github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc= +github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= +github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= +github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= +github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= +github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= +github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= +github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= +github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= +github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= +github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= +github.com/stretchr/testify v1.8.1 h1:w7B6lhMri9wdJUVmEZPGGhZzrYTPvgJArz7wNPgYKsk= +github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= +github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= +go.etcd.io/etcd/api/v3 v3.5.4/go.mod h1:5GB2vv4A4AOn3yk7MftYGHkUfGtDHnEraIjym4dYz5A= +go.etcd.io/etcd/client/pkg/v3 v3.5.4/go.mod h1:IJHfcCEKxYu1Os13ZdwCwIUTUVGYTSAM3YSwc9/Ac1g= +go.etcd.io/etcd/client/v3 v3.5.4/go.mod h1:ZaRkVgBZC+L+dLCjTcF1hRXpgZXQPOvnA/Ak/gq3kiY= +go.opentelemetry.io/collector v0.66.1-0.20221202005155-1c54042beb70 h1:INB2CsOtqqrija2KvMuIpbBQ3qX1BECEeR+d0NJX+oM= +go.opentelemetry.io/collector v0.66.1-0.20221202005155-1c54042beb70/go.mod h1:d0NHkHyANJy3HDTvYE/PoylMShUmFIbiir89ieI0X0c= +go.opentelemetry.io/collector/component v0.66.1-0.20221202005155-1c54042beb70 h1:dEoJhzCmRShgmVro/kXm6pouGElgxkH2DsvanzCeZtU= +go.opentelemetry.io/collector/component v0.66.1-0.20221202005155-1c54042beb70/go.mod h1:OsVnvFFe38QSpcuZlQ8cvarDzcloCexgQ1/B2m80F6U= +go.opentelemetry.io/collector/confmap v0.0.0-20221201172708-2bdff61fa52a h1:2TscCXAQASO2kU9xHcMIUM/FP29fiQIRTilZmliq/7I= +go.opentelemetry.io/collector/confmap v0.0.0-20221201172708-2bdff61fa52a/go.mod h1:qJ3e8bA8h4wz/jYlbeEUafVHdrU7DkpStnmIUAz6VTQ= +go.opentelemetry.io/collector/consumer v0.66.1-0.20221202005155-1c54042beb70 h1:XwpVjI1KeKGPXOZBLefGILMziPYuNf6IzbmiIEsIyI0= 
+go.opentelemetry.io/collector/consumer v0.66.1-0.20221202005155-1c54042beb70/go.mod h1:EWVBcSGcIcmeDGTnIboc/aqa3agsBeH8/q7yFWG7xVc= +go.opentelemetry.io/collector/featuregate v0.66.1-0.20221202005155-1c54042beb70 h1:V0tf4ebciqIs07jFATfLGdUpLRaEsi+AiWuA/in0G8Y= +go.opentelemetry.io/collector/featuregate v0.66.1-0.20221202005155-1c54042beb70/go.mod h1:tewuFKJYalWBU0bmNKg++MC1ipINXUr6szYzOw2p1GI= +go.opentelemetry.io/collector/pdata v0.66.1-0.20221202005155-1c54042beb70 h1:TPOrxkEMvZzfBSwF3ct+kUUgKA5g8unkZZbKQvJweeA= +go.opentelemetry.io/collector/pdata v0.66.1-0.20221202005155-1c54042beb70/go.mod h1:pqyaznLzk21m+1KL6fwOsRryRELL+zNM0qiVSn0MbVc= +go.opentelemetry.io/otel v1.11.1 h1:4WLLAmcfkmDk2ukNXJyq3/kiz/3UzCaYq6PskJsaou4= +go.opentelemetry.io/otel v1.11.1/go.mod h1:1nNhXBbWSD0nsL38H6btgnFN2k4i0sNLHNNMZMSbUGE= +go.opentelemetry.io/otel/metric v0.33.0 h1:xQAyl7uGEYvrLAiV/09iTJlp1pZnQ9Wl793qbVvED1E= +go.opentelemetry.io/otel/metric v0.33.0/go.mod h1:QlTYc+EnYNq/M2mNk1qDDMRLpqCOj2f/r5c7Fd5FYaI= +go.opentelemetry.io/otel/trace v1.11.1 h1:ofxdnzsNrGBYXbP7t7zpUK281+go5rF7dvdIZXF8gdQ= +go.opentelemetry.io/otel/trace v1.11.1/go.mod h1:f/Q9G7vzk5u91PhbmKbg1Qn0rzH1LJ4vbPHFGkTPtOk= +go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= +go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= +go.uber.org/atomic v1.10.0 h1:9qC72Qh0+3MqyJbAn8YU5xVq1frD8bn3JtD2oXtafVQ= +go.uber.org/atomic v1.10.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0= +go.uber.org/goleak v1.1.11 h1:wy28qYRKZgnJTxGxvye5/wgWr1EKjmUDGYox5mGlRlI= +go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU= +go.uber.org/multierr v1.8.0 h1:dg6GjLku4EH+249NNmoIciG9N/jURbDG+pFlTkhzIC8= +go.uber.org/multierr v1.8.0/go.mod h1:7EAYxJLBy9rStEaz58O2t4Uvip6FSURkq8/ppBp95ak= +go.uber.org/zap v1.17.0/go.mod h1:MXVU+bhUf/A7Xi2HNOnopQOrmycQ5Ih87HtOu4q5SSo= +go.uber.org/zap v1.24.0 h1:FiJd5l1UOLj0wCgbSE0rwwXHzEdAZS6hiiSnxJN/D60= +go.uber.org/zap v1.24.0/go.mod h1:2kMP+WWQ8aoFoedH3T2sq6iJ2yDWpHbP0f6MQbS9Gkg= +golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190923035154-9ee001bba392/go.mod h1:/lpIB1dKB+9EgE3H3cr1v9wB50oz8l4C4h62xy7jSTY= +golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= +golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20210508222113-6edffad5e616/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= +golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/net 
v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190923162816-aa69164e4478/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= +golang.org/x/net v0.0.0-20210410081132-afb366fc7cd1/go.mod h1:9tjilg8BloeKEkVJvy7fQ90B1CfIiPueXVOjqfkSzI8= +golang.org/x/net v0.0.0-20220722155237-a158d28d115b h1:PxfKdU9lEEDYjdIzOtC4qFWgkU2rGHdKlKowJSMN9h0= +golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= +golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod 
h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190129075346-302c3dd5f1cc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190403152447-81d4e9dc473e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190922100055-0a153f010e69/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190924154521-2837fb4f24fe/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191008105621-543471e840be/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200124204421-9fbb57f87de9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210303074136-134d130e1a04/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210403161142-5e06dd20ab57/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.2.0 h1:ljd4t30dBnAvMZaQCevtY0xLLD0A+bRZXbgLMLU1F/A= +golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.1-0.20181227161524-e6919f6577db/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= +golang.org/x/text 
v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= +golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.4.0 h1:BrVqGRd7+k1DiOgtnFvAkoQEWQvBc25ouMJM6429SFg= +golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= +golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= +golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190907020128-2ca718005c18/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.1.2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= +google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20190404172233-64821d5d2107/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= +google.golang.org/genproto v0.0.0-20210602131652-f16073e35f0c/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= +google.golang.org/genproto v0.0.0-20211208223120-3a66f561d7aa h1:I0YcKz0I7OAhddo7ya8kMnvprhcWM045PmkBdMO9zN0= +google.golang.org/genproto v0.0.0-20211208223120-3a66f561d7aa/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/grpc v1.14.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= +google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= +google.golang.org/grpc v1.22.0/go.mod 
h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= +google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0= +google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= +google.golang.org/grpc v1.38.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= +google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= +google.golang.org/grpc v1.51.0 h1:E1eGv1FTqoLIdnBCZufiSHgKjlqG6fKFf6pPWtMTh8U= +google.golang.org/grpc v1.51.0/go.mod h1:wgNDFcnuBGmxLKI/qn4T+m5BtEBYXJPvibbUPsAIPww= +google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= +google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= +google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= +google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= +google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= +google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= +google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= +google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= +google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= +google.golang.org/protobuf v1.28.1 h1:d0NfwRgPtno5B1Wa6L2DAG+KivqkdutMf1UhdNx175w= +google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= +gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= +gopkg.in/asn1-ber.v1 v1.0.0-20181015200546-f715ec2f112d/go.mod h1:cuepJuh7vyXfUyUwEgHQXw849cJrilpS5NeIjOWESAw= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 h1:YR8cESwS4TdDjEe65xsg0ogRM/Nc3DYOhEAlW+xobZo= +gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/square/go-jose.v2 v2.3.1/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= +gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod 
h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= +gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc= diff --git a/receiver/snowflakereceiver/internal/metadata/generated_metrics.go b/receiver/snowflakereceiver/internal/metadata/generated_metrics.go new file mode 100644 index 000000000000..112e5308f6db --- /dev/null +++ b/receiver/snowflakereceiver/internal/metadata/generated_metrics.go @@ -0,0 +1,2831 @@ +// Code generated by mdatagen. DO NOT EDIT. + +package metadata + +import ( + "time" + + "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/confmap" + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/pmetric" +) + +// MetricSettings provides common settings for a particular metric. +type MetricSettings struct { + Enabled bool `mapstructure:"enabled"` + + enabledProvidedByUser bool +} + +// IsEnabledProvidedByUser returns true if `enabled` option is explicitly set in user settings to any value. +func (ms *MetricSettings) IsEnabledProvidedByUser() bool { + return ms.enabledProvidedByUser +} + +func (ms *MetricSettings) Unmarshal(parser *confmap.Conf) error { + if parser == nil { + return nil + } + err := parser.Unmarshal(ms, confmap.WithErrorUnused()) + if err != nil { + return err + } + ms.enabledProvidedByUser = parser.IsSet("enabled") + return nil +} + +// MetricsSettings provides settings for snowflakereceiver metrics. 
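+//
+// For example, the following hypothetical user configuration disables a
+// metric that is on by default and enables one that is off by default; the
+// keys match the mapstructure tags on the struct fields below:
+//
+//	metrics:
+//	  snowflake.database.query.count:
+//	    enabled: false
+//	  snowflake.logins.total:
+//	    enabled: true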
+type MetricsSettings struct { + SnowflakeBillingCloudServiceTotal MetricSettings `mapstructure:"snowflake.billing.cloud_service.total"` + SnowflakeBillingTotalCreditTotal MetricSettings `mapstructure:"snowflake.billing.total_credit.total"` + SnowflakeBillingVirtualWarehouseTotal MetricSettings `mapstructure:"snowflake.billing.virtual_warehouse.total"` + SnowflakeBillingWarehouseCloudServiceTotal MetricSettings `mapstructure:"snowflake.billing.warehouse.cloud_service.total"` + SnowflakeBillingWarehouseTotalCreditTotal MetricSettings `mapstructure:"snowflake.billing.warehouse.total_credit.total"` + SnowflakeBillingWarehouseVirtualWarehouseTotal MetricSettings `mapstructure:"snowflake.billing.warehouse.virtual_warehouse.total"` + SnowflakeDatabaseBytesScannedAvg MetricSettings `mapstructure:"snowflake.database.bytes_scanned.avg"` + SnowflakeDatabaseQueryCount MetricSettings `mapstructure:"snowflake.database.query.count"` + SnowflakeLoginsTotal MetricSettings `mapstructure:"snowflake.logins.total"` + SnowflakePipeCreditsUsedTotal MetricSettings `mapstructure:"snowflake.pipe.credits_used.total"` + SnowflakeQueryBlocked MetricSettings `mapstructure:"snowflake.query.blocked"` + SnowflakeQueryBytesDeletedTotal MetricSettings `mapstructure:"snowflake.query.bytes_deleted.total"` + SnowflakeQueryBytesScannedTotal MetricSettings `mapstructure:"snowflake.query.bytes_scanned.total"` + SnowflakeQueryBytesSpilledLocalTotal MetricSettings `mapstructure:"snowflake.query.bytes_spilled.local.total"` + SnowflakeQueryBytesSpilledRemoteTotal MetricSettings `mapstructure:"snowflake.query.bytes_spilled.remote.total"` + SnowflakeQueryBytesWrittenTotal MetricSettings `mapstructure:"snowflake.query.bytes_written.total"` + SnowflakeQueryCompilationTimeTotal MetricSettings `mapstructure:"snowflake.query.compilation_time.total"` + SnowflakeQueryDataScannedCacheAvg MetricSettings `mapstructure:"snowflake.query.data_scanned_cache.avg"` + SnowflakeQueryExecuted MetricSettings `mapstructure:"snowflake.query.executed"` + SnowflakeQueryExecutionTimeTotal MetricSettings `mapstructure:"snowflake.query.execution_time.total"` + SnowflakeQueryPartitionsScannedTotal MetricSettings `mapstructure:"snowflake.query.partitions_scanned.total"` + SnowflakeQueryQueuedOverload MetricSettings `mapstructure:"snowflake.query.queued_overload"` + SnowflakeQueryQueuedProvision MetricSettings `mapstructure:"snowflake.query.queued_provision"` + SnowflakeQueuedOverloadTimeAvg MetricSettings `mapstructure:"snowflake.queued_overload_time.avg"` + SnowflakeQueuedOverloadTimeTotal MetricSettings `mapstructure:"snowflake.queued_overload_time.total"` + SnowflakeQueuedProvisioningTimeAvg MetricSettings `mapstructure:"snowflake.queued_provisioning_time.avg"` + SnowflakeQueuedProvisioningTimeTotal MetricSettings `mapstructure:"snowflake.queued_provisioning_time.total"` + SnowflakeQueuedRepairTimeAvg MetricSettings `mapstructure:"snowflake.queued_repair_time.avg"` + SnowflakeQueuedRepairTimeTotal MetricSettings `mapstructure:"snowflake.queued_repair_time.total"` + SnowflakeRowsDeletedTotal MetricSettings `mapstructure:"snowflake.rows_deleted.total"` + SnowflakeRowsInsertedTotal MetricSettings `mapstructure:"snowflake.rows_inserted.total"` + SnowflakeRowsProducedTotal MetricSettings `mapstructure:"snowflake.rows_produced.total"` + SnowflakeRowsUnloadedTotal MetricSettings `mapstructure:"snowflake.rows_unloaded.total"` + SnowflakeRowsUpdatedTotal MetricSettings `mapstructure:"snowflake.rows_updated.total"` + SnowflakeSessionIDCount MetricSettings 
`mapstructure:"snowflake.session_id.count"` + SnowflakeStorageFailsafeBytesTotal MetricSettings `mapstructure:"snowflake.storage.failsafe_bytes.total"` + SnowflakeStorageStageBytesTotal MetricSettings `mapstructure:"snowflake.storage.stage_bytes.total"` + SnowflakeStorageStorageBytesTotal MetricSettings `mapstructure:"snowflake.storage.storage_bytes.total"` + SnowflakeTotalElapsedTimeAvg MetricSettings `mapstructure:"snowflake.total_elapsed_time.avg"` + SnowflakeTotalElapsedTimeTotal MetricSettings `mapstructure:"snowflake.total_elapsed_time.total"` +} + +func DefaultMetricsSettings() MetricsSettings { + return MetricsSettings{ + SnowflakeBillingCloudServiceTotal: MetricSettings{ + Enabled: false, + }, + SnowflakeBillingTotalCreditTotal: MetricSettings{ + Enabled: false, + }, + SnowflakeBillingVirtualWarehouseTotal: MetricSettings{ + Enabled: false, + }, + SnowflakeBillingWarehouseCloudServiceTotal: MetricSettings{ + Enabled: false, + }, + SnowflakeBillingWarehouseTotalCreditTotal: MetricSettings{ + Enabled: false, + }, + SnowflakeBillingWarehouseVirtualWarehouseTotal: MetricSettings{ + Enabled: false, + }, + SnowflakeDatabaseBytesScannedAvg: MetricSettings{ + Enabled: true, + }, + SnowflakeDatabaseQueryCount: MetricSettings{ + Enabled: true, + }, + SnowflakeLoginsTotal: MetricSettings{ + Enabled: false, + }, + SnowflakePipeCreditsUsedTotal: MetricSettings{ + Enabled: false, + }, + SnowflakeQueryBlocked: MetricSettings{ + Enabled: true, + }, + SnowflakeQueryBytesDeletedTotal: MetricSettings{ + Enabled: true, + }, + SnowflakeQueryBytesScannedTotal: MetricSettings{ + Enabled: false, + }, + SnowflakeQueryBytesSpilledLocalTotal: MetricSettings{ + Enabled: false, + }, + SnowflakeQueryBytesSpilledRemoteTotal: MetricSettings{ + Enabled: false, + }, + SnowflakeQueryBytesWrittenTotal: MetricSettings{ + Enabled: true, + }, + SnowflakeQueryCompilationTimeTotal: MetricSettings{ + Enabled: true, + }, + SnowflakeQueryDataScannedCacheAvg: MetricSettings{ + Enabled: false, + }, + SnowflakeQueryExecuted: MetricSettings{ + Enabled: true, + }, + SnowflakeQueryExecutionTimeTotal: MetricSettings{ + Enabled: true, + }, + SnowflakeQueryPartitionsScannedTotal: MetricSettings{ + Enabled: false, + }, + SnowflakeQueryQueuedOverload: MetricSettings{ + Enabled: true, + }, + SnowflakeQueryQueuedProvision: MetricSettings{ + Enabled: true, + }, + SnowflakeQueuedOverloadTimeAvg: MetricSettings{ + Enabled: true, + }, + SnowflakeQueuedOverloadTimeTotal: MetricSettings{ + Enabled: false, + }, + SnowflakeQueuedProvisioningTimeAvg: MetricSettings{ + Enabled: true, + }, + SnowflakeQueuedProvisioningTimeTotal: MetricSettings{ + Enabled: false, + }, + SnowflakeQueuedRepairTimeAvg: MetricSettings{ + Enabled: true, + }, + SnowflakeQueuedRepairTimeTotal: MetricSettings{ + Enabled: false, + }, + SnowflakeRowsDeletedTotal: MetricSettings{ + Enabled: false, + }, + SnowflakeRowsInsertedTotal: MetricSettings{ + Enabled: false, + }, + SnowflakeRowsProducedTotal: MetricSettings{ + Enabled: false, + }, + SnowflakeRowsUnloadedTotal: MetricSettings{ + Enabled: false, + }, + SnowflakeRowsUpdatedTotal: MetricSettings{ + Enabled: false, + }, + SnowflakeSessionIDCount: MetricSettings{ + Enabled: false, + }, + SnowflakeStorageFailsafeBytesTotal: MetricSettings{ + Enabled: false, + }, + SnowflakeStorageStageBytesTotal: MetricSettings{ + Enabled: true, + }, + SnowflakeStorageStorageBytesTotal: MetricSettings{ + Enabled: true, + }, + SnowflakeTotalElapsedTimeAvg: MetricSettings{ + Enabled: true, + }, + SnowflakeTotalElapsedTimeTotal: MetricSettings{ 
+ Enabled: false, + }, + } +} + +type metricSnowflakeBillingCloudServiceTotal struct { + data pmetric.Metric // data buffer for generated metric. + settings MetricSettings // metric settings provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills snowflake.billing.cloud_service.total metric with initial data. +func (m *metricSnowflakeBillingCloudServiceTotal) init() { + m.data.SetName("snowflake.billing.cloud_service.total") + m.data.SetDescription("Reported total credits used in the cloud service.") + m.data.SetUnit("{credits}") + m.data.SetEmptyGauge() + m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) +} + +func (m *metricSnowflakeBillingCloudServiceTotal) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64, serviceTypeAttributeValue string) { + if !m.settings.Enabled { + return + } + dp := m.data.Gauge().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetDoubleValue(val) + dp.Attributes().PutStr("service_type", serviceTypeAttributeValue) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricSnowflakeBillingCloudServiceTotal) updateCapacity() { + if m.data.Gauge().DataPoints().Len() > m.capacity { + m.capacity = m.data.Gauge().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricSnowflakeBillingCloudServiceTotal) emit(metrics pmetric.MetricSlice) { + if m.settings.Enabled && m.data.Gauge().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricSnowflakeBillingCloudServiceTotal(settings MetricSettings) metricSnowflakeBillingCloudServiceTotal { + m := metricSnowflakeBillingCloudServiceTotal{settings: settings} + if settings.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricSnowflakeBillingTotalCreditTotal struct { + data pmetric.Metric // data buffer for generated metric. + settings MetricSettings // metric settings provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills snowflake.billing.total_credit.total metric with initial data. +func (m *metricSnowflakeBillingTotalCreditTotal) init() { + m.data.SetName("snowflake.billing.total_credit.total") + m.data.SetDescription("Reported total credits used across account.") + m.data.SetUnit("{credits}") + m.data.SetEmptyGauge() + m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) +} + +func (m *metricSnowflakeBillingTotalCreditTotal) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64, serviceTypeAttributeValue string) { + if !m.settings.Enabled { + return + } + dp := m.data.Gauge().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetDoubleValue(val) + dp.Attributes().PutStr("service_type", serviceTypeAttributeValue) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricSnowflakeBillingTotalCreditTotal) updateCapacity() { + if m.data.Gauge().DataPoints().Len() > m.capacity { + m.capacity = m.data.Gauge().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. 
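+//
+// Roughly, a scrape records any number of data points and then emits once; a
+// hypothetical call site (startTime, now, and scopeMetrics are assumed to be
+// defined by the caller, and "CLOUD_SERVICES" is only an example
+// service_type value):
+//
+//	m.recordDataPoint(startTime, now, 42.5, "CLOUD_SERVICES")
+//	m.emit(scopeMetrics.Metrics())
+//
+// After emit, the buffer is re-initialized so the next scrape starts from an
+// empty data point slice.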
+func (m *metricSnowflakeBillingTotalCreditTotal) emit(metrics pmetric.MetricSlice) { + if m.settings.Enabled && m.data.Gauge().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricSnowflakeBillingTotalCreditTotal(settings MetricSettings) metricSnowflakeBillingTotalCreditTotal { + m := metricSnowflakeBillingTotalCreditTotal{settings: settings} + if settings.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricSnowflakeBillingVirtualWarehouseTotal struct { + data pmetric.Metric // data buffer for generated metric. + settings MetricSettings // metric settings provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills snowflake.billing.virtual_warehouse.total metric with initial data. +func (m *metricSnowflakeBillingVirtualWarehouseTotal) init() { + m.data.SetName("snowflake.billing.virtual_warehouse.total") + m.data.SetDescription("Reported total credits used by virtual warehouse service.") + m.data.SetUnit("{credits}") + m.data.SetEmptyGauge() + m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) +} + +func (m *metricSnowflakeBillingVirtualWarehouseTotal) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64, serviceTypeAttributeValue string) { + if !m.settings.Enabled { + return + } + dp := m.data.Gauge().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetDoubleValue(val) + dp.Attributes().PutStr("service_type", serviceTypeAttributeValue) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricSnowflakeBillingVirtualWarehouseTotal) updateCapacity() { + if m.data.Gauge().DataPoints().Len() > m.capacity { + m.capacity = m.data.Gauge().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricSnowflakeBillingVirtualWarehouseTotal) emit(metrics pmetric.MetricSlice) { + if m.settings.Enabled && m.data.Gauge().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricSnowflakeBillingVirtualWarehouseTotal(settings MetricSettings) metricSnowflakeBillingVirtualWarehouseTotal { + m := metricSnowflakeBillingVirtualWarehouseTotal{settings: settings} + if settings.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricSnowflakeBillingWarehouseCloudServiceTotal struct { + data pmetric.Metric // data buffer for generated metric. + settings MetricSettings // metric settings provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills snowflake.billing.warehouse.cloud_service.total metric with initial data. 
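+//
+// Note: the account-level billing metrics above attach a service_type
+// attribute, while the warehouse-level billing metrics that follow attach a
+// warehouse_name attribute instead.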
+func (m *metricSnowflakeBillingWarehouseCloudServiceTotal) init() { + m.data.SetName("snowflake.billing.warehouse.cloud_service.total") + m.data.SetDescription("Credits used across cloud service for given warehouse.") + m.data.SetUnit("{credits}") + m.data.SetEmptyGauge() + m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) +} + +func (m *metricSnowflakeBillingWarehouseCloudServiceTotal) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64, warehouseNameAttributeValue string) { + if !m.settings.Enabled { + return + } + dp := m.data.Gauge().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetDoubleValue(val) + dp.Attributes().PutStr("warehouse_name", warehouseNameAttributeValue) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricSnowflakeBillingWarehouseCloudServiceTotal) updateCapacity() { + if m.data.Gauge().DataPoints().Len() > m.capacity { + m.capacity = m.data.Gauge().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricSnowflakeBillingWarehouseCloudServiceTotal) emit(metrics pmetric.MetricSlice) { + if m.settings.Enabled && m.data.Gauge().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricSnowflakeBillingWarehouseCloudServiceTotal(settings MetricSettings) metricSnowflakeBillingWarehouseCloudServiceTotal { + m := metricSnowflakeBillingWarehouseCloudServiceTotal{settings: settings} + if settings.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricSnowflakeBillingWarehouseTotalCreditTotal struct { + data pmetric.Metric // data buffer for generated metric. + settings MetricSettings // metric settings provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills snowflake.billing.warehouse.total_credit.total metric with initial data. +func (m *metricSnowflakeBillingWarehouseTotalCreditTotal) init() { + m.data.SetName("snowflake.billing.warehouse.total_credit.total") + m.data.SetDescription("Total credits used associated with given warehouse.") + m.data.SetUnit("{credits}") + m.data.SetEmptyGauge() + m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) +} + +func (m *metricSnowflakeBillingWarehouseTotalCreditTotal) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64, warehouseNameAttributeValue string) { + if !m.settings.Enabled { + return + } + dp := m.data.Gauge().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetDoubleValue(val) + dp.Attributes().PutStr("warehouse_name", warehouseNameAttributeValue) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricSnowflakeBillingWarehouseTotalCreditTotal) updateCapacity() { + if m.data.Gauge().DataPoints().Len() > m.capacity { + m.capacity = m.data.Gauge().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. 
+func (m *metricSnowflakeBillingWarehouseTotalCreditTotal) emit(metrics pmetric.MetricSlice) { + if m.settings.Enabled && m.data.Gauge().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricSnowflakeBillingWarehouseTotalCreditTotal(settings MetricSettings) metricSnowflakeBillingWarehouseTotalCreditTotal { + m := metricSnowflakeBillingWarehouseTotalCreditTotal{settings: settings} + if settings.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricSnowflakeBillingWarehouseVirtualWarehouseTotal struct { + data pmetric.Metric // data buffer for generated metric. + settings MetricSettings // metric settings provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills snowflake.billing.warehouse.virtual_warehouse.total metric with initial data. +func (m *metricSnowflakeBillingWarehouseVirtualWarehouseTotal) init() { + m.data.SetName("snowflake.billing.warehouse.virtual_warehouse.total") + m.data.SetDescription("Total credits used by virtual warehouse service for given warehouse.") + m.data.SetUnit("{credits}") + m.data.SetEmptyGauge() + m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) +} + +func (m *metricSnowflakeBillingWarehouseVirtualWarehouseTotal) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64, warehouseNameAttributeValue string) { + if !m.settings.Enabled { + return + } + dp := m.data.Gauge().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetDoubleValue(val) + dp.Attributes().PutStr("warehouse_name", warehouseNameAttributeValue) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricSnowflakeBillingWarehouseVirtualWarehouseTotal) updateCapacity() { + if m.data.Gauge().DataPoints().Len() > m.capacity { + m.capacity = m.data.Gauge().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricSnowflakeBillingWarehouseVirtualWarehouseTotal) emit(metrics pmetric.MetricSlice) { + if m.settings.Enabled && m.data.Gauge().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricSnowflakeBillingWarehouseVirtualWarehouseTotal(settings MetricSettings) metricSnowflakeBillingWarehouseVirtualWarehouseTotal { + m := metricSnowflakeBillingWarehouseVirtualWarehouseTotal{settings: settings} + if settings.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricSnowflakeDatabaseBytesScannedAvg struct { + data pmetric.Metric // data buffer for generated metric. + settings MetricSettings // metric settings provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills snowflake.database.bytes_scanned.avg metric with initial data. 
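+//
+// This metric, like the other per-query metrics in this package, carries the
+// full set of query dimensions. A hypothetical record call with example
+// attribute values (schema, execution status, error message, query type,
+// warehouse name, database name, warehouse size):
+//
+//	m.recordDataPoint(startTime, now, 1024.0,
+//		"PUBLIC", "SUCCESS", "", "SELECT", "COMPUTE_WH", "snowflake", "X-Small")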
+func (m *metricSnowflakeDatabaseBytesScannedAvg) init() { + m.data.SetName("snowflake.database.bytes_scanned.avg") + m.data.SetDescription("Average bytes scanned in a database.") + m.data.SetUnit("By") + m.data.SetEmptyGauge() + m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) +} + +func (m *metricSnowflakeDatabaseBytesScannedAvg) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64, schemaNameAttributeValue string, executionStatusAttributeValue string, errorMessageAttributeValue string, queryTypeAttributeValue string, warehouseNameAttributeValue string, databaseNameAttributeValue string, warehouseSizeAttributeValue string) { + if !m.settings.Enabled { + return + } + dp := m.data.Gauge().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetDoubleValue(val) + dp.Attributes().PutStr("schema_name", schemaNameAttributeValue) + dp.Attributes().PutStr("execution_status", executionStatusAttributeValue) + dp.Attributes().PutStr("error_message", errorMessageAttributeValue) + dp.Attributes().PutStr("query_type", queryTypeAttributeValue) + dp.Attributes().PutStr("warehouse_name", warehouseNameAttributeValue) + dp.Attributes().PutStr("database_name", databaseNameAttributeValue) + dp.Attributes().PutStr("warehouse_size", warehouseSizeAttributeValue) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricSnowflakeDatabaseBytesScannedAvg) updateCapacity() { + if m.data.Gauge().DataPoints().Len() > m.capacity { + m.capacity = m.data.Gauge().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricSnowflakeDatabaseBytesScannedAvg) emit(metrics pmetric.MetricSlice) { + if m.settings.Enabled && m.data.Gauge().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricSnowflakeDatabaseBytesScannedAvg(settings MetricSettings) metricSnowflakeDatabaseBytesScannedAvg { + m := metricSnowflakeDatabaseBytesScannedAvg{settings: settings} + if settings.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricSnowflakeDatabaseQueryCount struct { + data pmetric.Metric // data buffer for generated metric. + settings MetricSettings // metric settings provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills snowflake.database.query.count metric with initial data. 
+func (m *metricSnowflakeDatabaseQueryCount) init() { + m.data.SetName("snowflake.database.query.count") + m.data.SetDescription("Total query count for database.") + m.data.SetUnit("1") + m.data.SetEmptyGauge() + m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) +} + +func (m *metricSnowflakeDatabaseQueryCount) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, schemaNameAttributeValue string, executionStatusAttributeValue string, errorMessageAttributeValue string, queryTypeAttributeValue string, warehouseNameAttributeValue string, databaseNameAttributeValue string, warehouseSizeAttributeValue string) { + if !m.settings.Enabled { + return + } + dp := m.data.Gauge().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) + dp.Attributes().PutStr("schema_name", schemaNameAttributeValue) + dp.Attributes().PutStr("execution_status", executionStatusAttributeValue) + dp.Attributes().PutStr("error_message", errorMessageAttributeValue) + dp.Attributes().PutStr("query_type", queryTypeAttributeValue) + dp.Attributes().PutStr("warehouse_name", warehouseNameAttributeValue) + dp.Attributes().PutStr("database_name", databaseNameAttributeValue) + dp.Attributes().PutStr("warehouse_size", warehouseSizeAttributeValue) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricSnowflakeDatabaseQueryCount) updateCapacity() { + if m.data.Gauge().DataPoints().Len() > m.capacity { + m.capacity = m.data.Gauge().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricSnowflakeDatabaseQueryCount) emit(metrics pmetric.MetricSlice) { + if m.settings.Enabled && m.data.Gauge().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricSnowflakeDatabaseQueryCount(settings MetricSettings) metricSnowflakeDatabaseQueryCount { + m := metricSnowflakeDatabaseQueryCount{settings: settings} + if settings.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricSnowflakeLoginsTotal struct { + data pmetric.Metric // data buffer for generated metric. + settings MetricSettings // metric settings provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills snowflake.logins.total metric with initial data. +func (m *metricSnowflakeLoginsTotal) init() { + m.data.SetName("snowflake.logins.total") + m.data.SetDescription("Total login attempts for account.") + m.data.SetUnit("1") + m.data.SetEmptyGauge() + m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) +} + +func (m *metricSnowflakeLoginsTotal) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, errorMessageAttributeValue string, reportedClientTypeAttributeValue string, isSuccessAttributeValue string) { + if !m.settings.Enabled { + return + } + dp := m.data.Gauge().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) + dp.Attributes().PutStr("error_message", errorMessageAttributeValue) + dp.Attributes().PutStr("reported_client_type", reportedClientTypeAttributeValue) + dp.Attributes().PutStr("is_success", isSuccessAttributeValue) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. 
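+//
+// The saved high-water mark is passed to EnsureCapacity by init, so data
+// point slices on later scrapes are pre-allocated instead of being grown
+// incrementally.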
+func (m *metricSnowflakeLoginsTotal) updateCapacity() {
+	if m.data.Gauge().DataPoints().Len() > m.capacity {
+		m.capacity = m.data.Gauge().DataPoints().Len()
+	}
+}
+
+// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
+func (m *metricSnowflakeLoginsTotal) emit(metrics pmetric.MetricSlice) {
+	if m.settings.Enabled && m.data.Gauge().DataPoints().Len() > 0 {
+		m.updateCapacity()
+		m.data.MoveTo(metrics.AppendEmpty())
+		m.init()
+	}
+}
+
+func newMetricSnowflakeLoginsTotal(settings MetricSettings) metricSnowflakeLoginsTotal {
+	m := metricSnowflakeLoginsTotal{settings: settings}
+	if settings.Enabled {
+		m.data = pmetric.NewMetric()
+		m.init()
+	}
+	return m
+}
+
+type metricSnowflakePipeCreditsUsedTotal struct {
+	data     pmetric.Metric // data buffer for generated metric.
+	settings MetricSettings // metric settings provided by user.
+	capacity int            // max observed number of data points added to the metric.
+}
+
+// init fills snowflake.pipe.credits_used.total metric with initial data.
+func (m *metricSnowflakePipeCreditsUsedTotal) init() {
+	m.data.SetName("snowflake.pipe.credits_used.total")
+	m.data.SetDescription("Snowpipe credits consumed.")
+	m.data.SetUnit("{credits}")
+	m.data.SetEmptyGauge()
+	m.data.Gauge().DataPoints().EnsureCapacity(m.capacity)
+}
+
+func (m *metricSnowflakePipeCreditsUsedTotal) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64, pipeNameAttributeValue string) {
+	if !m.settings.Enabled {
+		return
+	}
+	dp := m.data.Gauge().DataPoints().AppendEmpty()
+	dp.SetStartTimestamp(start)
+	dp.SetTimestamp(ts)
+	dp.SetDoubleValue(val)
+	dp.Attributes().PutStr("pipe_name", pipeNameAttributeValue)
+}
+
+// updateCapacity saves max length of data point slices that will be used for the slice capacity.
+func (m *metricSnowflakePipeCreditsUsedTotal) updateCapacity() {
+	if m.data.Gauge().DataPoints().Len() > m.capacity {
+		m.capacity = m.data.Gauge().DataPoints().Len()
+	}
+}
+
+// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
+func (m *metricSnowflakePipeCreditsUsedTotal) emit(metrics pmetric.MetricSlice) {
+	if m.settings.Enabled && m.data.Gauge().DataPoints().Len() > 0 {
+		m.updateCapacity()
+		m.data.MoveTo(metrics.AppendEmpty())
+		m.init()
+	}
+}
+
+func newMetricSnowflakePipeCreditsUsedTotal(settings MetricSettings) metricSnowflakePipeCreditsUsedTotal {
+	m := metricSnowflakePipeCreditsUsedTotal{settings: settings}
+	if settings.Enabled {
+		m.data = pmetric.NewMetric()
+		m.init()
+	}
+	return m
+}
+
+type metricSnowflakeQueryBlocked struct {
+	data     pmetric.Metric // data buffer for generated metric.
+	settings MetricSettings // metric settings provided by user.
+	capacity int            // max observed number of data points added to the metric.
+}
+
+// init fills snowflake.query.blocked metric with initial data.
+func (m *metricSnowflakeQueryBlocked) init() { + m.data.SetName("snowflake.query.blocked") + m.data.SetDescription("Blocked query count for warehouse.") + m.data.SetUnit("1") + m.data.SetEmptyGauge() + m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) +} + +func (m *metricSnowflakeQueryBlocked) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64, warehouseNameAttributeValue string) { + if !m.settings.Enabled { + return + } + dp := m.data.Gauge().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetDoubleValue(val) + dp.Attributes().PutStr("warehouse_name", warehouseNameAttributeValue) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricSnowflakeQueryBlocked) updateCapacity() { + if m.data.Gauge().DataPoints().Len() > m.capacity { + m.capacity = m.data.Gauge().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricSnowflakeQueryBlocked) emit(metrics pmetric.MetricSlice) { + if m.settings.Enabled && m.data.Gauge().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricSnowflakeQueryBlocked(settings MetricSettings) metricSnowflakeQueryBlocked { + m := metricSnowflakeQueryBlocked{settings: settings} + if settings.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricSnowflakeQueryBytesDeletedTotal struct { + data pmetric.Metric // data buffer for generated metric. + settings MetricSettings // metric settings provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills snowflake.query.bytes_deleted.total metric with initial data. +func (m *metricSnowflakeQueryBytesDeletedTotal) init() { + m.data.SetName("snowflake.query.bytes_deleted.total") + m.data.SetDescription("Total bytes deleted in database.") + m.data.SetUnit("By") + m.data.SetEmptyGauge() + m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) +} + +func (m *metricSnowflakeQueryBytesDeletedTotal) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, schemaNameAttributeValue string, executionStatusAttributeValue string, errorMessageAttributeValue string, queryTypeAttributeValue string, warehouseNameAttributeValue string, databaseNameAttributeValue string, warehouseSizeAttributeValue string) { + if !m.settings.Enabled { + return + } + dp := m.data.Gauge().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) + dp.Attributes().PutStr("schema_name", schemaNameAttributeValue) + dp.Attributes().PutStr("execution_status", executionStatusAttributeValue) + dp.Attributes().PutStr("error_message", errorMessageAttributeValue) + dp.Attributes().PutStr("query_type", queryTypeAttributeValue) + dp.Attributes().PutStr("warehouse_name", warehouseNameAttributeValue) + dp.Attributes().PutStr("database_name", databaseNameAttributeValue) + dp.Attributes().PutStr("warehouse_size", warehouseSizeAttributeValue) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricSnowflakeQueryBytesDeletedTotal) updateCapacity() { + if m.data.Gauge().DataPoints().Len() > m.capacity { + m.capacity = m.data.Gauge().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. 
+func (m *metricSnowflakeQueryBytesDeletedTotal) emit(metrics pmetric.MetricSlice) {
+	if m.settings.Enabled && m.data.Gauge().DataPoints().Len() > 0 {
+		m.updateCapacity()
+		m.data.MoveTo(metrics.AppendEmpty())
+		m.init()
+	}
+}
+
+func newMetricSnowflakeQueryBytesDeletedTotal(settings MetricSettings) metricSnowflakeQueryBytesDeletedTotal {
+	m := metricSnowflakeQueryBytesDeletedTotal{settings: settings}
+	if settings.Enabled {
+		m.data = pmetric.NewMetric()
+		m.init()
+	}
+	return m
+}
+
+type metricSnowflakeQueryBytesScannedTotal struct {
+	data     pmetric.Metric // data buffer for generated metric.
+	settings MetricSettings // metric settings provided by user.
+	capacity int            // max observed number of data points added to the metric.
+}
+
+// init fills snowflake.query.bytes_scanned.total metric with initial data.
+func (m *metricSnowflakeQueryBytesScannedTotal) init() {
+	m.data.SetName("snowflake.query.bytes_scanned.total")
+	m.data.SetDescription("Total bytes scanned in database.")
+	m.data.SetUnit("By")
+	m.data.SetEmptyGauge()
+	m.data.Gauge().DataPoints().EnsureCapacity(m.capacity)
+}
+
+func (m *metricSnowflakeQueryBytesScannedTotal) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, schemaNameAttributeValue string, executionStatusAttributeValue string, errorMessageAttributeValue string, queryTypeAttributeValue string, warehouseNameAttributeValue string, databaseNameAttributeValue string, warehouseSizeAttributeValue string) {
+	if !m.settings.Enabled {
+		return
+	}
+	dp := m.data.Gauge().DataPoints().AppendEmpty()
+	dp.SetStartTimestamp(start)
+	dp.SetTimestamp(ts)
+	dp.SetIntValue(val)
+	dp.Attributes().PutStr("schema_name", schemaNameAttributeValue)
+	dp.Attributes().PutStr("execution_status", executionStatusAttributeValue)
+	dp.Attributes().PutStr("error_message", errorMessageAttributeValue)
+	dp.Attributes().PutStr("query_type", queryTypeAttributeValue)
+	dp.Attributes().PutStr("warehouse_name", warehouseNameAttributeValue)
+	dp.Attributes().PutStr("database_name", databaseNameAttributeValue)
+	dp.Attributes().PutStr("warehouse_size", warehouseSizeAttributeValue)
+}
+
+// updateCapacity saves max length of data point slices that will be used for the slice capacity.
+func (m *metricSnowflakeQueryBytesScannedTotal) updateCapacity() {
+	if m.data.Gauge().DataPoints().Len() > m.capacity {
+		m.capacity = m.data.Gauge().DataPoints().Len()
+	}
+}
+
+// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
+func (m *metricSnowflakeQueryBytesScannedTotal) emit(metrics pmetric.MetricSlice) {
+	if m.settings.Enabled && m.data.Gauge().DataPoints().Len() > 0 {
+		m.updateCapacity()
+		m.data.MoveTo(metrics.AppendEmpty())
+		m.init()
+	}
+}
+
+func newMetricSnowflakeQueryBytesScannedTotal(settings MetricSettings) metricSnowflakeQueryBytesScannedTotal {
+	m := metricSnowflakeQueryBytesScannedTotal{settings: settings}
+	if settings.Enabled {
+		m.data = pmetric.NewMetric()
+		m.init()
+	}
+	return m
+}
+
+type metricSnowflakeQueryBytesSpilledLocalTotal struct {
+	data     pmetric.Metric // data buffer for generated metric.
+	settings MetricSettings // metric settings provided by user.
+	capacity int            // max observed number of data points added to the metric.
+}
+
+// init fills snowflake.query.bytes_spilled.local.total metric with initial data.
+func (m *metricSnowflakeQueryBytesSpilledLocalTotal) init() { + m.data.SetName("snowflake.query.bytes_spilled.local.total") + m.data.SetDescription("Total bytes spilled (intermediate results do not fit in memory) by local storage.") + m.data.SetUnit("By") + m.data.SetEmptyGauge() + m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) +} + +func (m *metricSnowflakeQueryBytesSpilledLocalTotal) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, schemaNameAttributeValue string, executionStatusAttributeValue string, errorMessageAttributeValue string, queryTypeAttributeValue string, warehouseNameAttributeValue string, databaseNameAttributeValue string, warehouseSizeAttributeValue string) { + if !m.settings.Enabled { + return + } + dp := m.data.Gauge().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) + dp.Attributes().PutStr("schema_name", schemaNameAttributeValue) + dp.Attributes().PutStr("execution_status", executionStatusAttributeValue) + dp.Attributes().PutStr("error_message", errorMessageAttributeValue) + dp.Attributes().PutStr("query_type", queryTypeAttributeValue) + dp.Attributes().PutStr("warehouse_name", warehouseNameAttributeValue) + dp.Attributes().PutStr("database_name", databaseNameAttributeValue) + dp.Attributes().PutStr("warehouse_size", warehouseSizeAttributeValue) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricSnowflakeQueryBytesSpilledLocalTotal) updateCapacity() { + if m.data.Gauge().DataPoints().Len() > m.capacity { + m.capacity = m.data.Gauge().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricSnowflakeQueryBytesSpilledLocalTotal) emit(metrics pmetric.MetricSlice) { + if m.settings.Enabled && m.data.Gauge().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricSnowflakeQueryBytesSpilledLocalTotal(settings MetricSettings) metricSnowflakeQueryBytesSpilledLocalTotal { + m := metricSnowflakeQueryBytesSpilledLocalTotal{settings: settings} + if settings.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricSnowflakeQueryBytesSpilledRemoteTotal struct { + data pmetric.Metric // data buffer for generated metric. + settings MetricSettings // metric settings provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills snowflake.query.bytes_spilled.remote.total metric with initial data. 
+func (m *metricSnowflakeQueryBytesSpilledRemoteTotal) init() { + m.data.SetName("snowflake.query.bytes_spilled.remote.total") + m.data.SetDescription("Total bytes spilled (intermediate results do not fit in memory) by remote storage.") + m.data.SetUnit("By") + m.data.SetEmptyGauge() + m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) +} + +func (m *metricSnowflakeQueryBytesSpilledRemoteTotal) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, schemaNameAttributeValue string, executionStatusAttributeValue string, errorMessageAttributeValue string, queryTypeAttributeValue string, warehouseNameAttributeValue string, databaseNameAttributeValue string, warehouseSizeAttributeValue string) { + if !m.settings.Enabled { + return + } + dp := m.data.Gauge().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) + dp.Attributes().PutStr("schema_name", schemaNameAttributeValue) + dp.Attributes().PutStr("execution_status", executionStatusAttributeValue) + dp.Attributes().PutStr("error_message", errorMessageAttributeValue) + dp.Attributes().PutStr("query_type", queryTypeAttributeValue) + dp.Attributes().PutStr("warehouse_name", warehouseNameAttributeValue) + dp.Attributes().PutStr("database_name", databaseNameAttributeValue) + dp.Attributes().PutStr("warehouse_size", warehouseSizeAttributeValue) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricSnowflakeQueryBytesSpilledRemoteTotal) updateCapacity() { + if m.data.Gauge().DataPoints().Len() > m.capacity { + m.capacity = m.data.Gauge().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricSnowflakeQueryBytesSpilledRemoteTotal) emit(metrics pmetric.MetricSlice) { + if m.settings.Enabled && m.data.Gauge().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricSnowflakeQueryBytesSpilledRemoteTotal(settings MetricSettings) metricSnowflakeQueryBytesSpilledRemoteTotal { + m := metricSnowflakeQueryBytesSpilledRemoteTotal{settings: settings} + if settings.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricSnowflakeQueryBytesWrittenTotal struct { + data pmetric.Metric // data buffer for generated metric. + settings MetricSettings // metric settings provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills snowflake.query.bytes_written.total metric with initial data. 
+func (m *metricSnowflakeQueryBytesWrittenTotal) init() { + m.data.SetName("snowflake.query.bytes_written.total") + m.data.SetDescription("Total bytes written by database.") + m.data.SetUnit("By") + m.data.SetEmptyGauge() + m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) +} + +func (m *metricSnowflakeQueryBytesWrittenTotal) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, schemaNameAttributeValue string, executionStatusAttributeValue string, errorMessageAttributeValue string, queryTypeAttributeValue string, warehouseNameAttributeValue string, databaseNameAttributeValue string, warehouseSizeAttributeValue string) { + if !m.settings.Enabled { + return + } + dp := m.data.Gauge().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) + dp.Attributes().PutStr("schema_name", schemaNameAttributeValue) + dp.Attributes().PutStr("execution_status", executionStatusAttributeValue) + dp.Attributes().PutStr("error_message", errorMessageAttributeValue) + dp.Attributes().PutStr("query_type", queryTypeAttributeValue) + dp.Attributes().PutStr("warehouse_name", warehouseNameAttributeValue) + dp.Attributes().PutStr("database_name", databaseNameAttributeValue) + dp.Attributes().PutStr("warehouse_size", warehouseSizeAttributeValue) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricSnowflakeQueryBytesWrittenTotal) updateCapacity() { + if m.data.Gauge().DataPoints().Len() > m.capacity { + m.capacity = m.data.Gauge().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricSnowflakeQueryBytesWrittenTotal) emit(metrics pmetric.MetricSlice) { + if m.settings.Enabled && m.data.Gauge().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricSnowflakeQueryBytesWrittenTotal(settings MetricSettings) metricSnowflakeQueryBytesWrittenTotal { + m := metricSnowflakeQueryBytesWrittenTotal{settings: settings} + if settings.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricSnowflakeQueryCompilationTimeTotal struct { + data pmetric.Metric // data buffer for generated metric. + settings MetricSettings // metric settings provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills snowflake.query.compilation_time.total metric with initial data. 
+func (m *metricSnowflakeQueryCompilationTimeTotal) init() { + m.data.SetName("snowflake.query.compilation_time.total") + m.data.SetDescription("Total time taken to compile query.") + m.data.SetUnit("s") + m.data.SetEmptyGauge() + m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) +} + +func (m *metricSnowflakeQueryCompilationTimeTotal) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64, schemaNameAttributeValue string, executionStatusAttributeValue string, errorMessageAttributeValue string, queryTypeAttributeValue string, warehouseNameAttributeValue string, databaseNameAttributeValue string, warehouseSizeAttributeValue string) { + if !m.settings.Enabled { + return + } + dp := m.data.Gauge().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetDoubleValue(val) + dp.Attributes().PutStr("schema_name", schemaNameAttributeValue) + dp.Attributes().PutStr("execution_status", executionStatusAttributeValue) + dp.Attributes().PutStr("error_message", errorMessageAttributeValue) + dp.Attributes().PutStr("query_type", queryTypeAttributeValue) + dp.Attributes().PutStr("warehouse_name", warehouseNameAttributeValue) + dp.Attributes().PutStr("database_name", databaseNameAttributeValue) + dp.Attributes().PutStr("warehouse_size", warehouseSizeAttributeValue) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricSnowflakeQueryCompilationTimeTotal) updateCapacity() { + if m.data.Gauge().DataPoints().Len() > m.capacity { + m.capacity = m.data.Gauge().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricSnowflakeQueryCompilationTimeTotal) emit(metrics pmetric.MetricSlice) { + if m.settings.Enabled && m.data.Gauge().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricSnowflakeQueryCompilationTimeTotal(settings MetricSettings) metricSnowflakeQueryCompilationTimeTotal { + m := metricSnowflakeQueryCompilationTimeTotal{settings: settings} + if settings.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricSnowflakeQueryDataScannedCacheAvg struct { + data pmetric.Metric // data buffer for generated metric. + settings MetricSettings // metric settings provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills snowflake.query.data_scanned_cache.avg metric with initial data. 
+func (m *metricSnowflakeQueryDataScannedCacheAvg) init() { + m.data.SetName("snowflake.query.data_scanned_cache.avg") + m.data.SetDescription("Average percentage of data scanned from cache.") + m.data.SetUnit("1") + m.data.SetEmptyGauge() + m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) +} + +func (m *metricSnowflakeQueryDataScannedCacheAvg) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64, schemaNameAttributeValue string, executionStatusAttributeValue string, errorMessageAttributeValue string, queryTypeAttributeValue string, warehouseNameAttributeValue string, databaseNameAttributeValue string, warehouseSizeAttributeValue string) { + if !m.settings.Enabled { + return + } + dp := m.data.Gauge().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetDoubleValue(val) + dp.Attributes().PutStr("schema_name", schemaNameAttributeValue) + dp.Attributes().PutStr("execution_status", executionStatusAttributeValue) + dp.Attributes().PutStr("error_message", errorMessageAttributeValue) + dp.Attributes().PutStr("query_type", queryTypeAttributeValue) + dp.Attributes().PutStr("warehouse_name", warehouseNameAttributeValue) + dp.Attributes().PutStr("database_name", databaseNameAttributeValue) + dp.Attributes().PutStr("warehouse_size", warehouseSizeAttributeValue) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricSnowflakeQueryDataScannedCacheAvg) updateCapacity() { + if m.data.Gauge().DataPoints().Len() > m.capacity { + m.capacity = m.data.Gauge().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricSnowflakeQueryDataScannedCacheAvg) emit(metrics pmetric.MetricSlice) { + if m.settings.Enabled && m.data.Gauge().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricSnowflakeQueryDataScannedCacheAvg(settings MetricSettings) metricSnowflakeQueryDataScannedCacheAvg { + m := metricSnowflakeQueryDataScannedCacheAvg{settings: settings} + if settings.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricSnowflakeQueryExecuted struct { + data pmetric.Metric // data buffer for generated metric. + settings MetricSettings // metric settings provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills snowflake.query.executed metric with initial data. +func (m *metricSnowflakeQueryExecuted) init() { + m.data.SetName("snowflake.query.executed") + m.data.SetDescription("Executed query count for warehouse.") + m.data.SetUnit("1") + m.data.SetEmptyGauge() + m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) +} + +func (m *metricSnowflakeQueryExecuted) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64, warehouseNameAttributeValue string) { + if !m.settings.Enabled { + return + } + dp := m.data.Gauge().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetDoubleValue(val) + dp.Attributes().PutStr("warehouse_name", warehouseNameAttributeValue) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. 
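+//
+// Put differently (illustrative note, not generated): each emit cycle recycles
+// the pmetric.Metric buffer, and the high-water mark saved here feeds the
+// EnsureCapacity call in init, so later scrapes avoid re-growing the slice:
+//
+//	m.updateCapacity()                   // remember the largest point count seen
+//	m.data.MoveTo(metrics.AppendEmpty()) // hand the buffer to the output slice
+//	m.init()                             // reset; EnsureCapacity pre-sizes the next run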
+func (m *metricSnowflakeQueryExecuted) updateCapacity() { + if m.data.Gauge().DataPoints().Len() > m.capacity { + m.capacity = m.data.Gauge().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricSnowflakeQueryExecuted) emit(metrics pmetric.MetricSlice) { + if m.settings.Enabled && m.data.Gauge().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricSnowflakeQueryExecuted(settings MetricSettings) metricSnowflakeQueryExecuted { + m := metricSnowflakeQueryExecuted{settings: settings} + if settings.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricSnowflakeQueryExecutionTimeTotal struct { + data pmetric.Metric // data buffer for generated metric. + settings MetricSettings // metric settings provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills snowflake.query.execution_time.total metric with initial data. +func (m *metricSnowflakeQueryExecutionTimeTotal) init() { + m.data.SetName("snowflake.query.execution_time.total") + m.data.SetDescription("Total time spent executing queries in database.") + m.data.SetUnit("s") + m.data.SetEmptyGauge() + m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) +} + +func (m *metricSnowflakeQueryExecutionTimeTotal) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64, schemaNameAttributeValue string, executionStatusAttributeValue string, errorMessageAttributeValue string, queryTypeAttributeValue string, warehouseNameAttributeValue string, databaseNameAttributeValue string, warehouseSizeAttributeValue string) { + if !m.settings.Enabled { + return + } + dp := m.data.Gauge().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetDoubleValue(val) + dp.Attributes().PutStr("schema_name", schemaNameAttributeValue) + dp.Attributes().PutStr("execution_status", executionStatusAttributeValue) + dp.Attributes().PutStr("error_message", errorMessageAttributeValue) + dp.Attributes().PutStr("query_type", queryTypeAttributeValue) + dp.Attributes().PutStr("warehouse_name", warehouseNameAttributeValue) + dp.Attributes().PutStr("database_name", databaseNameAttributeValue) + dp.Attributes().PutStr("warehouse_size", warehouseSizeAttributeValue) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricSnowflakeQueryExecutionTimeTotal) updateCapacity() { + if m.data.Gauge().DataPoints().Len() > m.capacity { + m.capacity = m.data.Gauge().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricSnowflakeQueryExecutionTimeTotal) emit(metrics pmetric.MetricSlice) { + if m.settings.Enabled && m.data.Gauge().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricSnowflakeQueryExecutionTimeTotal(settings MetricSettings) metricSnowflakeQueryExecutionTimeTotal { + m := metricSnowflakeQueryExecutionTimeTotal{settings: settings} + if settings.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricSnowflakeQueryPartitionsScannedTotal struct { + data pmetric.Metric // data buffer for generated metric. + settings MetricSettings // metric settings provided by user. 
+ capacity int // max observed number of data points added to the metric. +} + +// init fills snowflake.query.partitions_scanned.total metric with initial data. +func (m *metricSnowflakeQueryPartitionsScannedTotal) init() { + m.data.SetName("snowflake.query.partitions_scanned.total") + m.data.SetDescription("Number of partitions scanned during query so far.") + m.data.SetUnit("1") + m.data.SetEmptyGauge() + m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) +} + +func (m *metricSnowflakeQueryPartitionsScannedTotal) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, schemaNameAttributeValue string, executionStatusAttributeValue string, errorMessageAttributeValue string, queryTypeAttributeValue string, warehouseNameAttributeValue string, databaseNameAttributeValue string, warehouseSizeAttributeValue string) { + if !m.settings.Enabled { + return + } + dp := m.data.Gauge().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) + dp.Attributes().PutStr("schema_name", schemaNameAttributeValue) + dp.Attributes().PutStr("execution_status", executionStatusAttributeValue) + dp.Attributes().PutStr("error_message", errorMessageAttributeValue) + dp.Attributes().PutStr("query_type", queryTypeAttributeValue) + dp.Attributes().PutStr("warehouse_name", warehouseNameAttributeValue) + dp.Attributes().PutStr("database_name", databaseNameAttributeValue) + dp.Attributes().PutStr("warehouse_size", warehouseSizeAttributeValue) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricSnowflakeQueryPartitionsScannedTotal) updateCapacity() { + if m.data.Gauge().DataPoints().Len() > m.capacity { + m.capacity = m.data.Gauge().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricSnowflakeQueryPartitionsScannedTotal) emit(metrics pmetric.MetricSlice) { + if m.settings.Enabled && m.data.Gauge().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricSnowflakeQueryPartitionsScannedTotal(settings MetricSettings) metricSnowflakeQueryPartitionsScannedTotal { + m := metricSnowflakeQueryPartitionsScannedTotal{settings: settings} + if settings.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricSnowflakeQueryQueuedOverload struct { + data pmetric.Metric // data buffer for generated metric. + settings MetricSettings // metric settings provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills snowflake.query.queued_overload metric with initial data. +func (m *metricSnowflakeQueryQueuedOverload) init() { + m.data.SetName("snowflake.query.queued_overload") + m.data.SetDescription("Overloaded query count for warehouse.") + m.data.SetUnit("1") + m.data.SetEmptyGauge() + m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) +} + +func (m *metricSnowflakeQueryQueuedOverload) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64, warehouseNameAttributeValue string) { + if !m.settings.Enabled { + return + } + dp := m.data.Gauge().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetDoubleValue(val) + dp.Attributes().PutStr("warehouse_name", warehouseNameAttributeValue) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. 
+func (m *metricSnowflakeQueryQueuedOverload) updateCapacity() { + if m.data.Gauge().DataPoints().Len() > m.capacity { + m.capacity = m.data.Gauge().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricSnowflakeQueryQueuedOverload) emit(metrics pmetric.MetricSlice) { + if m.settings.Enabled && m.data.Gauge().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricSnowflakeQueryQueuedOverload(settings MetricSettings) metricSnowflakeQueryQueuedOverload { + m := metricSnowflakeQueryQueuedOverload{settings: settings} + if settings.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricSnowflakeQueryQueuedProvision struct { + data pmetric.Metric // data buffer for generated metric. + settings MetricSettings // metric settings provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills snowflake.query.queued_provision metric with initial data. +func (m *metricSnowflakeQueryQueuedProvision) init() { + m.data.SetName("snowflake.query.queued_provision") + m.data.SetDescription("Number of compute resources queued for provisioning.") + m.data.SetUnit("1") + m.data.SetEmptyGauge() + m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) +} + +func (m *metricSnowflakeQueryQueuedProvision) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64, warehouseNameAttributeValue string) { + if !m.settings.Enabled { + return + } + dp := m.data.Gauge().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetDoubleValue(val) + dp.Attributes().PutStr("warehouse_name", warehouseNameAttributeValue) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricSnowflakeQueryQueuedProvision) updateCapacity() { + if m.data.Gauge().DataPoints().Len() > m.capacity { + m.capacity = m.data.Gauge().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricSnowflakeQueryQueuedProvision) emit(metrics pmetric.MetricSlice) { + if m.settings.Enabled && m.data.Gauge().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricSnowflakeQueryQueuedProvision(settings MetricSettings) metricSnowflakeQueryQueuedProvision { + m := metricSnowflakeQueryQueuedProvision{settings: settings} + if settings.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricSnowflakeQueuedOverloadTimeAvg struct { + data pmetric.Metric // data buffer for generated metric. + settings MetricSettings // metric settings provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills snowflake.queued_overload_time.avg metric with initial data. 
+func (m *metricSnowflakeQueuedOverloadTimeAvg) init() { + m.data.SetName("snowflake.queued_overload_time.avg") + m.data.SetDescription("Average time spent in warehouse queue due to warehouse being overloaded.") + m.data.SetUnit("s") + m.data.SetEmptyGauge() + m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) +} + +func (m *metricSnowflakeQueuedOverloadTimeAvg) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64, schemaNameAttributeValue string, executionStatusAttributeValue string, errorMessageAttributeValue string, queryTypeAttributeValue string, warehouseNameAttributeValue string, databaseNameAttributeValue string, warehouseSizeAttributeValue string) { + if !m.settings.Enabled { + return + } + dp := m.data.Gauge().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetDoubleValue(val) + dp.Attributes().PutStr("schema_name", schemaNameAttributeValue) + dp.Attributes().PutStr("execution_status", executionStatusAttributeValue) + dp.Attributes().PutStr("error_message", errorMessageAttributeValue) + dp.Attributes().PutStr("query_type", queryTypeAttributeValue) + dp.Attributes().PutStr("warehouse_name", warehouseNameAttributeValue) + dp.Attributes().PutStr("database_name", databaseNameAttributeValue) + dp.Attributes().PutStr("warehouse_size", warehouseSizeAttributeValue) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricSnowflakeQueuedOverloadTimeAvg) updateCapacity() { + if m.data.Gauge().DataPoints().Len() > m.capacity { + m.capacity = m.data.Gauge().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricSnowflakeQueuedOverloadTimeAvg) emit(metrics pmetric.MetricSlice) { + if m.settings.Enabled && m.data.Gauge().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricSnowflakeQueuedOverloadTimeAvg(settings MetricSettings) metricSnowflakeQueuedOverloadTimeAvg { + m := metricSnowflakeQueuedOverloadTimeAvg{settings: settings} + if settings.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricSnowflakeQueuedOverloadTimeTotal struct { + data pmetric.Metric // data buffer for generated metric. + settings MetricSettings // metric settings provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills snowflake.queued_overload_time.total metric with initial data. 
+func (m *metricSnowflakeQueuedOverloadTimeTotal) init() { + m.data.SetName("snowflake.queued_overload_time.total") + m.data.SetDescription("Total time spent in warehouse queue due to warehouse being overloaded.") + m.data.SetUnit("s") + m.data.SetEmptyGauge() + m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) +} + +func (m *metricSnowflakeQueuedOverloadTimeTotal) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, schemaNameAttributeValue string, executionStatusAttributeValue string, errorMessageAttributeValue string, queryTypeAttributeValue string, warehouseNameAttributeValue string, databaseNameAttributeValue string, warehouseSizeAttributeValue string) { + if !m.settings.Enabled { + return + } + dp := m.data.Gauge().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) + dp.Attributes().PutStr("schema_name", schemaNameAttributeValue) + dp.Attributes().PutStr("execution_status", executionStatusAttributeValue) + dp.Attributes().PutStr("error_message", errorMessageAttributeValue) + dp.Attributes().PutStr("query_type", queryTypeAttributeValue) + dp.Attributes().PutStr("warehouse_name", warehouseNameAttributeValue) + dp.Attributes().PutStr("database_name", databaseNameAttributeValue) + dp.Attributes().PutStr("warehouse_size", warehouseSizeAttributeValue) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricSnowflakeQueuedOverloadTimeTotal) updateCapacity() { + if m.data.Gauge().DataPoints().Len() > m.capacity { + m.capacity = m.data.Gauge().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricSnowflakeQueuedOverloadTimeTotal) emit(metrics pmetric.MetricSlice) { + if m.settings.Enabled && m.data.Gauge().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricSnowflakeQueuedOverloadTimeTotal(settings MetricSettings) metricSnowflakeQueuedOverloadTimeTotal { + m := metricSnowflakeQueuedOverloadTimeTotal{settings: settings} + if settings.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricSnowflakeQueuedProvisioningTimeAvg struct { + data pmetric.Metric // data buffer for generated metric. + settings MetricSettings // metric settings provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills snowflake.queued_provisioning_time.avg metric with initial data. 
+func (m *metricSnowflakeQueuedProvisioningTimeAvg) init() { + m.data.SetName("snowflake.queued_provisioning_time.avg") + m.data.SetDescription("Average time spent in warehouse queue waiting for resources to provision.") + m.data.SetUnit("s") + m.data.SetEmptyGauge() + m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) +} + +func (m *metricSnowflakeQueuedProvisioningTimeAvg) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64, schemaNameAttributeValue string, executionStatusAttributeValue string, errorMessageAttributeValue string, queryTypeAttributeValue string, warehouseNameAttributeValue string, databaseNameAttributeValue string, warehouseSizeAttributeValue string) { + if !m.settings.Enabled { + return + } + dp := m.data.Gauge().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetDoubleValue(val) + dp.Attributes().PutStr("schema_name", schemaNameAttributeValue) + dp.Attributes().PutStr("execution_status", executionStatusAttributeValue) + dp.Attributes().PutStr("error_message", errorMessageAttributeValue) + dp.Attributes().PutStr("query_type", queryTypeAttributeValue) + dp.Attributes().PutStr("warehouse_name", warehouseNameAttributeValue) + dp.Attributes().PutStr("database_name", databaseNameAttributeValue) + dp.Attributes().PutStr("warehouse_size", warehouseSizeAttributeValue) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricSnowflakeQueuedProvisioningTimeAvg) updateCapacity() { + if m.data.Gauge().DataPoints().Len() > m.capacity { + m.capacity = m.data.Gauge().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricSnowflakeQueuedProvisioningTimeAvg) emit(metrics pmetric.MetricSlice) { + if m.settings.Enabled && m.data.Gauge().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricSnowflakeQueuedProvisioningTimeAvg(settings MetricSettings) metricSnowflakeQueuedProvisioningTimeAvg { + m := metricSnowflakeQueuedProvisioningTimeAvg{settings: settings} + if settings.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricSnowflakeQueuedProvisioningTimeTotal struct { + data pmetric.Metric // data buffer for generated metric. + settings MetricSettings // metric settings provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills snowflake.queued_provisioning_time.total metric with initial data. 
+func (m *metricSnowflakeQueuedProvisioningTimeTotal) init() { + m.data.SetName("snowflake.queued_provisioning_time.total") + m.data.SetDescription("Total time spent in warehouse queue waiting for resources to provision.") + m.data.SetUnit("s") + m.data.SetEmptyGauge() + m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) +} + +func (m *metricSnowflakeQueuedProvisioningTimeTotal) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, schemaNameAttributeValue string, executionStatusAttributeValue string, errorMessageAttributeValue string, queryTypeAttributeValue string, warehouseNameAttributeValue string, databaseNameAttributeValue string, warehouseSizeAttributeValue string) { + if !m.settings.Enabled { + return + } + dp := m.data.Gauge().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) + dp.Attributes().PutStr("schema_name", schemaNameAttributeValue) + dp.Attributes().PutStr("execution_status", executionStatusAttributeValue) + dp.Attributes().PutStr("error_message", errorMessageAttributeValue) + dp.Attributes().PutStr("query_type", queryTypeAttributeValue) + dp.Attributes().PutStr("warehouse_name", warehouseNameAttributeValue) + dp.Attributes().PutStr("database_name", databaseNameAttributeValue) + dp.Attributes().PutStr("warehouse_size", warehouseSizeAttributeValue) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricSnowflakeQueuedProvisioningTimeTotal) updateCapacity() { + if m.data.Gauge().DataPoints().Len() > m.capacity { + m.capacity = m.data.Gauge().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricSnowflakeQueuedProvisioningTimeTotal) emit(metrics pmetric.MetricSlice) { + if m.settings.Enabled && m.data.Gauge().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricSnowflakeQueuedProvisioningTimeTotal(settings MetricSettings) metricSnowflakeQueuedProvisioningTimeTotal { + m := metricSnowflakeQueuedProvisioningTimeTotal{settings: settings} + if settings.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricSnowflakeQueuedRepairTimeAvg struct { + data pmetric.Metric // data buffer for generated metric. + settings MetricSettings // metric settings provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills snowflake.queued_repair_time.avg metric with initial data. 
+func (m *metricSnowflakeQueuedRepairTimeAvg) init() { + m.data.SetName("snowflake.queued_repair_time.avg") + m.data.SetDescription("Average time spent in warehouse queue waiting for compute resources to be repaired.") + m.data.SetUnit("s") + m.data.SetEmptyGauge() + m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) +} + +func (m *metricSnowflakeQueuedRepairTimeAvg) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64, schemaNameAttributeValue string, executionStatusAttributeValue string, errorMessageAttributeValue string, queryTypeAttributeValue string, warehouseNameAttributeValue string, databaseNameAttributeValue string, warehouseSizeAttributeValue string) { + if !m.settings.Enabled { + return + } + dp := m.data.Gauge().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetDoubleValue(val) + dp.Attributes().PutStr("schema_name", schemaNameAttributeValue) + dp.Attributes().PutStr("execution_status", executionStatusAttributeValue) + dp.Attributes().PutStr("error_message", errorMessageAttributeValue) + dp.Attributes().PutStr("query_type", queryTypeAttributeValue) + dp.Attributes().PutStr("warehouse_name", warehouseNameAttributeValue) + dp.Attributes().PutStr("database_name", databaseNameAttributeValue) + dp.Attributes().PutStr("warehouse_size", warehouseSizeAttributeValue) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricSnowflakeQueuedRepairTimeAvg) updateCapacity() { + if m.data.Gauge().DataPoints().Len() > m.capacity { + m.capacity = m.data.Gauge().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricSnowflakeQueuedRepairTimeAvg) emit(metrics pmetric.MetricSlice) { + if m.settings.Enabled && m.data.Gauge().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricSnowflakeQueuedRepairTimeAvg(settings MetricSettings) metricSnowflakeQueuedRepairTimeAvg { + m := metricSnowflakeQueuedRepairTimeAvg{settings: settings} + if settings.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricSnowflakeQueuedRepairTimeTotal struct { + data pmetric.Metric // data buffer for generated metric. + settings MetricSettings // metric settings provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills snowflake.queued_repair_time.total metric with initial data. 
+func (m *metricSnowflakeQueuedRepairTimeTotal) init() { + m.data.SetName("snowflake.queued_repair_time.total") + m.data.SetDescription("Total time spent in warehouse queue waiting for compute resources to be repaired.") + m.data.SetUnit("s") + m.data.SetEmptyGauge() + m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) +} + +func (m *metricSnowflakeQueuedRepairTimeTotal) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, schemaNameAttributeValue string, executionStatusAttributeValue string, errorMessageAttributeValue string, queryTypeAttributeValue string, warehouseNameAttributeValue string, databaseNameAttributeValue string, warehouseSizeAttributeValue string) { + if !m.settings.Enabled { + return + } + dp := m.data.Gauge().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) + dp.Attributes().PutStr("schema_name", schemaNameAttributeValue) + dp.Attributes().PutStr("execution_status", executionStatusAttributeValue) + dp.Attributes().PutStr("error_message", errorMessageAttributeValue) + dp.Attributes().PutStr("query_type", queryTypeAttributeValue) + dp.Attributes().PutStr("warehouse_name", warehouseNameAttributeValue) + dp.Attributes().PutStr("database_name", databaseNameAttributeValue) + dp.Attributes().PutStr("warehouse_size", warehouseSizeAttributeValue) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricSnowflakeQueuedRepairTimeTotal) updateCapacity() { + if m.data.Gauge().DataPoints().Len() > m.capacity { + m.capacity = m.data.Gauge().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricSnowflakeQueuedRepairTimeTotal) emit(metrics pmetric.MetricSlice) { + if m.settings.Enabled && m.data.Gauge().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricSnowflakeQueuedRepairTimeTotal(settings MetricSettings) metricSnowflakeQueuedRepairTimeTotal { + m := metricSnowflakeQueuedRepairTimeTotal{settings: settings} + if settings.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricSnowflakeRowsDeletedTotal struct { + data pmetric.Metric // data buffer for generated metric. + settings MetricSettings // metric settings provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills snowflake.rows_deleted.total metric with initial data. 
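+//
+// A minimal recording sketch for one of the attribute-rich query metrics
+// (illustrative; the timestamps and attribute values are hypothetical):
+//
+//	m := newMetricSnowflakeRowsDeletedTotal(MetricSettings{Enabled: true})
+//	m.recordDataPoint(start, now, 42,
+//		"PUBLIC",     // schema_name
+//		"SUCCESS",    // execution_status
+//		"",           // error_message
+//		"DELETE",     // query_type
+//		"COMPUTE_WH", // warehouse_name
+//		"SNOWFLAKE",  // database_name
+//		"X-Small")    // warehouse_size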
+func (m *metricSnowflakeRowsDeletedTotal) init() { + m.data.SetName("snowflake.rows_deleted.total") + m.data.SetDescription("Number of rows deleted from a table (or tables).") + m.data.SetUnit("{rows}") + m.data.SetEmptyGauge() + m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) +} + +func (m *metricSnowflakeRowsDeletedTotal) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, schemaNameAttributeValue string, executionStatusAttributeValue string, errorMessageAttributeValue string, queryTypeAttributeValue string, warehouseNameAttributeValue string, databaseNameAttributeValue string, warehouseSizeAttributeValue string) { + if !m.settings.Enabled { + return + } + dp := m.data.Gauge().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) + dp.Attributes().PutStr("schema_name", schemaNameAttributeValue) + dp.Attributes().PutStr("execution_status", executionStatusAttributeValue) + dp.Attributes().PutStr("error_message", errorMessageAttributeValue) + dp.Attributes().PutStr("query_type", queryTypeAttributeValue) + dp.Attributes().PutStr("warehouse_name", warehouseNameAttributeValue) + dp.Attributes().PutStr("database_name", databaseNameAttributeValue) + dp.Attributes().PutStr("warehouse_size", warehouseSizeAttributeValue) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricSnowflakeRowsDeletedTotal) updateCapacity() { + if m.data.Gauge().DataPoints().Len() > m.capacity { + m.capacity = m.data.Gauge().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricSnowflakeRowsDeletedTotal) emit(metrics pmetric.MetricSlice) { + if m.settings.Enabled && m.data.Gauge().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricSnowflakeRowsDeletedTotal(settings MetricSettings) metricSnowflakeRowsDeletedTotal { + m := metricSnowflakeRowsDeletedTotal{settings: settings} + if settings.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricSnowflakeRowsInsertedTotal struct { + data pmetric.Metric // data buffer for generated metric. + settings MetricSettings // metric settings provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills snowflake.rows_inserted.total metric with initial data. 
+func (m *metricSnowflakeRowsInsertedTotal) init() { + m.data.SetName("snowflake.rows_inserted.total") + m.data.SetDescription("Number of rows inserted into a table (or tables).") + m.data.SetUnit("{rows}") + m.data.SetEmptyGauge() + m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) +} + +func (m *metricSnowflakeRowsInsertedTotal) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, schemaNameAttributeValue string, executionStatusAttributeValue string, errorMessageAttributeValue string, queryTypeAttributeValue string, warehouseNameAttributeValue string, databaseNameAttributeValue string, warehouseSizeAttributeValue string) { + if !m.settings.Enabled { + return + } + dp := m.data.Gauge().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) + dp.Attributes().PutStr("schema_name", schemaNameAttributeValue) + dp.Attributes().PutStr("execution_status", executionStatusAttributeValue) + dp.Attributes().PutStr("error_message", errorMessageAttributeValue) + dp.Attributes().PutStr("query_type", queryTypeAttributeValue) + dp.Attributes().PutStr("warehouse_name", warehouseNameAttributeValue) + dp.Attributes().PutStr("database_name", databaseNameAttributeValue) + dp.Attributes().PutStr("warehouse_size", warehouseSizeAttributeValue) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricSnowflakeRowsInsertedTotal) updateCapacity() { + if m.data.Gauge().DataPoints().Len() > m.capacity { + m.capacity = m.data.Gauge().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricSnowflakeRowsInsertedTotal) emit(metrics pmetric.MetricSlice) { + if m.settings.Enabled && m.data.Gauge().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricSnowflakeRowsInsertedTotal(settings MetricSettings) metricSnowflakeRowsInsertedTotal { + m := metricSnowflakeRowsInsertedTotal{settings: settings} + if settings.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricSnowflakeRowsProducedTotal struct { + data pmetric.Metric // data buffer for generated metric. + settings MetricSettings // metric settings provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills snowflake.rows_produced.total metric with initial data. 
+func (m *metricSnowflakeRowsProducedTotal) init() { + m.data.SetName("snowflake.rows_produced.total") + m.data.SetDescription("Total number of rows produced by statement.") + m.data.SetUnit("{rows}") + m.data.SetEmptyGauge() + m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) +} + +func (m *metricSnowflakeRowsProducedTotal) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, schemaNameAttributeValue string, executionStatusAttributeValue string, errorMessageAttributeValue string, queryTypeAttributeValue string, warehouseNameAttributeValue string, databaseNameAttributeValue string, warehouseSizeAttributeValue string) { + if !m.settings.Enabled { + return + } + dp := m.data.Gauge().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) + dp.Attributes().PutStr("schema_name", schemaNameAttributeValue) + dp.Attributes().PutStr("execution_status", executionStatusAttributeValue) + dp.Attributes().PutStr("error_message", errorMessageAttributeValue) + dp.Attributes().PutStr("query_type", queryTypeAttributeValue) + dp.Attributes().PutStr("warehouse_name", warehouseNameAttributeValue) + dp.Attributes().PutStr("database_name", databaseNameAttributeValue) + dp.Attributes().PutStr("warehouse_size", warehouseSizeAttributeValue) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricSnowflakeRowsProducedTotal) updateCapacity() { + if m.data.Gauge().DataPoints().Len() > m.capacity { + m.capacity = m.data.Gauge().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricSnowflakeRowsProducedTotal) emit(metrics pmetric.MetricSlice) { + if m.settings.Enabled && m.data.Gauge().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricSnowflakeRowsProducedTotal(settings MetricSettings) metricSnowflakeRowsProducedTotal { + m := metricSnowflakeRowsProducedTotal{settings: settings} + if settings.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricSnowflakeRowsUnloadedTotal struct { + data pmetric.Metric // data buffer for generated metric. + settings MetricSettings // metric settings provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills snowflake.rows_unloaded.total metric with initial data. 
+func (m *metricSnowflakeRowsUnloadedTotal) init() { + m.data.SetName("snowflake.rows_unloaded.total") + m.data.SetDescription("Total number of rows unloaded during data export.") + m.data.SetUnit("{rows}") + m.data.SetEmptyGauge() + m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) +} + +func (m *metricSnowflakeRowsUnloadedTotal) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, schemaNameAttributeValue string, executionStatusAttributeValue string, errorMessageAttributeValue string, queryTypeAttributeValue string, warehouseNameAttributeValue string, databaseNameAttributeValue string, warehouseSizeAttributeValue string) { + if !m.settings.Enabled { + return + } + dp := m.data.Gauge().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) + dp.Attributes().PutStr("schema_name", schemaNameAttributeValue) + dp.Attributes().PutStr("execution_status", executionStatusAttributeValue) + dp.Attributes().PutStr("error_message", errorMessageAttributeValue) + dp.Attributes().PutStr("query_type", queryTypeAttributeValue) + dp.Attributes().PutStr("warehouse_name", warehouseNameAttributeValue) + dp.Attributes().PutStr("database_name", databaseNameAttributeValue) + dp.Attributes().PutStr("warehouse_size", warehouseSizeAttributeValue) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricSnowflakeRowsUnloadedTotal) updateCapacity() { + if m.data.Gauge().DataPoints().Len() > m.capacity { + m.capacity = m.data.Gauge().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricSnowflakeRowsUnloadedTotal) emit(metrics pmetric.MetricSlice) { + if m.settings.Enabled && m.data.Gauge().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricSnowflakeRowsUnloadedTotal(settings MetricSettings) metricSnowflakeRowsUnloadedTotal { + m := metricSnowflakeRowsUnloadedTotal{settings: settings} + if settings.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricSnowflakeRowsUpdatedTotal struct { + data pmetric.Metric // data buffer for generated metric. + settings MetricSettings // metric settings provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills snowflake.rows_updated.total metric with initial data. 
+func (m *metricSnowflakeRowsUpdatedTotal) init() {
+	m.data.SetName("snowflake.rows_updated.total")
+	m.data.SetDescription("Total number of rows updated in a table.")
+	m.data.SetUnit("{rows}")
+	m.data.SetEmptyGauge()
+	m.data.Gauge().DataPoints().EnsureCapacity(m.capacity)
+}
+
+func (m *metricSnowflakeRowsUpdatedTotal) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, schemaNameAttributeValue string, executionStatusAttributeValue string, errorMessageAttributeValue string, queryTypeAttributeValue string, warehouseNameAttributeValue string, databaseNameAttributeValue string, warehouseSizeAttributeValue string) {
+	if !m.settings.Enabled {
+		return
+	}
+	dp := m.data.Gauge().DataPoints().AppendEmpty()
+	dp.SetStartTimestamp(start)
+	dp.SetTimestamp(ts)
+	dp.SetIntValue(val)
+	dp.Attributes().PutStr("schema_name", schemaNameAttributeValue)
+	dp.Attributes().PutStr("execution_status", executionStatusAttributeValue)
+	dp.Attributes().PutStr("error_message", errorMessageAttributeValue)
+	dp.Attributes().PutStr("query_type", queryTypeAttributeValue)
+	dp.Attributes().PutStr("warehouse_name", warehouseNameAttributeValue)
+	dp.Attributes().PutStr("database_name", databaseNameAttributeValue)
+	dp.Attributes().PutStr("warehouse_size", warehouseSizeAttributeValue)
+}
+
+// updateCapacity saves max length of data point slices that will be used for the slice capacity.
+func (m *metricSnowflakeRowsUpdatedTotal) updateCapacity() {
+	if m.data.Gauge().DataPoints().Len() > m.capacity {
+		m.capacity = m.data.Gauge().DataPoints().Len()
+	}
+}
+
+// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
+func (m *metricSnowflakeRowsUpdatedTotal) emit(metrics pmetric.MetricSlice) {
+	if m.settings.Enabled && m.data.Gauge().DataPoints().Len() > 0 {
+		m.updateCapacity()
+		m.data.MoveTo(metrics.AppendEmpty())
+		m.init()
+	}
+}
+
+func newMetricSnowflakeRowsUpdatedTotal(settings MetricSettings) metricSnowflakeRowsUpdatedTotal {
+	m := metricSnowflakeRowsUpdatedTotal{settings: settings}
+	if settings.Enabled {
+		m.data = pmetric.NewMetric()
+		m.init()
+	}
+	return m
+}
+
+type metricSnowflakeSessionIDCount struct {
+	data     pmetric.Metric // data buffer for generated metric.
+	settings MetricSettings // metric settings provided by user.
+	capacity int            // max observed number of data points added to the metric.
+}
+
+// init fills snowflake.session_id.count metric with initial data.
+func (m *metricSnowflakeSessionIDCount) init() {
+	m.data.SetName("snowflake.session_id.count")
+	m.data.SetDescription("Distinct session ids associated with snowflake username.")
+	m.data.SetUnit("1")
+	m.data.SetEmptyGauge()
+	m.data.Gauge().DataPoints().EnsureCapacity(m.capacity)
+}
+
+func (m *metricSnowflakeSessionIDCount) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, userNameAttributeValue string) {
+	if !m.settings.Enabled {
+		return
+	}
+	dp := m.data.Gauge().DataPoints().AppendEmpty()
+	dp.SetStartTimestamp(start)
+	dp.SetTimestamp(ts)
+	dp.SetIntValue(val)
+	dp.Attributes().PutStr("user_name", userNameAttributeValue)
+}
+
+// updateCapacity saves max length of data point slices that will be used for the slice capacity.
+func (m *metricSnowflakeSessionIDCount) updateCapacity() {
+	if m.data.Gauge().DataPoints().Len() > m.capacity {
+		m.capacity = m.data.Gauge().DataPoints().Len()
+	}
+}
+
+// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
+func (m *metricSnowflakeSessionIDCount) emit(metrics pmetric.MetricSlice) { + if m.settings.Enabled && m.data.Gauge().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricSnowflakeSessionIDCount(settings MetricSettings) metricSnowflakeSessionIDCount { + m := metricSnowflakeSessionIDCount{settings: settings} + if settings.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricSnowflakeStorageFailsafeBytesTotal struct { + data pmetric.Metric // data buffer for generated metric. + settings MetricSettings // metric settings provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills snowflake.storage.failsafe_bytes.total metric with initial data. +func (m *metricSnowflakeStorageFailsafeBytesTotal) init() { + m.data.SetName("snowflake.storage.failsafe_bytes.total") + m.data.SetDescription("Number of bytes of data in Fail-safe.") + m.data.SetUnit("By") + m.data.SetEmptyGauge() +} + +func (m *metricSnowflakeStorageFailsafeBytesTotal) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { + if !m.settings.Enabled { + return + } + dp := m.data.Gauge().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricSnowflakeStorageFailsafeBytesTotal) updateCapacity() { + if m.data.Gauge().DataPoints().Len() > m.capacity { + m.capacity = m.data.Gauge().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricSnowflakeStorageFailsafeBytesTotal) emit(metrics pmetric.MetricSlice) { + if m.settings.Enabled && m.data.Gauge().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricSnowflakeStorageFailsafeBytesTotal(settings MetricSettings) metricSnowflakeStorageFailsafeBytesTotal { + m := metricSnowflakeStorageFailsafeBytesTotal{settings: settings} + if settings.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricSnowflakeStorageStageBytesTotal struct { + data pmetric.Metric // data buffer for generated metric. + settings MetricSettings // metric settings provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills snowflake.storage.stage_bytes.total metric with initial data. +func (m *metricSnowflakeStorageStageBytesTotal) init() { + m.data.SetName("snowflake.storage.stage_bytes.total") + m.data.SetDescription("Number of bytes of stage storage used by files in all internal stages (named, table, user).") + m.data.SetUnit("By") + m.data.SetEmptyGauge() +} + +func (m *metricSnowflakeStorageStageBytesTotal) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { + if !m.settings.Enabled { + return + } + dp := m.data.Gauge().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricSnowflakeStorageStageBytesTotal) updateCapacity() { + if m.data.Gauge().DataPoints().Len() > m.capacity { + m.capacity = m.data.Gauge().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. 
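+//
+// Usage sketch for the attribute-less storage metrics (assumed, not generated;
+// start, now, and scopeMetrics are hypothetical):
+//
+//	m := newMetricSnowflakeStorageStageBytesTotal(MetricSettings{Enabled: true})
+//	m.recordDataPoint(start, now, 1024) // one point per scrape of the storage view
+//	m.emit(scopeMetrics.Metrics())      // moves the buffered metric into the slice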
+func (m *metricSnowflakeStorageStageBytesTotal) emit(metrics pmetric.MetricSlice) { + if m.settings.Enabled && m.data.Gauge().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricSnowflakeStorageStageBytesTotal(settings MetricSettings) metricSnowflakeStorageStageBytesTotal { + m := metricSnowflakeStorageStageBytesTotal{settings: settings} + if settings.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricSnowflakeStorageStorageBytesTotal struct { + data pmetric.Metric // data buffer for generated metric. + settings MetricSettings // metric settings provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills snowflake.storage.storage_bytes.total metric with initial data. +func (m *metricSnowflakeStorageStorageBytesTotal) init() { + m.data.SetName("snowflake.storage.storage_bytes.total") + m.data.SetDescription("Number of bytes of table storage used, including bytes for data currently in Time Travel.") + m.data.SetUnit("By") + m.data.SetEmptyGauge() +} + +func (m *metricSnowflakeStorageStorageBytesTotal) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { + if !m.settings.Enabled { + return + } + dp := m.data.Gauge().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricSnowflakeStorageStorageBytesTotal) updateCapacity() { + if m.data.Gauge().DataPoints().Len() > m.capacity { + m.capacity = m.data.Gauge().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricSnowflakeStorageStorageBytesTotal) emit(metrics pmetric.MetricSlice) { + if m.settings.Enabled && m.data.Gauge().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricSnowflakeStorageStorageBytesTotal(settings MetricSettings) metricSnowflakeStorageStorageBytesTotal { + m := metricSnowflakeStorageStorageBytesTotal{settings: settings} + if settings.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricSnowflakeTotalElapsedTimeAvg struct { + data pmetric.Metric // data buffer for generated metric. + settings MetricSettings // metric settings provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills snowflake.total_elapsed_time.avg metric with initial data. 
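+//
+// Editorial note (not generated): this .avg/.total pair also differs in value
+// type; the average below records float64 via SetDoubleValue, while the total
+// records int64 via SetIntValue, matching their Double/Int value types in
+// documentation.md.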
+func (m *metricSnowflakeTotalElapsedTimeAvg) init() { + m.data.SetName("snowflake.total_elapsed_time.avg") + m.data.SetDescription("Average elapsed time.") + m.data.SetUnit("s") + m.data.SetEmptyGauge() + m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) +} + +func (m *metricSnowflakeTotalElapsedTimeAvg) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64, schemaNameAttributeValue string, executionStatusAttributeValue string, errorMessageAttributeValue string, queryTypeAttributeValue string, warehouseNameAttributeValue string, databaseNameAttributeValue string, warehouseSizeAttributeValue string) { + if !m.settings.Enabled { + return + } + dp := m.data.Gauge().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetDoubleValue(val) + dp.Attributes().PutStr("schema_name", schemaNameAttributeValue) + dp.Attributes().PutStr("execution_status", executionStatusAttributeValue) + dp.Attributes().PutStr("error_message", errorMessageAttributeValue) + dp.Attributes().PutStr("query_type", queryTypeAttributeValue) + dp.Attributes().PutStr("warehouse_name", warehouseNameAttributeValue) + dp.Attributes().PutStr("database_name", databaseNameAttributeValue) + dp.Attributes().PutStr("warehouse_size", warehouseSizeAttributeValue) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricSnowflakeTotalElapsedTimeAvg) updateCapacity() { + if m.data.Gauge().DataPoints().Len() > m.capacity { + m.capacity = m.data.Gauge().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricSnowflakeTotalElapsedTimeAvg) emit(metrics pmetric.MetricSlice) { + if m.settings.Enabled && m.data.Gauge().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricSnowflakeTotalElapsedTimeAvg(settings MetricSettings) metricSnowflakeTotalElapsedTimeAvg { + m := metricSnowflakeTotalElapsedTimeAvg{settings: settings} + if settings.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +type metricSnowflakeTotalElapsedTimeTotal struct { + data pmetric.Metric // data buffer for generated metric. + settings MetricSettings // metric settings provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills snowflake.total_elapsed_time.total metric with initial data. 
+func (m *metricSnowflakeTotalElapsedTimeTotal) init() { + m.data.SetName("snowflake.total_elapsed_time.total") + m.data.SetDescription("Total elapsed time.") + m.data.SetUnit("s") + m.data.SetEmptyGauge() + m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) +} + +func (m *metricSnowflakeTotalElapsedTimeTotal) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, schemaNameAttributeValue string, executionStatusAttributeValue string, errorMessageAttributeValue string, queryTypeAttributeValue string, warehouseNameAttributeValue string, databaseNameAttributeValue string, warehouseSizeAttributeValue string) { + if !m.settings.Enabled { + return + } + dp := m.data.Gauge().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetIntValue(val) + dp.Attributes().PutStr("schema_name", schemaNameAttributeValue) + dp.Attributes().PutStr("execution_status", executionStatusAttributeValue) + dp.Attributes().PutStr("error_message", errorMessageAttributeValue) + dp.Attributes().PutStr("query_type", queryTypeAttributeValue) + dp.Attributes().PutStr("warehouse_name", warehouseNameAttributeValue) + dp.Attributes().PutStr("database_name", databaseNameAttributeValue) + dp.Attributes().PutStr("warehouse_size", warehouseSizeAttributeValue) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricSnowflakeTotalElapsedTimeTotal) updateCapacity() { + if m.data.Gauge().DataPoints().Len() > m.capacity { + m.capacity = m.data.Gauge().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricSnowflakeTotalElapsedTimeTotal) emit(metrics pmetric.MetricSlice) { + if m.settings.Enabled && m.data.Gauge().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricSnowflakeTotalElapsedTimeTotal(settings MetricSettings) metricSnowflakeTotalElapsedTimeTotal { + m := metricSnowflakeTotalElapsedTimeTotal{settings: settings} + if settings.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + +// MetricsBuilder provides an interface for scrapers to report metrics while taking care of all the transformations +// required to produce metric representation defined in metadata and user settings. +type MetricsBuilder struct { + startTime pcommon.Timestamp // start time that will be applied to all recorded data points. + metricsCapacity int // maximum observed number of metrics per resource. + resourceCapacity int // maximum observed number of resource attributes. + metricsBuffer pmetric.Metrics // accumulates metrics data before emitting. 
+ buildInfo component.BuildInfo // contains version information + metricSnowflakeBillingCloudServiceTotal metricSnowflakeBillingCloudServiceTotal + metricSnowflakeBillingTotalCreditTotal metricSnowflakeBillingTotalCreditTotal + metricSnowflakeBillingVirtualWarehouseTotal metricSnowflakeBillingVirtualWarehouseTotal + metricSnowflakeBillingWarehouseCloudServiceTotal metricSnowflakeBillingWarehouseCloudServiceTotal + metricSnowflakeBillingWarehouseTotalCreditTotal metricSnowflakeBillingWarehouseTotalCreditTotal + metricSnowflakeBillingWarehouseVirtualWarehouseTotal metricSnowflakeBillingWarehouseVirtualWarehouseTotal + metricSnowflakeDatabaseBytesScannedAvg metricSnowflakeDatabaseBytesScannedAvg + metricSnowflakeDatabaseQueryCount metricSnowflakeDatabaseQueryCount + metricSnowflakeLoginsTotal metricSnowflakeLoginsTotal + metricSnowflakePipeCreditsUsedTotal metricSnowflakePipeCreditsUsedTotal + metricSnowflakeQueryBlocked metricSnowflakeQueryBlocked + metricSnowflakeQueryBytesDeletedTotal metricSnowflakeQueryBytesDeletedTotal + metricSnowflakeQueryBytesScannedTotal metricSnowflakeQueryBytesScannedTotal + metricSnowflakeQueryBytesSpilledLocalTotal metricSnowflakeQueryBytesSpilledLocalTotal + metricSnowflakeQueryBytesSpilledRemoteTotal metricSnowflakeQueryBytesSpilledRemoteTotal + metricSnowflakeQueryBytesWrittenTotal metricSnowflakeQueryBytesWrittenTotal + metricSnowflakeQueryCompilationTimeTotal metricSnowflakeQueryCompilationTimeTotal + metricSnowflakeQueryDataScannedCacheAvg metricSnowflakeQueryDataScannedCacheAvg + metricSnowflakeQueryExecuted metricSnowflakeQueryExecuted + metricSnowflakeQueryExecutionTimeTotal metricSnowflakeQueryExecutionTimeTotal + metricSnowflakeQueryPartitionsScannedTotal metricSnowflakeQueryPartitionsScannedTotal + metricSnowflakeQueryQueuedOverload metricSnowflakeQueryQueuedOverload + metricSnowflakeQueryQueuedProvision metricSnowflakeQueryQueuedProvision + metricSnowflakeQueuedOverloadTimeAvg metricSnowflakeQueuedOverloadTimeAvg + metricSnowflakeQueuedOverloadTimeTotal metricSnowflakeQueuedOverloadTimeTotal + metricSnowflakeQueuedProvisioningTimeAvg metricSnowflakeQueuedProvisioningTimeAvg + metricSnowflakeQueuedProvisioningTimeTotal metricSnowflakeQueuedProvisioningTimeTotal + metricSnowflakeQueuedRepairTimeAvg metricSnowflakeQueuedRepairTimeAvg + metricSnowflakeQueuedRepairTimeTotal metricSnowflakeQueuedRepairTimeTotal + metricSnowflakeRowsDeletedTotal metricSnowflakeRowsDeletedTotal + metricSnowflakeRowsInsertedTotal metricSnowflakeRowsInsertedTotal + metricSnowflakeRowsProducedTotal metricSnowflakeRowsProducedTotal + metricSnowflakeRowsUnloadedTotal metricSnowflakeRowsUnloadedTotal + metricSnowflakeRowsUpdatedTotal metricSnowflakeRowsUpdatedTotal + metricSnowflakeSessionIDCount metricSnowflakeSessionIDCount + metricSnowflakeStorageFailsafeBytesTotal metricSnowflakeStorageFailsafeBytesTotal + metricSnowflakeStorageStageBytesTotal metricSnowflakeStorageStageBytesTotal + metricSnowflakeStorageStorageBytesTotal metricSnowflakeStorageStorageBytesTotal + metricSnowflakeTotalElapsedTimeAvg metricSnowflakeTotalElapsedTimeAvg + metricSnowflakeTotalElapsedTimeTotal metricSnowflakeTotalElapsedTimeTotal +} + +// metricBuilderOption applies changes to default metrics builder. +type metricBuilderOption func(*MetricsBuilder) + +// WithStartTime sets startTime on the metrics builder. 
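+//
+// For example (illustrative, not generated), a scraper that wants data points
+// anchored to process start rather than builder-creation time might pass
+// (settings, buildInfo, and processStart are hypothetical):
+//
+//	mb := NewMetricsBuilder(settings, buildInfo, WithStartTime(pcommon.NewTimestampFromTime(processStart)))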
+func WithStartTime(startTime pcommon.Timestamp) metricBuilderOption { + return func(mb *MetricsBuilder) { + mb.startTime = startTime + } +} + +func NewMetricsBuilder(settings MetricsSettings, buildInfo component.BuildInfo, options ...metricBuilderOption) *MetricsBuilder { + mb := &MetricsBuilder{ + startTime: pcommon.NewTimestampFromTime(time.Now()), + metricsBuffer: pmetric.NewMetrics(), + buildInfo: buildInfo, + metricSnowflakeBillingCloudServiceTotal: newMetricSnowflakeBillingCloudServiceTotal(settings.SnowflakeBillingCloudServiceTotal), + metricSnowflakeBillingTotalCreditTotal: newMetricSnowflakeBillingTotalCreditTotal(settings.SnowflakeBillingTotalCreditTotal), + metricSnowflakeBillingVirtualWarehouseTotal: newMetricSnowflakeBillingVirtualWarehouseTotal(settings.SnowflakeBillingVirtualWarehouseTotal), + metricSnowflakeBillingWarehouseCloudServiceTotal: newMetricSnowflakeBillingWarehouseCloudServiceTotal(settings.SnowflakeBillingWarehouseCloudServiceTotal), + metricSnowflakeBillingWarehouseTotalCreditTotal: newMetricSnowflakeBillingWarehouseTotalCreditTotal(settings.SnowflakeBillingWarehouseTotalCreditTotal), + metricSnowflakeBillingWarehouseVirtualWarehouseTotal: newMetricSnowflakeBillingWarehouseVirtualWarehouseTotal(settings.SnowflakeBillingWarehouseVirtualWarehouseTotal), + metricSnowflakeDatabaseBytesScannedAvg: newMetricSnowflakeDatabaseBytesScannedAvg(settings.SnowflakeDatabaseBytesScannedAvg), + metricSnowflakeDatabaseQueryCount: newMetricSnowflakeDatabaseQueryCount(settings.SnowflakeDatabaseQueryCount), + metricSnowflakeLoginsTotal: newMetricSnowflakeLoginsTotal(settings.SnowflakeLoginsTotal), + metricSnowflakePipeCreditsUsedTotal: newMetricSnowflakePipeCreditsUsedTotal(settings.SnowflakePipeCreditsUsedTotal), + metricSnowflakeQueryBlocked: newMetricSnowflakeQueryBlocked(settings.SnowflakeQueryBlocked), + metricSnowflakeQueryBytesDeletedTotal: newMetricSnowflakeQueryBytesDeletedTotal(settings.SnowflakeQueryBytesDeletedTotal), + metricSnowflakeQueryBytesScannedTotal: newMetricSnowflakeQueryBytesScannedTotal(settings.SnowflakeQueryBytesScannedTotal), + metricSnowflakeQueryBytesSpilledLocalTotal: newMetricSnowflakeQueryBytesSpilledLocalTotal(settings.SnowflakeQueryBytesSpilledLocalTotal), + metricSnowflakeQueryBytesSpilledRemoteTotal: newMetricSnowflakeQueryBytesSpilledRemoteTotal(settings.SnowflakeQueryBytesSpilledRemoteTotal), + metricSnowflakeQueryBytesWrittenTotal: newMetricSnowflakeQueryBytesWrittenTotal(settings.SnowflakeQueryBytesWrittenTotal), + metricSnowflakeQueryCompilationTimeTotal: newMetricSnowflakeQueryCompilationTimeTotal(settings.SnowflakeQueryCompilationTimeTotal), + metricSnowflakeQueryDataScannedCacheAvg: newMetricSnowflakeQueryDataScannedCacheAvg(settings.SnowflakeQueryDataScannedCacheAvg), + metricSnowflakeQueryExecuted: newMetricSnowflakeQueryExecuted(settings.SnowflakeQueryExecuted), + metricSnowflakeQueryExecutionTimeTotal: newMetricSnowflakeQueryExecutionTimeTotal(settings.SnowflakeQueryExecutionTimeTotal), + metricSnowflakeQueryPartitionsScannedTotal: newMetricSnowflakeQueryPartitionsScannedTotal(settings.SnowflakeQueryPartitionsScannedTotal), + metricSnowflakeQueryQueuedOverload: newMetricSnowflakeQueryQueuedOverload(settings.SnowflakeQueryQueuedOverload), + metricSnowflakeQueryQueuedProvision: newMetricSnowflakeQueryQueuedProvision(settings.SnowflakeQueryQueuedProvision), + metricSnowflakeQueuedOverloadTimeAvg: newMetricSnowflakeQueuedOverloadTimeAvg(settings.SnowflakeQueuedOverloadTimeAvg), + metricSnowflakeQueuedOverloadTimeTotal: 
newMetricSnowflakeQueuedOverloadTimeTotal(settings.SnowflakeQueuedOverloadTimeTotal), + metricSnowflakeQueuedProvisioningTimeAvg: newMetricSnowflakeQueuedProvisioningTimeAvg(settings.SnowflakeQueuedProvisioningTimeAvg), + metricSnowflakeQueuedProvisioningTimeTotal: newMetricSnowflakeQueuedProvisioningTimeTotal(settings.SnowflakeQueuedProvisioningTimeTotal), + metricSnowflakeQueuedRepairTimeAvg: newMetricSnowflakeQueuedRepairTimeAvg(settings.SnowflakeQueuedRepairTimeAvg), + metricSnowflakeQueuedRepairTimeTotal: newMetricSnowflakeQueuedRepairTimeTotal(settings.SnowflakeQueuedRepairTimeTotal), + metricSnowflakeRowsDeletedTotal: newMetricSnowflakeRowsDeletedTotal(settings.SnowflakeRowsDeletedTotal), + metricSnowflakeRowsInsertedTotal: newMetricSnowflakeRowsInsertedTotal(settings.SnowflakeRowsInsertedTotal), + metricSnowflakeRowsProducedTotal: newMetricSnowflakeRowsProducedTotal(settings.SnowflakeRowsProducedTotal), + metricSnowflakeRowsUnloadedTotal: newMetricSnowflakeRowsUnloadedTotal(settings.SnowflakeRowsUnloadedTotal), + metricSnowflakeRowsUpdatedTotal: newMetricSnowflakeRowsUpdatedTotal(settings.SnowflakeRowsUpdatedTotal), + metricSnowflakeSessionIDCount: newMetricSnowflakeSessionIDCount(settings.SnowflakeSessionIDCount), + metricSnowflakeStorageFailsafeBytesTotal: newMetricSnowflakeStorageFailsafeBytesTotal(settings.SnowflakeStorageFailsafeBytesTotal), + metricSnowflakeStorageStageBytesTotal: newMetricSnowflakeStorageStageBytesTotal(settings.SnowflakeStorageStageBytesTotal), + metricSnowflakeStorageStorageBytesTotal: newMetricSnowflakeStorageStorageBytesTotal(settings.SnowflakeStorageStorageBytesTotal), + metricSnowflakeTotalElapsedTimeAvg: newMetricSnowflakeTotalElapsedTimeAvg(settings.SnowflakeTotalElapsedTimeAvg), + metricSnowflakeTotalElapsedTimeTotal: newMetricSnowflakeTotalElapsedTimeTotal(settings.SnowflakeTotalElapsedTimeTotal), + } + for _, op := range options { + op(mb) + } + return mb +} + +// updateCapacity updates max length of metrics and resource attributes that will be used for the slice capacity. +func (mb *MetricsBuilder) updateCapacity(rm pmetric.ResourceMetrics) { + if mb.metricsCapacity < rm.ScopeMetrics().At(0).Metrics().Len() { + mb.metricsCapacity = rm.ScopeMetrics().At(0).Metrics().Len() + } + if mb.resourceCapacity < rm.Resource().Attributes().Len() { + mb.resourceCapacity = rm.Resource().Attributes().Len() + } +} + +// ResourceMetricsOption applies changes to provided resource metrics. +type ResourceMetricsOption func(pmetric.ResourceMetrics) + +// WithSnowflakeAccountName sets provided value as "snowflake.account.name" attribute for current resource. +func WithSnowflakeAccountName(val string) ResourceMetricsOption { + return func(rm pmetric.ResourceMetrics) { + rm.Resource().Attributes().PutStr("snowflake.account.name", val) + } +} + +// WithSnowflakeUsername sets provided value as "snowflake.username" attribute for current resource. +func WithSnowflakeUsername(val string) ResourceMetricsOption { + return func(rm pmetric.ResourceMetrics) { + rm.Resource().Attributes().PutStr("snowflake.username", val) + } +} + +// WithSnowflakeWarehouseName sets provided value as "snowflake.warehouse.name" attribute for current resource. +func WithSnowflakeWarehouseName(val string) ResourceMetricsOption { + return func(rm pmetric.ResourceMetrics) { + rm.Resource().Attributes().PutStr("snowflake.warehouse.name", val) + } +} + +// WithStartTimeOverride overrides start time for all the resource metrics data points. 
+// This option should be only used if different start time has to be set on metrics coming from different resources. +func WithStartTimeOverride(start pcommon.Timestamp) ResourceMetricsOption { + return func(rm pmetric.ResourceMetrics) { + var dps pmetric.NumberDataPointSlice + metrics := rm.ScopeMetrics().At(0).Metrics() + for i := 0; i < metrics.Len(); i++ { + switch metrics.At(i).Type() { + case pmetric.MetricTypeGauge: + dps = metrics.At(i).Gauge().DataPoints() + case pmetric.MetricTypeSum: + dps = metrics.At(i).Sum().DataPoints() + } + for j := 0; j < dps.Len(); j++ { + dps.At(j).SetStartTimestamp(start) + } + } + } +} + +// EmitForResource saves all the generated metrics under a new resource and updates the internal state to be ready for +// recording another set of data points as part of another resource. This function can be helpful when one scraper +// needs to emit metrics from several resources. Otherwise calling this function is not required, +// just `Emit` function can be called instead. +// Resource attributes should be provided as ResourceMetricsOption arguments. +func (mb *MetricsBuilder) EmitForResource(rmo ...ResourceMetricsOption) { + rm := pmetric.NewResourceMetrics() + rm.Resource().Attributes().EnsureCapacity(mb.resourceCapacity) + ils := rm.ScopeMetrics().AppendEmpty() + ils.Scope().SetName("otelcol/snowflakereceiver") + ils.Scope().SetVersion(mb.buildInfo.Version) + ils.Metrics().EnsureCapacity(mb.metricsCapacity) + mb.metricSnowflakeBillingCloudServiceTotal.emit(ils.Metrics()) + mb.metricSnowflakeBillingTotalCreditTotal.emit(ils.Metrics()) + mb.metricSnowflakeBillingVirtualWarehouseTotal.emit(ils.Metrics()) + mb.metricSnowflakeBillingWarehouseCloudServiceTotal.emit(ils.Metrics()) + mb.metricSnowflakeBillingWarehouseTotalCreditTotal.emit(ils.Metrics()) + mb.metricSnowflakeBillingWarehouseVirtualWarehouseTotal.emit(ils.Metrics()) + mb.metricSnowflakeDatabaseBytesScannedAvg.emit(ils.Metrics()) + mb.metricSnowflakeDatabaseQueryCount.emit(ils.Metrics()) + mb.metricSnowflakeLoginsTotal.emit(ils.Metrics()) + mb.metricSnowflakePipeCreditsUsedTotal.emit(ils.Metrics()) + mb.metricSnowflakeQueryBlocked.emit(ils.Metrics()) + mb.metricSnowflakeQueryBytesDeletedTotal.emit(ils.Metrics()) + mb.metricSnowflakeQueryBytesScannedTotal.emit(ils.Metrics()) + mb.metricSnowflakeQueryBytesSpilledLocalTotal.emit(ils.Metrics()) + mb.metricSnowflakeQueryBytesSpilledRemoteTotal.emit(ils.Metrics()) + mb.metricSnowflakeQueryBytesWrittenTotal.emit(ils.Metrics()) + mb.metricSnowflakeQueryCompilationTimeTotal.emit(ils.Metrics()) + mb.metricSnowflakeQueryDataScannedCacheAvg.emit(ils.Metrics()) + mb.metricSnowflakeQueryExecuted.emit(ils.Metrics()) + mb.metricSnowflakeQueryExecutionTimeTotal.emit(ils.Metrics()) + mb.metricSnowflakeQueryPartitionsScannedTotal.emit(ils.Metrics()) + mb.metricSnowflakeQueryQueuedOverload.emit(ils.Metrics()) + mb.metricSnowflakeQueryQueuedProvision.emit(ils.Metrics()) + mb.metricSnowflakeQueuedOverloadTimeAvg.emit(ils.Metrics()) + mb.metricSnowflakeQueuedOverloadTimeTotal.emit(ils.Metrics()) + mb.metricSnowflakeQueuedProvisioningTimeAvg.emit(ils.Metrics()) + mb.metricSnowflakeQueuedProvisioningTimeTotal.emit(ils.Metrics()) + mb.metricSnowflakeQueuedRepairTimeAvg.emit(ils.Metrics()) + mb.metricSnowflakeQueuedRepairTimeTotal.emit(ils.Metrics()) + mb.metricSnowflakeRowsDeletedTotal.emit(ils.Metrics()) + mb.metricSnowflakeRowsInsertedTotal.emit(ils.Metrics()) + mb.metricSnowflakeRowsProducedTotal.emit(ils.Metrics()) + mb.metricSnowflakeRowsUnloadedTotal.emit(ils.Metrics()) + 
mb.metricSnowflakeRowsUpdatedTotal.emit(ils.Metrics()) + mb.metricSnowflakeSessionIDCount.emit(ils.Metrics()) + mb.metricSnowflakeStorageFailsafeBytesTotal.emit(ils.Metrics()) + mb.metricSnowflakeStorageStageBytesTotal.emit(ils.Metrics()) + mb.metricSnowflakeStorageStorageBytesTotal.emit(ils.Metrics()) + mb.metricSnowflakeTotalElapsedTimeAvg.emit(ils.Metrics()) + mb.metricSnowflakeTotalElapsedTimeTotal.emit(ils.Metrics()) + for _, op := range rmo { + op(rm) + } + if ils.Metrics().Len() > 0 { + mb.updateCapacity(rm) + rm.MoveTo(mb.metricsBuffer.ResourceMetrics().AppendEmpty()) + } +} + +// Emit returns all the metrics accumulated by the metrics builder and updates the internal state to be ready for +// recording another set of metrics. This function will be responsible for applying all the transformations required to +// produce metric representation defined in metadata and user settings, e.g. delta or cumulative. +func (mb *MetricsBuilder) Emit(rmo ...ResourceMetricsOption) pmetric.Metrics { + mb.EmitForResource(rmo...) + metrics := pmetric.NewMetrics() + mb.metricsBuffer.MoveTo(metrics) + return metrics +} + +// RecordSnowflakeBillingCloudServiceTotalDataPoint adds a data point to snowflake.billing.cloud_service.total metric. +func (mb *MetricsBuilder) RecordSnowflakeBillingCloudServiceTotalDataPoint(ts pcommon.Timestamp, val float64, serviceTypeAttributeValue string) { + mb.metricSnowflakeBillingCloudServiceTotal.recordDataPoint(mb.startTime, ts, val, serviceTypeAttributeValue) +} + +// RecordSnowflakeBillingTotalCreditTotalDataPoint adds a data point to snowflake.billing.total_credit.total metric. +func (mb *MetricsBuilder) RecordSnowflakeBillingTotalCreditTotalDataPoint(ts pcommon.Timestamp, val float64, serviceTypeAttributeValue string) { + mb.metricSnowflakeBillingTotalCreditTotal.recordDataPoint(mb.startTime, ts, val, serviceTypeAttributeValue) +} + +// RecordSnowflakeBillingVirtualWarehouseTotalDataPoint adds a data point to snowflake.billing.virtual_warehouse.total metric. +func (mb *MetricsBuilder) RecordSnowflakeBillingVirtualWarehouseTotalDataPoint(ts pcommon.Timestamp, val float64, serviceTypeAttributeValue string) { + mb.metricSnowflakeBillingVirtualWarehouseTotal.recordDataPoint(mb.startTime, ts, val, serviceTypeAttributeValue) +} + +// RecordSnowflakeBillingWarehouseCloudServiceTotalDataPoint adds a data point to snowflake.billing.warehouse.cloud_service.total metric. +func (mb *MetricsBuilder) RecordSnowflakeBillingWarehouseCloudServiceTotalDataPoint(ts pcommon.Timestamp, val float64, warehouseNameAttributeValue string) { + mb.metricSnowflakeBillingWarehouseCloudServiceTotal.recordDataPoint(mb.startTime, ts, val, warehouseNameAttributeValue) +} + +// RecordSnowflakeBillingWarehouseTotalCreditTotalDataPoint adds a data point to snowflake.billing.warehouse.total_credit.total metric. +func (mb *MetricsBuilder) RecordSnowflakeBillingWarehouseTotalCreditTotalDataPoint(ts pcommon.Timestamp, val float64, warehouseNameAttributeValue string) { + mb.metricSnowflakeBillingWarehouseTotalCreditTotal.recordDataPoint(mb.startTime, ts, val, warehouseNameAttributeValue) +} + +// RecordSnowflakeBillingWarehouseVirtualWarehouseTotalDataPoint adds a data point to snowflake.billing.warehouse.virtual_warehouse.total metric. 
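+// For example, a scraper holding a warehouse-level billing result might call
+// (hypothetical values; "now" stands for a pcommon.Timestamp supplied by the
+// caller):
+//
+//	mb.RecordSnowflakeBillingWarehouseVirtualWarehouseTotalDataPoint(now, 12.5, "my_warehouse")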
+func (mb *MetricsBuilder) RecordSnowflakeBillingWarehouseVirtualWarehouseTotalDataPoint(ts pcommon.Timestamp, val float64, warehouseNameAttributeValue string) { + mb.metricSnowflakeBillingWarehouseVirtualWarehouseTotal.recordDataPoint(mb.startTime, ts, val, warehouseNameAttributeValue) +} + +// RecordSnowflakeDatabaseBytesScannedAvgDataPoint adds a data point to snowflake.database.bytes_scanned.avg metric. +func (mb *MetricsBuilder) RecordSnowflakeDatabaseBytesScannedAvgDataPoint(ts pcommon.Timestamp, val float64, schemaNameAttributeValue string, executionStatusAttributeValue string, errorMessageAttributeValue string, queryTypeAttributeValue string, warehouseNameAttributeValue string, databaseNameAttributeValue string, warehouseSizeAttributeValue string) { + mb.metricSnowflakeDatabaseBytesScannedAvg.recordDataPoint(mb.startTime, ts, val, schemaNameAttributeValue, executionStatusAttributeValue, errorMessageAttributeValue, queryTypeAttributeValue, warehouseNameAttributeValue, databaseNameAttributeValue, warehouseSizeAttributeValue) +} + +// RecordSnowflakeDatabaseQueryCountDataPoint adds a data point to snowflake.database.query.count metric. +func (mb *MetricsBuilder) RecordSnowflakeDatabaseQueryCountDataPoint(ts pcommon.Timestamp, val int64, schemaNameAttributeValue string, executionStatusAttributeValue string, errorMessageAttributeValue string, queryTypeAttributeValue string, warehouseNameAttributeValue string, databaseNameAttributeValue string, warehouseSizeAttributeValue string) { + mb.metricSnowflakeDatabaseQueryCount.recordDataPoint(mb.startTime, ts, val, schemaNameAttributeValue, executionStatusAttributeValue, errorMessageAttributeValue, queryTypeAttributeValue, warehouseNameAttributeValue, databaseNameAttributeValue, warehouseSizeAttributeValue) +} + +// RecordSnowflakeLoginsTotalDataPoint adds a data point to snowflake.logins.total metric. +func (mb *MetricsBuilder) RecordSnowflakeLoginsTotalDataPoint(ts pcommon.Timestamp, val int64, errorMessageAttributeValue string, reportedClientTypeAttributeValue string, isSuccessAttributeValue string) { + mb.metricSnowflakeLoginsTotal.recordDataPoint(mb.startTime, ts, val, errorMessageAttributeValue, reportedClientTypeAttributeValue, isSuccessAttributeValue) +} + +// RecordSnowflakePipeCreditsUsedTotalDataPoint adds a data point to snowflake.pipe.credits_used.total metric. +func (mb *MetricsBuilder) RecordSnowflakePipeCreditsUsedTotalDataPoint(ts pcommon.Timestamp, val float64, pipeNameAttributeValue string) { + mb.metricSnowflakePipeCreditsUsedTotal.recordDataPoint(mb.startTime, ts, val, pipeNameAttributeValue) +} + +// RecordSnowflakeQueryBlockedDataPoint adds a data point to snowflake.query.blocked metric. +func (mb *MetricsBuilder) RecordSnowflakeQueryBlockedDataPoint(ts pcommon.Timestamp, val float64, warehouseNameAttributeValue string) { + mb.metricSnowflakeQueryBlocked.recordDataPoint(mb.startTime, ts, val, warehouseNameAttributeValue) +} + +// RecordSnowflakeQueryBytesDeletedTotalDataPoint adds a data point to snowflake.query.bytes_deleted.total metric. 
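+// The seven attribute arguments map, in order, to the schema_name,
+// execution_status, error_message, query_type, warehouse_name, database_name,
+// and warehouse_size attributes. A hypothetical call (placeholder values only):
+//
+//	mb.RecordSnowflakeQueryBytesDeletedTotalDataPoint(now, 1024,
+//		"my_schema", "SUCCESS", "", "SELECT", "my_warehouse", "my_database", "x-small")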
+func (mb *MetricsBuilder) RecordSnowflakeQueryBytesDeletedTotalDataPoint(ts pcommon.Timestamp, val int64, schemaNameAttributeValue string, executionStatusAttributeValue string, errorMessageAttributeValue string, queryTypeAttributeValue string, warehouseNameAttributeValue string, databaseNameAttributeValue string, warehouseSizeAttributeValue string) { + mb.metricSnowflakeQueryBytesDeletedTotal.recordDataPoint(mb.startTime, ts, val, schemaNameAttributeValue, executionStatusAttributeValue, errorMessageAttributeValue, queryTypeAttributeValue, warehouseNameAttributeValue, databaseNameAttributeValue, warehouseSizeAttributeValue) +} + +// RecordSnowflakeQueryBytesScannedTotalDataPoint adds a data point to snowflake.query.bytes_scanned.total metric. +func (mb *MetricsBuilder) RecordSnowflakeQueryBytesScannedTotalDataPoint(ts pcommon.Timestamp, val int64, schemaNameAttributeValue string, executionStatusAttributeValue string, errorMessageAttributeValue string, queryTypeAttributeValue string, warehouseNameAttributeValue string, databaseNameAttributeValue string, warehouseSizeAttributeValue string) { + mb.metricSnowflakeQueryBytesScannedTotal.recordDataPoint(mb.startTime, ts, val, schemaNameAttributeValue, executionStatusAttributeValue, errorMessageAttributeValue, queryTypeAttributeValue, warehouseNameAttributeValue, databaseNameAttributeValue, warehouseSizeAttributeValue) +} + +// RecordSnowflakeQueryBytesSpilledLocalTotalDataPoint adds a data point to snowflake.query.bytes_spilled.local.total metric. +func (mb *MetricsBuilder) RecordSnowflakeQueryBytesSpilledLocalTotalDataPoint(ts pcommon.Timestamp, val int64, schemaNameAttributeValue string, executionStatusAttributeValue string, errorMessageAttributeValue string, queryTypeAttributeValue string, warehouseNameAttributeValue string, databaseNameAttributeValue string, warehouseSizeAttributeValue string) { + mb.metricSnowflakeQueryBytesSpilledLocalTotal.recordDataPoint(mb.startTime, ts, val, schemaNameAttributeValue, executionStatusAttributeValue, errorMessageAttributeValue, queryTypeAttributeValue, warehouseNameAttributeValue, databaseNameAttributeValue, warehouseSizeAttributeValue) +} + +// RecordSnowflakeQueryBytesSpilledRemoteTotalDataPoint adds a data point to snowflake.query.bytes_spilled.remote.total metric. +func (mb *MetricsBuilder) RecordSnowflakeQueryBytesSpilledRemoteTotalDataPoint(ts pcommon.Timestamp, val int64, schemaNameAttributeValue string, executionStatusAttributeValue string, errorMessageAttributeValue string, queryTypeAttributeValue string, warehouseNameAttributeValue string, databaseNameAttributeValue string, warehouseSizeAttributeValue string) { + mb.metricSnowflakeQueryBytesSpilledRemoteTotal.recordDataPoint(mb.startTime, ts, val, schemaNameAttributeValue, executionStatusAttributeValue, errorMessageAttributeValue, queryTypeAttributeValue, warehouseNameAttributeValue, databaseNameAttributeValue, warehouseSizeAttributeValue) +} + +// RecordSnowflakeQueryBytesWrittenTotalDataPoint adds a data point to snowflake.query.bytes_written.total metric. 
+func (mb *MetricsBuilder) RecordSnowflakeQueryBytesWrittenTotalDataPoint(ts pcommon.Timestamp, val int64, schemaNameAttributeValue string, executionStatusAttributeValue string, errorMessageAttributeValue string, queryTypeAttributeValue string, warehouseNameAttributeValue string, databaseNameAttributeValue string, warehouseSizeAttributeValue string) { + mb.metricSnowflakeQueryBytesWrittenTotal.recordDataPoint(mb.startTime, ts, val, schemaNameAttributeValue, executionStatusAttributeValue, errorMessageAttributeValue, queryTypeAttributeValue, warehouseNameAttributeValue, databaseNameAttributeValue, warehouseSizeAttributeValue) +} + +// RecordSnowflakeQueryCompilationTimeTotalDataPoint adds a data point to snowflake.query.compilation_time.total metric. +func (mb *MetricsBuilder) RecordSnowflakeQueryCompilationTimeTotalDataPoint(ts pcommon.Timestamp, val float64, schemaNameAttributeValue string, executionStatusAttributeValue string, errorMessageAttributeValue string, queryTypeAttributeValue string, warehouseNameAttributeValue string, databaseNameAttributeValue string, warehouseSizeAttributeValue string) { + mb.metricSnowflakeQueryCompilationTimeTotal.recordDataPoint(mb.startTime, ts, val, schemaNameAttributeValue, executionStatusAttributeValue, errorMessageAttributeValue, queryTypeAttributeValue, warehouseNameAttributeValue, databaseNameAttributeValue, warehouseSizeAttributeValue) +} + +// RecordSnowflakeQueryDataScannedCacheAvgDataPoint adds a data point to snowflake.query.data_scanned_cache.avg metric. +func (mb *MetricsBuilder) RecordSnowflakeQueryDataScannedCacheAvgDataPoint(ts pcommon.Timestamp, val float64, schemaNameAttributeValue string, executionStatusAttributeValue string, errorMessageAttributeValue string, queryTypeAttributeValue string, warehouseNameAttributeValue string, databaseNameAttributeValue string, warehouseSizeAttributeValue string) { + mb.metricSnowflakeQueryDataScannedCacheAvg.recordDataPoint(mb.startTime, ts, val, schemaNameAttributeValue, executionStatusAttributeValue, errorMessageAttributeValue, queryTypeAttributeValue, warehouseNameAttributeValue, databaseNameAttributeValue, warehouseSizeAttributeValue) +} + +// RecordSnowflakeQueryExecutedDataPoint adds a data point to snowflake.query.executed metric. +func (mb *MetricsBuilder) RecordSnowflakeQueryExecutedDataPoint(ts pcommon.Timestamp, val float64, warehouseNameAttributeValue string) { + mb.metricSnowflakeQueryExecuted.recordDataPoint(mb.startTime, ts, val, warehouseNameAttributeValue) +} + +// RecordSnowflakeQueryExecutionTimeTotalDataPoint adds a data point to snowflake.query.execution_time.total metric. +func (mb *MetricsBuilder) RecordSnowflakeQueryExecutionTimeTotalDataPoint(ts pcommon.Timestamp, val float64, schemaNameAttributeValue string, executionStatusAttributeValue string, errorMessageAttributeValue string, queryTypeAttributeValue string, warehouseNameAttributeValue string, databaseNameAttributeValue string, warehouseSizeAttributeValue string) { + mb.metricSnowflakeQueryExecutionTimeTotal.recordDataPoint(mb.startTime, ts, val, schemaNameAttributeValue, executionStatusAttributeValue, errorMessageAttributeValue, queryTypeAttributeValue, warehouseNameAttributeValue, databaseNameAttributeValue, warehouseSizeAttributeValue) +} + +// RecordSnowflakeQueryPartitionsScannedTotalDataPoint adds a data point to snowflake.query.partitions_scanned.total metric. 
+func (mb *MetricsBuilder) RecordSnowflakeQueryPartitionsScannedTotalDataPoint(ts pcommon.Timestamp, val int64, schemaNameAttributeValue string, executionStatusAttributeValue string, errorMessageAttributeValue string, queryTypeAttributeValue string, warehouseNameAttributeValue string, databaseNameAttributeValue string, warehouseSizeAttributeValue string) { + mb.metricSnowflakeQueryPartitionsScannedTotal.recordDataPoint(mb.startTime, ts, val, schemaNameAttributeValue, executionStatusAttributeValue, errorMessageAttributeValue, queryTypeAttributeValue, warehouseNameAttributeValue, databaseNameAttributeValue, warehouseSizeAttributeValue) +} + +// RecordSnowflakeQueryQueuedOverloadDataPoint adds a data point to snowflake.query.queued_overload metric. +func (mb *MetricsBuilder) RecordSnowflakeQueryQueuedOverloadDataPoint(ts pcommon.Timestamp, val float64, warehouseNameAttributeValue string) { + mb.metricSnowflakeQueryQueuedOverload.recordDataPoint(mb.startTime, ts, val, warehouseNameAttributeValue) +} + +// RecordSnowflakeQueryQueuedProvisionDataPoint adds a data point to snowflake.query.queued_provision metric. +func (mb *MetricsBuilder) RecordSnowflakeQueryQueuedProvisionDataPoint(ts pcommon.Timestamp, val float64, warehouseNameAttributeValue string) { + mb.metricSnowflakeQueryQueuedProvision.recordDataPoint(mb.startTime, ts, val, warehouseNameAttributeValue) +} + +// RecordSnowflakeQueuedOverloadTimeAvgDataPoint adds a data point to snowflake.queued_overload_time.avg metric. +func (mb *MetricsBuilder) RecordSnowflakeQueuedOverloadTimeAvgDataPoint(ts pcommon.Timestamp, val float64, schemaNameAttributeValue string, executionStatusAttributeValue string, errorMessageAttributeValue string, queryTypeAttributeValue string, warehouseNameAttributeValue string, databaseNameAttributeValue string, warehouseSizeAttributeValue string) { + mb.metricSnowflakeQueuedOverloadTimeAvg.recordDataPoint(mb.startTime, ts, val, schemaNameAttributeValue, executionStatusAttributeValue, errorMessageAttributeValue, queryTypeAttributeValue, warehouseNameAttributeValue, databaseNameAttributeValue, warehouseSizeAttributeValue) +} + +// RecordSnowflakeQueuedOverloadTimeTotalDataPoint adds a data point to snowflake.queued_overload_time.total metric. +func (mb *MetricsBuilder) RecordSnowflakeQueuedOverloadTimeTotalDataPoint(ts pcommon.Timestamp, val int64, schemaNameAttributeValue string, executionStatusAttributeValue string, errorMessageAttributeValue string, queryTypeAttributeValue string, warehouseNameAttributeValue string, databaseNameAttributeValue string, warehouseSizeAttributeValue string) { + mb.metricSnowflakeQueuedOverloadTimeTotal.recordDataPoint(mb.startTime, ts, val, schemaNameAttributeValue, executionStatusAttributeValue, errorMessageAttributeValue, queryTypeAttributeValue, warehouseNameAttributeValue, databaseNameAttributeValue, warehouseSizeAttributeValue) +} + +// RecordSnowflakeQueuedProvisioningTimeAvgDataPoint adds a data point to snowflake.queued_provisioning_time.avg metric. 
+func (mb *MetricsBuilder) RecordSnowflakeQueuedProvisioningTimeAvgDataPoint(ts pcommon.Timestamp, val float64, schemaNameAttributeValue string, executionStatusAttributeValue string, errorMessageAttributeValue string, queryTypeAttributeValue string, warehouseNameAttributeValue string, databaseNameAttributeValue string, warehouseSizeAttributeValue string) { + mb.metricSnowflakeQueuedProvisioningTimeAvg.recordDataPoint(mb.startTime, ts, val, schemaNameAttributeValue, executionStatusAttributeValue, errorMessageAttributeValue, queryTypeAttributeValue, warehouseNameAttributeValue, databaseNameAttributeValue, warehouseSizeAttributeValue) +} + +// RecordSnowflakeQueuedProvisioningTimeTotalDataPoint adds a data point to snowflake.queued_provisioning_time.total metric. +func (mb *MetricsBuilder) RecordSnowflakeQueuedProvisioningTimeTotalDataPoint(ts pcommon.Timestamp, val int64, schemaNameAttributeValue string, executionStatusAttributeValue string, errorMessageAttributeValue string, queryTypeAttributeValue string, warehouseNameAttributeValue string, databaseNameAttributeValue string, warehouseSizeAttributeValue string) { + mb.metricSnowflakeQueuedProvisioningTimeTotal.recordDataPoint(mb.startTime, ts, val, schemaNameAttributeValue, executionStatusAttributeValue, errorMessageAttributeValue, queryTypeAttributeValue, warehouseNameAttributeValue, databaseNameAttributeValue, warehouseSizeAttributeValue) +} + +// RecordSnowflakeQueuedRepairTimeAvgDataPoint adds a data point to snowflake.queued_repair_time.avg metric. +func (mb *MetricsBuilder) RecordSnowflakeQueuedRepairTimeAvgDataPoint(ts pcommon.Timestamp, val float64, schemaNameAttributeValue string, executionStatusAttributeValue string, errorMessageAttributeValue string, queryTypeAttributeValue string, warehouseNameAttributeValue string, databaseNameAttributeValue string, warehouseSizeAttributeValue string) { + mb.metricSnowflakeQueuedRepairTimeAvg.recordDataPoint(mb.startTime, ts, val, schemaNameAttributeValue, executionStatusAttributeValue, errorMessageAttributeValue, queryTypeAttributeValue, warehouseNameAttributeValue, databaseNameAttributeValue, warehouseSizeAttributeValue) +} + +// RecordSnowflakeQueuedRepairTimeTotalDataPoint adds a data point to snowflake.queued_repair_time.total metric. +func (mb *MetricsBuilder) RecordSnowflakeQueuedRepairTimeTotalDataPoint(ts pcommon.Timestamp, val int64, schemaNameAttributeValue string, executionStatusAttributeValue string, errorMessageAttributeValue string, queryTypeAttributeValue string, warehouseNameAttributeValue string, databaseNameAttributeValue string, warehouseSizeAttributeValue string) { + mb.metricSnowflakeQueuedRepairTimeTotal.recordDataPoint(mb.startTime, ts, val, schemaNameAttributeValue, executionStatusAttributeValue, errorMessageAttributeValue, queryTypeAttributeValue, warehouseNameAttributeValue, databaseNameAttributeValue, warehouseSizeAttributeValue) +} + +// RecordSnowflakeRowsDeletedTotalDataPoint adds a data point to snowflake.rows_deleted.total metric. 
+func (mb *MetricsBuilder) RecordSnowflakeRowsDeletedTotalDataPoint(ts pcommon.Timestamp, val int64, schemaNameAttributeValue string, executionStatusAttributeValue string, errorMessageAttributeValue string, queryTypeAttributeValue string, warehouseNameAttributeValue string, databaseNameAttributeValue string, warehouseSizeAttributeValue string) { + mb.metricSnowflakeRowsDeletedTotal.recordDataPoint(mb.startTime, ts, val, schemaNameAttributeValue, executionStatusAttributeValue, errorMessageAttributeValue, queryTypeAttributeValue, warehouseNameAttributeValue, databaseNameAttributeValue, warehouseSizeAttributeValue) +} + +// RecordSnowflakeRowsInsertedTotalDataPoint adds a data point to snowflake.rows_inserted.total metric. +func (mb *MetricsBuilder) RecordSnowflakeRowsInsertedTotalDataPoint(ts pcommon.Timestamp, val int64, schemaNameAttributeValue string, executionStatusAttributeValue string, errorMessageAttributeValue string, queryTypeAttributeValue string, warehouseNameAttributeValue string, databaseNameAttributeValue string, warehouseSizeAttributeValue string) { + mb.metricSnowflakeRowsInsertedTotal.recordDataPoint(mb.startTime, ts, val, schemaNameAttributeValue, executionStatusAttributeValue, errorMessageAttributeValue, queryTypeAttributeValue, warehouseNameAttributeValue, databaseNameAttributeValue, warehouseSizeAttributeValue) +} + +// RecordSnowflakeRowsProducedTotalDataPoint adds a data point to snowflake.rows_produced.total metric. +func (mb *MetricsBuilder) RecordSnowflakeRowsProducedTotalDataPoint(ts pcommon.Timestamp, val int64, schemaNameAttributeValue string, executionStatusAttributeValue string, errorMessageAttributeValue string, queryTypeAttributeValue string, warehouseNameAttributeValue string, databaseNameAttributeValue string, warehouseSizeAttributeValue string) { + mb.metricSnowflakeRowsProducedTotal.recordDataPoint(mb.startTime, ts, val, schemaNameAttributeValue, executionStatusAttributeValue, errorMessageAttributeValue, queryTypeAttributeValue, warehouseNameAttributeValue, databaseNameAttributeValue, warehouseSizeAttributeValue) +} + +// RecordSnowflakeRowsUnloadedTotalDataPoint adds a data point to snowflake.rows_unloaded.total metric. +func (mb *MetricsBuilder) RecordSnowflakeRowsUnloadedTotalDataPoint(ts pcommon.Timestamp, val int64, schemaNameAttributeValue string, executionStatusAttributeValue string, errorMessageAttributeValue string, queryTypeAttributeValue string, warehouseNameAttributeValue string, databaseNameAttributeValue string, warehouseSizeAttributeValue string) { + mb.metricSnowflakeRowsUnloadedTotal.recordDataPoint(mb.startTime, ts, val, schemaNameAttributeValue, executionStatusAttributeValue, errorMessageAttributeValue, queryTypeAttributeValue, warehouseNameAttributeValue, databaseNameAttributeValue, warehouseSizeAttributeValue) +} + +// RecordSnowflakeRowsUpdatedTotalDataPoint adds a data point to snowflake.rows_updated.total metric. 
+func (mb *MetricsBuilder) RecordSnowflakeRowsUpdatedTotalDataPoint(ts pcommon.Timestamp, val int64, schemaNameAttributeValue string, executionStatusAttributeValue string, errorMessageAttributeValue string, queryTypeAttributeValue string, warehouseNameAttributeValue string, databaseNameAttributeValue string, warehouseSizeAttributeValue string) { + mb.metricSnowflakeRowsUpdatedTotal.recordDataPoint(mb.startTime, ts, val, schemaNameAttributeValue, executionStatusAttributeValue, errorMessageAttributeValue, queryTypeAttributeValue, warehouseNameAttributeValue, databaseNameAttributeValue, warehouseSizeAttributeValue) +} + +// RecordSnowflakeSessionIDCountDataPoint adds a data point to snowflake.session_id.count metric. +func (mb *MetricsBuilder) RecordSnowflakeSessionIDCountDataPoint(ts pcommon.Timestamp, val int64, userNameAttributeValue string) { + mb.metricSnowflakeSessionIDCount.recordDataPoint(mb.startTime, ts, val, userNameAttributeValue) +} + +// RecordSnowflakeStorageFailsafeBytesTotalDataPoint adds a data point to snowflake.storage.failsafe_bytes.total metric. +func (mb *MetricsBuilder) RecordSnowflakeStorageFailsafeBytesTotalDataPoint(ts pcommon.Timestamp, val int64) { + mb.metricSnowflakeStorageFailsafeBytesTotal.recordDataPoint(mb.startTime, ts, val) +} + +// RecordSnowflakeStorageStageBytesTotalDataPoint adds a data point to snowflake.storage.stage_bytes.total metric. +func (mb *MetricsBuilder) RecordSnowflakeStorageStageBytesTotalDataPoint(ts pcommon.Timestamp, val int64) { + mb.metricSnowflakeStorageStageBytesTotal.recordDataPoint(mb.startTime, ts, val) +} + +// RecordSnowflakeStorageStorageBytesTotalDataPoint adds a data point to snowflake.storage.storage_bytes.total metric. +func (mb *MetricsBuilder) RecordSnowflakeStorageStorageBytesTotalDataPoint(ts pcommon.Timestamp, val int64) { + mb.metricSnowflakeStorageStorageBytesTotal.recordDataPoint(mb.startTime, ts, val) +} + +// RecordSnowflakeTotalElapsedTimeAvgDataPoint adds a data point to snowflake.total_elapsed_time.avg metric. +func (mb *MetricsBuilder) RecordSnowflakeTotalElapsedTimeAvgDataPoint(ts pcommon.Timestamp, val float64, schemaNameAttributeValue string, executionStatusAttributeValue string, errorMessageAttributeValue string, queryTypeAttributeValue string, warehouseNameAttributeValue string, databaseNameAttributeValue string, warehouseSizeAttributeValue string) { + mb.metricSnowflakeTotalElapsedTimeAvg.recordDataPoint(mb.startTime, ts, val, schemaNameAttributeValue, executionStatusAttributeValue, errorMessageAttributeValue, queryTypeAttributeValue, warehouseNameAttributeValue, databaseNameAttributeValue, warehouseSizeAttributeValue) +} + +// RecordSnowflakeTotalElapsedTimeTotalDataPoint adds a data point to snowflake.total_elapsed_time.total metric. +func (mb *MetricsBuilder) RecordSnowflakeTotalElapsedTimeTotalDataPoint(ts pcommon.Timestamp, val int64, schemaNameAttributeValue string, executionStatusAttributeValue string, errorMessageAttributeValue string, queryTypeAttributeValue string, warehouseNameAttributeValue string, databaseNameAttributeValue string, warehouseSizeAttributeValue string) { + mb.metricSnowflakeTotalElapsedTimeTotal.recordDataPoint(mb.startTime, ts, val, schemaNameAttributeValue, executionStatusAttributeValue, errorMessageAttributeValue, queryTypeAttributeValue, warehouseNameAttributeValue, databaseNameAttributeValue, warehouseSizeAttributeValue) +} + +// Reset resets metrics builder to its initial state. 
It should be used when external metrics source is restarted,
+// and metrics builder should update its startTime and reset its internal state accordingly.
+func (mb *MetricsBuilder) Reset(options ...metricBuilderOption) {
+	mb.startTime = pcommon.NewTimestampFromTime(time.Now())
+	for _, op := range options {
+		op(mb)
+	}
+}
diff --git a/receiver/snowflakereceiver/internal/metadata/generated_metrics_test.go b/receiver/snowflakereceiver/internal/metadata/generated_metrics_test.go
new file mode 100644
index 000000000000..a2218427f0dd
--- /dev/null
+++ b/receiver/snowflakereceiver/internal/metadata/generated_metrics_test.go
@@ -0,0 +1,1330 @@
+// Code generated by mdatagen. DO NOT EDIT.
+
+package metadata
+
+import (
+	"reflect"
+	"testing"
+
+	"github.com/stretchr/testify/assert"
+	"go.opentelemetry.io/collector/component"
+	"go.opentelemetry.io/collector/pdata/pcommon"
+	"go.opentelemetry.io/collector/pdata/pmetric"
+)
+
+func TestDefaultMetrics(t *testing.T) {
+	start := pcommon.Timestamp(1_000_000_000)
+	ts := pcommon.Timestamp(1_000_001_000)
+	mb := NewMetricsBuilder(DefaultMetricsSettings(), component.BuildInfo{}, WithStartTime(start))
+	enabledMetrics := make(map[string]bool)
+
+	mb.RecordSnowflakeBillingCloudServiceTotalDataPoint(ts, 1, "attr-val")
+
+	mb.RecordSnowflakeBillingTotalCreditTotalDataPoint(ts, 1, "attr-val")
+
+	mb.RecordSnowflakeBillingVirtualWarehouseTotalDataPoint(ts, 1, "attr-val")
+
+	mb.RecordSnowflakeBillingWarehouseCloudServiceTotalDataPoint(ts, 1, "attr-val")
+
+	mb.RecordSnowflakeBillingWarehouseTotalCreditTotalDataPoint(ts, 1, "attr-val")
+
+	mb.RecordSnowflakeBillingWarehouseVirtualWarehouseTotalDataPoint(ts, 1, "attr-val")
+
+	enabledMetrics["snowflake.database.bytes_scanned.avg"] = true
+	mb.RecordSnowflakeDatabaseBytesScannedAvgDataPoint(ts, 1, "attr-val", "attr-val", "attr-val", "attr-val", "attr-val", "attr-val", "attr-val")
+
+	enabledMetrics["snowflake.database.query.count"] = true
+	mb.RecordSnowflakeDatabaseQueryCountDataPoint(ts, 1, "attr-val", "attr-val", "attr-val", "attr-val", "attr-val", "attr-val", "attr-val")
+
+	mb.RecordSnowflakeLoginsTotalDataPoint(ts, 1, "attr-val", "attr-val", "attr-val")
+
+	mb.RecordSnowflakePipeCreditsUsedTotalDataPoint(ts, 1, "attr-val")
+
+	enabledMetrics["snowflake.query.blocked"] = true
+	mb.RecordSnowflakeQueryBlockedDataPoint(ts, 1, "attr-val")
+
+	enabledMetrics["snowflake.query.bytes_deleted.total"] = true
+	mb.RecordSnowflakeQueryBytesDeletedTotalDataPoint(ts, 1, "attr-val", "attr-val", "attr-val", "attr-val", "attr-val", "attr-val", "attr-val")
+
+	mb.RecordSnowflakeQueryBytesScannedTotalDataPoint(ts, 1, "attr-val", "attr-val", "attr-val", "attr-val", "attr-val", "attr-val", "attr-val")
+
+	mb.RecordSnowflakeQueryBytesSpilledLocalTotalDataPoint(ts, 1, "attr-val", "attr-val", "attr-val", "attr-val", "attr-val", "attr-val", "attr-val")
+
+	mb.RecordSnowflakeQueryBytesSpilledRemoteTotalDataPoint(ts, 1, "attr-val", "attr-val", "attr-val", "attr-val", "attr-val", "attr-val", "attr-val")
+
+	enabledMetrics["snowflake.query.bytes_written.total"] = true
+	mb.RecordSnowflakeQueryBytesWrittenTotalDataPoint(ts, 1, "attr-val", "attr-val", "attr-val", "attr-val", "attr-val", "attr-val", "attr-val")
+
+	enabledMetrics["snowflake.query.compilation_time.total"] = true
+	mb.RecordSnowflakeQueryCompilationTimeTotalDataPoint(ts, 1, "attr-val", "attr-val", "attr-val", "attr-val", "attr-val", "attr-val", "attr-val")
+
+	mb.RecordSnowflakeQueryDataScannedCacheAvgDataPoint(ts, 1, "attr-val", "attr-val", "attr-val", "attr-val", 
"attr-val", "attr-val", "attr-val") + + enabledMetrics["snowflake.query.executed"] = true + mb.RecordSnowflakeQueryExecutedDataPoint(ts, 1, "attr-val") + + enabledMetrics["snowflake.query.execution_time.total"] = true + mb.RecordSnowflakeQueryExecutionTimeTotalDataPoint(ts, 1, "attr-val", "attr-val", "attr-val", "attr-val", "attr-val", "attr-val", "attr-val") + + mb.RecordSnowflakeQueryPartitionsScannedTotalDataPoint(ts, 1, "attr-val", "attr-val", "attr-val", "attr-val", "attr-val", "attr-val", "attr-val") + + enabledMetrics["snowflake.query.queued_overload"] = true + mb.RecordSnowflakeQueryQueuedOverloadDataPoint(ts, 1, "attr-val") + + enabledMetrics["snowflake.query.queued_provision"] = true + mb.RecordSnowflakeQueryQueuedProvisionDataPoint(ts, 1, "attr-val") + + enabledMetrics["snowflake.queued_overload_time.avg"] = true + mb.RecordSnowflakeQueuedOverloadTimeAvgDataPoint(ts, 1, "attr-val", "attr-val", "attr-val", "attr-val", "attr-val", "attr-val", "attr-val") + + mb.RecordSnowflakeQueuedOverloadTimeTotalDataPoint(ts, 1, "attr-val", "attr-val", "attr-val", "attr-val", "attr-val", "attr-val", "attr-val") + + enabledMetrics["snowflake.queued_provisioning_time.avg"] = true + mb.RecordSnowflakeQueuedProvisioningTimeAvgDataPoint(ts, 1, "attr-val", "attr-val", "attr-val", "attr-val", "attr-val", "attr-val", "attr-val") + + mb.RecordSnowflakeQueuedProvisioningTimeTotalDataPoint(ts, 1, "attr-val", "attr-val", "attr-val", "attr-val", "attr-val", "attr-val", "attr-val") + + enabledMetrics["snowflake.queued_repair_time.avg"] = true + mb.RecordSnowflakeQueuedRepairTimeAvgDataPoint(ts, 1, "attr-val", "attr-val", "attr-val", "attr-val", "attr-val", "attr-val", "attr-val") + + mb.RecordSnowflakeQueuedRepairTimeTotalDataPoint(ts, 1, "attr-val", "attr-val", "attr-val", "attr-val", "attr-val", "attr-val", "attr-val") + + mb.RecordSnowflakeRowsDeletedTotalDataPoint(ts, 1, "attr-val", "attr-val", "attr-val", "attr-val", "attr-val", "attr-val", "attr-val") + + mb.RecordSnowflakeRowsInsertedTotalDataPoint(ts, 1, "attr-val", "attr-val", "attr-val", "attr-val", "attr-val", "attr-val", "attr-val") + + mb.RecordSnowflakeRowsProducedTotalDataPoint(ts, 1, "attr-val", "attr-val", "attr-val", "attr-val", "attr-val", "attr-val", "attr-val") + + mb.RecordSnowflakeRowsUnloadedTotalDataPoint(ts, 1, "attr-val", "attr-val", "attr-val", "attr-val", "attr-val", "attr-val", "attr-val") + + mb.RecordSnowflakeRowsUpdatedTotalDataPoint(ts, 1, "attr-val", "attr-val", "attr-val", "attr-val", "attr-val", "attr-val", "attr-val") + + mb.RecordSnowflakeSessionIDCountDataPoint(ts, 1, "attr-val") + + mb.RecordSnowflakeStorageFailsafeBytesTotalDataPoint(ts, 1) + + enabledMetrics["snowflake.storage.stage_bytes.total"] = true + mb.RecordSnowflakeStorageStageBytesTotalDataPoint(ts, 1) + + enabledMetrics["snowflake.storage.storage_bytes.total"] = true + mb.RecordSnowflakeStorageStorageBytesTotalDataPoint(ts, 1) + + enabledMetrics["snowflake.total_elapsed_time.avg"] = true + mb.RecordSnowflakeTotalElapsedTimeAvgDataPoint(ts, 1, "attr-val", "attr-val", "attr-val", "attr-val", "attr-val", "attr-val", "attr-val") + + mb.RecordSnowflakeTotalElapsedTimeTotalDataPoint(ts, 1, "attr-val", "attr-val", "attr-val", "attr-val", "attr-val", "attr-val", "attr-val") + + metrics := mb.Emit() + + assert.Equal(t, 1, metrics.ResourceMetrics().Len()) + sm := metrics.ResourceMetrics().At(0).ScopeMetrics() + assert.Equal(t, 1, sm.Len()) + ms := sm.At(0).Metrics() + assert.Equal(t, len(enabledMetrics), ms.Len()) + seenMetrics := make(map[string]bool) + for i := 0; 
i < ms.Len(); i++ { + assert.True(t, enabledMetrics[ms.At(i).Name()]) + seenMetrics[ms.At(i).Name()] = true + } + assert.Equal(t, len(enabledMetrics), len(seenMetrics)) +} + +func TestAllMetrics(t *testing.T) { + start := pcommon.Timestamp(1_000_000_000) + ts := pcommon.Timestamp(1_000_001_000) + settings := MetricsSettings{ + SnowflakeBillingCloudServiceTotal: MetricSettings{Enabled: true}, + SnowflakeBillingTotalCreditTotal: MetricSettings{Enabled: true}, + SnowflakeBillingVirtualWarehouseTotal: MetricSettings{Enabled: true}, + SnowflakeBillingWarehouseCloudServiceTotal: MetricSettings{Enabled: true}, + SnowflakeBillingWarehouseTotalCreditTotal: MetricSettings{Enabled: true}, + SnowflakeBillingWarehouseVirtualWarehouseTotal: MetricSettings{Enabled: true}, + SnowflakeDatabaseBytesScannedAvg: MetricSettings{Enabled: true}, + SnowflakeDatabaseQueryCount: MetricSettings{Enabled: true}, + SnowflakeLoginsTotal: MetricSettings{Enabled: true}, + SnowflakePipeCreditsUsedTotal: MetricSettings{Enabled: true}, + SnowflakeQueryBlocked: MetricSettings{Enabled: true}, + SnowflakeQueryBytesDeletedTotal: MetricSettings{Enabled: true}, + SnowflakeQueryBytesScannedTotal: MetricSettings{Enabled: true}, + SnowflakeQueryBytesSpilledLocalTotal: MetricSettings{Enabled: true}, + SnowflakeQueryBytesSpilledRemoteTotal: MetricSettings{Enabled: true}, + SnowflakeQueryBytesWrittenTotal: MetricSettings{Enabled: true}, + SnowflakeQueryCompilationTimeTotal: MetricSettings{Enabled: true}, + SnowflakeQueryDataScannedCacheAvg: MetricSettings{Enabled: true}, + SnowflakeQueryExecuted: MetricSettings{Enabled: true}, + SnowflakeQueryExecutionTimeTotal: MetricSettings{Enabled: true}, + SnowflakeQueryPartitionsScannedTotal: MetricSettings{Enabled: true}, + SnowflakeQueryQueuedOverload: MetricSettings{Enabled: true}, + SnowflakeQueryQueuedProvision: MetricSettings{Enabled: true}, + SnowflakeQueuedOverloadTimeAvg: MetricSettings{Enabled: true}, + SnowflakeQueuedOverloadTimeTotal: MetricSettings{Enabled: true}, + SnowflakeQueuedProvisioningTimeAvg: MetricSettings{Enabled: true}, + SnowflakeQueuedProvisioningTimeTotal: MetricSettings{Enabled: true}, + SnowflakeQueuedRepairTimeAvg: MetricSettings{Enabled: true}, + SnowflakeQueuedRepairTimeTotal: MetricSettings{Enabled: true}, + SnowflakeRowsDeletedTotal: MetricSettings{Enabled: true}, + SnowflakeRowsInsertedTotal: MetricSettings{Enabled: true}, + SnowflakeRowsProducedTotal: MetricSettings{Enabled: true}, + SnowflakeRowsUnloadedTotal: MetricSettings{Enabled: true}, + SnowflakeRowsUpdatedTotal: MetricSettings{Enabled: true}, + SnowflakeSessionIDCount: MetricSettings{Enabled: true}, + SnowflakeStorageFailsafeBytesTotal: MetricSettings{Enabled: true}, + SnowflakeStorageStageBytesTotal: MetricSettings{Enabled: true}, + SnowflakeStorageStorageBytesTotal: MetricSettings{Enabled: true}, + SnowflakeTotalElapsedTimeAvg: MetricSettings{Enabled: true}, + SnowflakeTotalElapsedTimeTotal: MetricSettings{Enabled: true}, + } + mb := NewMetricsBuilder(settings, component.BuildInfo{}, WithStartTime(start)) + + mb.RecordSnowflakeBillingCloudServiceTotalDataPoint(ts, 1, "attr-val") + mb.RecordSnowflakeBillingTotalCreditTotalDataPoint(ts, 1, "attr-val") + mb.RecordSnowflakeBillingVirtualWarehouseTotalDataPoint(ts, 1, "attr-val") + mb.RecordSnowflakeBillingWarehouseCloudServiceTotalDataPoint(ts, 1, "attr-val") + mb.RecordSnowflakeBillingWarehouseTotalCreditTotalDataPoint(ts, 1, "attr-val") + mb.RecordSnowflakeBillingWarehouseVirtualWarehouseTotalDataPoint(ts, 1, "attr-val") + 
mb.RecordSnowflakeDatabaseBytesScannedAvgDataPoint(ts, 1, "attr-val", "attr-val", "attr-val", "attr-val", "attr-val", "attr-val", "attr-val") + mb.RecordSnowflakeDatabaseQueryCountDataPoint(ts, 1, "attr-val", "attr-val", "attr-val", "attr-val", "attr-val", "attr-val", "attr-val") + mb.RecordSnowflakeLoginsTotalDataPoint(ts, 1, "attr-val", "attr-val", "attr-val") + mb.RecordSnowflakePipeCreditsUsedTotalDataPoint(ts, 1, "attr-val") + mb.RecordSnowflakeQueryBlockedDataPoint(ts, 1, "attr-val") + mb.RecordSnowflakeQueryBytesDeletedTotalDataPoint(ts, 1, "attr-val", "attr-val", "attr-val", "attr-val", "attr-val", "attr-val", "attr-val") + mb.RecordSnowflakeQueryBytesScannedTotalDataPoint(ts, 1, "attr-val", "attr-val", "attr-val", "attr-val", "attr-val", "attr-val", "attr-val") + mb.RecordSnowflakeQueryBytesSpilledLocalTotalDataPoint(ts, 1, "attr-val", "attr-val", "attr-val", "attr-val", "attr-val", "attr-val", "attr-val") + mb.RecordSnowflakeQueryBytesSpilledRemoteTotalDataPoint(ts, 1, "attr-val", "attr-val", "attr-val", "attr-val", "attr-val", "attr-val", "attr-val") + mb.RecordSnowflakeQueryBytesWrittenTotalDataPoint(ts, 1, "attr-val", "attr-val", "attr-val", "attr-val", "attr-val", "attr-val", "attr-val") + mb.RecordSnowflakeQueryCompilationTimeTotalDataPoint(ts, 1, "attr-val", "attr-val", "attr-val", "attr-val", "attr-val", "attr-val", "attr-val") + mb.RecordSnowflakeQueryDataScannedCacheAvgDataPoint(ts, 1, "attr-val", "attr-val", "attr-val", "attr-val", "attr-val", "attr-val", "attr-val") + mb.RecordSnowflakeQueryExecutedDataPoint(ts, 1, "attr-val") + mb.RecordSnowflakeQueryExecutionTimeTotalDataPoint(ts, 1, "attr-val", "attr-val", "attr-val", "attr-val", "attr-val", "attr-val", "attr-val") + mb.RecordSnowflakeQueryPartitionsScannedTotalDataPoint(ts, 1, "attr-val", "attr-val", "attr-val", "attr-val", "attr-val", "attr-val", "attr-val") + mb.RecordSnowflakeQueryQueuedOverloadDataPoint(ts, 1, "attr-val") + mb.RecordSnowflakeQueryQueuedProvisionDataPoint(ts, 1, "attr-val") + mb.RecordSnowflakeQueuedOverloadTimeAvgDataPoint(ts, 1, "attr-val", "attr-val", "attr-val", "attr-val", "attr-val", "attr-val", "attr-val") + mb.RecordSnowflakeQueuedOverloadTimeTotalDataPoint(ts, 1, "attr-val", "attr-val", "attr-val", "attr-val", "attr-val", "attr-val", "attr-val") + mb.RecordSnowflakeQueuedProvisioningTimeAvgDataPoint(ts, 1, "attr-val", "attr-val", "attr-val", "attr-val", "attr-val", "attr-val", "attr-val") + mb.RecordSnowflakeQueuedProvisioningTimeTotalDataPoint(ts, 1, "attr-val", "attr-val", "attr-val", "attr-val", "attr-val", "attr-val", "attr-val") + mb.RecordSnowflakeQueuedRepairTimeAvgDataPoint(ts, 1, "attr-val", "attr-val", "attr-val", "attr-val", "attr-val", "attr-val", "attr-val") + mb.RecordSnowflakeQueuedRepairTimeTotalDataPoint(ts, 1, "attr-val", "attr-val", "attr-val", "attr-val", "attr-val", "attr-val", "attr-val") + mb.RecordSnowflakeRowsDeletedTotalDataPoint(ts, 1, "attr-val", "attr-val", "attr-val", "attr-val", "attr-val", "attr-val", "attr-val") + mb.RecordSnowflakeRowsInsertedTotalDataPoint(ts, 1, "attr-val", "attr-val", "attr-val", "attr-val", "attr-val", "attr-val", "attr-val") + mb.RecordSnowflakeRowsProducedTotalDataPoint(ts, 1, "attr-val", "attr-val", "attr-val", "attr-val", "attr-val", "attr-val", "attr-val") + mb.RecordSnowflakeRowsUnloadedTotalDataPoint(ts, 1, "attr-val", "attr-val", "attr-val", "attr-val", "attr-val", "attr-val", "attr-val") + mb.RecordSnowflakeRowsUpdatedTotalDataPoint(ts, 1, "attr-val", "attr-val", "attr-val", "attr-val", "attr-val", "attr-val", "attr-val") + 
mb.RecordSnowflakeSessionIDCountDataPoint(ts, 1, "attr-val") + mb.RecordSnowflakeStorageFailsafeBytesTotalDataPoint(ts, 1) + mb.RecordSnowflakeStorageStageBytesTotalDataPoint(ts, 1) + mb.RecordSnowflakeStorageStorageBytesTotalDataPoint(ts, 1) + mb.RecordSnowflakeTotalElapsedTimeAvgDataPoint(ts, 1, "attr-val", "attr-val", "attr-val", "attr-val", "attr-val", "attr-val", "attr-val") + mb.RecordSnowflakeTotalElapsedTimeTotalDataPoint(ts, 1, "attr-val", "attr-val", "attr-val", "attr-val", "attr-val", "attr-val", "attr-val") + + metrics := mb.Emit(WithSnowflakeAccountName("attr-val"), WithSnowflakeUsername("attr-val"), WithSnowflakeWarehouseName("attr-val")) + + assert.Equal(t, 1, metrics.ResourceMetrics().Len()) + rm := metrics.ResourceMetrics().At(0) + attrCount := 0 + attrCount++ + attrVal, ok := rm.Resource().Attributes().Get("snowflake.account.name") + assert.True(t, ok) + assert.EqualValues(t, "attr-val", attrVal.Str()) + attrCount++ + attrVal, ok = rm.Resource().Attributes().Get("snowflake.username") + assert.True(t, ok) + assert.EqualValues(t, "attr-val", attrVal.Str()) + attrCount++ + attrVal, ok = rm.Resource().Attributes().Get("snowflake.warehouse.name") + assert.True(t, ok) + assert.EqualValues(t, "attr-val", attrVal.Str()) + assert.Equal(t, attrCount, rm.Resource().Attributes().Len()) + + assert.Equal(t, 1, rm.ScopeMetrics().Len()) + ms := rm.ScopeMetrics().At(0).Metrics() + allMetricsCount := reflect.TypeOf(MetricsSettings{}).NumField() + assert.Equal(t, allMetricsCount, ms.Len()) + validatedMetrics := make(map[string]struct{}) + for i := 0; i < ms.Len(); i++ { + switch ms.At(i).Name() { + case "snowflake.billing.cloud_service.total": + assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) + assert.Equal(t, "Reported total credits used in the cloud service.", ms.At(i).Description()) + assert.Equal(t, "{credits}", ms.At(i).Unit()) + dp := ms.At(i).Gauge().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeDouble, dp.ValueType()) + assert.Equal(t, float64(1), dp.DoubleValue()) + attrVal, ok := dp.Attributes().Get("service_type") + assert.True(t, ok) + assert.EqualValues(t, "attr-val", attrVal.Str()) + validatedMetrics["snowflake.billing.cloud_service.total"] = struct{}{} + case "snowflake.billing.total_credit.total": + assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) + assert.Equal(t, "Reported total credits used across account.", ms.At(i).Description()) + assert.Equal(t, "{credits}", ms.At(i).Unit()) + dp := ms.At(i).Gauge().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeDouble, dp.ValueType()) + assert.Equal(t, float64(1), dp.DoubleValue()) + attrVal, ok := dp.Attributes().Get("service_type") + assert.True(t, ok) + assert.EqualValues(t, "attr-val", attrVal.Str()) + validatedMetrics["snowflake.billing.total_credit.total"] = struct{}{} + case "snowflake.billing.virtual_warehouse.total": + assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) + assert.Equal(t, "Reported total credits used by virtual warehouse service.", ms.At(i).Description()) + assert.Equal(t, "{credits}", ms.At(i).Unit()) + dp := ms.At(i).Gauge().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + 
assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeDouble, dp.ValueType()) + assert.Equal(t, float64(1), dp.DoubleValue()) + attrVal, ok := dp.Attributes().Get("service_type") + assert.True(t, ok) + assert.EqualValues(t, "attr-val", attrVal.Str()) + validatedMetrics["snowflake.billing.virtual_warehouse.total"] = struct{}{} + case "snowflake.billing.warehouse.cloud_service.total": + assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) + assert.Equal(t, "Credits used across cloud service for given warehouse.", ms.At(i).Description()) + assert.Equal(t, "{credits}", ms.At(i).Unit()) + dp := ms.At(i).Gauge().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeDouble, dp.ValueType()) + assert.Equal(t, float64(1), dp.DoubleValue()) + attrVal, ok := dp.Attributes().Get("warehouse_name") + assert.True(t, ok) + assert.EqualValues(t, "attr-val", attrVal.Str()) + validatedMetrics["snowflake.billing.warehouse.cloud_service.total"] = struct{}{} + case "snowflake.billing.warehouse.total_credit.total": + assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) + assert.Equal(t, "Total credits used associated with given warehouse.", ms.At(i).Description()) + assert.Equal(t, "{credits}", ms.At(i).Unit()) + dp := ms.At(i).Gauge().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeDouble, dp.ValueType()) + assert.Equal(t, float64(1), dp.DoubleValue()) + attrVal, ok := dp.Attributes().Get("warehouse_name") + assert.True(t, ok) + assert.EqualValues(t, "attr-val", attrVal.Str()) + validatedMetrics["snowflake.billing.warehouse.total_credit.total"] = struct{}{} + case "snowflake.billing.warehouse.virtual_warehouse.total": + assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) + assert.Equal(t, "Total credits used by virtual warehouse service for given warehouse.", ms.At(i).Description()) + assert.Equal(t, "{credits}", ms.At(i).Unit()) + dp := ms.At(i).Gauge().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeDouble, dp.ValueType()) + assert.Equal(t, float64(1), dp.DoubleValue()) + attrVal, ok := dp.Attributes().Get("warehouse_name") + assert.True(t, ok) + assert.EqualValues(t, "attr-val", attrVal.Str()) + validatedMetrics["snowflake.billing.warehouse.virtual_warehouse.total"] = struct{}{} + case "snowflake.database.bytes_scanned.avg": + assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) + assert.Equal(t, "Average bytes scanned in a database.", ms.At(i).Description()) + assert.Equal(t, "By", ms.At(i).Unit()) + dp := ms.At(i).Gauge().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeDouble, dp.ValueType()) + assert.Equal(t, float64(1), dp.DoubleValue()) + attrVal, ok := dp.Attributes().Get("schema_name") + assert.True(t, ok) + assert.EqualValues(t, "attr-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("execution_status") + assert.True(t, ok) + assert.EqualValues(t, "attr-val", attrVal.Str()) + attrVal, ok = 
dp.Attributes().Get("error_message") + assert.True(t, ok) + assert.EqualValues(t, "attr-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("query_type") + assert.True(t, ok) + assert.EqualValues(t, "attr-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("warehouse_name") + assert.True(t, ok) + assert.EqualValues(t, "attr-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("database_name") + assert.True(t, ok) + assert.EqualValues(t, "attr-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("warehouse_size") + assert.True(t, ok) + assert.EqualValues(t, "attr-val", attrVal.Str()) + validatedMetrics["snowflake.database.bytes_scanned.avg"] = struct{}{} + case "snowflake.database.query.count": + assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) + assert.Equal(t, "Total query count for database.", ms.At(i).Description()) + assert.Equal(t, "1", ms.At(i).Unit()) + dp := ms.At(i).Gauge().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) + attrVal, ok := dp.Attributes().Get("schema_name") + assert.True(t, ok) + assert.EqualValues(t, "attr-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("execution_status") + assert.True(t, ok) + assert.EqualValues(t, "attr-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("error_message") + assert.True(t, ok) + assert.EqualValues(t, "attr-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("query_type") + assert.True(t, ok) + assert.EqualValues(t, "attr-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("warehouse_name") + assert.True(t, ok) + assert.EqualValues(t, "attr-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("database_name") + assert.True(t, ok) + assert.EqualValues(t, "attr-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("warehouse_size") + assert.True(t, ok) + assert.EqualValues(t, "attr-val", attrVal.Str()) + validatedMetrics["snowflake.database.query.count"] = struct{}{} + case "snowflake.logins.total": + assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) + assert.Equal(t, "Total login attempts for account.", ms.At(i).Description()) + assert.Equal(t, "1", ms.At(i).Unit()) + dp := ms.At(i).Gauge().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) + attrVal, ok := dp.Attributes().Get("error_message") + assert.True(t, ok) + assert.EqualValues(t, "attr-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("reported_client_type") + assert.True(t, ok) + assert.EqualValues(t, "attr-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("is_success") + assert.True(t, ok) + assert.EqualValues(t, "attr-val", attrVal.Str()) + validatedMetrics["snowflake.logins.total"] = struct{}{} + case "snowflake.pipe.credits_used.total": + assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) + assert.Equal(t, "Snow pipe credits contotaled.", ms.At(i).Description()) + assert.Equal(t, "{credits}", ms.At(i).Unit()) + dp := ms.At(i).Gauge().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, 
pmetric.NumberDataPointValueTypeDouble, dp.ValueType()) + assert.Equal(t, float64(1), dp.DoubleValue()) + attrVal, ok := dp.Attributes().Get("pipe_name") + assert.True(t, ok) + assert.EqualValues(t, "attr-val", attrVal.Str()) + validatedMetrics["snowflake.pipe.credits_used.total"] = struct{}{} + case "snowflake.query.blocked": + assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) + assert.Equal(t, "Blocked query count for warehouse.", ms.At(i).Description()) + assert.Equal(t, "1", ms.At(i).Unit()) + dp := ms.At(i).Gauge().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeDouble, dp.ValueType()) + assert.Equal(t, float64(1), dp.DoubleValue()) + attrVal, ok := dp.Attributes().Get("warehouse_name") + assert.True(t, ok) + assert.EqualValues(t, "attr-val", attrVal.Str()) + validatedMetrics["snowflake.query.blocked"] = struct{}{} + case "snowflake.query.bytes_deleted.total": + assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) + assert.Equal(t, "Total bytes deleted in database.", ms.At(i).Description()) + assert.Equal(t, "By", ms.At(i).Unit()) + dp := ms.At(i).Gauge().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) + attrVal, ok := dp.Attributes().Get("schema_name") + assert.True(t, ok) + assert.EqualValues(t, "attr-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("execution_status") + assert.True(t, ok) + assert.EqualValues(t, "attr-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("error_message") + assert.True(t, ok) + assert.EqualValues(t, "attr-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("query_type") + assert.True(t, ok) + assert.EqualValues(t, "attr-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("warehouse_name") + assert.True(t, ok) + assert.EqualValues(t, "attr-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("database_name") + assert.True(t, ok) + assert.EqualValues(t, "attr-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("warehouse_size") + assert.True(t, ok) + assert.EqualValues(t, "attr-val", attrVal.Str()) + validatedMetrics["snowflake.query.bytes_deleted.total"] = struct{}{} + case "snowflake.query.bytes_scanned.total": + assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) + assert.Equal(t, "Total bytes scanned in database.", ms.At(i).Description()) + assert.Equal(t, "By", ms.At(i).Unit()) + dp := ms.At(i).Gauge().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) + attrVal, ok := dp.Attributes().Get("schema_name") + assert.True(t, ok) + assert.EqualValues(t, "attr-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("execution_status") + assert.True(t, ok) + assert.EqualValues(t, "attr-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("error_message") + assert.True(t, ok) + assert.EqualValues(t, "attr-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("query_type") + assert.True(t, ok) + assert.EqualValues(t, "attr-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("warehouse_name") + 
assert.True(t, ok) + assert.EqualValues(t, "attr-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("database_name") + assert.True(t, ok) + assert.EqualValues(t, "attr-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("warehouse_size") + assert.True(t, ok) + assert.EqualValues(t, "attr-val", attrVal.Str()) + validatedMetrics["snowflake.query.bytes_scanned.total"] = struct{}{} + case "snowflake.query.bytes_spilled.local.total": + assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) + assert.Equal(t, "Total bytes spilled (intermediate results do not fit in memory) by local storage.", ms.At(i).Description()) + assert.Equal(t, "By", ms.At(i).Unit()) + dp := ms.At(i).Gauge().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) + attrVal, ok := dp.Attributes().Get("schema_name") + assert.True(t, ok) + assert.EqualValues(t, "attr-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("execution_status") + assert.True(t, ok) + assert.EqualValues(t, "attr-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("error_message") + assert.True(t, ok) + assert.EqualValues(t, "attr-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("query_type") + assert.True(t, ok) + assert.EqualValues(t, "attr-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("warehouse_name") + assert.True(t, ok) + assert.EqualValues(t, "attr-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("database_name") + assert.True(t, ok) + assert.EqualValues(t, "attr-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("warehouse_size") + assert.True(t, ok) + assert.EqualValues(t, "attr-val", attrVal.Str()) + validatedMetrics["snowflake.query.bytes_spilled.local.total"] = struct{}{} + case "snowflake.query.bytes_spilled.remote.total": + assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) + assert.Equal(t, "Total bytes spilled (intermediate results do not fit in memory) by remote storage.", ms.At(i).Description()) + assert.Equal(t, "By", ms.At(i).Unit()) + dp := ms.At(i).Gauge().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) + attrVal, ok := dp.Attributes().Get("schema_name") + assert.True(t, ok) + assert.EqualValues(t, "attr-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("execution_status") + assert.True(t, ok) + assert.EqualValues(t, "attr-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("error_message") + assert.True(t, ok) + assert.EqualValues(t, "attr-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("query_type") + assert.True(t, ok) + assert.EqualValues(t, "attr-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("warehouse_name") + assert.True(t, ok) + assert.EqualValues(t, "attr-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("database_name") + assert.True(t, ok) + assert.EqualValues(t, "attr-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("warehouse_size") + assert.True(t, ok) + assert.EqualValues(t, "attr-val", attrVal.Str()) + validatedMetrics["snowflake.query.bytes_spilled.remote.total"] = struct{}{} + case "snowflake.query.bytes_written.total": + assert.Equal(t, pmetric.MetricTypeGauge, 
ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) + assert.Equal(t, "Total bytes written by database.", ms.At(i).Description()) + assert.Equal(t, "By", ms.At(i).Unit()) + dp := ms.At(i).Gauge().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) + attrVal, ok := dp.Attributes().Get("schema_name") + assert.True(t, ok) + assert.EqualValues(t, "attr-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("execution_status") + assert.True(t, ok) + assert.EqualValues(t, "attr-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("error_message") + assert.True(t, ok) + assert.EqualValues(t, "attr-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("query_type") + assert.True(t, ok) + assert.EqualValues(t, "attr-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("warehouse_name") + assert.True(t, ok) + assert.EqualValues(t, "attr-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("database_name") + assert.True(t, ok) + assert.EqualValues(t, "attr-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("warehouse_size") + assert.True(t, ok) + assert.EqualValues(t, "attr-val", attrVal.Str()) + validatedMetrics["snowflake.query.bytes_written.total"] = struct{}{} + case "snowflake.query.compilation_time.total": + assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) + assert.Equal(t, "Total time taken to compile query.", ms.At(i).Description()) + assert.Equal(t, "s", ms.At(i).Unit()) + dp := ms.At(i).Gauge().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeDouble, dp.ValueType()) + assert.Equal(t, float64(1), dp.DoubleValue()) + attrVal, ok := dp.Attributes().Get("schema_name") + assert.True(t, ok) + assert.EqualValues(t, "attr-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("execution_status") + assert.True(t, ok) + assert.EqualValues(t, "attr-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("error_message") + assert.True(t, ok) + assert.EqualValues(t, "attr-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("query_type") + assert.True(t, ok) + assert.EqualValues(t, "attr-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("warehouse_name") + assert.True(t, ok) + assert.EqualValues(t, "attr-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("database_name") + assert.True(t, ok) + assert.EqualValues(t, "attr-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("warehouse_size") + assert.True(t, ok) + assert.EqualValues(t, "attr-val", attrVal.Str()) + validatedMetrics["snowflake.query.compilation_time.total"] = struct{}{} + case "snowflake.query.data_scanned_cache.avg": + assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) + assert.Equal(t, "Average percentage of data scanned from cache.", ms.At(i).Description()) + assert.Equal(t, "1", ms.At(i).Unit()) + dp := ms.At(i).Gauge().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeDouble, dp.ValueType()) + assert.Equal(t, float64(1), dp.DoubleValue()) + attrVal, ok := dp.Attributes().Get("schema_name") + assert.True(t, ok) + assert.EqualValues(t, "attr-val", attrVal.Str()) + attrVal, 
ok = dp.Attributes().Get("execution_status") + assert.True(t, ok) + assert.EqualValues(t, "attr-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("error_message") + assert.True(t, ok) + assert.EqualValues(t, "attr-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("query_type") + assert.True(t, ok) + assert.EqualValues(t, "attr-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("warehouse_name") + assert.True(t, ok) + assert.EqualValues(t, "attr-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("database_name") + assert.True(t, ok) + assert.EqualValues(t, "attr-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("warehouse_size") + assert.True(t, ok) + assert.EqualValues(t, "attr-val", attrVal.Str()) + validatedMetrics["snowflake.query.data_scanned_cache.avg"] = struct{}{} + case "snowflake.query.executed": + assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) + assert.Equal(t, "Executed query count for warehouse.", ms.At(i).Description()) + assert.Equal(t, "1", ms.At(i).Unit()) + dp := ms.At(i).Gauge().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeDouble, dp.ValueType()) + assert.Equal(t, float64(1), dp.DoubleValue()) + attrVal, ok := dp.Attributes().Get("warehouse_name") + assert.True(t, ok) + assert.EqualValues(t, "attr-val", attrVal.Str()) + validatedMetrics["snowflake.query.executed"] = struct{}{} + case "snowflake.query.execution_time.total": + assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) + assert.Equal(t, "Total time spent executing queries in database.", ms.At(i).Description()) + assert.Equal(t, "s", ms.At(i).Unit()) + dp := ms.At(i).Gauge().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeDouble, dp.ValueType()) + assert.Equal(t, float64(1), dp.DoubleValue()) + attrVal, ok := dp.Attributes().Get("schema_name") + assert.True(t, ok) + assert.EqualValues(t, "attr-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("execution_status") + assert.True(t, ok) + assert.EqualValues(t, "attr-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("error_message") + assert.True(t, ok) + assert.EqualValues(t, "attr-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("query_type") + assert.True(t, ok) + assert.EqualValues(t, "attr-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("warehouse_name") + assert.True(t, ok) + assert.EqualValues(t, "attr-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("database_name") + assert.True(t, ok) + assert.EqualValues(t, "attr-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("warehouse_size") + assert.True(t, ok) + assert.EqualValues(t, "attr-val", attrVal.Str()) + validatedMetrics["snowflake.query.execution_time.total"] = struct{}{} + case "snowflake.query.partitions_scanned.total": + assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) + assert.Equal(t, "Number of partitions scanned during query so far.", ms.At(i).Description()) + assert.Equal(t, "1", ms.At(i).Unit()) + dp := ms.At(i).Gauge().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), 
dp.IntValue()) + attrVal, ok := dp.Attributes().Get("schema_name") + assert.True(t, ok) + assert.EqualValues(t, "attr-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("execution_status") + assert.True(t, ok) + assert.EqualValues(t, "attr-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("error_message") + assert.True(t, ok) + assert.EqualValues(t, "attr-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("query_type") + assert.True(t, ok) + assert.EqualValues(t, "attr-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("warehouse_name") + assert.True(t, ok) + assert.EqualValues(t, "attr-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("database_name") + assert.True(t, ok) + assert.EqualValues(t, "attr-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("warehouse_size") + assert.True(t, ok) + assert.EqualValues(t, "attr-val", attrVal.Str()) + validatedMetrics["snowflake.query.partitions_scanned.total"] = struct{}{} + case "snowflake.query.queued_overload": + assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) + assert.Equal(t, "Overloaded query count for warehouse.", ms.At(i).Description()) + assert.Equal(t, "1", ms.At(i).Unit()) + dp := ms.At(i).Gauge().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeDouble, dp.ValueType()) + assert.Equal(t, float64(1), dp.DoubleValue()) + attrVal, ok := dp.Attributes().Get("warehouse_name") + assert.True(t, ok) + assert.EqualValues(t, "attr-val", attrVal.Str()) + validatedMetrics["snowflake.query.queued_overload"] = struct{}{} + case "snowflake.query.queued_provision": + assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) + assert.Equal(t, "Number of compute resources queued for provisioning.", ms.At(i).Description()) + assert.Equal(t, "1", ms.At(i).Unit()) + dp := ms.At(i).Gauge().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeDouble, dp.ValueType()) + assert.Equal(t, float64(1), dp.DoubleValue()) + attrVal, ok := dp.Attributes().Get("warehouse_name") + assert.True(t, ok) + assert.EqualValues(t, "attr-val", attrVal.Str()) + validatedMetrics["snowflake.query.queued_provision"] = struct{}{} + case "snowflake.queued_overload_time.avg": + assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) + assert.Equal(t, "Average time spent in warehouse queue due to warehouse being overloaded.", ms.At(i).Description()) + assert.Equal(t, "s", ms.At(i).Unit()) + dp := ms.At(i).Gauge().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeDouble, dp.ValueType()) + assert.Equal(t, float64(1), dp.DoubleValue()) + attrVal, ok := dp.Attributes().Get("schema_name") + assert.True(t, ok) + assert.EqualValues(t, "attr-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("execution_status") + assert.True(t, ok) + assert.EqualValues(t, "attr-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("error_message") + assert.True(t, ok) + assert.EqualValues(t, "attr-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("query_type") + assert.True(t, ok) + assert.EqualValues(t, "attr-val", attrVal.Str()) + attrVal, ok = 
dp.Attributes().Get("warehouse_name") + assert.True(t, ok) + assert.EqualValues(t, "attr-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("database_name") + assert.True(t, ok) + assert.EqualValues(t, "attr-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("warehouse_size") + assert.True(t, ok) + assert.EqualValues(t, "attr-val", attrVal.Str()) + validatedMetrics["snowflake.queued_overload_time.avg"] = struct{}{} + case "snowflake.queued_overload_time.total": + assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) + assert.Equal(t, "Total time spent in warehouse queue due to warehouse being overloaded.", ms.At(i).Description()) + assert.Equal(t, "s", ms.At(i).Unit()) + dp := ms.At(i).Gauge().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) + attrVal, ok := dp.Attributes().Get("schema_name") + assert.True(t, ok) + assert.EqualValues(t, "attr-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("execution_status") + assert.True(t, ok) + assert.EqualValues(t, "attr-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("error_message") + assert.True(t, ok) + assert.EqualValues(t, "attr-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("query_type") + assert.True(t, ok) + assert.EqualValues(t, "attr-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("warehouse_name") + assert.True(t, ok) + assert.EqualValues(t, "attr-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("database_name") + assert.True(t, ok) + assert.EqualValues(t, "attr-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("warehouse_size") + assert.True(t, ok) + assert.EqualValues(t, "attr-val", attrVal.Str()) + validatedMetrics["snowflake.queued_overload_time.total"] = struct{}{} + case "snowflake.queued_provisioning_time.avg": + assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) + assert.Equal(t, "Average time spent in warehouse queue waiting for resources to provision.", ms.At(i).Description()) + assert.Equal(t, "s", ms.At(i).Unit()) + dp := ms.At(i).Gauge().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeDouble, dp.ValueType()) + assert.Equal(t, float64(1), dp.DoubleValue()) + attrVal, ok := dp.Attributes().Get("schema_name") + assert.True(t, ok) + assert.EqualValues(t, "attr-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("execution_status") + assert.True(t, ok) + assert.EqualValues(t, "attr-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("error_message") + assert.True(t, ok) + assert.EqualValues(t, "attr-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("query_type") + assert.True(t, ok) + assert.EqualValues(t, "attr-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("warehouse_name") + assert.True(t, ok) + assert.EqualValues(t, "attr-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("database_name") + assert.True(t, ok) + assert.EqualValues(t, "attr-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("warehouse_size") + assert.True(t, ok) + assert.EqualValues(t, "attr-val", attrVal.Str()) + validatedMetrics["snowflake.queued_provisioning_time.avg"] = struct{}{} + case "snowflake.queued_provisioning_time.total": + assert.Equal(t, 
pmetric.MetricTypeGauge, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) + assert.Equal(t, "Total time spent in warehouse queue waiting for resources to provision.", ms.At(i).Description()) + assert.Equal(t, "s", ms.At(i).Unit()) + dp := ms.At(i).Gauge().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) + attrVal, ok := dp.Attributes().Get("schema_name") + assert.True(t, ok) + assert.EqualValues(t, "attr-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("execution_status") + assert.True(t, ok) + assert.EqualValues(t, "attr-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("error_message") + assert.True(t, ok) + assert.EqualValues(t, "attr-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("query_type") + assert.True(t, ok) + assert.EqualValues(t, "attr-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("warehouse_name") + assert.True(t, ok) + assert.EqualValues(t, "attr-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("database_name") + assert.True(t, ok) + assert.EqualValues(t, "attr-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("warehouse_size") + assert.True(t, ok) + assert.EqualValues(t, "attr-val", attrVal.Str()) + validatedMetrics["snowflake.queued_provisioning_time.total"] = struct{}{} + case "snowflake.queued_repair_time.avg": + assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) + assert.Equal(t, "Average time spent in warehouse queue waiting for compute resources to be repaired.", ms.At(i).Description()) + assert.Equal(t, "s", ms.At(i).Unit()) + dp := ms.At(i).Gauge().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeDouble, dp.ValueType()) + assert.Equal(t, float64(1), dp.DoubleValue()) + attrVal, ok := dp.Attributes().Get("schema_name") + assert.True(t, ok) + assert.EqualValues(t, "attr-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("execution_status") + assert.True(t, ok) + assert.EqualValues(t, "attr-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("error_message") + assert.True(t, ok) + assert.EqualValues(t, "attr-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("query_type") + assert.True(t, ok) + assert.EqualValues(t, "attr-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("warehouse_name") + assert.True(t, ok) + assert.EqualValues(t, "attr-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("database_name") + assert.True(t, ok) + assert.EqualValues(t, "attr-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("warehouse_size") + assert.True(t, ok) + assert.EqualValues(t, "attr-val", attrVal.Str()) + validatedMetrics["snowflake.queued_repair_time.avg"] = struct{}{} + case "snowflake.queued_repair_time.total": + assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) + assert.Equal(t, "Total time spent in warehouse queue waiting for compute resources to be repaired.", ms.At(i).Description()) + assert.Equal(t, "s", ms.At(i).Unit()) + dp := ms.At(i).Gauge().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) + 
attrVal, ok := dp.Attributes().Get("schema_name") + assert.True(t, ok) + assert.EqualValues(t, "attr-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("execution_status") + assert.True(t, ok) + assert.EqualValues(t, "attr-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("error_message") + assert.True(t, ok) + assert.EqualValues(t, "attr-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("query_type") + assert.True(t, ok) + assert.EqualValues(t, "attr-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("warehouse_name") + assert.True(t, ok) + assert.EqualValues(t, "attr-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("database_name") + assert.True(t, ok) + assert.EqualValues(t, "attr-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("warehouse_size") + assert.True(t, ok) + assert.EqualValues(t, "attr-val", attrVal.Str()) + validatedMetrics["snowflake.queued_repair_time.total"] = struct{}{} + case "snowflake.rows_deleted.total": + assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) + assert.Equal(t, "Number of rows deleted from a table (or tables).", ms.At(i).Description()) + assert.Equal(t, "{rows}", ms.At(i).Unit()) + dp := ms.At(i).Gauge().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) + attrVal, ok := dp.Attributes().Get("schema_name") + assert.True(t, ok) + assert.EqualValues(t, "attr-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("execution_status") + assert.True(t, ok) + assert.EqualValues(t, "attr-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("error_message") + assert.True(t, ok) + assert.EqualValues(t, "attr-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("query_type") + assert.True(t, ok) + assert.EqualValues(t, "attr-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("warehouse_name") + assert.True(t, ok) + assert.EqualValues(t, "attr-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("database_name") + assert.True(t, ok) + assert.EqualValues(t, "attr-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("warehouse_size") + assert.True(t, ok) + assert.EqualValues(t, "attr-val", attrVal.Str()) + validatedMetrics["snowflake.rows_deleted.total"] = struct{}{} + case "snowflake.rows_inserted.total": + assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) + assert.Equal(t, "Number of rows inserted into a table (or tables).", ms.At(i).Description()) + assert.Equal(t, "{rows}", ms.At(i).Unit()) + dp := ms.At(i).Gauge().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) + attrVal, ok := dp.Attributes().Get("schema_name") + assert.True(t, ok) + assert.EqualValues(t, "attr-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("execution_status") + assert.True(t, ok) + assert.EqualValues(t, "attr-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("error_message") + assert.True(t, ok) + assert.EqualValues(t, "attr-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("query_type") + assert.True(t, ok) + assert.EqualValues(t, "attr-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("warehouse_name") + assert.True(t, ok) + 
assert.EqualValues(t, "attr-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("database_name") + assert.True(t, ok) + assert.EqualValues(t, "attr-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("warehouse_size") + assert.True(t, ok) + assert.EqualValues(t, "attr-val", attrVal.Str()) + validatedMetrics["snowflake.rows_inserted.total"] = struct{}{} + case "snowflake.rows_produced.total": + assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) + assert.Equal(t, "Total number of rows produced by statement.", ms.At(i).Description()) + assert.Equal(t, "{rows}", ms.At(i).Unit()) + dp := ms.At(i).Gauge().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) + attrVal, ok := dp.Attributes().Get("schema_name") + assert.True(t, ok) + assert.EqualValues(t, "attr-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("execution_status") + assert.True(t, ok) + assert.EqualValues(t, "attr-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("error_message") + assert.True(t, ok) + assert.EqualValues(t, "attr-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("query_type") + assert.True(t, ok) + assert.EqualValues(t, "attr-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("warehouse_name") + assert.True(t, ok) + assert.EqualValues(t, "attr-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("database_name") + assert.True(t, ok) + assert.EqualValues(t, "attr-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("warehouse_size") + assert.True(t, ok) + assert.EqualValues(t, "attr-val", attrVal.Str()) + validatedMetrics["snowflake.rows_produced.total"] = struct{}{} + case "snowflake.rows_unloaded.total": + assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) + assert.Equal(t, "Total number of rows unloaded during data export.", ms.At(i).Description()) + assert.Equal(t, "{rows}", ms.At(i).Unit()) + dp := ms.At(i).Gauge().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) + attrVal, ok := dp.Attributes().Get("schema_name") + assert.True(t, ok) + assert.EqualValues(t, "attr-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("execution_status") + assert.True(t, ok) + assert.EqualValues(t, "attr-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("error_message") + assert.True(t, ok) + assert.EqualValues(t, "attr-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("query_type") + assert.True(t, ok) + assert.EqualValues(t, "attr-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("warehouse_name") + assert.True(t, ok) + assert.EqualValues(t, "attr-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("database_name") + assert.True(t, ok) + assert.EqualValues(t, "attr-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("warehouse_size") + assert.True(t, ok) + assert.EqualValues(t, "attr-val", attrVal.Str()) + validatedMetrics["snowflake.rows_unloaded.total"] = struct{}{} + case "snowflake.rows_updated.total": + assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) + assert.Equal(t, "Total number of rows updated in a table.", 
ms.At(i).Description()) + assert.Equal(t, "{rows}", ms.At(i).Unit()) + dp := ms.At(i).Gauge().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) + attrVal, ok := dp.Attributes().Get("schema_name") + assert.True(t, ok) + assert.EqualValues(t, "attr-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("execution_status") + assert.True(t, ok) + assert.EqualValues(t, "attr-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("error_message") + assert.True(t, ok) + assert.EqualValues(t, "attr-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("query_type") + assert.True(t, ok) + assert.EqualValues(t, "attr-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("warehouse_name") + assert.True(t, ok) + assert.EqualValues(t, "attr-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("database_name") + assert.True(t, ok) + assert.EqualValues(t, "attr-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("warehouse_size") + assert.True(t, ok) + assert.EqualValues(t, "attr-val", attrVal.Str()) + validatedMetrics["snowflake.rows_updated.total"] = struct{}{} + case "snowflake.session_id.count": + assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) + assert.Equal(t, "Distinct session IDs associated with snowflake username.", ms.At(i).Description()) + assert.Equal(t, "1", ms.At(i).Unit()) + dp := ms.At(i).Gauge().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) + attrVal, ok := dp.Attributes().Get("user_name") + assert.True(t, ok) + assert.EqualValues(t, "attr-val", attrVal.Str()) + validatedMetrics["snowflake.session_id.count"] = struct{}{} + case "snowflake.storage.failsafe_bytes.total": + assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) + assert.Equal(t, "Number of bytes of data in Fail-safe.", ms.At(i).Description()) + assert.Equal(t, "By", ms.At(i).Unit()) + dp := ms.At(i).Gauge().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) + validatedMetrics["snowflake.storage.failsafe_bytes.total"] = struct{}{} + case "snowflake.storage.stage_bytes.total": + assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) + assert.Equal(t, "Number of bytes of stage storage used by files in all internal stages (named, table, user).", ms.At(i).Description()) + assert.Equal(t, "By", ms.At(i).Unit()) + dp := ms.At(i).Gauge().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) + validatedMetrics["snowflake.storage.stage_bytes.total"] = struct{}{} + case "snowflake.storage.storage_bytes.total": + assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) + assert.Equal(t, "Number of bytes of table storage used, including bytes for data currently in Time Travel.", ms.At(i).Description()) + 
assert.Equal(t, "By", ms.At(i).Unit()) + dp := ms.At(i).Gauge().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) + validatedMetrics["snowflake.storage.storage_bytes.total"] = struct{}{} + case "snowflake.total_elapsed_time.avg": + assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) + assert.Equal(t, "Average elapsed time.", ms.At(i).Description()) + assert.Equal(t, "s", ms.At(i).Unit()) + dp := ms.At(i).Gauge().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeDouble, dp.ValueType()) + assert.Equal(t, float64(1), dp.DoubleValue()) + attrVal, ok := dp.Attributes().Get("schema_name") + assert.True(t, ok) + assert.EqualValues(t, "attr-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("execution_status") + assert.True(t, ok) + assert.EqualValues(t, "attr-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("error_message") + assert.True(t, ok) + assert.EqualValues(t, "attr-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("query_type") + assert.True(t, ok) + assert.EqualValues(t, "attr-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("warehouse_name") + assert.True(t, ok) + assert.EqualValues(t, "attr-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("database_name") + assert.True(t, ok) + assert.EqualValues(t, "attr-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("warehouse_size") + assert.True(t, ok) + assert.EqualValues(t, "attr-val", attrVal.Str()) + validatedMetrics["snowflake.total_elapsed_time.avg"] = struct{}{} + case "snowflake.total_elapsed_time.total": + assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) + assert.Equal(t, "Total elapsed time.", ms.At(i).Description()) + assert.Equal(t, "s", ms.At(i).Unit()) + dp := ms.At(i).Gauge().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeInt, dp.ValueType()) + assert.Equal(t, int64(1), dp.IntValue()) + attrVal, ok := dp.Attributes().Get("schema_name") + assert.True(t, ok) + assert.EqualValues(t, "attr-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("execution_status") + assert.True(t, ok) + assert.EqualValues(t, "attr-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("error_message") + assert.True(t, ok) + assert.EqualValues(t, "attr-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("query_type") + assert.True(t, ok) + assert.EqualValues(t, "attr-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("warehouse_name") + assert.True(t, ok) + assert.EqualValues(t, "attr-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("database_name") + assert.True(t, ok) + assert.EqualValues(t, "attr-val", attrVal.Str()) + attrVal, ok = dp.Attributes().Get("warehouse_size") + assert.True(t, ok) + assert.EqualValues(t, "attr-val", attrVal.Str()) + validatedMetrics["snowflake.total_elapsed_time.total"] = struct{}{} + } + } + assert.Equal(t, allMetricsCount, len(validatedMetrics)) +} + +func TestNoMetrics(t *testing.T) { + start := pcommon.Timestamp(1_000_000_000) + ts := pcommon.Timestamp(1_000_001_000) + settings := MetricsSettings{ + SnowflakeBillingCloudServiceTotal: 
MetricSettings{Enabled: false}, + SnowflakeBillingTotalCreditTotal: MetricSettings{Enabled: false}, + SnowflakeBillingVirtualWarehouseTotal: MetricSettings{Enabled: false}, + SnowflakeBillingWarehouseCloudServiceTotal: MetricSettings{Enabled: false}, + SnowflakeBillingWarehouseTotalCreditTotal: MetricSettings{Enabled: false}, + SnowflakeBillingWarehouseVirtualWarehouseTotal: MetricSettings{Enabled: false}, + SnowflakeDatabaseBytesScannedAvg: MetricSettings{Enabled: false}, + SnowflakeDatabaseQueryCount: MetricSettings{Enabled: false}, + SnowflakeLoginsTotal: MetricSettings{Enabled: false}, + SnowflakePipeCreditsUsedTotal: MetricSettings{Enabled: false}, + SnowflakeQueryBlocked: MetricSettings{Enabled: false}, + SnowflakeQueryBytesDeletedTotal: MetricSettings{Enabled: false}, + SnowflakeQueryBytesScannedTotal: MetricSettings{Enabled: false}, + SnowflakeQueryBytesSpilledLocalTotal: MetricSettings{Enabled: false}, + SnowflakeQueryBytesSpilledRemoteTotal: MetricSettings{Enabled: false}, + SnowflakeQueryBytesWrittenTotal: MetricSettings{Enabled: false}, + SnowflakeQueryCompilationTimeTotal: MetricSettings{Enabled: false}, + SnowflakeQueryDataScannedCacheAvg: MetricSettings{Enabled: false}, + SnowflakeQueryExecuted: MetricSettings{Enabled: false}, + SnowflakeQueryExecutionTimeTotal: MetricSettings{Enabled: false}, + SnowflakeQueryPartitionsScannedTotal: MetricSettings{Enabled: false}, + SnowflakeQueryQueuedOverload: MetricSettings{Enabled: false}, + SnowflakeQueryQueuedProvision: MetricSettings{Enabled: false}, + SnowflakeQueuedOverloadTimeAvg: MetricSettings{Enabled: false}, + SnowflakeQueuedOverloadTimeTotal: MetricSettings{Enabled: false}, + SnowflakeQueuedProvisioningTimeAvg: MetricSettings{Enabled: false}, + SnowflakeQueuedProvisioningTimeTotal: MetricSettings{Enabled: false}, + SnowflakeQueuedRepairTimeAvg: MetricSettings{Enabled: false}, + SnowflakeQueuedRepairTimeTotal: MetricSettings{Enabled: false}, + SnowflakeRowsDeletedTotal: MetricSettings{Enabled: false}, + SnowflakeRowsInsertedTotal: MetricSettings{Enabled: false}, + SnowflakeRowsProducedTotal: MetricSettings{Enabled: false}, + SnowflakeRowsUnloadedTotal: MetricSettings{Enabled: false}, + SnowflakeRowsUpdatedTotal: MetricSettings{Enabled: false}, + SnowflakeSessionIDCount: MetricSettings{Enabled: false}, + SnowflakeStorageFailsafeBytesTotal: MetricSettings{Enabled: false}, + SnowflakeStorageStageBytesTotal: MetricSettings{Enabled: false}, + SnowflakeStorageStorageBytesTotal: MetricSettings{Enabled: false}, + SnowflakeTotalElapsedTimeAvg: MetricSettings{Enabled: false}, + SnowflakeTotalElapsedTimeTotal: MetricSettings{Enabled: false}, + } + mb := NewMetricsBuilder(settings, component.BuildInfo{}, WithStartTime(start)) + mb.RecordSnowflakeBillingCloudServiceTotalDataPoint(ts, 1, "attr-val") + mb.RecordSnowflakeBillingTotalCreditTotalDataPoint(ts, 1, "attr-val") + mb.RecordSnowflakeBillingVirtualWarehouseTotalDataPoint(ts, 1, "attr-val") + mb.RecordSnowflakeBillingWarehouseCloudServiceTotalDataPoint(ts, 1, "attr-val") + mb.RecordSnowflakeBillingWarehouseTotalCreditTotalDataPoint(ts, 1, "attr-val") + mb.RecordSnowflakeBillingWarehouseVirtualWarehouseTotalDataPoint(ts, 1, "attr-val") + mb.RecordSnowflakeDatabaseBytesScannedAvgDataPoint(ts, 1, "attr-val", "attr-val", "attr-val", "attr-val", "attr-val", "attr-val", "attr-val") + mb.RecordSnowflakeDatabaseQueryCountDataPoint(ts, 1, "attr-val", "attr-val", "attr-val", "attr-val", "attr-val", "attr-val", "attr-val") + mb.RecordSnowflakeLoginsTotalDataPoint(ts, 1, "attr-val", "attr-val", 
"attr-val") + mb.RecordSnowflakePipeCreditsUsedTotalDataPoint(ts, 1, "attr-val") + mb.RecordSnowflakeQueryBlockedDataPoint(ts, 1, "attr-val") + mb.RecordSnowflakeQueryBytesDeletedTotalDataPoint(ts, 1, "attr-val", "attr-val", "attr-val", "attr-val", "attr-val", "attr-val", "attr-val") + mb.RecordSnowflakeQueryBytesScannedTotalDataPoint(ts, 1, "attr-val", "attr-val", "attr-val", "attr-val", "attr-val", "attr-val", "attr-val") + mb.RecordSnowflakeQueryBytesSpilledLocalTotalDataPoint(ts, 1, "attr-val", "attr-val", "attr-val", "attr-val", "attr-val", "attr-val", "attr-val") + mb.RecordSnowflakeQueryBytesSpilledRemoteTotalDataPoint(ts, 1, "attr-val", "attr-val", "attr-val", "attr-val", "attr-val", "attr-val", "attr-val") + mb.RecordSnowflakeQueryBytesWrittenTotalDataPoint(ts, 1, "attr-val", "attr-val", "attr-val", "attr-val", "attr-val", "attr-val", "attr-val") + mb.RecordSnowflakeQueryCompilationTimeTotalDataPoint(ts, 1, "attr-val", "attr-val", "attr-val", "attr-val", "attr-val", "attr-val", "attr-val") + mb.RecordSnowflakeQueryDataScannedCacheAvgDataPoint(ts, 1, "attr-val", "attr-val", "attr-val", "attr-val", "attr-val", "attr-val", "attr-val") + mb.RecordSnowflakeQueryExecutedDataPoint(ts, 1, "attr-val") + mb.RecordSnowflakeQueryExecutionTimeTotalDataPoint(ts, 1, "attr-val", "attr-val", "attr-val", "attr-val", "attr-val", "attr-val", "attr-val") + mb.RecordSnowflakeQueryPartitionsScannedTotalDataPoint(ts, 1, "attr-val", "attr-val", "attr-val", "attr-val", "attr-val", "attr-val", "attr-val") + mb.RecordSnowflakeQueryQueuedOverloadDataPoint(ts, 1, "attr-val") + mb.RecordSnowflakeQueryQueuedProvisionDataPoint(ts, 1, "attr-val") + mb.RecordSnowflakeQueuedOverloadTimeAvgDataPoint(ts, 1, "attr-val", "attr-val", "attr-val", "attr-val", "attr-val", "attr-val", "attr-val") + mb.RecordSnowflakeQueuedOverloadTimeTotalDataPoint(ts, 1, "attr-val", "attr-val", "attr-val", "attr-val", "attr-val", "attr-val", "attr-val") + mb.RecordSnowflakeQueuedProvisioningTimeAvgDataPoint(ts, 1, "attr-val", "attr-val", "attr-val", "attr-val", "attr-val", "attr-val", "attr-val") + mb.RecordSnowflakeQueuedProvisioningTimeTotalDataPoint(ts, 1, "attr-val", "attr-val", "attr-val", "attr-val", "attr-val", "attr-val", "attr-val") + mb.RecordSnowflakeQueuedRepairTimeAvgDataPoint(ts, 1, "attr-val", "attr-val", "attr-val", "attr-val", "attr-val", "attr-val", "attr-val") + mb.RecordSnowflakeQueuedRepairTimeTotalDataPoint(ts, 1, "attr-val", "attr-val", "attr-val", "attr-val", "attr-val", "attr-val", "attr-val") + mb.RecordSnowflakeRowsDeletedTotalDataPoint(ts, 1, "attr-val", "attr-val", "attr-val", "attr-val", "attr-val", "attr-val", "attr-val") + mb.RecordSnowflakeRowsInsertedTotalDataPoint(ts, 1, "attr-val", "attr-val", "attr-val", "attr-val", "attr-val", "attr-val", "attr-val") + mb.RecordSnowflakeRowsProducedTotalDataPoint(ts, 1, "attr-val", "attr-val", "attr-val", "attr-val", "attr-val", "attr-val", "attr-val") + mb.RecordSnowflakeRowsUnloadedTotalDataPoint(ts, 1, "attr-val", "attr-val", "attr-val", "attr-val", "attr-val", "attr-val", "attr-val") + mb.RecordSnowflakeRowsUpdatedTotalDataPoint(ts, 1, "attr-val", "attr-val", "attr-val", "attr-val", "attr-val", "attr-val", "attr-val") + mb.RecordSnowflakeSessionIDCountDataPoint(ts, 1, "attr-val") + mb.RecordSnowflakeStorageFailsafeBytesTotalDataPoint(ts, 1) + mb.RecordSnowflakeStorageStageBytesTotalDataPoint(ts, 1) + mb.RecordSnowflakeStorageStorageBytesTotalDataPoint(ts, 1) + mb.RecordSnowflakeTotalElapsedTimeAvgDataPoint(ts, 1, "attr-val", "attr-val", "attr-val", "attr-val", 
"attr-val", "attr-val", "attr-val") + mb.RecordSnowflakeTotalElapsedTimeTotalDataPoint(ts, 1, "attr-val", "attr-val", "attr-val", "attr-val", "attr-val", "attr-val", "attr-val") + + metrics := mb.Emit() + + assert.Equal(t, 0, metrics.ResourceMetrics().Len()) +} diff --git a/receiver/snowflakereceiver/metadata.yaml b/receiver/snowflakereceiver/metadata.yaml index 8b137891791f..eaef71d6ce60 100644 --- a/receiver/snowflakereceiver/metadata.yaml +++ b/receiver/snowflakereceiver/metadata.yaml @@ -1 +1,306 @@ +name: snowflakereceiver +# every meter will have these attributes +resource_attributes: + snowflake.account.name: + description: Snowflake account being used by receiver. + type: string + +attributes: + service_type: + description: Service type associateed with metric query + type: string + error_message: + description: Error message reported by query if present + type: string + reported_client_type: + description: Client type used for attempt + type: string + is_success: + description: Login status (success or failure). + type: string + schema_name: + description: Name of schema associated with query result. + type: string + execution_status: + description: Execution status of query being reported. + type: string + query_type: + description: Type of query performed. + type: string + database_name: + description: Name of database being queried (default is snowflake). + type: string + warehouse_size: + description: Size of warehouse being reported on. + type: string + pipe_name: + description: Name of snowpipe. + type: string + warehouse_name: + description: Name of warehouse in query being reported on. + type: string + user_name: + description: Username in query being reported. + type: string + + +# sql query associated with each group of metrics included +metrics: + # Billing Metrics + snowflake.billing.cloud_service.total: + description: Reported total credits used in the cloud service over the last 24 hour window. + unit: "{credits}" + gauge: + value_type: double + enabled: false + attributes: [service_type] + snowflake.billing.total_credit.total: + description: Reported total credits used across account over the last 24 hour window. + unit: "{credits}" + gauge: + value_type: double + enabled: false + attributes: [service_type] + snowflake.billing.virtual_warehouse.total: + description: Reported total credits used by virtual warehouse service over the last 24 hour window. + unit: "{credits}" + attributes: [service_type] + gauge: + value_type: double + enabled: false + + # Warehouse Billing Metrics + snowflake.billing.warehouse.cloud_service.total: + description: Credits used across cloud service for given warehouse over the last 24 hour window. + unit: "{credits}" + gauge: + value_type: double + enabled: false + attributes: [warehouse_name] + snowflake.billing.warehouse.total_credit.total: + description: Total credits used associated with given warehouse over the last 24 hour window. + unit: "{credits}" + gauge: + value_type: double + enabled: false + attributes: [warehouse_name] + snowflake.billing.warehouse.virtual_warehouse.total: + description: Total credits used by virtual warehouse service for given warehouse over the last 24 hour window. + unit: "{credits}" + gauge: + value_type: double + enabled: false + attributes: [warehouse_name] + + # Login (Security) metrics + snowflake.logins.total: + description: Total login attempts for account over the last 24 hour window. 
+ unit: 1 + gauge: + value_type: int + enabled: false + attributes: [error_message, reported_client_type, is_success] + + # High level low dimensionality query metrics + snowflake.query.blocked: + description: Blocked query count for warehouse over the last 24 hour window. + unit: 1 + gauge: + value_type: double + enabled: true + attributes: [warehouse_name] + snowflake.query.executed: + description: Executed query count for warehouse over the last 24 hour window. + unit: 1 + gauge: + value_type: double + enabled: true + attributes: [warehouse_name] + snowflake.query.queued_overload: + description: Overloaded query count for warehouse over the last 24 hour window. + unit: 1 + gauge: + value_type: double + enabled: true + attributes: [warehouse_name] + snowflake.query.queued_provision: + description: Number of compute resources queued for provisioning over the last 24 hour window. + unit: 1 + gauge: + value_type: double + enabled: true + attributes: [warehouse_name] + + # DB metrics + snowflake.database.query.count: + description: Total query count for database over the last 24 hour window. + unit: 1 + gauge: + value_type: int + enabled: true + attributes: [schema_name, execution_status, error_message, query_type, warehouse_name, database_name, warehouse_size] + snowflake.database.bytes_scanned.avg: + description: Average bytes scanned in a database over the last 24 hour window. + unit: By + gauge: + value_type: double + enabled: true + attributes: [schema_name, execution_status, error_message, query_type, warehouse_name, database_name, warehouse_size] + snowflake.query.bytes_deleted.avg: + description: Average bytes deleted in database over the last 24 hour window. + unit: By + gauge: + value_type: int + enabled: true + attributes: [schema_name, execution_status, error_message, query_type, warehouse_name, database_name, warehouse_size] + snowflake.query.bytes_spilled.local.avg: + description: Average bytes spilled (intermediate results do not fit in memory) by local storage over the last 24 hour window. + unit: By + gauge: + value_type: int + enabled: false + attributes: [schema_name, execution_status, error_message, query_type, warehouse_name, database_name, warehouse_size] + snowflake.query.bytes_spilled.remote.avg: + description: Average bytes spilled (intermediate results do not fit in memory) by remote storage over the last 24 hour window. + unit: By + gauge: + value_type: int + enabled: false + attributes: [schema_name, execution_status, error_message, query_type, warehouse_name, database_name, warehouse_size] + snowflake.query.bytes_written.avg: + description: Average bytes written by database over the last 24 hour window. + unit: By + gauge: + value_type: int + enabled: true + attributes: [schema_name, execution_status, error_message, query_type, warehouse_name, database_name, warehouse_size] + snowflake.query.compilation_time.avg: + description: Average time taken to compile query over the last 24 hour window. + unit: s + gauge: + value_type: double + enabled: true + attributes: [schema_name, execution_status, error_message, query_type, warehouse_name, database_name, warehouse_size] + snowflake.query.data_scanned_cache.avg: + description: Average percentage of data scanned from cache over the last 24 hour window. 
+ unit: 1 + gauge: + value_type: double + enabled: false + attributes: [schema_name, execution_status, error_message, query_type, warehouse_name, database_name, warehouse_size] + snowflake.query.execution_time.avg: + description: Average time spent executing queries in database over the last 24 hour window. + unit: s + gauge: + value_type: double + enabled: true + attributes: [schema_name, execution_status, error_message, query_type, warehouse_name, database_name, warehouse_size] + snowflake.query.partitions_scanned.avg: + description: Average number of partitions scanned during query over the last 24 hour window. + unit: 1 + gauge: + value_type: int + enabled: false + attributes: [schema_name, execution_status, error_message, query_type, warehouse_name, database_name, warehouse_size] + snowflake.queued_overload_time.avg: + description: Average time spent in warehouse queue due to warehouse being overloaded over the last 24 hour window. + unit: s + gauge: + value_type: double + enabled: true + attributes: [schema_name, execution_status, error_message, query_type, warehouse_name, database_name, warehouse_size] + snowflake.queued_provisioning_time.avg: + description: Average time spent in warehouse queue waiting for resources to provision over the last 24 hour window. + unit: s + gauge: + value_type: double + enabled: true + attributes: [schema_name, execution_status, error_message, query_type, warehouse_name, database_name, warehouse_size] + snowflake.queued_repair_time.avg: + description: Average time spent in warehouse queue waiting for compute resources to be repaired over the last 24 hour window. + unit: s + gauge: + value_type: double + enabled: true + attributes: [schema_name, execution_status, error_message, query_type, warehouse_name, database_name, warehouse_size] + snowflake.rows_inserted.avg: + description: Average number of rows inserted into a table (or tables) over the last 24 hour window. + unit: "{rows}" + gauge: + value_type: int + enabled: false + attributes: [schema_name, execution_status, error_message, query_type, warehouse_name, database_name, warehouse_size] + snowflake.rows_deleted.avg: + description: Average number of rows deleted from a table (or tables) over the last 24 hour window. + unit: "{rows}" + gauge: + value_type: int + enabled: false + attributes: [schema_name, execution_status, error_message, query_type, warehouse_name, database_name, warehouse_size] + snowflake.rows_produced.avg: + description: Average number of rows produced by statement over the last 24 hour window. + unit: "{rows}" + gauge: + value_type: int + enabled: false + attributes: [schema_name, execution_status, error_message, query_type, warehouse_name, database_name, warehouse_size] + snowflake.rows_unloaded.avg: + description: Average number of rows unloaded during data export over the last 24 hour window. + unit: "{rows}" + gauge: + value_type: int + enabled: false + attributes: [schema_name, execution_status, error_message, query_type, warehouse_name, database_name, warehouse_size] + snowflake.rows_updated.avg: + description: Average number of rows updated in a table over the last 24 hour window. + unit: "{rows}" + gauge: + value_type: int + enabled: false + attributes: [schema_name, execution_status, error_message, query_type, warehouse_name, database_name, warehouse_size] + snowflake.total_elapsed_time.avg: + description: Average elapsed time over the last 24 hour window. 
+ unit: s + gauge: + value_type: double + enabled: true + attributes: [schema_name, execution_status, error_message, query_type, warehouse_name, database_name, warehouse_size] + + # Session metric + snowflake.session_id.count: + description: Distinct session IDs associated with snowflake username over the last 24 hour window. + unit: 1 + gauge: + value_type: int + enabled: false + attributes: [user_name] + + # Snowpipe usage metrics + snowflake.pipe.credits_used.total: + description: Snowpipe credits consumed over the last 24 hour window. + unit: "{credits}" + gauge: + value_type: double + enabled: false + attributes: [pipe_name] + + # Storage usage metrics + snowflake.storage.storage_bytes.total: + description: Number of bytes of table storage used, including bytes for data currently in Time Travel. + unit: By + gauge: + value_type: int + enabled: true + snowflake.storage.stage_bytes.total: + description: Number of bytes of stage storage used by files in all internal stages (named, table, user). + unit: By + gauge: + value_type: int + enabled: true + snowflake.storage.failsafe_bytes.total: + description: Number of bytes of data in Fail-safe. + unit: By + gauge: + value_type: int + enabled: false
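For reviewers, a minimal sketch of how a scraper could drive the generated `MetricsBuilder`. It uses only calls exercised by `generated_metrics_test.go` above (`NewMetricsBuilder`, the `Record...DataPoint` helpers, the `With...` options, and `Emit`); the helper name, the recorded value, and the placement inside `receiver/snowflakereceiver` (so the `internal/metadata` package is importable) are illustrative assumptions, not part of this patch.

```go
package snowflakereceiver

import (
	"time"

	"go.opentelemetry.io/collector/component"
	"go.opentelemetry.io/collector/pdata/pcommon"
	"go.opentelemetry.io/collector/pdata/pmetric"

	"github.com/open-telemetry/opentelemetry-collector-contrib/receiver/snowflakereceiver/internal/metadata"
)

// emitStorageSample (hypothetical) shows the builder lifecycle: configure
// settings, record data points, then emit one resource-metrics batch.
func emitStorageSample() pmetric.Metrics {
	now := pcommon.NewTimestampFromTime(time.Now())

	// Enable a single metric; settings left at their zero value stay
	// disabled, so only the stage-storage data point below is emitted.
	settings := metadata.MetricsSettings{
		SnowflakeStorageStageBytesTotal: metadata.MetricSettings{Enabled: true},
	}

	mb := metadata.NewMetricsBuilder(settings, component.BuildInfo{}, metadata.WithStartTime(now))
	mb.RecordSnowflakeStorageStageBytesTotalDataPoint(now, 512)

	// Emit attaches resource attributes via the With... options and returns
	// the accumulated pmetric.Metrics.
	return mb.Emit(metadata.WithSnowflakeAccountName("my-account"))
}
```

A real scraper would record one data point per row returned by the corresponding ACCOUNT_USAGE query before calling Emit; the single hard-coded value here only demonstrates the call sequence.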