diff --git a/CHANGELOG.md b/CHANGELOG.md index bea2a5f468d..006c6eaaa5b 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -8,6 +8,22 @@ * [CHANGE] Store-gateway: When a query hits `max_fetched_chunks_per_query` and `max_fetched_series_per_query` limits, an error with the status code `422` is created and returned. #4056 * [CHANGE] Packaging: Migrate FPM packaging solution to NFPM. Rationalize packages dependencies and add package for all binaries. #3911 * [CHANGE] Store-gateway: Deprecate flag `-blocks-storage.bucket-store.chunks-cache.subrange-size` since there's no benefit to changing the default of `16000`. #4135
+* [CHANGE] Experimental support for ephemeral storage, introduced in Mimir 2.6.0, has been removed. The following options are no longer available: #4252
+ * `-blocks-storage.ephemeral-tsdb.*`
+ * `-distributor.ephemeral-series-enabled`
+ * `-distributor.ephemeral-series-matchers`
+ * `-ingester.max-ephemeral-series-per-user`
+ * `-ingester.instance-limits.max-ephemeral-series`
+Querying using the `{__mimir_storage__="ephemeral"}` selector no longer works. All label values with the `ephemeral-` prefix in the `reason` label of the `cortex_discarded_samples_total` metric are no longer available. The following metrics have been removed:
+ * `cortex_ingester_ephemeral_series`
+ * `cortex_ingester_ephemeral_series_created_total`
+ * `cortex_ingester_ephemeral_series_removed_total`
+ * `cortex_ingester_ingested_ephemeral_samples_total`
+ * `cortex_ingester_ingested_ephemeral_samples_failures_total`
+ * `cortex_ingester_memory_ephemeral_users`
+ * `cortex_ingester_queries_ephemeral_total`
+ * `cortex_ingester_queried_ephemeral_samples`
+ * `cortex_ingester_queried_ephemeral_series`
* [FEATURE] Ruler: added `keep_firing_for` support to alerting rules. #4099 * [FEATURE] Distributor, ingester: ingestion of native histograms. The new per-tenant limit `-ingester.native-histograms-ingestion-enabled` controls whether native histograms are stored or ignored. #4159 * [FEATURE] Query-frontend: Introduce experimental `-query-frontend.query-sharding-target-series-per-shard` to allow query sharding to take into account cardinality of similar requests executed previously. #4121 #4177 #4188 diff --git a/cmd/mimir/config-descriptor.json b/cmd/mimir/config-descriptor.json index 5cd85d74a78..57ecae88d15 100644 --- a/cmd/mimir/config-descriptor.json +++ b/cmd/mimir/config-descriptor.json @@ -1695,17 +1695,6 @@ ], "fieldValue": null, "fieldDefaultValue": null - }, - { - "kind": "field", - "name": "ephemeral_series_enabled", - "required": false, - "desc": "Enable marking series as ephemeral based on the given matchers in the runtime config.", - "fieldValue": null, - "fieldDefaultValue": false, - "fieldFlag": "distributor.ephemeral-series-enabled", - "fieldType": "boolean", - "fieldCategory": "experimental" } ], "fieldValue": null, @@ -2780,17 +2769,6 @@ "fieldType": "int", "fieldCategory": "advanced" }, - { - "kind": "field", - "name": "max_ephemeral_series", - "required": false, - "desc": "Max ephemeral series that this ingester can hold (across all tenants). Requests to create additional ephemeral series will be rejected. 
0 = unlimited.", - "fieldValue": null, - "fieldDefaultValue": 0, - "fieldFlag": "ingester.instance-limits.max-ephemeral-series", - "fieldType": "int", - "fieldCategory": "experimental" - }, { "kind": "field", "name": "max_inflight_push_requests", @@ -3043,17 +3021,6 @@ "fieldFlag": "ingester.max-global-series-per-metric", "fieldType": "int" }, - { - "kind": "field", - "name": "max_ephemeral_series_per_user", - "required": false, - "desc": "The maximum number of in-memory ephemeral series per tenant, across the cluster before replication. 0 to disable ephemeral storage.", - "fieldValue": null, - "fieldDefaultValue": 0, - "fieldFlag": "ingester.max-ephemeral-series-per-user", - "fieldType": "int", - "fieldCategory": "experimental" - }, { "kind": "field", "name": "max_global_metadata_per_user", @@ -3587,17 +3554,6 @@ "fieldValue": null, "fieldDefaultValue": {}, "fieldType": "map of string to validation.ForwardingRule" - }, - { - "kind": "field", - "name": "ephemeral_series_matchers", - "required": false, - "desc": "Lists of series matchers prefixed by the source. The source must be one of any, api, rule. If an incoming sample matches at least one of the matchers with its source it gets marked as ephemeral. The format of the value looks like: api:{namespace=\"dev\"};rule:{host=\"server1\",namespace=\"prod\"}", - "fieldValue": null, - "fieldDefaultValue": {}, - "fieldFlag": "distributor.ephemeral-series-matchers", - "fieldType": "map of source name (string) to series matchers ([]string)", - "fieldCategory": "experimental" } ], "fieldValue": null, @@ -6078,104 +6034,6 @@ ], "fieldValue": null, "fieldDefaultValue": null - }, - { - "kind": "block", - "name": "ephemeral_tsdb", - "required": false, - "desc": "", - "blockEntries": [ - { - "kind": "field", - "name": "retention_period", - "required": false, - "desc": "Retention of ephemeral series.", - "fieldValue": null, - "fieldDefaultValue": 600000000000, - "fieldFlag": "blocks-storage.ephemeral-tsdb.retention-period", - "fieldType": "duration", - "fieldCategory": "experimental" - }, - { - "kind": "field", - "name": "head_chunks_write_buffer_size_bytes", - "required": false, - "desc": "The write buffer size used by the head chunks mapper. Lower values reduce memory utilisation on clusters with a large number of tenants at the cost of increased disk I/O operations.", - "fieldValue": null, - "fieldDefaultValue": 4194304, - "fieldFlag": "blocks-storage.ephemeral-tsdb.head-chunks-write-buffer-size-bytes", - "fieldType": "int", - "fieldCategory": "experimental" - }, - { - "kind": "field", - "name": "head_chunks_end_time_variance", - "required": false, - "desc": "How much variance (as percentage between 0 and 1) should be applied to the chunk end time, to spread chunks writing across time. Doesn't apply to the last chunk of the chunk range. 0 means no variance.", - "fieldValue": null, - "fieldDefaultValue": 0, - "fieldFlag": "blocks-storage.ephemeral-tsdb.head-chunks-end-time-variance", - "fieldType": "float", - "fieldCategory": "experimental" - }, - { - "kind": "field", - "name": "stripe_size", - "required": false, - "desc": "The number of shards of series to use in TSDB (must be a power of 2). 
Reducing this will decrease memory footprint, but can negatively impact performance.", - "fieldValue": null, - "fieldDefaultValue": 16384, - "fieldFlag": "blocks-storage.ephemeral-tsdb.stripe-size", - "fieldType": "int", - "fieldCategory": "experimental" - }, - { - "kind": "field", - "name": "head_chunks_write_queue_size", - "required": false, - "desc": "The size of the write queue used by the head chunks mapper. Lower values reduce memory utilisation at the cost of potentially higher ingest latency. Value of 0 switches chunks mapper to implementation without a queue.", - "fieldValue": null, - "fieldDefaultValue": 1000000, - "fieldFlag": "blocks-storage.ephemeral-tsdb.head-chunks-write-queue-size", - "fieldType": "int", - "fieldCategory": "experimental" - }, - { - "kind": "field", - "name": "head_postings_for_matchers_cache_ttl", - "required": false, - "desc": "How long to cache postings for matchers in the Head and OOOHead. 0 disables the cache and just deduplicates the in-flight calls.", - "fieldValue": null, - "fieldDefaultValue": 10000000000, - "fieldFlag": "blocks-storage.ephemeral-tsdb.head-postings-for-matchers-cache-ttl", - "fieldType": "duration", - "fieldCategory": "experimental" - }, - { - "kind": "field", - "name": "head_postings_for_matchers_cache_size", - "required": false, - "desc": "Maximum number of entries in the cache for postings for matchers in the Head and OOOHead when ttl \u003e 0.", - "fieldValue": null, - "fieldDefaultValue": 100, - "fieldFlag": "blocks-storage.ephemeral-tsdb.head-postings-for-matchers-cache-size", - "fieldType": "int", - "fieldCategory": "experimental" - }, - { - "kind": "field", - "name": "head_postings_for_matchers_cache_force", - "required": false, - "desc": "Force the cache to be used for postings for matchers in the Head and OOOHead, even if it's not a concurrent (query-sharding) call.", - "fieldValue": null, - "fieldDefaultValue": false, - "fieldFlag": "blocks-storage.ephemeral-tsdb.head-postings-for-matchers-cache-force", - "fieldType": "boolean", - "fieldCategory": "experimental" - } - ], - "fieldValue": null, - "fieldDefaultValue": null } ], "fieldValue": null, diff --git a/cmd/mimir/help-all.txt.tmpl b/cmd/mimir/help-all.txt.tmpl index 58aeda78847..8550201a89f 100644 --- a/cmd/mimir/help-all.txt.tmpl +++ b/cmd/mimir/help-all.txt.tmpl @@ -403,22 +403,6 @@ Usage of ./cmd/mimir/mimir: How frequently to scan the bucket, or to refresh the bucket index (if enabled), in order to look for changes (new blocks shipped by ingesters and blocks deleted by retention or compaction). (default 15m0s) -blocks-storage.bucket-store.tenant-sync-concurrency int Maximum number of concurrent tenants synching blocks. (default 10) - -blocks-storage.ephemeral-tsdb.head-chunks-end-time-variance float - [experimental] How much variance (as percentage between 0 and 1) should be applied to the chunk end time, to spread chunks writing across time. Doesn't apply to the last chunk of the chunk range. 0 means no variance. - -blocks-storage.ephemeral-tsdb.head-chunks-write-buffer-size-bytes int - [experimental] The write buffer size used by the head chunks mapper. Lower values reduce memory utilisation on clusters with a large number of tenants at the cost of increased disk I/O operations. (default 4194304) - -blocks-storage.ephemeral-tsdb.head-chunks-write-queue-size int - [experimental] The size of the write queue used by the head chunks mapper. Lower values reduce memory utilisation at the cost of potentially higher ingest latency. 
Value of 0 switches chunks mapper to implementation without a queue. (default 1000000) - -blocks-storage.ephemeral-tsdb.head-postings-for-matchers-cache-force - [experimental] Force the cache to be used for postings for matchers in the Head and OOOHead, even if it's not a concurrent (query-sharding) call. - -blocks-storage.ephemeral-tsdb.head-postings-for-matchers-cache-size int - [experimental] Maximum number of entries in the cache for postings for matchers in the Head and OOOHead when ttl > 0. (default 100) - -blocks-storage.ephemeral-tsdb.head-postings-for-matchers-cache-ttl duration - [experimental] How long to cache postings for matchers in the Head and OOOHead. 0 disables the cache and just deduplicates the in-flight calls. (default 10s) - -blocks-storage.ephemeral-tsdb.retention-period duration - [experimental] Retention of ephemeral series. (default 10m0s) - -blocks-storage.ephemeral-tsdb.stripe-size int - [experimental] The number of shards of series to use in TSDB (must be a power of 2). Reducing this will decrease memory footprint, but can negatively impact performance. (default 16384) -blocks-storage.filesystem.dir string Local filesystem storage directory. (default "blocks") -blocks-storage.gcs.bucket-name string @@ -769,10 +753,6 @@ Usage of ./cmd/mimir/mimir: How frequently to clean up clients for ingesters that have gone away. (default 15s) -distributor.drop-label string This flag can be used to specify label names that to drop during sample ingestion within the distributor and can be repeated in order to drop multiple labels. - -distributor.ephemeral-series-enabled - [experimental] Enable marking series as ephemeral based on the given matchers in the runtime config. - -distributor.ephemeral-series-matchers value - Lists of series matchers prefixed by the source. The source must be one of any, api, rule. If an incoming sample matches at least one of the matchers with its source it gets marked as ephemeral. The format of the value looks like: api:{namespace="dev"};rule:{host="server1",namespace="prod"} -distributor.forwarding.enabled [experimental] Enables the feature to forward certain metrics in remote_write requests, depending on defined rules. -distributor.forwarding.grpc-client.backoff-max-period duration @@ -1025,8 +1005,6 @@ Usage of ./cmd/mimir/mimir: Override the expected name on the server certificate. -ingester.ignore-series-limit-for-metric-names string Comma-separated list of metric names, for which the -ingester.max-global-series-per-metric limit will be ignored. Does not affect the -ingester.max-global-series-per-user limit. - -ingester.instance-limits.max-ephemeral-series int - [experimental] Max ephemeral series that this ingester can hold (across all tenants). Requests to create additional ephemeral series will be rejected. 0 = unlimited. -ingester.instance-limits.max-inflight-push-requests int Max inflight push requests that this ingester can handle (across all tenants). Additional requests will be rejected. 0 = unlimited. (default 30000) -ingester.instance-limits.max-ingestion-rate float @@ -1035,8 +1013,6 @@ Usage of ./cmd/mimir/mimir: Max series that this ingester can hold (across all tenants). Requests to create additional series will be rejected. 0 = unlimited. -ingester.instance-limits.max-tenants int Max tenants that this ingester can hold. Requests from additional tenants will be rejected. 0 = unlimited. 
- -ingester.max-ephemeral-series-per-user int - [experimental] The maximum number of in-memory ephemeral series per tenant, across the cluster before replication. 0 to disable ephemeral storage. -ingester.max-global-exemplars-per-user int [experimental] The maximum number of exemplars in memory, across the cluster. 0 to disable exemplars ingestion. -ingester.max-global-metadata-per-metric int diff --git a/cmd/mimir/help.txt.tmpl b/cmd/mimir/help.txt.tmpl index 998244b8dee..0d2aa6071c9 100644 --- a/cmd/mimir/help.txt.tmpl +++ b/cmd/mimir/help.txt.tmpl @@ -287,8 +287,6 @@ Usage of ./cmd/mimir/mimir: Expands ${var} or $var in config according to the values of the environment variables. -config.file value Configuration file to load. - -distributor.ephemeral-series-matchers value - Lists of series matchers prefixed by the source. The source must be one of any, api, rule. If an incoming sample matches at least one of the matchers with its source it gets marked as ephemeral. The format of the value looks like: api:{namespace="dev"};rule:{host="server1",namespace="prod"} -distributor.ha-tracker.cluster string Prometheus label to look for in samples to identify a Prometheus HA cluster. (default "cluster") -distributor.ha-tracker.consul.hostname string diff --git a/docs/sources/mimir/operators-guide/configure/about-versioning.md b/docs/sources/mimir/operators-guide/configure/about-versioning.md index a897a4ae4f9..0c2b3f7ddf1 100644 --- a/docs/sources/mimir/operators-guide/configure/about-versioning.md +++ b/docs/sources/mimir/operators-guide/configure/about-versioning.md @@ -55,9 +55,6 @@ The following features are currently experimental: - Distributor - Metrics relabeling - OTLP ingestion path - - Marking of series for ephemeral storage - - `-distributor.ephemeral-series-enabled` - - `-distributor.ephemeral-series-matchers` - Hash ring - Disabling ring heartbeat timeouts - `-distributor.ring.heartbeat-timeout=0` @@ -85,11 +82,6 @@ The following features are currently experimental: - `-blocks-storage.tsdb.head-postings-for-matchers-cache-ttl` - `-blocks-storage.tsdb.head-postings-for-matchers-cache-size` - `-blocks-storage.tsdb.head-postings-for-matchers-cache-force` - - Support for ephemeral storage: - - `-ingester.max-ephemeral-series-per-user` - - `-ingester.instance-limits.max-ephemeral-series` - - Use of `__mimir_storage__` label matcher. - - All `-blocks-storage.ephemeral-tsdb.*` options. - Query-frontend - `-query-frontend.querier-forget-delay` - Instant query splitting (`-query-frontend.split-instant-queries-by-interval`) diff --git a/docs/sources/mimir/operators-guide/mimir-runbooks/_index.md b/docs/sources/mimir/operators-guide/mimir-runbooks/_index.md index bbc2fad96b0..46b036722f5 100644 --- a/docs/sources/mimir/operators-guide/mimir-runbooks/_index.md +++ b/docs/sources/mimir/operators-guide/mimir-runbooks/_index.md @@ -1391,21 +1391,6 @@ How to **fix** it: - See [`MimirIngesterReachingSeriesLimit`](#MimirIngesterReachingSeriesLimit) runbook. -### err-mimir-ingester-max-ephemeral-series - -This critical error occurs when an ingester rejects a write request because it reached the maximum number of ephemeral series. - -How it **works**: - -- The ingester keeps all ephemeral series in memory. -- The ingester has a per-instance limit on the number of ephemeral series, used to protect the ingester from overloading in case of high traffic. -- When the limit on the number of ephemeral series is reached, new ephemeral series are rejected, while samples can still be appended to existing ones. 
-- To configure the limit, set the `-ingester.instance-limits.max-ephemeral-series` option (or `max_ephemeral_series` in the runtime config). - -How to **fix** it: - -- Increase the limit, or reshard the tenants between ingesters. Please see [`MimirIngesterReachingSeriesLimit`](#MimirIngesterReachingSeriesLimit) runbook for more details (it describes persistent storage, but same principles apply to ephemeral storage). - ### err-mimir-ingester-max-inflight-push-requests This error occurs when an ingester rejects a write request because the maximum in-flight requests limit has been reached. @@ -1434,18 +1419,6 @@ How to **fix** it: - Ensure the actual number of series written by the affected tenant is legit. - Consider increasing the per-tenant limit by using the `-ingester.max-global-series-per-user` option (or `max_global_series_per_user` in the runtime configuration). -### err-mimir-max-ephemeral-series-per-user - -This error occurs when the number of ephemeral series for a given tenant exceeds the configured limit. - -The limit is used to protect ingesters from overloading in case a tenant writes a high number of ephemeral series, as well as to protect the whole system’s stability from potential abuse or mistakes. -To configure the limit on a per-tenant basis, use the `-ingester.max-ephemeral-series-per-user` option (or `max_ephemeral_series_per_user` in the runtime configuration). - -How to **fix** it: - -- Ensure the actual number of ephemeral series written by the affected tenant is legit. -- Consider increasing the per-tenant limit by using the `-ingester.max-ephemeral-series-per-user` option (or `max_ephemeral_series_per_user` in the runtime configuration). - ### err-mimir-max-series-per-metric This error occurs when the number of in-memory series for a given tenant and metric name exceeds the configured limit. @@ -1611,14 +1584,6 @@ How it **works**: > **Note:** If the out-of-order sample ingestion is enabled, then this error is similar to `err-mimir-sample-out-of-order` below with a difference that the sample is older than the out-of-order time window as it relates to the latest sample for that particular time series or the TSDB. -### err-mimir-ephemeral-sample-timestamp-too-old - -This error occurs when the ingester rejects a sample because its timestamp older than configured retention of ephemeral storage. - -How it **works**: - -- Ephemeral storage in ingesters can only hold samples that not older than `-blocks-storage.ephemeral-tsdb.retention-period` value. If the incoming timestamp is older than "now - retention", it is rejected. - ### err-mimir-sample-out-of-order This error occurs when the ingester rejects a sample because another sample with a more recent timestamp has already been ingested. @@ -1638,14 +1603,6 @@ Common **causes**: > **Note:** You can learn more about out of order samples in Prometheus, in the blog post [Debugging out of order samples](https://www.robustperception.io/debugging-out-of-order-samples/). -### err-mimir-ephemeral-sample-out-of-order - -This error occurs when the ingester rejects a sample because another sample with a more recent timestamp has already been ingested for the same series in the ephemeral storage. - -Please refer to [err-mimir-sample-out-of-order](#err-mimir-sample-out-of-order) for possible reasons. - -> **Note**: It is not possible to enable out-of-order sample ingestion for ephemeral storage. 
- ### err-mimir-sample-duplicate-timestamp This error occurs when the ingester rejects a sample because it is a duplicate of a previously received sample with the same timestamp but different value in the same time series. @@ -1655,15 +1612,6 @@ Common **causes**: - Multiple endpoints are exporting the same metrics, or multiple Prometheus instances are scraping different metrics with identical labels. - Prometheus relabelling has been configured and it causes series to clash after the relabelling. Check the error message for information about which series has received a duplicate sample. -### err-mimir-ephemeral-sample-duplicate-timestamp - -This error occurs when the ingester rejects a sample because it is a duplicate of a previously received sample with the same timestamp but different value for the same ephemeral series. - -Common **causes**: - -- Multiple endpoints are exporting the same metrics, or multiple Prometheus instances are scraping different metrics with identical labels. -- Prometheus relabelling has been configured and it causes series to clash after the relabelling. Check the error message for information about which series has received a duplicate sample. - ### err-mimir-exemplar-series-missing This error occurs when the ingester rejects an exemplar because its related series has not been ingested yet. @@ -1719,18 +1667,6 @@ How to **fix** it: - Increase the allowed limit by using the `-distributor.max-recv-msg-size` option. -### err-mimir-ephemeral-storage-not-enabled-for-user - -Ingester returns this error when a write request contains ephemeral series, but ephemeral storage is disabled for user. - -Ephemeral storage is disabled when `-ingester.max-ephemeral-series-per-user` (or corresponding `max_ephemeral_series_per_user` limit in runtime configuration) is set to 0 for given tenant. - -How to **fix** it: - -- Disable support for ephemeral series in distributor by setting `-distributor.ephemeral-series-enabled` to `false`. -- Remove rules for marking incoming series as ephemeral for given tenant by removing `-distributor.ephemeral-series-matchers` (or `ephemeral_series_matchers` in runtime configuration). -- Enable ephemeral storage for tenant by setting the `-ingester.max-ephemeral-series-per-user` (or corresponding `max_ephemeral_series_per_user` limit in runtime configuration) to positive number. - ## Mimir routes by path **Write path**: diff --git a/docs/sources/mimir/reference-configuration-parameters/index.md b/docs/sources/mimir/reference-configuration-parameters/index.md index 54fa4ade97c..3520fc70cec 100644 --- a/docs/sources/mimir/reference-configuration-parameters/index.md +++ b/docs/sources/mimir/reference-configuration-parameters/index.md @@ -749,11 +749,6 @@ forwarding: # The CLI flags prefix for this block configuration is: # distributor.forwarding.grpc-client [grpc_client: ] - -# (experimental) Enable marking series as ephemeral based on the given matchers -# in the runtime config. -# CLI flag: -distributor.ephemeral-series-enabled -[ephemeral_series_enabled: | default = false] ``` ### ingester @@ -935,12 +930,6 @@ instance_limits: # CLI flag: -ingester.instance-limits.max-series [max_series: | default = 0] - # (experimental) Max ephemeral series that this ingester can hold (across all - # tenants). Requests to create additional ephemeral series will be rejected. 0 - # = unlimited. 
- # CLI flag: -ingester.instance-limits.max-ephemeral-series - [max_ephemeral_series: | default = 0] - # (advanced) Max inflight push requests that this ingester can handle (across # all tenants). Additional requests will be rejected. 0 = unlimited. # CLI flag: -ingester.instance-limits.max-inflight-push-requests @@ -2559,11 +2548,6 @@ The `limits` block configures default and per-tenant limits imposed by component # CLI flag: -ingester.max-global-series-per-metric [max_global_series_per_metric: | default = 0] -# (experimental) The maximum number of in-memory ephemeral series per tenant, -# across the cluster before replication. 0 to disable ephemeral storage. -# CLI flag: -ingester.max-ephemeral-series-per-user -[max_ephemeral_series_per_user: | default = 0] - # The maximum number of in-memory metrics with metadata per tenant, across the # cluster. 0 to disable. # CLI flag: -ingester.max-global-metadata-per-user @@ -2878,13 +2862,6 @@ The `limits` block configures default and per-tenant limits imposed by component # Rules based on which the Distributor decides whether a metric should be # forwarded to an alternative remote_write API endpoint. [forwarding_rules: | default = ] - -# (experimental) Lists of series matchers prefixed by the source. The source -# must be one of any, api, rule. If an incoming sample matches at least one of -# the matchers with its source it gets marked as ephemeral. The format of the -# value looks like: api:{namespace="dev"};rule:{host="server1",namespace="prod"} -# CLI flag: -distributor.ephemeral-series-matchers -[ephemeral_series_matchers: | default = ] ``` ### blocks_storage @@ -3312,51 +3289,6 @@ tsdb: # Head and OOOHead, even if it's not a concurrent (query-sharding) call. # CLI flag: -blocks-storage.tsdb.head-postings-for-matchers-cache-force [head_postings_for_matchers_cache_force: | default = false] - -ephemeral_tsdb: - # (experimental) Retention of ephemeral series. - # CLI flag: -blocks-storage.ephemeral-tsdb.retention-period - [retention_period: | default = 10m] - - # (experimental) The write buffer size used by the head chunks mapper. Lower - # values reduce memory utilisation on clusters with a large number of tenants - # at the cost of increased disk I/O operations. - # CLI flag: -blocks-storage.ephemeral-tsdb.head-chunks-write-buffer-size-bytes - [head_chunks_write_buffer_size_bytes: | default = 4194304] - - # (experimental) How much variance (as percentage between 0 and 1) should be - # applied to the chunk end time, to spread chunks writing across time. Doesn't - # apply to the last chunk of the chunk range. 0 means no variance. - # CLI flag: -blocks-storage.ephemeral-tsdb.head-chunks-end-time-variance - [head_chunks_end_time_variance: | default = 0] - - # (experimental) The number of shards of series to use in TSDB (must be a - # power of 2). Reducing this will decrease memory footprint, but can - # negatively impact performance. - # CLI flag: -blocks-storage.ephemeral-tsdb.stripe-size - [stripe_size: | default = 16384] - - # (experimental) The size of the write queue used by the head chunks mapper. - # Lower values reduce memory utilisation at the cost of potentially higher - # ingest latency. Value of 0 switches chunks mapper to implementation without - # a queue. - # CLI flag: -blocks-storage.ephemeral-tsdb.head-chunks-write-queue-size - [head_chunks_write_queue_size: | default = 1000000] - - # (experimental) How long to cache postings for matchers in the Head and - # OOOHead. 0 disables the cache and just deduplicates the in-flight calls. 
- # CLI flag: -blocks-storage.ephemeral-tsdb.head-postings-for-matchers-cache-ttl - [head_postings_for_matchers_cache_ttl: | default = 10s] - - # (experimental) Maximum number of entries in the cache for postings for - # matchers in the Head and OOOHead when ttl > 0. - # CLI flag: -blocks-storage.ephemeral-tsdb.head-postings-for-matchers-cache-size - [head_postings_for_matchers_cache_size: | default = 100] - - # (experimental) Force the cache to be used for postings for matchers in the - # Head and OOOHead, even if it's not a concurrent (query-sharding) call. - # CLI flag: -blocks-storage.ephemeral-tsdb.head-postings-for-matchers-cache-force - [head_postings_for_matchers_cache_force: | default = false] ``` ### compactor diff --git a/pkg/distributor/distributor.go b/pkg/distributor/distributor.go index cc653b74ccc..0af8e30e8f2 100644 --- a/pkg/distributor/distributor.go +++ b/pkg/distributor/distributor.go @@ -45,7 +45,6 @@ import ( ingester_client "github.com/grafana/mimir/pkg/ingester/client" "github.com/grafana/mimir/pkg/mimirpb" "github.com/grafana/mimir/pkg/util" - "github.com/grafana/mimir/pkg/util/ephemeral" "github.com/grafana/mimir/pkg/util/globalerror" "github.com/grafana/mimir/pkg/util/httpgrpcutil" util_math "github.com/grafana/mimir/pkg/util/math" @@ -118,8 +117,6 @@ type Distributor struct { inflightPushRequests atomic.Int64 inflightPushRequestsBytes atomic.Int64 - ephemeralCheckerByUser ephemeral.SeriesCheckerByUser - // Metrics queryDuration *instrument.HistogramCollector ingesterChunksDeduplicated prometheus.Counter @@ -181,9 +178,6 @@ type Config struct { // Configuration for forwarding of metrics to alternative ingestion endpoint. Forwarding forwarding.Config - // Enable the experimental feature to mark series as ephemeral. - EphemeralSeriesEnabled bool `yaml:"ephemeral_series_enabled" category:"experimental"` - // This allows downstream projects to wrap the distributor push function // and access the deserialized write requests before/after they are pushed. // These functions will only receive samples that don't get forwarded to an @@ -213,7 +207,6 @@ func (cfg *Config) RegisterFlags(f *flag.FlagSet, logger log.Logger) { f.Float64Var(&cfg.InstanceLimits.MaxIngestionRate, maxIngestionRateFlag, 0, "Max ingestion rate (samples/sec) that this distributor will accept. This limit is per-distributor, not per-tenant. Additional push requests will be rejected. Current ingestion rate is computed as exponentially weighted moving average, updated every second. 0 = unlimited.") f.IntVar(&cfg.InstanceLimits.MaxInflightPushRequests, maxInflightPushRequestsFlag, 2000, "Max inflight push requests that this distributor can handle. This limit is per-distributor, not per-tenant. Additional requests will be rejected. 0 = unlimited.") f.IntVar(&cfg.InstanceLimits.MaxInflightPushRequestsBytes, maxInflightPushRequestsBytesFlag, 0, "The sum of the request sizes in bytes of inflight push requests that this distributor can handle. This limit is per-distributor, not per-tenant. Additional requests will be rejected. 
0 = unlimited.") - f.BoolVar(&cfg.EphemeralSeriesEnabled, "distributor.ephemeral-series-enabled", false, "Enable marking series as ephemeral based on the given matchers in the runtime config.") } // Validate config and returns error on failure @@ -237,7 +230,7 @@ const ( ) // New constructs a new Distributor -func New(cfg Config, clientConfig ingester_client.Config, limits *validation.Overrides, activeGroupsCleanupService *util.ActiveGroupsCleanupService, ingestersRing ring.ReadRing, ephemeralChecker ephemeral.SeriesCheckerByUser, canJoinDistributorsRing bool, reg prometheus.Registerer, log log.Logger) (*Distributor, error) { +func New(cfg Config, clientConfig ingester_client.Config, limits *validation.Overrides, activeGroupsCleanupService *util.ActiveGroupsCleanupService, ingestersRing ring.ReadRing, canJoinDistributorsRing bool, reg prometheus.Registerer, log log.Logger) (*Distributor, error) { if cfg.IngesterClientFactory == nil { cfg.IngesterClientFactory = func(addr string) (ring_client.PoolClient, error) { return ingester_client.MakeIngesterClient(addr, clientConfig) @@ -255,15 +248,14 @@ func New(cfg Config, clientConfig ingester_client.Config, limits *validation.Ove subservices = append(subservices, haTracker) d := &Distributor{ - cfg: cfg, - log: log, - ingestersRing: ingestersRing, - ingesterPool: NewPool(cfg.PoolConfig, ingestersRing, cfg.IngesterClientFactory, log), - ephemeralCheckerByUser: ephemeralChecker, - healthyInstancesCount: atomic.NewUint32(0), - limits: limits, - HATracker: haTracker, - ingestionRate: util_math.NewEWMARate(0.2, instanceIngestionRateTickInterval), + cfg: cfg, + log: log, + ingestersRing: ingestersRing, + ingesterPool: NewPool(cfg.PoolConfig, ingestersRing, cfg.IngesterClientFactory, log), + healthyInstancesCount: atomic.NewUint32(0), + limits: limits, + HATracker: haTracker, + ingestionRate: util_math.NewEWMARate(0.2, instanceIngestionRateTickInterval), queryDuration: instrument.NewHistogramCollector(promauto.With(reg).NewHistogramVec(prometheus.HistogramOpts{ Namespace: "cortex", @@ -729,7 +721,6 @@ func (d *Distributor) wrapPushWithMiddlewares(next push.Func) push.Func { middlewares = append(middlewares, d.prePushRelabelMiddleware) middlewares = append(middlewares, d.prePushValidationMiddleware) middlewares = append(middlewares, d.prePushForwardingMiddleware) - middlewares = append(middlewares, d.prePushEphemeralMiddleware) middlewares = append(middlewares, d.cfg.PushWrappers...) for ix := len(middlewares) - 1; ix >= 0; ix-- { @@ -1060,61 +1051,6 @@ func (d *Distributor) prePushForwardingMiddleware(next push.Func) push.Func { } } -// prePushEphemeralMiddleware is used as push.Func middleware in front of push method. -// If marking series as ephemeral is enabled, this middleware uses the ephemeral series -// provider to determine whether a time series should be marked as ephemeral. -func (d *Distributor) prePushEphemeralMiddleware(next push.Func) push.Func { - if !d.cfg.EphemeralSeriesEnabled { - return next - } - - return func(ctx context.Context, pushReq *push.Request) (*mimirpb.WriteResponse, error) { - userID, err := tenant.TenantID(ctx) - if err != nil { - return nil, err - } - - req, err := pushReq.WriteRequest() - if err != nil { - return nil, err - } - - // Ensure that only time series which shall be marked as ephemeral according to the ephemeral checker get - // ingested into the ephemeral store. 
- req.EphemeralTimeseries = nil - - ephemeralChecker := d.ephemeralCheckerByUser.EphemeralChecker(userID, req.Source) - if ephemeralChecker != nil { - first := true - var deleteTs []int - for ix := 0; ix < len(req.Timeseries); ix++ { - ts := req.Timeseries[ix] - - if !ephemeralChecker.ShouldMarkEphemeral(ts.Labels) { - continue - } - - if first { - req.EphemeralTimeseries = mimirpb.PreallocTimeseriesSliceFromPool() - deleteTs = make([]int, 0, len(req.Timeseries)-ix) - first = false - } - - // Move this series from persistent to ephemeral storage. We don't ingest exemplars for ephemeral series. - mimirpb.ClearExemplars(ts.TimeSeries) - req.EphemeralTimeseries = append(req.EphemeralTimeseries, ts) - deleteTs = append(deleteTs, ix) - } - - if len(deleteTs) > 0 { - req.Timeseries = util.RemoveSliceIndexes(req.Timeseries, deleteTs) - } - } - - return next(ctx, pushReq) - } -} - // metricsMiddleware updates metrics which are expected to account for all received data, // including data that later gets modified or dropped. func (d *Distributor) metricsMiddleware(next push.Func) push.Func { @@ -1242,7 +1178,6 @@ func (d *Distributor) Push(ctx context.Context, req *mimirpb.WriteRequest) (*mim pushReq := push.NewParsedRequest(req) pushReq.AddCleanup(func() { mimirpb.ReuseSlice(req.Timeseries) - mimirpb.ReuseSlice(req.EphemeralTimeseries) }) return d.PushWithMiddlewares(ctx, pushReq) @@ -1272,7 +1207,7 @@ func (d *Distributor) push(ctx context.Context, pushReq *push.Request) (*mimirpb d.updateReceivedMetrics(req, userID) - if len(req.Timeseries) == 0 && len(req.EphemeralTimeseries) == 0 && len(req.Metadata) == 0 { + if len(req.Timeseries) == 0 && len(req.Metadata) == 0 { return &mimirpb.WriteResponse{}, nil } @@ -1282,7 +1217,6 @@ func (d *Distributor) push(ctx context.Context, pushReq *push.Request) (*mimirpb } seriesKeys := d.getTokensForSeries(userID, req.Timeseries) - ephemeralSeriesKeys := d.getTokensForSeries(userID, req.EphemeralTimeseries) metadataKeys := make([]uint32, 0, len(req.Metadata)) for _, m := range req.Metadata { @@ -1302,24 +1236,20 @@ func (d *Distributor) push(ctx context.Context, pushReq *push.Request) (*mimirpb localCtx = opentracing.ContextWithSpan(localCtx, sp) } - // All tokens, stored in order: series, metadata, ephemeral series. - keys := make([]uint32, len(seriesKeys)+len(metadataKeys)+len(ephemeralSeriesKeys)) + // All tokens, stored in order: series, metadata. + keys := make([]uint32, len(seriesKeys)+len(metadataKeys)) initialMetadataIndex := len(seriesKeys) - initialEphemeralIndex := initialMetadataIndex + len(metadataKeys) copy(keys, seriesKeys) copy(keys[initialMetadataIndex:], metadataKeys) - copy(keys[initialEphemeralIndex:], ephemeralSeriesKeys) // we must not re-use buffers now until all DoBatch goroutines have finished, // so set this flag false and pass cleanup() to DoBatch. 
cleanupInDefer = false err = ring.DoBatch(ctx, ring.WriteNoExtend, subRing, keys, func(ingester ring.InstanceDesc, indexes []int) error { - var timeseriesCount, ephemeralCount, metadataCount int + var timeseriesCount, metadataCount int for _, i := range indexes { - if i >= initialEphemeralIndex { - ephemeralCount++ - } else if i >= initialMetadataIndex { + if i >= initialMetadataIndex { metadataCount++ } else { timeseriesCount++ @@ -1327,20 +1257,17 @@ func (d *Distributor) push(ctx context.Context, pushReq *push.Request) (*mimirpb } timeseries := preallocSliceIfNeeded[mimirpb.PreallocTimeseries](timeseriesCount) - ephemeral := preallocSliceIfNeeded[mimirpb.PreallocTimeseries](ephemeralCount) metadata := preallocSliceIfNeeded[*mimirpb.MetricMetadata](metadataCount) for _, i := range indexes { - if i >= initialEphemeralIndex { - ephemeral = append(ephemeral, req.EphemeralTimeseries[i-initialEphemeralIndex]) - } else if i >= initialMetadataIndex { + if i >= initialMetadataIndex { metadata = append(metadata, req.Metadata[i-initialMetadataIndex]) } else { timeseries = append(timeseries, req.Timeseries[i]) } } - err := d.send(localCtx, ingester, timeseries, ephemeral, metadata, req.Source) + err := d.send(localCtx, ingester, timeseries, metadata, req.Source) if errors.Is(err, context.DeadlineExceeded) { return httpgrpc.Errorf(500, "exceeded configured distributor remote timeout: %s", err.Error()) } @@ -1411,7 +1338,7 @@ func sortLabelsIfNeeded(labels []mimirpb.LabelAdapter) { }) } -func (d *Distributor) send(ctx context.Context, ingester ring.InstanceDesc, timeseries, ephemeral []mimirpb.PreallocTimeseries, metadata []*mimirpb.MetricMetadata, source mimirpb.WriteRequest_SourceEnum) error { +func (d *Distributor) send(ctx context.Context, ingester ring.InstanceDesc, timeseries []mimirpb.PreallocTimeseries, metadata []*mimirpb.MetricMetadata, source mimirpb.WriteRequest_SourceEnum) error { h, err := d.ingesterPool.GetClientFor(ingester.Addr) if err != nil { return err @@ -1419,10 +1346,9 @@ func (d *Distributor) send(ctx context.Context, ingester ring.InstanceDesc, time c := h.(ingester_client.IngesterClient) req := mimirpb.WriteRequest{ - Timeseries: timeseries, - Metadata: metadata, - Source: source, - EphemeralTimeseries: ephemeral, + Timeseries: timeseries, + Metadata: metadata, + Source: source, } _, err = c.Push(ctx, &req) if resp, ok := httpgrpc.HTTPResponseFromError(err); ok { diff --git a/pkg/distributor/distributor_test.go b/pkg/distributor/distributor_test.go index 02e43dc9a9b..2353136fab1 100644 --- a/pkg/distributor/distributor_test.go +++ b/pkg/distributor/distributor_test.go @@ -53,8 +53,6 @@ import ( "github.com/grafana/mimir/pkg/mimirpb" "github.com/grafana/mimir/pkg/storage/chunk" "github.com/grafana/mimir/pkg/util/chunkcompat" - "github.com/grafana/mimir/pkg/util/ephemeral" - "github.com/grafana/mimir/pkg/util/extract" "github.com/grafana/mimir/pkg/util/globalerror" "github.com/grafana/mimir/pkg/util/limiter" util_math "github.com/grafana/mimir/pkg/util/math" @@ -1887,7 +1885,7 @@ func BenchmarkDistributor_Push(b *testing.B) { require.NoError(b, err) // Start the distributor. 
- distributor, err := New(distributorCfg, clientConfig, overrides, nil, ingestersRing, nil, true, nil, log.NewNopLogger()) + distributor, err := New(distributorCfg, clientConfig, overrides, nil, ingestersRing, true, nil, log.NewNopLogger()) require.NoError(b, err) require.NoError(b, services.StartAndAwaitRunning(context.Background(), distributor)) @@ -3175,86 +3173,6 @@ func TestRelabelMiddleware(t *testing.T) { } } -func TestMarkEphemeralMiddleware(t *testing.T) { - tenant := "user" - ctx := user.InjectOrgID(context.Background(), tenant) - - type testCase struct { - name string - ephemeralSeries []string - reqs []*mimirpb.WriteRequest - expectedReqs []*mimirpb.WriteRequest - } - testCases := []testCase{ - { - name: "half - half", - ephemeralSeries: []string{ - "metric2", - "metric3", - }, - reqs: []*mimirpb.WriteRequest{makeWriteRequest(1000, 1, 0, false, false, "metric0", "metric1", "metric2", "metric3")}, - expectedReqs: []*mimirpb.WriteRequest{markEphemeral(makeWriteRequest(1000, 1, 0, false, false, "metric0", "metric1", "metric2", "metric3"), 2, 3)}, - }, { - name: "no ephemeral", - ephemeralSeries: []string{ - "metric100", - "metric101", - }, - reqs: []*mimirpb.WriteRequest{makeWriteRequest(1000, 1, 0, false, false, "metric0", "metric1", "metric2", "metric3")}, - expectedReqs: []*mimirpb.WriteRequest{makeWriteRequest(1000, 1, 0, false, false, "metric0", "metric1", "metric2", "metric3")}, - }, { - name: "all ephemeral", - ephemeralSeries: []string{ - "metric0", - "metric1", - "metric2", - "metric3", - }, - reqs: []*mimirpb.WriteRequest{makeWriteRequest(1000, 1, 0, false, false, "metric0", "metric1", "metric2", "metric3")}, - expectedReqs: []*mimirpb.WriteRequest{markEphemeral(makeWriteRequest(1000, 1, 0, false, false, "metric0", "metric1", "metric2", "metric3"), 0, 1, 2, 3)}, - }, - } - - for _, tc := range testCases { - t.Run(tc.name, func(t *testing.T) { - cleanupCallCount := 0 - cleanup := func() { - cleanupCallCount++ - } - - var gotReqs []*mimirpb.WriteRequest - next := func(ctx context.Context, pushReq *push.Request) (*mimirpb.WriteResponse, error) { - req, err := pushReq.WriteRequest() - require.NoError(t, err) - gotReqs = append(gotReqs, req) - pushReq.CleanUp() - return nil, nil - } - - ds, _, _ := prepare(t, prepConfig{ - numDistributors: 1, - markEphemeral: true, - getEphemeralSeriesProvider: func() ephemeral.SeriesCheckerByUser { - memp := &mockEphemeralSeriesProvider{t, tc.ephemeralSeries} - return memp - }, - }) - middleware := ds[0].prePushEphemeralMiddleware(next) - - for _, req := range tc.reqs { - pushReq := push.NewParsedRequest(req) - pushReq.AddCleanup(cleanup) - _, _ = middleware(ctx, pushReq) - } - - assert.Equal(t, tc.expectedReqs, gotReqs) - - // Cleanup must have been called once per request. 
- assert.Equal(t, len(tc.reqs), cleanupCallCount) - }) - } -} - func TestHaDedupeAndRelabelBeforeForwarding(t *testing.T) { ctx := user.InjectOrgID(context.Background(), "user") const replica1 = "replicaA" @@ -3520,8 +3438,6 @@ type prepConfig struct { labelNamesStreamZonesResponseDelay map[string]time.Duration forwarding bool getForwarder func() forwarding.Forwarder - getEphemeralSeriesProvider func() ephemeral.SeriesCheckerByUser - markEphemeral bool timeOut bool } @@ -3639,10 +3555,6 @@ func prepare(t *testing.T, cfg prepConfig) ([]*Distributor, []mockIngester, []*p distributorCfg.Forwarding.RequestConcurrency = 5 } - if cfg.markEphemeral { - distributorCfg.EphemeralSeriesEnabled = true - } - cfg.limits.IngestionTenantShardSize = cfg.shuffleShardSize if cfg.enableTracker { @@ -3664,13 +3576,8 @@ func prepare(t *testing.T, cfg prepConfig) ([]*Distributor, []mockIngester, []*p overrides, err := validation.NewOverrides(*cfg.limits, nil) require.NoError(t, err) - var ephemeralChecker ephemeral.SeriesCheckerByUser - if cfg.markEphemeral && cfg.getEphemeralSeriesProvider != nil { - ephemeralChecker = cfg.getEphemeralSeriesProvider() - } - reg := prometheus.NewPedanticRegistry() - d, err := New(distributorCfg, clientConfig, overrides, nil, ingestersRing, ephemeralChecker, true, reg, log.NewNopLogger()) + d, err := New(distributorCfg, clientConfig, overrides, nil, ingestersRing, true, reg, log.NewNopLogger()) require.NoError(t, err) if cfg.forwarding && cfg.getForwarder != nil { @@ -3705,16 +3612,6 @@ func stopAll(ds []*Distributor, r *ring.Ring) { r.StopAsync() } -func markEphemeral(req *mimirpb.WriteRequest, indexes ...int) *mimirpb.WriteRequest { - var deletedCount int - for _, idx := range indexes { - req.EphemeralTimeseries = append(req.EphemeralTimeseries, req.Timeseries[idx-deletedCount]) - req.Timeseries = append(req.Timeseries[:idx-deletedCount], req.Timeseries[idx-deletedCount+1:]...) - deletedCount++ - } - return req -} - func makeWriteRequest(startTimestampMs int64, samples, metadata int, exemplars, histograms bool, metrics ...string) *mimirpb.WriteRequest { request := &mimirpb.WriteRequest{} @@ -3965,7 +3862,6 @@ type mockIngester struct { happy bool stats client.UsersStatsResponse timeseries map[uint32]*mimirpb.PreallocTimeseries - ephemeralTimeseries map[uint32]*mimirpb.PreallocTimeseries metadata map[uint32]map[mimirpb.MetricMetadata]struct{} queryDelay time.Duration pushDelay time.Duration @@ -4021,10 +3917,6 @@ func (i *mockIngester) Push(ctx context.Context, req *mimirpb.WriteRequest, opts i.timeseries = map[uint32]*mimirpb.PreallocTimeseries{} } - if len(req.EphemeralTimeseries) > 0 && i.ephemeralTimeseries == nil { - i.ephemeralTimeseries = map[uint32]*mimirpb.PreallocTimeseries{} - } - if i.metadata == nil { i.metadata = map[uint32]map[mimirpb.MetricMetadata]struct{}{} } @@ -4034,23 +3926,9 @@ func (i *mockIngester) Push(ctx context.Context, req *mimirpb.WriteRequest, opts return nil, err } - for j := range append(req.Timeseries, req.EphemeralTimeseries...) 
{ - var series mimirpb.PreallocTimeseries - ephemeral := false - if j < len(req.Timeseries) { - series = req.Timeseries[j] - } else { - series = req.EphemeralTimeseries[j-len(req.Timeseries)] - ephemeral = true - } + for _, series := range req.Timeseries { hash := shardByAllLabels(orgid, series.Labels) - var existing *mimirpb.PreallocTimeseries - var ok bool - if ephemeral { - existing, ok = i.ephemeralTimeseries[hash] - } else { - existing, ok = i.timeseries[hash] - } + existing, ok := i.timeseries[hash] if !ok { // Make a copy because the request Timeseries are reused item := mimirpb.TimeSeries{ @@ -4061,11 +3939,7 @@ func (i *mockIngester) Push(ctx context.Context, req *mimirpb.WriteRequest, opts copy(item.Labels, series.TimeSeries.Labels) copy(item.Samples, series.TimeSeries.Samples) - if ephemeral { - i.ephemeralTimeseries[hash] = &mimirpb.PreallocTimeseries{TimeSeries: &item} - } else { - i.timeseries[hash] = &mimirpb.PreallocTimeseries{TimeSeries: &item} - } + i.timeseries[hash] = &mimirpb.PreallocTimeseries{TimeSeries: &item} } else { existing.Samples = append(existing.Samples, series.Samples...) } @@ -4461,32 +4335,6 @@ func (m *mockForwarder) Forward(ctx context.Context, endpoint string, dontForwar func (m *mockForwarder) DeleteMetricsForUser(user string) {} -type mockEphemeralSeriesProvider struct { - t *testing.T - ephemeralMetrics []string -} - -func (m mockEphemeralSeriesProvider) EphemeralChecker(user string, source mimirpb.WriteRequest_SourceEnum) ephemeral.SeriesChecker { - return &mockEphemeralSeriesChecker{m} -} - -type mockEphemeralSeriesChecker struct { - mockEphemeralSeriesProvider -} - -func (m mockEphemeralSeriesChecker) ShouldMarkEphemeral(lset []mimirpb.LabelAdapter) bool { - metricName, err := extract.UnsafeMetricNameFromLabelAdapters(lset) - require.NoError(m.t, err) - - for _, m := range m.ephemeralMetrics { - if m == metricName { - return true - } - } - - return false -} - func ingestAllTimeseriesMutator(ts []mimirpb.PreallocTimeseries) []mimirpb.PreallocTimeseries { return ts } @@ -5199,9 +5047,6 @@ func TestSeriesAreShardedToCorrectIngesters(t *testing.T) { const userName = "userName" req := makeWriteRequestForGenerators(series, uniqueMetricsGen, exemplarLabelGen, metaDataGen) - all := req.Timeseries - req.Timeseries = all[:len(all)/2] - req.EphemeralTimeseries = all[len(all)/2:] ctx := user.InjectOrgID(context.Background(), userName) // skip all the middlewares, just do the push @@ -5211,11 +5056,9 @@ func TestSeriesAreShardedToCorrectIngesters(t *testing.T) { // Verify that each ingester only received series and metadata that it should receive. totalSeries := 0 - totalEphemeral := 0 totalMetadata := 0 for ix := range ing { totalSeries += len(ing[ix].timeseries) - totalEphemeral += len(ing[ix].ephemeralTimeseries) totalMetadata += len(ing[ix].metadata) for _, ts := range ing[ix].timeseries { @@ -5224,12 +5067,6 @@ func TestSeriesAreShardedToCorrectIngesters(t *testing.T) { assert.Equal(t, ix, ingIx) } - for _, ts := range ing[ix].ephemeralTimeseries { - token := distrib.tokenForLabels(userName, ts.Labels) - ingIx := getIngesterIndexForToken(token, ing) - assert.Equal(t, ix, ingIx) - } - for _, metadataMap := range ing[ix].metadata { for m := range metadataMap { token := distrib.tokenForMetadata(userName, m.MetricFamilyName) @@ -5239,25 +5076,15 @@ func TestSeriesAreShardedToCorrectIngesters(t *testing.T) { } } - // Verify that timeseries were forwarded as timeseries, and ephemeral timeseries as ephemeral timeseries, and there is no mixup. 
+ // Verify that all timeseries were forwarded to ingesters. for _, ts := range req.Timeseries { token := distrib.tokenForLabels(userName, ts.Labels) ingIx := getIngesterIndexForToken(token, ing) assert.Equal(t, ts.Labels, ing[ingIx].timeseries[token].Labels) - assert.Equal(t, (*mimirpb.PreallocTimeseries)(nil), ing[ingIx].ephemeralTimeseries[token]) - } - - for _, ts := range req.EphemeralTimeseries { - token := distrib.tokenForLabels(userName, ts.Labels) - ingIx := getIngesterIndexForToken(token, ing) - - assert.Equal(t, ts.Labels, ing[ingIx].ephemeralTimeseries[token].Labels) - assert.Equal(t, (*mimirpb.PreallocTimeseries)(nil), ing[ingIx].timeseries[token]) } - assert.Equal(t, series/2, totalSeries) - assert.Equal(t, series/2, totalEphemeral) + assert.Equal(t, series, totalSeries) assert.Equal(t, series, totalMetadata) // each series has unique metric name, and each metric name gets metadata } diff --git a/pkg/ingester/ingester.go b/pkg/ingester/ingester.go index 2d90346c079..eb126ed2fb9 100644 --- a/pkg/ingester/ingester.go +++ b/pkg/ingester/ingester.go @@ -99,9 +99,6 @@ const ( perUserSeriesLimit = "per_user_series_limit" perMetricSeriesLimit = "per_metric_series_limit" - // Prefix for discard reasons when ingesting ephemeral series. - ephemeralDiscardPrefix = "ephemeral-" - replicationFactorStatsName = "ingester_replication_factor" ringStoreStatsName = "ingester_ring_store" memorySeriesStatsName = "ingester_inmemory_series" @@ -112,25 +109,8 @@ const ( tenantsWithOutOfOrderEnabledStatName = "ingester_ooo_enabled_tenants" minOutOfOrderTimeWindowSecondsStatName = "ingester_ooo_min_window" maxOutOfOrderTimeWindowSecondsStatName = "ingester_ooo_max_window" - - // Prefix used in Prometheus registry for ephemeral storage. - ephemeralPrometheusMetricsPrefix = "ephemeral_" - - // StorageLabelName is a label name used to select queried storage type. - StorageLabelName = "__mimir_storage__" - EphemeralStorageLabelValue = "ephemeral" - PersistentStorageLabelValue = "persistent" - - errInvalidStorageLabelValue = "invalid value of " + StorageLabelName + " label: %s" ) -var ( - errInvalidStorageMatcherType = fmt.Errorf("invalid matcher used together with %s label, only equality check supported", StorageLabelName) - errMultipleStorageMatchersFound = fmt.Errorf("multiple matchers for %s label found, only one matcher supported", StorageLabelName) -) - -var errEphemeralStorageDisabledForUser = errors.New("ephemeral storage is not enabled for user") - // BlocksUploader interface is used to have an easy way to mock it in tests. type BlocksUploader interface { Sync(ctx context.Context) (uploaded int, err error) @@ -255,8 +235,7 @@ type Ingester struct { compactionIdleTimeout time.Duration // Number of series in memory, across all tenants. - persistentSeriesCount atomic.Int64 - ephemeralSeriesCount atomic.Int64 + seriesCount atomic.Int64 // For storing metadata ingested. 
usersMetadataMtx sync.RWMutex @@ -332,14 +311,7 @@ func New(cfg Config, limits *validation.Overrides, activeGroupsCleanupService *u Name: "cortex_ingester_memory_series", Help: "The current number of series in memory.", }, func() float64 { - return float64(i.persistentSeriesCount.Load()) - }) - - promauto.With(registerer).NewGaugeFunc(prometheus.GaugeOpts{ - Name: "cortex_ingester_memory_ephemeral_series", - Help: "The current number of ephemeral series in memory.", - }, func() float64 { - return float64(i.ephemeralSeriesCount.Load()) + return float64(i.seriesCount.Load()) }) promauto.With(registerer).NewGaugeFunc(prometheus.GaugeOpts{ @@ -720,7 +692,7 @@ func (i *Ingester) PushWithCleanup(ctx context.Context, pushReq *push.Request) ( } // Early exit if no timeseries in request - don't create a TSDB or an appender. - if len(req.Timeseries) == 0 && len(req.EphemeralTimeseries) == 0 { + if len(req.Timeseries) == 0 { return &mimirpb.WriteResponse{}, nil } @@ -743,7 +715,7 @@ func (i *Ingester) PushWithCleanup(ctx context.Context, pushReq *push.Request) ( // Keep track of some stats which are tracked only if the samples will be // successfully committed - persistentStats, ephemeralStats pushStats + stats pushStats firstPartialErr error updateFirstPartial = func(errFn func() error) { @@ -754,72 +726,23 @@ func (i *Ingester) PushWithCleanup(ctx context.Context, pushReq *push.Request) ( ) // Walk the samples, appending them to the users database - var persistentApp, ephemeralApp extendedAppender + app := db.Appender(ctx).(extendedAppender) + level.Debug(spanlog).Log("event", "got appender for timeseries", "series", len(req.Timeseries)) - rollback := func() { - if persistentApp != nil { - if err := persistentApp.Rollback(); err != nil { - level.Warn(i.logger).Log("msg", "failed to rollback persistent appender on error", "user", userID, "err", err) - } - } - if ephemeralApp != nil { - if err := ephemeralApp.Rollback(); err != nil { - level.Warn(i.logger).Log("msg", "failed to rollback ephemeral appender on error", "user", userID, "err", err) - } - } + var activeSeries *activeseries.ActiveSeries + if i.cfg.ActiveSeriesMetricsEnabled { + activeSeries = db.activeSeries } - if len(req.Timeseries) > 0 { - persistentApp = db.Appender(ctx).(extendedAppender) - - level.Debug(spanlog).Log("event", "got appender for persistent series", "series", len(req.Timeseries)) - - var activeSeries *activeseries.ActiveSeries - if i.cfg.ActiveSeriesMetricsEnabled { - activeSeries = db.activeSeries - } - - minAppendTime, minAppendTimeAvailable := db.Head().AppendableMinValidTime() + minAppendTime, minAppendTimeAvailable := db.Head().AppendableMinValidTime() - err = i.pushSamplesToAppender(userID, req.Timeseries, persistentApp, startAppend, &persistentStats, updateFirstPartial, activeSeries, i.limits.OutOfOrderTimeWindow(userID), minAppendTimeAvailable, minAppendTime, false) - if err != nil { - rollback() - return nil, err + err = i.pushSamplesToAppender(userID, req.Timeseries, app, startAppend, &stats, updateFirstPartial, activeSeries, i.limits.OutOfOrderTimeWindow(userID), minAppendTimeAvailable, minAppendTime) + if err != nil { + if err := app.Rollback(); err != nil { + level.Warn(i.logger).Log("msg", "failed to rollback appender on error", "user", userID, "err", err) } - } - - if len(req.EphemeralTimeseries) > 0 { - a, err := db.EphemeralAppender(ctx) - - switch { - case err != nil && !errors.Is(err, errEphemeralStorageDisabledForUser): - rollback() - return nil, err - - case err != nil && errors.Is(err, 
errEphemeralStorageDisabledForUser): - // Add all samples for ephemeral series as "failed". - for _, ts := range req.EphemeralTimeseries { - ephemeralStats.failedSamplesCount += len(ts.Samples) + len(ts.Histograms) - } - - updateFirstPartial(func() error { - return fmt.Errorf(globalerror.EphemeralStorageNotEnabledForUser.Message(errEphemeralStorageDisabledForUser.Error())) - }) - // No rollback, we will append persistent samples. - - default: - ephemeralApp = a.(extendedAppender) - level.Debug(spanlog).Log("event", "got appender for ephemeral series", "ephemeralSeries", len(req.EphemeralTimeseries)) - - minAppendTime, minAppendTimeAvailable := db.getEphemeralStorage().AppendableMinValidTime() - - err = i.pushSamplesToAppender(userID, req.EphemeralTimeseries, ephemeralApp, startAppend, &ephemeralStats, updateFirstPartial, nil, 0, minAppendTimeAvailable, minAppendTime, true) - if err != nil { - rollback() - return nil, err - } - } + return nil, err } // At this point all samples have been added to the appender, so we can track the time it took. @@ -827,34 +750,15 @@ func (i *Ingester) PushWithCleanup(ctx context.Context, pushReq *push.Request) ( level.Debug(spanlog).Log( "event", "start commit", - "succeededSamplesCount", persistentStats.succeededSamplesCount, - "failedSamplesCount", persistentStats.failedSamplesCount, - "succeededExemplarsCount", persistentStats.succeededExemplarsCount, - "failedExemplarsCount", persistentStats.failedExemplarsCount, - "ephemeralSucceededSamplesCount", ephemeralStats.succeededSamplesCount, - "ephemeralFailedSamplesCount", ephemeralStats.failedSamplesCount, - "ephemeralSucceededExemplarsCount", ephemeralStats.succeededExemplarsCount, - "ephemeralFailedExemplarsCount", ephemeralStats.failedExemplarsCount, + "succeededSamplesCount", stats.succeededSamplesCount, + "failedSamplesCount", stats.failedSamplesCount, + "succeededExemplarsCount", stats.succeededExemplarsCount, + "failedExemplarsCount", stats.failedExemplarsCount, ) startCommit := time.Now() - if persistentApp != nil { - app := persistentApp - persistentApp = nil // Disable rollback for appender. If Commit fails, it auto-rollbacks. - - if err := app.Commit(); err != nil { - rollback() - return nil, wrapWithUser(err, userID) - } - } - if ephemeralApp != nil { - app := ephemeralApp - ephemeralApp = nil // Disable rollback for appender. If Commit fails, it auto-rollbacks. - - if err := app.Commit(); err != nil { - rollback() - return nil, wrapWithUser(err, userID) - } + if err := app.Commit(); err != nil { + return nil, wrapWithUser(err, userID) } commitDuration := time.Since(startCommit) @@ -862,31 +766,23 @@ func (i *Ingester) PushWithCleanup(ctx context.Context, pushReq *push.Request) ( level.Debug(spanlog).Log("event", "complete commit", "commitDuration", commitDuration.String()) // If only invalid samples are pushed, don't change "last update", as TSDB was not modified. - if persistentStats.succeededSamplesCount > 0 || ephemeralStats.succeededSamplesCount > 0 { + if stats.succeededSamplesCount > 0 { db.setLastUpdate(time.Now()) } // Increment metrics only if the samples have been successfully committed. // If the code didn't reach this point, it means that we returned an error // which will be converted into an HTTP 5xx and the client should/will retry. 
- i.metrics.ingestedSamples.WithLabelValues(userID).Add(float64(persistentStats.succeededSamplesCount)) - i.metrics.ingestedSamplesFail.WithLabelValues(userID).Add(float64(persistentStats.failedSamplesCount)) - i.metrics.ingestedExemplars.Add(float64(persistentStats.succeededExemplarsCount)) - i.metrics.ingestedExemplarsFail.Add(float64(persistentStats.failedExemplarsCount)) - i.appendedSamplesStats.Inc(int64(persistentStats.succeededSamplesCount)) - i.appendedExemplarsStats.Inc(int64(persistentStats.succeededExemplarsCount)) - - if ephemeralStats.succeededSamplesCount > 0 || ephemeralStats.failedSamplesCount > 0 { - i.metrics.ephemeralIngestedSamples.WithLabelValues(userID).Add(float64(ephemeralStats.succeededSamplesCount)) - i.metrics.ephemeralIngestedSamplesFail.WithLabelValues(userID).Add(float64(ephemeralStats.failedSamplesCount)) - - i.appendedSamplesStats.Inc(int64(ephemeralStats.succeededSamplesCount)) - } + i.metrics.ingestedSamples.WithLabelValues(userID).Add(float64(stats.succeededSamplesCount)) + i.metrics.ingestedSamplesFail.WithLabelValues(userID).Add(float64(stats.failedSamplesCount)) + i.metrics.ingestedExemplars.Add(float64(stats.succeededExemplarsCount)) + i.metrics.ingestedExemplarsFail.Add(float64(stats.failedExemplarsCount)) + i.appendedSamplesStats.Inc(int64(stats.succeededSamplesCount)) + i.appendedExemplarsStats.Inc(int64(stats.succeededExemplarsCount)) group := i.activeGroups.UpdateActiveGroupTimestamp(userID, validation.GroupLabel(i.limits, userID, req.Timeseries), startAppend) - i.updateMetricsFromPushStats(userID, group, &persistentStats, req.Source, db, i.metrics.discardedPersistent) - i.updateMetricsFromPushStats(userID, group, &ephemeralStats, req.Source, db, i.metrics.discardedEphemeral) + i.updateMetricsFromPushStats(userID, group, &stats, req.Source, db, i.metrics.discarded) if firstPartialErr != nil { code := http.StatusBadRequest @@ -934,7 +830,7 @@ func (i *Ingester) updateMetricsFromPushStats(userID string, group string, stats // but in case of unhandled errors, appender is rolled back and such error is returned. func (i *Ingester) pushSamplesToAppender(userID string, timeseries []mimirpb.PreallocTimeseries, app extendedAppender, startAppend time.Time, stats *pushStats, updateFirstPartial func(errFn func() error), activeSeries *activeseries.ActiveSeries, - outOfOrderWindow model.Duration, minAppendTimeAvailable bool, minAppendTime int64, ephemeral bool) error { + outOfOrderWindow model.Duration, minAppendTimeAvailable bool, minAppendTime int64) error { // Return true if handled as soft error, and we can ingest more series. 
handleAppendError := func(err error, timestamp int64, labels []mimirpb.LabelAdapter) bool { @@ -947,9 +843,6 @@ func (i *Ingester) pushSamplesToAppender(userID string, timeseries []mimirpb.Pre case storage.ErrOutOfBounds: stats.sampleOutOfBoundsCount++ updateFirstPartial(func() error { - if ephemeral { - return newEphemeralIngestErrSampleTimestampTooOld(model.Time(timestamp), labels) - } return newIngestErrSampleTimestampTooOld(model.Time(timestamp), labels) }) return true @@ -957,9 +850,6 @@ func (i *Ingester) pushSamplesToAppender(userID string, timeseries []mimirpb.Pre case storage.ErrOutOfOrderSample: stats.sampleOutOfOrderCount++ updateFirstPartial(func() error { - if ephemeral { - return newEphemeralIngestErrSampleOutOfOrder(model.Time(timestamp), labels) - } return newIngestErrSampleOutOfOrder(model.Time(timestamp), labels) }) return true @@ -967,7 +857,6 @@ func (i *Ingester) pushSamplesToAppender(userID string, timeseries []mimirpb.Pre case storage.ErrTooOldSample: stats.sampleTooOldCount++ updateFirstPartial(func() error { - // OOO is not enabled for ephemeral storage, so we can't get this error. return newIngestErrSampleTimestampTooOldOOOEnabled(model.Time(timestamp), labels, outOfOrderWindow) }) return true @@ -975,19 +864,13 @@ func (i *Ingester) pushSamplesToAppender(userID string, timeseries []mimirpb.Pre case storage.ErrDuplicateSampleForTimestamp: stats.newValueForTimestampCount++ updateFirstPartial(func() error { - if ephemeral { - return newEphemeralIngestErrSampleDuplicateTimestamp(model.Time(timestamp), labels) - } return newIngestErrSampleDuplicateTimestamp(model.Time(timestamp), labels) }) return true - case errMaxSeriesPerUserLimitExceeded, errMaxEphemeralSeriesPerUserLimitExceeded: // we have special error for this, as we want different help message from FormatError. + case errMaxSeriesPerUserLimitExceeded: stats.perUserSeriesLimitCount++ updateFirstPartial(func() error { - if ephemeral { - return makeLimitError(i.limiter.FormatError(userID, cause)) - } return makeLimitError(i.limiter.FormatError(userID, cause)) }) return true @@ -995,7 +878,6 @@ func (i *Ingester) pushSamplesToAppender(userID string, timeseries []mimirpb.Pre case errMaxSeriesPerMetricLimitExceeded: stats.perMetricSeriesLimitCount++ updateFirstPartial(func() error { - // Ephemeral storage doesn't have this limit. 
return makeMetricLimitError(mimirpb.FromLabelAdaptersToLabelsWithCopy(labels), i.limiter.FormatError(userID, cause)) }) return true @@ -1032,9 +914,6 @@ func (i *Ingester) pushSamplesToAppender(userID string, timeseries []mimirpb.Pre } updateFirstPartial(func() error { - if ephemeral { - return newEphemeralIngestErrSampleTimestampTooOld(model.Time(firstTimestamp), ts.Labels) - } return newIngestErrSampleTimestampTooOld(model.Time(firstTimestamp), ts.Labels) }) continue @@ -1050,9 +929,6 @@ func (i *Ingester) pushSamplesToAppender(userID string, timeseries []mimirpb.Pre firstTimestamp := ts.Samples[0].TimestampMs updateFirstPartial(func() error { - if ephemeral { - return newEphemeralIngestErrSampleTimestampTooOld(model.Time(firstTimestamp), ts.Labels) - } return newIngestErrSampleTimestampTooOld(model.Time(firstTimestamp), ts.Labels) }) continue @@ -1145,7 +1021,7 @@ func (i *Ingester) pushSamplesToAppender(userID string, timeseries []mimirpb.Pre }) } - if !ephemeral && len(ts.Exemplars) > 0 && i.limits.MaxGlobalExemplarsPerUser(userID) > 0 { + if len(ts.Exemplars) > 0 && i.limits.MaxGlobalExemplarsPerUser(userID) > 0 { // app.AppendExemplar currently doesn't create the series, it must // already exist. If it does not then drop. if ref == 0 { @@ -1254,7 +1130,7 @@ func (i *Ingester) LabelValues(ctx context.Context, req *client.LabelValuesReque return &client.LabelValuesResponse{}, nil } - q, err := db.Querier(ctx, startTimestampMs, endTimestampMs, false) + q, err := db.Querier(ctx, startTimestampMs, endTimestampMs) if err != nil { return nil, err } @@ -1290,7 +1166,7 @@ func (i *Ingester) LabelNames(ctx context.Context, req *client.LabelNamesRequest return nil, err } - q, err := db.Querier(ctx, mint, maxt, false) + q, err := db.Querier(ctx, mint, maxt) if err != nil { return nil, err } @@ -1329,7 +1205,7 @@ func (i *Ingester) MetricsForLabelMatchers(ctx context.Context, req *client.Metr } mint, maxt := req.StartTimestampMs, req.EndTimestampMs - q, err := db.Querier(ctx, mint, maxt, false) + q, err := db.Querier(ctx, mint, maxt) if err != nil { return nil, err } @@ -1518,17 +1394,7 @@ func (i *Ingester) QueryStream(req *client.QueryRequest, stream client.Ingester_ return err } - storageType, matchers, err := removeStorageMatcherAndGetStorageType(matchers) - if err != nil { - return err - } - - ephemeral := storageType == EphemeralStorageLabelValue - if ephemeral { - i.metrics.ephemeralQueries.Inc() - } else { - i.metrics.queries.Inc() - } + i.metrics.queries.Inc() db := i.getTSDB(userID) if db == nil { @@ -1557,28 +1423,23 @@ func (i *Ingester) QueryStream(req *client.QueryRequest, stream client.Ingester_ if streamType == QueryStreamChunks { level.Debug(spanlog).Log("msg", "using queryStreamChunks") - numSeries, numSamples, err = i.queryStreamChunks(ctx, db, int64(from), int64(through), matchers, shard, stream, ephemeral) + numSeries, numSamples, err = i.queryStreamChunks(ctx, db, int64(from), int64(through), matchers, shard, stream) } else { level.Debug(spanlog).Log("msg", "using queryStreamSamples") - numSeries, numSamples, err = i.queryStreamSamples(ctx, db, int64(from), int64(through), matchers, shard, stream, ephemeral) + numSeries, numSamples, err = i.queryStreamSamples(ctx, db, int64(from), int64(through), matchers, shard, stream) } if err != nil { return err } - if ephemeral { - i.metrics.ephemeralQueriedSeries.Observe(float64(numSeries)) - i.metrics.ephemeralQueriedSamples.Observe(float64(numSamples)) - } else { - i.metrics.queriedSeries.Observe(float64(numSeries)) - 
i.metrics.queriedSamples.Observe(float64(numSamples)) - } - level.Debug(spanlog).Log("series", numSeries, "samples", numSamples, "storage", storageType) + i.metrics.queriedSeries.Observe(float64(numSeries)) + i.metrics.queriedSamples.Observe(float64(numSamples)) + level.Debug(spanlog).Log("series", numSeries, "samples", numSamples) return nil } -func (i *Ingester) queryStreamSamples(ctx context.Context, db *userTSDB, from, through int64, matchers []*labels.Matcher, shard *sharding.ShardSelector, stream client.Ingester_QueryStreamServer, ephemeral bool) (numSeries, numSamples int, _ error) { - q, err := db.Querier(ctx, from, through, ephemeral) +func (i *Ingester) queryStreamSamples(ctx context.Context, db *userTSDB, from, through int64, matchers []*labels.Matcher, shard *sharding.ShardSelector, stream client.Ingester_QueryStreamServer) (numSeries, numSamples int, _ error) { + q, err := db.Querier(ctx, from, through) if err != nil { return 0, 0, err } @@ -1661,13 +1522,13 @@ func (i *Ingester) queryStreamSamples(ctx context.Context, db *userTSDB, from, t } // queryStreamChunks streams metrics from a TSDB. This implements the client.IngesterServer interface -func (i *Ingester) queryStreamChunks(ctx context.Context, db *userTSDB, from, through int64, matchers []*labels.Matcher, shard *sharding.ShardSelector, stream client.Ingester_QueryStreamServer, ephemeral bool) (numSeries, numSamples int, _ error) { +func (i *Ingester) queryStreamChunks(ctx context.Context, db *userTSDB, from, through int64, matchers []*labels.Matcher, shard *sharding.ShardSelector, stream client.Ingester_QueryStreamServer) (numSeries, numSamples int, _ error) { var q storage.ChunkQuerier var err error if i.limits.OutOfOrderTimeWindow(db.userID) > 0 { - q, err = db.UnorderedChunkQuerier(ctx, from, through, ephemeral) + q, err = db.UnorderedChunkQuerier(ctx, from, through) } else { - q, err = db.ChunkQuerier(ctx, from, through, ephemeral) + q, err = db.ChunkQuerier(ctx, from, through) } if err != nil { return 0, 0, err @@ -1844,22 +1705,20 @@ func (i *Ingester) createTSDB(userID string) (*userTSDB, error) { matchersConfig := i.limits.ActiveSeriesCustomTrackersConfig(userID) userDB := &userTSDB{ - userID: userID, - activeSeries: activeseries.NewActiveSeries(activeseries.NewMatchers(matchersConfig), i.cfg.ActiveSeriesMetricsIdleTimeout), - seriesInMetric: newMetricCounter(i.limiter, i.cfg.getIgnoreSeriesLimitForMetricNamesMap()), - ingestedAPISamples: util_math.NewEWMARate(0.2, i.cfg.RateUpdatePeriod), - ingestedRuleSamples: util_math.NewEWMARate(0.2, i.cfg.RateUpdatePeriod), - instanceLimitsFn: i.getInstanceLimits, - instanceSeriesCount: &i.persistentSeriesCount, - instanceEphemeralSeriesCount: &i.ephemeralSeriesCount, - blockMinRetention: i.cfg.BlocksStorageConfig.TSDB.Retention, + userID: userID, + activeSeries: activeseries.NewActiveSeries(activeseries.NewMatchers(matchersConfig), i.cfg.ActiveSeriesMetricsIdleTimeout), + seriesInMetric: newMetricCounter(i.limiter, i.cfg.getIgnoreSeriesLimitForMetricNamesMap()), + ingestedAPISamples: util_math.NewEWMARate(0.2, i.cfg.RateUpdatePeriod), + ingestedRuleSamples: util_math.NewEWMARate(0.2, i.cfg.RateUpdatePeriod), + instanceLimitsFn: i.getInstanceLimits, + instanceSeriesCount: &i.seriesCount, + blockMinRetention: i.cfg.BlocksStorageConfig.TSDB.Retention, } maxExemplars := i.limiter.convertGlobalToLocalLimit(userID, i.limits.MaxGlobalExemplarsPerUser(userID)) oooTW := time.Duration(i.limits.OutOfOrderTimeWindow(userID)) // Create a new user database - const storageKey = 
"storage" - db, err := tsdb.Open(udir, log.With(userLogger, storageKey, "persistent"), tsdbPromReg, &tsdb.Options{ + db, err := tsdb.Open(udir, userLogger, tsdbPromReg, &tsdb.Options{ RetentionDuration: i.cfg.BlocksStorageConfig.TSDB.Retention.Milliseconds(), MinBlockDuration: blockRanges[0], MaxBlockDuration: blockRanges[len(blockRanges)-1], @@ -1869,7 +1728,7 @@ func (i *Ingester) createTSDB(userID string) (*userTSDB, error) { HeadChunksEndTimeVariance: i.cfg.BlocksStorageConfig.TSDB.HeadChunksEndTimeVariance, WALCompression: i.cfg.BlocksStorageConfig.TSDB.WALCompressionEnabled, WALSegmentSize: i.cfg.BlocksStorageConfig.TSDB.WALSegmentSizeBytes, - SeriesLifecycleCallback: userDB.persistentSeriesCallback(), + SeriesLifecycleCallback: userDB, BlocksToDelete: userDB.blocksToDelete, EnableExemplarStorage: true, // enable for everyone so we can raise the limit later MaxExemplars: int64(maxExemplars), @@ -1932,53 +1791,6 @@ func (i *Ingester) createTSDB(userID string) (*userTSDB, error) { } i.tsdbMetrics.setRegistryForUser(userID, tsdbPromReg) - - userDB.ephemeralSeriesRetentionPeriod = i.cfg.BlocksStorageConfig.EphemeralTSDB.Retention - userDB.ephemeralFactory = func() (*tsdb.Head, error) { - if i.limits.MaxEphemeralSeriesPerUser(userID) <= 0 { - return nil, errEphemeralStorageDisabledForUser - } - - headOptions := &tsdb.HeadOptions{ - ChunkRange: i.cfg.BlocksStorageConfig.EphemeralTSDB.Retention.Milliseconds(), - ChunkDirRoot: filepath.Join(udir, "ephemeral_chunks"), - ChunkPool: nil, - ChunkWriteBufferSize: i.cfg.BlocksStorageConfig.EphemeralTSDB.HeadChunksWriteBufferSize, - ChunkEndTimeVariance: i.cfg.BlocksStorageConfig.EphemeralTSDB.HeadChunksEndTimeVariance, - ChunkWriteQueueSize: i.cfg.BlocksStorageConfig.EphemeralTSDB.HeadChunksWriteQueueSize, - StripeSize: i.cfg.BlocksStorageConfig.EphemeralTSDB.StripeSize, - SeriesCallback: userDB.ephemeralSeriesCallback(), - EnableExemplarStorage: false, - EnableMemorySnapshotOnShutdown: false, - IsolationDisabled: true, - PostingsForMatchersCacheTTL: i.cfg.BlocksStorageConfig.EphemeralTSDB.HeadPostingsForMatchersCacheTTL, - PostingsForMatchersCacheSize: i.cfg.BlocksStorageConfig.EphemeralTSDB.HeadPostingsForMatchersCacheSize, - PostingsForMatchersCacheForce: i.cfg.BlocksStorageConfig.EphemeralTSDB.HeadPostingsForMatchersCacheForce, - } - - headOptions.MaxExemplars.Store(0) - headOptions.OutOfOrderTimeWindow.Store(0) - headOptions.OutOfOrderCapMax.Store(int64(tsdb.DefaultOutOfOrderCapMax)) // We need to set this, despite OOO time window being 0. - headOptions.EnableNativeHistograms.Store(false) - - h, err := tsdb.NewHead(prometheus.WrapRegistererWithPrefix(ephemeralPrometheusMetricsPrefix, tsdbPromReg), log.With(userLogger, storageKey, "ephemeral"), nil, nil, headOptions, nil) - if err != nil { - return nil, err - } - - i.metrics.memEphemeralUsers.Inc() - - // Don't allow ingestion of old samples into ephemeral storage. We use Truncate here on empty head, which is pointless, - // but we do it for its side effects: it sets both minTime and minValidTime to specified timestamp. - // - // We could have used h.SetMinValidTime() instead, but that only sets minValidTime and not minTime, - // and calling h.AppendableMinValidTime() then doesn't return set value. There is no such problem with Truncate. 
- if err := h.Truncate(time.Now().Add(-i.cfg.BlocksStorageConfig.EphemeralTSDB.Retention).UnixMilli()); err != nil { - return nil, err - } - return h, err - } - return userDB, nil } @@ -1995,8 +1807,6 @@ func (i *Ingester) closeAllTSDB() { go func(db *userTSDB) { defer wg.Done() - ephemeral := db.hasEphemeralStorage() - if err := db.Close(); err != nil { level.Warn(i.logger).Log("msg", "unable to close TSDB", "err", err, "user", userID) return @@ -2012,9 +1822,6 @@ func (i *Ingester) closeAllTSDB() { i.metrics.memUsers.Dec() i.metrics.deletePerUserCustomTrackerMetrics(userID, db.activeSeries.CurrentMatcherNames()) - if ephemeral { - i.metrics.memEphemeralUsers.Dec() - } }(userDB) } @@ -2282,12 +2089,7 @@ func (i *Ingester) compactBlocks(ctx context.Context, force bool, allowed *util. return nil } - // Truncate ephemeral storage first. - if err := userDB.TruncateEphemeral(time.Now()); err != nil { - level.Warn(i.logger).Log("msg", "truncating ephemeral storage for user has failed", "user", userID, "err", err) - } - - // Don't do anything, if there is nothing to compact (in persistent storage). + // Don't do anything, if there is nothing to compact. h := userDB.Head() if h.NumSeries() == 0 { return nil @@ -2372,11 +2174,7 @@ func (i *Ingester) closeAndDeleteUserTSDBIfIdle(userID string) tsdbCloseCheckRes // At this point there are no more pushes to TSDB, and no possible compaction. Normally TSDB is empty, // but if we're closing TSDB because of tenant deletion mark, then it may still contain some series. // We need to remove these series from series count. - i.persistentSeriesCount.Sub(int64(userDB.Head().NumSeries())) - eph := userDB.getEphemeralStorage() - if eph != nil { - i.ephemeralSeriesCount.Sub(int64(eph.NumSeries())) - } + i.seriesCount.Sub(int64(userDB.Head().NumSeries())) dir := userDB.db.Dir() @@ -2402,9 +2200,6 @@ func (i *Ingester) closeAndDeleteUserTSDBIfIdle(userID string) tsdbCloseCheckRes }() i.metrics.memUsers.Dec() - if eph != nil { - i.metrics.memEphemeralUsers.Dec() - } i.tsdbMetrics.removeRegistryForUser(userID) i.deleteUserMetadata(userID) @@ -2542,10 +2337,6 @@ func newIngestErrSampleTimestampTooOld(timestamp model.Time, labels []mimirpb.La return newIngestErr(globalerror.SampleTimestampTooOld, "the sample has been rejected because its timestamp is too old", timestamp, labels) } -func newEphemeralIngestErrSampleTimestampTooOld(timestamp model.Time, labels []mimirpb.LabelAdapter) error { - return newIngestErr(globalerror.EphemeralSampleTimestampTooOld, "the sample for ephemeral series has been rejected because its timestamp is too old", timestamp, labels) -} - func newIngestErrSampleTimestampTooOldOOOEnabled(timestamp model.Time, labels []mimirpb.LabelAdapter, oooTimeWindow model.Duration) error { return newIngestErr(globalerror.SampleTimestampTooOld, fmt.Sprintf("the sample has been rejected because another sample with a more recent timestamp has already been ingested and this sample is beyond the out-of-order time window of %s", oooTimeWindow.String()), timestamp, labels) } @@ -2554,18 +2345,10 @@ func newIngestErrSampleOutOfOrder(timestamp model.Time, labels []mimirpb.LabelAd return newIngestErr(globalerror.SampleOutOfOrder, "the sample has been rejected because another sample with a more recent timestamp has already been ingested and out-of-order samples are not allowed", timestamp, labels) } -func newEphemeralIngestErrSampleOutOfOrder(timestamp model.Time, labels []mimirpb.LabelAdapter) error { - return newIngestErr(globalerror.EphemeralSampleOutOfOrder, "the 
sample for ephemeral series has been rejected because another sample with a more recent timestamp has already been ingested and out-of-order samples are not allowed", timestamp, labels) -} - func newIngestErrSampleDuplicateTimestamp(timestamp model.Time, labels []mimirpb.LabelAdapter) error { return newIngestErr(globalerror.SampleDuplicateTimestamp, "the sample has been rejected because another sample with the same timestamp, but a different value, has already been ingested", timestamp, labels) } -func newEphemeralIngestErrSampleDuplicateTimestamp(timestamp model.Time, labels []mimirpb.LabelAdapter) error { - return newIngestErr(globalerror.EphemeralSampleDuplicateTimestamp, "the sample for ephemeral series has been rejected because another sample with the same timestamp, but a different value, has already been ingested", timestamp, labels) -} - func newIngestErrExemplarMissingSeries(timestamp model.Time, seriesLabels, exemplarLabels []mimirpb.LabelAdapter) error { return fmt.Errorf("%v. The affected exemplar is %s with timestamp %s for series %s", globalerror.ExemplarSeriesMissing.Message("the exemplar has been rejected because the related series has not been ingested yet"), @@ -2639,7 +2422,6 @@ func (i *Ingester) Push(ctx context.Context, req *mimirpb.WriteRequest) (*mimirp pushReq := push.NewParsedRequest(req) pushReq.AddCleanup(func() { mimirpb.ReuseSlice(req.Timeseries) - mimirpb.ReuseSlice(req.EphemeralTimeseries) }) return i.PushWithCleanup(ctx, pushReq) } @@ -2836,46 +2618,3 @@ func (i *Ingester) UserRegistryHandler(w http.ResponseWriter, r *http.Request) { Timeout: 10 * time.Second, }).ServeHTTP(w, r) } - -// findStorageLabelMatcher returns value of storage label matcher and its index, if it exists. -func findStorageLabelMatcher(matchers []*labels.Matcher) (string, int, error) { - resultVal, resultIdx := "", -1 - - for idx, matcher := range matchers { - if matcher.Name == StorageLabelName { - if resultIdx >= 0 { - return "", idx, errMultipleStorageMatchersFound - } - if matcher.Type != labels.MatchEqual { - return "", idx, errInvalidStorageMatcherType - } - resultVal = matcher.Value - resultIdx = idx - } - } - - return resultVal, resultIdx, nil -} - -// This function returns the storage type (PersistentStorageLabelValue or EphemeralStorageLabelValue) from label matchers. -// If storage label is not found, returns PersistentStorageLabelValue. -// If storage label matcher is invalid (wrong type or value), returns error. -// Returned matchers have storage label matcher removed (original slice is reused). -func removeStorageMatcherAndGetStorageType(matchers []*labels.Matcher) (storageType string, filtered []*labels.Matcher, _ error) { - val, idx, err := findStorageLabelMatcher(matchers) - if err != nil { - return PersistentStorageLabelValue, matchers, err - } - if idx < 0 { - return PersistentStorageLabelValue, matchers, nil - } - - if val != PersistentStorageLabelValue && val != EphemeralStorageLabelValue { - return val, matchers, fmt.Errorf(errInvalidStorageLabelValue, val) - } - - // Prepare slice without storage matcher. 
- copy(matchers[idx:], matchers[idx+1:]) - filtered = matchers[:len(matchers)-1] - return val, filtered, nil -} diff --git a/pkg/ingester/ingester_test.go b/pkg/ingester/ingester_test.go index dbd78c9b30a..ac839b00058 100644 --- a/pkg/ingester/ingester_test.go +++ b/pkg/ingester/ingester_test.go @@ -63,7 +63,6 @@ import ( "github.com/grafana/mimir/pkg/usagestats" "github.com/grafana/mimir/pkg/util" "github.com/grafana/mimir/pkg/util/chunkcompat" - "github.com/grafana/mimir/pkg/util/globalerror" util_math "github.com/grafana/mimir/pkg/util/math" "github.com/grafana/mimir/pkg/util/push" "github.com/grafana/mimir/pkg/util/validation" @@ -3351,9 +3350,9 @@ func TestIngester_dontShipBlocksWhenTenantDeletionMarkerIsPresent(t *testing.T) }) pushSingleSampleWithMetadata(t, i) - require.Equal(t, int64(1), i.persistentSeriesCount.Load()) + require.Equal(t, int64(1), i.seriesCount.Load()) i.compactBlocks(context.Background(), true, nil) - require.Equal(t, int64(0), i.persistentSeriesCount.Load()) + require.Equal(t, int64(0), i.seriesCount.Load()) i.shipBlocks(context.Background(), nil) numObjects := len(bucket.Objects()) @@ -3368,9 +3367,9 @@ func TestIngester_dontShipBlocksWhenTenantDeletionMarkerIsPresent(t *testing.T) // After writing tenant deletion mark, pushSingleSampleWithMetadata(t, i) - require.Equal(t, int64(1), i.persistentSeriesCount.Load()) + require.Equal(t, int64(1), i.seriesCount.Load()) i.compactBlocks(context.Background(), true, nil) - require.Equal(t, int64(0), i.persistentSeriesCount.Load()) + require.Equal(t, int64(0), i.seriesCount.Load()) i.shipBlocks(context.Background(), nil) numObjectsAfterMarkingTenantForDeletion := len(bucket.Objects()) @@ -3402,7 +3401,7 @@ func TestIngester_seriesCountIsCorrectAfterClosingTSDBForDeletedTenant(t *testin }) pushSingleSampleWithMetadata(t, i) - require.Equal(t, int64(1), i.persistentSeriesCount.Load()) + require.Equal(t, int64(1), i.seriesCount.Load()) // We call shipBlocks to check for deletion marker (it happens inside this method). i.shipBlocks(context.Background(), nil) @@ -3417,7 +3416,7 @@ func TestIngester_seriesCountIsCorrectAfterClosingTSDBForDeletedTenant(t *testin require.Equal(t, tsdbTenantMarkedForDeletion, i.closeAndDeleteUserTSDBIfIdle(userID)) // Closing should decrease series count. - require.Equal(t, int64(0), i.persistentSeriesCount.Load()) + require.Equal(t, int64(0), i.seriesCount.Load()) } func TestIngester_closeAndDeleteUserTSDBIfIdle_shouldNotCloseTSDBIfShippingIsInProgress(t *testing.T) { @@ -4104,7 +4103,7 @@ func TestIngesterCompactAndCloseIdleTSDB(t *testing.T) { pushSingleSampleWithMetadata(t, i) i.updateActiveSeries(time.Now()) - require.Equal(t, int64(1), i.persistentSeriesCount.Load()) + require.Equal(t, int64(1), i.seriesCount.Load()) metricsToCheck := []string{"cortex_ingester_memory_series_created_total", "cortex_ingester_memory_series_removed_total", "cortex_ingester_memory_users", "cortex_ingester_active_series", "cortex_ingester_memory_metadata", "cortex_ingester_memory_metadata_created_total", "cortex_ingester_memory_metadata_removed_total"} @@ -4144,7 +4143,7 @@ func TestIngesterCompactAndCloseIdleTSDB(t *testing.T) { require.Greater(t, testutil.ToFloat64(i.metrics.idleTsdbChecks.WithLabelValues(string(tsdbIdleClosed))), float64(0)) i.updateActiveSeries(time.Now()) - require.Equal(t, int64(0), i.persistentSeriesCount.Load()) // Flushing removed all series from memory. + require.Equal(t, int64(0), i.seriesCount.Load()) // Flushing removed all series from memory. 
// Verify that user has disappeared from metrics. require.NoError(t, testutil.GatherAndCompare(r, strings.NewReader(` @@ -4837,10 +4836,9 @@ func TestIngester_instanceLimitsMetrics(t *testing.T) { reg := prometheus.NewRegistry() l := InstanceLimits{ - MaxIngestionRate: 10, - MaxInMemoryTenants: 20, - MaxInMemorySeries: 30, - MaxInMemoryEphemeralSeries: 50, + MaxIngestionRate: 10, + MaxInMemoryTenants: 20, + MaxInMemorySeries: 30, } cfg := defaultIngesterTestConfig(t) @@ -4858,7 +4856,6 @@ func TestIngester_instanceLimitsMetrics(t *testing.T) { cortex_ingester_instance_limits{limit="max_ingestion_rate"} 10 cortex_ingester_instance_limits{limit="max_series"} 30 cortex_ingester_instance_limits{limit="max_tenants"} 20 - cortex_ingester_instance_limits{limit="max_ephemeral_series"} 50 `), "cortex_ingester_instance_limits")) l.MaxInMemoryTenants = 1000 @@ -4871,7 +4868,6 @@ func TestIngester_instanceLimitsMetrics(t *testing.T) { cortex_ingester_instance_limits{limit="max_ingestion_rate"} 10 cortex_ingester_instance_limits{limit="max_series"} 2000 cortex_ingester_instance_limits{limit="max_tenants"} 1000 - cortex_ingester_instance_limits{limit="max_ephemeral_series"} 50 `), "cortex_ingester_instance_limits")) } @@ -6426,10 +6422,6 @@ func TestNewIngestErrMsgs(t *testing.T) { err: newIngestErrSampleTimestampTooOld(timestamp, metricLabelAdapters), msg: `the sample has been rejected because its timestamp is too old (err-mimir-sample-timestamp-too-old). The affected sample has timestamp 1970-01-19T05:30:43.969Z and is from series {__name__="test"}`, }, - "newEphemeralIngestErrSampleTimestampTooOld": { - err: newEphemeralIngestErrSampleTimestampTooOld(timestamp, metricLabelAdapters), - msg: `the sample for ephemeral series has been rejected because its timestamp is too old (err-mimir-ephemeral-sample-timestamp-too-old). The affected sample has timestamp 1970-01-19T05:30:43.969Z and is from series {__name__="test"}`, - }, "newIngestErrSampleTimestampTooOld_out_of_order_enabled": { err: newIngestErrSampleTimestampTooOldOOOEnabled(timestamp, metricLabelAdapters, model.Duration(2*time.Hour)), msg: `the sample has been rejected because another sample with a more recent timestamp has already been ingested and this sample is beyond the out-of-order time window of 2h (err-mimir-sample-timestamp-too-old). The affected sample has timestamp 1970-01-19T05:30:43.969Z and is from series {__name__="test"}`, @@ -6438,18 +6430,10 @@ func TestNewIngestErrMsgs(t *testing.T) { err: newIngestErrSampleOutOfOrder(timestamp, metricLabelAdapters), msg: `the sample has been rejected because another sample with a more recent timestamp has already been ingested and out-of-order samples are not allowed (err-mimir-sample-out-of-order). The affected sample has timestamp 1970-01-19T05:30:43.969Z and is from series {__name__="test"}`, }, - "newEphemeralIngestErrSampleOutOfOrder": { - err: newEphemeralIngestErrSampleOutOfOrder(timestamp, metricLabelAdapters), - msg: `the sample for ephemeral series has been rejected because another sample with a more recent timestamp has already been ingested and out-of-order samples are not allowed (err-mimir-ephemeral-sample-out-of-order). 
The affected sample has timestamp 1970-01-19T05:30:43.969Z and is from series {__name__="test"}`, - }, "newIngestErrSampleDuplicateTimestamp": { err: newIngestErrSampleDuplicateTimestamp(timestamp, metricLabelAdapters), msg: `the sample has been rejected because another sample with the same timestamp, but a different value, has already been ingested (err-mimir-sample-duplicate-timestamp). The affected sample has timestamp 1970-01-19T05:30:43.969Z and is from series {__name__="test"}`, }, - "newEphemeralIngestErrSampleDuplicateTimestamp": { - err: newEphemeralIngestErrSampleDuplicateTimestamp(timestamp, metricLabelAdapters), - msg: `the sample for ephemeral series has been rejected because another sample with the same timestamp, but a different value, has already been ingested (err-mimir-ephemeral-sample-duplicate-timestamp). The affected sample has timestamp 1970-01-19T05:30:43.969Z and is from series {__name__="test"}`, - }, "newIngestErrExemplarMissingSeries": { err: newIngestErrExemplarMissingSeries(timestamp, metricLabelAdapters, []mimirpb.LabelAdapter{{Name: "traceID", Value: "123"}}), msg: `the exemplar has been rejected because the related series has not been ingested yet (err-mimir-exemplar-series-missing). The affected exemplar is {traceID="123"} with timestamp 1970-01-19T05:30:43.969Z for series {__name__="test"}`, @@ -6463,1072 +6447,6 @@ func TestNewIngestErrMsgs(t *testing.T) { } } -func TestIngester_PushAndQueryEphemeral(t *testing.T) { - metricLabelAdapters := []mimirpb.LabelAdapter{{Name: labels.MetricName, Value: "test"}} - metricLabels := mimirpb.FromLabelAdaptersToLabels(metricLabelAdapters) - metricLabelSet := mimirpb.FromLabelAdaptersToMetric(metricLabelAdapters) - - metricNames := []string{ - "cortex_ingester_ingested_ephemeral_samples_total", - "cortex_ingester_ingested_ephemeral_samples_failures_total", - "cortex_ingester_memory_ephemeral_series", - "cortex_ingester_memory_series", - "cortex_ingester_memory_users", - "cortex_ingester_ephemeral_series_created_total", - "cortex_ingester_ephemeral_series_removed_total", - "cortex_discarded_samples_total", - "cortex_ingester_memory_ephemeral_users", - "cortex_ingester_queries_ephemeral_total", - "cortex_ingester_queried_ephemeral_samples", - "cortex_ingester_queried_ephemeral_series", - } - userID := "test" - - now := time.Now() - - tests := map[string]struct { - reqs []*mimirpb.WriteRequest - additionalMetrics []string - maxEphemeralSeriesLimit int - expectedIngestedEphemeral model.Matrix - expectedIngestedPersistent model.Matrix - expectedErr error - expectedMetrics string - }{ - "should succeed on pushing valid series to ephemeral storage": { - reqs: []*mimirpb.WriteRequest{ - ToWriteRequestEphemeral( - []labels.Labels{metricLabels}, - []mimirpb.Sample{{Value: 1, TimestampMs: now.UnixMilli() - 10}}, - nil, - nil, - mimirpb.API), - - ToWriteRequestEphemeral( - []labels.Labels{metricLabels}, - []mimirpb.Sample{{Value: 2, TimestampMs: now.UnixMilli()}}, - nil, - nil, - mimirpb.API), - }, - maxEphemeralSeriesLimit: 10, - expectedErr: nil, - expectedIngestedEphemeral: model.Matrix{ - &model.SampleStream{Metric: metricLabelSet, Values: []model.SamplePair{{Value: 1, Timestamp: model.Time(now.UnixMilli() - 10)}, {Value: 2, Timestamp: model.Time(now.UnixMilli())}}}, - }, - expectedMetrics: ` - # HELP cortex_ingester_ingested_ephemeral_samples_total The total number of samples ingested per user for ephemeral series. 
- # TYPE cortex_ingester_ingested_ephemeral_samples_total counter - cortex_ingester_ingested_ephemeral_samples_total{user="test"} 2 - - # HELP cortex_ingester_ingested_ephemeral_samples_failures_total The total number of samples that errored on ingestion per user for ephemeral series. - # TYPE cortex_ingester_ingested_ephemeral_samples_failures_total counter - cortex_ingester_ingested_ephemeral_samples_failures_total{user="test"} 0 - - # HELP cortex_ingester_ephemeral_series_created_total The total number of series in ephemeral storage that were created per user. - # TYPE cortex_ingester_ephemeral_series_created_total counter - cortex_ingester_ephemeral_series_created_total{user="test"} 1 - - # HELP cortex_ingester_ephemeral_series_removed_total The total number of series in ephemeral storage that were removed per user. - # TYPE cortex_ingester_ephemeral_series_removed_total counter - cortex_ingester_ephemeral_series_removed_total{user="test"} 0 - - # HELP cortex_ingester_memory_ephemeral_series The current number of ephemeral series in memory. - # TYPE cortex_ingester_memory_ephemeral_series gauge - cortex_ingester_memory_ephemeral_series 1 - - # HELP cortex_ingester_memory_series The current number of series in memory. - # TYPE cortex_ingester_memory_series gauge - cortex_ingester_memory_series 0 - - # HELP cortex_ingester_memory_users The current number of users in memory. - # TYPE cortex_ingester_memory_users gauge - cortex_ingester_memory_users 1 - - # HELP cortex_ingester_memory_ephemeral_users The current number of users with ephemeral storage in memory. - # TYPE cortex_ingester_memory_ephemeral_users gauge - cortex_ingester_memory_ephemeral_users 1 - - # HELP cortex_ingester_queried_ephemeral_samples The total number of samples from ephemeral storage returned per query. - # TYPE cortex_ingester_queried_ephemeral_samples histogram - cortex_ingester_queried_ephemeral_samples_bucket{le="10"} 1 - cortex_ingester_queried_ephemeral_samples_bucket{le="80"} 1 - cortex_ingester_queried_ephemeral_samples_bucket{le="640"} 1 - cortex_ingester_queried_ephemeral_samples_bucket{le="5120"} 1 - cortex_ingester_queried_ephemeral_samples_bucket{le="40960"} 1 - cortex_ingester_queried_ephemeral_samples_bucket{le="327680"} 1 - cortex_ingester_queried_ephemeral_samples_bucket{le="2.62144e+06"} 1 - cortex_ingester_queried_ephemeral_samples_bucket{le="2.097152e+07"} 1 - cortex_ingester_queried_ephemeral_samples_bucket{le="+Inf"} 1 - cortex_ingester_queried_ephemeral_samples_sum 2 - cortex_ingester_queried_ephemeral_samples_count 1 - - # HELP cortex_ingester_queried_ephemeral_series The total number of ephemeral series returned from queries. - # TYPE cortex_ingester_queried_ephemeral_series histogram - cortex_ingester_queried_ephemeral_series_bucket{le="10"} 1 - cortex_ingester_queried_ephemeral_series_bucket{le="80"} 1 - cortex_ingester_queried_ephemeral_series_bucket{le="640"} 1 - cortex_ingester_queried_ephemeral_series_bucket{le="5120"} 1 - cortex_ingester_queried_ephemeral_series_bucket{le="40960"} 1 - cortex_ingester_queried_ephemeral_series_bucket{le="327680"} 1 - cortex_ingester_queried_ephemeral_series_bucket{le="+Inf"} 1 - cortex_ingester_queried_ephemeral_series_sum 1 - cortex_ingester_queried_ephemeral_series_count 1 - - # HELP cortex_ingester_queries_ephemeral_total The total number of queries the ingester has handled for ephemeral storage. 
- # TYPE cortex_ingester_queries_ephemeral_total counter - cortex_ingester_queries_ephemeral_total 1 - `, - }, - "old ephemeral samples are discarded": { - reqs: []*mimirpb.WriteRequest{ - ToWriteRequestEphemeral( - []labels.Labels{metricLabels}, - []mimirpb.Sample{{Value: 1, TimestampMs: 100}}, - nil, - nil, - mimirpb.API), - }, - maxEphemeralSeriesLimit: 10, - expectedErr: httpgrpc.Errorf(http.StatusBadRequest, wrapWithUser(newEphemeralIngestErrSampleTimestampTooOld(model.Time(100), mimirpb.FromLabelsToLabelAdapters(metricLabels)), userID).Error()), - expectedIngestedEphemeral: nil, // No returned samples. - expectedMetrics: ` - # HELP cortex_ingester_ingested_ephemeral_samples_total The total number of samples ingested per user for ephemeral series. - # TYPE cortex_ingester_ingested_ephemeral_samples_total counter - cortex_ingester_ingested_ephemeral_samples_total{user="test"} 0 - - # HELP cortex_ingester_ingested_ephemeral_samples_failures_total The total number of samples that errored on ingestion per user for ephemeral series. - # TYPE cortex_ingester_ingested_ephemeral_samples_failures_total counter - cortex_ingester_ingested_ephemeral_samples_failures_total{user="test"} 1 - - # HELP cortex_ingester_ephemeral_series_created_total The total number of series in ephemeral storage that were created per user. - # TYPE cortex_ingester_ephemeral_series_created_total counter - cortex_ingester_ephemeral_series_created_total{user="test"} 0 - - # HELP cortex_ingester_ephemeral_series_removed_total The total number of series in ephemeral storage that were removed per user. - # TYPE cortex_ingester_ephemeral_series_removed_total counter - cortex_ingester_ephemeral_series_removed_total{user="test"} 0 - - # HELP cortex_ingester_memory_ephemeral_series The current number of ephemeral series in memory. - # TYPE cortex_ingester_memory_ephemeral_series gauge - cortex_ingester_memory_ephemeral_series 0 - - # HELP cortex_ingester_memory_series The current number of series in memory. - # TYPE cortex_ingester_memory_series gauge - cortex_ingester_memory_series 0 - - # HELP cortex_ingester_memory_users The current number of users in memory. - # TYPE cortex_ingester_memory_users gauge - cortex_ingester_memory_users 1 - - # HELP cortex_discarded_samples_total The total number of samples that were discarded. - # TYPE cortex_discarded_samples_total counter - cortex_discarded_samples_total{group="",reason="ephemeral-sample-out-of-bounds",user="test"} 1 - - # HELP cortex_ingester_memory_ephemeral_users The current number of users with ephemeral storage in memory. - # TYPE cortex_ingester_memory_ephemeral_users gauge - cortex_ingester_memory_ephemeral_users 1 - - # HELP cortex_ingester_queried_ephemeral_samples The total number of samples from ephemeral storage returned per query. 
- # TYPE cortex_ingester_queried_ephemeral_samples histogram - cortex_ingester_queried_ephemeral_samples_bucket{le="10"} 1 - cortex_ingester_queried_ephemeral_samples_bucket{le="80"} 1 - cortex_ingester_queried_ephemeral_samples_bucket{le="640"} 1 - cortex_ingester_queried_ephemeral_samples_bucket{le="5120"} 1 - cortex_ingester_queried_ephemeral_samples_bucket{le="40960"} 1 - cortex_ingester_queried_ephemeral_samples_bucket{le="327680"} 1 - cortex_ingester_queried_ephemeral_samples_bucket{le="2.62144e+06"} 1 - cortex_ingester_queried_ephemeral_samples_bucket{le="2.097152e+07"} 1 - cortex_ingester_queried_ephemeral_samples_bucket{le="+Inf"} 1 - cortex_ingester_queried_ephemeral_samples_sum 0 - cortex_ingester_queried_ephemeral_samples_count 1 - - # HELP cortex_ingester_queried_ephemeral_series The total number of ephemeral series returned from queries. - # TYPE cortex_ingester_queried_ephemeral_series histogram - cortex_ingester_queried_ephemeral_series_bucket{le="10"} 1 - cortex_ingester_queried_ephemeral_series_bucket{le="80"} 1 - cortex_ingester_queried_ephemeral_series_bucket{le="640"} 1 - cortex_ingester_queried_ephemeral_series_bucket{le="5120"} 1 - cortex_ingester_queried_ephemeral_series_bucket{le="40960"} 1 - cortex_ingester_queried_ephemeral_series_bucket{le="327680"} 1 - cortex_ingester_queried_ephemeral_series_bucket{le="+Inf"} 1 - cortex_ingester_queried_ephemeral_series_sum 0 - cortex_ingester_queried_ephemeral_series_count 1 - - # HELP cortex_ingester_queries_ephemeral_total The total number of queries the ingester has handled for ephemeral storage. - # TYPE cortex_ingester_queries_ephemeral_total counter - cortex_ingester_queries_ephemeral_total 1 - `, - }, - "should fail on out-of-order samples": { - reqs: []*mimirpb.WriteRequest{ - ToWriteRequestEphemeral( - []labels.Labels{metricLabels}, - []mimirpb.Sample{{Value: 2, TimestampMs: now.UnixMilli()}}, - nil, - nil, - mimirpb.API, - ), - - ToWriteRequestEphemeral( - []labels.Labels{metricLabels}, - []mimirpb.Sample{{Value: 1, TimestampMs: now.UnixMilli() - 10}}, - nil, - nil, - mimirpb.API, - ), - }, - maxEphemeralSeriesLimit: 10, - expectedErr: httpgrpc.Errorf(http.StatusBadRequest, wrapWithUser(newEphemeralIngestErrSampleOutOfOrder(model.Time(now.UnixMilli()-10), mimirpb.FromLabelsToLabelAdapters(metricLabels)), userID).Error()), - expectedIngestedEphemeral: model.Matrix{ - &model.SampleStream{Metric: metricLabelSet, Values: []model.SamplePair{{Value: 2, Timestamp: model.Time(now.UnixMilli())}}}, - }, - expectedMetrics: ` - # HELP cortex_ingester_ingested_ephemeral_samples_total The total number of samples ingested per user for ephemeral series. - # TYPE cortex_ingester_ingested_ephemeral_samples_total counter - cortex_ingester_ingested_ephemeral_samples_total{user="test"} 1 - - # HELP cortex_ingester_ingested_ephemeral_samples_failures_total The total number of samples that errored on ingestion per user for ephemeral series. - # TYPE cortex_ingester_ingested_ephemeral_samples_failures_total counter - cortex_ingester_ingested_ephemeral_samples_failures_total{user="test"} 1 - - # HELP cortex_ingester_ephemeral_series_created_total The total number of series in ephemeral storage that were created per user. - # TYPE cortex_ingester_ephemeral_series_created_total counter - cortex_ingester_ephemeral_series_created_total{user="test"} 1 - - # HELP cortex_ingester_ephemeral_series_removed_total The total number of series in ephemeral storage that were removed per user. 
- # TYPE cortex_ingester_ephemeral_series_removed_total counter - cortex_ingester_ephemeral_series_removed_total{user="test"} 0 - - # HELP cortex_ingester_memory_ephemeral_series The current number of ephemeral series in memory. - # TYPE cortex_ingester_memory_ephemeral_series gauge - cortex_ingester_memory_ephemeral_series 1 - - # HELP cortex_ingester_memory_series The current number of series in memory. - # TYPE cortex_ingester_memory_series gauge - cortex_ingester_memory_series 0 - - # HELP cortex_ingester_memory_users The current number of users in memory. - # TYPE cortex_ingester_memory_users gauge - cortex_ingester_memory_users 1 - - # HELP cortex_discarded_samples_total The total number of samples that were discarded. - # TYPE cortex_discarded_samples_total counter - cortex_discarded_samples_total{group="",reason="ephemeral-sample-out-of-order",user="test"} 1 - - # HELP cortex_ingester_memory_ephemeral_users The current number of users with ephemeral storage in memory. - # TYPE cortex_ingester_memory_ephemeral_users gauge - cortex_ingester_memory_ephemeral_users 1 - - # HELP cortex_ingester_queried_ephemeral_samples The total number of samples from ephemeral storage returned per query. - # TYPE cortex_ingester_queried_ephemeral_samples histogram - cortex_ingester_queried_ephemeral_samples_bucket{le="10"} 1 - cortex_ingester_queried_ephemeral_samples_bucket{le="80"} 1 - cortex_ingester_queried_ephemeral_samples_bucket{le="640"} 1 - cortex_ingester_queried_ephemeral_samples_bucket{le="5120"} 1 - cortex_ingester_queried_ephemeral_samples_bucket{le="40960"} 1 - cortex_ingester_queried_ephemeral_samples_bucket{le="327680"} 1 - cortex_ingester_queried_ephemeral_samples_bucket{le="2.62144e+06"} 1 - cortex_ingester_queried_ephemeral_samples_bucket{le="2.097152e+07"} 1 - cortex_ingester_queried_ephemeral_samples_bucket{le="+Inf"} 1 - cortex_ingester_queried_ephemeral_samples_sum 1 - cortex_ingester_queried_ephemeral_samples_count 1 - - # HELP cortex_ingester_queried_ephemeral_series The total number of ephemeral series returned from queries. - # TYPE cortex_ingester_queried_ephemeral_series histogram - cortex_ingester_queried_ephemeral_series_bucket{le="10"} 1 - cortex_ingester_queried_ephemeral_series_bucket{le="80"} 1 - cortex_ingester_queried_ephemeral_series_bucket{le="640"} 1 - cortex_ingester_queried_ephemeral_series_bucket{le="5120"} 1 - cortex_ingester_queried_ephemeral_series_bucket{le="40960"} 1 - cortex_ingester_queried_ephemeral_series_bucket{le="327680"} 1 - cortex_ingester_queried_ephemeral_series_bucket{le="+Inf"} 1 - cortex_ingester_queried_ephemeral_series_sum 1 - cortex_ingester_queried_ephemeral_series_count 1 - - # HELP cortex_ingester_queries_ephemeral_total The total number of queries the ingester has handled for ephemeral storage. - # TYPE cortex_ingester_queries_ephemeral_total counter - cortex_ingester_queries_ephemeral_total 1 - `, - }, - "request with mix of ephemeral and persistent series, with some good and some bad samples plus some metadata": { - reqs: []*mimirpb.WriteRequest{ - { - Source: mimirpb.API, - EphemeralTimeseries: []mimirpb.PreallocTimeseries{{ - TimeSeries: &mimirpb.TimeSeries{ - Labels: []mimirpb.LabelAdapter{{Name: labels.MetricName, Value: "eph_metric2"}}, - Samples: []mimirpb.Sample{ - {TimestampMs: now.UnixMilli(), Value: 100}, // Good sample. Next request will contain sample with lower TS. 
- }, - }, - }}, - }, - - { - Source: mimirpb.API, - Metadata: []*mimirpb.MetricMetadata{ - { - Type: mimirpb.COUNTER, - MetricFamilyName: "per_metric", - Help: "Some help goes here...", - Unit: "light years", - }, - }, - - Timeseries: []mimirpb.PreallocTimeseries{{ - TimeSeries: &mimirpb.TimeSeries{ - Labels: []mimirpb.LabelAdapter{ - {Name: labels.MetricName, Value: "per_metric"}, - }, - Exemplars: []mimirpb.Exemplar{{ - Labels: []mimirpb.LabelAdapter{{Name: "traceID", Value: "123"}}, - TimestampMs: 1000, - Value: 1000, - }}, - Samples: []mimirpb.Sample{ - {TimestampMs: now.UnixMilli(), Value: 100}, - {TimestampMs: now.UnixMilli() + 1000, Value: 200}, - }, - }, - }}, - - EphemeralTimeseries: []mimirpb.PreallocTimeseries{{ - TimeSeries: &mimirpb.TimeSeries{ - Labels: []mimirpb.LabelAdapter{{Name: labels.MetricName, Value: "eph_metric1"}}, - Exemplars: []mimirpb.Exemplar{{ - Labels: []mimirpb.LabelAdapter{{Name: "traceID", Value: "123"}}, - TimestampMs: 1000, - Value: 1000, - }}, - Samples: []mimirpb.Sample{ - {TimestampMs: 100, Value: 100}, // out of bounds, this will be reported as first error - {TimestampMs: now.UnixMilli(), Value: 200}, - {TimestampMs: now.UnixMilli() + 1000, Value: 200}, - {TimestampMs: now.UnixMilli() + 2000, Value: 300}, - }, - }, - }, { - TimeSeries: &mimirpb.TimeSeries{ - Labels: []mimirpb.LabelAdapter{{Name: labels.MetricName, Value: "eph_metric2"}}, - Samples: []mimirpb.Sample{ - {TimestampMs: now.UnixMilli() - 1000, Value: 100}, // out of order (compared to previous request) - {TimestampMs: now.UnixMilli(), Value: 500}, // This sample was sent in previous request, with different value. - {TimestampMs: now.UnixMilli() + 1000, Value: 1000}, - }, - }, - }}, - }}, - maxEphemeralSeriesLimit: 10, - expectedErr: httpgrpc.Errorf(http.StatusBadRequest, wrapWithUser(newEphemeralIngestErrSampleTimestampTooOld(model.Time(100), []mimirpb.LabelAdapter{{Name: labels.MetricName, Value: "eph_metric1"}}), userID).Error()), - expectedIngestedEphemeral: model.Matrix{ - &model.SampleStream{ - Metric: map[model.LabelName]model.LabelValue{labels.MetricName: "eph_metric2"}, - Values: []model.SamplePair{ - {Value: 100, Timestamp: model.Time(now.UnixMilli())}, - {Value: 1000, Timestamp: model.Time(now.UnixMilli() + 1000)}, - }, - }, - &model.SampleStream{ - Metric: map[model.LabelName]model.LabelValue{labels.MetricName: "eph_metric1"}, - Values: []model.SamplePair{ - {Value: 200, Timestamp: model.Time(now.UnixMilli())}, - {Value: 200, Timestamp: model.Time(now.UnixMilli() + 1000)}, - {Value: 300, Timestamp: model.Time(now.UnixMilli() + 2000)}, - }, - }, - }, - expectedIngestedPersistent: model.Matrix{ - &model.SampleStream{ - Metric: map[model.LabelName]model.LabelValue{labels.MetricName: "per_metric"}, - Values: []model.SamplePair{ - {Value: 100, Timestamp: model.Time(now.UnixMilli())}, - {Value: 200, Timestamp: model.Time(now.UnixMilli() + 1000)}, - }, - }, - }, - additionalMetrics: []string{"cortex_ingester_ingested_samples_total", "cortex_ingester_ingested_exemplars_total", "cortex_ingester_ingested_metadata_total"}, - expectedMetrics: ` - # HELP cortex_ingester_ingested_exemplars_total The total number of exemplars ingested. - # TYPE cortex_ingester_ingested_exemplars_total counter - cortex_ingester_ingested_exemplars_total 1 - - # HELP cortex_ingester_ingested_samples_total The total number of samples ingested per user. 
- # TYPE cortex_ingester_ingested_samples_total counter - cortex_ingester_ingested_samples_total{user="test"} 2 - - # HELP cortex_ingester_memory_series The current number of series in memory. - # TYPE cortex_ingester_memory_series gauge - cortex_ingester_memory_series 1 - - # HELP cortex_ingester_memory_users The current number of users in memory. - # TYPE cortex_ingester_memory_users gauge - cortex_ingester_memory_users 1 - - # HELP cortex_ingester_ingested_metadata_total The total number of metadata ingested. - # TYPE cortex_ingester_ingested_metadata_total counter - cortex_ingester_ingested_metadata_total 1 - - # HELP cortex_ingester_ingested_ephemeral_samples_total The total number of samples ingested per user for ephemeral series. - # TYPE cortex_ingester_ingested_ephemeral_samples_total counter - cortex_ingester_ingested_ephemeral_samples_total{user="test"} 5 - - # HELP cortex_ingester_ingested_ephemeral_samples_failures_total The total number of samples that errored on ingestion per user for ephemeral series. - # TYPE cortex_ingester_ingested_ephemeral_samples_failures_total counter - cortex_ingester_ingested_ephemeral_samples_failures_total{user="test"} 3 - - # HELP cortex_ingester_ephemeral_series_created_total The total number of series in ephemeral storage that were created per user. - # TYPE cortex_ingester_ephemeral_series_created_total counter - cortex_ingester_ephemeral_series_created_total{user="test"} 2 - - # HELP cortex_ingester_ephemeral_series_removed_total The total number of series in ephemeral storage that were removed per user. - # TYPE cortex_ingester_ephemeral_series_removed_total counter - cortex_ingester_ephemeral_series_removed_total{user="test"} 0 - - # HELP cortex_ingester_memory_ephemeral_series The current number of ephemeral series in memory. - # TYPE cortex_ingester_memory_ephemeral_series gauge - cortex_ingester_memory_ephemeral_series 2 - - # HELP cortex_discarded_samples_total The total number of samples that were discarded. - # TYPE cortex_discarded_samples_total counter - cortex_discarded_samples_total{group="",reason="ephemeral-sample-out-of-bounds",user="test"} 1 - cortex_discarded_samples_total{group="",reason="ephemeral-sample-out-of-order",user="test"} 1 - cortex_discarded_samples_total{group="",reason="ephemeral-new-value-for-timestamp",user="test"} 1 - - # HELP cortex_ingester_memory_ephemeral_users The current number of users with ephemeral storage in memory. - # TYPE cortex_ingester_memory_ephemeral_users gauge - cortex_ingester_memory_ephemeral_users 1 - - # HELP cortex_ingester_queried_ephemeral_samples The total number of samples from ephemeral storage returned per query. - # TYPE cortex_ingester_queried_ephemeral_samples histogram - cortex_ingester_queried_ephemeral_samples_bucket{le="10"} 1 - cortex_ingester_queried_ephemeral_samples_bucket{le="80"} 1 - cortex_ingester_queried_ephemeral_samples_bucket{le="640"} 1 - cortex_ingester_queried_ephemeral_samples_bucket{le="5120"} 1 - cortex_ingester_queried_ephemeral_samples_bucket{le="40960"} 1 - cortex_ingester_queried_ephemeral_samples_bucket{le="327680"} 1 - cortex_ingester_queried_ephemeral_samples_bucket{le="2.62144e+06"} 1 - cortex_ingester_queried_ephemeral_samples_bucket{le="2.097152e+07"} 1 - cortex_ingester_queried_ephemeral_samples_bucket{le="+Inf"} 1 - cortex_ingester_queried_ephemeral_samples_sum 5 - cortex_ingester_queried_ephemeral_samples_count 1 - - # HELP cortex_ingester_queried_ephemeral_series The total number of ephemeral series returned from queries. 
- # TYPE cortex_ingester_queried_ephemeral_series histogram - cortex_ingester_queried_ephemeral_series_bucket{le="10"} 1 - cortex_ingester_queried_ephemeral_series_bucket{le="80"} 1 - cortex_ingester_queried_ephemeral_series_bucket{le="640"} 1 - cortex_ingester_queried_ephemeral_series_bucket{le="5120"} 1 - cortex_ingester_queried_ephemeral_series_bucket{le="40960"} 1 - cortex_ingester_queried_ephemeral_series_bucket{le="327680"} 1 - cortex_ingester_queried_ephemeral_series_bucket{le="+Inf"} 1 - cortex_ingester_queried_ephemeral_series_sum 2 - cortex_ingester_queried_ephemeral_series_count 1 - - # HELP cortex_ingester_queries_ephemeral_total The total number of queries the ingester has handled for ephemeral storage. - # TYPE cortex_ingester_queries_ephemeral_total counter - cortex_ingester_queries_ephemeral_total 1 - `, - }, - "only persistent series -- does not initialize ephemeral storage": { - reqs: []*mimirpb.WriteRequest{ - mimirpb.ToWriteRequest( - []labels.Labels{metricLabels}, - []mimirpb.Sample{{Value: 1, TimestampMs: now.UnixMilli() - 10}}, - nil, - nil, - mimirpb.API), - }, - expectedErr: nil, - expectedIngestedPersistent: model.Matrix{ - &model.SampleStream{ - Metric: metricLabelSet, - Values: []model.SamplePair{ - {Value: 1, Timestamp: model.Time(now.UnixMilli() - 10)}, - }, - }, - }, - expectedMetrics: ` - # HELP cortex_ingester_memory_ephemeral_series The current number of ephemeral series in memory. - # TYPE cortex_ingester_memory_ephemeral_series gauge - cortex_ingester_memory_ephemeral_series 0 - - # HELP cortex_ingester_memory_series The current number of series in memory. - # TYPE cortex_ingester_memory_series gauge - cortex_ingester_memory_series 1 - - # HELP cortex_ingester_memory_users The current number of users in memory. - # TYPE cortex_ingester_memory_users gauge - cortex_ingester_memory_users 1 - - # HELP cortex_ingester_memory_ephemeral_users The current number of users with ephemeral storage in memory. - # TYPE cortex_ingester_memory_ephemeral_users gauge - cortex_ingester_memory_ephemeral_users 0 - - # HELP cortex_ingester_queried_ephemeral_samples The total number of samples from ephemeral storage returned per query. - # TYPE cortex_ingester_queried_ephemeral_samples histogram - cortex_ingester_queried_ephemeral_samples_bucket{le="10"} 1 - cortex_ingester_queried_ephemeral_samples_bucket{le="80"} 1 - cortex_ingester_queried_ephemeral_samples_bucket{le="640"} 1 - cortex_ingester_queried_ephemeral_samples_bucket{le="5120"} 1 - cortex_ingester_queried_ephemeral_samples_bucket{le="40960"} 1 - cortex_ingester_queried_ephemeral_samples_bucket{le="327680"} 1 - cortex_ingester_queried_ephemeral_samples_bucket{le="2.62144e+06"} 1 - cortex_ingester_queried_ephemeral_samples_bucket{le="2.097152e+07"} 1 - cortex_ingester_queried_ephemeral_samples_bucket{le="+Inf"} 1 - cortex_ingester_queried_ephemeral_samples_sum 0 - cortex_ingester_queried_ephemeral_samples_count 1 - - # HELP cortex_ingester_queried_ephemeral_series The total number of ephemeral series returned from queries. 
- # TYPE cortex_ingester_queried_ephemeral_series histogram - cortex_ingester_queried_ephemeral_series_bucket{le="10"} 1 - cortex_ingester_queried_ephemeral_series_bucket{le="80"} 1 - cortex_ingester_queried_ephemeral_series_bucket{le="640"} 1 - cortex_ingester_queried_ephemeral_series_bucket{le="5120"} 1 - cortex_ingester_queried_ephemeral_series_bucket{le="40960"} 1 - cortex_ingester_queried_ephemeral_series_bucket{le="327680"} 1 - cortex_ingester_queried_ephemeral_series_bucket{le="+Inf"} 1 - cortex_ingester_queried_ephemeral_series_sum 0 - cortex_ingester_queried_ephemeral_series_count 1 - - # HELP cortex_ingester_queries_ephemeral_total The total number of queries the ingester has handled for ephemeral storage. - # TYPE cortex_ingester_queries_ephemeral_total counter - cortex_ingester_queries_ephemeral_total 1 - `, - }, - "ephemeral storage disabled": { - reqs: []*mimirpb.WriteRequest{ - { - Source: mimirpb.API, - EphemeralTimeseries: []mimirpb.PreallocTimeseries{{ - TimeSeries: &mimirpb.TimeSeries{ - Labels: []mimirpb.LabelAdapter{{Name: labels.MetricName, Value: "test"}}, - Samples: []mimirpb.Sample{ - {TimestampMs: now.UnixMilli(), Value: 100}, - {TimestampMs: now.UnixMilli() + 1, Value: 100}, - {TimestampMs: now.UnixMilli() + 2, Value: 100}, - }, - }, - }}, - }, - }, - maxEphemeralSeriesLimit: 0, - expectedErr: httpgrpc.Errorf(http.StatusBadRequest, wrapWithUser(errors.New("ephemeral storage is not enabled for user (err-mimir-ephemeral-storage-not-enabled-for-user)"), userID).Error()), - expectedMetrics: ` - # HELP cortex_ingester_memory_ephemeral_series The current number of ephemeral series in memory. - # TYPE cortex_ingester_memory_ephemeral_series gauge - cortex_ingester_memory_ephemeral_series 0 - - # HELP cortex_ingester_memory_series The current number of series in memory. - # TYPE cortex_ingester_memory_series gauge - cortex_ingester_memory_series 0 - - # HELP cortex_ingester_memory_users The current number of users in memory. - # TYPE cortex_ingester_memory_users gauge - cortex_ingester_memory_users 1 - - # HELP cortex_ingester_memory_ephemeral_users The current number of users with ephemeral storage in memory. - # TYPE cortex_ingester_memory_ephemeral_users gauge - cortex_ingester_memory_ephemeral_users 0 - - # HELP cortex_ingester_queried_ephemeral_samples The total number of samples from ephemeral storage returned per query. - # TYPE cortex_ingester_queried_ephemeral_samples histogram - cortex_ingester_queried_ephemeral_samples_bucket{le="10"} 1 - cortex_ingester_queried_ephemeral_samples_bucket{le="80"} 1 - cortex_ingester_queried_ephemeral_samples_bucket{le="640"} 1 - cortex_ingester_queried_ephemeral_samples_bucket{le="5120"} 1 - cortex_ingester_queried_ephemeral_samples_bucket{le="40960"} 1 - cortex_ingester_queried_ephemeral_samples_bucket{le="327680"} 1 - cortex_ingester_queried_ephemeral_samples_bucket{le="2.62144e+06"} 1 - cortex_ingester_queried_ephemeral_samples_bucket{le="2.097152e+07"} 1 - cortex_ingester_queried_ephemeral_samples_bucket{le="+Inf"} 1 - cortex_ingester_queried_ephemeral_samples_sum 0 - cortex_ingester_queried_ephemeral_samples_count 1 - - # HELP cortex_ingester_queried_ephemeral_series The total number of ephemeral series returned from queries. 
- # TYPE cortex_ingester_queried_ephemeral_series histogram - cortex_ingester_queried_ephemeral_series_bucket{le="10"} 1 - cortex_ingester_queried_ephemeral_series_bucket{le="80"} 1 - cortex_ingester_queried_ephemeral_series_bucket{le="640"} 1 - cortex_ingester_queried_ephemeral_series_bucket{le="5120"} 1 - cortex_ingester_queried_ephemeral_series_bucket{le="40960"} 1 - cortex_ingester_queried_ephemeral_series_bucket{le="327680"} 1 - cortex_ingester_queried_ephemeral_series_bucket{le="+Inf"} 1 - cortex_ingester_queried_ephemeral_series_sum 0 - cortex_ingester_queried_ephemeral_series_count 1 - - # HELP cortex_ingester_queries_ephemeral_total The total number of queries the ingester has handled for ephemeral storage. - # TYPE cortex_ingester_queries_ephemeral_total counter - cortex_ingester_queries_ephemeral_total 1 - - # HELP cortex_ingester_ingested_ephemeral_samples_failures_total The total number of samples that errored on ingestion per user for ephemeral series. - # TYPE cortex_ingester_ingested_ephemeral_samples_failures_total counter - cortex_ingester_ingested_ephemeral_samples_failures_total{user="test"} 3 - - # HELP cortex_ingester_ingested_ephemeral_samples_total The total number of samples ingested per user for ephemeral series. - # TYPE cortex_ingester_ingested_ephemeral_samples_total counter - cortex_ingester_ingested_ephemeral_samples_total{user="test"} 0 - `, - }, - - "only allow single ephemeral series": { - reqs: []*mimirpb.WriteRequest{ - ToWriteRequestEphemeral( - []labels.Labels{metricLabels}, - []mimirpb.Sample{{Value: 1, TimestampMs: now.UnixMilli()}}, - nil, - nil, - mimirpb.API), - - ToWriteRequestEphemeral( - []labels.Labels{{{Name: labels.MetricName, Value: "second metric"}}}, - []mimirpb.Sample{{Value: 2, TimestampMs: now.UnixMilli()}}, - nil, - nil, - mimirpb.API), - }, - maxEphemeralSeriesLimit: 1, - expectedErr: httpgrpc.Errorf(http.StatusBadRequest, wrapWithUser(errors.New(globalerror.MaxEphemeralSeriesPerUser.MessageWithPerTenantLimitConfig(fmt.Sprintf("per-user ephemeral series limit of %d exceeded", 1), validation.MaxEphemeralSeriesPerUserFlag)), userID).Error()), - expectedIngestedEphemeral: model.Matrix{ - &model.SampleStream{Metric: metricLabelSet, Values: []model.SamplePair{{Value: 1, Timestamp: model.Time(now.UnixMilli())}}}, - }, - expectedMetrics: ` - # HELP cortex_ingester_ingested_ephemeral_samples_total The total number of samples ingested per user for ephemeral series. - # TYPE cortex_ingester_ingested_ephemeral_samples_total counter - cortex_ingester_ingested_ephemeral_samples_total{user="test"} 1 - - # HELP cortex_ingester_ingested_ephemeral_samples_failures_total The total number of samples that errored on ingestion per user for ephemeral series. - # TYPE cortex_ingester_ingested_ephemeral_samples_failures_total counter - cortex_ingester_ingested_ephemeral_samples_failures_total{user="test"} 1 - - # HELP cortex_ingester_ephemeral_series_created_total The total number of series in ephemeral storage that were created per user. - # TYPE cortex_ingester_ephemeral_series_created_total counter - cortex_ingester_ephemeral_series_created_total{user="test"} 1 - - # HELP cortex_ingester_ephemeral_series_removed_total The total number of series in ephemeral storage that were removed per user. - # TYPE cortex_ingester_ephemeral_series_removed_total counter - cortex_ingester_ephemeral_series_removed_total{user="test"} 0 - - # HELP cortex_ingester_memory_ephemeral_series The current number of ephemeral series in memory. 
- # TYPE cortex_ingester_memory_ephemeral_series gauge - cortex_ingester_memory_ephemeral_series 1 - - # HELP cortex_ingester_memory_series The current number of series in memory. - # TYPE cortex_ingester_memory_series gauge - cortex_ingester_memory_series 0 - - # HELP cortex_ingester_memory_users The current number of users in memory. - # TYPE cortex_ingester_memory_users gauge - cortex_ingester_memory_users 1 - - # HELP cortex_ingester_memory_ephemeral_users The current number of users with ephemeral storage in memory. - # TYPE cortex_ingester_memory_ephemeral_users gauge - cortex_ingester_memory_ephemeral_users 1 - - # HELP cortex_ingester_queried_ephemeral_samples The total number of samples from ephemeral storage returned per query. - # TYPE cortex_ingester_queried_ephemeral_samples histogram - cortex_ingester_queried_ephemeral_samples_bucket{le="10"} 1 - cortex_ingester_queried_ephemeral_samples_bucket{le="80"} 1 - cortex_ingester_queried_ephemeral_samples_bucket{le="640"} 1 - cortex_ingester_queried_ephemeral_samples_bucket{le="5120"} 1 - cortex_ingester_queried_ephemeral_samples_bucket{le="40960"} 1 - cortex_ingester_queried_ephemeral_samples_bucket{le="327680"} 1 - cortex_ingester_queried_ephemeral_samples_bucket{le="2.62144e+06"} 1 - cortex_ingester_queried_ephemeral_samples_bucket{le="2.097152e+07"} 1 - cortex_ingester_queried_ephemeral_samples_bucket{le="+Inf"} 1 - cortex_ingester_queried_ephemeral_samples_sum 1 - cortex_ingester_queried_ephemeral_samples_count 1 - - # HELP cortex_ingester_queried_ephemeral_series The total number of ephemeral series returned from queries. - # TYPE cortex_ingester_queried_ephemeral_series histogram - cortex_ingester_queried_ephemeral_series_bucket{le="10"} 1 - cortex_ingester_queried_ephemeral_series_bucket{le="80"} 1 - cortex_ingester_queried_ephemeral_series_bucket{le="640"} 1 - cortex_ingester_queried_ephemeral_series_bucket{le="5120"} 1 - cortex_ingester_queried_ephemeral_series_bucket{le="40960"} 1 - cortex_ingester_queried_ephemeral_series_bucket{le="327680"} 1 - cortex_ingester_queried_ephemeral_series_bucket{le="+Inf"} 1 - cortex_ingester_queried_ephemeral_series_sum 1 - cortex_ingester_queried_ephemeral_series_count 1 - - # HELP cortex_ingester_queries_ephemeral_total The total number of queries the ingester has handled for ephemeral storage. - # TYPE cortex_ingester_queries_ephemeral_total counter - cortex_ingester_queries_ephemeral_total 1 - - # HELP cortex_discarded_samples_total The total number of samples that were discarded. 
- # TYPE cortex_discarded_samples_total counter - cortex_discarded_samples_total{group="",reason="ephemeral-per_user_series_limit",user="test"} 1 - `, - }, - } - - for testName, testData := range tests { - t.Run(testName, func(t *testing.T) { - registry := prometheus.NewRegistry() - - // Create a mocked ingester - cfg := defaultIngesterTestConfig(t) - cfg.IngesterRing.ReplicationFactor = 1 - cfg.ActiveSeriesMetricsEnabled = false - limits := defaultLimitsTestConfig() - limits.MaxGlobalExemplarsPerUser = 100 - limits.MaxEphemeralSeriesPerUser = testData.maxEphemeralSeriesLimit - limits.OutOfOrderTimeWindow = model.Duration(time.Minute * 10) - - i, err := prepareIngesterWithBlocksStorageAndLimits(t, cfg, limits, "", registry) - require.NoError(t, err) - require.NoError(t, services.StartAndAwaitRunning(context.Background(), i)) - defer services.StopAndAwaitTerminated(context.Background(), i) //nolint:errcheck - - ctx := user.InjectOrgID(context.Background(), userID) - - // Wait until the ingester is healthy - test.Poll(t, 100*time.Millisecond, 1, func() interface{} { - return i.lifecycler.HealthyInstancesCount() - }) - - // Push timeseries - for idx, req := range testData.reqs { - // Push metrics to the ingester. Override the default cleanup method of mimirpb.ReuseSlice with a no-op one. - _, err := i.PushWithCleanup(ctx, push.NewParsedRequest(req)) - - // We expect no error on any request except the last one - // which may error (and in that case we assert on it) - if idx < len(testData.reqs)-1 { - assert.NoError(t, err) - } else { - assert.Equal(t, testData.expectedErr, err) - } - } - - verifyIngestedSamples := func(t *testing.T, matchers []*client.LabelMatcher, expected model.Matrix) { - s := &stream{ctx: ctx} - err = i.QueryStream(&client.QueryRequest{ - StartTimestampMs: math.MinInt64, - EndTimestampMs: math.MaxInt64, - Matchers: matchers, - }, s) - require.NoError(t, err) - - res, err := chunkcompat.StreamsToMatrix(model.Earliest, model.Latest, s.responses) - require.NoError(t, err) - if len(res) == 0 { - res = nil - } - assert.Equal(t, expected, res) - } - - // Verify ephemeral samples. - verifyIngestedSamples(t, []*client.LabelMatcher{ - {Type: client.REGEX_MATCH, Name: labels.MetricName, Value: ".*"}, - {Type: client.EQUAL, Name: StorageLabelName, Value: EphemeralStorageLabelValue}, - }, testData.expectedIngestedEphemeral) - - // Verify persistent samples. - verifyIngestedSamples(t, []*client.LabelMatcher{ - {Type: client.REGEX_MATCH, Name: labels.MetricName, Value: ".*"}, - }, testData.expectedIngestedPersistent) - - // Verify persistent samples again, this time with storage label. - verifyIngestedSamples(t, []*client.LabelMatcher{ - {Type: client.REGEX_MATCH, Name: labels.MetricName, Value: ".*"}, - {Type: client.EQUAL, Name: StorageLabelName, Value: PersistentStorageLabelValue}, - }, testData.expectedIngestedPersistent) - - // Check tracked Prometheus metrics - err = testutil.GatherAndCompare(registry, strings.NewReader(testData.expectedMetrics), append(metricNames, testData.additionalMetrics...)...) 
- assert.NoError(t, err) - }) - } -} - -func ToWriteRequestEphemeral(lbls []labels.Labels, samples []mimirpb.Sample, exemplars []*mimirpb.Exemplar, metadata []*mimirpb.MetricMetadata, source mimirpb.WriteRequest_SourceEnum) *mimirpb.WriteRequest { - req := mimirpb.ToWriteRequest(lbls, samples, exemplars, metadata, source) - req.EphemeralTimeseries = req.Timeseries - req.Timeseries = nil - return req -} - -func TestIngesterTruncationOfEphemeralSeries(t *testing.T) { - cfg := defaultIngesterTestConfig(t) - cfg.BlocksStorageConfig.EphemeralTSDB.Retention = 10 * time.Minute - cfg.IngesterRing.ReplicationFactor = 1 // for computing limits. - - limits := defaultLimitsTestConfig() - limits.MaxEphemeralSeriesPerUser = 1 - - reg := prometheus.NewPedanticRegistry() - i, err := prepareIngesterWithBlocksStorageAndLimits(t, cfg, limits, "", reg) - require.NoError(t, err) - - require.NoError(t, services.StartAndAwaitRunning(context.Background(), i)) - t.Cleanup(func() { - _ = services.StopAndAwaitTerminated(context.Background(), i) - }) - - // Wait until it's healthy - test.Poll(t, 1*time.Second, 1, func() interface{} { - return i.lifecycler.HealthyInstancesCount() - }) - - now := time.Now() - - // Push ephemeral series with good timestamps (in last 10 minutes) - ctx := user.InjectOrgID(context.Background(), userID) - oldTS := now.Add(-9 * time.Minute).UnixMilli() - // This is a function, because i.Push() cleans up the passed request, but we want to reuse it. - oldReq := func() *mimirpb.WriteRequest { - return ToWriteRequestEphemeral( - []labels.Labels{{{Name: labels.MetricName, Value: "test"}}}, - []mimirpb.Sample{{Value: float64(100), TimestampMs: oldTS}}, - nil, - nil, - mimirpb.API, - ) - } - { - r := oldReq() - require.NotEmpty(t, r.EphemeralTimeseries[0].Labels[0].Name) - - _, err = i.Push(ctx, r) - require.NoError(t, err) - - // Verify that ephemeral timeseries were cleaned in Push. - require.Empty(t, r.EphemeralTimeseries[0].Labels) - } - - // Query back the series and verify that sample exists. - verifySamples := func(t *testing.T, expected model.Matrix) { - s := &stream{ctx: ctx} - err = i.QueryStream(&client.QueryRequest{ - StartTimestampMs: math.MinInt64, - EndTimestampMs: math.MaxInt64, - Matchers: []*client.LabelMatcher{ - // Query everything from ephemeral storage. - {Type: client.REGEX_MATCH, Name: labels.MetricName, Value: ".*"}, - {Type: client.EQUAL, Name: StorageLabelName, Value: EphemeralStorageLabelValue}, - }, - }, s) - require.NoError(t, err) - - res, err := chunkcompat.StreamsToMatrix(model.Earliest, model.Latest, s.responses) - require.NoError(t, err) - if len(res) == 0 { - res = nil - } - assert.Equal(t, expected, res) - } - - verifySamples(t, model.Matrix{ - &model.SampleStream{ - Metric: map[model.LabelName]model.LabelValue{labels.MetricName: "test"}, - Values: []model.SamplePair{ - {Value: 100, Timestamp: model.Time(now.Add(-9 * time.Minute).UnixMilli())}, - }, - }, - }) - - // This is a function because i.Push cleans up the request, but we want to reuse it. - newReq := func() *mimirpb.WriteRequest { - return ToWriteRequestEphemeral( - []labels.Labels{{{Name: labels.MetricName, Value: "new-metric"}}}, - []mimirpb.Sample{{Value: float64(500), TimestampMs: now.UnixMilli()}}, - nil, - nil, - mimirpb.API, - ) - } - // Pushing new series fails, because of limit of 1. 
- _, err = i.Push(ctx, newReq()) - require.Equal(t, httpgrpc.Errorf(http.StatusBadRequest, wrapWithUser(i.limiter.FormatError(userID, errMaxEphemeralSeriesPerUserLimitExceeded), userID).Error()), err) - - db := i.getTSDB(userID) - require.NotNil(t, db) - - // Advance time for ephemeral storage - require.Nil(t, db.TruncateEphemeral(now.Add(5*time.Minute))) - - // Old sample is no longer available for querying. - verifySamples(t, nil) - - // Pushing the same request should now fail, because min valid time for ephemeral storage has moved on to (now + 5 minutes - ephemeral series retention = now - 5 minutes) - _, err = i.Push(ctx, oldReq()) - require.Equal(t, httpgrpc.Errorf(http.StatusBadRequest, wrapWithUser(newEphemeralIngestErrSampleTimestampTooOld(model.Time(oldTS), []mimirpb.LabelAdapter{{Name: labels.MetricName, Value: "test"}}), userID).Error()), err) - - // Pushing new series now works. - _, err = i.Push(ctx, newReq()) - require.NoError(t, err) - - verifySamples(t, model.Matrix{ - &model.SampleStream{ - Metric: map[model.LabelName]model.LabelValue{labels.MetricName: "new-metric"}, - Values: []model.SamplePair{ - {Value: 500, Timestamp: model.Time(now.UnixMilli())}, - }, - }, - }) -} - -func TestIngesterCompactionTruncatesEphemeralStorage(t *testing.T) { - cfg := defaultIngesterTestConfig(t) - cfg.BlocksStorageConfig.EphemeralTSDB.Retention = 10 * time.Minute - cfg.IngesterRing.ReplicationFactor = 1 // for computing limits. - cfg.BlocksStorageConfig.TSDB.HeadCompactionInterval = 10 * time.Minute // to avoid running it from test. - - limits := defaultLimitsTestConfig() - limits.MaxEphemeralSeriesPerUser = 1 - - reg := prometheus.NewPedanticRegistry() - i, err := prepareIngesterWithBlocksStorageAndLimits(t, cfg, limits, "", reg) - require.NoError(t, err) - - require.NoError(t, services.StartAndAwaitRunning(context.Background(), i)) - t.Cleanup(func() { - _ = services.StopAndAwaitTerminated(context.Background(), i) - }) - - // Wait until it's healthy - test.Poll(t, 1*time.Second, 1, func() interface{} { - return i.lifecycler.HealthyInstancesCount() - }) - - db, err := i.getOrCreateTSDB(userID, false) - require.NoError(t, err) - require.NotNil(t, db) - - eph, err := db.createEphemeralStorage() - require.NoError(t, err) - require.NotNil(t, eph) - - oldMinT, ok := eph.AppendableMinValidTime() - require.True(t, ok) - - // Wait a bit, just enough to make sure that truncation makes noticeable difference in min valid time. - test.Poll(t, 2*time.Second, true, func() interface{} { - n := time.Now().Add(-cfg.BlocksStorageConfig.EphemeralTSDB.Retention) - return n.UnixMilli() > oldMinT - }) - - // Compact TSDBs, which also truncates ephemeral storage. 
- i.compactBlocks(context.Background(), false, nil) - - newMinT, ok := eph.AppendableMinValidTime() - require.True(t, ok) - require.Greater(t, newMinT, oldMinT) -} - -func TestIngesterQueryingWithStorageLabelErrorHandling(t *testing.T) { - cfg := defaultIngesterTestConfig(t) - - // Create ingester - reg := prometheus.NewPedanticRegistry() - i, err := prepareIngesterWithBlocksStorage(t, cfg, reg) - require.NoError(t, err) - - require.NoError(t, services.StartAndAwaitRunning(context.Background(), i)) - t.Cleanup(func() { - _ = services.StopAndAwaitTerminated(context.Background(), i) - }) - - // Wait until it's healthy - test.Poll(t, 1*time.Second, 1, func() interface{} { - return i.lifecycler.HealthyInstancesCount() - }) - - ctx := user.InjectOrgID(context.Background(), userID) - - type testCase struct { - matchers []*client.LabelMatcher - expectedErr error - } - - for name, tc := range map[string]testCase{ - "no error on missing storage label": { - matchers: []*client.LabelMatcher{ - {Type: client.REGEX_MATCH, Name: labels.MetricName, Value: ".*"}, - }, - expectedErr: nil, - }, - - "no error on valid ephemeral storage matcher": { - matchers: []*client.LabelMatcher{ - {Type: client.REGEX_MATCH, Name: labels.MetricName, Value: ".*"}, - {Type: client.EQUAL, Name: StorageLabelName, Value: EphemeralStorageLabelValue}, - }, - expectedErr: nil, - }, - - "no error on valid persistent storage matcher": { - matchers: []*client.LabelMatcher{ - {Type: client.REGEX_MATCH, Name: labels.MetricName, Value: ".*"}, - {Type: client.EQUAL, Name: StorageLabelName, Value: PersistentStorageLabelValue}, - }, - expectedErr: nil, - }, - - "error on invalid storage label value": { - matchers: []*client.LabelMatcher{ - {Type: client.REGEX_MATCH, Name: labels.MetricName, Value: ".*"}, - {Type: client.EQUAL, Name: StorageLabelName, Value: "invalid"}, - }, - expectedErr: fmt.Errorf(errInvalidStorageLabelValue, "invalid"), - }, - - "error on invalid matcher type !=": { - matchers: []*client.LabelMatcher{ - {Type: client.NOT_EQUAL, Name: StorageLabelName, Value: PersistentStorageLabelValue}, - }, - expectedErr: errInvalidStorageMatcherType, - }, - - "error on invalid matcher type =~": { - matchers: []*client.LabelMatcher{ - {Type: client.REGEX_MATCH, Name: StorageLabelName, Value: PersistentStorageLabelValue}, - }, - expectedErr: errInvalidStorageMatcherType, - }, - - "error on invalid matcher type !~": { - matchers: []*client.LabelMatcher{ - {Type: client.REGEX_NO_MATCH, Name: StorageLabelName, Value: PersistentStorageLabelValue}, - }, - expectedErr: errInvalidStorageMatcherType, - }, - - "no real matchers is fine": { - matchers: []*client.LabelMatcher{ - {Type: client.EQUAL, Name: StorageLabelName, Value: EphemeralStorageLabelValue}, - }, - expectedErr: nil, - }, - - "multiple storage labels return error": { - matchers: []*client.LabelMatcher{ - {Type: client.EQUAL, Name: StorageLabelName, Value: EphemeralStorageLabelValue}, - {Type: client.EQUAL, Name: StorageLabelName, Value: PersistentStorageLabelValue}, - }, - expectedErr: errMultipleStorageMatchersFound, - }, - - "multiple storage labels return error, even if they are the same": { - matchers: []*client.LabelMatcher{ - {Type: client.EQUAL, Name: StorageLabelName, Value: EphemeralStorageLabelValue}, - {Type: client.EQUAL, Name: StorageLabelName, Value: EphemeralStorageLabelValue}, - }, - expectedErr: errMultipleStorageMatchersFound, - }, - } { - t.Run(name, func(t *testing.T) { - err = i.QueryStream(&client.QueryRequest{ - StartTimestampMs: math.MinInt64, - 
EndTimestampMs: math.MaxInt64, - Matchers: tc.matchers, - }, &stream{ctx: ctx}) - require.Equal(t, tc.expectedErr, err) - }) - } -} - func TestIngesterCanEnableIngestAndQueryNativeHistograms(t *testing.T) { expectedSampleHistogram := mimirpb.FromMimirSampleToPromHistogram(mimirpb.FromFloatHistogramToSampleHistogram(tsdb.GenerateTestFloatHistogram(0))) diff --git a/pkg/ingester/instance_limits.go b/pkg/ingester/instance_limits.go index e8a1917802b..8e298ffd87d 100644 --- a/pkg/ingester/instance_limits.go +++ b/pkg/ingester/instance_limits.go @@ -15,37 +15,33 @@ import ( ) const ( - maxIngestionRateFlag = "ingester.instance-limits.max-ingestion-rate" - maxInMemoryTenantsFlag = "ingester.instance-limits.max-tenants" - maxInMemorySeriesFlag = "ingester.instance-limits.max-series" - maxInMemoryEphemeralSeriesFlag = "ingester.instance-limits.max-ephemeral-series" - maxInflightPushRequestsFlag = "ingester.instance-limits.max-inflight-push-requests" + maxIngestionRateFlag = "ingester.instance-limits.max-ingestion-rate" + maxInMemoryTenantsFlag = "ingester.instance-limits.max-tenants" + maxInMemorySeriesFlag = "ingester.instance-limits.max-series" + maxInflightPushRequestsFlag = "ingester.instance-limits.max-inflight-push-requests" ) var ( // We don't include values in the message to avoid leaking Mimir cluster configuration to users. - errMaxIngestionRateReached = errors.New(globalerror.IngesterMaxIngestionRate.MessageWithPerInstanceLimitConfig("the write request has been rejected because the ingester exceeded the samples ingestion rate limit", maxIngestionRateFlag)) - errMaxTenantsReached = errors.New(globalerror.IngesterMaxTenants.MessageWithPerInstanceLimitConfig("the write request has been rejected because the ingester exceeded the allowed number of tenants", maxInMemoryTenantsFlag)) - errMaxInMemorySeriesReached = errors.New(globalerror.IngesterMaxInMemorySeries.MessageWithPerInstanceLimitConfig("the write request has been rejected because the ingester exceeded the allowed number of in-memory series", maxInMemorySeriesFlag)) - errMaxInMemoryEphemeralSeriesReached = errors.New(globalerror.IngesterMaxInMemoryEphemeralSeries.MessageWithPerInstanceLimitConfig("the write request has been rejected because the ingester exceeded the allowed number of ephemeral in-memory series", maxInMemoryEphemeralSeriesFlag)) - errMaxInflightRequestsReached = errors.New(globalerror.IngesterMaxInflightPushRequests.MessageWithPerInstanceLimitConfig("the write request has been rejected because the ingester exceeded the allowed number of inflight push requests", maxInflightPushRequestsFlag)) + errMaxIngestionRateReached = errors.New(globalerror.IngesterMaxIngestionRate.MessageWithPerInstanceLimitConfig("the write request has been rejected because the ingester exceeded the samples ingestion rate limit", maxIngestionRateFlag)) + errMaxTenantsReached = errors.New(globalerror.IngesterMaxTenants.MessageWithPerInstanceLimitConfig("the write request has been rejected because the ingester exceeded the allowed number of tenants", maxInMemoryTenantsFlag)) + errMaxInMemorySeriesReached = errors.New(globalerror.IngesterMaxInMemorySeries.MessageWithPerInstanceLimitConfig("the write request has been rejected because the ingester exceeded the allowed number of in-memory series", maxInMemorySeriesFlag)) + errMaxInflightRequestsReached = errors.New(globalerror.IngesterMaxInflightPushRequests.MessageWithPerInstanceLimitConfig("the write request has been rejected because the ingester exceeded the allowed number of inflight push 
requests", maxInflightPushRequestsFlag)) ) // InstanceLimits describes limits used by ingester. Reaching any of these will result in Push method to return // (internal) error. type InstanceLimits struct { - MaxIngestionRate float64 `yaml:"max_ingestion_rate" category:"advanced"` - MaxInMemoryTenants int64 `yaml:"max_tenants" category:"advanced"` - MaxInMemorySeries int64 `yaml:"max_series" category:"advanced"` - MaxInMemoryEphemeralSeries int64 `yaml:"max_ephemeral_series" category:"experimental"` - MaxInflightPushRequests int64 `yaml:"max_inflight_push_requests" category:"advanced"` + MaxIngestionRate float64 `yaml:"max_ingestion_rate" category:"advanced"` + MaxInMemoryTenants int64 `yaml:"max_tenants" category:"advanced"` + MaxInMemorySeries int64 `yaml:"max_series" category:"advanced"` + MaxInflightPushRequests int64 `yaml:"max_inflight_push_requests" category:"advanced"` } func (l *InstanceLimits) RegisterFlags(f *flag.FlagSet) { f.Float64Var(&l.MaxIngestionRate, maxIngestionRateFlag, 0, "Max ingestion rate (samples/sec) that ingester will accept. This limit is per-ingester, not per-tenant. Additional push requests will be rejected. Current ingestion rate is computed as exponentially weighted moving average, updated every second. 0 = unlimited.") f.Int64Var(&l.MaxInMemoryTenants, maxInMemoryTenantsFlag, 0, "Max tenants that this ingester can hold. Requests from additional tenants will be rejected. 0 = unlimited.") f.Int64Var(&l.MaxInMemorySeries, maxInMemorySeriesFlag, 0, "Max series that this ingester can hold (across all tenants). Requests to create additional series will be rejected. 0 = unlimited.") - f.Int64Var(&l.MaxInMemoryEphemeralSeries, maxInMemoryEphemeralSeriesFlag, 0, "Max ephemeral series that this ingester can hold (across all tenants). Requests to create additional ephemeral series will be rejected. 0 = unlimited.") f.Int64Var(&l.MaxInflightPushRequests, maxInflightPushRequestsFlag, 30000, "Max inflight push requests that this ingester can handle (across all tenants). Additional requests will be rejected. 0 = unlimited.") } diff --git a/pkg/ingester/limiter.go b/pkg/ingester/limiter.go index 4c85c68e98f..5d6e0967fca 100644 --- a/pkg/ingester/limiter.go +++ b/pkg/ingester/limiter.go @@ -19,11 +19,10 @@ import ( var ( // These errors are only internal, to change the API error messages, see Limiter's methods below. - errMaxSeriesPerMetricLimitExceeded = errors.New("per-metric series limit exceeded") - errMaxMetadataPerMetricLimitExceeded = errors.New("per-metric metadata limit exceeded") - errMaxSeriesPerUserLimitExceeded = errors.New("per-user series limit exceeded") - errMaxEphemeralSeriesPerUserLimitExceeded = errors.New("per-user ephemeral series limit exceeded") - errMaxMetadataPerUserLimitExceeded = errors.New("per-user metric metadata limit exceeded") + errMaxSeriesPerMetricLimitExceeded = errors.New("per-metric series limit exceeded") + errMaxMetadataPerMetricLimitExceeded = errors.New("per-metric metadata limit exceeded") + errMaxSeriesPerUserLimitExceeded = errors.New("per-user series limit exceeded") + errMaxMetadataPerUserLimitExceeded = errors.New("per-user metric metadata limit exceeded") ) // RingCount is the interface exposed by a ring implementation which allows @@ -87,16 +86,6 @@ func (l *Limiter) AssertMaxSeriesPerUser(userID string, series int) error { return errMaxSeriesPerUserLimitExceeded } -// AssertMaxEphemeralSeriesPerUser limit has not been reached compared to the current -// number of series in input and returns an error if so. 
-func (l *Limiter) AssertMaxEphemeralSeriesPerUser(userID string, series int) error { - if actualLimit := l.maxEphemeralSeriesPerUser(userID); series < actualLimit { - return nil - } - - return errMaxEphemeralSeriesPerUserLimitExceeded -} - // AssertMaxMetricsWithMetadataPerUser limit has not been reached compared to the current // number of metrics with metadata in input and returns an error if so. func (l *Limiter) AssertMaxMetricsWithMetadataPerUser(userID string, metrics int) error { @@ -118,8 +107,6 @@ func (l *Limiter) FormatError(userID string, err error) error { return l.formatMaxSeriesPerMetricError(userID) case errMaxMetadataPerUserLimitExceeded: return l.formatMaxMetadataPerUserError(userID) - case errMaxEphemeralSeriesPerUserLimitExceeded: - return l.formatMaxEphemeralSeriesPerUserError(userID) case errMaxMetadataPerMetricLimitExceeded: return l.formatMaxMetadataPerMetricError(userID) default: @@ -145,15 +132,6 @@ func (l *Limiter) formatMaxSeriesPerMetricError(userID string) error { )) } -func (l *Limiter) formatMaxEphemeralSeriesPerUserError(userID string) error { - globalLimit := l.limits.MaxEphemeralSeriesPerUser(userID) - - return errors.New(globalerror.MaxEphemeralSeriesPerUser.MessageWithPerTenantLimitConfig( - fmt.Sprintf("per-user ephemeral series limit of %d exceeded", globalLimit), - validation.MaxEphemeralSeriesPerUserFlag, - )) -} - func (l *Limiter) formatMaxMetadataPerUserError(userID string) error { globalLimit := l.limits.MaxGlobalMetricsWithMetadataPerUser(userID) @@ -184,10 +162,6 @@ func (l *Limiter) maxSeriesPerUser(userID string) int { return l.convertGlobalToLocalLimitOrUnlimited(userID, l.limits.MaxGlobalSeriesPerUser) } -func (l *Limiter) maxEphemeralSeriesPerUser(userID string) int { - return l.convertGlobalToLocalLimitOrUnlimited(userID, l.limits.MaxEphemeralSeriesPerUser) -} - func (l *Limiter) maxMetadataPerUser(userID string) int { return l.convertGlobalToLocalLimitOrUnlimited(userID, l.limits.MaxGlobalMetricsWithMetadataPerUser) } diff --git a/pkg/ingester/metrics.go b/pkg/ingester/metrics.go index 95b67aef86b..3342553b104 100644 --- a/pkg/ingester/metrics.go +++ b/pkg/ingester/metrics.go @@ -24,21 +24,13 @@ type ingesterMetrics struct { ingestedExemplarsFail prometheus.Counter ingestedMetadataFail prometheus.Counter - ephemeralIngestedSamples *prometheus.CounterVec - ephemeralIngestedSamplesFail *prometheus.CounterVec - queries prometheus.Counter queriedSamples prometheus.Histogram queriedExemplars prometheus.Histogram queriedSeries prometheus.Histogram - ephemeralQueries prometheus.Counter - ephemeralQueriedSamples prometheus.Histogram - ephemeralQueriedSeries prometheus.Histogram - memMetadata prometheus.Gauge memUsers prometheus.Gauge - memEphemeralUsers prometheus.Gauge memMetadataCreatedTotal *prometheus.CounterVec memMetadataRemovedTotal *prometheus.CounterVec @@ -49,7 +41,6 @@ type ingesterMetrics struct { // Global limit metrics maxUsersGauge prometheus.GaugeFunc maxSeriesGauge prometheus.GaugeFunc - maxEphemeralSeriesGauge prometheus.GaugeFunc maxIngestionRate prometheus.GaugeFunc ingestionRate prometheus.GaugeFunc maxInflightPushRequests prometheus.GaugeFunc @@ -63,8 +54,7 @@ type ingesterMetrics struct { appenderCommitDuration prometheus.Histogram idleTsdbChecks *prometheus.CounterVec - discardedPersistent *discardedMetrics - discardedEphemeral *discardedMetrics + discarded *discardedMetrics // Discarded metadata discardedMetadataPerUserMetadataLimit *prometheus.CounterVec @@ -131,14 +121,6 @@ func newIngesterMetrics( Name: 
"cortex_ingester_ingested_metadata_failures_total", Help: "The total number of metadata that errored on ingestion.", }), - ephemeralIngestedSamples: promauto.With(r).NewCounterVec(prometheus.CounterOpts{ - Name: "cortex_ingester_ingested_ephemeral_samples_total", - Help: "The total number of samples ingested per user for ephemeral series.", - }, []string{"user"}), - ephemeralIngestedSamplesFail: promauto.With(r).NewCounterVec(prometheus.CounterOpts{ - Name: "cortex_ingester_ingested_ephemeral_samples_failures_total", - Help: "The total number of samples that errored on ingestion per user for ephemeral series.", - }, []string{"user"}), queries: promauto.With(r).NewCounter(prometheus.CounterOpts{ Name: "cortex_ingester_queries_total", Help: "The total number of queries the ingester has handled.", @@ -161,20 +143,6 @@ func newIngesterMetrics( // A reasonable upper bound is around 100k - 10*(8^(6-1)) = 327k. Buckets: prometheus.ExponentialBuckets(10, 8, 6), }), - ephemeralQueries: promauto.With(r).NewCounter(prometheus.CounterOpts{ - Name: "cortex_ingester_queries_ephemeral_total", - Help: "The total number of queries the ingester has handled for ephemeral storage.", - }), - ephemeralQueriedSamples: promauto.With(r).NewHistogram(prometheus.HistogramOpts{ - Name: "cortex_ingester_queried_ephemeral_samples", - Help: "The total number of samples from ephemeral storage returned per query.", - Buckets: prometheus.ExponentialBuckets(10, 8, 8), - }), - ephemeralQueriedSeries: promauto.With(r).NewHistogram(prometheus.HistogramOpts{ - Name: "cortex_ingester_queried_ephemeral_series", - Help: "The total number of ephemeral series returned from queries.", - Buckets: prometheus.ExponentialBuckets(10, 8, 6), - }), memMetadata: promauto.With(r).NewGauge(prometheus.GaugeOpts{ Name: "cortex_ingester_memory_metadata", Help: "The current number of metadata in memory.", @@ -183,10 +151,6 @@ func newIngesterMetrics( Name: "cortex_ingester_memory_users", Help: "The current number of users in memory.", }), - memEphemeralUsers: promauto.With(r).NewGauge(prometheus.GaugeOpts{ - Name: "cortex_ingester_memory_ephemeral_users", - Help: "The current number of users with ephemeral storage in memory.", - }), memMetadataCreatedTotal: promauto.With(r).NewCounterVec(prometheus.CounterOpts{ Name: "cortex_ingester_memory_metadata_created_total", Help: "The total number of metadata that were created per user", @@ -218,17 +182,6 @@ func newIngesterMetrics( return 0 }), - maxEphemeralSeriesGauge: promauto.With(r).NewGaugeFunc(prometheus.GaugeOpts{ - Name: instanceLimits, - Help: instanceLimitsHelp, - ConstLabels: map[string]string{limitLabel: "max_ephemeral_series"}, - }, func() float64 { - if g := instanceLimitsFn(); g != nil { - return float64(g.MaxInMemoryEphemeralSeries) - } - return 0 - }), - maxIngestionRate: promauto.With(r).NewGaugeFunc(prometheus.GaugeOpts{ Name: instanceLimits, Help: instanceLimitsHelp, @@ -310,14 +263,13 @@ func newIngesterMetrics( }), appenderCommitDuration: promauto.With(r).NewHistogram(prometheus.HistogramOpts{ Name: "cortex_ingester_tsdb_appender_commit_duration_seconds", - Help: "The total time it takes for a push request to commit samples appended to TSDB (both persistent and ephemeral).", + Help: "The total time it takes for a push request to commit samples appended to TSDB.", Buckets: []float64{.001, .005, .01, .025, .05, .1, .25, .5, 1, 2.5, 5, 10}, }), idleTsdbChecks: idleTsdbChecks, - discardedPersistent: newDiscardedMetrics(r, ""), - discardedEphemeral: newDiscardedMetrics(r, 
ephemeralDiscardPrefix), + discarded: newDiscardedMetrics(r), discardedMetadataPerUserMetadataLimit: validation.DiscardedMetadataCounter(r, perUserMetadataLimit), discardedMetadataPerMetricMetadataLimit: validation.DiscardedMetadataCounter(r, perMetricMetadataLimit), @@ -332,20 +284,15 @@ func (m *ingesterMetrics) deletePerUserMetrics(userID string) { m.memMetadataCreatedTotal.DeleteLabelValues(userID) m.memMetadataRemovedTotal.DeleteLabelValues(userID) - m.ephemeralIngestedSamples.DeleteLabelValues(userID) - m.ephemeralIngestedSamplesFail.DeleteLabelValues(userID) - filter := prometheus.Labels{"user": userID} - m.discardedPersistent.DeletePartialMatch(filter) - m.discardedEphemeral.DeletePartialMatch(filter) + m.discarded.DeletePartialMatch(filter) m.discardedMetadataPerUserMetadataLimit.DeleteLabelValues(userID) m.discardedMetadataPerMetricMetadataLimit.DeleteLabelValues(userID) } func (m *ingesterMetrics) deletePerGroupMetricsForUser(userID, group string) { - m.discardedPersistent.DeleteLabelValues(userID, group) - m.discardedEphemeral.DeleteLabelValues(userID, group) + m.discarded.DeleteLabelValues(userID, group) } func (m *ingesterMetrics) deletePerUserCustomTrackerMetrics(userID string, customTrackerMetrics []string) { @@ -365,14 +312,14 @@ type discardedMetrics struct { perMetricSeriesLimit *prometheus.CounterVec } -func newDiscardedMetrics(r prometheus.Registerer, prefix string) *discardedMetrics { +func newDiscardedMetrics(r prometheus.Registerer) *discardedMetrics { return &discardedMetrics{ - sampleOutOfBounds: validation.DiscardedSamplesCounter(r, prefix+sampleOutOfBounds), - sampleOutOfOrder: validation.DiscardedSamplesCounter(r, prefix+sampleOutOfOrder), - sampleTooOld: validation.DiscardedSamplesCounter(r, prefix+sampleTooOld), - newValueForTimestamp: validation.DiscardedSamplesCounter(r, prefix+newValueForTimestamp), - perUserSeriesLimit: validation.DiscardedSamplesCounter(r, prefix+perUserSeriesLimit), - perMetricSeriesLimit: validation.DiscardedSamplesCounter(r, prefix+perMetricSeriesLimit), + sampleOutOfBounds: validation.DiscardedSamplesCounter(r, sampleOutOfBounds), + sampleOutOfOrder: validation.DiscardedSamplesCounter(r, sampleOutOfOrder), + sampleTooOld: validation.DiscardedSamplesCounter(r, sampleTooOld), + newValueForTimestamp: validation.DiscardedSamplesCounter(r, newValueForTimestamp), + perUserSeriesLimit: validation.DiscardedSamplesCounter(r, perUserSeriesLimit), + perMetricSeriesLimit: validation.DiscardedSamplesCounter(r, perMetricSeriesLimit), } } @@ -449,13 +396,6 @@ type tsdbMetrics struct { memSeriesCreatedTotal *prometheus.Desc memSeriesRemovedTotal *prometheus.Desc - ephemeralHeadTruncateFail *prometheus.Desc - ephemeralHeadTruncateTotal *prometheus.Desc - ephemeralHeadGcDuration *prometheus.Desc - - ephemeralSeriesCreatedTotal *prometheus.Desc - ephemeralSeriesRemovedTotal *prometheus.Desc - regs *dskit_metrics.TenantRegistries } @@ -643,28 +583,6 @@ func newTSDBMetrics(r prometheus.Registerer, logger log.Logger) *tsdbMetrics { "cortex_ingester_memory_series_removed_total", "The total number of series that were removed per user.", []string{"user"}, nil), - - ephemeralHeadTruncateFail: prometheus.NewDesc( - "cortex_ingester_ephemeral_head_truncations_failed_total", - "Total number of TSDB head truncations that failed for ephemeral storage.", - nil, nil), - ephemeralHeadTruncateTotal: prometheus.NewDesc( - "cortex_ingester_ephemeral_head_truncations_total", - "Total number of TSDB head truncations attempted for ephemeral storage.", - nil, nil), - 
ephemeralHeadGcDuration: prometheus.NewDesc( - "cortex_ingester_ephemeral_head_gc_duration_seconds", - "Runtime of garbage collection in the TSDB head for ephemeral storage.", - nil, nil), - - ephemeralSeriesCreatedTotal: prometheus.NewDesc( - "cortex_ingester_ephemeral_series_created_total", - "The total number of series in ephemeral storage that were created per user.", - []string{"user"}, nil), - ephemeralSeriesRemovedTotal: prometheus.NewDesc( - "cortex_ingester_ephemeral_series_removed_total", - "The total number of series in ephemeral storage that were removed per user.", - []string{"user"}, nil), } if r != nil { @@ -721,12 +639,6 @@ func (sm *tsdbMetrics) Describe(out chan<- *prometheus.Desc) { out <- sm.memSeriesCreatedTotal out <- sm.memSeriesRemovedTotal - - out <- sm.ephemeralHeadTruncateFail - out <- sm.ephemeralHeadTruncateTotal - out <- sm.ephemeralHeadGcDuration - out <- sm.ephemeralSeriesCreatedTotal - out <- sm.ephemeralSeriesRemovedTotal } func (sm *tsdbMetrics) Collect(out chan<- prometheus.Metric) { @@ -779,12 +691,6 @@ func (sm *tsdbMetrics) Collect(out chan<- prometheus.Metric) { data.SendSumOfCountersPerTenant(out, sm.memSeriesCreatedTotal, "prometheus_tsdb_head_series_created_total") data.SendSumOfCountersPerTenant(out, sm.memSeriesRemovedTotal, "prometheus_tsdb_head_series_removed_total") - - data.SendSumOfCounters(out, sm.ephemeralHeadTruncateFail, ephemeralPrometheusMetricsPrefix+"prometheus_tsdb_head_truncations_failed_total") - data.SendSumOfCounters(out, sm.ephemeralHeadTruncateTotal, ephemeralPrometheusMetricsPrefix+"prometheus_tsdb_head_truncations_total") - data.SendSumOfSummaries(out, sm.ephemeralHeadGcDuration, ephemeralPrometheusMetricsPrefix+"prometheus_tsdb_head_gc_duration_seconds") - data.SendSumOfCountersPerTenant(out, sm.ephemeralSeriesCreatedTotal, ephemeralPrometheusMetricsPrefix+"prometheus_tsdb_head_series_created_total") - data.SendSumOfCountersPerTenant(out, sm.ephemeralSeriesRemovedTotal, ephemeralPrometheusMetricsPrefix+"prometheus_tsdb_head_series_removed_total") } func (sm *tsdbMetrics) setRegistryForUser(userID string, registry *prometheus.Registry) { diff --git a/pkg/ingester/metrics_test.go b/pkg/ingester/metrics_test.go index 98b6e646b5c..eb6ee3b494d 100644 --- a/pkg/ingester/metrics_test.go +++ b/pkg/ingester/metrics_test.go @@ -127,31 +127,6 @@ func TestTSDBMetrics(t *testing.T) { # TYPE cortex_ingester_tsdb_checkpoint_creations_total counter cortex_ingester_tsdb_checkpoint_creations_total 1883489 - # HELP cortex_ingester_ephemeral_head_gc_duration_seconds Runtime of garbage collection in the TSDB head for ephemeral storage. - # TYPE cortex_ingester_ephemeral_head_gc_duration_seconds summary - cortex_ingester_ephemeral_head_gc_duration_seconds_sum 5.154812e+06 - cortex_ingester_ephemeral_head_gc_duration_seconds_count 3 - - # HELP cortex_ingester_ephemeral_head_truncations_failed_total Total number of TSDB head truncations that failed for ephemeral storage. - # TYPE cortex_ingester_ephemeral_head_truncations_failed_total counter - cortex_ingester_ephemeral_head_truncations_failed_total 4.95655e+06 - - # HELP cortex_ingester_ephemeral_head_truncations_total Total number of TSDB head truncations attempted for ephemeral storage. - # TYPE cortex_ingester_ephemeral_head_truncations_total counter - cortex_ingester_ephemeral_head_truncations_total 5.055681e+06 - - # HELP cortex_ingester_ephemeral_series_created_total The total number of series in ephemeral storage that were created per user. 
- # TYPE cortex_ingester_ephemeral_series_created_total counter - cortex_ingester_ephemeral_series_created_total{user="user1"} 654285 - cortex_ingester_ephemeral_series_created_total{user="user2"} 4.546711e+06 - cortex_ingester_ephemeral_series_created_total{user="user3"} 52947 - - # HELP cortex_ingester_ephemeral_series_removed_total The total number of series in ephemeral storage that were removed per user. - # TYPE cortex_ingester_ephemeral_series_removed_total counter - cortex_ingester_ephemeral_series_removed_total{user="user1"} 666630 - cortex_ingester_ephemeral_series_removed_total{user="user2"} 4.632498e+06 - cortex_ingester_ephemeral_series_removed_total{user="user3"} 53946 - # HELP cortex_ingester_memory_series_created_total The total number of series that were created per user. # TYPE cortex_ingester_memory_series_created_total counter # 5 * (12345, 85787 and 999 respectively) @@ -506,29 +481,6 @@ func TestTSDBMetricsWithRemoval(t *testing.T) { # TYPE cortex_ingester_tsdb_out_of_order_samples_appended_total counter cortex_ingester_tsdb_out_of_order_samples_appended_total{user="user1"} 3 cortex_ingester_tsdb_out_of_order_samples_appended_total{user="user2"} 3 - - # HELP cortex_ingester_ephemeral_head_gc_duration_seconds Runtime of garbage collection in the TSDB head for ephemeral storage. - # TYPE cortex_ingester_ephemeral_head_gc_duration_seconds summary - cortex_ingester_ephemeral_head_gc_duration_seconds_sum 5.154812e+06 - cortex_ingester_ephemeral_head_gc_duration_seconds_count 3 - - # HELP cortex_ingester_ephemeral_head_truncations_failed_total Total number of TSDB head truncations that failed for ephemeral storage. - # TYPE cortex_ingester_ephemeral_head_truncations_failed_total counter - cortex_ingester_ephemeral_head_truncations_failed_total 4.95655e+06 - - # HELP cortex_ingester_ephemeral_head_truncations_total Total number of TSDB head truncations attempted for ephemeral storage. - # TYPE cortex_ingester_ephemeral_head_truncations_total counter - cortex_ingester_ephemeral_head_truncations_total 5.055681e+06 - - # HELP cortex_ingester_ephemeral_series_created_total The total number of series in ephemeral storage that were created per user. - # TYPE cortex_ingester_ephemeral_series_created_total counter - cortex_ingester_ephemeral_series_created_total{user="user1"} 654285 - cortex_ingester_ephemeral_series_created_total{user="user2"} 4.546711e+06 - - # HELP cortex_ingester_ephemeral_series_removed_total The total number of series in ephemeral storage that were removed per user. 
- # TYPE cortex_ingester_ephemeral_series_removed_total counter - cortex_ingester_ephemeral_series_removed_total{user="user1"} 666630 - cortex_ingester_ephemeral_series_removed_total{user="user2"} 4.632498e+06 `)) require.NoError(t, err) } @@ -801,33 +753,5 @@ func populateTSDBMetrics(base float64) *prometheus.Registry { }) outOfOrderSamplesAppendedTotal.Add(3) - ephHeadTruncateFail := promauto.With(r).NewCounter(prometheus.CounterOpts{ - Name: ephemeralPrometheusMetricsPrefix + "prometheus_tsdb_head_truncations_failed_total", - Help: "Total number of head truncations that failed.", - }) - ephHeadTruncateFail.Add(50 * base) - - ephHeadTruncateTotal := promauto.With(r).NewCounter(prometheus.CounterOpts{ - Name: ephemeralPrometheusMetricsPrefix + "prometheus_tsdb_head_truncations_total", - Help: "Total number of head truncations attempted.", - }) - ephHeadTruncateTotal.Add(51 * base) - - ephGcDuration := promauto.With(r).NewSummary(prometheus.SummaryOpts{ - Name: ephemeralPrometheusMetricsPrefix + "prometheus_tsdb_head_gc_duration_seconds", - Help: "Runtime of garbage collection in the head block.", - }) - ephGcDuration.Observe(52 * base) - - ephSeriesCreated := promauto.With(r).NewCounter(prometheus.CounterOpts{ - Name: ephemeralPrometheusMetricsPrefix + "prometheus_tsdb_head_series_created_total", - }) - ephSeriesCreated.Add(53 * base) - - ephSeriesRemoved := promauto.With(r).NewCounter(prometheus.CounterOpts{ - Name: ephemeralPrometheusMetricsPrefix + "prometheus_tsdb_head_series_removed_total", - }) - ephSeriesRemoved.Add(54 * base) - return r } diff --git a/pkg/ingester/user_tsdb.go b/pkg/ingester/user_tsdb.go index 9f5ade2e7ab..8c6c80c1303 100644 --- a/pkg/ingester/user_tsdb.go +++ b/pkg/ingester/user_tsdb.go @@ -11,7 +11,6 @@ import ( "sync" "time" - "github.com/grafana/dskit/multierror" "github.com/oklog/ulid" "github.com/pkg/errors" "github.com/prometheus/prometheus/model/labels" @@ -79,16 +78,8 @@ type userTSDB struct { seriesInMetric *metricCounter limiter *Limiter - // Function that creates ephemeral storage (*tsdb.Head) for the user. - ephemeralFactory func() (*tsdb.Head, error) - ephemeralSeriesRetentionPeriod time.Duration - - ephemeralMtx sync.RWMutex - ephemeralStorage *tsdb.Head - - instanceSeriesCount *atomic.Int64 // Shared across all userTSDB instances created by ingester. - instanceEphemeralSeriesCount *atomic.Int64 // Shared across all userTSDB instances created by ingester. - instanceLimitsFn func() *InstanceLimits + instanceSeriesCount *atomic.Int64 // Shared across all userTSDB instances created by ingester. + instanceLimitsFn func() *InstanceLimits stateMtx sync.RWMutex state tsdbState @@ -125,82 +116,16 @@ func (u *userTSDB) Appender(ctx context.Context) storage.Appender { return u.db.Appender(ctx) } -func (u *userTSDB) EphemeralAppender(ctx context.Context) (storage.Appender, error) { - es := u.getEphemeralStorage() - if es != nil { - return es.Appender(ctx), nil - } - - es, err := u.createEphemeralStorage() - if err != nil { - return nil, err - } - - return es.Appender(ctx), nil -} - -func (u *userTSDB) createEphemeralStorage() (*tsdb.Head, error) { - u.ephemeralMtx.Lock() - defer u.ephemeralMtx.Unlock() - - if u.ephemeralStorage != nil { - return u.ephemeralStorage, nil - } - - es, err := u.ephemeralFactory() - if err == nil { - u.ephemeralStorage = es - } - return u.ephemeralStorage, err -} - -// getEphemeralStorage returns ephemeral storage, if it exists, or nil otherwise. 
-func (u *userTSDB) getEphemeralStorage() *tsdb.Head { - u.ephemeralMtx.RLock() - defer u.ephemeralMtx.RUnlock() - - return u.ephemeralStorage -} - -func (u *userTSDB) hasEphemeralStorage() bool { - u.ephemeralMtx.RLock() - defer u.ephemeralMtx.RUnlock() - - return u.ephemeralStorage != nil -} - // Querier returns a new querier over the data partition for the given time range. -func (u *userTSDB) Querier(ctx context.Context, mint, maxt int64, ephemeral bool) (storage.Querier, error) { - if ephemeral { - eph := u.getEphemeralStorage() - if eph == nil { - return storage.NoopQuerier(), nil - } - - return tsdb.NewBlockQuerier(eph, mint, maxt) - } - +func (u *userTSDB) Querier(ctx context.Context, mint, maxt int64) (storage.Querier, error) { return u.db.Querier(ctx, mint, maxt) } -func (u *userTSDB) ChunkQuerier(ctx context.Context, mint, maxt int64, ephemeral bool) (storage.ChunkQuerier, error) { - if ephemeral { - eph := u.getEphemeralStorage() - if eph == nil { - return storage.NoopChunkedQuerier(), nil - } - - return tsdb.NewBlockChunkQuerier(eph, mint, maxt) - } - +func (u *userTSDB) ChunkQuerier(ctx context.Context, mint, maxt int64) (storage.ChunkQuerier, error) { return u.db.ChunkQuerier(ctx, mint, maxt) } -func (u *userTSDB) UnorderedChunkQuerier(ctx context.Context, mint, maxt int64, ephemeral bool) (storage.ChunkQuerier, error) { - if ephemeral { - // There is no "unordered chunk querier" for tsdb Head. - return u.ChunkQuerier(ctx, mint, maxt, ephemeral) - } +func (u *userTSDB) UnorderedChunkQuerier(ctx context.Context, mint, maxt int64) (storage.ChunkQuerier, error) { return u.db.UnorderedChunkQuerier(ctx, mint, maxt) } @@ -217,23 +142,7 @@ func (u *userTSDB) Blocks() []*tsdb.Block { } func (u *userTSDB) Close() error { - var merr multierror.MultiError - - eph := u.getEphemeralStorage() - if eph != nil { - merr.Add(errors.Wrap(eph.Close(), "ephemeral storage")) - } - - merr.Add(errors.Wrap(u.db.Close(), "persistent storage")) - return merr.Err() -} - -func (u *userTSDB) TruncateEphemeral(now time.Time) error { - eph := u.getEphemeralStorage() - if eph != nil { - return eph.Truncate(now.Add(-u.ephemeralSeriesRetentionPeriod).UnixMilli()) - } - return nil + return u.db.Close() } func (u *userTSDB) Compact() error { @@ -292,15 +201,7 @@ func (u *userTSDB) compactHead(blockDuration int64) error { return u.db.CompactOOOHead() } -func (u *userTSDB) persistentSeriesCallback() tsdb.SeriesLifecycleCallback { - return seriesLifecycleCallback{ - preCreation: u.persistentPreCreation, - postCreation: u.persistentPostCreation, - postDeletion: u.persistentPostDeletion, - } -} - -func (u *userTSDB) persistentPreCreation(metric labels.Labels) error { +func (u *userTSDB) PreCreation(metric labels.Labels) error { if u.limiter == nil { return nil } @@ -330,7 +231,7 @@ func (u *userTSDB) persistentPreCreation(metric labels.Labels) error { return nil } -func (u *userTSDB) persistentPostCreation(metric labels.Labels) { +func (u *userTSDB) PostCreation(metric labels.Labels) { u.instanceSeriesCount.Inc() metricName, err := extract.MetricNameFromLabels(metric) @@ -341,7 +242,7 @@ func (u *userTSDB) persistentPostCreation(metric labels.Labels) { u.seriesInMetric.increaseSeriesForMetric(metricName) } -func (u *userTSDB) persistentPostDeletion(metrics ...labels.Labels) { +func (u *userTSDB) PostDeletion(metrics ...labels.Labels) { u.instanceSeriesCount.Sub(int64(len(metrics))) for _, metric := range metrics { @@ -354,49 +255,6 @@ func (u *userTSDB) persistentPostDeletion(metrics ...labels.Labels) { } } -func (u 
*userTSDB) ephemeralSeriesCallback() tsdb.SeriesLifecycleCallback { - return seriesLifecycleCallback{ - preCreation: u.ephemeralPreCreation, - postCreation: u.ephemeralPostCreation, - postDeletion: u.ephemeralPostDeletion, - } -} - -func (u *userTSDB) ephemeralPreCreation(_ labels.Labels) error { - if u.limiter == nil { - return nil - } - - // Verify ingester's global limit - gl := u.instanceLimitsFn() - if gl != nil && gl.MaxInMemoryEphemeralSeries > 0 { - if series := u.instanceEphemeralSeriesCount.Load(); series >= gl.MaxInMemoryEphemeralSeries { - return errMaxInMemoryEphemeralSeriesReached - } - } - - eph := u.getEphemeralStorage() - if eph == nil { - // if ephemeralPreCreation is called, ephemeral storage should exist, but check here is better than panic. - return errors.New("ephemeral storage not created") - } - - // Total series limit. - if err := u.limiter.AssertMaxEphemeralSeriesPerUser(u.userID, int(eph.NumSeries())); err != nil { - return err - } - - return nil -} - -func (u *userTSDB) ephemeralPostCreation(_ labels.Labels) { - u.instanceEphemeralSeriesCount.Inc() -} - -func (u *userTSDB) ephemeralPostDeletion(metrics ...labels.Labels) { - u.instanceEphemeralSeriesCount.Sub(int64(len(metrics))) -} - // blocksToDelete filters the input blocks and returns the blocks which are safe to be deleted from the ingester. func (u *userTSDB) blocksToDelete(blocks []*tsdb.Block) map[ulid.ULID]struct{} { if u.db == nil { @@ -529,13 +387,3 @@ func (u *userTSDB) acquireAppendLock() error { func (u *userTSDB) releaseAppendLock() { u.pushesInFlight.Done() } - -type seriesLifecycleCallback struct { - preCreation func(metric labels.Labels) error - postCreation func(metric labels.Labels) - postDeletion func(metric ...labels.Labels) -} - -func (s seriesLifecycleCallback) PreCreation(l labels.Labels) error { return s.preCreation(l) } -func (s seriesLifecycleCallback) PostCreation(l labels.Labels) { s.postCreation(l) } -func (s seriesLifecycleCallback) PostDeletion(l ...labels.Labels) { s.postDeletion(l...) } diff --git a/pkg/mimir/mimir.go b/pkg/mimir/mimir.go index 4119ff6a87e..3a1631ce20e 100644 --- a/pkg/mimir/mimir.go +++ b/pkg/mimir/mimir.go @@ -67,7 +67,6 @@ import ( "github.com/grafana/mimir/pkg/usagestats" "github.com/grafana/mimir/pkg/util" "github.com/grafana/mimir/pkg/util/activitytracker" - "github.com/grafana/mimir/pkg/util/ephemeral" util_log "github.com/grafana/mimir/pkg/util/log" "github.com/grafana/mimir/pkg/util/noauth" "github.com/grafana/mimir/pkg/util/process" @@ -669,7 +668,6 @@ type Mimir struct { ActivityTracker *activitytracker.ActivityTracker UsageStatsReporter *usagestats.Reporter BuildInfoHandler http.Handler - EphemeralChecker ephemeral.SeriesCheckerByUser // Queryables that the querier should use to query the long term storage. 
StoreQueryables []querier.QueryableWithFilter diff --git a/pkg/mimir/modules.go b/pkg/mimir/modules.go index 7edaa4d5166..bee839e19dc 100644 --- a/pkg/mimir/modules.go +++ b/pkg/mimir/modules.go @@ -72,7 +72,6 @@ const ( ActiveGroupsCleanupService string = "active-groups-cleanup-service" Distributor string = "distributor" DistributorService string = "distributor-service" - EphemeralChecker string = "ephemeral-checker" Ingester string = "ingester" IngesterService string = "ingester-service" Flusher string = "flusher" @@ -300,7 +299,7 @@ func (t *Mimir) initDistributorService() (serv services.Service, err error) { // ruler's dependency) canJoinDistributorsRing := t.Cfg.isAnyModuleEnabled(Distributor, Write, All) - t.Distributor, err = distributor.New(t.Cfg.Distributor, t.Cfg.IngesterClient, t.Overrides, t.ActiveGroupsCleanup, t.Ring, t.EphemeralChecker, canJoinDistributorsRing, t.Registerer, util_log.Logger) + t.Distributor, err = distributor.New(t.Cfg.Distributor, t.Cfg.IngesterClient, t.Overrides, t.ActiveGroupsCleanup, t.Ring, canJoinDistributorsRing, t.Registerer, util_log.Logger) if err != nil { return } @@ -312,11 +311,6 @@ func (t *Mimir) initDistributorService() (serv services.Service, err error) { return t.Distributor, nil } -func (t *Mimir) initEphemeralChecker() (serv services.Service, err error) { - t.EphemeralChecker = t.Overrides - return nil, nil -} - func (t *Mimir) initDistributor() (serv services.Service, err error) { t.API.RegisterDistributor(t.Distributor, t.Cfg.Distributor, t.Registerer) @@ -857,7 +851,6 @@ func (t *Mimir) setupModuleManager() error { mm.RegisterModule(ActiveGroupsCleanupService, t.initActiveGroupsCleanupService, modules.UserInvisibleModule) mm.RegisterModule(Distributor, t.initDistributor) mm.RegisterModule(DistributorService, t.initDistributorService, modules.UserInvisibleModule) - mm.RegisterModule(EphemeralChecker, t.initEphemeralChecker, modules.UserInvisibleModule) mm.RegisterModule(Ingester, t.initIngester) mm.RegisterModule(IngesterService, t.initIngesterService, modules.UserInvisibleModule) mm.RegisterModule(Flusher, t.initFlusher) @@ -889,8 +882,7 @@ func (t *Mimir) setupModuleManager() error { Overrides: {RuntimeConfig}, OverridesExporter: {Overrides, MemberlistKV}, Distributor: {DistributorService, API, ActiveGroupsCleanupService}, - DistributorService: {Ring, Overrides, EphemeralChecker}, - EphemeralChecker: {Overrides}, + DistributorService: {Ring, Overrides}, Ingester: {IngesterService, API, ActiveGroupsCleanupService}, IngesterService: {Overrides, RuntimeConfig, MemberlistKV}, Flusher: {Overrides, API}, diff --git a/pkg/mimirpb/mimir.pb.go b/pkg/mimirpb/mimir.pb.go index 8a30a677ebb..4d7fed549b6 100644 --- a/pkg/mimirpb/mimir.pb.go +++ b/pkg/mimirpb/mimir.pb.go @@ -183,8 +183,6 @@ type WriteRequest struct { Metadata []*MetricMetadata `protobuf:"bytes,3,rep,name=metadata,proto3" json:"metadata,omitempty"` // Skip validation of label names. SkipLabelNameValidation bool `protobuf:"varint,1000,opt,name=skip_label_name_validation,json=skipLabelNameValidation,proto3" json:"skip_label_name_validation,omitempty"` - // Timeseries that are stored to ephemeral storage only. 
- EphemeralTimeseries []PreallocTimeseries `protobuf:"bytes,1001,rep,name=ephemeral_timeseries,json=ephemeralTimeseries,proto3,customtype=PreallocTimeseries" json:"ephemeral_timeseries"` } func (m *WriteRequest) Reset() { *m = WriteRequest{} } @@ -1793,114 +1791,113 @@ func init() { func init() { proto.RegisterFile("mimir.proto", fileDescriptor_86d4d7485f544059) } var fileDescriptor_86d4d7485f544059 = []byte{ - // 1707 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xc4, 0x58, 0x4f, 0x73, 0x1b, 0x49, - 0x15, 0x57, 0x4b, 0x63, 0x49, 0xf3, 0x2c, 0xcb, 0xb3, 0x1d, 0x13, 0x44, 0x8a, 0x1d, 0x3b, 0x43, - 0xb1, 0xb8, 0x28, 0xf0, 0x52, 0x59, 0x36, 0x5b, 0xbb, 0x15, 0xfe, 0x8c, 0xec, 0x49, 0xec, 0xac, - 0x2d, 0x85, 0x96, 0x94, 0x25, 0x70, 0x50, 0x8d, 0xe5, 0xb6, 0x34, 0xb5, 0xf3, 0x8f, 0x99, 0x51, - 0x88, 0x39, 0xc1, 0x01, 0x8a, 0x23, 0x55, 0xdc, 0x28, 0x8a, 0x0b, 0x17, 0xbe, 0x00, 0xdf, 0x21, - 0x55, 0x5c, 0x72, 0xdc, 0xe2, 0x90, 0x22, 0xce, 0x65, 0xb9, 0xed, 0x81, 0x0f, 0x40, 0xf5, 0x9b, - 0x3f, 0xad, 0x91, 0xed, 0xa5, 0x6a, 0x2b, 0x61, 0x6f, 0xf3, 0x5e, 0xff, 0xde, 0xeb, 0x5f, 0xf7, - 0xfb, 0x75, 0xeb, 0xb5, 0x60, 0xd5, 0x73, 0x3c, 0x27, 0xda, 0x09, 0xa3, 0x20, 0x09, 0x68, 0x73, - 0x12, 0x44, 0x09, 0x7f, 0x12, 0x1e, 0xdf, 0xf8, 0xee, 0xd4, 0x49, 0x66, 0xf3, 0xe3, 0x9d, 0x49, - 0xe0, 0xbd, 0x3d, 0x0d, 0xa6, 0xc1, 0xdb, 0x08, 0x38, 0x9e, 0x9f, 0xa2, 0x85, 0x06, 0x7e, 0xa5, - 0x81, 0xc6, 0x9f, 0x6b, 0xd0, 0xfa, 0x28, 0x72, 0x12, 0xce, 0xf8, 0x2f, 0xe6, 0x3c, 0x4e, 0xe8, - 0x03, 0x80, 0xc4, 0xf1, 0x78, 0xcc, 0x23, 0x87, 0xc7, 0x1d, 0xb2, 0x55, 0xdb, 0x5e, 0xbd, 0xb5, - 0xb1, 0x93, 0xa7, 0xdf, 0x19, 0x3a, 0x1e, 0x1f, 0xe0, 0x58, 0xf7, 0xc6, 0xd3, 0xe7, 0x9b, 0x95, - 0x7f, 0x3e, 0xdf, 0xa4, 0x0f, 0x22, 0x6e, 0xbb, 0x6e, 0x30, 0x19, 0x16, 0x71, 0x6c, 0x21, 0x07, - 0x7d, 0x1f, 0xea, 0x83, 0x60, 0x1e, 0x4d, 0x78, 0xa7, 0xba, 0x45, 0xb6, 0xdb, 0xb7, 0x6e, 0xca, - 0x6c, 0x8b, 0x33, 0xef, 0xa4, 0x20, 0xcb, 0x9f, 0x7b, 0x2c, 0x0b, 0xa0, 0x1f, 0x40, 0xd3, 0xe3, - 0x89, 0x7d, 0x62, 0x27, 0x76, 0xa7, 0x86, 0x54, 0x3a, 0x32, 0xf8, 0x88, 0x27, 0x91, 0x33, 0x39, - 0xca, 0xc6, 0xbb, 0xca, 0xd3, 0xe7, 0x9b, 0x84, 0x15, 0x78, 0x7a, 0x07, 0x6e, 0xc4, 0x1f, 0x3b, - 0xe1, 0xd8, 0xb5, 0x8f, 0xb9, 0x3b, 0xf6, 0x6d, 0x8f, 0x8f, 0x1f, 0xdb, 0xae, 0x73, 0x62, 0x27, - 0x4e, 0xe0, 0x77, 0x3e, 0x6d, 0x6c, 0x91, 0xed, 0x26, 0xfb, 0xaa, 0x80, 0x1c, 0x0a, 0x44, 0xcf, - 0xf6, 0xf8, 0xc3, 0x62, 0x9c, 0x9e, 0xc0, 0x06, 0x0f, 0x67, 0xdc, 0xe3, 0x91, 0xed, 0x8e, 0x17, - 0x36, 0xe4, 0xdf, 0x8d, 0x2f, 0xb8, 0x23, 0xd7, 0x8a, 0x74, 0xd2, 0x69, 0x6c, 0x02, 0xc8, 0x55, - 0xd3, 0x06, 0xd4, 0xcc, 0x07, 0x07, 0x5a, 0x85, 0x36, 0x41, 0x61, 0xa3, 0x43, 0x4b, 0x23, 0xc6, - 0x3a, 0xac, 0x65, 0x7b, 0x14, 0x87, 0x81, 0x1f, 0x73, 0xe3, 0x3f, 0x04, 0x40, 0xce, 0x48, 0x4d, - 0xa8, 0xe3, 0xfa, 0xf2, 0x4a, 0x5d, 0x93, 0xbc, 0x70, 0x55, 0x0f, 0x6c, 0x27, 0xea, 0x6e, 0x64, - 0xb4, 0x5a, 0xe8, 0x32, 0x4f, 0xec, 0x30, 0xe1, 0x11, 0xcb, 0x02, 0xe9, 0xf7, 0xa0, 0x11, 0xdb, - 0x5e, 0xe8, 0xf2, 0xb8, 0x53, 0xc5, 0x1c, 0x9a, 0xcc, 0x31, 0xc0, 0x01, 0xdc, 0xda, 0x0a, 0xcb, - 0x61, 0xf4, 0x36, 0xa8, 0xfc, 0x09, 0xf7, 0x42, 0xd7, 0x8e, 0xe2, 0xac, 0x2c, 0x54, 0xc6, 0x58, - 0xd9, 0x50, 0x16, 0x25, 0xa1, 0xf4, 0x7d, 0x80, 0x99, 0x13, 0x27, 0xc1, 0x34, 0xb2, 0xbd, 0xb8, - 0xa3, 0x2c, 0x13, 0xde, 0xcf, 0xc7, 0xb2, 0xc8, 0x05, 0xb0, 0xf1, 0x2e, 0xa8, 0xc5, 0x7a, 0x28, - 0x05, 0x45, 0x94, 0xb3, 0x43, 0xb6, 0xc8, 0x76, 0x8b, 0xe1, 0x37, 0xdd, 0x80, 0x95, 0xc7, 0xb6, - 0x3b, 0x4f, 0x35, 0xd6, 0x62, 0xa9, 0x61, 0x98, 
0x50, 0x4f, 0x97, 0x40, 0x6f, 0x42, 0x0b, 0xab, - 0x98, 0xd8, 0x5e, 0x38, 0xf6, 0x62, 0x84, 0xd5, 0xd8, 0x6a, 0xe1, 0x3b, 0x8a, 0x65, 0x0a, 0x91, - 0x97, 0xe4, 0x29, 0xfe, 0x54, 0x85, 0x76, 0x59, 0x69, 0xf4, 0x3d, 0x50, 0x92, 0xb3, 0x30, 0xc5, - 0xb5, 0x6f, 0x7d, 0xe3, 0x2a, 0x45, 0x66, 0xe6, 0xf0, 0x2c, 0xe4, 0x0c, 0x03, 0xe8, 0x77, 0x80, - 0x7a, 0xe8, 0x1b, 0x9f, 0xda, 0x9e, 0xe3, 0x9e, 0xa1, 0x2a, 0x91, 0x8a, 0xca, 0xb4, 0x74, 0xe4, - 0x2e, 0x0e, 0x08, 0x31, 0x8a, 0x65, 0xce, 0xb8, 0x1b, 0x76, 0x14, 0x1c, 0xc7, 0x6f, 0xe1, 0x9b, - 0xfb, 0x4e, 0xd2, 0x59, 0x49, 0x7d, 0xe2, 0xdb, 0x38, 0x03, 0x90, 0x33, 0xd1, 0x55, 0x68, 0x8c, - 0x7a, 0x1f, 0xf6, 0xfa, 0x1f, 0xf5, 0xb4, 0x8a, 0x30, 0x76, 0xfb, 0xa3, 0xde, 0xd0, 0x62, 0x1a, - 0xa1, 0x2a, 0xac, 0xdc, 0x33, 0x47, 0xf7, 0x2c, 0xad, 0x4a, 0xd7, 0x40, 0xdd, 0x3f, 0x18, 0x0c, - 0xfb, 0xf7, 0x98, 0x79, 0xa4, 0xd5, 0x28, 0x85, 0x36, 0x8e, 0x48, 0x9f, 0x22, 0x42, 0x07, 0xa3, - 0xa3, 0x23, 0x93, 0x3d, 0xd2, 0x56, 0x84, 0x20, 0x0f, 0x7a, 0x77, 0xfb, 0x5a, 0x9d, 0xb6, 0xa0, - 0x39, 0x18, 0x9a, 0x43, 0x6b, 0x60, 0x0d, 0xb5, 0x86, 0xf1, 0x21, 0xd4, 0xd3, 0xa9, 0x5f, 0x81, - 0x10, 0x8d, 0xdf, 0x11, 0x68, 0xe6, 0xe2, 0x79, 0x15, 0xc2, 0x2e, 0x49, 0x22, 0xaf, 0xe7, 0x05, - 0x21, 0xd4, 0x2e, 0x08, 0xc1, 0xf8, 0xc7, 0x0a, 0xa8, 0x85, 0x18, 0xe9, 0x9b, 0xa0, 0x4e, 0x82, - 0xb9, 0x9f, 0x8c, 0x1d, 0x3f, 0xc1, 0x92, 0x2b, 0xfb, 0x15, 0xd6, 0x44, 0xd7, 0x81, 0x9f, 0xd0, - 0x9b, 0xb0, 0x9a, 0x0e, 0x9f, 0xba, 0x81, 0x9d, 0xa4, 0x73, 0xed, 0x57, 0x18, 0xa0, 0xf3, 0xae, - 0xf0, 0x51, 0x0d, 0x6a, 0xf1, 0xdc, 0xc3, 0x99, 0x08, 0x13, 0x9f, 0xf4, 0x3a, 0xd4, 0xe3, 0xc9, - 0x8c, 0x7b, 0x36, 0x16, 0xf7, 0x0d, 0x96, 0x59, 0xf4, 0x9b, 0xd0, 0xfe, 0x15, 0x8f, 0x82, 0x71, - 0x32, 0x8b, 0x78, 0x3c, 0x0b, 0xdc, 0x13, 0x2c, 0x34, 0x61, 0x6b, 0xc2, 0x3b, 0xcc, 0x9d, 0xf4, - 0xad, 0x0c, 0x26, 0x79, 0xd5, 0x91, 0x17, 0x61, 0x2d, 0xe1, 0xdf, 0xcd, 0xb9, 0x7d, 0x1b, 0xb4, - 0x05, 0x5c, 0x4a, 0xb0, 0x81, 0x04, 0x09, 0x6b, 0x17, 0xc8, 0x94, 0xa4, 0x09, 0x6d, 0x9f, 0x4f, - 0xed, 0xc4, 0x79, 0xcc, 0xc7, 0x71, 0x68, 0xfb, 0x71, 0xa7, 0xb9, 0x7c, 0xd3, 0x75, 0xe7, 0x93, - 0x8f, 0x79, 0x32, 0x08, 0x6d, 0x3f, 0x3b, 0xa1, 0x6b, 0x79, 0x84, 0xf0, 0xc5, 0xf4, 0x5b, 0xb0, - 0x5e, 0xa4, 0x38, 0xe1, 0x6e, 0x62, 0xc7, 0x1d, 0x75, 0xab, 0xb6, 0x4d, 0x59, 0x91, 0x79, 0x0f, - 0xbd, 0x25, 0x20, 0x72, 0x8b, 0x3b, 0xb0, 0x55, 0xdb, 0x26, 0x12, 0x88, 0xc4, 0xc4, 0xf5, 0xd6, - 0x0e, 0x83, 0xd8, 0x59, 0x20, 0xb5, 0xfa, 0xbf, 0x49, 0xe5, 0x11, 0x05, 0xa9, 0x22, 0x45, 0x46, - 0xaa, 0x95, 0x92, 0xca, 0xdd, 0x92, 0x54, 0x01, 0xcc, 0x48, 0xad, 0xa5, 0xa4, 0x72, 0x77, 0x46, - 0xea, 0x0e, 0x40, 0xc4, 0x63, 0x9e, 0x8c, 0x67, 0x62, 0xe7, 0xdb, 0x78, 0x09, 0xbc, 0x79, 0xc9, - 0x35, 0xb6, 0xc3, 0x04, 0x6a, 0xdf, 0xf1, 0x13, 0xa6, 0x46, 0xf9, 0x27, 0xfd, 0x3a, 0xa8, 0x85, - 0xd6, 0x3a, 0xeb, 0x28, 0x3e, 0xe9, 0x30, 0x3e, 0x00, 0xb5, 0x88, 0x2a, 0x1f, 0xe5, 0x06, 0xd4, - 0x1e, 0x59, 0x03, 0x8d, 0xd0, 0x3a, 0x54, 0x7b, 0x7d, 0xad, 0x2a, 0x8f, 0x73, 0xed, 0x86, 0xf2, - 0xfb, 0xbf, 0xea, 0xa4, 0xdb, 0x80, 0x15, 0xe4, 0xdd, 0x6d, 0x01, 0xc8, 0xb2, 0x1b, 0x7f, 0x57, - 0xa0, 0x8d, 0x25, 0x96, 0x92, 0xde, 0xbc, 0x44, 0xb3, 0xaf, 0x57, 0xb1, 0xdb, 0x57, 0x29, 0xf1, - 0x75, 0xeb, 0xf0, 0xff, 0x29, 0xaf, 0x2f, 0x4b, 0x35, 0xf7, 0x95, 0x26, 0xd1, 0xaa, 0xf7, 0x95, - 0x66, 0x5d, 0x6b, 0xdc, 0x57, 0x9a, 0xaa, 0x06, 0xf7, 0x95, 0x66, 0x4b, 0x5b, 0x63, 0xf2, 0xe2, - 0x62, 0x4b, 0x17, 0x06, 0x5b, 0x3e, 0xa9, 0x6c, 0xf9, 0x94, 0x18, 0x77, 0x00, 0xe4, 0xe2, 0x45, - 0xfd, 0x83, 0xd3, 0xd3, 0x98, 0xa7, 0x57, 0xe0, 0x1b, 0x2c, 0xb3, 0x84, 
0xdf, 0xe5, 0xfe, 0x34, - 0x99, 0xa1, 0x8a, 0xd6, 0x58, 0x66, 0x19, 0x3e, 0xac, 0xa7, 0xbf, 0xbc, 0x52, 0x75, 0x1b, 0x99, - 0x3e, 0xf3, 0xdf, 0x57, 0x34, 0x72, 0xa9, 0x55, 0xa5, 0xd4, 0xde, 0x81, 0xc6, 0x31, 0x4e, 0x9c, - 0x37, 0x17, 0x5f, 0xbb, 0xac, 0x47, 0x40, 0x04, 0xcb, 0x91, 0x46, 0x0c, 0xeb, 0x4b, 0x63, 0x54, - 0x07, 0x38, 0x0e, 0xe6, 0xfe, 0x89, 0x9d, 0x75, 0xb2, 0x64, 0x7b, 0x85, 0x2d, 0x78, 0x04, 0x1f, - 0x37, 0xf8, 0x25, 0x8f, 0xf2, 0xdf, 0x07, 0x34, 0x84, 0x77, 0x1e, 0x86, 0x3c, 0xca, 0xc4, 0x9f, - 0x1a, 0x92, 0xbb, 0xb2, 0xc0, 0xdd, 0x70, 0xe1, 0xda, 0xd2, 0x22, 0xb1, 0x3f, 0x29, 0x15, 0xab, - 0xba, 0x54, 0x2c, 0xfa, 0x1e, 0xa8, 0x45, 0x63, 0x83, 0xac, 0x4a, 0x0b, 0x5c, 0xca, 0xc7, 0x24, - 0xd6, 0xf8, 0xa3, 0x02, 0x6b, 0x3f, 0x99, 0xf3, 0xe8, 0x2c, 0x6f, 0x06, 0xe9, 0x6d, 0xa8, 0xc7, - 0x89, 0x9d, 0xcc, 0xe3, 0xac, 0x15, 0xd1, 0x65, 0x9e, 0x12, 0x70, 0x67, 0x80, 0x28, 0x96, 0xa1, - 0xe9, 0x8f, 0x01, 0x78, 0x14, 0x05, 0xd1, 0x18, 0xdb, 0x98, 0x0b, 0x5d, 0x79, 0x39, 0xd6, 0x12, - 0x48, 0x6c, 0x62, 0x54, 0x9e, 0x7f, 0x8a, 0xfd, 0x40, 0x03, 0x77, 0x49, 0x65, 0xa9, 0x41, 0x77, - 0x04, 0x9f, 0xc8, 0xf1, 0xa7, 0xb8, 0x4d, 0xa5, 0x73, 0x34, 0x40, 0xff, 0x9e, 0x9d, 0xd8, 0xfb, - 0x15, 0x96, 0xa1, 0x04, 0xfe, 0x31, 0x9f, 0x24, 0x41, 0x84, 0x97, 0x46, 0x09, 0xff, 0x10, 0xfd, - 0x39, 0x3e, 0x45, 0x61, 0xfe, 0x89, 0xed, 0xda, 0x11, 0xfe, 0xde, 0x95, 0xf3, 0xa3, 0xbf, 0xc8, - 0x8f, 0x96, 0xc0, 0x7b, 0x76, 0x12, 0x39, 0x4f, 0xf0, 0xae, 0x29, 0xe1, 0x8f, 0xd0, 0x9f, 0xe3, - 0x53, 0x94, 0xf1, 0x16, 0xd4, 0xd3, 0x9d, 0x12, 0x97, 0xab, 0xc5, 0x58, 0x9f, 0xa5, 0x3d, 0xd4, - 0x60, 0xb4, 0xbb, 0x6b, 0x0d, 0x06, 0x1a, 0x49, 0x6f, 0x5a, 0xe3, 0x37, 0x04, 0xd4, 0x62, 0x5b, - 0x44, 0x73, 0xd4, 0xeb, 0xf7, 0xac, 0x14, 0x3a, 0x3c, 0x38, 0xb2, 0xfa, 0xa3, 0xa1, 0x46, 0x44, - 0xa7, 0xb4, 0x6b, 0xf6, 0x76, 0xad, 0x43, 0x6b, 0x2f, 0xed, 0xb8, 0xac, 0x9f, 0x5a, 0xbb, 0xa3, - 0xe1, 0x41, 0xbf, 0xa7, 0xd5, 0xc4, 0x60, 0xd7, 0xdc, 0x1b, 0xef, 0x99, 0x43, 0x53, 0x53, 0x84, - 0x75, 0x20, 0x9a, 0xb4, 0x9e, 0x79, 0xa8, 0xad, 0xd0, 0x75, 0x58, 0x1d, 0xf5, 0xcc, 0x87, 0xe6, - 0xc1, 0xa1, 0xd9, 0x3d, 0xb4, 0xb4, 0xba, 0x88, 0xed, 0xf5, 0x87, 0xe3, 0xbb, 0xfd, 0x51, 0x6f, - 0x4f, 0x6b, 0x64, 0xb7, 0x7d, 0x1d, 0x14, 0xd1, 0x64, 0x1a, 0x8f, 0x00, 0xe4, 0xde, 0x96, 0x7b, - 0x58, 0x35, 0xef, 0x79, 0xde, 0x85, 0xeb, 0x0b, 0x3d, 0x8f, 0xe3, 0xba, 0x4e, 0xcc, 0x27, 0x81, - 0x7f, 0x92, 0xb7, 0xc1, 0x5f, 0x91, 0xdd, 0xcf, 0xc2, 0xa0, 0xf1, 0x5b, 0x02, 0x20, 0xeb, 0x40, - 0x6f, 0xcb, 0x87, 0x42, 0xda, 0x93, 0x5d, 0x5f, 0x2e, 0xd7, 0xe5, 0xcf, 0x85, 0x1f, 0x95, 0xda, - 0xfe, 0xea, 0xf2, 0x91, 0x4e, 0x43, 0x3f, 0xaf, 0xf9, 0x8f, 0xa1, 0xb5, 0x98, 0x5f, 0xdc, 0x39, - 0x69, 0xb3, 0x8c, 0x3c, 0x54, 0x96, 0x59, 0x57, 0x34, 0x7c, 0x57, 0x2f, 0xbe, 0xf6, 0x79, 0x8b, - 0x9f, 0xc2, 0xfa, 0x12, 0xb3, 0x2b, 0xe7, 0xbd, 0xb3, 0x78, 0xa2, 0xab, 0xa8, 0xb4, 0x85, 0x67, - 0x6a, 0xf9, 0xb7, 0x37, 0x7f, 0x15, 0xc9, 0x63, 0x2d, 0x0a, 0x58, 0x88, 0xf7, 0xf2, 0x47, 0xc8, - 0x17, 0x2d, 0x60, 0x17, 0x40, 0xea, 0x9c, 0x7e, 0x1f, 0xea, 0xa5, 0x57, 0xfd, 0xf5, 0xe5, 0xd3, - 0x90, 0xbd, 0x62, 0x53, 0x86, 0x19, 0xd6, 0xf8, 0x0b, 0x81, 0xd6, 0xe2, 0xf0, 0x95, 0xbb, 0x70, - 0x7b, 0xf9, 0x1d, 0x79, 0x31, 0xff, 0xa5, 0xf2, 0xf8, 0x61, 0x49, 0x1e, 0x17, 0x5e, 0xf9, 0x97, - 0x6e, 0xdf, 0xa2, 0x3a, 0x7e, 0x5e, 0xf0, 0x4b, 0xd5, 0xf1, 0x2a, 0x77, 0xb0, 0xfb, 0x83, 0x67, - 0x2f, 0xf4, 0xca, 0x27, 0x2f, 0xf4, 0xca, 0x67, 0x2f, 0x74, 0xf2, 0xeb, 0x73, 0x9d, 0xfc, 0xed, - 0x5c, 0x27, 0x4f, 0xcf, 0x75, 0xf2, 0xec, 0x5c, 0x27, 0xff, 0x3a, 0xd7, 0xc9, 0xa7, 0xe7, 0x7a, - 
0xe5, 0xb3, 0x73, 0x9d, 0xfc, 0xe1, 0xa5, 0x5e, 0x79, 0xf6, 0x52, 0xaf, 0x7c, 0xf2, 0x52, 0xaf, - 0xfc, 0xac, 0x81, 0x7f, 0xcd, 0x84, 0xc7, 0xc7, 0x75, 0xfc, 0x93, 0xe5, 0x9d, 0xff, 0x06, 0x00, - 0x00, 0xff, 0xff, 0xd3, 0x13, 0x0b, 0xc7, 0xac, 0x11, 0x00, 0x00, + // 1682 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xc4, 0x58, 0x4f, 0x8f, 0x1b, 0x49, + 0x15, 0x77, 0xd9, 0x3d, 0xb6, 0xfb, 0x8d, 0xc7, 0xd3, 0x5b, 0x1b, 0x82, 0x89, 0xd8, 0x9e, 0x49, + 0x23, 0x16, 0x0b, 0x81, 0x17, 0x65, 0xd9, 0xac, 0x76, 0x15, 0xfe, 0xb4, 0x67, 0x3a, 0x19, 0x67, + 0xc7, 0x76, 0x28, 0xdb, 0x59, 0x02, 0x07, 0xab, 0xed, 0xa9, 0xb1, 0x5b, 0xdb, 0xff, 0xe8, 0x6e, + 0x87, 0x0c, 0x27, 0x38, 0x80, 0x38, 0x22, 0x71, 0xe3, 0xc0, 0x85, 0x0b, 0x5f, 0x00, 0x3e, 0x43, + 0x24, 0x2e, 0x39, 0xae, 0x38, 0x44, 0x64, 0x72, 0xd9, 0xe3, 0x1e, 0xf8, 0x00, 0xa8, 0x5e, 0xff, + 0x73, 0x7b, 0x66, 0x16, 0x09, 0x25, 0xec, 0xad, 0xdf, 0xab, 0xdf, 0x7b, 0xf5, 0xab, 0x57, 0xbf, + 0x2a, 0xbf, 0x32, 0x6c, 0x3b, 0x96, 0x63, 0x05, 0x1d, 0x3f, 0xf0, 0x22, 0x8f, 0xd6, 0xe7, 0x5e, + 0x10, 0xf1, 0x27, 0xfe, 0xec, 0xc6, 0x77, 0x17, 0x56, 0xb4, 0x5c, 0xcd, 0x3a, 0x73, 0xcf, 0x79, + 0x67, 0xe1, 0x2d, 0xbc, 0x77, 0x10, 0x30, 0x5b, 0x9d, 0xa2, 0x85, 0x06, 0x7e, 0xc5, 0x81, 0xda, + 0xdf, 0xcb, 0xd0, 0xf8, 0x38, 0xb0, 0x22, 0xce, 0xf8, 0x2f, 0x56, 0x3c, 0x8c, 0xe8, 0x03, 0x80, + 0xc8, 0x72, 0x78, 0xc8, 0x03, 0x8b, 0x87, 0x2d, 0xb2, 0x5f, 0x69, 0x6f, 0xdf, 0xba, 0xd6, 0x49, + 0xd3, 0x77, 0xc6, 0x96, 0xc3, 0x47, 0x38, 0xd6, 0xbd, 0xf1, 0xf4, 0xf9, 0x5e, 0xe9, 0x9f, 0xcf, + 0xf7, 0xe8, 0x83, 0x80, 0x9b, 0xb6, 0xed, 0xcd, 0xc7, 0x59, 0x1c, 0x5b, 0xcb, 0x41, 0x3f, 0x80, + 0xea, 0xc8, 0x5b, 0x05, 0x73, 0xde, 0x2a, 0xef, 0x93, 0x76, 0xf3, 0xd6, 0xcd, 0x3c, 0xdb, 0xfa, + 0xcc, 0x9d, 0x18, 0x64, 0xb8, 0x2b, 0x87, 0x25, 0x01, 0xf4, 0x43, 0xa8, 0x3b, 0x3c, 0x32, 0x4f, + 0xcc, 0xc8, 0x6c, 0x55, 0x90, 0x4a, 0x2b, 0x0f, 0xee, 0xf3, 0x28, 0xb0, 0xe6, 0xfd, 0x64, 0xbc, + 0x2b, 0x3d, 0x7d, 0xbe, 0x47, 0x58, 0x86, 0xa7, 0x77, 0xe0, 0x46, 0xf8, 0x89, 0xe5, 0x4f, 0x6d, + 0x73, 0xc6, 0xed, 0xa9, 0x6b, 0x3a, 0x7c, 0xfa, 0xd8, 0xb4, 0xad, 0x13, 0x33, 0xb2, 0x3c, 0xb7, + 0xf5, 0x59, 0x6d, 0x9f, 0xb4, 0xeb, 0xec, 0xab, 0x02, 0x72, 0x2c, 0x10, 0x03, 0xd3, 0xe1, 0x0f, + 0xb3, 0x71, 0x6d, 0x0f, 0x20, 0xe7, 0x43, 0x6b, 0x50, 0xd1, 0x1f, 0xf4, 0x94, 0x12, 0xad, 0x83, + 0xc4, 0x26, 0xc7, 0x86, 0x42, 0xb4, 0x5d, 0xd8, 0x49, 0xd8, 0x87, 0xbe, 0xe7, 0x86, 0x5c, 0xfb, + 0x37, 0x01, 0xc8, 0xab, 0x43, 0x75, 0xa8, 0xe2, 0xcc, 0x69, 0x0d, 0xdf, 0xcc, 0x89, 0xe3, 0x7c, + 0x0f, 0x4c, 0x2b, 0xe8, 0x5e, 0x4b, 0x4a, 0xd8, 0x40, 0x97, 0x7e, 0x62, 0xfa, 0x11, 0x0f, 0x58, + 0x12, 0x48, 0xbf, 0x07, 0xb5, 0xd0, 0x74, 0x7c, 0x9b, 0x87, 0xad, 0x32, 0xe6, 0x50, 0xf2, 0x1c, + 0x23, 0x1c, 0xc0, 0x45, 0x97, 0x58, 0x0a, 0xa3, 0xb7, 0x41, 0xe6, 0x4f, 0xb8, 0xe3, 0xdb, 0x66, + 0x10, 0x26, 0x05, 0xa3, 0x79, 0x8c, 0x91, 0x0c, 0x25, 0x51, 0x39, 0x94, 0x7e, 0x00, 0xb0, 0xb4, + 0xc2, 0xc8, 0x5b, 0x04, 0xa6, 0x13, 0xb6, 0xa4, 0x4d, 0xc2, 0x47, 0xe9, 0x58, 0x12, 0xb9, 0x06, + 0xd6, 0xde, 0x03, 0x39, 0x5b, 0x0f, 0xa5, 0x20, 0x89, 0x42, 0xb7, 0xc8, 0x3e, 0x69, 0x37, 0x18, + 0x7e, 0xd3, 0x6b, 0xb0, 0xf5, 0xd8, 0xb4, 0x57, 0xf1, 0xee, 0x37, 0x58, 0x6c, 0x68, 0x3a, 0x54, + 0xe3, 0x25, 0xd0, 0x9b, 0xd0, 0x40, 0xb1, 0x44, 0xa6, 0xe3, 0x4f, 0x9d, 0x10, 0x61, 0x15, 0xb6, + 0x9d, 0xf9, 0xfa, 0x61, 0x9e, 0x42, 0xe4, 0x25, 0x69, 0x8a, 0x3f, 0x95, 0xa1, 0x59, 0xd4, 0x00, + 0x7d, 0x1f, 0xa4, 0xe8, 0xcc, 0x8f, 0x71, 0xcd, 0x5b, 0xdf, 0xb8, 0x4a, 0x2b, 0x89, 0x39, 0x3e, + 0xf3, 
0x39, 0xc3, 0x00, 0xfa, 0x1d, 0xa0, 0x0e, 0xfa, 0xa6, 0xa7, 0xa6, 0x63, 0xd9, 0x67, 0xa8, + 0x17, 0xa4, 0x22, 0x33, 0x25, 0x1e, 0xb9, 0x8b, 0x03, 0x42, 0x26, 0x62, 0x99, 0x4b, 0x6e, 0xfb, + 0x2d, 0x09, 0xc7, 0xf1, 0x5b, 0xf8, 0x56, 0xae, 0x15, 0xb5, 0xb6, 0x62, 0x9f, 0xf8, 0xd6, 0xce, + 0x00, 0xf2, 0x99, 0xe8, 0x36, 0xd4, 0x26, 0x83, 0x8f, 0x06, 0xc3, 0x8f, 0x07, 0x4a, 0x49, 0x18, + 0x07, 0xc3, 0xc9, 0x60, 0x6c, 0x30, 0x85, 0x50, 0x19, 0xb6, 0xee, 0xe9, 0x93, 0x7b, 0x86, 0x52, + 0xa6, 0x3b, 0x20, 0x1f, 0xf5, 0x46, 0xe3, 0xe1, 0x3d, 0xa6, 0xf7, 0x95, 0x0a, 0xa5, 0xd0, 0xc4, + 0x91, 0xdc, 0x27, 0x89, 0xd0, 0xd1, 0xa4, 0xdf, 0xd7, 0xd9, 0x23, 0x65, 0x4b, 0x08, 0xb2, 0x37, + 0xb8, 0x3b, 0x54, 0xaa, 0xb4, 0x01, 0xf5, 0xd1, 0x58, 0x1f, 0x1b, 0x23, 0x63, 0xac, 0xd4, 0xb4, + 0x8f, 0xa0, 0x1a, 0x4f, 0xfd, 0x0a, 0x84, 0xa8, 0xfd, 0x8e, 0x40, 0x3d, 0x15, 0xcf, 0xab, 0x10, + 0x76, 0x41, 0x12, 0xe9, 0x7e, 0x5e, 0x10, 0x42, 0xe5, 0x82, 0x10, 0xb4, 0x7f, 0x6c, 0x81, 0x9c, + 0x89, 0x91, 0xbe, 0x05, 0xf2, 0xdc, 0x5b, 0xb9, 0xd1, 0xd4, 0x72, 0x23, 0xdc, 0x72, 0xe9, 0xa8, + 0xc4, 0xea, 0xe8, 0xea, 0xb9, 0x11, 0xbd, 0x09, 0xdb, 0xf1, 0xf0, 0xa9, 0xed, 0x99, 0x51, 0x3c, + 0xd7, 0x51, 0x89, 0x01, 0x3a, 0xef, 0x0a, 0x1f, 0x55, 0xa0, 0x12, 0xae, 0x1c, 0x9c, 0x89, 0x30, + 0xf1, 0x49, 0xaf, 0x43, 0x35, 0x9c, 0x2f, 0xb9, 0x63, 0xe2, 0xe6, 0xbe, 0xc1, 0x12, 0x8b, 0x7e, + 0x13, 0x9a, 0xbf, 0xe2, 0x81, 0x37, 0x8d, 0x96, 0x01, 0x0f, 0x97, 0x9e, 0x7d, 0x82, 0x1b, 0x4d, + 0xd8, 0x8e, 0xf0, 0x8e, 0x53, 0x27, 0x7d, 0x3b, 0x81, 0xe5, 0xbc, 0xaa, 0xc8, 0x8b, 0xb0, 0x86, + 0xf0, 0x1f, 0xa4, 0xdc, 0xbe, 0x0d, 0xca, 0x1a, 0x2e, 0x26, 0x58, 0x43, 0x82, 0x84, 0x35, 0x33, + 0x64, 0x4c, 0x52, 0x87, 0xa6, 0xcb, 0x17, 0x66, 0x64, 0x3d, 0xe6, 0xd3, 0xd0, 0x37, 0xdd, 0xb0, + 0x55, 0xdf, 0xbc, 0x95, 0xbb, 0xab, 0xf9, 0x27, 0x3c, 0x1a, 0xf9, 0xa6, 0x9b, 0x9c, 0xd0, 0x9d, + 0x34, 0x42, 0xf8, 0x42, 0xfa, 0x2d, 0xd8, 0xcd, 0x52, 0x9c, 0x70, 0x3b, 0x32, 0xc3, 0x96, 0xbc, + 0x5f, 0x69, 0x53, 0x96, 0x65, 0x3e, 0x44, 0x6f, 0x01, 0x88, 0xdc, 0xc2, 0x16, 0xec, 0x57, 0xda, + 0x24, 0x07, 0x22, 0x31, 0x71, 0xbd, 0x35, 0x7d, 0x2f, 0xb4, 0xd6, 0x48, 0x6d, 0xff, 0x77, 0x52, + 0x69, 0x44, 0x46, 0x2a, 0x4b, 0x91, 0x90, 0x6a, 0xc4, 0xa4, 0x52, 0x77, 0x4e, 0x2a, 0x03, 0x26, + 0xa4, 0x76, 0x62, 0x52, 0xa9, 0x3b, 0x21, 0x75, 0x07, 0x20, 0xe0, 0x21, 0x8f, 0xa6, 0x4b, 0x51, + 0xf9, 0x26, 0x5e, 0x02, 0x6f, 0x5d, 0x72, 0x8d, 0x75, 0x98, 0x40, 0x1d, 0x59, 0x6e, 0xc4, 0xe4, + 0x20, 0xfd, 0xa4, 0x5f, 0x07, 0x39, 0xd3, 0x5a, 0x6b, 0x17, 0xc5, 0x97, 0x3b, 0xb4, 0x0f, 0x41, + 0xce, 0xa2, 0x8a, 0x47, 0xb9, 0x06, 0x95, 0x47, 0xc6, 0x48, 0x21, 0xb4, 0x0a, 0xe5, 0xc1, 0x50, + 0x29, 0xe7, 0xc7, 0xb9, 0x72, 0x43, 0xfa, 0xfd, 0x5f, 0x54, 0xd2, 0xad, 0xc1, 0x16, 0xf2, 0xee, + 0x36, 0x00, 0xf2, 0x6d, 0xd7, 0xfe, 0x26, 0x41, 0x13, 0xb7, 0x38, 0x97, 0xf4, 0xde, 0x25, 0x9a, + 0x7d, 0xbd, 0x8a, 0x6d, 0x5f, 0xa5, 0xc4, 0xd7, 0xad, 0xc3, 0xff, 0xa7, 0xbc, 0xbe, 0x2c, 0xd5, + 0xdc, 0x97, 0xea, 0x44, 0x29, 0xdf, 0x97, 0xea, 0x55, 0xa5, 0x76, 0x5f, 0xaa, 0xcb, 0x0a, 0xdc, + 0x97, 0xea, 0x0d, 0x65, 0x87, 0xe5, 0x17, 0x17, 0xdb, 0xb8, 0x30, 0xd8, 0xe6, 0x49, 0x65, 0x9b, + 0xa7, 0x44, 0xbb, 0x03, 0x90, 0x2f, 0x5e, 0xec, 0xbf, 0x77, 0x7a, 0x1a, 0xf2, 0xf8, 0x0a, 0x7c, + 0x83, 0x25, 0x96, 0xf0, 0xdb, 0xdc, 0x5d, 0x44, 0x4b, 0x54, 0xd1, 0x0e, 0x4b, 0x2c, 0xcd, 0x85, + 0xdd, 0xf8, 0x97, 0x37, 0x57, 0xdd, 0xb5, 0x44, 0x9f, 0xe9, 0xef, 0x2b, 0x1a, 0xa9, 0xd4, 0xca, + 0xb9, 0xd4, 0xde, 0x85, 0xda, 0x0c, 0x27, 0x4e, 0x9b, 0x8b, 0xaf, 0x5d, 0xd6, 0x23, 0x20, 0x82, + 0xa5, 0x48, 0x2d, 0x84, 0xdd, 
0x8d, 0x31, 0xaa, 0x02, 0xcc, 0xbc, 0x95, 0x7b, 0x62, 0x26, 0x3d, + 0x26, 0x69, 0x6f, 0xb1, 0x35, 0x8f, 0xe0, 0x63, 0x7b, 0xbf, 0xe4, 0x41, 0xfa, 0xfb, 0x80, 0x86, + 0xf0, 0xae, 0x7c, 0x9f, 0x07, 0x89, 0xf8, 0x63, 0x23, 0xe7, 0x2e, 0xad, 0x71, 0xd7, 0x6c, 0x78, + 0x73, 0x63, 0x91, 0xd8, 0x9f, 0x14, 0x36, 0xab, 0xbc, 0xb1, 0x59, 0xf4, 0x7d, 0x90, 0xb3, 0xc6, + 0x06, 0x59, 0x15, 0x16, 0xb8, 0x91, 0x8f, 0xe5, 0x58, 0xed, 0x8f, 0x12, 0xec, 0xfc, 0x64, 0xc5, + 0x83, 0xb3, 0xb4, 0x19, 0xa4, 0xb7, 0xa1, 0x1a, 0x46, 0x66, 0xb4, 0x0a, 0x93, 0x56, 0x44, 0xcd, + 0xf3, 0x14, 0x80, 0x9d, 0x11, 0xa2, 0x58, 0x82, 0xa6, 0x3f, 0x06, 0xe0, 0x41, 0xe0, 0x05, 0x53, + 0x6c, 0x63, 0x2e, 0xf4, 0xcb, 0xc5, 0x58, 0x43, 0x20, 0xb1, 0x89, 0x91, 0x79, 0xfa, 0x29, 0xea, + 0x81, 0x06, 0x56, 0x49, 0x66, 0xb1, 0x41, 0x3b, 0x82, 0x4f, 0x60, 0xb9, 0x0b, 0x2c, 0x53, 0xe1, + 0x1c, 0x8d, 0xd0, 0x7f, 0x68, 0x46, 0xe6, 0x51, 0x89, 0x25, 0x28, 0x81, 0x7f, 0xcc, 0xe7, 0x91, + 0x17, 0xe0, 0xa5, 0x51, 0xc0, 0x3f, 0x44, 0x7f, 0x8a, 0x8f, 0x51, 0x98, 0x7f, 0x6e, 0xda, 0x66, + 0x80, 0xbf, 0x77, 0xc5, 0xfc, 0xe8, 0xcf, 0xf2, 0xa3, 0x25, 0xf0, 0x8e, 0x19, 0x05, 0xd6, 0x13, + 0xbc, 0x6b, 0x0a, 0xf8, 0x3e, 0xfa, 0x53, 0x7c, 0x8c, 0xd2, 0xde, 0x86, 0x6a, 0x5c, 0x29, 0x71, + 0xb9, 0x1a, 0x8c, 0x0d, 0x59, 0xdc, 0x43, 0x8d, 0x26, 0x07, 0x07, 0xc6, 0x68, 0xa4, 0x90, 0xf8, + 0xa6, 0xd5, 0x7e, 0x43, 0x40, 0xce, 0xca, 0x22, 0x9a, 0xa3, 0xc1, 0x70, 0x60, 0xc4, 0xd0, 0x71, + 0xaf, 0x6f, 0x0c, 0x27, 0x63, 0x85, 0x88, 0x4e, 0xe9, 0x40, 0x1f, 0x1c, 0x18, 0xc7, 0xc6, 0x61, + 0xdc, 0x71, 0x19, 0x3f, 0x35, 0x0e, 0x26, 0xe3, 0xde, 0x70, 0xa0, 0x54, 0xc4, 0x60, 0x57, 0x3f, + 0x9c, 0x1e, 0xea, 0x63, 0x5d, 0x91, 0x84, 0xd5, 0x13, 0x4d, 0xda, 0x40, 0x3f, 0x56, 0xb6, 0xe8, + 0x2e, 0x6c, 0x4f, 0x06, 0xfa, 0x43, 0xbd, 0x77, 0xac, 0x77, 0x8f, 0x0d, 0xa5, 0x2a, 0x62, 0x07, + 0xc3, 0xf1, 0xf4, 0xee, 0x70, 0x32, 0x38, 0x54, 0x6a, 0xc9, 0x6d, 0x5f, 0x05, 0x49, 0x34, 0x99, + 0xda, 0x23, 0x80, 0xbc, 0xb6, 0xc5, 0x1e, 0x56, 0x4e, 0x7b, 0x9e, 0xf7, 0xe0, 0xfa, 0x5a, 0xcf, + 0x63, 0xd9, 0xb6, 0x15, 0xf2, 0xb9, 0xe7, 0x9e, 0xa4, 0x6d, 0xf0, 0x57, 0xf2, 0xee, 0x67, 0x6d, + 0x50, 0xfb, 0x2d, 0x01, 0xc8, 0xf7, 0x81, 0xde, 0xce, 0x1f, 0x0a, 0x71, 0x4f, 0x76, 0x7d, 0x73, + 0xbb, 0x2e, 0x7f, 0x2e, 0xfc, 0xa8, 0xd0, 0xf6, 0x97, 0x37, 0x8f, 0x74, 0x1c, 0xfa, 0x45, 0xcd, + 0x7f, 0x08, 0x8d, 0xf5, 0xfc, 0xe2, 0xce, 0x89, 0x9b, 0x65, 0xe4, 0x21, 0xb3, 0xc4, 0xba, 0xa2, + 0xe1, 0xbb, 0x7a, 0xf1, 0x95, 0x2f, 0x5a, 0xfc, 0x02, 0x76, 0x37, 0x98, 0x5d, 0x39, 0xef, 0x9d, + 0xf5, 0x13, 0x5d, 0x46, 0xa5, 0xad, 0x3d, 0x20, 0x8b, 0xbf, 0xbd, 0xe9, 0xab, 0x28, 0x3f, 0xd6, + 0x62, 0x03, 0x33, 0xf1, 0x5e, 0xfe, 0x08, 0xf9, 0x5f, 0x37, 0xb0, 0x0b, 0x90, 0xeb, 0x9c, 0x7e, + 0x1f, 0xaa, 0x85, 0xf7, 0xf6, 0xf5, 0xcd, 0xd3, 0x90, 0xbc, 0xb8, 0x63, 0x86, 0x09, 0x56, 0xfb, + 0x33, 0x81, 0xc6, 0xfa, 0xf0, 0x95, 0x55, 0xb8, 0xbd, 0xf9, 0x8e, 0xbc, 0x98, 0xff, 0x52, 0x79, + 0xfc, 0xb0, 0x20, 0x8f, 0x0b, 0xef, 0xef, 0x4b, 0xcb, 0xb7, 0xae, 0x8e, 0x9f, 0x67, 0xfc, 0x62, + 0x75, 0xbc, 0xca, 0x0a, 0x76, 0x7f, 0xf0, 0xec, 0x85, 0x5a, 0xfa, 0xf4, 0x85, 0x5a, 0xfa, 0xfc, + 0x85, 0x4a, 0x7e, 0x7d, 0xae, 0x92, 0xbf, 0x9e, 0xab, 0xe4, 0xe9, 0xb9, 0x4a, 0x9e, 0x9d, 0xab, + 0xe4, 0x5f, 0xe7, 0x2a, 0xf9, 0xec, 0x5c, 0x2d, 0x7d, 0x7e, 0xae, 0x92, 0x3f, 0xbc, 0x54, 0x4b, + 0xcf, 0x5e, 0xaa, 0xa5, 0x4f, 0x5f, 0xaa, 0xa5, 0x9f, 0xd5, 0xf0, 0x4f, 0x13, 0x7f, 0x36, 0xab, + 0xe2, 0xdf, 0x1f, 0xef, 0xfe, 0x27, 0x00, 0x00, 0xff, 0xff, 0xac, 0xf7, 0x3c, 0xcc, 0x46, 0x11, + 0x00, 0x00, } func (x WriteRequest_SourceEnum) String() 
string { @@ -1979,14 +1976,6 @@ func (this *WriteRequest) Equal(that interface{}) bool { if this.SkipLabelNameValidation != that1.SkipLabelNameValidation { return false } - if len(this.EphemeralTimeseries) != len(that1.EphemeralTimeseries) { - return false - } - for i := range this.EphemeralTimeseries { - if !this.EphemeralTimeseries[i].Equal(that1.EphemeralTimeseries[i]) { - return false - } - } return true } func (this *WriteResponse) Equal(that interface{}) bool { @@ -3006,7 +2995,7 @@ func (this *WriteRequest) GoString() string { if this == nil { return "nil" } - s := make([]string, 0, 9) + s := make([]string, 0, 8) s = append(s, "&mimirpb.WriteRequest{") s = append(s, "Timeseries: "+fmt.Sprintf("%#v", this.Timeseries)+",\n") s = append(s, "Source: "+fmt.Sprintf("%#v", this.Source)+",\n") @@ -3014,7 +3003,6 @@ func (this *WriteRequest) GoString() string { s = append(s, "Metadata: "+fmt.Sprintf("%#v", this.Metadata)+",\n") } s = append(s, "SkipLabelNameValidation: "+fmt.Sprintf("%#v", this.SkipLabelNameValidation)+",\n") - s = append(s, "EphemeralTimeseries: "+fmt.Sprintf("%#v", this.EphemeralTimeseries)+",\n") s = append(s, "}") return strings.Join(s, "") } @@ -3462,22 +3450,6 @@ func (m *WriteRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l - if len(m.EphemeralTimeseries) > 0 { - for iNdEx := len(m.EphemeralTimeseries) - 1; iNdEx >= 0; iNdEx-- { - { - size := m.EphemeralTimeseries[iNdEx].Size() - i -= size - if _, err := m.EphemeralTimeseries[iNdEx].MarshalTo(dAtA[i:]); err != nil { - return 0, err - } - i = encodeVarintMimir(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x3e - i-- - dAtA[i] = 0xca - } - } if m.SkipLabelNameValidation { i-- if m.SkipLabelNameValidation { @@ -4811,12 +4783,6 @@ func (m *WriteRequest) Size() (n int) { if m.SkipLabelNameValidation { n += 3 } - if len(m.EphemeralTimeseries) > 0 { - for _, e := range m.EphemeralTimeseries { - l = e.Size() - n += 2 + l + sovMimir(uint64(l)) - } - } return n } @@ -5410,7 +5376,6 @@ func (this *WriteRequest) String() string { `Source:` + fmt.Sprintf("%v", this.Source) + `,`, `Metadata:` + repeatedStringForMetadata + `,`, `SkipLabelNameValidation:` + fmt.Sprintf("%v", this.SkipLabelNameValidation) + `,`, - `EphemeralTimeseries:` + fmt.Sprintf("%v", this.EphemeralTimeseries) + `,`, `}`, }, "") return s @@ -5974,40 +5939,6 @@ func (m *WriteRequest) Unmarshal(dAtA []byte) error { } } m.SkipLabelNameValidation = bool(v != 0) - case 1001: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field EphemeralTimeseries", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMimir - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthMimir - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthMimir - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.EphemeralTimeseries = append(m.EphemeralTimeseries, PreallocTimeseries{}) - if err := m.EphemeralTimeseries[len(m.EphemeralTimeseries)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipMimir(dAtA[iNdEx:]) diff --git a/pkg/mimirpb/mimir.proto b/pkg/mimirpb/mimir.proto index 48fc1413dfa..6b453f14df3 100644 --- a/pkg/mimirpb/mimir.proto +++ b/pkg/mimirpb/mimir.proto @@ -30,9 +30,6 @@ message WriteRequest { // Skip validation of label 
names. bool skip_label_name_validation = 1000; - - // Timeseries that are stored to ephemeral storage only. - repeated TimeSeries ephemeral_timeseries = 1001 [(gogoproto.nullable) = false, (gogoproto.customtype) = "PreallocTimeseries"]; } message WriteResponse {} diff --git a/pkg/storage/tsdb/config.go b/pkg/storage/tsdb/config.go index c05ee6b2135..981129698df 100644 --- a/pkg/storage/tsdb/config.go +++ b/pkg/storage/tsdb/config.go @@ -95,10 +95,9 @@ var ( // BlocksStorageConfig holds the config information for the blocks storage. type BlocksStorageConfig struct { - Bucket bucket.Config `yaml:",inline"` - BucketStore BucketStoreConfig `yaml:"bucket_store" doc:"description=This configures how the querier and store-gateway discover and synchronize blocks stored in the bucket."` - TSDB TSDBConfig `yaml:"tsdb"` - EphemeralTSDB EphemeralTSDBConfig `yaml:"ephemeral_tsdb"` + Bucket bucket.Config `yaml:",inline"` + BucketStore BucketStoreConfig `yaml:"bucket_store" doc:"description=This configures how the querier and store-gateway discover and synchronize blocks stored in the bucket."` + TSDB TSDBConfig `yaml:"tsdb"` } // DurationList is the block ranges for a tsdb @@ -143,7 +142,6 @@ func (cfg *BlocksStorageConfig) RegisterFlags(f *flag.FlagSet, logger log.Logger cfg.Bucket.RegisterFlagsWithPrefixAndDefaultDirectory("blocks-storage.", "blocks", f, logger) cfg.BucketStore.RegisterFlags(f, logger) cfg.TSDB.RegisterFlags(f) - cfg.EphemeralTSDB.RegisterFlags(f) } // Validate the config. @@ -389,40 +387,3 @@ func (cfg *BucketIndexConfig) RegisterFlagsWithPrefix(f *flag.FlagSet, prefix st f.DurationVar(&cfg.IdleTimeout, prefix+"idle-timeout", time.Hour, "How long a unused bucket index should be cached. Once this timeout expires, the unused bucket index is removed from the in-memory cache. This option is used only by querier.") f.DurationVar(&cfg.MaxStalePeriod, prefix+"max-stale-period", time.Hour, "The maximum allowed age of a bucket index (last updated) before queries start failing because the bucket index is too old. The bucket index is periodically updated by the compactor, and this check is enforced in the querier (at query time).") } - -// EphemeralTSDBConfig holds the config for Ephemeral Storage opened in the ingesters. -type EphemeralTSDBConfig struct { - Retention time.Duration `yaml:"retention_period" category:"experimental"` - HeadChunksWriteBufferSize int `yaml:"head_chunks_write_buffer_size_bytes" category:"experimental"` - HeadChunksEndTimeVariance float64 `yaml:"head_chunks_end_time_variance" category:"experimental"` - StripeSize int `yaml:"stripe_size" category:"experimental"` - HeadChunksWriteQueueSize int `yaml:"head_chunks_write_queue_size" category:"experimental"` - HeadPostingsForMatchersCacheTTL time.Duration `yaml:"head_postings_for_matchers_cache_ttl" category:"experimental"` - HeadPostingsForMatchersCacheSize int `yaml:"head_postings_for_matchers_cache_size" category:"experimental"` - HeadPostingsForMatchersCacheForce bool `yaml:"head_postings_for_matchers_cache_force" category:"experimental"` -} - -// RegisterFlags registers the TSDBConfig flags. 
-func (cfg *EphemeralTSDBConfig) RegisterFlags(f *flag.FlagSet) { - f.DurationVar(&cfg.Retention, "blocks-storage.ephemeral-tsdb.retention-period", 10*time.Minute, "Retention of ephemeral series.") - f.IntVar(&cfg.HeadChunksWriteBufferSize, "blocks-storage.ephemeral-tsdb.head-chunks-write-buffer-size-bytes", chunks.DefaultWriteBufferSize, headChunkWriterBufferSizeHelp) - f.Float64Var(&cfg.HeadChunksEndTimeVariance, "blocks-storage.ephemeral-tsdb.head-chunks-end-time-variance", 0, headChunksEndTimeVarianceHelp) - f.IntVar(&cfg.StripeSize, "blocks-storage.ephemeral-tsdb.stripe-size", 16384, headStripeSizeHelp) - f.IntVar(&cfg.HeadChunksWriteQueueSize, "blocks-storage.ephemeral-tsdb.head-chunks-write-queue-size", 1000000, headChunksWriteQueueSizeHelp) - f.DurationVar(&cfg.HeadPostingsForMatchersCacheTTL, "blocks-storage.ephemeral-tsdb.head-postings-for-matchers-cache-ttl", 10*time.Second, headPostingsForMatchersCacheTTLHelp) - f.IntVar(&cfg.HeadPostingsForMatchersCacheSize, "blocks-storage.ephemeral-tsdb.head-postings-for-matchers-cache-size", 100, headPostingsForMatchersCacheSizeHelp) - f.BoolVar(&cfg.HeadPostingsForMatchersCacheForce, "blocks-storage.ephemeral-tsdb.head-postings-for-matchers-cache-force", false, headPostingsForMatchersCacheForce) -} - -// Validate the config. -func (cfg *EphemeralTSDBConfig) Validate() error { - if cfg.HeadChunksWriteBufferSize < chunks.MinWriteBufferSize || cfg.HeadChunksWriteBufferSize > chunks.MaxWriteBufferSize || cfg.HeadChunksWriteBufferSize%1024 != 0 { - return errors.Errorf("head chunks write buffer size must be a multiple of 1024 between %d and %d", chunks.MinWriteBufferSize, chunks.MaxWriteBufferSize) - } - - if cfg.StripeSize <= 1 || (cfg.StripeSize&(cfg.StripeSize-1)) != 0 { // ensure stripe size is a positive power of 2 - return errInvalidStripeSize - } - - return nil -} diff --git a/pkg/util/ephemeral/interface.go b/pkg/util/ephemeral/interface.go deleted file mode 100644 index bf80c871892..00000000000 --- a/pkg/util/ephemeral/interface.go +++ /dev/null @@ -1,17 +0,0 @@ -// SPDX-License-Identifier: AGPL-3.0-only - -package ephemeral - -import "github.com/grafana/mimir/pkg/mimirpb" - -type SeriesCheckerByUser interface { - // EphemeralChecker returns an object which checks a series and decides whether - // this series should be ephemeral based on the given user's configuration. - // Can return nil if the user has no relevant configuration. - EphemeralChecker(user string, source mimirpb.WriteRequest_SourceEnum) SeriesChecker -} - -type SeriesChecker interface { - // ShouldMarkEphemeral checks if a series with the given labelset should be marked as ephemeral. - ShouldMarkEphemeral(labelSet []mimirpb.LabelAdapter) bool -} diff --git a/pkg/util/ephemeral/label_matchers.go b/pkg/util/ephemeral/label_matchers.go deleted file mode 100644 index f20e0c73264..00000000000 --- a/pkg/util/ephemeral/label_matchers.go +++ /dev/null @@ -1,243 +0,0 @@ -// SPDX-License-Identifier: AGPL-3.0-only - -package ephemeral - -import ( - "encoding/json" - "fmt" - "sort" - "strings" - - "github.com/pkg/errors" - amlabels "github.com/prometheus/alertmanager/pkg/labels" - "github.com/prometheus/prometheus/model/labels" - "gopkg.in/yaml.v3" - - "github.com/grafana/mimir/pkg/mimirpb" -) - -// LabelMatchers configures matchers based on which series get marked as ephemeral. 
-type LabelMatchers struct { - raw map[Source][]string - bySource map[Source]MatcherSetsForSource - string string -} - -type MatcherSetsForSource []matcherSet - -// matcherSet is like alertmanager's labels.Matchers but for Prometheus' labels.Matcher slice -type matcherSet []*labels.Matcher - -// matches checks whether all the matchers match the given label set. -func (ms matcherSet) matches(lset []mimirpb.LabelAdapter) bool { - for _, m := range ms { - var lv string - for _, l := range lset { - if l.Name == m.Name { - lv = l.Value - break - } - } - - if !m.Matches(lv) { - return false - } - } - - return true -} - -// HasMatchers returns true if there is at least one matcher defined, otherwise it returns false. -func (m MatcherSetsForSource) HasMatchers() bool { - return len(m) > 0 -} - -func (m MatcherSetsForSource) ShouldMarkEphemeral(lset []mimirpb.LabelAdapter) bool { - for _, ms := range m { - if ms.matches(lset) { - return true - } - } - - return false -} - -// String is a canonical representation of the config, it is compatible with the flag definition. -// String is needed to implement flag.Value. -func (c *LabelMatchers) String() string { - return c.string -} - -// Set implements flag.Value, and is used to set the config value from a flag value provided as string. -// Set is needed to implement flag.Value. -func (c *LabelMatchers) Set(s string) error { - s = strings.TrimSpace(s) - if s == "" { - return nil - } - - rawMatchers := map[Source][]string{} - for _, matcherSet := range strings.Split(s, ";") { - splits := strings.SplitN(matcherSet, ":", 2) - if len(splits) < 2 { - return fmt.Errorf("invalid matcher %q", matcherSet) - } - - source, err := convertStringToSource(splits[0]) - if err != nil { - return errors.Wrapf(err, "can't set matcher source %q", splits[0]) - } - - rawMatchers[source] = append(rawMatchers[source], splits[1]) - } - - var err error - *c, err = parseLabelMatchers(rawMatchers) - return err -} - -// UnmarshalYAML implements the yaml.Unmarshaler interface. -func (c *LabelMatchers) UnmarshalYAML(value *yaml.Node) error { - rawMatchers := map[Source][]string{} - err := value.DecodeWithOptions(&rawMatchers, yaml.DecodeOptions{KnownFields: true}) - if err != nil { - return err - } - - *c, err = parseLabelMatchers(rawMatchers) - return err -} - -func (c *LabelMatchers) UnmarshalJSON(data []byte) error { - m := map[Source][]string{} - err := json.Unmarshal(data, &m) - if err != nil { - return err - } - - *c, err = parseLabelMatchers(m) - return err -} - -func (c *LabelMatchers) ForSource(source mimirpb.WriteRequest_SourceEnum) MatcherSetsForSource { - return c.bySource[convertMimirpbSource(source)] -} - -func convertMimirpbSource(source mimirpb.WriteRequest_SourceEnum) Source { - switch source { - case mimirpb.API: - return API - case mimirpb.RULE: - return RULE - default: - return INVALID - } -} - -func convertStringToSource(source string) (Source, error) { - switch strings.ToLower(source) { - case "any": - return ANY, nil - case "api": - return API, nil - case "rule": - return RULE, nil - } - return INVALID, fmt.Errorf("invalid source %q", source) -} - -func parseLabelMatchers(configIn map[Source][]string) (c LabelMatchers, err error) { - c.raw = configIn - c.bySource = map[Source]MatcherSetsForSource{} - - // Iterate over ValidSources instead of configIn to keep the order deterministic. 
- for _, source := range ValidSources { - for _, matcherSetRaw := range configIn[source] { - amMatchers, err := amlabels.ParseMatchers(matcherSetRaw) - if err != nil { - return c, fmt.Errorf("can't build ephemeral series matcher %q: %w", matcherSetRaw, err) - } - - promMatchers := make(matcherSet, len(amMatchers)) - for i, m := range amMatchers { - promMatchers[i] = amlabelMatcherToProm(m) - } - - var addToSources []Source - if source == ANY { - // Add to all valid sources. - addToSources = ValidSources - } else { - // Add to the specified source and the "any" source. - addToSources = []Source{source, ANY} - } - - for _, addToSource := range addToSources { - bySource := c.bySource[addToSource] - bySource = append(bySource, promMatchers) - c.bySource[addToSource] = bySource - } - } - } - - c.string = matchersConfigString(c.raw) - - return c, nil -} - -func amlabelMatcherToProm(m *amlabels.Matcher) *labels.Matcher { - // labels.MatchType(m.Type) is a risky conversion because it depends on the iota order, but we have a test for it - return labels.MustNewMatcher(labels.MatchType(m.Type), m.Name, m.Value) -} - -func matchersConfigString(matchers map[Source][]string) string { - if len(matchers) == 0 { - return "" - } - - // Sort sources to have a deterministic output. - sources := make([]Source, 0, len(matchers)) - for source := range matchers { - sources = append(sources, source) - } - sort.Slice(sources, func(i, j int) bool { - return sources[i] < sources[j] - }) - - var sb strings.Builder - for _, source := range sources { - matcherSetsRaw := matchers[source] - for _, matcherSetRaw := range matcherSetsRaw { - if sb.Len() > 0 { - sb.WriteByte(';') - } - sb.WriteString(source.String()) - sb.WriteByte(':') - sb.WriteString(matcherSetRaw) - } - } - - return sb.String() -} - -func (c LabelMatchers) MarshalYAML() (interface{}, error) { - return c.getMap(), nil -} - -func (c LabelMatchers) MarshalJSON() ([]byte, error) { - return json.Marshal(c.getMap()) -} - -func (c LabelMatchers) getMap() map[Source][]string { - res := map[Source][]string{} - - for _, source := range ValidSources { - if len(c.raw[source]) == 0 { - continue - } - - res[source] = c.raw[source] - } - - return res -} diff --git a/pkg/util/ephemeral/label_matchers_test.go b/pkg/util/ephemeral/label_matchers_test.go deleted file mode 100644 index d3026a2c3ad..00000000000 --- a/pkg/util/ephemeral/label_matchers_test.go +++ /dev/null @@ -1,268 +0,0 @@ -// SPDX-License-Identifier: AGPL-3.0-only - -package ephemeral - -import ( - "encoding/json" - "testing" - - "github.com/prometheus/prometheus/model/labels" - "github.com/stretchr/testify/require" - "gopkg.in/yaml.v3" - - "github.com/grafana/mimir/pkg/mimirpb" -) - -func TestParseLabelMatchers(t *testing.T) { - type testCase struct { - name string - inputStringArg string - inputYamlBlob string - inputJSONBlob string - expect LabelMatchers - expectErr bool - } - - testCases := []testCase{ - { - name: "simple api matcher for metric name", - inputStringArg: `api:{__name__="foo"}`, - inputYamlBlob: `api: - - '{__name__="foo"}' -`, - inputJSONBlob: `{"api":["{__name__=\"foo\"}"]}`, - expect: LabelMatchers{ - raw: map[Source][]string{API: {`{__name__="foo"}`}}, - bySource: map[Source]MatcherSetsForSource{ - ANY: {{{ - Type: labels.MatchEqual, - Name: "__name__", - Value: "foo", - }}}, - API: {{{ - Type: labels.MatchEqual, - Name: "__name__", - Value: "foo", - }}}, - }, - string: "api:{__name__=\"foo\"}", - }, - expectErr: false, - }, { - name: "two matcher sets with two matchers each per source, 
unsorted", - inputStringArg: `api:{__name__="bar_api", testLabel2="testValue2"};rule:{__name__="foo_rule", testLabel1="testValue1"};rule:{__name__="bar_rule", testLabel2="testValue2"};api:{__name__="foo_api", testLabel1="testValue1"};any:{__name__="foo_any", testLabel1="testValue1"};any:{__name__="bar_any", testLabel2="testValue2"}`, - inputYamlBlob: `any: - - '{__name__="foo_any", testLabel1="testValue1"}' - - '{__name__="bar_any", testLabel2="testValue2"}' -api: - - '{__name__="bar_api", testLabel2="testValue2"}' - - '{__name__="foo_api", testLabel1="testValue1"}' -rule: - - '{__name__="foo_rule", testLabel1="testValue1"}' - - '{__name__="bar_rule", testLabel2="testValue2"}' -`, - inputJSONBlob: `{"any":["{__name__=\"foo_any\", testLabel1=\"testValue1\"}","{__name__=\"bar_any\", testLabel2=\"testValue2\"}"],"api":["{__name__=\"bar_api\", testLabel2=\"testValue2\"}","{__name__=\"foo_api\", testLabel1=\"testValue1\"}"],"rule":["{__name__=\"foo_rule\", testLabel1=\"testValue1\"}","{__name__=\"bar_rule\", testLabel2=\"testValue2\"}"]}`, - expect: LabelMatchers{ - raw: map[Source][]string{ - API: {`{__name__="bar_api", testLabel2="testValue2"}`, `{__name__="foo_api", testLabel1="testValue1"}`}, - RULE: {`{__name__="foo_rule", testLabel1="testValue1"}`, `{__name__="bar_rule", testLabel2="testValue2"}`}, - ANY: {`{__name__="foo_any", testLabel1="testValue1"}`, `{__name__="bar_any", testLabel2="testValue2"}`}, - }, - bySource: map[Source]MatcherSetsForSource{ - API: {{ - {Type: labels.MatchEqual, Name: "__name__", Value: "foo_any"}, - {Type: labels.MatchEqual, Name: "testLabel1", Value: "testValue1"}, - }, { - {Type: labels.MatchEqual, Name: "__name__", Value: "bar_any"}, - {Type: labels.MatchEqual, Name: "testLabel2", Value: "testValue2"}, - }, { - {Type: labels.MatchEqual, Name: "__name__", Value: "bar_api"}, - {Type: labels.MatchEqual, Name: "testLabel2", Value: "testValue2"}, - }, { - {Type: labels.MatchEqual, Name: "__name__", Value: "foo_api"}, - {Type: labels.MatchEqual, Name: "testLabel1", Value: "testValue1"}, - }}, - RULE: {{ - {Type: labels.MatchEqual, Name: "__name__", Value: "foo_any"}, - {Type: labels.MatchEqual, Name: "testLabel1", Value: "testValue1"}, - }, { - {Type: labels.MatchEqual, Name: "__name__", Value: "bar_any"}, - {Type: labels.MatchEqual, Name: "testLabel2", Value: "testValue2"}, - }, { - {Type: labels.MatchEqual, Name: "__name__", Value: "foo_rule"}, - {Type: labels.MatchEqual, Name: "testLabel1", Value: "testValue1"}, - }, { - {Type: labels.MatchEqual, Name: "__name__", Value: "bar_rule"}, - {Type: labels.MatchEqual, Name: "testLabel2", Value: "testValue2"}, - }}, - ANY: {{ - {Type: labels.MatchEqual, Name: "__name__", Value: "foo_any"}, - {Type: labels.MatchEqual, Name: "testLabel1", Value: "testValue1"}, - }, { - {Type: labels.MatchEqual, Name: "__name__", Value: "bar_any"}, - {Type: labels.MatchEqual, Name: "testLabel2", Value: "testValue2"}, - }, { - {Type: labels.MatchEqual, Name: "__name__", Value: "bar_api"}, - {Type: labels.MatchEqual, Name: "testLabel2", Value: "testValue2"}, - }, { - {Type: labels.MatchEqual, Name: "__name__", Value: "foo_api"}, - {Type: labels.MatchEqual, Name: "testLabel1", Value: "testValue1"}, - }, { - {Type: labels.MatchEqual, Name: "__name__", Value: "foo_rule"}, - {Type: labels.MatchEqual, Name: "testLabel1", Value: "testValue1"}, - }, { - {Type: labels.MatchEqual, Name: "__name__", Value: "bar_rule"}, - {Type: labels.MatchEqual, Name: "testLabel2", Value: "testValue2"}, - }}, - }, - string: `any:{__name__="foo_any", 
testLabel1="testValue1"};any:{__name__="bar_any", testLabel2="testValue2"};api:{__name__="bar_api", testLabel2="testValue2"};api:{__name__="foo_api", testLabel1="testValue1"};rule:{__name__="foo_rule", testLabel1="testValue1"};rule:{__name__="bar_rule", testLabel2="testValue2"}`, - }, - expectErr: false, - }, { - name: "invalid matcher", - inputStringArg: `{__name__==="foo"}`, - inputYamlBlob: ` -api: -- '{__name__==="foo"}' -`, - expectErr: true, - }, - } - - for _, tc := range testCases { - t.Run(tc.name, func(t *testing.T) { - check := func(t *testing.T, expect, got LabelMatchers, expectErr bool, gotErr error) { - if expectErr { - require.Error(t, gotErr) - } else { - require.NoError(t, gotErr) - require.Equal(t, expect, got) - } - } - - t.Run("unmarshal yaml", func(t *testing.T) { - got := LabelMatchers{} - gotErr := yaml.Unmarshal([]byte(tc.inputYamlBlob), &got) - check(t, tc.expect, got, tc.expectErr, gotErr) - - if !tc.expectErr { - t.Run("marshal yaml", func(t *testing.T) { - gotYaml, err := yaml.Marshal(&got) - require.NoError(t, err) - require.Equal(t, tc.inputYamlBlob, string(gotYaml)) - }) - - t.Run("marshal yaml (non-pointer)", func(t *testing.T) { - gotYaml, err := yaml.Marshal(got) - require.NoError(t, err) - require.Equal(t, tc.inputYamlBlob, string(gotYaml)) - }) - } - }) - - t.Run("unmarshal json", func(t *testing.T) { - got := LabelMatchers{} - gotErr := json.Unmarshal([]byte(tc.inputJSONBlob), &got) - check(t, tc.expect, got, tc.expectErr, gotErr) - - if !tc.expectErr { - t.Run("marshal json", func(t *testing.T) { - gotJSON, err := json.Marshal(&got) - require.NoError(t, err) - require.Equal(t, tc.inputJSONBlob, string(gotJSON)) - }) - - t.Run("marshal json (non-pointer)", func(t *testing.T) { - gotJSON, err := json.Marshal(got) - require.NoError(t, err) - require.Equal(t, tc.inputJSONBlob, string(gotJSON)) - }) - } - }) - - t.Run("set string arg", func(t *testing.T) { - got := LabelMatchers{} - gotErr := got.Set(tc.inputStringArg) - check(t, tc.expect, got, tc.expectErr, gotErr) - }) - }) - } -} - -func TestIsEphemeral(t *testing.T) { - labelBuilder := labels.NewBuilder(nil) - labelBuilder.Set("__name__", "test_metric") - labelBuilder.Set("testLabel1", "testValue1") - testLabels := mimirpb.FromLabelsToLabelAdapters(labelBuilder.Labels(nil)) - - type testCase struct { - name string - matchers string - source mimirpb.WriteRequest_SourceEnum - seriesLabels []mimirpb.LabelAdapter - expectResult bool - } - - testCases := []testCase{ - { - name: "no matchers", - matchers: "", - source: mimirpb.API, - seriesLabels: testLabels, - expectResult: false, - }, { - name: "matching labels but with different source", - matchers: `api:{__name__="test_metric"}`, - source: mimirpb.RULE, - seriesLabels: testLabels, - expectResult: false, - }, { - name: "matching source but with different labels", - matchers: `api:{__name__="different_metric"}`, - source: mimirpb.API, - seriesLabels: testLabels, - expectResult: false, - }, { - name: "matching source and labels, matching on metric name", - matchers: `api:{__name__="test_metric"}`, - source: mimirpb.API, - seriesLabels: testLabels, - expectResult: true, - }, { - name: "matching source and labels, matching on other label", - matchers: `api:{testLabel1="testValue1"}`, - source: mimirpb.API, - seriesLabels: testLabels, - expectResult: true, - }, { - name: "matching source and labels, matching on both labels", - matchers: `api:{__name__="test_metric", testLabel1="testValue1"}`, - source: mimirpb.API, - seriesLabels: testLabels, - expectResult: 
true, - }, { - name: "matching source and labels, matching on both labels, unsorted", - matchers: `api:{testLabel1="testValue1", __name__="test_metric"}`, - source: mimirpb.API, - seriesLabels: testLabels, - expectResult: true, - }, { - name: "matching rule for source 'any'", - matchers: `any:{testLabel1="testValue1", __name__="test_metric"}`, - source: mimirpb.API, - seriesLabels: testLabels, - expectResult: true, - }, - } - - for _, tc := range testCases { - t.Run(tc.name, func(t *testing.T) { - var lb LabelMatchers - require.NoError(t, lb.Set(tc.matchers)) - - lbForSource := lb.ForSource(tc.source) - got := lbForSource.ShouldMarkEphemeral(tc.seriesLabels) - require.Equal(t, tc.expectResult, got) - }) - } -} diff --git a/pkg/util/ephemeral/source.go b/pkg/util/ephemeral/source.go deleted file mode 100644 index f06168fe8da..00000000000 --- a/pkg/util/ephemeral/source.go +++ /dev/null @@ -1,67 +0,0 @@ -// SPDX-License-Identifier: AGPL-3.0-only - -package ephemeral - -import ( - "github.com/pkg/errors" - "gopkg.in/yaml.v3" -) - -type Source uint8 - -const ( - INVALID Source = iota - ANY - API - RULE -) - -var ValidSources = []Source{ANY, API, RULE} -var ValidSourceStrings []string -var Sources = []Source{INVALID, ANY, API, RULE} - -func init() { - for _, s := range ValidSources { - ValidSourceStrings = append(ValidSourceStrings, s.String()) - } -} - -func (s Source) String() string { - switch s { - case ANY: - return "any" - case API: - return "api" - case RULE: - return "rule" - default: - return "unknown" - } -} - -// MarshalYAML implements yaml.Marshaler. -func (s Source) MarshalYAML() (interface{}, error) { - return s.String(), nil -} - -func (s *Source) UnmarshalYAML(value *yaml.Node) error { - source, err := convertStringToSource(value.Value) - if err != nil { - return errors.Wrapf(err, "can't unmarshal source %q", value.Value) - } - *s = source - return nil -} - -func (s Source) MarshalText() (text []byte, err error) { - return []byte(s.String()), nil -} - -func (s *Source) UnmarshalText(text []byte) error { - source, err := convertStringToSource(string(text)) - if err != nil { - return errors.Wrapf(err, "can't unmarshal source %q", string(text)) - } - *s = source - return nil -} diff --git a/pkg/util/globalerror/errors.go b/pkg/util/globalerror/errors.go index 2c1cf222a7d..f9f13615bc1 100644 --- a/pkg/util/globalerror/errors.go +++ b/pkg/util/globalerror/errors.go @@ -26,7 +26,6 @@ const ( MaxSeriesPerMetric ID = "max-series-per-metric" MaxMetadataPerMetric ID = "max-metadata-per-metric" MaxSeriesPerUser ID = "max-series-per-user" - MaxEphemeralSeriesPerUser ID = "max-ephemeral-series-per-user" MaxMetadataPerUser ID = "max-metadata-per-user" MaxChunksPerQuery ID = "max-chunks-per-query" MaxSeriesPerQuery ID = "max-series-per-query" @@ -36,11 +35,10 @@ const ( DistributorMaxInflightPushRequests ID = "distributor-max-inflight-push-requests" DistributorMaxInflightPushRequestsBytes ID = "distributor-max-inflight-push-requests-bytes" - IngesterMaxIngestionRate ID = "ingester-max-ingestion-rate" - IngesterMaxTenants ID = "ingester-max-tenants" - IngesterMaxInMemorySeries ID = "ingester-max-series" - IngesterMaxInMemoryEphemeralSeries ID = "ingester-max-ephemeral-series" - IngesterMaxInflightPushRequests ID = "ingester-max-inflight-push-requests" + IngesterMaxIngestionRate ID = "ingester-max-ingestion-rate" + IngesterMaxTenants ID = "ingester-max-tenants" + IngesterMaxInMemorySeries ID = "ingester-max-series" + IngesterMaxInflightPushRequests ID = "ingester-max-inflight-push-requests" 
ExemplarLabelsMissing ID = "exemplar-labels-missing" ExemplarLabelsTooLong ID = "exemplar-labels-too-long" @@ -62,16 +60,10 @@ const ( SampleDuplicateTimestamp ID = "sample-duplicate-timestamp" ExemplarSeriesMissing ID = "exemplar-series-missing" - EphemeralSampleTimestampTooOld ID = "ephemeral-sample-timestamp-too-old" - EphemeralSampleOutOfOrder ID = "ephemeral-sample-out-of-order" - EphemeralSampleDuplicateTimestamp ID = "ephemeral-sample-duplicate-timestamp" - StoreConsistencyCheckFailed ID = "store-consistency-check-failed" BucketIndexTooOld ID = "bucket-index-too-old" DistributorMaxWriteMessageSize ID = "distributor-max-write-message-size" - - EphemeralStorageNotEnabledForUser ID = "ephemeral-storage-not-enabled-for-user" ) // Message returns the provided msg, appending the error id. diff --git a/pkg/util/push/push.go b/pkg/util/push/push.go index e3db7952348..d8ea90062ca 100644 --- a/pkg/util/push/push.go +++ b/pkg/util/push/push.go @@ -110,7 +110,6 @@ func handler(maxRecvMsgSize int, cleanup := func() { mimirpb.ReuseSlice(req.Timeseries) - mimirpb.ReuseSlice(req.EphemeralTimeseries) bufferPool.Put(bufHolder) } return &req.WriteRequest, cleanup, nil diff --git a/pkg/util/validation/limits.go b/pkg/util/validation/limits.go index 5fae921cb8b..0a4272c0ac1 100644 --- a/pkg/util/validation/limits.go +++ b/pkg/util/validation/limits.go @@ -21,33 +21,30 @@ import ( "gopkg.in/yaml.v3" "github.com/grafana/mimir/pkg/ingester/activeseries" - "github.com/grafana/mimir/pkg/mimirpb" "github.com/grafana/mimir/pkg/storage/tsdb/block" - "github.com/grafana/mimir/pkg/util/ephemeral" ) const ( - MaxSeriesPerMetricFlag = "ingester.max-global-series-per-metric" - MaxMetadataPerMetricFlag = "ingester.max-global-metadata-per-metric" - MaxSeriesPerUserFlag = "ingester.max-global-series-per-user" - MaxEphemeralSeriesPerUserFlag = "ingester.max-ephemeral-series-per-user" - MaxMetadataPerUserFlag = "ingester.max-global-metadata-per-user" - MaxChunksPerQueryFlag = "querier.max-fetched-chunks-per-query" - MaxChunkBytesPerQueryFlag = "querier.max-fetched-chunk-bytes-per-query" - MaxSeriesPerQueryFlag = "querier.max-fetched-series-per-query" - maxLabelNamesPerSeriesFlag = "validation.max-label-names-per-series" - maxLabelNameLengthFlag = "validation.max-length-label-name" - maxLabelValueLengthFlag = "validation.max-length-label-value" - maxMetadataLengthFlag = "validation.max-metadata-length" - creationGracePeriodFlag = "validation.create-grace-period" - maxQueryLengthFlag = "store.max-query-length" - maxPartialQueryLengthFlag = "querier.max-partial-query-length" - maxTotalQueryLengthFlag = "query-frontend.max-total-query-length" - requestRateFlag = "distributor.request-rate-limit" - requestBurstSizeFlag = "distributor.request-burst-size" - ingestionRateFlag = "distributor.ingestion-rate-limit" - ingestionBurstSizeFlag = "distributor.ingestion-burst-size" - HATrackerMaxClustersFlag = "distributor.ha-tracker.max-clusters" + MaxSeriesPerMetricFlag = "ingester.max-global-series-per-metric" + MaxMetadataPerMetricFlag = "ingester.max-global-metadata-per-metric" + MaxSeriesPerUserFlag = "ingester.max-global-series-per-user" + MaxMetadataPerUserFlag = "ingester.max-global-metadata-per-user" + MaxChunksPerQueryFlag = "querier.max-fetched-chunks-per-query" + MaxChunkBytesPerQueryFlag = "querier.max-fetched-chunk-bytes-per-query" + MaxSeriesPerQueryFlag = "querier.max-fetched-series-per-query" + maxLabelNamesPerSeriesFlag = "validation.max-label-names-per-series" + maxLabelNameLengthFlag = 
"validation.max-length-label-name" + maxLabelValueLengthFlag = "validation.max-length-label-value" + maxMetadataLengthFlag = "validation.max-metadata-length" + creationGracePeriodFlag = "validation.create-grace-period" + maxQueryLengthFlag = "store.max-query-length" + maxPartialQueryLengthFlag = "querier.max-partial-query-length" + maxTotalQueryLengthFlag = "query-frontend.max-total-query-length" + requestRateFlag = "distributor.request-rate-limit" + requestBurstSizeFlag = "distributor.request-burst-size" + ingestionRateFlag = "distributor.ingestion-rate-limit" + ingestionBurstSizeFlag = "distributor.ingestion-burst-size" + HATrackerMaxClustersFlag = "distributor.ha-tracker.max-clusters" // MinCompactorPartialBlockDeletionDelay is the minimum partial blocks deletion delay that can be configured in Mimir. MinCompactorPartialBlockDeletionDelay = 4 * time.Hour @@ -94,8 +91,6 @@ type Limits struct { // Series MaxGlobalSeriesPerUser int `yaml:"max_global_series_per_user" json:"max_global_series_per_user"` MaxGlobalSeriesPerMetric int `yaml:"max_global_series_per_metric" json:"max_global_series_per_metric"` - // Ephemeral series - MaxEphemeralSeriesPerUser int `yaml:"max_ephemeral_series_per_user" json:"max_ephemeral_series_per_user" category:"experimental"` // Metadata MaxGlobalMetricsWithMetadataPerUser int `yaml:"max_global_metadata_per_user" json:"max_global_metadata_per_user"` MaxGlobalMetadataPerMetric int `yaml:"max_global_metadata_per_metric" json:"max_global_metadata_per_metric"` @@ -177,8 +172,6 @@ type Limits struct { ForwardingEndpoint string `yaml:"forwarding_endpoint" json:"forwarding_endpoint" doc:"nocli|description=Remote-write endpoint where metrics specified in forwarding_rules are forwarded to. If set, takes precedence over endpoints specified in forwarding rules."` ForwardingDropOlderThan model.Duration `yaml:"forwarding_drop_older_than" json:"forwarding_drop_older_than" doc:"nocli|description=If set, forwarding drops samples that are older than this duration. If unset or 0, no samples get dropped."` ForwardingRules ForwardingRules `yaml:"forwarding_rules" json:"forwarding_rules" doc:"nocli|description=Rules based on which the Distributor decides whether a metric should be forwarded to an alternative remote_write API endpoint."` - - EphemeralSeriesMatchers ephemeral.LabelMatchers `yaml:"ephemeral_series_matchers" json:"ephemeral_series_matchers" category:"experimental"` } // RegisterFlags adds the flags required to config this to the given FlagSet @@ -203,7 +196,6 @@ func (l *Limits) RegisterFlags(f *flag.FlagSet) { f.IntVar(&l.MaxGlobalSeriesPerUser, MaxSeriesPerUserFlag, 150000, "The maximum number of in-memory series per tenant, across the cluster before replication. 0 to disable.") f.IntVar(&l.MaxGlobalSeriesPerMetric, MaxSeriesPerMetricFlag, 0, "The maximum number of in-memory series per metric name, across the cluster before replication. 0 to disable.") - f.IntVar(&l.MaxEphemeralSeriesPerUser, MaxEphemeralSeriesPerUserFlag, 0, "The maximum number of in-memory ephemeral series per tenant, across the cluster before replication. 0 to disable ephemeral storage.") f.IntVar(&l.MaxGlobalMetricsWithMetadataPerUser, MaxMetadataPerUserFlag, 0, "The maximum number of in-memory metrics with metadata per tenant, across the cluster. 0 to disable.") f.IntVar(&l.MaxGlobalMetadataPerMetric, MaxMetadataPerMetricFlag, 0, "The maximum number of metadata per metric, across the cluster. 
0 to disable.") @@ -270,8 +262,6 @@ func (l *Limits) RegisterFlags(f *flag.FlagSet) { f.IntVar(&l.AlertmanagerMaxDispatcherAggregationGroups, "alertmanager.max-dispatcher-aggregation-groups", 0, "Maximum number of aggregation groups in Alertmanager's dispatcher that a tenant can have. Each active aggregation group uses single goroutine. When the limit is reached, dispatcher will not dispatch alerts that belong to additional aggregation groups, but existing groups will keep working properly. 0 = no limit.") f.IntVar(&l.AlertmanagerMaxAlertsCount, "alertmanager.max-alerts-count", 0, "Maximum number of alerts that a single tenant can have. Inserting more alerts will fail with a log message and metric increment. 0 = no limit.") f.IntVar(&l.AlertmanagerMaxAlertsSizeBytes, "alertmanager.max-alerts-size-bytes", 0, "Maximum total size of alerts that a single tenant can have, alert size is the sum of the bytes of its labels, annotations and generatorURL. Inserting more alerts will fail with a log message and metric increment. 0 = no limit.") - - f.Var(&l.EphemeralSeriesMatchers, "distributor.ephemeral-series-matchers", fmt.Sprintf("Lists of series matchers prefixed by the source. The source must be one of %s. If an incoming sample matches at least one of the matchers with its source it gets marked as ephemeral. The format of the value looks like: %s:{namespace=\"dev\"};%s:{host=\"server1\",namespace=\"prod\"}", strings.Join(ephemeral.ValidSourceStrings, ", "), ephemeral.API, ephemeral.RULE)) } // UnmarshalYAML implements the yaml.Unmarshaler interface. @@ -465,11 +455,6 @@ func (o *Overrides) MaxGlobalSeriesPerMetric(userID string) int { return o.getOverridesForUser(userID).MaxGlobalSeriesPerMetric } -// MaxEphemeralSeriesPerUser returns the maximum number of ephemeral series a user is allowed to store across the cluster. 
-func (o *Overrides) MaxEphemeralSeriesPerUser(userID string) int { - return o.getOverridesForUser(userID).MaxEphemeralSeriesPerUser -} - func (o *Overrides) MaxChunksPerQuery(userID string) int { return o.getOverridesForUser(userID).MaxChunksPerQuery } @@ -794,15 +779,6 @@ func (o *Overrides) ForwardingEndpoint(user string) string { return o.getOverridesForUser(user).ForwardingEndpoint } -func (o *Overrides) EphemeralChecker(user string, source mimirpb.WriteRequest_SourceEnum) ephemeral.SeriesChecker { - m := o.getOverridesForUser(user).EphemeralSeriesMatchers.ForSource(source) - if m.HasMatchers() { - return &m - } - - return nil -} - func (o *Overrides) ForwardingDropOlderThan(user string) time.Duration { return time.Duration(o.getOverridesForUser(user).ForwardingDropOlderThan) } diff --git a/pkg/util/validation/limits_test.go b/pkg/util/validation/limits_test.go index b4fb1804376..acc342f26cc 100644 --- a/pkg/util/validation/limits_test.go +++ b/pkg/util/validation/limits_test.go @@ -20,7 +20,6 @@ import ( "gopkg.in/yaml.v3" "github.com/grafana/mimir/pkg/ingester/activeseries" - "github.com/grafana/mimir/pkg/mimirpb" ) func TestOverridesManager_GetOverrides(t *testing.T) { @@ -668,35 +667,3 @@ func TestEnabledByAnyTenant(t *testing.T) { require.True(t, EnabledByAnyTenant([]string{"tenant1", "tenant2", "tenant3"}, ov.NativeHistogramsIngestionEnabled)) } - -func TestYamlUnmarshalMarshalLabelMatchers(t *testing.T) { - cfg := ` -ephemeral_series_matchers: - any: - - '{__name__!=""}' -` - - limits := Limits{} - err := yaml.Unmarshal([]byte(cfg), &limits) - require.NoError(t, err) - - require.True(t, limits.EphemeralSeriesMatchers.ForSource(mimirpb.API).HasMatchers()) - - out, err := yaml.Marshal(&limits) - require.NoError(t, err) - require.Contains(t, string(out), cfg) // output contains many fields from Limits struct, but we only care for ephemeral_series_matchers -} - -func TestJsonUnmarshalMarshalLabelMatchers(t *testing.T) { - cfg := `"ephemeral_series_matchers":{"any":["{__name__!=\"\"}"]}` - - limits := Limits{} - err := json.Unmarshal([]byte("{"+cfg+"}"), &limits) - require.NoError(t, err) - - require.True(t, limits.EphemeralSeriesMatchers.ForSource(mimirpb.API).HasMatchers()) - - out, err := json.Marshal(&limits) - require.NoError(t, err) - require.Contains(t, string(out), cfg) // output contains many fields from Limits struct, but we only care for ephemeral_series_matchers -} diff --git a/tools/doc-generator/parse/parser.go b/tools/doc-generator/parse/parser.go index fc0750d92d4..6412c483039 100644 --- a/tools/doc-generator/parse/parser.go +++ b/tools/doc-generator/parse/parser.go @@ -24,7 +24,6 @@ import ( "github.com/grafana/mimir/pkg/ingester/activeseries" "github.com/grafana/mimir/pkg/storage/tsdb" - "github.com/grafana/mimir/pkg/util/ephemeral" "github.com/grafana/mimir/pkg/util/fieldcategory" "github.com/grafana/mimir/pkg/util/validation" ) @@ -349,8 +348,6 @@ func getFieldCustomType(t reflect.Type) (string, bool) { return "relabel_config...", true case reflect.TypeOf(activeseries.CustomTrackersConfig{}).String(): return "map of tracker name (string) to matcher (string)", true - case reflect.TypeOf(ephemeral.LabelMatchers{}).String(): - return "map of source name (string) to series matchers ([]string)", true default: return "", false } @@ -431,8 +428,6 @@ func getCustomFieldType(t reflect.Type) (string, bool) { return "relabel_config...", true case reflect.TypeOf(activeseries.CustomTrackersConfig{}).String(): return "map of tracker name (string) to matcher (string)", true - 
case reflect.TypeOf(ephemeral.LabelMatchers{}).String():
- return "map of source name (string) to series matchers ([]string)", true
 default:
 return "", false
 }
@@ -468,8 +463,6 @@ func ReflectType(typ string) reflect.Type {
 return reflect.TypeOf(tsdb.DurationList{})
 case "map of string to validation.ForwardingRule":
 return reflect.TypeOf(map[string]validation.ForwardingRule{})
- case "map of source name (string) to series matchers ([]string)":
- return reflect.TypeOf(ephemeral.LabelMatchers{})
 default:
 panic("unknown field type " + typ)
 }