From b05e5490b5acbcb973e0df3159c58c8f6b70241c Mon Sep 17 00:00:00 2001 From: tesla59 Date: Mon, 17 Jun 2024 20:07:33 +0530 Subject: [PATCH 1/4] Add distributor.shard-by-all-labels to Deprecated flag Signed-off-by: tesla59 --- CHANGELOG.md | 1 + docs/configuration/arguments.md | 14 -------------- docs/configuration/config-file-reference.md | 5 ----- integration/ingester_limits_test.go | 1 - integration/ingester_sharding_test.go | 1 - integration/zone_aware_test.go | 2 -- pkg/distributor/distributor.go | 5 ++++- 7 files changed, 5 insertions(+), 24 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 8328ea318a..ae377df892 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -3,6 +3,7 @@ ## master / unreleased * [CHANGE] Upgrade Dockerfile Node version from 14x to 18x. #5906 * [CHANGE] Ingester: Remove `-querier.query-store-for-labels-enabled` flag. Querying long-term store for labels is always enabled. #5984 +* [CHANGE] Distributor: Remove `-distributor.shard-by-all-labels` flag. Shard by all labels is always enabled now. The flag is ignored now and will be removed in cortex v1.19.0 #6021 * [FEATURE] Ingester: Experimental: Enable native histogram ingestion via `-blocks-storage.tsdb.enable-native-histograms` flag. #5986 * [ENHANCEMENT] rulers: Add support to persist tokens in rulers. #5987 * [ENHANCEMENT] Query Frontend/Querier: Added store gateway postings touched count and touched size in Querier stats and log in Query Frontend. #5892 diff --git a/docs/configuration/arguments.md b/docs/configuration/arguments.md index 145a5caea9..a6f0351735 100644 --- a/docs/configuration/arguments.md +++ b/docs/configuration/arguments.md @@ -89,20 +89,6 @@ The next three options only apply when the querier is used together with the Que ## Distributor -- `-distributor.shard-by-all-labels` - - In the original Cortex design, samples were sharded amongst distributors by the combination of (userid, metric name). Sharding by metric name was designed to reduce the number of ingesters you need to hit on the read path; the downside was that you could hotspot the write path. - - In hindsight, this seems like the wrong choice: we do many orders of magnitude more writes than reads, and ingester reads are in-memory and cheap. It seems the right thing to do is to use all the labels to shard, improving load balancing and support for very high cardinality metrics. - - Set this flag to `true` for the new behaviour. - - Important to note is that when setting this flag to `true`, it has to be set on both the distributor and the querier (called `-distributor.shard-by-all-labels` on Querier as well). If the flag is only set on the distributor and not on the querier, you will get incomplete query results because not all ingesters are queried. - - **Upgrade notes**: As this flag also makes all queries always read from all ingesters, the upgrade path is pretty trivial; just enable the flag. When you do enable it, you'll see a spike in the number of active series as the writes are "reshuffled" amongst the ingesters, but over the next stale period all the old series will be flushed, and you should end up with much better load balancing. With this flag enabled in the queriers, reads will always catch all the data from all ingesters. - - **Warning**: disabling this flag can lead to a much less balanced distribution of load among the ingesters. 
- - `-distributor.extra-query-delay` This is used by a component with an embedded distributor (Querier and Ruler) to control how long to wait until sending more than the minimum amount of queries needed for a successful response. diff --git a/docs/configuration/config-file-reference.md b/docs/configuration/config-file-reference.md index ab0570c67e..a5685d1328 100644 --- a/docs/configuration/config-file-reference.md +++ b/docs/configuration/config-file-reference.md @@ -2447,11 +2447,6 @@ ha_tracker: # CLI flag: -distributor.sharding-strategy [sharding_strategy: | default = "default"] -# Distribute samples based on all labels, as opposed to solely by user and -# metric name. -# CLI flag: -distributor.shard-by-all-labels -[shard_by_all_labels: | default = false] - # Try writing to an additional ingester in the presence of an ingester not in # the ACTIVE state. It is useful to disable this along with # -ingester.unregister-on-shutdown=false in order to not spread samples to extra diff --git a/integration/ingester_limits_test.go b/integration/ingester_limits_test.go index e94c83d5bf..682797371f 100644 --- a/integration/ingester_limits_test.go +++ b/integration/ingester_limits_test.go @@ -49,7 +49,6 @@ func TestIngesterGlobalLimits(t *testing.T) { flags := BlocksStorageFlags() flags["-distributor.replication-factor"] = "1" - flags["-distributor.shard-by-all-labels"] = "true" flags["-distributor.sharding-strategy"] = testData.shardingStrategy flags["-distributor.ingestion-tenant-shard-size"] = strconv.Itoa(testData.tenantShardSize) flags["-ingester.max-series-per-user"] = "0" diff --git a/integration/ingester_sharding_test.go b/integration/ingester_sharding_test.go index aeffc1f0cc..633cdb4e7c 100644 --- a/integration/ingester_sharding_test.go +++ b/integration/ingester_sharding_test.go @@ -58,7 +58,6 @@ func TestIngesterSharding(t *testing.T) { defer s.Close() flags := BlocksStorageFlags() - flags["-distributor.shard-by-all-labels"] = "true" flags["-distributor.sharding-strategy"] = testData.shardingStrategy flags["-distributor.ingestion-tenant-shard-size"] = strconv.Itoa(testData.tenantShardSize) diff --git a/integration/zone_aware_test.go b/integration/zone_aware_test.go index a2e983eb9b..59519a9db9 100644 --- a/integration/zone_aware_test.go +++ b/integration/zone_aware_test.go @@ -26,7 +26,6 @@ func TestZoneAwareReplication(t *testing.T) { defer s.Close() flags := BlocksStorageFlags() - flags["-distributor.shard-by-all-labels"] = "true" flags["-distributor.replication-factor"] = "3" flags["-distributor.zone-awareness-enabled"] = "true" @@ -158,7 +157,6 @@ func TestZoneResultsQuorum(t *testing.T) { defer s.Close() flags := BlocksStorageFlags() - flags["-distributor.shard-by-all-labels"] = "true" flags["-distributor.replication-factor"] = "3" flags["-distributor.zone-awareness-enabled"] = "true" diff --git a/pkg/distributor/distributor.go b/pkg/distributor/distributor.go index 6c1600c50b..4964648430 100644 --- a/pkg/distributor/distributor.go +++ b/pkg/distributor/distributor.go @@ -34,6 +34,7 @@ import ( "github.com/cortexproject/cortex/pkg/tenant" "github.com/cortexproject/cortex/pkg/util" "github.com/cortexproject/cortex/pkg/util/extract" + "github.com/cortexproject/cortex/pkg/util/flagext" "github.com/cortexproject/cortex/pkg/util/limiter" util_log "github.com/cortexproject/cortex/pkg/util/log" util_math "github.com/cortexproject/cortex/pkg/util/math" @@ -174,11 +175,11 @@ func (cfg *Config) RegisterFlags(f *flag.FlagSet) { cfg.PoolConfig.RegisterFlags(f) 
 	cfg.HATrackerConfig.RegisterFlags(f)
 	cfg.DistributorRing.RegisterFlags(f)
+	cfg.ShardByAllLabels = true
 
 	f.IntVar(&cfg.MaxRecvMsgSize, "distributor.max-recv-msg-size", 100<<20, "remote_write API max receive message size (bytes).")
 	f.DurationVar(&cfg.RemoteTimeout, "distributor.remote-timeout", 2*time.Second, "Timeout for downstream ingesters.")
 	f.DurationVar(&cfg.ExtraQueryDelay, "distributor.extra-query-delay", 0, "Time to wait before sending more than the minimum successful query requests.")
-	f.BoolVar(&cfg.ShardByAllLabels, "distributor.shard-by-all-labels", false, "Distribute samples based on all labels, as opposed to solely by user and metric name.")
 	f.BoolVar(&cfg.SignWriteRequestsEnabled, "distributor.sign-write-requests", false, "EXPERIMENTAL: If enabled, sign the write request between distributors and ingesters.")
 	f.StringVar(&cfg.ShardingStrategy, "distributor.sharding-strategy", util.ShardingStrategyDefault, fmt.Sprintf("The sharding strategy to use. Supported values are: %s.", strings.Join(supportedShardingStrategies, ", ")))
 	f.BoolVar(&cfg.ExtendWrites, "distributor.extend-writes", true, "Try writing to an additional ingester in the presence of an ingester not in the ACTIVE state. It is useful to disable this along with -ingester.unregister-on-shutdown=false in order to not spread samples to extra ingesters during rolling restarts with consistent naming.")
@@ -186,6 +187,8 @@ func (cfg *Config) RegisterFlags(f *flag.FlagSet) {
 	f.Float64Var(&cfg.InstanceLimits.MaxIngestionRate, "distributor.instance-limits.max-ingestion-rate", 0, "Max ingestion rate (samples/sec) that this distributor will accept. This limit is per-distributor, not per-tenant. Additional push requests will be rejected. Current ingestion rate is computed as exponentially weighted moving average, updated every second. 0 = unlimited.")
 	f.IntVar(&cfg.InstanceLimits.MaxInflightPushRequests, "distributor.instance-limits.max-inflight-push-requests", 0, "Max inflight push requests that this distributor can handle. This limit is per-distributor, not per-tenant. Additional requests will be rejected. 
0 = unlimited.") + + flagext.DeprecatedFlag(f, "distributor.shard-by-all-labels", "Deprecated: Setting this flag will not take any effect, shard by all labels is always enabled now.", util_log.Logger) } // Validate config and returns error on failure From 6b1a4ec0202321f9b6d968979e66f86b5751b36e Mon Sep 17 00:00:00 2001 From: tesla59 Date: Wed, 19 Jun 2024 14:18:41 +0530 Subject: [PATCH 2/4] deprecate usage of ShardByAllLabels Signed-off-by: tesla59 --- pkg/cortex/cortex.go | 3 --- pkg/cortex/modules.go | 1 - pkg/distributor/distributor.go | 19 ++----------------- pkg/distributor/distributor_test.go | 2 -- pkg/distributor/query.go | 10 ---------- pkg/ingester/ingester.go | 3 --- pkg/ingester/limiter.go | 2 -- 7 files changed, 2 insertions(+), 38 deletions(-) diff --git a/pkg/cortex/cortex.go b/pkg/cortex/cortex.go index 90e268e1ae..bbfb4cd722 100644 --- a/pkg/cortex/cortex.go +++ b/pkg/cortex/cortex.go @@ -193,9 +193,6 @@ func (c *Config) Validate(log log.Logger) error { if err := c.BlocksStorage.Validate(); err != nil { return errors.Wrap(err, "invalid TSDB config") } - if err := c.LimitsConfig.Validate(c.Distributor.ShardByAllLabels); err != nil { - return errors.Wrap(err, "invalid limits config") - } if err := c.Distributor.Validate(c.LimitsConfig); err != nil { return errors.Wrap(err, "invalid distributor config") } diff --git a/pkg/cortex/modules.go b/pkg/cortex/modules.go index de1f15d260..a0d9ea5b2b 100644 --- a/pkg/cortex/modules.go +++ b/pkg/cortex/modules.go @@ -401,7 +401,6 @@ func (t *Cortex) initIngesterService() (serv services.Service, err error) { t.Cfg.Ingester.LifecyclerConfig.RingConfig.KVStore.Multi.ConfigProvider = multiClientRuntimeConfigChannel(t.RuntimeConfig) t.Cfg.Ingester.LifecyclerConfig.ListenPort = t.Cfg.Server.GRPCListenPort t.Cfg.Ingester.DistributorShardingStrategy = t.Cfg.Distributor.ShardingStrategy - t.Cfg.Ingester.DistributorShardByAllLabels = t.Cfg.Distributor.ShardByAllLabels t.Cfg.Ingester.InstanceLimitsFn = ingesterInstanceLimits(t.RuntimeConfig) t.Cfg.Ingester.QueryIngestersWithin = t.Cfg.Querier.QueryIngestersWithin t.tsdbIngesterConfig() diff --git a/pkg/distributor/distributor.go b/pkg/distributor/distributor.go index 4964648430..b454071e4f 100644 --- a/pkg/distributor/distributor.go +++ b/pkg/distributor/distributor.go @@ -33,7 +33,6 @@ import ( ring_client "github.com/cortexproject/cortex/pkg/ring/client" "github.com/cortexproject/cortex/pkg/tenant" "github.com/cortexproject/cortex/pkg/util" - "github.com/cortexproject/cortex/pkg/util/extract" "github.com/cortexproject/cortex/pkg/util/flagext" "github.com/cortexproject/cortex/pkg/util/limiter" util_log "github.com/cortexproject/cortex/pkg/util/log" @@ -139,7 +138,6 @@ type Config struct { ExtraQueryDelay time.Duration `yaml:"extra_queue_delay"` ShardingStrategy string `yaml:"sharding_strategy"` - ShardByAllLabels bool `yaml:"shard_by_all_labels"` ExtendWrites bool `yaml:"extend_writes"` SignWriteRequestsEnabled bool `yaml:"sign_write_requests"` @@ -175,7 +173,6 @@ func (cfg *Config) RegisterFlags(f *flag.FlagSet) { cfg.PoolConfig.RegisterFlags(f) cfg.HATrackerConfig.RegisterFlags(f) cfg.DistributorRing.RegisterFlags(f) - cfg.ShardByAllLabels = true f.IntVar(&cfg.MaxRecvMsgSize, "distributor.max-recv-msg-size", 100<<20, "remote_write API max receive message size (bytes).") f.DurationVar(&cfg.RemoteTimeout, "distributor.remote-timeout", 2*time.Second, "Timeout for downstream ingesters.") @@ -458,23 +455,11 @@ func (d *Distributor) stopping(_ error) error { } func (d *Distributor) 
tokenForLabels(userID string, labels []cortexpb.LabelAdapter) (uint32, error) { - if d.cfg.ShardByAllLabels { - return shardByAllLabels(userID, labels), nil - } - - unsafeMetricName, err := extract.UnsafeMetricNameFromLabelAdapters(labels) - if err != nil { - return 0, err - } - return shardByMetricName(userID, unsafeMetricName), nil + return shardByAllLabels(userID, labels), nil } func (d *Distributor) tokenForMetadata(userID string, metricName string) uint32 { - if d.cfg.ShardByAllLabels { - return shardByMetricName(userID, metricName) - } - - return shardByUser(userID) + return shardByMetricName(userID, metricName) } // shardByMetricName returns the token for the given metric. The provided metricName diff --git a/pkg/distributor/distributor_test.go b/pkg/distributor/distributor_test.go index cdd9ed0afc..6d73557b36 100644 --- a/pkg/distributor/distributor_test.go +++ b/pkg/distributor/distributor_test.go @@ -2047,7 +2047,6 @@ func BenchmarkDistributor_Push(b *testing.B) { limits.IngestionRate = 10000000 // Unlimited. testData.prepareConfig(&limits) - distributorCfg.ShardByAllLabels = true distributorCfg.IngesterClientFactory = func(addr string) (ring_client.PoolClient, error) { return &noopIngester{}, nil } @@ -2649,7 +2648,6 @@ func prepare(tb testing.TB, cfg prepConfig) ([]*Distributor, []*mockIngester, [] flagext.DefaultValues(&distributorCfg, &clientConfig) distributorCfg.IngesterClientFactory = factory - distributorCfg.ShardByAllLabels = cfg.shardByAllLabels distributorCfg.ExtraQueryDelay = 50 * time.Millisecond distributorCfg.DistributorRing.HeartbeatPeriod = 100 * time.Millisecond distributorCfg.DistributorRing.InstanceID = strconv.Itoa(i) diff --git a/pkg/distributor/query.go b/pkg/distributor/query.go index 882d67bc3d..354cc55e4a 100644 --- a/pkg/distributor/query.go +++ b/pkg/distributor/query.go @@ -17,7 +17,6 @@ import ( "github.com/cortexproject/cortex/pkg/ring" "github.com/cortexproject/cortex/pkg/tenant" "github.com/cortexproject/cortex/pkg/util" - "github.com/cortexproject/cortex/pkg/util/extract" "github.com/cortexproject/cortex/pkg/util/grpcutil" "github.com/cortexproject/cortex/pkg/util/limiter" "github.com/cortexproject/cortex/pkg/util/validation" @@ -96,15 +95,6 @@ func (d *Distributor) GetIngestersForQuery(ctx context.Context, matchers ...*lab } } - // If "shard by all labels" is disabled, we can get ingesters by metricName if exists. - if !d.cfg.ShardByAllLabels && len(matchers) > 0 { - metricNameMatcher, _, ok := extract.MetricNameMatcherFromMatchers(matchers) - - if ok && metricNameMatcher.Type == labels.MatchEqual { - return d.ingestersRing.Get(shardByMetricName(userID, metricNameMatcher.Value), ring.Read, nil, nil, nil) - } - } - return d.ingestersRing.GetReplicationSetForOperation(ring.Read) } diff --git a/pkg/ingester/ingester.go b/pkg/ingester/ingester.go index e067341049..84b91e8dc6 100644 --- a/pkg/ingester/ingester.go +++ b/pkg/ingester/ingester.go @@ -119,7 +119,6 @@ type Config struct { // Injected at runtime and read from the distributor config, required // to accurately apply global limits. DistributorShardingStrategy string `yaml:"-"` - DistributorShardByAllLabels bool `yaml:"-"` // Injected at runtime and read from querier config. 
QueryIngestersWithin time.Duration `yaml:"-"` @@ -693,7 +692,6 @@ func New(cfg Config, limits *validation.Overrides, registerer prometheus.Registe limits, i.lifecycler, cfg.DistributorShardingStrategy, - cfg.DistributorShardByAllLabels, cfg.LifecyclerConfig.RingConfig.ReplicationFactor, cfg.LifecyclerConfig.RingConfig.ZoneAwarenessEnabled, cfg.AdminLimitMessage, @@ -732,7 +730,6 @@ func NewForFlusher(cfg Config, limits *validation.Overrides, registerer promethe limits, i.lifecycler, cfg.DistributorShardingStrategy, - cfg.DistributorShardByAllLabels, cfg.LifecyclerConfig.RingConfig.ReplicationFactor, cfg.LifecyclerConfig.RingConfig.ZoneAwarenessEnabled, cfg.AdminLimitMessage, diff --git a/pkg/ingester/limiter.go b/pkg/ingester/limiter.go index 9b572a8409..83abac27a4 100644 --- a/pkg/ingester/limiter.go +++ b/pkg/ingester/limiter.go @@ -49,7 +49,6 @@ func NewLimiter( limits *validation.Overrides, ring RingCount, shardingStrategy string, - shardByAllLabels bool, replicationFactor int, zoneAwarenessEnabled bool, AdminLimitMessage string, @@ -59,7 +58,6 @@ func NewLimiter( ring: ring, replicationFactor: replicationFactor, shuffleShardingEnabled: shardingStrategy == util.ShardingStrategyShuffle, - shardByAllLabels: shardByAllLabels, zoneAwarenessEnabled: zoneAwarenessEnabled, AdminLimitMessage: AdminLimitMessage, } From c81a6c7247e42e49c8e18b6155dc2cf1be59f100 Mon Sep 17 00:00:00 2001 From: tesla59 Date: Sat, 22 Jun 2024 23:17:21 +0530 Subject: [PATCH 3/4] distributor_test: remove the tests for shardByAllLabels=false Signed-off-by: tesla59 --- pkg/distributor/distributor_test.go | 146 ++++++++++++---------------- 1 file changed, 64 insertions(+), 82 deletions(-) diff --git a/pkg/distributor/distributor_test.go b/pkg/distributor/distributor_test.go index 6d73557b36..f0d01e72ed 100644 --- a/pkg/distributor/distributor_test.go +++ b/pkg/distributor/distributor_test.go @@ -968,107 +968,89 @@ func TestDistributor_PushQuery(t *testing.T) { // coverage along quite a few different axis. testcases := []testcase{} - // Run every test in both sharding modes. - for _, shardByAllLabels := range []bool{true, false} { + // Test with between 2 and 10 ingesters. + for numIngesters := 2; numIngesters < 10; numIngesters++ { - // Test with between 2 and 10 ingesters. - for numIngesters := 2; numIngesters < 10; numIngesters++ { - - // Test with between 0 and numIngesters "happy" ingesters. - for happyIngesters := 0; happyIngesters <= numIngesters; happyIngesters++ { - - // Test either with shuffle-sharding enabled or disabled. - for _, shuffleShardEnabled := range []bool{false, true} { - scenario := fmt.Sprintf("shardByAllLabels=%v, numIngester=%d, happyIngester=%d, shuffleSharding=%v)", shardByAllLabels, numIngesters, happyIngesters, shuffleShardEnabled) - - // The number of ingesters we expect to query depends whether shuffle sharding and/or - // shard by all labels are enabled. - var expectedIngesters int - if shuffleShardEnabled { - expectedIngesters = min(shuffleShardSize, numIngesters) - } else if shardByAllLabels { - expectedIngesters = numIngesters - } else { - expectedIngesters = 3 // Replication factor - } + // Test with between 0 and numIngesters "happy" ingesters. + for happyIngesters := 0; happyIngesters <= numIngesters; happyIngesters++ { - // When we're not sharding by metric name, queriers with more than one - // failed ingester should fail. 
- if shardByAllLabels && numIngesters-happyIngesters > 1 { - testcases = append(testcases, testcase{ - name: fmt.Sprintf("ExpectFail(%s)", scenario), - numIngesters: numIngesters, - happyIngesters: happyIngesters, - matchers: []*labels.Matcher{nameMatcher, barMatcher}, - expectedError: errFail, - shardByAllLabels: shardByAllLabels, - shuffleShardEnabled: shuffleShardEnabled, - }) - continue - } + // Test either with shuffle-sharding enabled or disabled. + for _, shuffleShardEnabled := range []bool{false, true} { + scenario := fmt.Sprintf("numIngester=%d, happyIngester=%d, shuffleSharding=%v)", numIngesters, happyIngesters, shuffleShardEnabled) - // When we have less ingesters than replication factor, any failed ingester - // will cause a failure. - if numIngesters < 3 && happyIngesters < 2 { - testcases = append(testcases, testcase{ - name: fmt.Sprintf("ExpectFail(%s)", scenario), - numIngesters: numIngesters, - happyIngesters: happyIngesters, - matchers: []*labels.Matcher{nameMatcher, barMatcher}, - expectedError: errFail, - shardByAllLabels: shardByAllLabels, - shuffleShardEnabled: shuffleShardEnabled, - }) - continue - } + // The number of ingesters we expect to query depends whether shuffle sharding and/or + // shard by all labels are enabled. + var expectedIngesters int + if shuffleShardEnabled { + expectedIngesters = min(shuffleShardSize, numIngesters) + } else { + expectedIngesters = numIngesters + } - // If we're sharding by metric name and we have failed ingesters, we can't - // tell ahead of time if the query will succeed, as we don't know which - // ingesters will hold the results for the query. - if !shardByAllLabels && numIngesters-happyIngesters > 1 { - continue - } + // When we're not sharding by metric name, queriers with more than one + // failed ingester should fail. + if numIngesters-happyIngesters > 1 { + testcases = append(testcases, testcase{ + name: fmt.Sprintf("ExpectFail(%s)", scenario), + numIngesters: numIngesters, + happyIngesters: happyIngesters, + matchers: []*labels.Matcher{nameMatcher, barMatcher}, + expectedError: errFail, + shuffleShardEnabled: shuffleShardEnabled, + }) + continue + } - // Reading all the samples back should succeed. + // When we have less ingesters than replication factor, any failed ingester + // will cause a failure. + if numIngesters < 3 && happyIngesters < 2 { testcases = append(testcases, testcase{ - name: fmt.Sprintf("ReadAll(%s)", scenario), + name: fmt.Sprintf("ExpectFail(%s)", scenario), numIngesters: numIngesters, happyIngesters: happyIngesters, - samples: 10, matchers: []*labels.Matcher{nameMatcher, barMatcher}, - expectedResponse: expectedResponse(0, 10), - expectedIngesters: expectedIngesters, - shardByAllLabels: shardByAllLabels, + expectedError: errFail, shuffleShardEnabled: shuffleShardEnabled, }) + continue + } + + // Reading all the samples back should succeed. + testcases = append(testcases, testcase{ + name: fmt.Sprintf("ReadAll(%s)", scenario), + numIngesters: numIngesters, + happyIngesters: happyIngesters, + samples: 10, + matchers: []*labels.Matcher{nameMatcher, barMatcher}, + expectedResponse: expectedResponse(0, 10), + expectedIngesters: expectedIngesters, + shuffleShardEnabled: shuffleShardEnabled, + }) + + // As should reading none of the samples back. 
+ testcases = append(testcases, testcase{ + name: fmt.Sprintf("ReadNone(%s)", scenario), + numIngesters: numIngesters, + happyIngesters: happyIngesters, + samples: 10, + matchers: []*labels.Matcher{nameMatcher, mustEqualMatcher("not", "found")}, + expectedResponse: expectedResponse(0, 0), + expectedIngesters: expectedIngesters, + shuffleShardEnabled: shuffleShardEnabled, + }) - // As should reading none of the samples back. + // And reading each sample individually. + for i := 0; i < 10; i++ { testcases = append(testcases, testcase{ - name: fmt.Sprintf("ReadNone(%s)", scenario), + name: fmt.Sprintf("ReadOne(%s, sample=%d)", scenario, i), numIngesters: numIngesters, happyIngesters: happyIngesters, samples: 10, - matchers: []*labels.Matcher{nameMatcher, mustEqualMatcher("not", "found")}, - expectedResponse: expectedResponse(0, 0), + matchers: []*labels.Matcher{nameMatcher, mustEqualMatcher("sample", strconv.Itoa(i))}, + expectedResponse: expectedResponse(i, i+1), expectedIngesters: expectedIngesters, - shardByAllLabels: shardByAllLabels, shuffleShardEnabled: shuffleShardEnabled, }) - - // And reading each sample individually. - for i := 0; i < 10; i++ { - testcases = append(testcases, testcase{ - name: fmt.Sprintf("ReadOne(%s, sample=%d)", scenario, i), - numIngesters: numIngesters, - happyIngesters: happyIngesters, - samples: 10, - matchers: []*labels.Matcher{nameMatcher, mustEqualMatcher("sample", strconv.Itoa(i))}, - expectedResponse: expectedResponse(i, i+1), - expectedIngesters: expectedIngesters, - shardByAllLabels: shardByAllLabels, - shuffleShardEnabled: shuffleShardEnabled, - }) - } } } } From 7bb0ff9aabe0f23ca6183b3f3e7ea1d1db12b777 Mon Sep 17 00:00:00 2001 From: tesla59 Date: Sat, 22 Jun 2024 23:18:54 +0530 Subject: [PATCH 4/4] config: deprecate shard_by_all_labels flag Signed-off-by: tesla59 --- development/tsdb-blocks-storage-s3-gossip/config/cortex.yaml | 1 - .../tsdb-blocks-storage-s3-single-binary/config/cortex.yaml | 1 - development/tsdb-blocks-storage-s3/config/cortex.yaml | 1 - .../tsdb-blocks-storage-swift-single-binary/config/cortex.yaml | 1 - docs/configuration/single-process-config-blocks-gossip-1.yaml | 1 - docs/configuration/single-process-config-blocks-gossip-2.yaml | 1 - docs/configuration/single-process-config-blocks-local.yaml | 1 - docs/configuration/single-process-config-blocks-tls.yaml | 1 - docs/configuration/single-process-config-blocks.yaml | 1 - docs/getting-started/cortex-config.yaml | 1 - docs/getting-started/cortex-values.yaml | 1 - 11 files changed, 11 deletions(-) diff --git a/development/tsdb-blocks-storage-s3-gossip/config/cortex.yaml b/development/tsdb-blocks-storage-s3-gossip/config/cortex.yaml index bc79dabb8c..2d1f1d5922 100644 --- a/development/tsdb-blocks-storage-s3-gossip/config/cortex.yaml +++ b/development/tsdb-blocks-storage-s3-gossip/config/cortex.yaml @@ -1,7 +1,6 @@ auth_enabled: false distributor: - shard_by_all_labels: true pool: health_check_ingesters: true diff --git a/development/tsdb-blocks-storage-s3-single-binary/config/cortex.yaml b/development/tsdb-blocks-storage-s3-single-binary/config/cortex.yaml index b7cd5e7398..8047802a56 100644 --- a/development/tsdb-blocks-storage-s3-single-binary/config/cortex.yaml +++ b/development/tsdb-blocks-storage-s3-single-binary/config/cortex.yaml @@ -1,7 +1,6 @@ auth_enabled: false distributor: - shard_by_all_labels: true pool: health_check_ingesters: true diff --git a/development/tsdb-blocks-storage-s3/config/cortex.yaml b/development/tsdb-blocks-storage-s3/config/cortex.yaml index 
0f35136841..d8726163b3 100644 --- a/development/tsdb-blocks-storage-s3/config/cortex.yaml +++ b/development/tsdb-blocks-storage-s3/config/cortex.yaml @@ -1,7 +1,6 @@ auth_enabled: false distributor: - shard_by_all_labels: true pool: health_check_ingesters: true diff --git a/development/tsdb-blocks-storage-swift-single-binary/config/cortex.yaml b/development/tsdb-blocks-storage-swift-single-binary/config/cortex.yaml index 452fb906a6..d90ebda212 100644 --- a/development/tsdb-blocks-storage-swift-single-binary/config/cortex.yaml +++ b/development/tsdb-blocks-storage-swift-single-binary/config/cortex.yaml @@ -1,7 +1,6 @@ auth_enabled: false distributor: - shard_by_all_labels: true pool: health_check_ingesters: true diff --git a/docs/configuration/single-process-config-blocks-gossip-1.yaml b/docs/configuration/single-process-config-blocks-gossip-1.yaml index 5d53411921..35f7401e48 100644 --- a/docs/configuration/single-process-config-blocks-gossip-1.yaml +++ b/docs/configuration/single-process-config-blocks-gossip-1.yaml @@ -17,7 +17,6 @@ server: grpc_server_max_concurrent_streams: 1000 distributor: - shard_by_all_labels: true pool: health_check_ingesters: true diff --git a/docs/configuration/single-process-config-blocks-gossip-2.yaml b/docs/configuration/single-process-config-blocks-gossip-2.yaml index 419e70e9df..95bf74cf8c 100644 --- a/docs/configuration/single-process-config-blocks-gossip-2.yaml +++ b/docs/configuration/single-process-config-blocks-gossip-2.yaml @@ -17,7 +17,6 @@ server: grpc_server_max_concurrent_streams: 1000 distributor: - shard_by_all_labels: true pool: health_check_ingesters: true diff --git a/docs/configuration/single-process-config-blocks-local.yaml b/docs/configuration/single-process-config-blocks-local.yaml index a5eb711d97..30e4873931 100644 --- a/docs/configuration/single-process-config-blocks-local.yaml +++ b/docs/configuration/single-process-config-blocks-local.yaml @@ -16,7 +16,6 @@ server: grpc_server_max_concurrent_streams: 1000 distributor: - shard_by_all_labels: true pool: health_check_ingesters: true diff --git a/docs/configuration/single-process-config-blocks-tls.yaml b/docs/configuration/single-process-config-blocks-tls.yaml index ca468192c9..8ec9437ec0 100644 --- a/docs/configuration/single-process-config-blocks-tls.yaml +++ b/docs/configuration/single-process-config-blocks-tls.yaml @@ -22,7 +22,6 @@ server: distributor: - shard_by_all_labels: true pool: health_check_ingesters: true diff --git a/docs/configuration/single-process-config-blocks.yaml b/docs/configuration/single-process-config-blocks.yaml index 25d699a24f..3c93ededfd 100644 --- a/docs/configuration/single-process-config-blocks.yaml +++ b/docs/configuration/single-process-config-blocks.yaml @@ -16,7 +16,6 @@ server: grpc_server_max_concurrent_streams: 1000 distributor: - shard_by_all_labels: true pool: health_check_ingesters: true diff --git a/docs/getting-started/cortex-config.yaml b/docs/getting-started/cortex-config.yaml index f86f1c71d0..19a6b0848b 100644 --- a/docs/getting-started/cortex-config.yaml +++ b/docs/getting-started/cortex-config.yaml @@ -18,7 +18,6 @@ server: grpc_server_max_concurrent_streams: 1000 distributor: - shard_by_all_labels: true pool: health_check_ingesters: true diff --git a/docs/getting-started/cortex-values.yaml b/docs/getting-started/cortex-values.yaml index 477f4a8eef..99f36f563b 100644 --- a/docs/getting-started/cortex-values.yaml +++ b/docs/getting-started/cortex-values.yaml @@ -102,7 +102,6 @@ config: distributor: # -- Distribute samples based on all 
labels, as opposed to solely by user and # metric name. - shard_by_all_labels: true pool: health_check_ingesters: true memberlist:
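The core behavioural change in this series is that `tokenForLabels` now always derives the ingester token from the tenant ID plus the full label set, rather than optionally falling back to (tenant, metric name) when `-distributor.shard-by-all-labels` was false. The sketch below illustrates why that balances writes better; it is illustrative only, assuming FNV-1a hashing and a local `label` type, and is not the actual Cortex helpers (`shardByAllLabels`, `shardByMetricName` in `pkg/distributor/distributor.go`), which may differ in hashing and label handling.

```go
// Illustrative sketch of shard-token computation, not the Cortex implementation.
package main

import (
	"fmt"
	"hash/fnv"
	"sort"
)

type label struct{ Name, Value string }

// tokenByAllLabels hashes the tenant ID together with every label pair, so
// series of the same metric spread across many ingesters unless their full
// label sets match.
func tokenByAllLabels(userID string, lbls []label) uint32 {
	sort.Slice(lbls, func(i, j int) bool { return lbls[i].Name < lbls[j].Name })
	h := fnv.New32a()
	h.Write([]byte(userID))
	for _, l := range lbls {
		h.Write([]byte(l.Name))
		h.Write([]byte(l.Value))
	}
	return h.Sum32()
}

// tokenByMetricName mimics the old default: every series of a metric for a
// tenant maps to the same token, which can hot-spot the write path for
// high-cardinality metrics.
func tokenByMetricName(userID, metricName string) uint32 {
	h := fnv.New32a()
	h.Write([]byte(userID))
	h.Write([]byte(metricName))
	return h.Sum32()
}

func main() {
	series := []label{{"__name__", "http_requests_total"}, {"status", "200"}}
	fmt.Println(tokenByAllLabels("tenant-1", series))
	fmt.Println(tokenByMetricName("tenant-1", "http_requests_total"))
}
```

Because every label contributes to the hash, a single high-cardinality metric no longer lands on one replication set, which is the load-balancing argument the removed `arguments.md` section made for enabling the flag.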
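Rather than deleting the CLI flag outright, patch 1 re-registers `-distributor.shard-by-all-labels` through `flagext.DeprecatedFlag`, so existing command lines keep parsing while the setting is ignored. The snippet below is a generic sketch of that pattern using only the standard `flag` package, with a hypothetical `deprecatedValue` type; it is not Cortex's `flagext` implementation.

```go
// Generic no-op deprecated-flag pattern (assumed names, stdlib only).
package main

import (
	"flag"
	"log"
)

// deprecatedValue satisfies flag.Value but discards whatever it is given.
type deprecatedValue struct{ name string }

func (d deprecatedValue) String() string { return "" }

func (d deprecatedValue) Set(string) error {
	log.Printf("flag -%s is deprecated and has no effect; shard by all labels is always enabled", d.name)
	return nil
}

func main() {
	fs := flag.NewFlagSet("distributor", flag.ExitOnError)
	fs.Var(deprecatedValue{name: "distributor.shard-by-all-labels"}, "distributor.shard-by-all-labels",
		"Deprecated: setting this flag has no effect.")

	// Old invocations such as -distributor.shard-by-all-labels=true still
	// parse cleanly; in this sketch they only emit the warning above.
	_ = fs.Parse([]string{"-distributor.shard-by-all-labels=true"})
}
```

Keeping the flag registered but inert is what lets the CHANGELOG defer the hard removal to Cortex v1.19.0 without breaking existing deployments in this release.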