Tenant/unordered #4130

Merged · 2 commits · Aug 10, 2021
4 changes: 2 additions & 2 deletions pkg/ingester/instance.go
@@ -132,7 +132,7 @@ func (i *instance) consumeChunk(ctx context.Context, ls labels.Labels, chunk *lo
 	if !ok {
 
 		sortedLabels := i.index.Add(cortexpb.FromLabelsToLabelAdapters(ls), fp)
-		stream = newStream(i.cfg, i.instanceID, fp, sortedLabels, i.metrics)
+		stream = newStream(i.cfg, i.instanceID, fp, sortedLabels, i.limiter.limits.UnorderedWrites(i.instanceID), i.metrics)
 		i.streamsByFP[fp] = stream
 		i.streams[stream.labelsString] = stream
 		i.streamsCreatedTotal.Inc()
@@ -243,7 +243,7 @@ func (i *instance) getOrCreateStream(pushReqStream logproto.Stream, lock bool, r
 	fp := i.getHashForLabels(labels)
 
 	sortedLabels := i.index.Add(cortexpb.FromLabelsToLabelAdapters(labels), fp)
-	stream = newStream(i.cfg, i.instanceID, fp, sortedLabels, i.metrics)
+	stream = newStream(i.cfg, i.instanceID, fp, sortedLabels, i.limiter.limits.UnorderedWrites(i.instanceID), i.metrics)
 	i.streams[pushReqStream.Labels] = stream
 	i.streamsByFP[fp] = stream
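Note: both call sites resolve the per-tenant setting once, at stream creation, via i.limiter.limits.UnorderedWrites(i.instanceID). A minimal sketch of that resolution pattern, using hypothetical types (the real lookup is the Overrides.UnorderedWrites method added in pkg/validation/limits.go below):

    // Sketch only: per-tenant override with fallback to cluster defaults.
    type overrides struct {
        defaults     Limits             // flag-derived default limits
        tenantLimits map[string]*Limits // per-tenant overrides, if configured
    }

    func (o *overrides) UnorderedWrites(userID string) bool {
        if l, ok := o.tenantLimits[userID]; ok {
            return l.UnorderedWrites
        }
        return o.defaults.UnorderedWrites
    }

Because the value is captured in the stream struct at construction, flipping a tenant's override only affects streams created afterwards.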
4 changes: 2 additions & 2 deletions pkg/ingester/instance_test.go
@@ -178,7 +178,7 @@ func Test_SeriesQuery(t *testing.T) {
 	for _, testStream := range testStreams {
 		stream, err := instance.getOrCreateStream(testStream, false, recordPool.GetRecord())
 		require.NoError(t, err)
-		chunk := newStream(cfg, "fake", 0, nil, NilMetrics).NewChunk()
+		chunk := newStream(cfg, "fake", 0, nil, true, NilMetrics).NewChunk()
 		for _, entry := range testStream.Entries {
 			err = chunk.Append(&entry)
 			require.NoError(t, err)
@@ -334,7 +334,7 @@ func Benchmark_instance_addNewTailer(b *testing.B) {
 	lbs := makeRandomLabels()
 	b.Run("addTailersToNewStream", func(b *testing.B) {
 		for n := 0; n < b.N; n++ {
-			inst.addTailersToNewStream(newStream(nil, "fake", 0, lbs, NilMetrics))
+			inst.addTailersToNewStream(newStream(nil, "fake", 0, lbs, true, NilMetrics))
 		}
 	})
 }

23 changes: 13 additions & 10 deletions pkg/ingester/stream.go
@@ -95,6 +95,8 @@ type stream struct {
 	// errors were used to detect this, but this counter has been
 	// introduced to facilitate removing the ordering constraint.
 	entryCt int64
+
+	unorderedWrites bool
 }
 
 type chunkDesc struct {
@@ -111,15 +113,16 @@ type entryWithError struct {
 	e error
 }
 
-func newStream(cfg *Config, tenant string, fp model.Fingerprint, labels labels.Labels, metrics *ingesterMetrics) *stream {
+func newStream(cfg *Config, tenant string, fp model.Fingerprint, labels labels.Labels, unorderedWrites bool, metrics *ingesterMetrics) *stream {
 	return &stream{
-		cfg:          cfg,
-		fp:           fp,
-		labels:       labels,
-		labelsString: labels.String(),
-		tailers:      map[uint32]*tailer{},
-		metrics:      metrics,
-		tenant:       tenant,
+		cfg:             cfg,
+		fp:              fp,
+		labels:          labels,
+		labelsString:    labels.String(),
+		tailers:         map[uint32]*tailer{},
+		metrics:         metrics,
+		tenant:          tenant,
+		unorderedWrites: unorderedWrites,
 	}
 }

@@ -159,7 +162,7 @@ func (s *stream) setChunks(chunks []Chunk) (bytesAdded, entriesAdded int, err er
 
 func (s *stream) NewChunk() *chunkenc.MemChunk {
 	hbType := chunkenc.OrderedHeadBlockFmt
-	if s.cfg.UnorderedWrites {
+	if s.unorderedWrites {
 		hbType = chunkenc.UnorderedHeadBlockFmt
 	}
 	return chunkenc.NewMemChunk(s.cfg.parsedEncoding, hbType, s.cfg.BlockSize, s.cfg.TargetChunkSize)
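Note: the head-block format is now chosen per stream from the tenant override rather than the global cfg.UnorderedWrites. A sketch of what the choice implies, not part of this PR (the encoding and size arguments are illustrative): an unordered head block accepts an entry older than the newest one already appended, while an ordered head block rejects it.

    // Sketch: behavioral difference between the two head-block formats.
    // EncSnappy and the block/target sizes are illustrative values.
    t0 := time.Now()
    c := chunkenc.NewMemChunk(chunkenc.EncSnappy, chunkenc.UnorderedHeadBlockFmt, 256*1024, 1500*1024)
    _ = c.Append(&logproto.Entry{Timestamp: t0.Add(time.Second), Line: "later entry"})
    err := c.Append(&logproto.Entry{Timestamp: t0, Line: "earlier entry"})
    // err is nil here; with chunkenc.OrderedHeadBlockFmt the second Append
    // would return chunkenc.ErrOutOfOrder.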
@@ -225,7 +228,7 @@ func (s *stream) Push(
 	}
 
 	// The validity window for unordered writes is the highest timestamp present minus 1/2 * max-chunk-age.
-	if s.cfg.UnorderedWrites && !s.highestTs.IsZero() && s.highestTs.Add(-s.cfg.MaxChunkAge/2).After(entries[i].Timestamp) {
+	if s.unorderedWrites && !s.highestTs.IsZero() && s.highestTs.Add(-s.cfg.MaxChunkAge/2).After(entries[i].Timestamp) {
 		failedEntriesWithError = append(failedEntriesWithError, entryWithError{&entries[i], chunkenc.ErrOutOfOrder})
 		outOfOrderSamples++
 		outOfOrderBytes += len(entries[i].Line)
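Note: the validity window keeps out-of-orderness bounded even for opted-in tenants. A worked example with hypothetical values: with a max chunk age of 2h and a highest accepted timestamp of 12:00, anything before 11:00 is still rejected with ErrOutOfOrder.

    // Sketch: the validity-window check with hypothetical values.
    highestTs := time.Date(2021, 8, 10, 12, 0, 0, 0, time.UTC)
    maxChunkAge := 2 * time.Hour
    cutoff := highestTs.Add(-maxChunkAge / 2) // 11:00

    entry := time.Date(2021, 8, 10, 10, 59, 0, 0, time.UTC)
    rejected := cutoff.After(entry) // true: outside the window, dropped as out of order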
6 changes: 5 additions & 1 deletion pkg/ingester/stream_test.go
@@ -53,6 +53,7 @@ func TestMaxReturnedStreamsErrors(t *testing.T) {
 		labels.Labels{
 			{Name: "foo", Value: "bar"},
 		},
+		true,
 		NilMetrics,
 	)

@@ -92,6 +93,7 @@ func TestPushDeduplication(t *testing.T) {
 		labels.Labels{
 			{Name: "foo", Value: "bar"},
 		},
+		true,
 		NilMetrics,
 	)

@@ -115,6 +117,7 @@ func TestPushRejectOldCounter(t *testing.T) {
 		labels.Labels{
 			{Name: "foo", Value: "bar"},
 		},
+		true,
 		NilMetrics,
 	)

@@ -203,6 +206,7 @@ func TestUnorderedPush(t *testing.T) {
 		labels.Labels{
 			{Name: "foo", Value: "bar"},
 		},
+		true,
 		NilMetrics,
 	)

@@ -300,7 +304,7 @@ func Benchmark_PushStream(b *testing.B) {
 		labels.Label{Name: "job", Value: "loki-dev/ingester"},
 		labels.Label{Name: "container", Value: "ingester"},
 	}
-	s := newStream(&Config{}, "fake", model.Fingerprint(0), ls, NilMetrics)
+	s := newStream(&Config{}, "fake", model.Fingerprint(0), ls, true, NilMetrics)
 	t, err := newTailer("foo", `{namespace="loki-dev"}`, &fakeTailServer{})
 	require.NoError(b, err)

10 changes: 8 additions & 2 deletions pkg/validation/limits.go
@@ -42,8 +42,9 @@ type Limits struct {
 	MaxLineSizeTruncate bool `yaml:"max_line_size_truncate" json:"max_line_size_truncate"`
 
 	// Ingester enforced limits.
-	MaxLocalStreamsPerUser  int `yaml:"max_streams_per_user" json:"max_streams_per_user"`
-	MaxGlobalStreamsPerUser int `yaml:"max_global_streams_per_user" json:"max_global_streams_per_user"`
+	MaxLocalStreamsPerUser  int  `yaml:"max_streams_per_user" json:"max_streams_per_user"`
+	MaxGlobalStreamsPerUser int  `yaml:"max_global_streams_per_user" json:"max_global_streams_per_user"`
+	UnorderedWrites         bool `yaml:"unordered_writes" json:"unordered_writes"`
 
 	// Querier enforced limits.
 	MaxChunksPerQuery int `yaml:"max_chunks_per_query" json:"max_chunks_per_query"`
@@ -105,6 +106,7 @@ func (l *Limits) RegisterFlags(f *flag.FlagSet) {
 
 	f.IntVar(&l.MaxLocalStreamsPerUser, "ingester.max-streams-per-user", 10e3, "Maximum number of active streams per user, per ingester. 0 to disable.")
 	f.IntVar(&l.MaxGlobalStreamsPerUser, "ingester.max-global-streams-per-user", 0, "Maximum number of active streams per user, across the cluster. 0 to disable.")
+	f.BoolVar(&l.UnorderedWrites, "ingester.unordered-writes", false, "(Experimental) Allow out of order writes.")
 
 	f.IntVar(&l.MaxChunksPerQuery, "store.query-chunk-limit", 2e6, "Maximum number of chunks that can be fetched in a single query.")
@@ -400,6 +402,10 @@ func (o *Overrides) StreamRetention(userID string) []StreamRetention {
 	return o.getOverridesForUser(userID).StreamRetention
 }
 
+func (o *Overrides) UnorderedWrites(userID string) bool {
+	return o.getOverridesForUser(userID).UnorderedWrites
+}
+
 func (o *Overrides) ForEachTenantLimit(callback ForEachTenantLimitCallback) {
 	o.tenantLimits.ForEachTenantLimit(callback)
 }
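Note: with the flag default left at false, ordering remains enforced unless a tenant opts in. Assuming the usual Loki runtime-config overrides layout (the tenant name is illustrative), enabling it for a single tenant might look like:

    # Hypothetical runtime overrides file; "tenant-a" is illustrative.
    overrides:
      tenant-a:
        unordered_writes: true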