From a25e730106b7f978b56451f2d898578cf8b72e93 Mon Sep 17 00:00:00 2001 From: Michael Okoko Date: Mon, 26 Apr 2021 11:44:35 +0100 Subject: [PATCH 01/26] import wal package from grafana/agent (with string interning disabled) Signed-off-by: Michael Okoko --- pkg/rules/remotewrite/series.go | 256 +++++++++++ pkg/rules/remotewrite/util.go | 118 ++++++ pkg/rules/remotewrite/wal.go | 678 ++++++++++++++++++++++++++++++ pkg/rules/remotewrite/wal_test.go | 374 ++++++++++++++++ 4 files changed, 1426 insertions(+) create mode 100644 pkg/rules/remotewrite/series.go create mode 100644 pkg/rules/remotewrite/util.go create mode 100644 pkg/rules/remotewrite/wal.go create mode 100644 pkg/rules/remotewrite/wal_test.go diff --git a/pkg/rules/remotewrite/series.go b/pkg/rules/remotewrite/series.go new file mode 100644 index 0000000000..f75d415ebb --- /dev/null +++ b/pkg/rules/remotewrite/series.go @@ -0,0 +1,256 @@ +package remotewrite + +import ( + "sync" + + "github.com/prometheus/prometheus/pkg/labels" +) + +type memSeries struct { + sync.Mutex + + ref uint64 + lset labels.Labels + lastTs int64 + + // TODO(rfratto): this solution below isn't perfect, and there's still + // the possibility for a series to be deleted before it's + // completely gone from the WAL. Rather, we should have gc return + // a "should delete" map and be given a "deleted" map. + // If a series that is going to be marked for deletion is in the + // "deleted" map, then it should be deleted instead. + // + // The "deleted" map will be populated by the Truncate function. + // It will be cleared with every call to gc. + + // willDelete marks a series as to be deleted on the next garbage + // collection. If it receives a write, willDelete is disabled. + willDelete bool + + // Whether this series has samples waiting to be committed to the WAL + pendingCommit bool +} + +func (s *memSeries) updateTs(ts int64) { + s.lastTs = ts + s.willDelete = false + s.pendingCommit = true +} + +// seriesHashmap is a simple hashmap for memSeries by their label set. It is +// built on top of a regular hashmap and holds a slice of series to resolve +// hash collisions. Its methods require the hash to be submitted with it to +// avoid re-computations throughout the code. +// +// This code is copied from the Prometheus TSDB. +type seriesHashmap map[uint64][]*memSeries + +func (m seriesHashmap) get(hash uint64, lset labels.Labels) *memSeries { + for _, s := range m[hash] { + if labels.Equal(s.lset, lset) { + return s + } + } + return nil +} + +func (m seriesHashmap) set(hash uint64, s *memSeries) { + l := m[hash] + for i, prev := range l { + if labels.Equal(prev.lset, s.lset) { + l[i] = s + return + } + } + m[hash] = append(l, s) +} + +func (m seriesHashmap) del(hash uint64, ref uint64) { + var rem []*memSeries + for _, s := range m[hash] { + if s.ref != ref { + rem = append(rem, s) + } else { + //intern.ReleaseLabels(intern.Global, s.lset) + } + } + if len(rem) == 0 { + delete(m, hash) + } else { + m[hash] = rem + } +} + +const ( + // defaultStripeSize is the default number of entries to allocate in the + // stripeSeries hash map. + defaultStripeSize = 1 << 14 +) + +// stripeSeries locks modulo ranges of IDs and hashes to reduce lock contention. +// The locks are padded to not be on the same cache line. Filling the padded space +// with the maps was profiled to be slower – likely due to the additional pointer +// dereferences. +// +// This code is copied from the Prometheus TSDB. 
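+//
+// A series is indexed twice: by ref ID in s.series and by label hash in
+// s.hashes. In both cases the stripe is picked with key & (size-1), which is
+// why defaultStripeSize must stay a power of two.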
+type stripeSeries struct { + size int + series []map[uint64]*memSeries + hashes []seriesHashmap + locks []stripeLock +} + +type stripeLock struct { + sync.RWMutex + // Padding to avoid multiple locks being on the same cache line. + _ [40]byte +} + +func newStripeSeries() *stripeSeries { + stripeSize := defaultStripeSize + s := &stripeSeries{ + size: stripeSize, + series: make([]map[uint64]*memSeries, stripeSize), + hashes: make([]seriesHashmap, stripeSize), + locks: make([]stripeLock, stripeSize), + } + + for i := range s.series { + s.series[i] = map[uint64]*memSeries{} + } + for i := range s.hashes { + s.hashes[i] = seriesHashmap{} + } + return s +} + +// gc garbage collects old chunks that are strictly before mint and removes +// series entirely that have no chunks left. +func (s *stripeSeries) gc(mint int64) map[uint64]struct{} { + var ( + deleted = map[uint64]struct{}{} + ) + + // Run through all series and find series that haven't been written to + // since mint. Mark those series as deleted and store their ID. + for i := 0; i < s.size; i++ { + s.locks[i].Lock() + + for _, series := range s.series[i] { + series.Lock() + seriesHash := series.lset.Hash() + + // If the series has received a write after mint, there's still + // data and it's not completely gone yet. + if series.lastTs >= mint || series.pendingCommit { + series.willDelete = false + series.Unlock() + continue + } + + // The series hasn't received any data and *might* be gone, but + // we want to give it an opportunity to come back before marking + // it as deleted, so we wait one more GC cycle. + if !series.willDelete { + series.willDelete = true + series.Unlock() + continue + } + + // The series is gone entirely. We'll need to delete the label + // hash (if one exists) so we'll obtain a lock for that too. + j := int(seriesHash) & (s.size - 1) + if i != j { + s.locks[j].Lock() + } + + deleted[series.ref] = struct{}{} + delete(s.series[i], series.ref) + s.hashes[j].del(seriesHash, series.ref) + + if i != j { + s.locks[j].Unlock() + } + + series.Unlock() + } + + s.locks[i].Unlock() + } + + return deleted +} + +func (s *stripeSeries) getByID(id uint64) *memSeries { + i := id & uint64(s.size-1) + + s.locks[i].RLock() + series := s.series[i][id] + s.locks[i].RUnlock() + + return series +} + +func (s *stripeSeries) getByHash(hash uint64, lset labels.Labels) *memSeries { + i := hash & uint64(s.size-1) + + s.locks[i].RLock() + series := s.hashes[i].get(hash, lset) + s.locks[i].RUnlock() + + return series +} + +func (s *stripeSeries) set(hash uint64, series *memSeries) { + i := hash & uint64(s.size-1) + s.locks[i].Lock() + s.hashes[i].set(hash, series) + s.locks[i].Unlock() + + i = series.ref & uint64(s.size-1) + s.locks[i].Lock() + s.series[i][series.ref] = series + s.locks[i].Unlock() +} + +func (s *stripeSeries) iterator() *stripeSeriesIterator { + return &stripeSeriesIterator{s} +} + +// stripeSeriesIterator allows to iterate over series through a channel. +// The channel should always be completely consumed to not leak. 
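+// The producing goroutine holds the stripe read locks while it sends on the
+// channel, so abandoning the channel also keeps those locks held.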
+type stripeSeriesIterator struct { + s *stripeSeries +} + +func (it *stripeSeriesIterator) Channel() <-chan *memSeries { + ret := make(chan *memSeries) + + go func() { + for i := 0; i < it.s.size; i++ { + it.s.locks[i].RLock() + + for _, series := range it.s.series[i] { + series.Lock() + + j := int(series.lset.Hash()) & (it.s.size - 1) + if i != j { + it.s.locks[j].RLock() + } + + ret <- series + + if i != j { + it.s.locks[j].RUnlock() + } + series.Unlock() + } + + it.s.locks[i].RUnlock() + } + + close(ret) + }() + + return ret +} diff --git a/pkg/rules/remotewrite/util.go b/pkg/rules/remotewrite/util.go new file mode 100644 index 0000000000..3a1e593e0f --- /dev/null +++ b/pkg/rules/remotewrite/util.go @@ -0,0 +1,118 @@ +package remotewrite + +import ( + "path/filepath" + "sync" + + "github.com/prometheus/prometheus/tsdb/record" + "github.com/prometheus/prometheus/tsdb/wal" +) + +type walReplayer struct { + w wal.WriteTo +} + +func (r walReplayer) Replay(dir string) error { + w, err := wal.Open(nil, dir) + if err != nil { + return err + } + + dir, startFrom, err := wal.LastCheckpoint(w.Dir()) + if err != nil && err != record.ErrNotFound { + return err + } + + if err == nil { + sr, err := wal.NewSegmentsReader(dir) + if err != nil { + return err + } + + err = r.replayWAL(wal.NewReader(sr)) + if closeErr := sr.Close(); closeErr != nil && err == nil { + err = closeErr + } + if err != nil { + return err + } + + startFrom++ + } + + _, last, err := wal.Segments(w.Dir()) + if err != nil { + return err + } + + for i := startFrom; i <= last; i++ { + s, err := wal.OpenReadSegment(wal.SegmentName(w.Dir(), i)) + if err != nil { + return err + } + + sr := wal.NewSegmentBufReader(s) + err = r.replayWAL(wal.NewReader(sr)) + if closeErr := sr.Close(); closeErr != nil && err == nil { + err = closeErr + } + if err != nil { + return err + } + } + + return nil +} + +func (r walReplayer) replayWAL(reader *wal.Reader) error { + var dec record.Decoder + + for reader.Next() { + rec := reader.Record() + switch dec.Type(rec) { + case record.Series: + series, err := dec.Series(rec, nil) + if err != nil { + return err + } + r.w.StoreSeries(series, 0) + case record.Samples: + samples, err := dec.Samples(rec, nil) + if err != nil { + return err + } + r.w.Append(samples) + } + } + + return nil +} + +type walDataCollector struct { + mut sync.Mutex + samples []record.RefSample + series []record.RefSeries +} + +func (c *walDataCollector) Append(samples []record.RefSample) bool { + c.mut.Lock() + defer c.mut.Unlock() + + c.samples = append(c.samples, samples...) + return true +} + +func (c *walDataCollector) StoreSeries(series []record.RefSeries, _ int) { + c.mut.Lock() + defer c.mut.Unlock() + + c.series = append(c.series, series...) +} + +func (c *walDataCollector) SeriesReset(_ int) {} + +// SubDirectory returns the subdirectory within a Storage directory used for +// the Prometheus WAL. 
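+// For a storage rooted at <dir>, the WAL segments therefore live in <dir>/wal.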
+func SubDirectory(base string) string { + return filepath.Join(base, "wal") +} diff --git a/pkg/rules/remotewrite/wal.go b/pkg/rules/remotewrite/wal.go new file mode 100644 index 0000000000..a7851cff09 --- /dev/null +++ b/pkg/rules/remotewrite/wal.go @@ -0,0 +1,678 @@ +package remotewrite + +import ( + "context" + "fmt" + "math" + "sync" + "time" + + "github.com/go-kit/kit/log" + "github.com/go-kit/kit/log/level" + "github.com/pkg/errors" + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/prometheus/pkg/exemplar" + "github.com/prometheus/prometheus/pkg/labels" + "github.com/prometheus/prometheus/pkg/timestamp" + "github.com/prometheus/prometheus/pkg/value" + "github.com/prometheus/prometheus/storage" + "github.com/prometheus/prometheus/tsdb" + "github.com/prometheus/prometheus/tsdb/record" + "github.com/prometheus/prometheus/tsdb/wal" +) + +// ErrWALClosed is an error returned when a WAL operation can't run because the +// storage has already been closed. +var ErrWALClosed = fmt.Errorf("WAL storage closed") + +type storageMetrics struct { + r prometheus.Registerer + + numActiveSeries prometheus.Gauge + numDeletedSeries prometheus.Gauge + totalCreatedSeries prometheus.Counter + totalRemovedSeries prometheus.Counter + totalAppendedSamples prometheus.Counter +} + +func newStorageMetrics(r prometheus.Registerer) *storageMetrics { + m := storageMetrics{r: r} + m.numActiveSeries = prometheus.NewGauge(prometheus.GaugeOpts{ + Name: "agent_wal_storage_active_series", + Help: "Current number of active series being tracked by the WAL storage", + }) + + m.numDeletedSeries = prometheus.NewGauge(prometheus.GaugeOpts{ + Name: "agent_wal_storage_deleted_series", + Help: "Current number of series marked for deletion from memory", + }) + + m.totalCreatedSeries = prometheus.NewCounter(prometheus.CounterOpts{ + Name: "agent_wal_storage_created_series_total", + Help: "Total number of created series appended to the WAL", + }) + + m.totalRemovedSeries = prometheus.NewCounter(prometheus.CounterOpts{ + Name: "agent_wal_storage_removed_series_total", + Help: "Total number of created series removed from the WAL", + }) + + m.totalAppendedSamples = prometheus.NewCounter(prometheus.CounterOpts{ + Name: "agent_wal_samples_appended_total", + Help: "Total number of samples appended to the WAL", + }) + + if r != nil { + r.MustRegister( + m.numActiveSeries, + m.numDeletedSeries, + m.totalCreatedSeries, + m.totalRemovedSeries, + m.totalAppendedSamples, + ) + } + + return &m +} + +func (m *storageMetrics) Unregister() { + if m.r == nil { + return + } + cs := []prometheus.Collector{ + m.numActiveSeries, + m.numDeletedSeries, + m.totalCreatedSeries, + m.totalRemovedSeries, + } + for _, c := range cs { + m.r.Unregister(c) + } +} + +// Storage implements storage.Storage, and just writes to the WAL. +type Storage struct { + // Embed Queryable/ChunkQueryable for compatibility, but don't actually implement it. + storage.Queryable + storage.ChunkQueryable + + // Operations against the WAL must be protected by a mutex so it doesn't get + // closed in the middle of an operation. Other operations are concurrency-safe, so we + // use a RWMutex to allow multiple usages of the WAL at once. If the WAL is closed, all + // operations that change the WAL must fail. 
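+	// Note that walMtx guards only the WAL handle and walClosed; nextRef is
+	// guarded by mtx and the deleted map by deletedMtx further down.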
+ walMtx sync.RWMutex + walClosed bool + + path string + wal *wal.WAL + logger log.Logger + + appenderPool sync.Pool + bufPool sync.Pool + + mtx sync.RWMutex + nextRef uint64 + series *stripeSeries + + deletedMtx sync.Mutex + deleted map[uint64]int // Deleted series, and what WAL segment they must be kept until. + + metrics *storageMetrics +} + +// NewStorage makes a new Storage. +func NewStorage(logger log.Logger, registerer prometheus.Registerer, path string) (*Storage, error) { + w, err := wal.NewSize(logger, registerer, SubDirectory(path), wal.DefaultSegmentSize, true) + if err != nil { + return nil, err + } + + storage := &Storage{ + path: path, + wal: w, + logger: logger, + deleted: map[uint64]int{}, + series: newStripeSeries(), + metrics: newStorageMetrics(registerer), + + // The first ref ID must be non-zero, as the scraping code treats 0 as a + // non-existent ID and won't cache it. + nextRef: 1, + } + + storage.bufPool.New = func() interface{} { + b := make([]byte, 0, 1024) + return b + } + + storage.appenderPool.New = func() interface{} { + return &appender{ + w: storage, + series: make([]record.RefSeries, 0, 100), + samples: make([]record.RefSample, 0, 100), + } + } + + if err := storage.replayWAL(); err != nil { + level.Warn(storage.logger).Log("msg", "encountered WAL read error, attempting repair", "err", err) + if err := w.Repair(err); err != nil { + return nil, errors.Wrap(err, "repair corrupted WAL") + } + } + + return storage, nil +} + +func (w *Storage) replayWAL() error { + w.walMtx.RLock() + defer w.walMtx.RUnlock() + + if w.walClosed { + return ErrWALClosed + } + + level.Info(w.logger).Log("msg", "replaying WAL, this may take a while", "dir", w.wal.Dir()) + dir, startFrom, err := wal.LastCheckpoint(w.wal.Dir()) + if err != nil && err != record.ErrNotFound { + return errors.Wrap(err, "find last checkpoint") + } + + if err == nil { + sr, err := wal.NewSegmentsReader(dir) + if err != nil { + return errors.Wrap(err, "open checkpoint") + } + defer func() { + if err := sr.Close(); err != nil { + level.Warn(w.logger).Log("msg", "error while closing the wal segments reader", "err", err) + } + }() + + // A corrupted checkpoint is a hard error for now and requires user + // intervention. There's likely little data that can be recovered anyway. + if err := w.loadWAL(wal.NewReader(sr)); err != nil { + return errors.Wrap(err, "backfill checkpoint") + } + startFrom++ + level.Info(w.logger).Log("msg", "WAL checkpoint loaded") + } + + // Find the last segment. + _, last, err := wal.Segments(w.wal.Dir()) + if err != nil { + return errors.Wrap(err, "finding WAL segments") + } + + // Backfill segments from the most recent checkpoint onwards. 
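+	// Any read error during replay is propagated to NewStorage, which then
+	// attempts a WAL repair before giving up.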
+ for i := startFrom; i <= last; i++ { + s, err := wal.OpenReadSegment(wal.SegmentName(w.wal.Dir(), i)) + if err != nil { + return errors.Wrap(err, fmt.Sprintf("open WAL segment: %d", i)) + } + + sr := wal.NewSegmentBufReader(s) + err = w.loadWAL(wal.NewReader(sr)) + if err := sr.Close(); err != nil { + level.Warn(w.logger).Log("msg", "error while closing the wal segments reader", "err", err) + } + if err != nil { + return err + } + level.Info(w.logger).Log("msg", "WAL segment loaded", "segment", i, "maxSegment", last) + } + + return nil +} + +func (w *Storage) loadWAL(r *wal.Reader) (err error) { + var ( + dec record.Decoder + ) + + var ( + decoded = make(chan interface{}, 10) + errCh = make(chan error, 1) + seriesPool = sync.Pool{ + New: func() interface{} { + return []record.RefSeries{} + }, + } + samplesPool = sync.Pool{ + New: func() interface{} { + return []record.RefSample{} + }, + } + ) + + go func() { + defer close(decoded) + for r.Next() { + rec := r.Record() + switch dec.Type(rec) { + case record.Series: + series := seriesPool.Get().([]record.RefSeries)[:0] + series, err = dec.Series(rec, series) + if err != nil { + errCh <- &wal.CorruptionErr{ + Err: errors.Wrap(err, "decode series"), + Segment: r.Segment(), + Offset: r.Offset(), + } + return + } + decoded <- series + case record.Samples: + samples := samplesPool.Get().([]record.RefSample)[:0] + samples, err = dec.Samples(rec, samples) + if err != nil { + errCh <- &wal.CorruptionErr{ + Err: errors.Wrap(err, "decode samples"), + Segment: r.Segment(), + Offset: r.Offset(), + } + } + decoded <- samples + case record.Tombstones: + // We don't care about tombstones + continue + default: + errCh <- &wal.CorruptionErr{ + Err: errors.Errorf("invalid record type %v", dec.Type(rec)), + Segment: r.Segment(), + Offset: r.Offset(), + } + return + } + } + }() + + for d := range decoded { + switch v := d.(type) { + case []record.RefSeries: + for _, s := range v { + // If this is a new series, create it in memory without a timestamp. + // If we read in a sample for it, we'll use the timestamp of the latest + // sample. Otherwise, the series is stale and will be deleted once + // the truncation is performed. + if w.series.getByID(s.Ref) == nil { + series := &memSeries{ref: s.Ref, lset: s.Labels, lastTs: 0} + w.series.set(s.Labels.Hash(), series) + + w.metrics.numActiveSeries.Inc() + w.metrics.totalCreatedSeries.Inc() + + w.mtx.Lock() + if w.nextRef <= s.Ref { + w.nextRef = s.Ref + 1 + } + w.mtx.Unlock() + } + } + + //nolint:staticcheck + seriesPool.Put(v) + case []record.RefSample: + for _, s := range v { + // Update the lastTs for the series based + series := w.series.getByID(s.Ref) + if series == nil { + level.Warn(w.logger).Log("msg", "found sample referencing non-existing series, skipping") + continue + } + + series.Lock() + if s.T > series.lastTs { + series.lastTs = s.T + } + series.Unlock() + } + + //nolint:staticcheck + samplesPool.Put(v) + default: + panic(fmt.Errorf("unexpected decoded type: %T", d)) + } + } + + select { + case err := <-errCh: + return err + default: + } + + if r.Err() != nil { + return errors.Wrap(r.Err(), "read records") + } + + return nil +} + +// Directory returns the path where the WAL storage is held. +func (w *Storage) Directory() string { + return w.path +} + +// Appender returns a new appender against the storage. +func (w *Storage) Appender(_ context.Context) storage.Appender { + return w.appenderPool.Get().(storage.Appender) +} + +// StartTime always returns 0, nil. 
It is implemented for compatibility with +// Prometheus, but is unused in the agent. +func (*Storage) StartTime() (int64, error) { + return 0, nil +} + +// Truncate removes all data from the WAL prior to the timestamp specified by +// mint. +func (w *Storage) Truncate(mint int64) error { + w.walMtx.RLock() + defer w.walMtx.RUnlock() + + if w.walClosed { + return ErrWALClosed + } + + start := time.Now() + + // Garbage collect series that haven't received an update since mint. + w.gc(mint) + level.Info(w.logger).Log("msg", "series GC completed", "duration", time.Since(start)) + + first, last, err := wal.Segments(w.wal.Dir()) + if err != nil { + return errors.Wrap(err, "get segment range") + } + + // Start a new segment, so low ingestion volume instance don't have more WAL + // than needed. + err = w.wal.NextSegment() + if err != nil { + return errors.Wrap(err, "next segment") + } + + last-- // Never consider last segment for checkpoint. + if last < 0 { + return nil // no segments yet. + } + + // The lower two thirds of segments should contain mostly obsolete samples. + // If we have less than two segments, it's not worth checkpointing yet. + last = first + (last-first)*2/3 + if last <= first { + return nil + } + + keep := func(id uint64) bool { + if w.series.getByID(id) != nil { + return true + } + + w.deletedMtx.Lock() + _, ok := w.deleted[id] + w.deletedMtx.Unlock() + return ok + } + if _, err = wal.Checkpoint(w.logger, w.wal, first, last, keep, mint); err != nil { + return errors.Wrap(err, "create checkpoint") + } + if err := w.wal.Truncate(last + 1); err != nil { + // If truncating fails, we'll just try again at the next checkpoint. + // Leftover segments will just be ignored in the future if there's a checkpoint + // that supersedes them. + level.Error(w.logger).Log("msg", "truncating segments failed", "err", err) + } + + // The checkpoint is written and segments before it is truncated, so we no + // longer need to track deleted series that are before it. + w.deletedMtx.Lock() + for ref, segment := range w.deleted { + if segment < first { + delete(w.deleted, ref) + w.metrics.totalRemovedSeries.Inc() + } + } + w.metrics.numDeletedSeries.Set(float64(len(w.deleted))) + w.deletedMtx.Unlock() + + if err := wal.DeleteCheckpoints(w.wal.Dir(), last); err != nil { + // Leftover old checkpoints do not cause problems down the line beyond + // occupying disk space. + // They will just be ignored since a higher checkpoint exists. + level.Error(w.logger).Log("msg", "delete old checkpoints", "err", err) + } + + level.Info(w.logger).Log("msg", "WAL checkpoint complete", + "first", first, "last", last, "duration", time.Since(start)) + return nil +} + +// gc removes data before the minimum timestamp from the head. +func (w *Storage) gc(mint int64) { + deleted := w.series.gc(mint) + w.metrics.numActiveSeries.Sub(float64(len(deleted))) + + _, last, _ := wal.Segments(w.wal.Dir()) + w.deletedMtx.Lock() + defer w.deletedMtx.Unlock() + + // We want to keep series records for any newly deleted series + // until we've passed the last recorded segment. The WAL will + // still contain samples records with all of the ref IDs until + // the segment's samples has been deleted from the checkpoint. + // + // If the series weren't kept on startup when the WAL was replied, + // the samples wouldn't be able to be used since there wouldn't + // be any labels for that ref ID. 
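+	// Entries recorded here are pruned again in Truncate once the WAL has
+	// been truncated past the recorded segment.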
+ for ref := range deleted { + w.deleted[ref] = last + } + + w.metrics.numDeletedSeries.Set(float64(len(w.deleted))) +} + +// WriteStalenessMarkers appends a staleness sample for all active series. +func (w *Storage) WriteStalenessMarkers(remoteTsFunc func() int64) error { + var lastErr error + var lastTs int64 + + app := w.Appender(context.Background()) + it := w.series.iterator() + for series := range it.Channel() { + var ( + ref = series.ref + lset = series.lset + ) + + ts := timestamp.FromTime(time.Now()) + _, err := app.Append(ref, lset, ts, math.Float64frombits(value.StaleNaN)) + if err != nil { + lastErr = err + } + + // Remove millisecond precision; the remote write timestamp we get + // only has second precision. + lastTs = (ts / 1000) * 1000 + } + + if lastErr == nil { + if err := app.Commit(); err != nil { + return fmt.Errorf("failed to commit staleness markers: %w", err) + } + + // Wait for remote write to write the lastTs, but give up after 1m + level.Info(w.logger).Log("msg", "waiting for remote write to write staleness markers...") + + stopCh := time.After(1 * time.Minute) + start := time.Now() + + Outer: + for { + select { + case <-stopCh: + level.Error(w.logger).Log("msg", "timed out waiting for staleness markers to be written") + break Outer + default: + writtenTs := remoteTsFunc() + if writtenTs >= lastTs { + duration := time.Since(start) + level.Info(w.logger).Log("msg", "remote write wrote staleness markers", "duration", duration) + break Outer + } + + level.Info(w.logger).Log("msg", "remote write hasn't written staleness markers yet", "remoteTs", writtenTs, "lastTs", lastTs) + + // Wait a bit before reading again + time.Sleep(5 * time.Second) + } + } + } + + return lastErr +} + +// Close closes the storage and all its underlying resources. +func (w *Storage) Close() error { + w.walMtx.Lock() + defer w.walMtx.Unlock() + + if w.walClosed { + return fmt.Errorf("already closed") + } + w.walClosed = true + + if w.metrics != nil { + w.metrics.Unregister() + } + return w.wal.Close() +} + +type appender struct { + w *Storage + series []record.RefSeries + samples []record.RefSample +} + +func (a *appender) Append(ref uint64, l labels.Labels, t int64, v float64) (uint64, error) { + if ref == 0 { + return a.Add(l, t, v) + } + return ref, a.AddFast(ref, t, v) +} + +func (a *appender) Add(l labels.Labels, t int64, v float64) (uint64, error) { + hash := l.Hash() + series := a.w.series.getByHash(hash, l) + if series != nil { + return series.ref, a.AddFast(series.ref, t, v) + } + + // Ensure no empty or duplicate labels have gotten through. This mirrors the + // equivalent validation code in the TSDB's headAppender. 
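+	// Invalid label sets are rejected with tsdb.ErrInvalidSample rather than
+	// being written to the WAL.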
+ l = l.WithoutEmpty() + if len(l) == 0 { + return 0, errors.Wrap(tsdb.ErrInvalidSample, "empty labelset") + } + + if lbl, dup := l.HasDuplicateLabelNames(); dup { + return 0, errors.Wrap(tsdb.ErrInvalidSample, fmt.Sprintf(`label name "%s" is not unique`, lbl)) + } + + a.w.mtx.Lock() + ref := a.w.nextRef + a.w.nextRef++ + a.w.mtx.Unlock() + + series = &memSeries{ref: ref, lset: l} + series.updateTs(t) + + a.series = append(a.series, record.RefSeries{ + Ref: ref, + Labels: l, + }) + + a.w.series.set(hash, series) + + a.w.metrics.numActiveSeries.Inc() + a.w.metrics.totalCreatedSeries.Inc() + a.w.metrics.totalAppendedSamples.Inc() + + return series.ref, a.AddFast(series.ref, t, v) +} + +func (a *appender) AddFast(ref uint64, t int64, v float64) error { + series := a.w.series.getByID(ref) + if series == nil { + return storage.ErrNotFound + } + series.Lock() + defer series.Unlock() + + // Update last recorded timestamp. Used by Storage.gc to determine if a + // series is dead. + series.updateTs(t) + + a.samples = append(a.samples, record.RefSample{ + Ref: ref, + T: t, + V: v, + }) + + a.w.metrics.totalAppendedSamples.Inc() + return nil +} + +func (a *appender) AppendExemplar(ref uint64, l labels.Labels, e exemplar.Exemplar) (uint64, error) { + // remote_write doesn't support exemplars yet, so do nothing here. + return 0, nil +} + +// Commit submits the collected samples and purges the batch. +func (a *appender) Commit() error { + a.w.walMtx.RLock() + defer a.w.walMtx.RUnlock() + + if a.w.walClosed { + return ErrWALClosed + } + + var encoder record.Encoder + buf := a.w.bufPool.Get().([]byte) + + if len(a.series) > 0 { + buf = encoder.Series(a.series, buf) + if err := a.w.wal.Log(buf); err != nil { + return err + } + buf = buf[:0] + } + + if len(a.samples) > 0 { + buf = encoder.Samples(a.samples, buf) + if err := a.w.wal.Log(buf); err != nil { + return err + } + buf = buf[:0] + } + + //nolint:staticcheck + a.w.bufPool.Put(buf) + + for _, sample := range a.samples { + series := a.w.series.getByID(sample.Ref) + if series != nil { + series.Lock() + series.pendingCommit = false + series.Unlock() + } + } + + return a.Rollback() +} + +func (a *appender) Rollback() error { + a.series = a.series[:0] + a.samples = a.samples[:0] + a.w.appenderPool.Put(a) + return nil +} diff --git a/pkg/rules/remotewrite/wal_test.go b/pkg/rules/remotewrite/wal_test.go new file mode 100644 index 0000000000..84787544eb --- /dev/null +++ b/pkg/rules/remotewrite/wal_test.go @@ -0,0 +1,374 @@ +package remotewrite + +import ( + "context" + "io/ioutil" + "math" + "os" + "sort" + "testing" + "time" + + "github.com/go-kit/kit/log" + "github.com/prometheus/prometheus/pkg/labels" + "github.com/prometheus/prometheus/pkg/value" + "github.com/prometheus/prometheus/storage" + "github.com/prometheus/prometheus/tsdb/record" + "github.com/stretchr/testify/require" +) + +func TestStorage_InvalidSeries(t *testing.T) { + walDir, err := ioutil.TempDir(os.TempDir(), "wal") + require.NoError(t, err) + defer os.RemoveAll(walDir) + + s, err := NewStorage(log.NewNopLogger(), nil, walDir) + require.NoError(t, err) + defer func() { + require.NoError(t, s.Close()) + }() + + app := s.Appender(context.Background()) + + _, err = app.Append(0, labels.Labels{}, 0, 0) + require.Error(t, err, "should reject empty labels") + + _, err = app.Append(0, labels.Labels{{Name: "a", Value: "1"}, {Name: "a", Value: "2"}}, 0, 0) + require.Error(t, err, "should reject duplicate labels") + + // Sanity check: valid series + _, err = app.Append(0, labels.Labels{{Name: "a", 
Value: "1"}}, 0, 0) + require.NoError(t, err, "should not reject valid series") +} + +func TestStorage(t *testing.T) { + walDir, err := ioutil.TempDir(os.TempDir(), "wal") + require.NoError(t, err) + defer os.RemoveAll(walDir) + + s, err := NewStorage(log.NewNopLogger(), nil, walDir) + require.NoError(t, err) + defer func() { + require.NoError(t, s.Close()) + }() + + app := s.Appender(context.Background()) + + // Write some samples + payload := seriesList{ + {name: "foo", samples: []sample{{1, 10.0}, {10, 100.0}}}, + {name: "bar", samples: []sample{{2, 20.0}, {20, 200.0}}}, + {name: "baz", samples: []sample{{3, 30.0}, {30, 300.0}}}, + } + for _, metric := range payload { + metric.Write(t, app) + } + + require.NoError(t, app.Commit()) + + collector := walDataCollector{} + replayer := walReplayer{w: &collector} + require.NoError(t, replayer.Replay(s.wal.Dir())) + + names := []string{} + for _, series := range collector.series { + names = append(names, series.Labels.Get("__name__")) + } + require.Equal(t, payload.SeriesNames(), names) + + expectedSamples := payload.ExpectedSamples() + actual := collector.samples + sort.Sort(byRefSample(actual)) + require.Equal(t, expectedSamples, actual) +} + +func TestStorage_ExistingWAL(t *testing.T) { + walDir, err := ioutil.TempDir(os.TempDir(), "wal") + require.NoError(t, err) + defer os.RemoveAll(walDir) + + s, err := NewStorage(log.NewNopLogger(), nil, walDir) + require.NoError(t, err) + + app := s.Appender(context.Background()) + payload := seriesList{ + {name: "foo", samples: []sample{{1, 10.0}, {10, 100.0}}}, + {name: "bar", samples: []sample{{2, 20.0}, {20, 200.0}}}, + {name: "baz", samples: []sample{{3, 30.0}, {30, 300.0}}}, + {name: "blerg", samples: []sample{{4, 40.0}, {40, 400.0}}}, + } + + // Write half of the samples. + for _, metric := range payload[0 : len(payload)/2] { + metric.Write(t, app) + } + + require.NoError(t, app.Commit()) + require.NoError(t, s.Close()) + + // We need to wait a little bit for the previous store to finish + // flushing. + time.Sleep(time.Millisecond * 150) + + // Create a new storage, write the other half of samples. + s, err = NewStorage(log.NewNopLogger(), nil, walDir) + require.NoError(t, err) + defer func() { + require.NoError(t, s.Close()) + }() + + // Verify that the storage picked up existing series when it + // replayed the WAL. + for series := range s.series.iterator().Channel() { + require.Greater(t, series.lastTs, int64(0), "series timestamp not updated") + } + + app = s.Appender(context.Background()) + + for _, metric := range payload[len(payload)/2:] { + metric.Write(t, app) + } + + require.NoError(t, app.Commit()) + + collector := walDataCollector{} + replayer := walReplayer{w: &collector} + require.NoError(t, replayer.Replay(s.wal.Dir())) + + names := []string{} + for _, series := range collector.series { + names = append(names, series.Labels.Get("__name__")) + } + require.Equal(t, payload.SeriesNames(), names) + + expectedSamples := payload.ExpectedSamples() + actual := collector.samples + sort.Sort(byRefSample(actual)) + require.Equal(t, expectedSamples, actual) +} + +func TestStorage_Truncate(t *testing.T) { + // Same as before but now do the following: + // after writing all the data, forcefully create 4 more segments, + // then do a truncate of a timestamp for _some_ of the data. + // then read data back in. Expect to only get the latter half of data. 
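+	// (Truncate only checkpoints when enough segments have accumulated,
+	// hence the forced NextSegment calls further down.)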
+ walDir, err := ioutil.TempDir(os.TempDir(), "wal") + require.NoError(t, err) + defer os.RemoveAll(walDir) + + s, err := NewStorage(log.NewNopLogger(), nil, walDir) + require.NoError(t, err) + defer func() { + require.NoError(t, s.Close()) + }() + + app := s.Appender(context.Background()) + + payload := seriesList{ + {name: "foo", samples: []sample{{1, 10.0}, {10, 100.0}}}, + {name: "bar", samples: []sample{{2, 20.0}, {20, 200.0}}}, + {name: "baz", samples: []sample{{3, 30.0}, {30, 300.0}}}, + {name: "blerg", samples: []sample{{4, 40.0}, {40, 400.0}}}, + } + + for _, metric := range payload { + metric.Write(t, app) + } + + require.NoError(t, app.Commit()) + + // Forefully create a bunch of new segments so when we truncate + // there's enough segments to be considered for truncation. + for i := 0; i < 5; i++ { + require.NoError(t, s.wal.NextSegment()) + } + + // Truncate half of the samples, keeping only the second sample + // per series. + keepTs := payload[len(payload)-1].samples[0].ts + 1 + err = s.Truncate(keepTs) + require.NoError(t, err) + + payload = payload.Filter(func(s sample) bool { + return s.ts >= keepTs + }) + expectedSamples := payload.ExpectedSamples() + + // Read back the WAL, collect series and samples. + collector := walDataCollector{} + replayer := walReplayer{w: &collector} + require.NoError(t, replayer.Replay(s.wal.Dir())) + + names := []string{} + for _, series := range collector.series { + names = append(names, series.Labels.Get("__name__")) + } + require.Equal(t, payload.SeriesNames(), names) + + actual := collector.samples + sort.Sort(byRefSample(actual)) + require.Equal(t, expectedSamples, actual) +} + +func TestStorage_WriteStalenessMarkers(t *testing.T) { + walDir, err := ioutil.TempDir(os.TempDir(), "wal") + require.NoError(t, err) + defer os.RemoveAll(walDir) + + s, err := NewStorage(log.NewNopLogger(), nil, walDir) + require.NoError(t, err) + defer func() { + require.NoError(t, s.Close()) + }() + + app := s.Appender(context.Background()) + + // Write some samples + payload := seriesList{ + {name: "foo", samples: []sample{{1, 10.0}, {10, 100.0}}}, + {name: "bar", samples: []sample{{2, 20.0}, {20, 200.0}}}, + {name: "baz", samples: []sample{{3, 30.0}, {30, 300.0}}}, + } + for _, metric := range payload { + metric.Write(t, app) + } + + require.NoError(t, app.Commit()) + + // Write staleness markers for every series + require.NoError(t, s.WriteStalenessMarkers(func() int64 { + // Pass math.MaxInt64 so it seems like everything was written already + return math.MaxInt64 + })) + + // Read back the WAL, collect series and samples. 
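+	// Every series written above should now also carry a StaleNaN sample.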
+ collector := walDataCollector{} + replayer := walReplayer{w: &collector} + require.NoError(t, replayer.Replay(s.wal.Dir())) + + actual := collector.samples + sort.Sort(byRefSample(actual)) + + staleMap := map[uint64]bool{} + for _, sample := range actual { + if _, ok := staleMap[sample.Ref]; !ok { + staleMap[sample.Ref] = false + } + if value.IsStaleNaN(sample.V) { + staleMap[sample.Ref] = true + } + } + + for ref, v := range staleMap { + require.True(t, v, "ref %d doesn't have stale marker", ref) + } +} + +func TestStoraeg_TruncateAfterClose(t *testing.T) { + walDir, err := ioutil.TempDir(os.TempDir(), "wal") + require.NoError(t, err) + defer os.RemoveAll(walDir) + + s, err := NewStorage(log.NewNopLogger(), nil, walDir) + require.NoError(t, err) + + require.NoError(t, s.Close()) + require.Error(t, ErrWALClosed, s.Truncate(0)) +} + +type sample struct { + ts int64 + val float64 +} + +type series struct { + name string + samples []sample + + ref *uint64 +} + +func (s *series) Write(t *testing.T, app storage.Appender) { + t.Helper() + + lbls := labels.FromMap(map[string]string{"__name__": s.name}) + + offset := 0 + if s.ref == nil { + // Write first sample to get ref ID + ref, err := app.Append(0, lbls, s.samples[0].ts, s.samples[0].val) + require.NoError(t, err) + + s.ref = &ref + offset = 1 + } + + // Write other data points with AddFast + for _, sample := range s.samples[offset:] { + _, err := app.Append(*s.ref, lbls, sample.ts, sample.val) + require.NoError(t, err) + } +} + +type seriesList []*series + +// Filter creates a new seriesList with series filtered by a sample +// keep predicate function. +func (s seriesList) Filter(fn func(s sample) bool) seriesList { + var ret seriesList + + for _, entry := range s { + var samples []sample + + for _, sample := range entry.samples { + if fn(sample) { + samples = append(samples, sample) + } + } + + if len(samples) > 0 { + ret = append(ret, &series{ + name: entry.name, + ref: entry.ref, + samples: samples, + }) + } + } + + return ret +} + +func (s seriesList) SeriesNames() []string { + names := make([]string, 0, len(s)) + for _, series := range s { + names = append(names, series.name) + } + return names +} + +// ExpectedSamples returns the list of expected samples, sorted by ref ID and timestamp +func (s seriesList) ExpectedSamples() []record.RefSample { + expect := []record.RefSample{} + for _, series := range s { + for _, sample := range series.samples { + expect = append(expect, record.RefSample{ + Ref: *series.ref, + T: sample.ts, + V: sample.val, + }) + } + } + sort.Sort(byRefSample(expect)) + return expect +} + +type byRefSample []record.RefSample + +func (b byRefSample) Len() int { return len(b) } +func (b byRefSample) Swap(i, j int) { b[i], b[j] = b[j], b[i] } +func (b byRefSample) Less(i, j int) bool { + if b[i].Ref == b[j].Ref { + return b[i].T < b[j].T + } + return b[i].Ref < b[j].Ref +} From 6a14fc5c8b9de67b8dd4346a6cf70b18713ffd6b Mon Sep 17 00:00:00 2001 From: Michael Okoko Date: Mon, 26 Apr 2021 11:45:55 +0100 Subject: [PATCH 02/26] Set up remote-write config and test skeleton Signed-off-by: Michael Okoko --- cmd/thanos/config.go | 12 ++++++ cmd/thanos/rule.go | 65 ++++++++++++++++++++++++-------- test/e2e/e2ethanos/services.go | 1 + test/e2e/rule_test.go | 68 ++++++++++++++++++++++++++++++++++ 4 files changed, 130 insertions(+), 16 deletions(-) diff --git a/cmd/thanos/config.go b/cmd/thanos/config.go index 3770389a68..f29e983b10 100644 --- a/cmd/thanos/config.go +++ b/cmd/thanos/config.go @@ -224,3 +224,15 @@ func (ac 
*alertMgrConfig) registerFlag(cmd extflag.FlagClause) *alertMgrConfig { ac.alertRelabelConfigPath = extflag.RegisterPathOrContent(cmd, "alert.relabel-config", "YAML file that contains alert relabelling configuration.", extflag.WithEnvSubstitution()) return ac } + +type ruleRWConfig struct { + remoteWrite bool + remoteWriteConfig *extflag.PathOrContent +} + +func (rc *ruleRWConfig) registerFlag(cmd extflag.FlagClause) *ruleRWConfig { + cmd.Flag("remote-write", "If true, directs ruler to remote-write evaluated samples to the server configured by 'remote-write.config'."). + BoolVar(&rc.remoteWrite) + rc.remoteWriteConfig = extflag.RegisterPathOrContent(cmd, "remote-write.config", "YAML config for the remote-write server where samples should be sent to. See https://thanos.io/tip/components/rule.md/#query-api", false) + return rc +} diff --git a/cmd/thanos/rule.go b/cmd/thanos/rule.go index d5893edd2a..8145317a48 100644 --- a/cmd/thanos/rule.go +++ b/cmd/thanos/rule.go @@ -15,6 +15,9 @@ import ( "strings" "time" + "github.com/prometheus/prometheus/storage" + "github.com/thanos-io/thanos/pkg/rules/remotewrite" + "github.com/go-kit/kit/log" "github.com/go-kit/kit/log/level" grpc_logging "github.com/grpc-ecosystem/go-grpc-middleware/v2/interceptors/logging" @@ -74,6 +77,9 @@ type ruleConfig struct { alertQueryURL *url.URL alertRelabelConfigYAML []byte + rwConfig ruleRWConfig + rwConfigYAML []byte + resendDelay time.Duration evalInterval time.Duration ruleFiles []string @@ -89,6 +95,7 @@ func (rc *ruleConfig) registerFlag(cmd extkingpin.FlagClause) { rc.shipper.registerFlag(cmd) rc.query.registerFlag(cmd) rc.alertmgr.registerFlag(cmd) + rc.rwConfig.registerFlag(cmd) } // registerRule registers a rule command. @@ -163,6 +170,14 @@ func registerRule(app *extkingpin.App) { return errors.New("--query/--query.sd-files and --query.config* parameters cannot be defined at the same time") } + // Parse and check remote-write config if it's enabled + if conf.rwConfig.remoteWrite { + conf.rwConfigYAML, err = conf.rwConfig.remoteWriteConfig.Content() + if err != nil { + return err + } + } + // Parse and check alerting configuration. conf.alertmgrsConfigYAML, err = conf.alertmgr.configPath.Content() if err != nil { @@ -318,25 +333,43 @@ func runRule( // Discover and resolve query addresses. 
addDiscoveryGroups(g, queryClient, conf.query.dnsSDInterval) } - - db, err := tsdb.Open(conf.dataDir, log.With(logger, "component", "tsdb"), reg, tsdbOpts, nil) + var ( + appendable storage.Appendable + queryable storage.Queryable + db *tsdb.DB + ) if err != nil { return errors.Wrap(err, "open TSDB") } + if conf.rwConfig.remoteWrite { + rw, err := remotewrite.NewStorage(logger, reg, "jfdlsfsl") + if err != nil { + return errors.Wrap(err, "open WAL storage") + } + appendable = rw + queryable = rw + } else { + db, err := tsdb.Open(conf.dataDir, log.With(logger, "component", "tsdb"), reg, tsdbOpts, nil) + if err != nil { + return errors.Wrap(err, "open TSDB") + } - level.Debug(logger).Log("msg", "removing storage lock file if any") - if err := removeLockfileIfAny(logger, conf.dataDir); err != nil { - return errors.Wrap(err, "remove storage lock files") - } + level.Debug(logger).Log("msg", "removing storage lock file if any") + if err := removeLockfileIfAny(logger, conf.dataDir); err != nil { + return errors.Wrap(err, "remove storage lock files") + } - { - done := make(chan struct{}) - g.Add(func() error { - <-done - return db.Close() - }, func(error) { - close(done) - }) + { + done := make(chan struct{}) + g.Add(func() error { + <-done + return db.Close() + }, func(error) { + close(done) + }) + } + appendable = db + queryable = db } // Build the Alertmanager clients. @@ -434,9 +467,9 @@ func runRule( rules.ManagerOptions{ NotifyFunc: notifyFunc, Logger: logger, - Appendable: db, + Appendable: appendable, ExternalURL: nil, - Queryable: db, + Queryable: queryable, ResendDelay: conf.resendDelay, }, queryFuncCreator(logger, queryClients, metrics.duplicatedQuery, metrics.ruleEvalWarnings, conf.query.httpMethod), diff --git a/test/e2e/e2ethanos/services.go b/test/e2e/e2ethanos/services.go index be8a56b3f3..668b92e952 100644 --- a/test/e2e/e2ethanos/services.go +++ b/test/e2e/e2ethanos/services.go @@ -514,6 +514,7 @@ func NewIngestingReceiver(e e2e.Environment, name string) (*e2e.InstrumentedRunn func NewRuler(e e2e.Environment, name, ruleSubDir string, amCfg []alert.AlertmanagerConfig, queryCfg []httpconfig.Config) (*e2e.InstrumentedRunnable, error) { dir := filepath.Join(e.SharedDir(), "data", "rule", name) container := filepath.Join(ContainerSharedDir, "data", "rule", name) + if err := os.MkdirAll(dir, 0750); err != nil { return nil, errors.Wrap(err, "create rule dir") } diff --git a/test/e2e/rule_test.go b/test/e2e/rule_test.go index c1756904b0..fc3cd1cd37 100644 --- a/test/e2e/rule_test.go +++ b/test/e2e/rule_test.go @@ -436,6 +436,74 @@ func TestRule(t *testing.T) { }) } +// TestStatelessRule verifies that Thanos Ruler can be run in stateless mode where it: +// evaluates rules against one/more Queriers. +// record the rule evaluations in a WAL +// the WAL gets replicated to a Receiver endpoint + +func TestStatelessRule(t *testing.T) { + t.Parallel() + + s, err := e2e.NewScenario("e2e_test_rule") + testutil.Ok(t, err) + t.Cleanup(e2ethanos.CleanScenario(t, s)) + + _, cancel := context.WithTimeout(context.Background(), 3*time.Minute) + t.Cleanup(cancel) + + // Prepare work dirs. 
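+	// These directories live under the scenario's shared dir so the ruler
+	// container can reach them via ContainerSharedDir.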
+ rulesSubDir := filepath.Join("rules") + rulesPath := filepath.Join(s.SharedDir(), rulesSubDir) + testutil.Ok(t, os.MkdirAll(rulesPath, os.ModePerm)) + createRuleFiles(t, rulesPath) + amTargetsSubDir := filepath.Join("rules_am_targets") + testutil.Ok(t, os.MkdirAll(filepath.Join(s.SharedDir(), amTargetsSubDir), os.ModePerm)) + queryTargetsSubDir := filepath.Join("rules_query_targets") + testutil.Ok(t, os.MkdirAll(filepath.Join(s.SharedDir(), queryTargetsSubDir), os.ModePerm)) + + am1, err := e2ethanos.NewAlertmanager(s.SharedDir(), "1") + testutil.Ok(t, err) + am2, err := e2ethanos.NewAlertmanager(s.SharedDir(), "2") + testutil.Ok(t, err) + testutil.Ok(t, s.StartAndWaitReady(am1, am2)) + + r, err := e2ethanos.NewRuler(s.SharedDir(), "1", rulesSubDir, []alert.AlertmanagerConfig{ + { + EndpointsConfig: http_util.EndpointsConfig{ + FileSDConfigs: []http_util.FileSDConfig{ + { + // FileSD which will be used to register discover dynamically am1. + Files: []string{filepath.Join(e2e.ContainerSharedDir, amTargetsSubDir, "*.yaml")}, + RefreshInterval: model.Duration(time.Second), + }, + }, + StaticAddresses: []string{ + am2.NetworkHTTPEndpoint(), + }, + Scheme: "http", + }, + Timeout: model.Duration(time.Second), + APIVersion: alert.APIv1, + }, + }, []query.Config{ + { + EndpointsConfig: http_util.EndpointsConfig{ + // We test Statically Addressed queries in other tests. Focus on FileSD here. + FileSDConfigs: []http_util.FileSDConfig{ + { + // FileSD which will be used to register discover dynamically q. + Files: []string{filepath.Join(e2e.ContainerSharedDir, queryTargetsSubDir, "*.yaml")}, + RefreshInterval: model.Duration(time.Second), + }, + }, + Scheme: "http", + }, + }, + }, true) + testutil.Ok(t, err) + testutil.Ok(t, s.StartAndWaitReady(r)) +} + // Test Ruler behavior on different storepb.PartialResponseStrategy when having partial response from single `failingStoreAPI`. func TestRulePartialResponse(t *testing.T) { t.Skip("TODO: Allow HTTP ports from binaries running on host to be accessible.") From 021cb36a2ab7fee894de54d190dd35df2fde1889 Mon Sep 17 00:00:00 2001 From: Michael Okoko Date: Tue, 4 May 2021 06:23:20 +0100 Subject: [PATCH 03/26] Setup fanout and related storages for stateless ruler Signed-off-by: Michael Okoko --- cmd/thanos/config.go | 6 +- cmd/thanos/rule.go | 23 +++++-- pkg/rules/remotewrite/remotewrite.go | 98 ++++++++++++++++++++++++++++ 3 files changed, 119 insertions(+), 8 deletions(-) create mode 100644 pkg/rules/remotewrite/remotewrite.go diff --git a/cmd/thanos/config.go b/cmd/thanos/config.go index f29e983b10..15fc0eed4f 100644 --- a/cmd/thanos/config.go +++ b/cmd/thanos/config.go @@ -226,13 +226,13 @@ func (ac *alertMgrConfig) registerFlag(cmd extflag.FlagClause) *alertMgrConfig { } type ruleRWConfig struct { - remoteWrite bool - remoteWriteConfig *extflag.PathOrContent + remoteWrite bool + configPath *extflag.PathOrContent } func (rc *ruleRWConfig) registerFlag(cmd extflag.FlagClause) *ruleRWConfig { cmd.Flag("remote-write", "If true, directs ruler to remote-write evaluated samples to the server configured by 'remote-write.config'."). BoolVar(&rc.remoteWrite) - rc.remoteWriteConfig = extflag.RegisterPathOrContent(cmd, "remote-write.config", "YAML config for the remote-write server where samples should be sent to. See https://thanos.io/tip/components/rule.md/#query-api", false) + rc.configPath = extflag.RegisterPathOrContent(cmd, "remote-write.config", "YAML config for the remote-write server where samples should be sent to. 
See https://thanos.io/tip/components/rule.md/#query-api", false) return rc } diff --git a/cmd/thanos/rule.go b/cmd/thanos/rule.go index 8145317a48..d3f04eaa7a 100644 --- a/cmd/thanos/rule.go +++ b/cmd/thanos/rule.go @@ -172,7 +172,7 @@ func registerRule(app *extkingpin.App) { // Parse and check remote-write config if it's enabled if conf.rwConfig.remoteWrite { - conf.rwConfigYAML, err = conf.rwConfig.remoteWriteConfig.Content() + conf.rwConfigYAML, err = conf.rwConfig.configPath.Content() if err != nil { return err } @@ -342,12 +342,25 @@ func runRule( return errors.Wrap(err, "open TSDB") } if conf.rwConfig.remoteWrite { - rw, err := remotewrite.NewStorage(logger, reg, "jfdlsfsl") + conf.rwConfigYAML, err = conf.rwConfig.configPath.Content() if err != nil { - return errors.Wrap(err, "open WAL storage") + return err + } + var rwCfg remotewrite.Config + if len(conf.rwConfigYAML) == 0 { + return errors.New("no --remote-write.config was given") + } + rwCfg, err = remotewrite.LoadRemoteWriteConfig(conf.rwConfigYAML) + if err != nil { + return err + } + walDir := filepath.Join(conf.dataDir, rwCfg.Name) + remoteStore, err := remotewrite.NewFanoutStorage(logger, reg, walDir, rwCfg) + if err != nil { + return errors.Wrap(err, "set up remote-write store for ruler") } - appendable = rw - queryable = rw + appendable = remoteStore + queryable = remoteStore } else { db, err := tsdb.Open(conf.dataDir, log.With(logger, "component", "tsdb"), reg, tsdbOpts, nil) if err != nil { diff --git a/pkg/rules/remotewrite/remotewrite.go b/pkg/rules/remotewrite/remotewrite.go new file mode 100644 index 0000000000..e007a86742 --- /dev/null +++ b/pkg/rules/remotewrite/remotewrite.go @@ -0,0 +1,98 @@ +package remotewrite + +import ( + "errors" + "fmt" + "github.com/go-kit/kit/log" + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/prometheus/config" + "github.com/prometheus/prometheus/scrape" + "github.com/prometheus/prometheus/storage" + "github.com/prometheus/prometheus/storage/remote" + "gopkg.in/yaml.v2" + "sync" + "time" +) + +var ( + managerMtx sync.Mutex +) + +type Config struct { + Name string `yaml:"name"` + RemoteStore *config.RemoteWriteConfig `yaml:"remote_write,omitempty"` + ScrapeConfig *config.ScrapeConfig `yaml:"scrape_config,omitempty"` +} + +func LoadRemoteWriteConfig(configYAML []byte) (Config, error) { + var cfg Config + if err := yaml.Unmarshal(configYAML, &cfg); err != nil { + return cfg, err + } + return cfg, nil +} + +func NewFanoutStorage(logger log.Logger, reg prometheus.Registerer, walDir string, rwConfig Config) (storage.Storage, error) { + walStore, err := NewStorage(logger, reg, walDir) + if err != nil { + return nil, err + } + scrapeMgr := &readyScrapeManager{} + remoteStore := remote.NewStorage(logger, reg, walStore.StartTime, walStore.Directory(), 1*time.Minute, scrapeMgr) + err = remoteStore.ApplyConfig(&config.Config{ + GlobalConfig: config.DefaultGlobalConfig, + RemoteWriteConfigs: []*config.RemoteWriteConfig{rwConfig.RemoteStore}, + }) + if err != nil { + return nil, fmt.Errorf("failed applying config to remote storage: %w", err) + } + fanoutStorage := storage.NewFanout(logger, walStore, remoteStore) + + scrapeManager := newScrapeManager(log.With(logger, "component", "scrape manager"), fanoutStorage) + err = scrapeManager.ApplyConfig(&config.Config{ + GlobalConfig: config.DefaultGlobalConfig, + ScrapeConfigs: []*config.ScrapeConfig{rwConfig.ScrapeConfig}, + }) + if err != nil { + return nil, fmt.Errorf("failed applying config to scrape manager: %w", err) + } + 
return fanoutStorage, nil +} + +func newScrapeManager(logger log.Logger, app storage.Appendable) *scrape.Manager { + // scrape.NewManager modifies a global variable in Prometheus. To avoid a + // data race of modifying that global, we lock a mutex here briefly. + managerMtx.Lock() + defer managerMtx.Unlock() + return scrape.NewManager(logger, app) +} + +// ErrNotReady is returned when the scrape manager is used but has not been +// initialized yet. +var ErrNotReady = errors.New("scrape manager not ready") + +// readyScrapeManager allows a scrape manager to be retrieved. Even if it's set at a later point in time. +type readyScrapeManager struct { + mtx sync.RWMutex + m *scrape.Manager +} + +// Set the scrape manager. +func (rm *readyScrapeManager) Set(m *scrape.Manager) { + rm.mtx.Lock() + defer rm.mtx.Unlock() + + rm.m = m +} + +// Get the scrape manager. If is not ready, return an error. +func (rm *readyScrapeManager) Get() (*scrape.Manager, error) { + rm.mtx.RLock() + defer rm.mtx.RUnlock() + + if rm.m != nil { + return rm.m, nil + } + + return nil, ErrNotReady +} From fd19217548e7bca51fb81a385d79c211a4103fc8 Mon Sep 17 00:00:00 2001 From: Michael Okoko Date: Tue, 4 May 2021 06:23:42 +0100 Subject: [PATCH 04/26] Optionally run ruler in stateless mode Signed-off-by: Michael Okoko --- scripts/quickstart.sh | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/scripts/quickstart.sh b/scripts/quickstart.sh index e99ab8fdd9..fb4395e9d2 100755 --- a/scripts/quickstart.sh +++ b/scripts/quickstart.sh @@ -245,6 +245,19 @@ QUERIER_JAEGER_CONFIG=$( EOF ) +REMOTE_WRITE_FLAGS="" +if [ -n "${STATELESS_RULER_ENABLED}" ]; then + cat >/data/rule-remote-write.yaml <<-EOF + name: "thanos-receivers" + remote_write: + url: "http://127.0.0.1:10908/api/v1/receive" + name: "receive-0" +EOF + + REMOTE_WRITE_FLAGS="--remote-write --remote-write.config-file data/rule-remote-write.yaml + " +fi + # Start Thanos Ruler. 
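+# REMOTE_WRITE_FLAGS stays empty unless STATELESS_RULER_ENABLED is set, in
+# which case the ruler remote-writes its evaluations to the local receive
+# endpoint at 127.0.0.1:10908.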
${THANOS_EXECUTABLE} rule \ --data-dir data/ \ @@ -256,6 +269,7 @@ ${THANOS_EXECUTABLE} rule \ --http-address="0.0.0.0:19999" \ --grpc-address="0.0.0.0:19998" \ --label 'rule="true"' \ + "${REMOTE_WRITE_FLAGS}" \ ${OBJSTORECFG} & STORES="${STORES} --store 127.0.0.1:19998" From 5814523b4b968e1d3c34082af09a2566ca156bea Mon Sep 17 00:00:00 2001 From: Michael Okoko Date: Mon, 10 May 2021 11:13:28 +0100 Subject: [PATCH 05/26] Set up tests and implementations for configuring remote-write for ruler Signed-off-by: Michael Okoko --- pkg/rules/remotewrite/remotewrite.go | 13 +++--- test/e2e/e2ethanos/services.go | 11 +++++ test/e2e/rule_test.go | 62 +++++++++++++++++++++------- 3 files changed, 65 insertions(+), 21 deletions(-) diff --git a/pkg/rules/remotewrite/remotewrite.go b/pkg/rules/remotewrite/remotewrite.go index e007a86742..c47e3f1968 100644 --- a/pkg/rules/remotewrite/remotewrite.go +++ b/pkg/rules/remotewrite/remotewrite.go @@ -3,6 +3,9 @@ package remotewrite import ( "errors" "fmt" + "sync" + "time" + "github.com/go-kit/kit/log" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/prometheus/config" @@ -10,18 +13,16 @@ import ( "github.com/prometheus/prometheus/storage" "github.com/prometheus/prometheus/storage/remote" "gopkg.in/yaml.v2" - "sync" - "time" ) var ( - managerMtx sync.Mutex + managerMtx sync.Mutex ) type Config struct { - Name string `yaml:"name"` - RemoteStore *config.RemoteWriteConfig `yaml:"remote_write,omitempty"` - ScrapeConfig *config.ScrapeConfig `yaml:"scrape_config,omitempty"` + Name string `yaml:"name"` + RemoteStore *config.RemoteWriteConfig `yaml:"remote_write,omitempty"` + ScrapeConfig *config.ScrapeConfig `yaml:"scrape_config,omitempty"` } func LoadRemoteWriteConfig(configYAML []byte) (Config, error) { diff --git a/test/e2e/e2ethanos/services.go b/test/e2e/e2ethanos/services.go index 668b92e952..770d21cc2e 100644 --- a/test/e2e/e2ethanos/services.go +++ b/test/e2e/e2ethanos/services.go @@ -6,6 +6,7 @@ package e2ethanos import ( "encoding/json" "fmt" + "github.com/thanos-io/thanos/pkg/rules/remotewrite" "io/ioutil" "os" "path/filepath" @@ -511,10 +512,20 @@ func NewIngestingReceiver(e e2e.Environment, name string) (*e2e.InstrumentedRunn return receiver, nil } +<<<<<<< HEAD func NewRuler(e e2e.Environment, name, ruleSubDir string, amCfg []alert.AlertmanagerConfig, queryCfg []httpconfig.Config) (*e2e.InstrumentedRunnable, error) { dir := filepath.Join(e.SharedDir(), "data", "rule", name) container := filepath.Join(ContainerSharedDir, "data", "rule", name) +======= +func NewTSDBRuler(sharedDir string, name string, ruleSubDir string, amCfg []alert.AlertmanagerConfig, queryCfg []query.Config) (*Service, error) { + return NewRuler(sharedDir, name, ruleSubDir, amCfg, queryCfg, false, remotewrite.Config{}) +} + +func NewRuler(sharedDir string, name string, ruleSubDir string, amCfg []alert.AlertmanagerConfig, queryCfg []query.Config, remoteWrite bool, remoteWriteCfg remotewrite.Config) (*Service, error) { + dir := filepath.Join(sharedDir, "data", "rule", name) + container := filepath.Join(e2e.ContainerSharedDir, "data", "rule", name) +>>>>>>> fc8f1035 (Set up tests and implementations for configuring remote-write for ruler) if err := os.MkdirAll(dir, 0750); err != nil { return nil, errors.Wrap(err, "create rule dir") } diff --git a/test/e2e/rule_test.go b/test/e2e/rule_test.go index fc3cd1cd37..6796329456 100644 --- a/test/e2e/rule_test.go +++ b/test/e2e/rule_test.go @@ -8,6 +8,7 @@ import ( "context" "encoding/json" "fmt" + 
"github.com/thanos-io/thanos/pkg/rules/remotewrite" "io/ioutil" "net/http" "os" @@ -441,32 +442,62 @@ func TestRule(t *testing.T) { // record the rule evaluations in a WAL // the WAL gets replicated to a Receiver endpoint -func TestStatelessRule(t *testing.T) { - t.Parallel() - s, err := e2e.NewScenario("e2e_test_rule") +func TestRule_CanRemoteWriteData(t *testing.T) { + s, err := e2e.NewScenario("e2e_test_rule_remote_write") testutil.Ok(t, err) t.Cleanup(e2ethanos.CleanScenario(t, s)) - _, cancel := context.WithTimeout(context.Background(), 3*time.Minute) - t.Cleanup(cancel) - - // Prepare work dirs. + // create rule files rulesSubDir := filepath.Join("rules") rulesPath := filepath.Join(s.SharedDir(), rulesSubDir) testutil.Ok(t, os.MkdirAll(rulesPath, os.ModePerm)) - createRuleFiles(t, rulesPath) - amTargetsSubDir := filepath.Join("rules_am_targets") + testAlertRuleRecordAbsentMetric := ` +record: test_absent_metric +expr: absent(nonexistent{job='thanos-receive'}) +labels: + severity: page +annotations: + summary: "tesemole hahaha" +` + createRuleFile(t, filepath.Join(rulesPath, "rw_rule-0.yaml"), testAlertRuleRecordAbsentMetric) + amTargetsSubDir := filepath.Join("rw_rules_am_targets") testutil.Ok(t, os.MkdirAll(filepath.Join(s.SharedDir(), amTargetsSubDir), os.ModePerm)) - queryTargetsSubDir := filepath.Join("rules_query_targets") + queryTargetsSubDir := filepath.Join("rw_rules_query_targets") testutil.Ok(t, os.MkdirAll(filepath.Join(s.SharedDir(), queryTargetsSubDir), os.ModePerm)) - am1, err := e2ethanos.NewAlertmanager(s.SharedDir(), "1") + receiver, err := e2ethanos.NewReceiver(s.SharedDir(), s.NetworkName(), "1", 1) + testutil.Ok(t, err) + testutil.Ok(t, s.StartAndWaitReady(receiver)) + + querier, err := e2ethanos.NewQuerier(s.SharedDir(), "1", []string{receiver.GRPCNetworkEndpoint()}, []string{receiver.GRPCNetworkEndpoint()}, nil, nil, nil, nil, "", "") + testutil.Ok(t, err) + testutil.Ok(t, s.StartAndWaitReady(querier)) + + ctx, cancel := context.WithTimeout(context.Background(), 1 * time.Minute) + t.Cleanup(cancel) + + // check that querier can talk to the receiver + t.Run("can query from receiver", func(t *testing.T) { + testAbsentAlert := "absent(nonexistent{job='thanos-receive'})" + queryAndAssertSeries(t, ctx, querier.HTTPEndpoint(), testAbsentAlert, promclient.QueryOptions{ + Deduplicate: false, + }, []model.Metric{ + { + "job": "thanos-receive", + }, + }) + }) + + am, err := e2ethanos.NewAlertmanager(s.SharedDir(), "1") testutil.Ok(t, err) - am2, err := e2ethanos.NewAlertmanager(s.SharedDir(), "2") + testutil.Ok(t, s.StartAndWaitReady(am)) + + rwURL := e2ethanos.RemoteWriteEndpoint(receiver.NetworkEndpoint(8081)) + fmt.Println(rwURL) testutil.Ok(t, err) - testutil.Ok(t, s.StartAndWaitReady(am1, am2)) + fmt.Println("AlertManager URL: ", am.HTTPPort()) r, err := e2ethanos.NewRuler(s.SharedDir(), "1", rulesSubDir, []alert.AlertmanagerConfig{ { EndpointsConfig: http_util.EndpointsConfig{ @@ -478,7 +509,7 @@ func TestStatelessRule(t *testing.T) { }, }, StaticAddresses: []string{ - am2.NetworkHTTPEndpoint(), + am.NetworkHTTPEndpoint(), }, Scheme: "http", }, @@ -499,9 +530,10 @@ func TestStatelessRule(t *testing.T) { Scheme: "http", }, }, - }, true) + }, true, remotewrite.Config{}) testutil.Ok(t, err) testutil.Ok(t, s.StartAndWaitReady(r)) + time.Sleep(5 * time.Minute) } // Test Ruler behavior on different storepb.PartialResponseStrategy when having partial response from single `failingStoreAPI`. 
From 7acfa2e00ec55cc629ef1aeba62dfd2dbc663000 Mon Sep 17 00:00:00 2001 From: Michael Okoko Date: Fri, 14 May 2021 12:48:54 +0100 Subject: [PATCH 06/26] Implement stub querier for WAL storage to fix nil pointer error Signed-off-by: Michael Okoko --- pkg/rules/remotewrite/remotewrite.go | 60 +-------------------- pkg/rules/remotewrite/wal.go | 22 ++++++++ test/e2e/rule_test.go | 78 ++++++++++++---------------- 3 files changed, 56 insertions(+), 104 deletions(-) diff --git a/pkg/rules/remotewrite/remotewrite.go b/pkg/rules/remotewrite/remotewrite.go index c47e3f1968..dd27cb139d 100644 --- a/pkg/rules/remotewrite/remotewrite.go +++ b/pkg/rules/remotewrite/remotewrite.go @@ -1,28 +1,20 @@ package remotewrite import ( - "errors" "fmt" - "sync" "time" "github.com/go-kit/kit/log" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/prometheus/config" - "github.com/prometheus/prometheus/scrape" "github.com/prometheus/prometheus/storage" "github.com/prometheus/prometheus/storage/remote" "gopkg.in/yaml.v2" ) -var ( - managerMtx sync.Mutex -) - type Config struct { Name string `yaml:"name"` RemoteStore *config.RemoteWriteConfig `yaml:"remote_write,omitempty"` - ScrapeConfig *config.ScrapeConfig `yaml:"scrape_config,omitempty"` } func LoadRemoteWriteConfig(configYAML []byte) (Config, error) { @@ -38,8 +30,7 @@ func NewFanoutStorage(logger log.Logger, reg prometheus.Registerer, walDir strin if err != nil { return nil, err } - scrapeMgr := &readyScrapeManager{} - remoteStore := remote.NewStorage(logger, reg, walStore.StartTime, walStore.Directory(), 1*time.Minute, scrapeMgr) + remoteStore := remote.NewStorage(logger, reg, walStore.StartTime, walStore.Directory(), 1*time.Minute, nil) err = remoteStore.ApplyConfig(&config.Config{ GlobalConfig: config.DefaultGlobalConfig, RemoteWriteConfigs: []*config.RemoteWriteConfig{rwConfig.RemoteStore}, @@ -48,52 +39,5 @@ func NewFanoutStorage(logger log.Logger, reg prometheus.Registerer, walDir strin return nil, fmt.Errorf("failed applying config to remote storage: %w", err) } fanoutStorage := storage.NewFanout(logger, walStore, remoteStore) - - scrapeManager := newScrapeManager(log.With(logger, "component", "scrape manager"), fanoutStorage) - err = scrapeManager.ApplyConfig(&config.Config{ - GlobalConfig: config.DefaultGlobalConfig, - ScrapeConfigs: []*config.ScrapeConfig{rwConfig.ScrapeConfig}, - }) - if err != nil { - return nil, fmt.Errorf("failed applying config to scrape manager: %w", err) - } return fanoutStorage, nil -} - -func newScrapeManager(logger log.Logger, app storage.Appendable) *scrape.Manager { - // scrape.NewManager modifies a global variable in Prometheus. To avoid a - // data race of modifying that global, we lock a mutex here briefly. - managerMtx.Lock() - defer managerMtx.Unlock() - return scrape.NewManager(logger, app) -} - -// ErrNotReady is returned when the scrape manager is used but has not been -// initialized yet. -var ErrNotReady = errors.New("scrape manager not ready") - -// readyScrapeManager allows a scrape manager to be retrieved. Even if it's set at a later point in time. -type readyScrapeManager struct { - mtx sync.RWMutex - m *scrape.Manager -} - -// Set the scrape manager. -func (rm *readyScrapeManager) Set(m *scrape.Manager) { - rm.mtx.Lock() - defer rm.mtx.Unlock() - - rm.m = m -} - -// Get the scrape manager. If is not ready, return an error. 
-func (rm *readyScrapeManager) Get() (*scrape.Manager, error) { - rm.mtx.RLock() - defer rm.mtx.RUnlock() - - if rm.m != nil { - return rm.m, nil - } - - return nil, ErrNotReady -} +} \ No newline at end of file diff --git a/pkg/rules/remotewrite/wal.go b/pkg/rules/remotewrite/wal.go index a7851cff09..40ac340960 100644 --- a/pkg/rules/remotewrite/wal.go +++ b/pkg/rules/remotewrite/wal.go @@ -357,6 +357,10 @@ func (w *Storage) Appender(_ context.Context) storage.Appender { return w.appenderPool.Get().(storage.Appender) } +func (w *Storage) Querier(ctx context.Context, mint, maxt int64) (storage.Querier, error) { + return &remoteWriteQueryable{}, nil +} + // StartTime always returns 0, nil. It is implemented for compatibility with // Prometheus, but is unused in the agent. func (*Storage) StartTime() (int64, error) { @@ -676,3 +680,21 @@ func (a *appender) Rollback() error { a.w.appenderPool.Put(a) return nil } + +type remoteWriteQueryable struct{} + +func (r *remoteWriteQueryable) LabelValues(name string, matchers ...*labels.Matcher) ([]string, storage.Warnings, error) { + return nil, nil, nil +} + +func (r *remoteWriteQueryable) LabelNames() ([]string, storage.Warnings, error) { + return nil, nil, nil +} + +func (r *remoteWriteQueryable) Close() error { + return nil +} + +func (r *remoteWriteQueryable) Select(sortSeries bool, hints *storage.SelectHints, matchers ...*labels.Matcher) storage.SeriesSet { + return nil +} diff --git a/test/e2e/rule_test.go b/test/e2e/rule_test.go index 6796329456..8cc9103553 100644 --- a/test/e2e/rule_test.go +++ b/test/e2e/rule_test.go @@ -8,9 +8,12 @@ import ( "context" "encoding/json" "fmt" + commoncfg "github.com/prometheus/common/config" + "github.com/prometheus/prometheus/config" "github.com/thanos-io/thanos/pkg/rules/remotewrite" "io/ioutil" "net/http" + "net/url" "os" "path/filepath" "testing" @@ -437,67 +440,43 @@ func TestRule(t *testing.T) { }) } -// TestStatelessRule verifies that Thanos Ruler can be run in stateless mode where it: -// evaluates rules against one/more Queriers. -// record the rule evaluations in a WAL -// the WAL gets replicated to a Receiver endpoint - - func TestRule_CanRemoteWriteData(t *testing.T) { + testAlertRuleRecordAbsentMetric := ` +groups: +- name: example_record_rules + interval: 100ms + rules: + - record: test_absent_metric + expr: absent(nonexistent{job='thanos-receive'}) +` + s, err := e2e.NewScenario("e2e_test_rule_remote_write") testutil.Ok(t, err) t.Cleanup(e2ethanos.CleanScenario(t, s)) - // create rule files + _, cancel := context.WithTimeout(context.Background(), 5*time.Minute) + t.Cleanup(cancel) + + // Prepare work dirs. 
rulesSubDir := filepath.Join("rules") rulesPath := filepath.Join(s.SharedDir(), rulesSubDir) testutil.Ok(t, os.MkdirAll(rulesPath, os.ModePerm)) - testAlertRuleRecordAbsentMetric := ` -record: test_absent_metric -expr: absent(nonexistent{job='thanos-receive'}) -labels: - severity: page -annotations: - summary: "tesemole hahaha" -` - createRuleFile(t, filepath.Join(rulesPath, "rw_rule-0.yaml"), testAlertRuleRecordAbsentMetric) - amTargetsSubDir := filepath.Join("rw_rules_am_targets") + createRuleFile(t, filepath.Join(rulesPath, fmt.Sprintf("rules-0.yaml")), testAlertRuleRecordAbsentMetric) + amTargetsSubDir := filepath.Join("rules_am_targets") testutil.Ok(t, os.MkdirAll(filepath.Join(s.SharedDir(), amTargetsSubDir), os.ModePerm)) - queryTargetsSubDir := filepath.Join("rw_rules_query_targets") + queryTargetsSubDir := filepath.Join("rules_query_targets") testutil.Ok(t, os.MkdirAll(filepath.Join(s.SharedDir(), queryTargetsSubDir), os.ModePerm)) - receiver, err := e2ethanos.NewReceiver(s.SharedDir(), s.NetworkName(), "1", 1) testutil.Ok(t, err) - testutil.Ok(t, s.StartAndWaitReady(receiver)) - - querier, err := e2ethanos.NewQuerier(s.SharedDir(), "1", []string{receiver.GRPCNetworkEndpoint()}, []string{receiver.GRPCNetworkEndpoint()}, nil, nil, nil, nil, "", "") + am2, err := e2ethanos.NewAlertmanager(s.SharedDir(), "2") testutil.Ok(t, err) - testutil.Ok(t, s.StartAndWaitReady(querier)) - - ctx, cancel := context.WithTimeout(context.Background(), 1 * time.Minute) - t.Cleanup(cancel) - - // check that querier can talk to the receiver - t.Run("can query from receiver", func(t *testing.T) { - testAbsentAlert := "absent(nonexistent{job='thanos-receive'})" - queryAndAssertSeries(t, ctx, querier.HTTPEndpoint(), testAbsentAlert, promclient.QueryOptions{ - Deduplicate: false, - }, []model.Metric{ - { - "job": "thanos-receive", - }, - }) - }) + testutil.Ok(t, s.StartAndWaitReady(am2)) - am, err := e2ethanos.NewAlertmanager(s.SharedDir(), "1") - testutil.Ok(t, err) - testutil.Ok(t, s.StartAndWaitReady(am)) - rwURL := e2ethanos.RemoteWriteEndpoint(receiver.NetworkEndpoint(8081)) - fmt.Println(rwURL) + //todo: replace am2 with actual receiver + rwURL, err := url.Parse(e2ethanos.RemoteWriteEndpoint(am2.NetworkHTTPEndpoint())) testutil.Ok(t, err) - fmt.Println("AlertManager URL: ", am.HTTPPort()) r, err := e2ethanos.NewRuler(s.SharedDir(), "1", rulesSubDir, []alert.AlertmanagerConfig{ { EndpointsConfig: http_util.EndpointsConfig{ @@ -509,7 +488,7 @@ annotations: }, }, StaticAddresses: []string{ - am.NetworkHTTPEndpoint(), + am2.NetworkHTTPEndpoint(), }, Scheme: "http", }, @@ -530,9 +509,16 @@ annotations: Scheme: "http", }, }, - }, true, remotewrite.Config{}) + }, true, remotewrite.Config{ + Name: "ruler-rw-receivers", + RemoteStore: &config.RemoteWriteConfig{ + URL: &commoncfg.URL{URL: rwURL}, + Name: "thanos-receiver", + }, + }) testutil.Ok(t, err) testutil.Ok(t, s.StartAndWaitReady(r)) + time.Sleep(5 * time.Minute) } From 2d7d6d96fcebd19e9fcde226eff83e35f47e4506 Mon Sep 17 00:00:00 2001 From: Michael Okoko Date: Fri, 14 May 2021 14:35:35 +0100 Subject: [PATCH 07/26] Setup e2e test for stateless ruler Signed-off-by: Michael Okoko --- test/e2e/rule_test.go | 71 +++++++++++++++++++++++++++++++++++++------ 1 file changed, 61 insertions(+), 10 deletions(-) diff --git a/test/e2e/rule_test.go b/test/e2e/rule_test.go index 8cc9103553..cb18fbf50b 100644 --- a/test/e2e/rule_test.go +++ b/test/e2e/rule_test.go @@ -441,6 +441,8 @@ func TestRule(t *testing.T) { } func TestRule_CanRemoteWriteData(t *testing.T) { + 
t.Parallel() + testAlertRuleRecordAbsentMetric := ` groups: - name: example_record_rules @@ -454,7 +456,7 @@ groups: testutil.Ok(t, err) t.Cleanup(e2ethanos.CleanScenario(t, s)) - _, cancel := context.WithTimeout(context.Background(), 5*time.Minute) + ctx, cancel := context.WithTimeout(context.Background(), 3*time.Minute) t.Cleanup(cancel) // Prepare work dirs. @@ -467,16 +469,19 @@ groups: queryTargetsSubDir := filepath.Join("rules_query_targets") testutil.Ok(t, os.MkdirAll(filepath.Join(s.SharedDir(), queryTargetsSubDir), os.ModePerm)) - testutil.Ok(t, err) - am2, err := e2ethanos.NewAlertmanager(s.SharedDir(), "2") - testutil.Ok(t, err) - testutil.Ok(t, s.StartAndWaitReady(am2)) - - //todo: replace am2 with actual receiver - rwURL, err := url.Parse(e2ethanos.RemoteWriteEndpoint(am2.NetworkHTTPEndpoint())) + am, err := e2ethanos.NewAlertmanager(s.SharedDir(), "1") testutil.Ok(t, err) + testutil.Ok(t, s.StartAndWaitReady(am)) + receiver, err := e2ethanos.NewReceiver(s.SharedDir(), s.NetworkName(), "1", 1) + testutil.Ok(t, err) + testutil.Ok(t, s.StartAndWaitReady(receiver)) + rwURL, err := url.Parse(e2ethanos.RemoteWriteEndpoint(receiver.NetworkEndpoint(8081))) + testutil.Ok(t, err) + querier, err := e2ethanos.NewQuerierBuilder(s.SharedDir(), "1", []string{receiver.GRPCNetworkEndpoint()}).Build() + testutil.Ok(t, err) + testutil.Ok(t, s.StartAndWaitReady(querier)) r, err := e2ethanos.NewRuler(s.SharedDir(), "1", rulesSubDir, []alert.AlertmanagerConfig{ { EndpointsConfig: http_util.EndpointsConfig{ @@ -488,7 +493,7 @@ groups: }, }, StaticAddresses: []string{ - am2.NetworkHTTPEndpoint(), + am.NetworkHTTPEndpoint(), }, Scheme: "http", }, @@ -519,7 +524,53 @@ groups: testutil.Ok(t, err) testutil.Ok(t, s.StartAndWaitReady(r)) - time.Sleep(5 * time.Minute) + writeTargets(t, filepath.Join(s.SharedDir(), queryTargetsSubDir, "targets.yaml"), querier.NetworkHTTPEndpoint()) + writeTargets(t, filepath.Join(s.SharedDir(), amTargetsSubDir, "targets.yaml"), am.NetworkHTTPEndpoint()) + + t.Run("inject samples into receiver to reset its StoreAPI MinTime", func(t *testing.T) { + // inject data into receiver to reset its minTime (so it doesn't get filtered out by store) + // the sample is injected through a prometheus instance that remote_writes samples into the receiver node + prom, _, err := e2ethanos.NewPrometheus(s.SharedDir(), "1", defaultPromConfig("prom", 0, e2ethanos.RemoteWriteEndpoint(receiver.NetworkEndpoint(8081)), ""), e2ethanos.DefaultPrometheusImage()) + testutil.Ok(t, err) + testutil.Ok(t, s.StartAndWaitReady(prom)) + + queryAndAssertSeries(t, ctx, querier.HTTPEndpoint(), queryUpWithoutInstance, promclient.QueryOptions{ + Deduplicate: false, + }, []model.Metric{ + { + "job":"myself", + "prometheus": "prom", + "receive": "1", + "replica": "0", + "tenant_id": "default-tenant", + }, + }) + }) + + t.Run("query can contact from receiver", func(t *testing.T) { + testAbsentQuery := "absent(nonexistent{job='thanos-receive'})" + queryAndAssertSeries(t, ctx, querier.HTTPEndpoint(), testAbsentQuery, promclient.QueryOptions{ + Deduplicate: false, + }, []model.Metric{ + { + "job": "thanos-receive", + }, + }) + }) + + t.Run("can fetch remote-written samples from receiver", func(t *testing.T) { + testRecordedSamples := "test_absent_metric" + queryAndAssertSeries(t, ctx, querier.HTTPEndpoint(), testRecordedSamples, promclient.QueryOptions{ + Deduplicate: false, + }, []model.Metric{ + { + "__name__": "test_absent_metric", + "job":"thanos-receive", + "receive": "1", + "tenant_id": "default-tenant", + }, + }) 
+ }) } // Test Ruler behavior on different storepb.PartialResponseStrategy when having partial response from single `failingStoreAPI`. From c2f999c84fd6789ffa103e90ee6624cd01988684 Mon Sep 17 00:00:00 2001 From: Michael Okoko Date: Sat, 22 May 2021 13:34:45 +0100 Subject: [PATCH 08/26] Add copied code commentary to remotewrite packages Signed-off-by: Michael Okoko --- pkg/rules/remotewrite/remotewrite.go | 8 +++----- pkg/rules/remotewrite/series.go | 4 ++-- pkg/rules/remotewrite/util.go | 2 ++ pkg/rules/remotewrite/wal.go | 2 ++ pkg/rules/remotewrite/wal_test.go | 2 ++ 5 files changed, 11 insertions(+), 7 deletions(-) diff --git a/pkg/rules/remotewrite/remotewrite.go b/pkg/rules/remotewrite/remotewrite.go index dd27cb139d..7a9dc841e4 100644 --- a/pkg/rules/remotewrite/remotewrite.go +++ b/pkg/rules/remotewrite/remotewrite.go @@ -31,13 +31,11 @@ func NewFanoutStorage(logger log.Logger, reg prometheus.Registerer, walDir strin return nil, err } remoteStore := remote.NewStorage(logger, reg, walStore.StartTime, walStore.Directory(), 1*time.Minute, nil) - err = remoteStore.ApplyConfig(&config.Config{ + if err := remoteStore.ApplyConfig(&config.Config{ GlobalConfig: config.DefaultGlobalConfig, RemoteWriteConfigs: []*config.RemoteWriteConfig{rwConfig.RemoteStore}, - }) - if err != nil { + }); err != nil { return nil, fmt.Errorf("failed applying config to remote storage: %w", err) } - fanoutStorage := storage.NewFanout(logger, walStore, remoteStore) - return fanoutStorage, nil + return storage.NewFanout(logger, walStore, remoteStore), nil } \ No newline at end of file diff --git a/pkg/rules/remotewrite/series.go b/pkg/rules/remotewrite/series.go index f75d415ebb..d82a31e1a2 100644 --- a/pkg/rules/remotewrite/series.go +++ b/pkg/rules/remotewrite/series.go @@ -1,3 +1,5 @@ +// This is copied from https://github.com/grafana/agent/blob/a23bd5cf27c2ac99695b7449d38fb12444941a1c/pkg/prom/wal/series.go +// TODO(idoqo): Migrate to prometheus package when https://github.com/prometheus/prometheus/pull/8785 is ready. package remotewrite import ( @@ -70,8 +72,6 @@ func (m seriesHashmap) del(hash uint64, ref uint64) { for _, s := range m[hash] { if s.ref != ref { rem = append(rem, s) - } else { - //intern.ReleaseLabels(intern.Global, s.lset) } } if len(rem) == 0 { diff --git a/pkg/rules/remotewrite/util.go b/pkg/rules/remotewrite/util.go index 3a1e593e0f..b6ddb6b804 100644 --- a/pkg/rules/remotewrite/util.go +++ b/pkg/rules/remotewrite/util.go @@ -1,3 +1,5 @@ +// This is copied from https://github.com/grafana/agent/blob/a23bd5cf27c2ac99695b7449d38fb12444941a1c/pkg/prom/wal/util.go +// TODO(idoqo): Migrate to prometheus package when https://github.com/prometheus/prometheus/pull/8785 is ready. package remotewrite import ( diff --git a/pkg/rules/remotewrite/wal.go b/pkg/rules/remotewrite/wal.go index 40ac340960..6aad91b32f 100644 --- a/pkg/rules/remotewrite/wal.go +++ b/pkg/rules/remotewrite/wal.go @@ -1,3 +1,5 @@ +// This is copied from https://github.com/grafana/agent/blob/a23bd5cf27c2ac99695b7449d38fb12444941a1c/pkg/prom/wal/wal.go +// TODO(idoqo): Migrate to prometheus package when https://github.com/prometheus/prometheus/pull/8785 is ready. 
package remotewrite import ( diff --git a/pkg/rules/remotewrite/wal_test.go b/pkg/rules/remotewrite/wal_test.go index 84787544eb..d1f9eb684a 100644 --- a/pkg/rules/remotewrite/wal_test.go +++ b/pkg/rules/remotewrite/wal_test.go @@ -1,3 +1,5 @@ +// This is copied from https://github.com/grafana/agent/blob/a23bd5cf27c2ac99695b7449d38fb12444941a1c/pkg/prom/wal/wal_test.go +// TODO(idoqo): Migrate to prometheus package when https://github.com/prometheus/prometheus/pull/8785 is ready. package remotewrite import ( From 31a75f2cd712db60ac61c9cb14e062b08351a448 Mon Sep 17 00:00:00 2001 From: Michael Okoko Date: Mon, 24 May 2021 16:06:00 +0100 Subject: [PATCH 09/26] Use static addresses for am and querier Signed-off-by: Michael Okoko --- pkg/rules/remotewrite/remotewrite.go | 6 +-- test/e2e/e2ethanos/services.go | 11 ----- test/e2e/rule_test.go | 62 +++++++++++----------------- 3 files changed, 27 insertions(+), 52 deletions(-) diff --git a/pkg/rules/remotewrite/remotewrite.go b/pkg/rules/remotewrite/remotewrite.go index 7a9dc841e4..8e258fcff3 100644 --- a/pkg/rules/remotewrite/remotewrite.go +++ b/pkg/rules/remotewrite/remotewrite.go @@ -13,8 +13,8 @@ import ( ) type Config struct { - Name string `yaml:"name"` - RemoteStore *config.RemoteWriteConfig `yaml:"remote_write,omitempty"` + Name string `yaml:"name"` + RemoteStore *config.RemoteWriteConfig `yaml:"remote_write,omitempty"` } func LoadRemoteWriteConfig(configYAML []byte) (Config, error) { @@ -38,4 +38,4 @@ func NewFanoutStorage(logger log.Logger, reg prometheus.Registerer, walDir strin return nil, fmt.Errorf("failed applying config to remote storage: %w", err) } return storage.NewFanout(logger, walStore, remoteStore), nil -} \ No newline at end of file +} diff --git a/test/e2e/e2ethanos/services.go b/test/e2e/e2ethanos/services.go index 770d21cc2e..668b92e952 100644 --- a/test/e2e/e2ethanos/services.go +++ b/test/e2e/e2ethanos/services.go @@ -6,7 +6,6 @@ package e2ethanos import ( "encoding/json" "fmt" - "github.com/thanos-io/thanos/pkg/rules/remotewrite" "io/ioutil" "os" "path/filepath" @@ -512,20 +511,10 @@ func NewIngestingReceiver(e e2e.Environment, name string) (*e2e.InstrumentedRunn return receiver, nil } -<<<<<<< HEAD func NewRuler(e e2e.Environment, name, ruleSubDir string, amCfg []alert.AlertmanagerConfig, queryCfg []httpconfig.Config) (*e2e.InstrumentedRunnable, error) { dir := filepath.Join(e.SharedDir(), "data", "rule", name) container := filepath.Join(ContainerSharedDir, "data", "rule", name) -======= -func NewTSDBRuler(sharedDir string, name string, ruleSubDir string, amCfg []alert.AlertmanagerConfig, queryCfg []query.Config) (*Service, error) { - return NewRuler(sharedDir, name, ruleSubDir, amCfg, queryCfg, false, remotewrite.Config{}) -} - -func NewRuler(sharedDir string, name string, ruleSubDir string, amCfg []alert.AlertmanagerConfig, queryCfg []query.Config, remoteWrite bool, remoteWriteCfg remotewrite.Config) (*Service, error) { - dir := filepath.Join(sharedDir, "data", "rule", name) - container := filepath.Join(e2e.ContainerSharedDir, "data", "rule", name) ->>>>>>> fc8f1035 (Set up tests and implementations for configuring remote-write for ruler) if err := os.MkdirAll(dir, 0750); err != nil { return nil, errors.Wrap(err, "create rule dir") } diff --git a/test/e2e/rule_test.go b/test/e2e/rule_test.go index cb18fbf50b..66d6aa3101 100644 --- a/test/e2e/rule_test.go +++ b/test/e2e/rule_test.go @@ -8,12 +8,10 @@ import ( "context" "encoding/json" "fmt" - commoncfg "github.com/prometheus/common/config" - 
"github.com/prometheus/prometheus/config" - "github.com/thanos-io/thanos/pkg/rules/remotewrite" "io/ioutil" "net/http" "net/url" + "net/http/httptest" "os" "path/filepath" "testing" @@ -440,10 +438,13 @@ func TestRule(t *testing.T) { }) } +// TestRule_CanRemoteWriteData checks that Thanos Ruler can be run in stateless mode +// where it remote_writes rule evaluations to a Prometheus remote-write endpoint (typically +// a Thanos Receiver). func TestRule_CanRemoteWriteData(t *testing.T) { t.Parallel() - testAlertRuleRecordAbsentMetric := ` + testRuleRecordAbsentMetric := ` groups: - name: example_record_rules interval: 100ms @@ -459,16 +460,10 @@ groups: ctx, cancel := context.WithTimeout(context.Background(), 3*time.Minute) t.Cleanup(cancel) - // Prepare work dirs. - rulesSubDir := filepath.Join("rules") + rulesSubDir := "rules" rulesPath := filepath.Join(s.SharedDir(), rulesSubDir) testutil.Ok(t, os.MkdirAll(rulesPath, os.ModePerm)) - createRuleFile(t, filepath.Join(rulesPath, fmt.Sprintf("rules-0.yaml")), testAlertRuleRecordAbsentMetric) - amTargetsSubDir := filepath.Join("rules_am_targets") - testutil.Ok(t, os.MkdirAll(filepath.Join(s.SharedDir(), amTargetsSubDir), os.ModePerm)) - queryTargetsSubDir := filepath.Join("rules_query_targets") - testutil.Ok(t, os.MkdirAll(filepath.Join(s.SharedDir(), queryTargetsSubDir), os.ModePerm)) - + createRuleFile(t, filepath.Join(rulesPath, "rules-0.yaml"), testRuleRecordAbsentMetric) am, err := e2ethanos.NewAlertmanager(s.SharedDir(), "1") testutil.Ok(t, err) @@ -485,13 +480,6 @@ groups: r, err := e2ethanos.NewRuler(s.SharedDir(), "1", rulesSubDir, []alert.AlertmanagerConfig{ { EndpointsConfig: http_util.EndpointsConfig{ - FileSDConfigs: []http_util.FileSDConfig{ - { - // FileSD which will be used to register discover dynamically am1. - Files: []string{filepath.Join(e2e.ContainerSharedDir, amTargetsSubDir, "*.yaml")}, - RefreshInterval: model.Duration(time.Second), - }, - }, StaticAddresses: []string{ am.NetworkHTTPEndpoint(), }, @@ -503,30 +491,22 @@ groups: }, []query.Config{ { EndpointsConfig: http_util.EndpointsConfig{ - // We test Statically Addressed queries in other tests. Focus on FileSD here. - FileSDConfigs: []http_util.FileSDConfig{ - { - // FileSD which will be used to register discover dynamically q. 
- Files: []string{filepath.Join(e2e.ContainerSharedDir, queryTargetsSubDir, "*.yaml")}, - RefreshInterval: model.Duration(time.Second), - }, + StaticAddresses: []string{ + querier.NetworkHTTPEndpoint(), }, Scheme: "http", }, }, - }, true, remotewrite.Config{ + }, true, remotewrite.Config{ Name: "ruler-rw-receivers", RemoteStore: &config.RemoteWriteConfig{ - URL: &commoncfg.URL{URL: rwURL}, + URL: &commoncfg.URL{URL: rwURL}, Name: "thanos-receiver", }, }) testutil.Ok(t, err) testutil.Ok(t, s.StartAndWaitReady(r)) - writeTargets(t, filepath.Join(s.SharedDir(), queryTargetsSubDir, "targets.yaml"), querier.NetworkHTTPEndpoint()) - writeTargets(t, filepath.Join(s.SharedDir(), amTargetsSubDir, "targets.yaml"), am.NetworkHTTPEndpoint()) - t.Run("inject samples into receiver to reset its StoreAPI MinTime", func(t *testing.T) { // inject data into receiver to reset its minTime (so it doesn't get filtered out by store) // the sample is injected through a prometheus instance that remote_writes samples into the receiver node @@ -538,11 +518,11 @@ groups: Deduplicate: false, }, []model.Metric{ { - "job":"myself", + "job": "myself", "prometheus": "prom", - "receive": "1", - "replica": "0", - "tenant_id": "default-tenant", + "receive": "1", + "replica": "0", + "tenant_id": "default-tenant", }, }) }) @@ -564,15 +544,21 @@ groups: Deduplicate: false, }, []model.Metric{ { - "__name__": "test_absent_metric", - "job":"thanos-receive", - "receive": "1", + "__name__": "test_absent_metric", + "job": "thanos-receive", + "receive": "1", "tenant_id": "default-tenant", }, }) }) } +// TestRule_CanPersistWALData checks that in stateless mode, Thanos Ruler can persist rule evaluations +// which couldn't be sent to the remote write endpoint (e.g because receiver isn't available). +func TestRule_CanPersistWALData(t *testing.T) { + //TODO: Implement test with unavailable remote-write endpoint(receiver) +} + // Test Ruler behavior on different storepb.PartialResponseStrategy when having partial response from single `failingStoreAPI`. func TestRulePartialResponse(t *testing.T) { t.Skip("TODO: Allow HTTP ports from binaries running on host to be accessible.") From 30b081c62d12d28694dfe17a7c2663ad8229703b Mon Sep 17 00:00:00 2001 From: Michael Okoko Date: Tue, 1 Jun 2021 02:10:05 +0100 Subject: [PATCH 10/26] Remove need for separate remote-write flag for stateless ruler This removes the need to pass a separate `remote-write` flag to ruler to enable stateless mode. Instead, we now check if a remote-write config is provided and automatically enables stateless mode based off that. Ruler test is also cleaned up to remove unnecessary tests (i.e those that have been performed by other e2e suites). Signed-off-by: Michael Okoko --- cmd/thanos/config.go | 7 ++----- cmd/thanos/rule.go | 10 ++++------ test/e2e/rule_test.go | 38 +++----------------------------------- 3 files changed, 9 insertions(+), 46 deletions(-) diff --git a/cmd/thanos/config.go b/cmd/thanos/config.go index 15fc0eed4f..5760984b3d 100644 --- a/cmd/thanos/config.go +++ b/cmd/thanos/config.go @@ -226,13 +226,10 @@ func (ac *alertMgrConfig) registerFlag(cmd extflag.FlagClause) *alertMgrConfig { } type ruleRWConfig struct { - remoteWrite bool - configPath *extflag.PathOrContent + configPath *extflag.PathOrContent } func (rc *ruleRWConfig) registerFlag(cmd extflag.FlagClause) *ruleRWConfig { - cmd.Flag("remote-write", "If true, directs ruler to remote-write evaluated samples to the server configured by 'remote-write.config'."). 
- BoolVar(&rc.remoteWrite) - rc.configPath = extflag.RegisterPathOrContent(cmd, "remote-write.config", "YAML config for the remote-write server where samples should be sent to. See https://thanos.io/tip/components/rule.md/#query-api", false) + rc.configPath = extflag.RegisterPathOrContent(cmd, "remote-write.config", "YAML config for the remote-write server where samples should be sent to. This automatically enables stateless mode for ruler and no series will be stored in the ruler's TSDB. See https://thanos.io/tip/components/rule.md/#query-api", false) return rc } diff --git a/cmd/thanos/rule.go b/cmd/thanos/rule.go index d3f04eaa7a..f76beb7662 100644 --- a/cmd/thanos/rule.go +++ b/cmd/thanos/rule.go @@ -170,8 +170,8 @@ func registerRule(app *extkingpin.App) { return errors.New("--query/--query.sd-files and --query.config* parameters cannot be defined at the same time") } - // Parse and check remote-write config if it's enabled - if conf.rwConfig.remoteWrite { + // Parse and check remote-write config and enable stateless mode for ruler. + if conf.rwConfig.configPath != nil { conf.rwConfigYAML, err = conf.rwConfig.configPath.Content() if err != nil { return err @@ -338,10 +338,8 @@ func runRule( queryable storage.Queryable db *tsdb.DB ) - if err != nil { - return errors.Wrap(err, "open TSDB") - } - if conf.rwConfig.remoteWrite { + + if conf.rwConfig.configPath != nil { conf.rwConfigYAML, err = conf.rwConfig.configPath.Content() if err != nil { return err diff --git a/test/e2e/rule_test.go b/test/e2e/rule_test.go index 66d6aa3101..cb227e0ec3 100644 --- a/test/e2e/rule_test.go +++ b/test/e2e/rule_test.go @@ -11,7 +11,6 @@ import ( "io/ioutil" "net/http" "net/url" - "net/http/httptest" "os" "path/filepath" "testing" @@ -477,7 +476,7 @@ groups: querier, err := e2ethanos.NewQuerierBuilder(s.SharedDir(), "1", []string{receiver.GRPCNetworkEndpoint()}).Build() testutil.Ok(t, err) testutil.Ok(t, s.StartAndWaitReady(querier)) - r, err := e2ethanos.NewRuler(s.SharedDir(), "1", rulesSubDir, []alert.AlertmanagerConfig{ + r, err := e2ethanos.NewStatelessRuler(s.SharedDir(), "1", rulesSubDir, []alert.AlertmanagerConfig{ { EndpointsConfig: http_util.EndpointsConfig{ StaticAddresses: []string{ @@ -497,47 +496,16 @@ groups: Scheme: "http", }, }, - }, true, remotewrite.Config{ + }, &remotewrite.Config{ Name: "ruler-rw-receivers", RemoteStore: &config.RemoteWriteConfig{ - URL: &commoncfg.URL{URL: rwURL}, + URL: &common_cfg.URL{URL: rwURL}, Name: "thanos-receiver", }, }) testutil.Ok(t, err) testutil.Ok(t, s.StartAndWaitReady(r)) - t.Run("inject samples into receiver to reset its StoreAPI MinTime", func(t *testing.T) { - // inject data into receiver to reset its minTime (so it doesn't get filtered out by store) - // the sample is injected through a prometheus instance that remote_writes samples into the receiver node - prom, _, err := e2ethanos.NewPrometheus(s.SharedDir(), "1", defaultPromConfig("prom", 0, e2ethanos.RemoteWriteEndpoint(receiver.NetworkEndpoint(8081)), ""), e2ethanos.DefaultPrometheusImage()) - testutil.Ok(t, err) - testutil.Ok(t, s.StartAndWaitReady(prom)) - - queryAndAssertSeries(t, ctx, querier.HTTPEndpoint(), queryUpWithoutInstance, promclient.QueryOptions{ - Deduplicate: false, - }, []model.Metric{ - { - "job": "myself", - "prometheus": "prom", - "receive": "1", - "replica": "0", - "tenant_id": "default-tenant", - }, - }) - }) - - t.Run("query can contact from receiver", func(t *testing.T) { - testAbsentQuery := "absent(nonexistent{job='thanos-receive'})" - queryAndAssertSeries(t, ctx, 
querier.HTTPEndpoint(), testAbsentQuery, promclient.QueryOptions{ - Deduplicate: false, - }, []model.Metric{ - { - "job": "thanos-receive", - }, - }) - }) - t.Run("can fetch remote-written samples from receiver", func(t *testing.T) { testRecordedSamples := "test_absent_metric" queryAndAssertSeries(t, ctx, querier.HTTPEndpoint(), testRecordedSamples, promclient.QueryOptions{ From d567fafcab8eba66b6e48264d3e155f246022dc2 Mon Sep 17 00:00:00 2001 From: Michael Okoko Date: Mon, 7 Jun 2021 12:21:10 +0100 Subject: [PATCH 11/26] Generate docs for stateless ruler flags and fix tests Signed-off-by: Michael Okoko --- cmd/thanos/config.go | 9 --------- cmd/thanos/rule.go | 30 ++++++++++------------------ docs/components/rule.md | 19 +++++++++++++++++- pkg/rules/remotewrite/remotewrite.go | 9 +++++++-- pkg/rules/remotewrite/util.go | 5 +++++ scripts/quickstart.sh | 3 +-- test/e2e/rule_test.go | 12 +++++------ 7 files changed, 47 insertions(+), 40 deletions(-) diff --git a/cmd/thanos/config.go b/cmd/thanos/config.go index 5760984b3d..3770389a68 100644 --- a/cmd/thanos/config.go +++ b/cmd/thanos/config.go @@ -224,12 +224,3 @@ func (ac *alertMgrConfig) registerFlag(cmd extflag.FlagClause) *alertMgrConfig { ac.alertRelabelConfigPath = extflag.RegisterPathOrContent(cmd, "alert.relabel-config", "YAML file that contains alert relabelling configuration.", extflag.WithEnvSubstitution()) return ac } - -type ruleRWConfig struct { - configPath *extflag.PathOrContent -} - -func (rc *ruleRWConfig) registerFlag(cmd extflag.FlagClause) *ruleRWConfig { - rc.configPath = extflag.RegisterPathOrContent(cmd, "remote-write.config", "YAML config for the remote-write server where samples should be sent to. This automatically enables stateless mode for ruler and no series will be stored in the ruler's TSDB. See https://thanos.io/tip/components/rule.md/#query-api", false) - return rc -} diff --git a/cmd/thanos/rule.go b/cmd/thanos/rule.go index f76beb7662..65c93ffcf2 100644 --- a/cmd/thanos/rule.go +++ b/cmd/thanos/rule.go @@ -77,8 +77,7 @@ type ruleConfig struct { alertQueryURL *url.URL alertRelabelConfigYAML []byte - rwConfig ruleRWConfig - rwConfigYAML []byte + rwConfig *extflag.PathOrContent resendDelay time.Duration evalInterval time.Duration @@ -95,7 +94,6 @@ func (rc *ruleConfig) registerFlag(cmd extkingpin.FlagClause) { rc.shipper.registerFlag(cmd) rc.query.registerFlag(cmd) rc.alertmgr.registerFlag(cmd) - rc.rwConfig.registerFlag(cmd) } // registerRule registers a rule command. @@ -123,6 +121,8 @@ func registerRule(app *extkingpin.App) { cmd.Flag("eval-interval", "The default evaluation interval to use."). Default("30s").DurationVar(&conf.evalInterval) + conf.rwConfig = extflag.RegisterPathOrContent(cmd, "remote-write.config", "YAML config for the remote-write server where samples should be sent to. This automatically enables stateless mode for ruler and no series will be stored in the ruler's TSDB. If an empty config (or file) is provided, the flag is ignored and ruler is run with its own TSDB.", extflag.WithEnvSubstitution()) + reqLogDecision := cmd.Flag("log.request.decision", "Deprecation Warning - This flag would be soon deprecated, and replaced with `request.logging-config`. Request Logging for logging the start and end of requests. By default this flag is disabled. LogFinishCall: Logs the finish call of the requests. LogStartAndFinishCall: Logs the start and finish call of the requests. 
NoLogCall: Disable request logging.").Default("").Enum("NoLogCall", "LogFinishCall", "LogStartAndFinishCall", "") conf.objStoreConfig = extkingpin.RegisterCommonObjStoreFlags(cmd, "", false) @@ -170,14 +170,6 @@ func registerRule(app *extkingpin.App) { return errors.New("--query/--query.sd-files and --query.config* parameters cannot be defined at the same time") } - // Parse and check remote-write config and enable stateless mode for ruler. - if conf.rwConfig.configPath != nil { - conf.rwConfigYAML, err = conf.rwConfig.configPath.Content() - if err != nil { - return err - } - } - // Parse and check alerting configuration. conf.alertmgrsConfigYAML, err = conf.alertmgr.configPath.Content() if err != nil { @@ -339,16 +331,14 @@ func runRule( db *tsdb.DB ) - if conf.rwConfig.configPath != nil { - conf.rwConfigYAML, err = conf.rwConfig.configPath.Content() - if err != nil { - return err - } + rwCfgYAML, err := conf.rwConfig.Content() + if err != nil { + return err + } + + if len(rwCfgYAML) > 0 { var rwCfg remotewrite.Config - if len(conf.rwConfigYAML) == 0 { - return errors.New("no --remote-write.config was given") - } - rwCfg, err = remotewrite.LoadRemoteWriteConfig(conf.rwConfigYAML) + rwCfg, err = remotewrite.LoadRemoteWriteConfig(rwCfgYAML) if err != nil { return err } diff --git a/docs/components/rule.md b/docs/components/rule.md index 8e9da7773e..0b051a5d05 100644 --- a/docs/components/rule.md +++ b/docs/components/rule.md @@ -344,7 +344,24 @@ Flags: (repeatable). --query.sd-interval=5m Refresh interval to re-read file SD files. (used as a fallback) - --request.logging-config= + --remote-write.config= + Alternative to 'remote-write.config-file' flag + (mutually exclusive). Content of YAML config + for the remote-write server where samples + should be sent to. This automatically enables + stateless mode for ruler and no series will be + stored in the ruler's TSDB. If an empty config + (or file) is provided, the flag is ignored and + ruler is run with its own TSDB. + --remote-write.config-file= + Path to YAML config for the remote-write server + where samples should be sent to. This + automatically enables stateless mode for ruler + and no series will be stored in the ruler's + TSDB. If an empty config (or file) is provided, + the flag is ignored and ruler is run with its + own TSDB. + --request.logging-config= Alternative to 'request.logging-config-file' flag (mutually exclusive). Content of YAML file with request logging configuration. See format diff --git a/pkg/rules/remotewrite/remotewrite.go b/pkg/rules/remotewrite/remotewrite.go index 8e258fcff3..94e540f347 100644 --- a/pkg/rules/remotewrite/remotewrite.go +++ b/pkg/rules/remotewrite/remotewrite.go @@ -1,9 +1,10 @@ package remotewrite import ( - "fmt" "time" + "github.com/pkg/errors" + "github.com/go-kit/kit/log" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/prometheus/config" @@ -12,11 +13,13 @@ import ( "gopkg.in/yaml.v2" ) +// Config represents a remote write configuration for Thanos stateless ruler. type Config struct { Name string `yaml:"name"` RemoteStore *config.RemoteWriteConfig `yaml:"remote_write,omitempty"` } +// LoadRemoteWriteConfig prepares a Config instance from a given YAML config. 
func LoadRemoteWriteConfig(configYAML []byte) (Config, error) { var cfg Config if err := yaml.Unmarshal(configYAML, &cfg); err != nil { @@ -25,6 +28,8 @@ func LoadRemoteWriteConfig(configYAML []byte) (Config, error) { return cfg, nil } +// NewFanoutStorage creates a storage that fans-out to both the WAL and a configured remote storage. +// The remote storage tails the WAL and sends the metrics it reads using Prometheus' remote_write. func NewFanoutStorage(logger log.Logger, reg prometheus.Registerer, walDir string, rwConfig Config) (storage.Storage, error) { walStore, err := NewStorage(logger, reg, walDir) if err != nil { @@ -35,7 +40,7 @@ func NewFanoutStorage(logger log.Logger, reg prometheus.Registerer, walDir strin GlobalConfig: config.DefaultGlobalConfig, RemoteWriteConfigs: []*config.RemoteWriteConfig{rwConfig.RemoteStore}, }); err != nil { - return nil, fmt.Errorf("failed applying config to remote storage: %w", err) + return nil, errors.Wrap(err, "applying config to remote storage") } return storage.NewFanout(logger, walStore, remoteStore), nil } diff --git a/pkg/rules/remotewrite/util.go b/pkg/rules/remotewrite/util.go index b6ddb6b804..e9b6a573d0 100644 --- a/pkg/rules/remotewrite/util.go +++ b/pkg/rules/remotewrite/util.go @@ -104,6 +104,11 @@ func (c *walDataCollector) Append(samples []record.RefSample) bool { return true } +func (c *walDataCollector) AppendExemplars([]record.RefExemplar) bool { + // dummy implementation to make walDataCollector conform to the WriteTo interface + return true +} + func (c *walDataCollector) StoreSeries(series []record.RefSeries, _ int) { c.mut.Lock() defer c.mut.Unlock() diff --git a/scripts/quickstart.sh b/scripts/quickstart.sh index fb4395e9d2..779be02a71 100755 --- a/scripts/quickstart.sh +++ b/scripts/quickstart.sh @@ -254,8 +254,7 @@ if [ -n "${STATELESS_RULER_ENABLED}" ]; then name: "receive-0" EOF - REMOTE_WRITE_FLAGS="--remote-write --remote-write.config-file data/rule-remote-write.yaml - " + REMOTE_WRITE_FLAGS="--remote-write.config-file data/rule-remote-write.yaml" fi # Start Thanos Ruler. diff --git a/test/e2e/rule_test.go b/test/e2e/rule_test.go index cb227e0ec3..4b43ebda61 100644 --- a/test/e2e/rule_test.go +++ b/test/e2e/rule_test.go @@ -10,7 +10,6 @@ import ( "fmt" "io/ioutil" "net/http" - "net/url" "os" "path/filepath" "testing" @@ -97,6 +96,7 @@ groups: annotations: summary: "I always complain and I have been loaded via sighup signal." 
` + amTimeout = model.Duration(10 * time.Second) ) type rulesResp struct { @@ -235,7 +235,7 @@ func TestRule(t *testing.T) { }, Scheme: "http", }, - Timeout: model.Duration(10 * time.Second), + Timeout: amTimeout, APIVersion: alert.APIv1, }, }, []httpconfig.Config{ @@ -446,7 +446,7 @@ func TestRule_CanRemoteWriteData(t *testing.T) { testRuleRecordAbsentMetric := ` groups: - name: example_record_rules - interval: 100ms + interval: 1s rules: - record: test_absent_metric expr: absent(nonexistent{job='thanos-receive'}) @@ -471,8 +471,8 @@ groups: receiver, err := e2ethanos.NewReceiver(s.SharedDir(), s.NetworkName(), "1", 1) testutil.Ok(t, err) testutil.Ok(t, s.StartAndWaitReady(receiver)) - rwURL, err := url.Parse(e2ethanos.RemoteWriteEndpoint(receiver.NetworkEndpoint(8081))) - testutil.Ok(t, err) + rwURL := mustURLParse(t, e2ethanos.RemoteWriteEndpoint(receiver.NetworkEndpoint(8081))) + querier, err := e2ethanos.NewQuerierBuilder(s.SharedDir(), "1", []string{receiver.GRPCNetworkEndpoint()}).Build() testutil.Ok(t, err) testutil.Ok(t, s.StartAndWaitReady(querier)) @@ -484,7 +484,7 @@ groups: }, Scheme: "http", }, - Timeout: model.Duration(time.Second), + Timeout: amTimeout, APIVersion: alert.APIv1, }, }, []query.Config{ From 6fb7d3b5d13a20983f29888e3b460df8d04fb60c Mon Sep 17 00:00:00 2001 From: Michael Okoko Date: Thu, 10 Jun 2021 14:07:53 +0100 Subject: [PATCH 12/26] Use promauto for prometheus primitives Signed-off-by: Michael Okoko --- pkg/rules/remotewrite/wal.go | 22 +++++++--------------- 1 file changed, 7 insertions(+), 15 deletions(-) diff --git a/pkg/rules/remotewrite/wal.go b/pkg/rules/remotewrite/wal.go index 6aad91b32f..25b9143567 100644 --- a/pkg/rules/remotewrite/wal.go +++ b/pkg/rules/remotewrite/wal.go @@ -1,5 +1,6 @@ // This is copied from https://github.com/grafana/agent/blob/a23bd5cf27c2ac99695b7449d38fb12444941a1c/pkg/prom/wal/wal.go // TODO(idoqo): Migrate to prometheus package when https://github.com/prometheus/prometheus/pull/8785 is ready. 
+ package remotewrite import ( @@ -13,6 +14,7 @@ import ( "github.com/go-kit/kit/log/level" "github.com/pkg/errors" "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/client_golang/prometheus/promauto" "github.com/prometheus/prometheus/pkg/exemplar" "github.com/prometheus/prometheus/pkg/labels" "github.com/prometheus/prometheus/pkg/timestamp" @@ -39,41 +41,31 @@ type storageMetrics struct { func newStorageMetrics(r prometheus.Registerer) *storageMetrics { m := storageMetrics{r: r} - m.numActiveSeries = prometheus.NewGauge(prometheus.GaugeOpts{ + m.numActiveSeries = promauto.With(r).NewGauge(prometheus.GaugeOpts{ Name: "agent_wal_storage_active_series", Help: "Current number of active series being tracked by the WAL storage", }) - m.numDeletedSeries = prometheus.NewGauge(prometheus.GaugeOpts{ + m.numDeletedSeries = promauto.With(r).NewGauge(prometheus.GaugeOpts{ Name: "agent_wal_storage_deleted_series", Help: "Current number of series marked for deletion from memory", }) - m.totalCreatedSeries = prometheus.NewCounter(prometheus.CounterOpts{ + m.totalCreatedSeries = promauto.With(r).NewCounter(prometheus.CounterOpts{ Name: "agent_wal_storage_created_series_total", Help: "Total number of created series appended to the WAL", }) - m.totalRemovedSeries = prometheus.NewCounter(prometheus.CounterOpts{ + m.totalRemovedSeries = promauto.With(r).NewCounter(prometheus.CounterOpts{ Name: "agent_wal_storage_removed_series_total", Help: "Total number of created series removed from the WAL", }) - m.totalAppendedSamples = prometheus.NewCounter(prometheus.CounterOpts{ + m.totalAppendedSamples = promauto.With(r).NewCounter(prometheus.CounterOpts{ Name: "agent_wal_samples_appended_total", Help: "Total number of samples appended to the WAL", }) - if r != nil { - r.MustRegister( - m.numActiveSeries, - m.numDeletedSeries, - m.totalCreatedSeries, - m.totalRemovedSeries, - m.totalAppendedSamples, - ) - } - return &m } From 62ef47ca489672127552bfc9befab6400943342d Mon Sep 17 00:00:00 2001 From: Michael Okoko Date: Thu, 10 Jun 2021 14:58:45 +0100 Subject: [PATCH 13/26] Group imports and satisfy go-lint Signed-off-by: Michael Okoko --- cmd/thanos/rule.go | 5 ++--- pkg/rules/remotewrite/wal_test.go | 2 +- test/e2e/e2ethanos/services.go | 1 + test/e2e/rule_test.go | 1 + 4 files changed, 5 insertions(+), 4 deletions(-) diff --git a/cmd/thanos/rule.go b/cmd/thanos/rule.go index 65c93ffcf2..bc5a9b055b 100644 --- a/cmd/thanos/rule.go +++ b/cmd/thanos/rule.go @@ -15,9 +15,6 @@ import ( "strings" "time" - "github.com/prometheus/prometheus/storage" - "github.com/thanos-io/thanos/pkg/rules/remotewrite" - "github.com/go-kit/kit/log" "github.com/go-kit/kit/log/level" grpc_logging "github.com/grpc-ecosystem/go-grpc-middleware/v2/interceptors/logging" @@ -33,6 +30,7 @@ import ( "github.com/prometheus/prometheus/pkg/relabel" "github.com/prometheus/prometheus/promql" "github.com/prometheus/prometheus/rules" + "github.com/prometheus/prometheus/storage" "github.com/prometheus/prometheus/tsdb" "github.com/prometheus/prometheus/util/strutil" "github.com/thanos-io/thanos/pkg/errutil" @@ -52,6 +50,7 @@ import ( "github.com/thanos-io/thanos/pkg/prober" "github.com/thanos-io/thanos/pkg/promclient" thanosrules "github.com/thanos-io/thanos/pkg/rules" + "github.com/thanos-io/thanos/pkg/rules/remotewrite" "github.com/thanos-io/thanos/pkg/runutil" grpcserver "github.com/thanos-io/thanos/pkg/server/grpc" httpserver "github.com/thanos-io/thanos/pkg/server/http" diff --git a/pkg/rules/remotewrite/wal_test.go 
b/pkg/rules/remotewrite/wal_test.go index d1f9eb684a..555b71bb28 100644 --- a/pkg/rules/remotewrite/wal_test.go +++ b/pkg/rules/remotewrite/wal_test.go @@ -348,7 +348,7 @@ func (s seriesList) SeriesNames() []string { return names } -// ExpectedSamples returns the list of expected samples, sorted by ref ID and timestamp +// ExpectedSamples returns the list of expected samples, sorted by ref ID and timestamp. func (s seriesList) ExpectedSamples() []record.RefSample { expect := []record.RefSample{} for _, series := range s { diff --git a/test/e2e/e2ethanos/services.go b/test/e2e/e2ethanos/services.go index 668b92e952..796b958923 100644 --- a/test/e2e/e2ethanos/services.go +++ b/test/e2e/e2ethanos/services.go @@ -27,6 +27,7 @@ import ( "github.com/thanos-io/thanos/pkg/objstore/client" "github.com/thanos-io/thanos/pkg/queryfrontend" "github.com/thanos-io/thanos/pkg/receive" + "github.com/thanos-io/thanos/pkg/rules/remotewrite" ) const ( diff --git a/test/e2e/rule_test.go b/test/e2e/rule_test.go index 4b43ebda61..fe6d28ffc8 100644 --- a/test/e2e/rule_test.go +++ b/test/e2e/rule_test.go @@ -17,6 +17,7 @@ import ( "github.com/efficientgo/e2e" "github.com/prometheus/common/model" + "github.com/prometheus/prometheus/config" "github.com/prometheus/prometheus/discovery/targetgroup" "github.com/thanos-io/thanos/pkg/httpconfig" "gopkg.in/yaml.v2" From 205497170e86b1b7b9452e9540b2ac7f2288abf3 Mon Sep 17 00:00:00 2001 From: Michael Okoko Date: Tue, 6 Jul 2021 01:32:59 +0100 Subject: [PATCH 14/26] Always return empty series set from WAL storage Signed-off-by: Michael Okoko --- pkg/rules/remotewrite/wal.go | 2 +- test/e2e/rule_test.go | 26 ++++++++++++++------------ 2 files changed, 15 insertions(+), 13 deletions(-) diff --git a/pkg/rules/remotewrite/wal.go b/pkg/rules/remotewrite/wal.go index 25b9143567..eaf08f99eb 100644 --- a/pkg/rules/remotewrite/wal.go +++ b/pkg/rules/remotewrite/wal.go @@ -690,5 +690,5 @@ func (r *remoteWriteQueryable) Close() error { } func (r *remoteWriteQueryable) Select(sortSeries bool, hints *storage.SelectHints, matchers ...*labels.Matcher) storage.SeriesSet { - return nil + return storage.EmptySeriesSet() } diff --git a/test/e2e/rule_test.go b/test/e2e/rule_test.go index fe6d28ffc8..5ecaf77ce9 100644 --- a/test/e2e/rule_test.go +++ b/test/e2e/rule_test.go @@ -96,6 +96,14 @@ groups: severity: page annotations: summary: "I always complain and I have been loaded via sighup signal." 
+` + testRuleRecordAbsentMetric = ` +groups: +- name: example_record_rules + interval: 1s + rules: + - record: test_absent_metric + expr: absent(nonexistent{job='thanos-receive'}) ` amTimeout = model.Duration(10 * time.Second) ) @@ -444,15 +452,6 @@ func TestRule(t *testing.T) { func TestRule_CanRemoteWriteData(t *testing.T) { t.Parallel() - testRuleRecordAbsentMetric := ` -groups: -- name: example_record_rules - interval: 1s - rules: - - record: test_absent_metric - expr: absent(nonexistent{job='thanos-receive'}) -` - s, err := e2e.NewScenario("e2e_test_rule_remote_write") testutil.Ok(t, err) t.Cleanup(e2ethanos.CleanScenario(t, s)) @@ -463,13 +462,16 @@ groups: rulesSubDir := "rules" rulesPath := filepath.Join(s.SharedDir(), rulesSubDir) testutil.Ok(t, os.MkdirAll(rulesPath, os.ModePerm)) - createRuleFile(t, filepath.Join(rulesPath, "rules-0.yaml"), testRuleRecordAbsentMetric) + + for i, rule := range []string{testRuleRecordAbsentMetric, testAlertRuleWarnOnPartialResponse} { + createRuleFile(t, filepath.Join(rulesPath, fmt.Sprintf("rules-%d.yaml", i)), rule) + } am, err := e2ethanos.NewAlertmanager(s.SharedDir(), "1") testutil.Ok(t, err) testutil.Ok(t, s.StartAndWaitReady(am)) - receiver, err := e2ethanos.NewReceiver(s.SharedDir(), s.NetworkName(), "1", 1) + receiver, err := e2ethanos.NewIngestingReceiver(s.SharedDir(), s.NetworkName()) testutil.Ok(t, err) testutil.Ok(t, s.StartAndWaitReady(receiver)) rwURL := mustURLParse(t, e2ethanos.RemoteWriteEndpoint(receiver.NetworkEndpoint(8081))) @@ -515,7 +517,7 @@ groups: { "__name__": "test_absent_metric", "job": "thanos-receive", - "receive": "1", + "receive": "e2e_test_rule_remote_write", "tenant_id": "default-tenant", }, }) From 3d5413d87ca6a117229e74be0d5cd7abd459e1a4 Mon Sep 17 00:00:00 2001 From: Michael Okoko Date: Wed, 1 Sep 2021 23:29:32 +0100 Subject: [PATCH 15/26] re-generate rule documentation Signed-off-by: Michael Okoko --- docs/components/rule.md | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/docs/components/rule.md b/docs/components/rule.md index 0b051a5d05..6518674766 100644 --- a/docs/components/rule.md +++ b/docs/components/rule.md @@ -344,7 +344,7 @@ Flags: (repeatable). --query.sd-interval=5m Refresh interval to re-read file SD files. (used as a fallback) - --remote-write.config= + --remote-write.config= Alternative to 'remote-write.config-file' flag (mutually exclusive). Content of YAML config for the remote-write server where samples @@ -353,7 +353,7 @@ Flags: stored in the ruler's TSDB. If an empty config (or file) is provided, the flag is ignored and ruler is run with its own TSDB. - --remote-write.config-file= + --remote-write.config-file= Path to YAML config for the remote-write server where samples should be sent to. This automatically enables stateless mode for ruler @@ -361,7 +361,7 @@ Flags: TSDB. If an empty config (or file) is provided, the flag is ignored and ruler is run with its own TSDB. - --request.logging-config= + --request.logging-config= Alternative to 'request.logging-config-file' flag (mutually exclusive). Content of YAML file with request logging configuration. 
See format From 27f498417d2e56d0fd33ed4461729e50e7ab8aab Mon Sep 17 00:00:00 2001 From: Michael Okoko Date: Wed, 1 Sep 2021 23:41:34 +0100 Subject: [PATCH 16/26] copyright headers to satisfy golint Signed-off-by: Michael Okoko --- pkg/rules/remotewrite/remotewrite.go | 3 +++ pkg/rules/remotewrite/series.go | 3 +++ pkg/rules/remotewrite/util.go | 3 +++ pkg/rules/remotewrite/wal.go | 3 +++ pkg/rules/remotewrite/wal_test.go | 3 +++ 5 files changed, 15 insertions(+) diff --git a/pkg/rules/remotewrite/remotewrite.go b/pkg/rules/remotewrite/remotewrite.go index 94e540f347..13cf6378a4 100644 --- a/pkg/rules/remotewrite/remotewrite.go +++ b/pkg/rules/remotewrite/remotewrite.go @@ -1,3 +1,6 @@ +// Copyright (c) The Thanos Authors. +// Licensed under the Apache License 2.0. + package remotewrite import ( diff --git a/pkg/rules/remotewrite/series.go b/pkg/rules/remotewrite/series.go index d82a31e1a2..02063d8bfc 100644 --- a/pkg/rules/remotewrite/series.go +++ b/pkg/rules/remotewrite/series.go @@ -1,3 +1,6 @@ +// Copyright (c) The Thanos Authors. +// Licensed under the Apache License 2.0. + // This is copied from https://github.com/grafana/agent/blob/a23bd5cf27c2ac99695b7449d38fb12444941a1c/pkg/prom/wal/series.go // TODO(idoqo): Migrate to prometheus package when https://github.com/prometheus/prometheus/pull/8785 is ready. package remotewrite diff --git a/pkg/rules/remotewrite/util.go b/pkg/rules/remotewrite/util.go index e9b6a573d0..bb3b2ec7c6 100644 --- a/pkg/rules/remotewrite/util.go +++ b/pkg/rules/remotewrite/util.go @@ -1,3 +1,6 @@ +// Copyright (c) The Thanos Authors. +// Licensed under the Apache License 2.0. + // This is copied from https://github.com/grafana/agent/blob/a23bd5cf27c2ac99695b7449d38fb12444941a1c/pkg/prom/wal/util.go // TODO(idoqo): Migrate to prometheus package when https://github.com/prometheus/prometheus/pull/8785 is ready. package remotewrite diff --git a/pkg/rules/remotewrite/wal.go b/pkg/rules/remotewrite/wal.go index eaf08f99eb..bcd7fb409e 100644 --- a/pkg/rules/remotewrite/wal.go +++ b/pkg/rules/remotewrite/wal.go @@ -1,3 +1,6 @@ +// Copyright (c) The Thanos Authors. +// Licensed under the Apache License 2.0. + // This is copied from https://github.com/grafana/agent/blob/a23bd5cf27c2ac99695b7449d38fb12444941a1c/pkg/prom/wal/wal.go // TODO(idoqo): Migrate to prometheus package when https://github.com/prometheus/prometheus/pull/8785 is ready. diff --git a/pkg/rules/remotewrite/wal_test.go b/pkg/rules/remotewrite/wal_test.go index 555b71bb28..1843284081 100644 --- a/pkg/rules/remotewrite/wal_test.go +++ b/pkg/rules/remotewrite/wal_test.go @@ -1,3 +1,6 @@ +// Copyright (c) The Thanos Authors. +// Licensed under the Apache License 2.0. + // This is copied from https://github.com/grafana/agent/blob/a23bd5cf27c2ac99695b7449d38fb12444941a1c/pkg/prom/wal/wal_test.go // TODO(idoqo): Migrate to prometheus package when https://github.com/prometheus/prometheus/pull/8785 is ready. 
package remotewrite From 1c82a78dba821782096d1924b0d23eb06bc093d0 Mon Sep 17 00:00:00 2001 From: Michael Okoko Date: Fri, 17 Sep 2021 04:19:37 +0100 Subject: [PATCH 17/26] Rename wal storage metrics Signed-off-by: Michael Okoko --- pkg/rules/remotewrite/wal.go | 12 ++++++------ test/e2e/rule_test.go | 2 +- 2 files changed, 7 insertions(+), 7 deletions(-) diff --git a/pkg/rules/remotewrite/wal.go b/pkg/rules/remotewrite/wal.go index bcd7fb409e..bf2c0f033a 100644 --- a/pkg/rules/remotewrite/wal.go +++ b/pkg/rules/remotewrite/wal.go @@ -45,27 +45,27 @@ type storageMetrics struct { func newStorageMetrics(r prometheus.Registerer) *storageMetrics { m := storageMetrics{r: r} m.numActiveSeries = promauto.With(r).NewGauge(prometheus.GaugeOpts{ - Name: "agent_wal_storage_active_series", + Name: "thanos_wal_storage_active_series", Help: "Current number of active series being tracked by the WAL storage", }) m.numDeletedSeries = promauto.With(r).NewGauge(prometheus.GaugeOpts{ - Name: "agent_wal_storage_deleted_series", + Name: "thanos_wal_storage_deleted_series", Help: "Current number of series marked for deletion from memory", }) m.totalCreatedSeries = promauto.With(r).NewCounter(prometheus.CounterOpts{ - Name: "agent_wal_storage_created_series_total", + Name: "thanos_wal_storage_created_series_total", Help: "Total number of created series appended to the WAL", }) m.totalRemovedSeries = promauto.With(r).NewCounter(prometheus.CounterOpts{ - Name: "agent_wal_storage_removed_series_total", + Name: "thanos_wal_storage_removed_series_total", Help: "Total number of created series removed from the WAL", }) m.totalAppendedSamples = promauto.With(r).NewCounter(prometheus.CounterOpts{ - Name: "agent_wal_samples_appended_total", + Name: "thanos_wal_samples_appended_total", Help: "Total number of samples appended to the WAL", }) @@ -684,7 +684,7 @@ func (r *remoteWriteQueryable) LabelValues(name string, matchers ...*labels.Matc return nil, nil, nil } -func (r *remoteWriteQueryable) LabelNames() ([]string, storage.Warnings, error) { +func (r *remoteWriteQueryable) LabelNames(matchers ...*labels.Matcher) ([]string, storage.Warnings, error) { return nil, nil, nil } diff --git a/test/e2e/rule_test.go b/test/e2e/rule_test.go index 5ecaf77ce9..adfc516805 100644 --- a/test/e2e/rule_test.go +++ b/test/e2e/rule_test.go @@ -476,7 +476,7 @@ func TestRule_CanRemoteWriteData(t *testing.T) { testutil.Ok(t, s.StartAndWaitReady(receiver)) rwURL := mustURLParse(t, e2ethanos.RemoteWriteEndpoint(receiver.NetworkEndpoint(8081))) - querier, err := e2ethanos.NewQuerierBuilder(s.SharedDir(), "1", []string{receiver.GRPCNetworkEndpoint()}).Build() + querier, err := e2ethanos.NewQuerierBuilder(s.SharedDir(), "1", receiver.GRPCNetworkEndpoint()).Build() testutil.Ok(t, err) testutil.Ok(t, s.StartAndWaitReady(querier)) r, err := e2ethanos.NewStatelessRuler(s.SharedDir(), "1", rulesSubDir, []alert.AlertmanagerConfig{ From 760be9215811c1c1884301a3a97d3fd19d3e0b29 Mon Sep 17 00:00:00 2001 From: Michael Okoko Date: Fri, 17 Sep 2021 12:05:41 +0100 Subject: [PATCH 18/26] Use Prometheus' remote write config instead of rolling another Signed-off-by: Michael Okoko --- cmd/thanos/rule.go | 12 +++++++----- docs/components/rule.md | 25 ++++++++++++++----------- pkg/rules/remotewrite/remotewrite.go | 16 +++++----------- test/e2e/e2ethanos/services.go | 2 +- test/e2e/rule_test.go | 13 +++++++------ 5 files changed, 34 insertions(+), 34 deletions(-) diff --git a/cmd/thanos/rule.go b/cmd/thanos/rule.go index bc5a9b055b..467b81a827 100644 --- 
a/cmd/thanos/rule.go +++ b/cmd/thanos/rule.go @@ -26,6 +26,7 @@ import ( "github.com/prometheus/client_golang/prometheus/promauto" "github.com/prometheus/common/model" "github.com/prometheus/common/route" + "github.com/prometheus/prometheus/config" "github.com/prometheus/prometheus/pkg/labels" "github.com/prometheus/prometheus/pkg/relabel" "github.com/prometheus/prometheus/promql" @@ -120,7 +121,7 @@ func registerRule(app *extkingpin.App) { cmd.Flag("eval-interval", "The default evaluation interval to use."). Default("30s").DurationVar(&conf.evalInterval) - conf.rwConfig = extflag.RegisterPathOrContent(cmd, "remote-write.config", "YAML config for the remote-write server where samples should be sent to. This automatically enables stateless mode for ruler and no series will be stored in the ruler's TSDB. If an empty config (or file) is provided, the flag is ignored and ruler is run with its own TSDB.", extflag.WithEnvSubstitution()) + conf.rwConfig = extflag.RegisterPathOrContent(cmd, "remote-write.config", "YAML config for the remote-write server where samples should be sent to (see https://prometheus.io/docs/prometheus/latest/configuration/configuration/#remote_write). This automatically enables stateless mode for ruler and no series will be stored in the ruler's TSDB. If an empty config (or file) is provided, the flag is ignored and ruler is run with its own TSDB.", extflag.WithEnvSubstitution()) reqLogDecision := cmd.Flag("log.request.decision", "Deprecation Warning - This flag would be soon deprecated, and replaced with `request.logging-config`. Request Logging for logging the start and end of requests. By default this flag is disabled. LogFinishCall: Logs the finish call of the requests. LogStartAndFinishCall: Logs the start and finish call of the requests. NoLogCall: Disable request logging.").Default("").Enum("NoLogCall", "LogFinishCall", "LogStartAndFinishCall", "") @@ -336,20 +337,20 @@ func runRule( } if len(rwCfgYAML) > 0 { - var rwCfg remotewrite.Config + var rwCfg config.RemoteWriteConfig rwCfg, err = remotewrite.LoadRemoteWriteConfig(rwCfgYAML) if err != nil { return err } walDir := filepath.Join(conf.dataDir, rwCfg.Name) - remoteStore, err := remotewrite.NewFanoutStorage(logger, reg, walDir, rwCfg) + remoteStore, err := remotewrite.NewFanoutStorage(logger, reg, walDir, &rwCfg) if err != nil { return errors.Wrap(err, "set up remote-write store for ruler") } appendable = remoteStore queryable = remoteStore } else { - db, err := tsdb.Open(conf.dataDir, log.With(logger, "component", "tsdb"), reg, tsdbOpts, nil) + db, err = tsdb.Open(conf.dataDir, log.With(logger, "component", "tsdb"), reg, tsdbOpts, nil) if err != nil { return errors.Wrap(err, "open TSDB") } @@ -554,7 +555,7 @@ func runRule( ) // Start gRPC server. - { + if db != nil { tsdbStore := store.NewTSDBStore(logger, db, component.Rule, conf.lset) tlsCfg, err := tls.NewServerConfig(log.With(logger, "protocol", "gRPC"), conf.grpc.tlsSrvCert, conf.grpc.tlsSrvKey, conf.grpc.tlsSrvClientCA) @@ -579,6 +580,7 @@ func runRule( s.Shutdown(err) }) } + // Start UI & metrics HTTP server. { router := route.New() diff --git a/docs/components/rule.md b/docs/components/rule.md index 6518674766..b13470a22f 100644 --- a/docs/components/rule.md +++ b/docs/components/rule.md @@ -348,19 +348,22 @@ Flags: Alternative to 'remote-write.config-file' flag (mutually exclusive). Content of YAML config for the remote-write server where samples - should be sent to. 
This automatically enables - stateless mode for ruler and no series will be - stored in the ruler's TSDB. If an empty config - (or file) is provided, the flag is ignored and - ruler is run with its own TSDB. + should be sent to (see + https://prometheus.io/docs/prometheus/latest/configuration/configuration/#remote_write). + This automatically enables stateless mode for + ruler and no series will be stored in the + ruler's TSDB. If an empty config (or file) is + provided, the flag is ignored and ruler is run + with its own TSDB. --remote-write.config-file= Path to YAML config for the remote-write server - where samples should be sent to. This - automatically enables stateless mode for ruler - and no series will be stored in the ruler's - TSDB. If an empty config (or file) is provided, - the flag is ignored and ruler is run with its - own TSDB. + where samples should be sent to (see + https://prometheus.io/docs/prometheus/latest/configuration/configuration/#remote_write). + This automatically enables stateless mode for + ruler and no series will be stored in the + ruler's TSDB. If an empty config (or file) is + provided, the flag is ignored and ruler is run + with its own TSDB. --request.logging-config= Alternative to 'request.logging-config-file' flag (mutually exclusive). Content of YAML file diff --git a/pkg/rules/remotewrite/remotewrite.go b/pkg/rules/remotewrite/remotewrite.go index 13cf6378a4..9f1f5f9f46 100644 --- a/pkg/rules/remotewrite/remotewrite.go +++ b/pkg/rules/remotewrite/remotewrite.go @@ -16,15 +16,9 @@ import ( "gopkg.in/yaml.v2" ) -// Config represents a remote write configuration for Thanos stateless ruler. -type Config struct { - Name string `yaml:"name"` - RemoteStore *config.RemoteWriteConfig `yaml:"remote_write,omitempty"` -} - -// LoadRemoteWriteConfig prepares a Config instance from a given YAML config. -func LoadRemoteWriteConfig(configYAML []byte) (Config, error) { - var cfg Config +// LoadRemoteWriteConfig prepares a RemoteWriteConfig instance from a given YAML config. +func LoadRemoteWriteConfig(configYAML []byte) (config.RemoteWriteConfig, error) { + var cfg config.RemoteWriteConfig if err := yaml.Unmarshal(configYAML, &cfg); err != nil { return cfg, err } @@ -33,7 +27,7 @@ func LoadRemoteWriteConfig(configYAML []byte) (Config, error) { // NewFanoutStorage creates a storage that fans-out to both the WAL and a configured remote storage. // The remote storage tails the WAL and sends the metrics it reads using Prometheus' remote_write. 
-func NewFanoutStorage(logger log.Logger, reg prometheus.Registerer, walDir string, rwConfig Config) (storage.Storage, error) { +func NewFanoutStorage(logger log.Logger, reg prometheus.Registerer, walDir string, rwConfig *config.RemoteWriteConfig) (storage.Storage, error) { walStore, err := NewStorage(logger, reg, walDir) if err != nil { return nil, err @@ -41,7 +35,7 @@ func NewFanoutStorage(logger log.Logger, reg prometheus.Registerer, walDir strin remoteStore := remote.NewStorage(logger, reg, walStore.StartTime, walStore.Directory(), 1*time.Minute, nil) if err := remoteStore.ApplyConfig(&config.Config{ GlobalConfig: config.DefaultGlobalConfig, - RemoteWriteConfigs: []*config.RemoteWriteConfig{rwConfig.RemoteStore}, + RemoteWriteConfigs: []*config.RemoteWriteConfig{rwConfig}, }); err != nil { return nil, errors.Wrap(err, "applying config to remote storage") } diff --git a/test/e2e/e2ethanos/services.go b/test/e2e/e2ethanos/services.go index 796b958923..6fd0efde65 100644 --- a/test/e2e/e2ethanos/services.go +++ b/test/e2e/e2ethanos/services.go @@ -18,6 +18,7 @@ import ( "github.com/efficientgo/tools/core/pkg/backoff" "github.com/pkg/errors" "github.com/prometheus/common/model" + "github.com/prometheus/prometheus/config" "github.com/prometheus/prometheus/discovery/targetgroup" "github.com/prometheus/prometheus/pkg/relabel" "github.com/thanos-io/thanos/pkg/httpconfig" @@ -27,7 +28,6 @@ import ( "github.com/thanos-io/thanos/pkg/objstore/client" "github.com/thanos-io/thanos/pkg/queryfrontend" "github.com/thanos-io/thanos/pkg/receive" - "github.com/thanos-io/thanos/pkg/rules/remotewrite" ) const ( diff --git a/test/e2e/rule_test.go b/test/e2e/rule_test.go index adfc516805..797d405b2c 100644 --- a/test/e2e/rule_test.go +++ b/test/e2e/rule_test.go @@ -24,6 +24,10 @@ import ( "github.com/thanos-io/thanos/pkg/alert" "github.com/thanos-io/thanos/pkg/promclient" +<<<<<<< HEAD +======= + "github.com/thanos-io/thanos/pkg/query" +>>>>>>> 6b0612ca (Use Prometheus' remote write config instead of rolling another) "github.com/thanos-io/thanos/pkg/rules/rulespb" "github.com/thanos-io/thanos/pkg/testutil" "github.com/thanos-io/thanos/test/e2e/e2ethanos" @@ -499,12 +503,9 @@ func TestRule_CanRemoteWriteData(t *testing.T) { Scheme: "http", }, }, - }, &remotewrite.Config{ - Name: "ruler-rw-receivers", - RemoteStore: &config.RemoteWriteConfig{ - URL: &common_cfg.URL{URL: rwURL}, - Name: "thanos-receiver", - }, + }, &config.RemoteWriteConfig{ + URL: &common_cfg.URL{URL: rwURL}, + Name: "thanos-receiver", }) testutil.Ok(t, err) testutil.Ok(t, s.StartAndWaitReady(r)) From 45e9f99b40f952ba5b58991a49dcea9215be5cdb Mon Sep 17 00:00:00 2001 From: Ben Ye Date: Sun, 19 Sep 2021 17:50:50 -0700 Subject: [PATCH 19/26] Fix E2E tests Signed-off-by: Ben Ye --- cmd/thanos/rule.go | 44 ++++++++++++++--------------- test/e2e/e2ethanos/services.go | 51 ++++++++++++++++++++++------------ test/e2e/rule_test.go | 45 ++++++++++++++---------------- test/e2e/rules_api_test.go | 4 +-- 4 files changed, 79 insertions(+), 65 deletions(-) diff --git a/cmd/thanos/rule.go b/cmd/thanos/rule.go index 467b81a827..84e93fd782 100644 --- a/cmd/thanos/rule.go +++ b/cmd/thanos/rule.go @@ -555,31 +555,31 @@ func runRule( ) // Start gRPC server. 
+ tlsCfg, err := tls.NewServerConfig(log.With(logger, "protocol", "gRPC"), conf.grpc.tlsSrvCert, conf.grpc.tlsSrvKey, conf.grpc.tlsSrvClientCA) + if err != nil { + return errors.Wrap(err, "setup gRPC server") + } + + options := []grpcserver.Option{ + grpcserver.WithServer(thanosrules.RegisterRulesServer(ruleMgr)), + grpcserver.WithListen(conf.grpc.bindAddress), + grpcserver.WithGracePeriod(time.Duration(conf.grpc.gracePeriod)), + grpcserver.WithTLSConfig(tlsCfg), + } if db != nil { tsdbStore := store.NewTSDBStore(logger, db, component.Rule, conf.lset) - - tlsCfg, err := tls.NewServerConfig(log.With(logger, "protocol", "gRPC"), conf.grpc.tlsSrvCert, conf.grpc.tlsSrvKey, conf.grpc.tlsSrvClientCA) - if err != nil { - return errors.Wrap(err, "setup gRPC server") - } - - // TODO: Add rules API implementation when ready. - s := grpcserver.New(logger, reg, tracer, grpcLogOpts, tagOpts, comp, grpcProbe, - grpcserver.WithServer(store.RegisterStoreServer(tsdbStore)), - grpcserver.WithServer(thanosrules.RegisterRulesServer(ruleMgr)), - grpcserver.WithListen(conf.grpc.bindAddress), - grpcserver.WithGracePeriod(time.Duration(conf.grpc.gracePeriod)), - grpcserver.WithTLSConfig(tlsCfg), - ) - - g.Add(func() error { - statusProber.Ready() - return s.ListenAndServe() - }, func(err error) { - statusProber.NotReady(err) - s.Shutdown(err) - }) + options = append(options, grpcserver.WithServer(store.RegisterStoreServer(tsdbStore))) } + // TODO: Add rules API implementation when ready. + s := grpcserver.New(logger, reg, tracer, grpcLogOpts, tagOpts, comp, grpcProbe, options...) + + g.Add(func() error { + statusProber.Ready() + return s.ListenAndServe() + }, func(err error) { + statusProber.NotReady(err) + s.Shutdown(err) + }) // Start UI & metrics HTTP server. { diff --git a/test/e2e/e2ethanos/services.go b/test/e2e/e2ethanos/services.go index 6fd0efde65..c6fe42d475 100644 --- a/test/e2e/e2ethanos/services.go +++ b/test/e2e/e2ethanos/services.go @@ -512,7 +512,15 @@ func NewIngestingReceiver(e e2e.Environment, name string) (*e2e.InstrumentedRunn return receiver, nil } -func NewRuler(e e2e.Environment, name, ruleSubDir string, amCfg []alert.AlertmanagerConfig, queryCfg []httpconfig.Config) (*e2e.InstrumentedRunnable, error) { +func NewTSDBRuler(e e2e.Environment, name, ruleSubDir string, amCfg []alert.AlertmanagerConfig, queryCfg []httpconfig.Config) (*e2e.InstrumentedRunnable, error) { + return newRuler(e, name, ruleSubDir, amCfg, queryCfg, nil) +} + +func NewStatelessRuler(e e2e.Environment, name, ruleSubDir string, amCfg []alert.AlertmanagerConfig, queryCfg []httpconfig.Config, remoteWriteCfg *config.RemoteWriteConfig) (*e2e.InstrumentedRunnable, error) { + return newRuler(e, name, ruleSubDir, amCfg, queryCfg, remoteWriteCfg) +} + +func newRuler(e e2e.Environment, name, ruleSubDir string, amCfg []alert.AlertmanagerConfig, queryCfg []httpconfig.Config, remoteWriteCfg *config.RemoteWriteConfig) (*e2e.InstrumentedRunnable, error) { dir := filepath.Join(e.SharedDir(), "data", "rule", name) container := filepath.Join(ContainerSharedDir, "data", "rule", name) @@ -532,25 +540,34 @@ func NewRuler(e e2e.Environment, name, ruleSubDir string, amCfg []alert.Alertman return nil, errors.Wrapf(err, "generate query file: %v", queryCfg) } + ruleArgs := map[string]string{ + "--debug.name": fmt.Sprintf("rule-%v", name), + "--grpc-address": ":9091", + "--grpc-grace-period": "0s", + "--http-address": ":8080", + "--label": fmt.Sprintf(`replica="%s"`, name), + "--data-dir": container, + "--rule-file": 
filepath.Join(ContainerSharedDir, ruleSubDir, "*.yaml"), + "--eval-interval": "1s", + "--alertmanagers.config": string(amCfgBytes), + "--alertmanagers.sd-dns-interval": "1s", + "--log.level": infoLogLevel, + "--query.config": string(queryCfgBytes), + "--query.sd-dns-interval": "1s", + "--resend-delay": "5s", + } + if remoteWriteCfg != nil { + rwCfgBytes, err := yaml.Marshal(remoteWriteCfg) + if err != nil { + return nil, errors.Wrapf(err, "generate remote write config: %v", remoteWriteCfg) + } + ruleArgs["--remote-write.config"] = string(rwCfgBytes) + } + ruler := NewService(e, fmt.Sprintf("rule-%v", name), DefaultImage(), - e2e.NewCommand("rule", e2e.BuildArgs(map[string]string{ - "--debug.name": fmt.Sprintf("rule-%v", name), - "--grpc-address": ":9091", - "--grpc-grace-period": "0s", - "--http-address": ":8080", - "--label": fmt.Sprintf(`replica="%s"`, name), - "--data-dir": container, - "--rule-file": filepath.Join(ContainerSharedDir, ruleSubDir, "*.yaml"), - "--eval-interval": "1s", - "--alertmanagers.config": string(amCfgBytes), - "--alertmanagers.sd-dns-interval": "1s", - "--log.level": infoLogLevel, - "--query.config": string(queryCfgBytes), - "--query.sd-dns-interval": "1s", - "--resend-delay": "5s", - })...), + e2e.NewCommand("rule", e2e.BuildArgs(ruleArgs)...), e2e.NewHTTPReadinessProbe("http", "/-/ready", 200, 200), 8080, 9091, diff --git a/test/e2e/rule_test.go b/test/e2e/rule_test.go index 797d405b2c..1c46595dbe 100644 --- a/test/e2e/rule_test.go +++ b/test/e2e/rule_test.go @@ -16,18 +16,15 @@ import ( "time" "github.com/efficientgo/e2e" + common_cfg "github.com/prometheus/common/config" "github.com/prometheus/common/model" "github.com/prometheus/prometheus/config" "github.com/prometheus/prometheus/discovery/targetgroup" - "github.com/thanos-io/thanos/pkg/httpconfig" "gopkg.in/yaml.v2" "github.com/thanos-io/thanos/pkg/alert" + "github.com/thanos-io/thanos/pkg/httpconfig" "github.com/thanos-io/thanos/pkg/promclient" -<<<<<<< HEAD -======= - "github.com/thanos-io/thanos/pkg/query" ->>>>>>> 6b0612ca (Use Prometheus' remote write config instead of rolling another) "github.com/thanos-io/thanos/pkg/rules/rulespb" "github.com/thanos-io/thanos/pkg/testutil" "github.com/thanos-io/thanos/test/e2e/e2ethanos" @@ -233,7 +230,7 @@ func TestRule(t *testing.T) { testutil.Ok(t, err) testutil.Ok(t, e2e.StartAndWaitReady(am1, am2)) - r, err := e2ethanos.NewRuler(e, "1", rulesSubDir, []alert.AlertmanagerConfig{ + r, err := e2ethanos.NewTSDBRuler(e, "1", rulesSubDir, []alert.AlertmanagerConfig{ { EndpointsConfig: httpconfig.EndpointsConfig{ FileSDConfigs: []httpconfig.FileSDConfig{ @@ -456,49 +453,49 @@ func TestRule(t *testing.T) { func TestRule_CanRemoteWriteData(t *testing.T) { t.Parallel() - s, err := e2e.NewScenario("e2e_test_rule_remote_write") + e, err := e2e.NewDockerEnvironment("e2e_test_rule_remote_write") testutil.Ok(t, err) - t.Cleanup(e2ethanos.CleanScenario(t, s)) + t.Cleanup(e2ethanos.CleanScenario(t, e)) ctx, cancel := context.WithTimeout(context.Background(), 3*time.Minute) t.Cleanup(cancel) rulesSubDir := "rules" - rulesPath := filepath.Join(s.SharedDir(), rulesSubDir) + rulesPath := filepath.Join(e.SharedDir(), rulesSubDir) testutil.Ok(t, os.MkdirAll(rulesPath, os.ModePerm)) for i, rule := range []string{testRuleRecordAbsentMetric, testAlertRuleWarnOnPartialResponse} { createRuleFile(t, filepath.Join(rulesPath, fmt.Sprintf("rules-%d.yaml", i)), rule) } - am, err := e2ethanos.NewAlertmanager(s.SharedDir(), "1") + am, err := e2ethanos.NewAlertmanager(e, "1") testutil.Ok(t, err) - 
testutil.Ok(t, s.StartAndWaitReady(am)) + testutil.Ok(t, e2e.StartAndWaitReady(am)) - receiver, err := e2ethanos.NewIngestingReceiver(s.SharedDir(), s.NetworkName()) + receiver, err := e2ethanos.NewIngestingReceiver(e, "1") testutil.Ok(t, err) - testutil.Ok(t, s.StartAndWaitReady(receiver)) - rwURL := mustURLParse(t, e2ethanos.RemoteWriteEndpoint(receiver.NetworkEndpoint(8081))) + testutil.Ok(t, e2e.StartAndWaitReady(receiver)) + rwURL := mustURLParse(t, e2ethanos.RemoteWriteEndpoint(receiver.Endpoint("remote-write"))) - querier, err := e2ethanos.NewQuerierBuilder(s.SharedDir(), "1", receiver.GRPCNetworkEndpoint()).Build() + q, err := e2ethanos.NewQuerierBuilder(e, "1", receiver.Endpoint("grpc")).Build() testutil.Ok(t, err) - testutil.Ok(t, s.StartAndWaitReady(querier)) - r, err := e2ethanos.NewStatelessRuler(s.SharedDir(), "1", rulesSubDir, []alert.AlertmanagerConfig{ + testutil.Ok(t, e2e.StartAndWaitReady(q)) + r, err := e2ethanos.NewStatelessRuler(e, "1", rulesSubDir, []alert.AlertmanagerConfig{ { - EndpointsConfig: http_util.EndpointsConfig{ + EndpointsConfig: httpconfig.EndpointsConfig{ StaticAddresses: []string{ - am.NetworkHTTPEndpoint(), + am.Endpoint("http"), }, Scheme: "http", }, Timeout: amTimeout, APIVersion: alert.APIv1, }, - }, []query.Config{ + }, []httpconfig.Config{ { - EndpointsConfig: http_util.EndpointsConfig{ + EndpointsConfig: httpconfig.EndpointsConfig{ StaticAddresses: []string{ - querier.NetworkHTTPEndpoint(), + q.Endpoint("http"), }, Scheme: "http", }, @@ -508,11 +505,11 @@ func TestRule_CanRemoteWriteData(t *testing.T) { Name: "thanos-receiver", }) testutil.Ok(t, err) - testutil.Ok(t, s.StartAndWaitReady(r)) + testutil.Ok(t, e2e.StartAndWaitReady(r)) t.Run("can fetch remote-written samples from receiver", func(t *testing.T) { testRecordedSamples := "test_absent_metric" - queryAndAssertSeries(t, ctx, querier.HTTPEndpoint(), testRecordedSamples, promclient.QueryOptions{ + queryAndAssertSeries(t, ctx, q.Endpoint("http"), testRecordedSamples, promclient.QueryOptions{ Deduplicate: false, }, []model.Metric{ { diff --git a/test/e2e/rules_api_test.go b/test/e2e/rules_api_test.go index 0d94317c8b..cdbe28e6f7 100644 --- a/test/e2e/rules_api_test.go +++ b/test/e2e/rules_api_test.go @@ -73,9 +73,9 @@ func TestRulesAPI_Fanout(t *testing.T) { } // Recreate rulers with the corresponding query config. - r1, err := e2ethanos.NewRuler(e, "rule1", thanosRulesSubDir, nil, queryCfg) + r1, err := e2ethanos.NewTSDBRuler(e, "rule1", thanosRulesSubDir, nil, queryCfg) testutil.Ok(t, err) - r2, err := e2ethanos.NewRuler(e, "rule2", thanosRulesSubDir, nil, queryCfg) + r2, err := e2ethanos.NewTSDBRuler(e, "rule2", thanosRulesSubDir, nil, queryCfg) testutil.Ok(t, err) testutil.Ok(t, e2e.StartAndWaitReady(r1, r2)) From 1eb0fe3a1ac2e77b89813e14de1c029c14b5b0d5 Mon Sep 17 00:00:00 2001 From: yeya24 Date: Sun, 3 Oct 2021 03:17:46 -0700 Subject: [PATCH 20/26] add changelog Signed-off-by: yeya24 --- CHANGELOG.md | 1 + go.mod | 1 + pkg/rules/remotewrite/remotewrite.go | 1 + test/e2e/rule_test.go | 10 +++++----- 4 files changed, 8 insertions(+), 5 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 014f974817..fb380278df 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -21,6 +21,7 @@ We use *breaking :warning:* to mark changes that are not backward compatible (re - [#4801](https://github.com/thanos-io/thanos/pull/4801) Compactor: added Prometheus metrics for tracking the progress of compaction and downsampling. 
- [#4444](https://github.com/thanos-io/thanos/pull/4444) UI: add mark deletion and no compaction to the Block UI. - [#4576](https://github.com/thanos-io/thanos/pull/4576) UI: add filter compaction level to the Block UI. +- [#4731](https://github.com/thanos-io/thanos/pull/4731) Rule: add stateless mode to ruler, based on https://github.com/thanos-io/thanos/pull/4250. ### Fixed diff --git a/go.mod b/go.mod index bad6a59bd9..22329a9dd2 100644 --- a/go.mod +++ b/go.mod @@ -60,6 +60,7 @@ require ( github.com/prometheus/common v0.30.0 github.com/prometheus/exporter-toolkit v0.6.1 github.com/prometheus/prometheus v1.8.2-0.20210914090109-37468d88dce8 + github.com/stretchr/testify v1.7.0 github.com/tencentyun/cos-go-sdk-v5 v0.7.31 github.com/uber/jaeger-client-go v2.29.1+incompatible github.com/uber/jaeger-lib v2.4.1+incompatible diff --git a/pkg/rules/remotewrite/remotewrite.go b/pkg/rules/remotewrite/remotewrite.go index 9f1f5f9f46..c23e4b10f1 100644 --- a/pkg/rules/remotewrite/remotewrite.go +++ b/pkg/rules/remotewrite/remotewrite.go @@ -32,6 +32,7 @@ func NewFanoutStorage(logger log.Logger, reg prometheus.Registerer, walDir strin if err != nil { return nil, err } + // flushDeadline is set to 1m, but it is for metadata watcher only so not used here. remoteStore := remote.NewStorage(logger, reg, walStore.StartTime, walStore.Directory(), 1*time.Minute, nil) if err := remoteStore.ApplyConfig(&config.Config{ GlobalConfig: config.DefaultGlobalConfig, diff --git a/test/e2e/rule_test.go b/test/e2e/rule_test.go index 1c46595dbe..050fd7f9ee 100644 --- a/test/e2e/rule_test.go +++ b/test/e2e/rule_test.go @@ -475,16 +475,16 @@ func TestRule_CanRemoteWriteData(t *testing.T) { receiver, err := e2ethanos.NewIngestingReceiver(e, "1") testutil.Ok(t, err) testutil.Ok(t, e2e.StartAndWaitReady(receiver)) - rwURL := mustURLParse(t, e2ethanos.RemoteWriteEndpoint(receiver.Endpoint("remote-write"))) + rwURL := mustURLParse(t, e2ethanos.RemoteWriteEndpoint(receiver.InternalEndpoint("remote-write"))) - q, err := e2ethanos.NewQuerierBuilder(e, "1", receiver.Endpoint("grpc")).Build() + q, err := e2ethanos.NewQuerierBuilder(e, "1", receiver.InternalEndpoint("grpc")).Build() testutil.Ok(t, err) testutil.Ok(t, e2e.StartAndWaitReady(q)) r, err := e2ethanos.NewStatelessRuler(e, "1", rulesSubDir, []alert.AlertmanagerConfig{ { EndpointsConfig: httpconfig.EndpointsConfig{ StaticAddresses: []string{ - am.Endpoint("http"), + am.InternalEndpoint("http"), }, Scheme: "http", }, @@ -495,7 +495,7 @@ func TestRule_CanRemoteWriteData(t *testing.T) { { EndpointsConfig: httpconfig.EndpointsConfig{ StaticAddresses: []string{ - q.Endpoint("http"), + q.InternalEndpoint("http"), }, Scheme: "http", }, @@ -515,7 +515,7 @@ func TestRule_CanRemoteWriteData(t *testing.T) { { "__name__": "test_absent_metric", "job": "thanos-receive", - "receive": "e2e_test_rule_remote_write", + "receive": "1", "tenant_id": "default-tenant", }, }) From 3b65c42781990d1ff29af7d61bb43704acfc3530 Mon Sep 17 00:00:00 2001 From: Ben Ye Date: Thu, 28 Oct 2021 13:05:52 -0700 Subject: [PATCH 21/26] remove wal related tests Signed-off-by: Ben Ye --- pkg/rules/remotewrite/util.go | 117 ---------------- pkg/rules/remotewrite/wal_test.go | 226 ------------------------------ 2 files changed, 343 deletions(-) diff --git a/pkg/rules/remotewrite/util.go b/pkg/rules/remotewrite/util.go index bb3b2ec7c6..15eb053859 100644 --- a/pkg/rules/remotewrite/util.go +++ b/pkg/rules/remotewrite/util.go @@ -1,126 +1,9 @@ -// Copyright (c) The Thanos Authors. 
-// Licensed under the Apache License 2.0. - -// This is copied from https://github.com/grafana/agent/blob/a23bd5cf27c2ac99695b7449d38fb12444941a1c/pkg/prom/wal/util.go -// TODO(idoqo): Migrate to prometheus package when https://github.com/prometheus/prometheus/pull/8785 is ready. package remotewrite import ( "path/filepath" - "sync" - - "github.com/prometheus/prometheus/tsdb/record" - "github.com/prometheus/prometheus/tsdb/wal" ) -type walReplayer struct { - w wal.WriteTo -} - -func (r walReplayer) Replay(dir string) error { - w, err := wal.Open(nil, dir) - if err != nil { - return err - } - - dir, startFrom, err := wal.LastCheckpoint(w.Dir()) - if err != nil && err != record.ErrNotFound { - return err - } - - if err == nil { - sr, err := wal.NewSegmentsReader(dir) - if err != nil { - return err - } - - err = r.replayWAL(wal.NewReader(sr)) - if closeErr := sr.Close(); closeErr != nil && err == nil { - err = closeErr - } - if err != nil { - return err - } - - startFrom++ - } - - _, last, err := wal.Segments(w.Dir()) - if err != nil { - return err - } - - for i := startFrom; i <= last; i++ { - s, err := wal.OpenReadSegment(wal.SegmentName(w.Dir(), i)) - if err != nil { - return err - } - - sr := wal.NewSegmentBufReader(s) - err = r.replayWAL(wal.NewReader(sr)) - if closeErr := sr.Close(); closeErr != nil && err == nil { - err = closeErr - } - if err != nil { - return err - } - } - - return nil -} - -func (r walReplayer) replayWAL(reader *wal.Reader) error { - var dec record.Decoder - - for reader.Next() { - rec := reader.Record() - switch dec.Type(rec) { - case record.Series: - series, err := dec.Series(rec, nil) - if err != nil { - return err - } - r.w.StoreSeries(series, 0) - case record.Samples: - samples, err := dec.Samples(rec, nil) - if err != nil { - return err - } - r.w.Append(samples) - } - } - - return nil -} - -type walDataCollector struct { - mut sync.Mutex - samples []record.RefSample - series []record.RefSeries -} - -func (c *walDataCollector) Append(samples []record.RefSample) bool { - c.mut.Lock() - defer c.mut.Unlock() - - c.samples = append(c.samples, samples...) - return true -} - -func (c *walDataCollector) AppendExemplars([]record.RefExemplar) bool { - // dummy implementation to make walDataCollector conform to the WriteTo interface - return true -} - -func (c *walDataCollector) StoreSeries(series []record.RefSeries, _ int) { - c.mut.Lock() - defer c.mut.Unlock() - - c.series = append(c.series, series...) -} - -func (c *walDataCollector) SeriesReset(_ int) {} - // SubDirectory returns the subdirectory within a Storage directory used for // the Prometheus WAL. 
func SubDirectory(base string) string { diff --git a/pkg/rules/remotewrite/wal_test.go b/pkg/rules/remotewrite/wal_test.go index 1843284081..6482840524 100644 --- a/pkg/rules/remotewrite/wal_test.go +++ b/pkg/rules/remotewrite/wal_test.go @@ -8,15 +8,12 @@ package remotewrite import ( "context" "io/ioutil" - "math" "os" "sort" "testing" - "time" "github.com/go-kit/kit/log" "github.com/prometheus/prometheus/pkg/labels" - "github.com/prometheus/prometheus/pkg/value" "github.com/prometheus/prometheus/storage" "github.com/prometheus/prometheus/tsdb/record" "github.com/stretchr/testify/require" @@ -46,229 +43,6 @@ func TestStorage_InvalidSeries(t *testing.T) { require.NoError(t, err, "should not reject valid series") } -func TestStorage(t *testing.T) { - walDir, err := ioutil.TempDir(os.TempDir(), "wal") - require.NoError(t, err) - defer os.RemoveAll(walDir) - - s, err := NewStorage(log.NewNopLogger(), nil, walDir) - require.NoError(t, err) - defer func() { - require.NoError(t, s.Close()) - }() - - app := s.Appender(context.Background()) - - // Write some samples - payload := seriesList{ - {name: "foo", samples: []sample{{1, 10.0}, {10, 100.0}}}, - {name: "bar", samples: []sample{{2, 20.0}, {20, 200.0}}}, - {name: "baz", samples: []sample{{3, 30.0}, {30, 300.0}}}, - } - for _, metric := range payload { - metric.Write(t, app) - } - - require.NoError(t, app.Commit()) - - collector := walDataCollector{} - replayer := walReplayer{w: &collector} - require.NoError(t, replayer.Replay(s.wal.Dir())) - - names := []string{} - for _, series := range collector.series { - names = append(names, series.Labels.Get("__name__")) - } - require.Equal(t, payload.SeriesNames(), names) - - expectedSamples := payload.ExpectedSamples() - actual := collector.samples - sort.Sort(byRefSample(actual)) - require.Equal(t, expectedSamples, actual) -} - -func TestStorage_ExistingWAL(t *testing.T) { - walDir, err := ioutil.TempDir(os.TempDir(), "wal") - require.NoError(t, err) - defer os.RemoveAll(walDir) - - s, err := NewStorage(log.NewNopLogger(), nil, walDir) - require.NoError(t, err) - - app := s.Appender(context.Background()) - payload := seriesList{ - {name: "foo", samples: []sample{{1, 10.0}, {10, 100.0}}}, - {name: "bar", samples: []sample{{2, 20.0}, {20, 200.0}}}, - {name: "baz", samples: []sample{{3, 30.0}, {30, 300.0}}}, - {name: "blerg", samples: []sample{{4, 40.0}, {40, 400.0}}}, - } - - // Write half of the samples. - for _, metric := range payload[0 : len(payload)/2] { - metric.Write(t, app) - } - - require.NoError(t, app.Commit()) - require.NoError(t, s.Close()) - - // We need to wait a little bit for the previous store to finish - // flushing. - time.Sleep(time.Millisecond * 150) - - // Create a new storage, write the other half of samples. - s, err = NewStorage(log.NewNopLogger(), nil, walDir) - require.NoError(t, err) - defer func() { - require.NoError(t, s.Close()) - }() - - // Verify that the storage picked up existing series when it - // replayed the WAL. 
- for series := range s.series.iterator().Channel() { - require.Greater(t, series.lastTs, int64(0), "series timestamp not updated") - } - - app = s.Appender(context.Background()) - - for _, metric := range payload[len(payload)/2:] { - metric.Write(t, app) - } - - require.NoError(t, app.Commit()) - - collector := walDataCollector{} - replayer := walReplayer{w: &collector} - require.NoError(t, replayer.Replay(s.wal.Dir())) - - names := []string{} - for _, series := range collector.series { - names = append(names, series.Labels.Get("__name__")) - } - require.Equal(t, payload.SeriesNames(), names) - - expectedSamples := payload.ExpectedSamples() - actual := collector.samples - sort.Sort(byRefSample(actual)) - require.Equal(t, expectedSamples, actual) -} - -func TestStorage_Truncate(t *testing.T) { - // Same as before but now do the following: - // after writing all the data, forcefully create 4 more segments, - // then do a truncate of a timestamp for _some_ of the data. - // then read data back in. Expect to only get the latter half of data. - walDir, err := ioutil.TempDir(os.TempDir(), "wal") - require.NoError(t, err) - defer os.RemoveAll(walDir) - - s, err := NewStorage(log.NewNopLogger(), nil, walDir) - require.NoError(t, err) - defer func() { - require.NoError(t, s.Close()) - }() - - app := s.Appender(context.Background()) - - payload := seriesList{ - {name: "foo", samples: []sample{{1, 10.0}, {10, 100.0}}}, - {name: "bar", samples: []sample{{2, 20.0}, {20, 200.0}}}, - {name: "baz", samples: []sample{{3, 30.0}, {30, 300.0}}}, - {name: "blerg", samples: []sample{{4, 40.0}, {40, 400.0}}}, - } - - for _, metric := range payload { - metric.Write(t, app) - } - - require.NoError(t, app.Commit()) - - // Forefully create a bunch of new segments so when we truncate - // there's enough segments to be considered for truncation. - for i := 0; i < 5; i++ { - require.NoError(t, s.wal.NextSegment()) - } - - // Truncate half of the samples, keeping only the second sample - // per series. - keepTs := payload[len(payload)-1].samples[0].ts + 1 - err = s.Truncate(keepTs) - require.NoError(t, err) - - payload = payload.Filter(func(s sample) bool { - return s.ts >= keepTs - }) - expectedSamples := payload.ExpectedSamples() - - // Read back the WAL, collect series and samples. 
- collector := walDataCollector{} - replayer := walReplayer{w: &collector} - require.NoError(t, replayer.Replay(s.wal.Dir())) - - names := []string{} - for _, series := range collector.series { - names = append(names, series.Labels.Get("__name__")) - } - require.Equal(t, payload.SeriesNames(), names) - - actual := collector.samples - sort.Sort(byRefSample(actual)) - require.Equal(t, expectedSamples, actual) -} - -func TestStorage_WriteStalenessMarkers(t *testing.T) { - walDir, err := ioutil.TempDir(os.TempDir(), "wal") - require.NoError(t, err) - defer os.RemoveAll(walDir) - - s, err := NewStorage(log.NewNopLogger(), nil, walDir) - require.NoError(t, err) - defer func() { - require.NoError(t, s.Close()) - }() - - app := s.Appender(context.Background()) - - // Write some samples - payload := seriesList{ - {name: "foo", samples: []sample{{1, 10.0}, {10, 100.0}}}, - {name: "bar", samples: []sample{{2, 20.0}, {20, 200.0}}}, - {name: "baz", samples: []sample{{3, 30.0}, {30, 300.0}}}, - } - for _, metric := range payload { - metric.Write(t, app) - } - - require.NoError(t, app.Commit()) - - // Write staleness markers for every series - require.NoError(t, s.WriteStalenessMarkers(func() int64 { - // Pass math.MaxInt64 so it seems like everything was written already - return math.MaxInt64 - })) - - // Read back the WAL, collect series and samples. - collector := walDataCollector{} - replayer := walReplayer{w: &collector} - require.NoError(t, replayer.Replay(s.wal.Dir())) - - actual := collector.samples - sort.Sort(byRefSample(actual)) - - staleMap := map[uint64]bool{} - for _, sample := range actual { - if _, ok := staleMap[sample.Ref]; !ok { - staleMap[sample.Ref] = false - } - if value.IsStaleNaN(sample.V) { - staleMap[sample.Ref] = true - } - } - - for ref, v := range staleMap { - require.True(t, v, "ref %d doesn't have stale marker", ref) - } -} - func TestStoraeg_TruncateAfterClose(t *testing.T) { walDir, err := ioutil.TempDir(os.TempDir(), "wal") require.NoError(t, err) From 69bb9b890210b2566128e22259c2c2455cbce6ca Mon Sep 17 00:00:00 2001 From: Ben Ye Date: Thu, 28 Oct 2021 17:49:20 -0700 Subject: [PATCH 22/26] fix e2e Signed-off-by: Ben Ye --- test/e2e/rule_test.go | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/test/e2e/rule_test.go b/test/e2e/rule_test.go index 050fd7f9ee..ce02ae52d1 100644 --- a/test/e2e/rule_test.go +++ b/test/e2e/rule_test.go @@ -509,7 +509,9 @@ func TestRule_CanRemoteWriteData(t *testing.T) { t.Run("can fetch remote-written samples from receiver", func(t *testing.T) { testRecordedSamples := "test_absent_metric" - queryAndAssertSeries(t, ctx, q.Endpoint("http"), testRecordedSamples, promclient.QueryOptions{ + queryAndAssertSeries(t, ctx, q.Endpoint("http"), testRecordedSamples, func() time.Time { + return time.Now() + }, promclient.QueryOptions{ Deduplicate: false, }, []model.Metric{ { From 9ec5690bac8801ed5fd32a103900834b087106e7 Mon Sep 17 00:00:00 2001 From: Ben Ye Date: Thu, 28 Oct 2021 18:42:40 -0700 Subject: [PATCH 23/26] remove unused structs in tests Signed-off-by: Ben Ye --- pkg/rules/remotewrite/wal_test.go | 100 ------------------------------ 1 file changed, 100 deletions(-) diff --git a/pkg/rules/remotewrite/wal_test.go b/pkg/rules/remotewrite/wal_test.go index 6482840524..7ab724b486 100644 --- a/pkg/rules/remotewrite/wal_test.go +++ b/pkg/rules/remotewrite/wal_test.go @@ -9,13 +9,10 @@ import ( "context" "io/ioutil" "os" - "sort" "testing" "github.com/go-kit/kit/log" "github.com/prometheus/prometheus/pkg/labels" - 
"github.com/prometheus/prometheus/storage" - "github.com/prometheus/prometheus/tsdb/record" "github.com/stretchr/testify/require" ) @@ -54,100 +51,3 @@ func TestStoraeg_TruncateAfterClose(t *testing.T) { require.NoError(t, s.Close()) require.Error(t, ErrWALClosed, s.Truncate(0)) } - -type sample struct { - ts int64 - val float64 -} - -type series struct { - name string - samples []sample - - ref *uint64 -} - -func (s *series) Write(t *testing.T, app storage.Appender) { - t.Helper() - - lbls := labels.FromMap(map[string]string{"__name__": s.name}) - - offset := 0 - if s.ref == nil { - // Write first sample to get ref ID - ref, err := app.Append(0, lbls, s.samples[0].ts, s.samples[0].val) - require.NoError(t, err) - - s.ref = &ref - offset = 1 - } - - // Write other data points with AddFast - for _, sample := range s.samples[offset:] { - _, err := app.Append(*s.ref, lbls, sample.ts, sample.val) - require.NoError(t, err) - } -} - -type seriesList []*series - -// Filter creates a new seriesList with series filtered by a sample -// keep predicate function. -func (s seriesList) Filter(fn func(s sample) bool) seriesList { - var ret seriesList - - for _, entry := range s { - var samples []sample - - for _, sample := range entry.samples { - if fn(sample) { - samples = append(samples, sample) - } - } - - if len(samples) > 0 { - ret = append(ret, &series{ - name: entry.name, - ref: entry.ref, - samples: samples, - }) - } - } - - return ret -} - -func (s seriesList) SeriesNames() []string { - names := make([]string, 0, len(s)) - for _, series := range s { - names = append(names, series.name) - } - return names -} - -// ExpectedSamples returns the list of expected samples, sorted by ref ID and timestamp. -func (s seriesList) ExpectedSamples() []record.RefSample { - expect := []record.RefSample{} - for _, series := range s { - for _, sample := range series.samples { - expect = append(expect, record.RefSample{ - Ref: *series.ref, - T: sample.ts, - V: sample.val, - }) - } - } - sort.Sort(byRefSample(expect)) - return expect -} - -type byRefSample []record.RefSample - -func (b byRefSample) Len() int { return len(b) } -func (b byRefSample) Swap(i, j int) { b[i], b[j] = b[j], b[i] } -func (b byRefSample) Less(i, j int) bool { - if b[i].Ref == b[j].Ref { - return b[i].T < b[j].T - } - return b[i].Ref < b[j].Ref -} From d0bdcd536c49bcd222b72ef99a5277b1627804b9 Mon Sep 17 00:00:00 2001 From: Ben Ye Date: Thu, 28 Oct 2021 18:57:56 -0700 Subject: [PATCH 24/26] add licence header Signed-off-by: Ben Ye --- pkg/rules/remotewrite/util.go | 3 +++ 1 file changed, 3 insertions(+) diff --git a/pkg/rules/remotewrite/util.go b/pkg/rules/remotewrite/util.go index 15eb053859..86f3ac87bc 100644 --- a/pkg/rules/remotewrite/util.go +++ b/pkg/rules/remotewrite/util.go @@ -1,3 +1,6 @@ +// Copyright (c) The Thanos Authors. +// Licensed under the Apache License 2.0. 
+ package remotewrite import ( From fb0dfba96e6f5ee32adb6bb39b6d47f070e1042a Mon Sep 17 00:00:00 2001 From: Ben Ye Date: Mon, 1 Nov 2021 22:00:16 -0700 Subject: [PATCH 25/26] use upstream agent package Signed-off-by: Ben Ye --- CHANGELOG.md | 2 +- cmd/thanos/rule.go | 35 +- go.mod | 23 +- go.sum | 175 ++++--- pkg/rules/remotewrite/remotewrite.go | 44 -- pkg/rules/remotewrite/series.go | 259 ---------- pkg/rules/remotewrite/util.go | 14 - pkg/rules/remotewrite/wal.go | 697 --------------------------- pkg/rules/remotewrite/wal_test.go | 53 -- 9 files changed, 144 insertions(+), 1158 deletions(-) delete mode 100644 pkg/rules/remotewrite/remotewrite.go delete mode 100644 pkg/rules/remotewrite/series.go delete mode 100644 pkg/rules/remotewrite/util.go delete mode 100644 pkg/rules/remotewrite/wal.go delete mode 100644 pkg/rules/remotewrite/wal_test.go diff --git a/CHANGELOG.md b/CHANGELOG.md index fb380278df..a9b8ea52b1 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -21,7 +21,7 @@ We use *breaking :warning:* to mark changes that are not backward compatible (re - [#4801](https://github.com/thanos-io/thanos/pull/4801) Compactor: added Prometheus metrics for tracking the progress of compaction and downsampling. - [#4444](https://github.com/thanos-io/thanos/pull/4444) UI: add mark deletion and no compaction to the Block UI. - [#4576](https://github.com/thanos-io/thanos/pull/4576) UI: add filter compaction level to the Block UI. -- [#4731](https://github.com/thanos-io/thanos/pull/4731) Rule: add stateless mode to ruler, based on https://github.com/thanos-io/thanos/pull/4250. +- [#4731](https://github.com/thanos-io/thanos/pull/4731) Rule: add stateless mode to ruler according to https://thanos.io/tip/proposals-accepted/202005-scalable-rule-storage.md/. Continue https://github.com/thanos-io/thanos/pull/4250. ### Fixed diff --git a/cmd/thanos/rule.go b/cmd/thanos/rule.go index 84e93fd782..4e2e046572 100644 --- a/cmd/thanos/rule.go +++ b/cmd/thanos/rule.go @@ -15,6 +15,7 @@ import ( "strings" "time" + extflag "github.com/efficientgo/tools/extkingpin" "github.com/go-kit/kit/log" "github.com/go-kit/kit/log/level" grpc_logging "github.com/grpc-ecosystem/go-grpc-middleware/v2/interceptors/logging" @@ -32,13 +33,15 @@ import ( "github.com/prometheus/prometheus/promql" "github.com/prometheus/prometheus/rules" "github.com/prometheus/prometheus/storage" + "github.com/prometheus/prometheus/storage/remote" "github.com/prometheus/prometheus/tsdb" + "github.com/prometheus/prometheus/tsdb/agent" "github.com/prometheus/prometheus/util/strutil" "github.com/thanos-io/thanos/pkg/errutil" "github.com/thanos-io/thanos/pkg/extkingpin" "github.com/thanos-io/thanos/pkg/httpconfig" + "gopkg.in/yaml.v2" - extflag "github.com/efficientgo/tools/extkingpin" "github.com/thanos-io/thanos/pkg/alert" v1 "github.com/thanos-io/thanos/pkg/api/rule" "github.com/thanos-io/thanos/pkg/block/metadata" @@ -51,7 +54,6 @@ import ( "github.com/thanos-io/thanos/pkg/prober" "github.com/thanos-io/thanos/pkg/promclient" thanosrules "github.com/thanos-io/thanos/pkg/rules" - "github.com/thanos-io/thanos/pkg/rules/remotewrite" "github.com/thanos-io/thanos/pkg/runutil" grpcserver "github.com/thanos-io/thanos/pkg/server/grpc" httpserver "github.com/thanos-io/thanos/pkg/server/http" @@ -149,6 +151,10 @@ func registerRule(app *extkingpin.App) { WALCompression: *walCompression, } + agentOpts := &agent.Options{ + WALCompression: *walCompression, + } + // Parse and check query configuration. 
lookupQueries := map[string]struct{}{} for _, q := range conf.query.addrs { @@ -206,6 +212,7 @@ func registerRule(app *extkingpin.App) { grpcLogOpts, tagOpts, tsdbOpts, + agentOpts, ) }) } @@ -269,6 +276,7 @@ func runRule( grpcLogOpts []grpc_logging.Option, tagOpts []tags.Option, tsdbOpts *tsdb.Options, + agentOpts *agent.Options, ) error { metrics := newRuleMetrics(reg) @@ -338,17 +346,28 @@ func runRule( if len(rwCfgYAML) > 0 { var rwCfg config.RemoteWriteConfig - rwCfg, err = remotewrite.LoadRemoteWriteConfig(rwCfgYAML) - if err != nil { + if err := yaml.Unmarshal(rwCfgYAML, &rwCfg); err != nil { return err } walDir := filepath.Join(conf.dataDir, rwCfg.Name) - remoteStore, err := remotewrite.NewFanoutStorage(logger, reg, walDir, &rwCfg) + // flushDeadline is set to 1m, but it is for metadata watcher only so not used here. + remoteStore := remote.NewStorage(logger, reg, func() (int64, error) { + return 0, nil + }, walDir, 1*time.Minute, nil) + if err := remoteStore.ApplyConfig(&config.Config{ + GlobalConfig: config.DefaultGlobalConfig, + RemoteWriteConfigs: []*config.RemoteWriteConfig{&rwCfg}, + }); err != nil { + return errors.Wrap(err, "applying config to remote storage") + } + + db, err := agent.Open(logger, reg, remoteStore, walDir, agentOpts) if err != nil { - return errors.Wrap(err, "set up remote-write store for ruler") + return errors.Wrap(err, "start remote write agent db") } - appendable = remoteStore - queryable = remoteStore + fanoutStore := storage.NewFanout(logger, db, remoteStore) + appendable = fanoutStore + queryable = fanoutStore } else { db, err = tsdb.Open(conf.dataDir, log.With(logger, "component", "tsdb"), reg, tsdbOpts, nil) if err != nil { diff --git a/go.mod b/go.mod index 22329a9dd2..aab8ce0199 100644 --- a/go.mod +++ b/go.mod @@ -5,10 +5,10 @@ require ( cloud.google.com/go/trace v0.1.0 github.com/Azure/azure-pipeline-go v0.2.3 github.com/Azure/azure-storage-blob-go v0.13.0 - github.com/Azure/go-autorest/autorest/adal v0.9.15 + github.com/Azure/go-autorest/autorest/adal v0.9.16 github.com/Azure/go-autorest/autorest/azure/auth v0.5.8 github.com/NYTimes/gziphandler v1.1.1 - github.com/alecthomas/units v0.0.0-20210208195552-ff826a37aa15 + github.com/alecthomas/units v0.0.0-20210927113745-59d0afb8317a github.com/aliyun/aliyun-oss-go-sdk v2.0.4+incompatible github.com/baidubce/bce-sdk-go v0.9.81 github.com/blang/semver/v4 v4.0.0 @@ -28,7 +28,7 @@ require ( github.com/fortytw2/leaktest v1.3.0 github.com/fsnotify/fsnotify v1.4.9 github.com/go-kit/kit v0.11.0 - github.com/go-openapi/strfmt v0.20.2 + github.com/go-openapi/strfmt v0.20.3 github.com/gogo/protobuf v1.3.2 github.com/gogo/status v1.1.0 github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da @@ -57,10 +57,9 @@ require ( github.com/prometheus/alertmanager v0.23.1-0.20210914172521-e35efbddb66a github.com/prometheus/client_golang v1.11.0 github.com/prometheus/client_model v0.2.0 - github.com/prometheus/common v0.30.0 - github.com/prometheus/exporter-toolkit v0.6.1 - github.com/prometheus/prometheus v1.8.2-0.20210914090109-37468d88dce8 - github.com/stretchr/testify v1.7.0 + github.com/prometheus/common v0.32.1 + github.com/prometheus/exporter-toolkit v0.7.0 + github.com/prometheus/prometheus v1.8.2-0.20211101135822-b862218389fc github.com/tencentyun/cos-go-sdk-v5 v0.7.31 github.com/uber/jaeger-client-go v2.29.1+incompatible github.com/uber/jaeger-lib v2.4.1+incompatible @@ -69,13 +68,13 @@ require ( go.elastic.co/apm/module/apmot v1.11.0 go.uber.org/atomic v1.9.0 go.uber.org/automaxprocs v1.4.0 - 
go.uber.org/goleak v1.1.10 + go.uber.org/goleak v1.1.12 golang.org/x/crypto v0.0.0-20210616213533-5ff15b29337e - golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f + golang.org/x/oauth2 v0.0.0-20211005180243-6b3c2da341f1 golang.org/x/sync v0.0.0-20210220032951-036812b2e83c golang.org/x/text v0.3.6 - google.golang.org/api v0.56.0 - google.golang.org/genproto v0.0.0-20210903162649-d08c68adba83 + google.golang.org/api v0.59.0 + google.golang.org/genproto v0.0.0-20211020151524-b7c3a969101a google.golang.org/grpc v1.40.0 gopkg.in/alecthomas/kingpin.v2 v2.2.6 gopkg.in/fsnotify.v1 v1.4.7 @@ -94,7 +93,7 @@ replace ( // TODO: Remove this: https://github.com/thanos-io/thanos/issues/3967. github.com/minio/minio-go/v7 => github.com/bwplotka/minio-go/v7 v7.0.11-0.20210324165441-f9927e5255a6 // Make sure Prometheus version is pinned as Prometheus semver does not include Go APIs. - github.com/prometheus/prometheus => github.com/prometheus/prometheus v1.8.2-0.20210914090109-37468d88dce8 + github.com/prometheus/prometheus => github.com/prometheus/prometheus v1.8.2-0.20211101135822-b862218389fc github.com/sercand/kuberesolver => github.com/sercand/kuberesolver v2.4.0+incompatible google.golang.org/grpc => google.golang.org/grpc v1.29.1 diff --git a/go.sum b/go.sum index 6bd3811a14..6e2a3366a5 100644 --- a/go.sum +++ b/go.sum @@ -26,8 +26,10 @@ cloud.google.com/go v0.84.0/go.mod h1:RazrYuxIK6Kb7YrzzhPoLmCVzl7Sup4NrbKPg8KHSU cloud.google.com/go v0.87.0/go.mod h1:TpDYlFy7vuLzZMMZ+B6iRiELaY7z/gJPaqbMx6mlWcY= cloud.google.com/go v0.90.0/go.mod h1:kRX0mNRHe0e2rC6oNakvwQqzyDmg57xJ+SZU1eT2aDQ= cloud.google.com/go v0.92.2/go.mod h1:8utlLll2EF5XMAV15woO4lSbWQlk8rer9aLOfLh7+YI= -cloud.google.com/go v0.93.3 h1:wPBktZFzYBcCZVARvwVKqH1uEj+aLXofJEtrb4oOsio= cloud.google.com/go v0.93.3/go.mod h1:8utlLll2EF5XMAV15woO4lSbWQlk8rer9aLOfLh7+YI= +cloud.google.com/go v0.94.1/go.mod h1:qAlAugsXlC+JWO+Bke5vCtc9ONxjQT3drlTTnAplMW4= +cloud.google.com/go v0.97.0 h1:3DXvAyifywvq64LfkKaMOmkWPS1CikIQdMe2lY9vxU8= +cloud.google.com/go v0.97.0/go.mod h1:GF7l59pYBVlXQIBLx3a761cZ41F9bBH3JUlihCt2Udc= cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= @@ -61,42 +63,38 @@ github.com/Azure/azure-pipeline-go v0.2.3 h1:7U9HBg1JFK3jHl5qmo4CTZKFTVgMwdFHMVt github.com/Azure/azure-pipeline-go v0.2.3/go.mod h1:x841ezTBIMG6O3lAcl8ATHnsOPVl2bqk7S3ta6S6u4k= github.com/Azure/azure-sdk-for-go v16.2.1+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= github.com/Azure/azure-sdk-for-go v36.1.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= -github.com/Azure/azure-sdk-for-go v57.1.0+incompatible h1:TKQ3ieyB0vVKkF6t9dsWbMjq56O1xU3eh3Ec09v6ajM= -github.com/Azure/azure-sdk-for-go v57.1.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= +github.com/Azure/azure-sdk-for-go v58.2.0+incompatible h1:iCb2tuoEm3N7ZpUDOvu1Yxl1B3iOVDmaD6weaRuIPzs= +github.com/Azure/azure-sdk-for-go v58.2.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= github.com/Azure/azure-storage-blob-go v0.8.0/go.mod h1:lPI3aLPpuLTeUwh1sViKXFxwl2B6teiRqI0deQUvsw0= github.com/Azure/azure-storage-blob-go v0.13.0 h1:lgWHvFh+UYBNVQLFHXkvul2f6yOPA9PIH82RTG2cSwc= github.com/Azure/azure-storage-blob-go v0.13.0/go.mod h1:pA9kNqtjUeQF2zOSu4s//nUdBD+e64lEuc4sVnuOfNs= -github.com/Azure/go-ansiterm 
v0.0.0-20170929234023-d6e3b3328b78 h1:w+iIsaOQNcT7OZ575w+acHgRric5iCyQh+xv+KJ4HB8= github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8= +github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 h1:UQHMgLO+TxOElx5B5HZ4hJQsoJ/PvUvKRhJHDQXO8P8= +github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E= github.com/Azure/go-autorest v10.8.1+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24= github.com/Azure/go-autorest v14.2.0+incompatible h1:V5VMDjClD3GiElqLWO7mz2MxNAK/vTfRHdAubSIPRgs= github.com/Azure/go-autorest v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24= github.com/Azure/go-autorest/autorest v0.9.0/go.mod h1:xyHB1BMZT0cuDHU7I0+g046+BFDTQ8rEZB0s4Yfa6bI= github.com/Azure/go-autorest/autorest v0.9.3-0.20191028180845-3492b2aff503/go.mod h1:xyHB1BMZT0cuDHU7I0+g046+BFDTQ8rEZB0s4Yfa6bI= -github.com/Azure/go-autorest/autorest v0.9.3/go.mod h1:GsRuLYvwzLjjjRoWEIyMUaYq8GNUx2nRB378IPt/1p0= -github.com/Azure/go-autorest/autorest v0.10.1/go.mod h1:/FALq9T/kS7b5J5qsQ+RSTUdAmGFqi0vUdVNNx8q630= github.com/Azure/go-autorest/autorest v0.11.1/go.mod h1:JFgpikqFJ/MleTTxwepExTKnFUKKszPS8UavbQYUMuw= +github.com/Azure/go-autorest/autorest v0.11.9/go.mod h1:eipySxLmqSyC5s5k1CLupqet0PSENBEDP93LQ9a8QYw= github.com/Azure/go-autorest/autorest v0.11.17/go.mod h1:eipySxLmqSyC5s5k1CLupqet0PSENBEDP93LQ9a8QYw= github.com/Azure/go-autorest/autorest v0.11.18/go.mod h1:dSiJPy22c3u0OtOKDNttNgqpNFY/GeWa7GH/Pz56QRA= -github.com/Azure/go-autorest/autorest v0.11.20 h1:s8H1PbCZSqg/DH7JMlOz6YMig6htWLNPsjDdlLqCx3M= -github.com/Azure/go-autorest/autorest v0.11.20/go.mod h1:o3tqFY+QR40VOlk+pV4d77mORO64jOXSgEnPQgLK6JY= +github.com/Azure/go-autorest/autorest v0.11.21 h1:w77zY/9RnUAWcIQyDC0Fc89mCvwftR8F+zsR/OH6enk= +github.com/Azure/go-autorest/autorest v0.11.21/go.mod h1:Do/yuMSW/13ayUkcVREpsMHGG+MvV81uzSCFgYPj4tM= github.com/Azure/go-autorest/autorest/adal v0.5.0/go.mod h1:8Z9fGy2MpX0PvDjB1pEgQTmVqjGhiHBW7RJJEciWzS0= -github.com/Azure/go-autorest/autorest/adal v0.8.0/go.mod h1:Z6vX6WXXuyieHAXwMj0S6HY6e6wcHn37qQMBQlvY3lc= github.com/Azure/go-autorest/autorest/adal v0.8.1-0.20191028180845-3492b2aff503/go.mod h1:Z6vX6WXXuyieHAXwMj0S6HY6e6wcHn37qQMBQlvY3lc= -github.com/Azure/go-autorest/autorest/adal v0.8.1/go.mod h1:ZjhuQClTqx435SRJ2iMlOxPYt3d2C/T/7TiQCVZSn3Q= -github.com/Azure/go-autorest/autorest/adal v0.8.2/go.mod h1:ZjhuQClTqx435SRJ2iMlOxPYt3d2C/T/7TiQCVZSn3Q= -github.com/Azure/go-autorest/autorest/adal v0.8.3/go.mod h1:ZjhuQClTqx435SRJ2iMlOxPYt3d2C/T/7TiQCVZSn3Q= github.com/Azure/go-autorest/autorest/adal v0.9.0/go.mod h1:/c022QCutn2P7uY+/oQWWNcK9YU+MH96NgK+jErpbcg= github.com/Azure/go-autorest/autorest/adal v0.9.2/go.mod h1:/3SMAM86bP6wC9Ev35peQDUeqFZBMH07vvUOmg4z/fE= github.com/Azure/go-autorest/autorest/adal v0.9.5/go.mod h1:B7KF7jKIeC9Mct5spmyCB/A8CG/sEz1vwIRGv/bbw7A= github.com/Azure/go-autorest/autorest/adal v0.9.11/go.mod h1:nBKAnTomx8gDtl+3ZCJv2v0KACFHWTB2drffI1B68Pk= github.com/Azure/go-autorest/autorest/adal v0.9.13/go.mod h1:W/MM4U6nLxnIskrw4UwWzlHfGjwUS50aOsc/I3yuU8M= -github.com/Azure/go-autorest/autorest/adal v0.9.15 h1:X+p2GF0GWyOiSmqohIaEeuNFNDY4I4EOlVuUQvFdWMk= -github.com/Azure/go-autorest/autorest/adal v0.9.15/go.mod h1:tGMin8I49Yij6AQ+rvV+Xa/zwxYQB5hmsd6DkfAx2+A= -github.com/Azure/go-autorest/autorest/azure/auth v0.4.2/go.mod h1:90gmfKdlmKgfjUpnCEpOJzsUEjrWDSLwHIG73tSXddM= +github.com/Azure/go-autorest/autorest/adal v0.9.14/go.mod 
h1:W/MM4U6nLxnIskrw4UwWzlHfGjwUS50aOsc/I3yuU8M= +github.com/Azure/go-autorest/autorest/adal v0.9.16 h1:P8An8Z9rH1ldbOLdFpxYorgOt2sywL9V24dAwWHPuGc= +github.com/Azure/go-autorest/autorest/adal v0.9.16/go.mod h1:tGMin8I49Yij6AQ+rvV+Xa/zwxYQB5hmsd6DkfAx2+A= +github.com/Azure/go-autorest/autorest/azure/auth v0.5.3/go.mod h1:4bJZhUhcq8LB20TruwHbAQsmUs2Xh+QR7utuJpLXX3A= github.com/Azure/go-autorest/autorest/azure/auth v0.5.8 h1:TzPg6B6fTZ0G1zBf3T54aI7p3cAT6u//TOXGPmFMOXg= github.com/Azure/go-autorest/autorest/azure/auth v0.5.8/go.mod h1:kxyKZTSfKh8OVFWPAgOgQ/frrJgeYQJPyR5fLFmXko4= -github.com/Azure/go-autorest/autorest/azure/cli v0.3.1/go.mod h1:ZG5p860J94/0kI9mNJVoIoLgXcirM2gF5i2kWloofxw= github.com/Azure/go-autorest/autorest/azure/cli v0.4.2 h1:dMOmEJfkLKW/7JsokJqkyoYSgmR08hi9KrhjZb+JALY= github.com/Azure/go-autorest/autorest/azure/cli v0.4.2/go.mod h1:7qkJkT+j6b+hIpzMOwPChJhTqS8VbsqqgULzMNRugoM= github.com/Azure/go-autorest/autorest/date v0.1.0/go.mod h1:plvfp3oPSKwf2DNjlBjWF/7vwR+cUD/ELuzDCXwHUVA= @@ -149,7 +147,7 @@ github.com/Microsoft/hcsshim v0.8.9/go.mod h1:5692vkUqntj1idxauYlpoINNKeqCiG6Sg3 github.com/Microsoft/hcsshim v0.8.14/go.mod h1:NtVKoYxQuTLx6gEq0L96c9Ju4JbRJ4nY2ow3VK6a9Lg= github.com/Microsoft/hcsshim v0.8.15/go.mod h1:x38A4YbHbdxJtc0sF6oIz+RG0npwSCAvn69iY6URG00= github.com/Microsoft/hcsshim v0.8.16/go.mod h1:o5/SZqmR7x9JNKsW3pu+nqHm0MF8vbA+VxGOoXdC600= -github.com/Microsoft/hcsshim v0.8.18/go.mod h1:+w2gRZ5ReXQhFOrvSQeNfhrYB/dg3oDwTOcER2fw4I4= +github.com/Microsoft/hcsshim v0.8.21/go.mod h1:+w2gRZ5ReXQhFOrvSQeNfhrYB/dg3oDwTOcER2fw4I4= github.com/Microsoft/hcsshim/test v0.0.0-20201218223536-d3e5debf77da/go.mod h1:5hlzMzRKMLyo42nCZ9oml8AdTlq/0cvIaBv6tK1RehU= github.com/Microsoft/hcsshim/test v0.0.0-20210227013316-43a75bb4edd3/go.mod h1:mw7qgWloBUl75W/gVH3cQszUg1+gUITj7D6NY7ywVnY= github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ= @@ -183,8 +181,9 @@ github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuy github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho= -github.com/alecthomas/units v0.0.0-20210208195552-ff826a37aa15 h1:AUNCr9CiJuwrRYS3XieqF+Z9B9gNxo/eANAJCF2eiN4= github.com/alecthomas/units v0.0.0-20210208195552-ff826a37aa15/go.mod h1:OMCwj8VM1Kc9e19TLln2VL61YJF0x1XFtfdL4JdbSyE= +github.com/alecthomas/units v0.0.0-20210927113745-59d0afb8317a h1:E/8AP5dFtMhl5KPJz66Kt9G0n+7Sn41Fy1wv9/jHOrc= +github.com/alecthomas/units v0.0.0-20210927113745-59d0afb8317a/go.mod h1:OMCwj8VM1Kc9e19TLln2VL61YJF0x1XFtfdL4JdbSyE= github.com/alexflint/go-filemutex v0.0.0-20171022225611-72bdc8eae2ae/go.mod h1:CgnQgUtFrFz9mxFNtED3jI5tLDjKlOM+oUF/sTk6ps0= github.com/alicebob/gopher-json v0.0.0-20200520072559-a9ecdc9d1d3a h1:HbKu58rmZpUGpz5+4FfNmIU+FmZg2P3Xaj2v2bfNWmk= github.com/alicebob/gopher-json v0.0.0-20200520072559-a9ecdc9d1d3a/go.mod h1:SGnFV6hVsYE877CKEZ6tDNTjaSXYUk6QqoIK6PrAtcc= @@ -198,6 +197,7 @@ github.com/andreyvit/diff v0.0.0-20170406064948-c7f18ee00883/go.mod h1:rCTlJbsFo github.com/antihax/optional v0.0.0-20180407024304-ca021399b1a6/go.mod h1:V8iCPQYkqmusNa815XgQio277wI47sdRh1dUOLdyC6Q= github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= github.com/aokoli/goutils 
v1.0.1/go.mod h1:SijmP0QR8LtwsmDs8Yii5Z/S4trXFGFC2oO5g9DP+DQ= +github.com/apache/arrow/go/arrow v0.0.0-20200601151325-b2287a20f230/go.mod h1:QNYViu/X0HXDHw7m3KXzWSVXIbfUvJqBFe6Gj8/pYA0= github.com/apache/arrow/go/arrow v0.0.0-20200923215132-ac86123a3f01/go.mod h1:QNYViu/X0HXDHw7m3KXzWSVXIbfUvJqBFe6Gj8/pYA0= github.com/apache/thrift v0.12.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ= github.com/apache/thrift v0.13.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ= @@ -236,8 +236,9 @@ github.com/aws/aws-sdk-go v1.38.3/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zK github.com/aws/aws-sdk-go v1.38.35/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro= github.com/aws/aws-sdk-go v1.38.68/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro= github.com/aws/aws-sdk-go v1.40.11/go.mod h1:585smgzpB/KqRA+K3y/NL/oYRqQvpNJYvLm+LY1U59Q= -github.com/aws/aws-sdk-go v1.40.37 h1:I+Q6cLctkFyMMrKukcDnj+i2kjrQ37LGiOM6xmsxC48= github.com/aws/aws-sdk-go v1.40.37/go.mod h1:585smgzpB/KqRA+K3y/NL/oYRqQvpNJYvLm+LY1U59Q= +github.com/aws/aws-sdk-go v1.41.7 h1:vlpR8Cky3ZxUVNINgeRZS6N0p6zmFvu/ZqRRwrTI25U= +github.com/aws/aws-sdk-go v1.41.7/go.mod h1:585smgzpB/KqRA+K3y/NL/oYRqQvpNJYvLm+LY1U59Q= github.com/aws/aws-sdk-go-v2 v0.18.0/go.mod h1:JWVYvqSMppoMJC0x5wdwiImzgXTI9FuZwxzkQq9wy+g= github.com/aws/aws-sdk-go-v2 v1.7.0/go.mod h1:tb9wi5s61kTDA5qCkcDbt3KRVV74GGslQkl/DRdX/P4= github.com/aws/aws-sdk-go-v2/service/cloudwatch v1.5.0/go.mod h1:acH3+MQoiMzozT/ivU+DbRg7Ooo2298RdRaWcOv+4vM= @@ -259,6 +260,7 @@ github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6r github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= github.com/bitly/go-hostpool v0.0.0-20171023180738-a3a6125de932/go.mod h1:NOuUCSz6Q9T7+igc/hlvDOUdtWKryOrtFyIVABv/p7k= github.com/bitly/go-simplejson v0.5.0/go.mod h1:cXHtHw4XUPsvGaxgjIAn8PhEWG9NfngEKAMDJEczWVA= +github.com/bits-and-blooms/bitset v1.2.0/go.mod h1:gIdJ4wp64HaoK2YrL1Q5/N7Y16edYb8uY+O0FJTyyDA= github.com/bketelsen/crypt v0.0.3-0.20200106085610-5cbc8cc4026c/go.mod h1:MKsuJmJgSg28kpZDP6UIiPt0e0Oz0kqKNGyRaWEPv84= github.com/blang/semver v3.1.0+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk= github.com/blang/semver v3.5.0+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk= @@ -268,7 +270,6 @@ github.com/blang/semver/v4 v4.0.0 h1:1PFHFE6yCCTv8C1TeyNNarDzntLi7wMI5i/pzqYIsAM github.com/blang/semver/v4 v4.0.0/go.mod h1:IbckMUScFkM3pff0VJDNKRiT6TG/YpiHIM2yvyW5YoQ= github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869/go.mod h1:Ekp36dRnpXw/yCqJaO+ZrUyxD+3VXMFFr56k5XYrpB4= github.com/bmizerany/pat v0.0.0-20170815010413-6226ea591a40/go.mod h1:8rLXio+WjiTceGBHIoTvn60HIbs7Hm7bcHjyrSqYB9c= -github.com/boltdb/bolt v1.3.1/go.mod h1:clJnj/oiGkjum5o1McbSZDSLxVThjynRyGBgiAx27Ps= github.com/bonitoo-io/go-sql-bigquery v0.3.4-1.4.0/go.mod h1:J4Y6YJm0qTWB9aFziB7cPeSyc6dOZFyJdteSeybVpXQ= github.com/bshuster-repo/logrus-logstash-hook v0.4.1/go.mod h1:zsTqEiSzDgAa/8GZR7E1qaXrhYNDKBYy5/dWPTIflbk= github.com/buger/jsonparser v0.0.0-20180808090653-f4dd9f5a6b44/go.mod h1:bbYlZJ7hK1yFx9hf58LP0zeX7UjIGs20ufpu3evjr+s= @@ -298,6 +299,7 @@ github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XL github.com/cespare/xxhash/v2 v2.1.2 h1:YRXhKfTDauu4ajMg1TPgFO5jnlC2HCbmLXMcTG5cbYE= github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/checkpoint-restore/go-criu/v4 v4.1.0/go.mod h1:xUQBLp4RLc5zJtWY++yjOoMoB5lihDt7fai+75m+rGw= 
+github.com/checkpoint-restore/go-criu/v5 v5.0.0/go.mod h1:cfwC0EG7HMUenopBsUf9d89JlCLQIfgVcNsNN0t6T2M= github.com/chromedp/cdproto v0.0.0-20200116234248-4da64dd111ac/go.mod h1:PfAWWKJqjlGFYJEidUM6aVIWPr0EpobeyVWEEmplX7g= github.com/chromedp/cdproto v0.0.0-20200424080200-0de008e41fa0 h1:Mf2aT0YmWsdNULwaHeCktDLWHb1s+VoDi9xEcFboLQ4= github.com/chromedp/cdproto v0.0.0-20200424080200-0de008e41fa0/go.mod h1:PfAWWKJqjlGFYJEidUM6aVIWPr0EpobeyVWEEmplX7g= @@ -310,6 +312,7 @@ github.com/cilium/ebpf v0.0.0-20200110133405-4032b1d8aae3/go.mod h1:MA5e5Lr8slmE github.com/cilium/ebpf v0.0.0-20200702112145-1c8d4c9ef775/go.mod h1:7cR51M8ViRLIdUjrmSXlK9pkrsDlLHbO8jiB8X8JnOc= github.com/cilium/ebpf v0.2.0/go.mod h1:To2CFviqOWL/M0gIMsvSMlqe7em/l1ALkX1PyjrX2Qs= github.com/cilium/ebpf v0.4.0/go.mod h1:4tRaxcgiL706VnOzHOdBlY8IEAIdxINsQBcU4xJJXRs= +github.com/cilium/ebpf v0.6.2/go.mod h1:4tRaxcgiL706VnOzHOdBlY8IEAIdxINsQBcU4xJJXRs= github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible/go.mod h1:nmEj6Dob7S7YxXgwXpfOuvO54S+tGdZdw9fuRZt25Ag= github.com/circonus-labs/circonusllhist v0.1.3/go.mod h1:kMXHVDlOchFAehlya5ePtbp5jckzBHf4XRpQvBOLI+I= github.com/clbanning/x2j v0.0.0-20191024224557-825249438eec/go.mod h1:jMjuTZXRI4dUb/I5gc9Hdhagfvm9+RyrPryS/auMzxE= @@ -361,8 +364,8 @@ github.com/containerd/containerd v1.5.0-beta.3/go.mod h1:/wr9AVtEM7x9c+n0+stptlo github.com/containerd/containerd v1.5.0-beta.4/go.mod h1:GmdgZd2zA2GYIBZ0w09ZvgqEq8EfBp/m3lcVZIvPHhI= github.com/containerd/containerd v1.5.0-rc.0/go.mod h1:V/IXoMqNGgBlabz3tHD2TWDoTJseu1FGOKuoA4nNb2s= github.com/containerd/containerd v1.5.1/go.mod h1:0DOxVqwDy2iZvrZp2JUx/E+hS0UNTVn7dJnIOwtYR4g= -github.com/containerd/containerd v1.5.4 h1:uPF0og3ByFzDnaStfiQj3fVGTEtaSNyU+bW7GR/nqGA= -github.com/containerd/containerd v1.5.4/go.mod h1:sx18RgvW6ABJ4iYUw7Q5x7bgFOAB9B6G7+yO0XBc4zw= +github.com/containerd/containerd v1.5.7 h1:rQyoYtj4KddB3bxG6SAqd4+08gePNyJjRqvOIfV3rkM= +github.com/containerd/containerd v1.5.7/go.mod h1:gyvv6+ugqY25TiXxcZC3L5yOeYgEw0QMhscqVp1AR9c= github.com/containerd/continuity v0.0.0-20190426062206-aaeac12a7ffc/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y= github.com/containerd/continuity v0.0.0-20190815185530-f2a389ac0a02/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y= github.com/containerd/continuity v0.0.0-20191127005431-f65d91d395eb/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y= @@ -484,15 +487,14 @@ github.com/dennwc/varint v1.0.0/go.mod h1:hnItb35rvZvJrbTALZtY/iQfDs48JKRG1RPpgz github.com/denverdino/aliyungo v0.0.0-20190125010748-a747050bb1ba/go.mod h1:dV8lFg6daOBZbT6/BDGIz6Y3WFGn8juu6G+CQ6LHtl0= github.com/dgrijalva/jwt-go v0.0.0-20170104182250-a601269ab70c/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= -github.com/dgrijalva/jwt-go/v4 v4.0.0-preview1/go.mod h1:+hnT3ywWDTAFrW5aE+u2Sa/wT555ZqwoCS+pk3p6ry4= github.com/dgryski/go-bitstream v0.0.0-20180413035011-3522498ce2c8/go.mod h1:VMaSuZ+SZcx/wljOQKvp5srsbCiKDEb6K2wC4+PiBmQ= github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f h1:lO4WD4F/rVNCu3HqELle0jiPLLBs70cWOduZpkS1E78= github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f/go.mod h1:cuUVRXasLTGF7a8hSLbxyZXjz+1KgoB3wDUb6vlszIc= github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no= github.com/dgryski/go-sip13 v0.0.0-20200911182023-62edffca9245/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no= 
github.com/dhui/dktest v0.3.0/go.mod h1:cyzIUfGsBEbZ6BT7tnXqAShHSXCZhSNmFl70sZ7c1yc= -github.com/digitalocean/godo v1.65.0 h1:3SywGJBC18HaYtPQF+T36jYzXBi+a6eIMonSjDll7TA= -github.com/digitalocean/godo v1.65.0/go.mod h1:p7dOjjtSBqCTUksqtA5Fd3uaKs9kyTq2xcz76ulEJRU= +github.com/digitalocean/godo v1.69.1 h1:aCyfwth8R3DeOaWB9J9E8v7cjlDIlF19eXTt8R3XhTE= +github.com/digitalocean/godo v1.69.1/go.mod h1:epPuOzTOOJujNo0nduDj2D5O1zu8cSpp9R+DdN0W9I0= github.com/dimchansky/utfbom v1.1.0/go.mod h1:rO41eb7gLfo8SF1jd9F8HplJm1Fewwi4mQvIirEdv+8= github.com/dimchansky/utfbom v1.1.1 h1:vV6w1AhK4VMnhBno/TPVCoK9U/LP0PkLCS9tbxHdi/U= github.com/dimchansky/utfbom v1.1.1/go.mod h1:SxdoEBH5qIqFocHMyGOXVAybYJdr71b1Q/j0mACtrfE= @@ -505,8 +507,8 @@ github.com/docker/distribution v2.7.1+incompatible h1:a5mlkVzth6W5A4fOsS3D2EO5BU github.com/docker/distribution v2.7.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= github.com/docker/docker v0.7.3-0.20190103212154-2b7e084dc98b/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/docker v0.7.3-0.20190817195342-4760db040282/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= -github.com/docker/docker v20.10.8+incompatible h1:RVqD337BgQicVCzYrrlhLDWhq6OAD2PJDUg2LsEUvKM= -github.com/docker/docker v20.10.8+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/docker v20.10.9+incompatible h1:JlsVnETOjM2RLQa0Cc1XCIspUdXW3Zenq9P54uXBm6k= +github.com/docker/docker v20.10.9+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ= github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec= github.com/docker/go-events v0.0.0-20170721190031-9461782956ad/go.mod h1:Uw6UezgYA44ePAFQYUehOuCzmy5zmg/+nl2ZfMWGkpA= @@ -603,8 +605,9 @@ github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2 github.com/go-kit/kit v0.10.0/go.mod h1:xUsJbQ/Fp4kEt7AFgCuvyX4a71u8h9jB8tj/ORgOZ7o= github.com/go-kit/kit v0.11.0 h1:IGmIEl7aHTYh6E2HlT+ptILBotjo4xl8PMDl852etiI= github.com/go-kit/kit v0.11.0/go.mod h1:73/6Ixaufkvb5Osvkls8C79vuQ49Ba1rUEUYNSf+FUw= -github.com/go-kit/log v0.1.0 h1:DGJh0Sm43HbOeYDNnVZFl8BvcYVvjD5bqYJvp0REbwQ= github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY= +github.com/go-kit/log v0.2.0 h1:7i2K3eKTos3Vc0enKCfnVcgHh2olr/MyfboYq7cAcFw= +github.com/go-kit/log v0.2.0/go.mod h1:NwTd00d/i8cPZ3xOwwiv2PO5MOcx78fFErGNcVmBjv0= github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A= @@ -690,8 +693,9 @@ github.com/go-openapi/strfmt v0.19.5/go.mod h1:eftuHTlB/dI8Uq8JJOyRlieZf+WkkxUuk github.com/go-openapi/strfmt v0.19.11/go.mod h1:UukAYgTaQfqJuAFlNxxMWNvMYiwiXtLsF2VwmoFtbtc= github.com/go-openapi/strfmt v0.20.0/go.mod h1:UukAYgTaQfqJuAFlNxxMWNvMYiwiXtLsF2VwmoFtbtc= github.com/go-openapi/strfmt v0.20.1/go.mod h1:43urheQI9dNtE5lTZQfuFJvjYJKPrxicATpEfZwHUNk= -github.com/go-openapi/strfmt v0.20.2 h1:6XZL+fF4VZYFxKQGLAUB358hOrRh/wS51uWEtlONADE= github.com/go-openapi/strfmt v0.20.2/go.mod h1:43urheQI9dNtE5lTZQfuFJvjYJKPrxicATpEfZwHUNk= +github.com/go-openapi/strfmt v0.20.3 h1:YVG4ZgPZ00km/lRHrIf7c6cKL5/4FAUtG2T9RxWAgDY= +github.com/go-openapi/strfmt v0.20.3/go.mod h1:43urheQI9dNtE5lTZQfuFJvjYJKPrxicATpEfZwHUNk= github.com/go-openapi/swag 
v0.0.0-20160704191624-1d0bd113de87/go.mod h1:DXUve3Dpr1UfpPtxFw+EFuQ41HhCWZfha5jSVRG7C7I= github.com/go-openapi/swag v0.17.0/go.mod h1:AByQ+nYG6gQg71GINrmuDXCPWdL640yX49/kXLo40Tg= github.com/go-openapi/swag v0.18.0/go.mod h1:AByQ+nYG6gQg71GINrmuDXCPWdL640yX49/kXLo40Tg= @@ -792,6 +796,8 @@ github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69 github.com/gogo/status v1.0.3/go.mod h1:SavQ51ycCLnc7dGyJxp8YAmudx8xqiVrRf+6IXRsugc= github.com/gogo/status v1.1.0 h1:+eIkrewn5q6b30y+g/BJINVVdi2xH7je5MPJ3ZPK3JA= github.com/gogo/status v1.1.0/go.mod h1:BFv9nrluPLmrS0EmGVvLaPNmRosr9KapBYd5/hpY1WM= +github.com/golang-jwt/jwt v3.2.1+incompatible h1:73Z+4BJcrTC+KczS6WvTPvRGOp1WmfEP4Q1lOd9Z/+c= +github.com/golang-jwt/jwt v3.2.1+incompatible/go.mod h1:8pz2t5EyA70fFQQSrl6XZXzqecmYZeUEB8OUGHkxJ+I= github.com/golang-jwt/jwt/v4 v4.0.0 h1:RAqyYixv1p7uEnocuy8P1nru5wprCh/MH2BIlW5z5/o= github.com/golang-jwt/jwt/v4 v4.0.0/go.mod h1:/xlHOz8bRuivTWchD4jCa+NbatV+wEUSzwAxVc6locg= github.com/golang-migrate/migrate/v4 v4.7.0/go.mod h1:Qvut3N4xKWjoH3sokBccML6WyHSnggXm/DvMMnTsQIc= @@ -895,8 +901,8 @@ github.com/google/pprof v0.0.0-20210226084205-cbba55b83ad5/go.mod h1:kpwsk12EmLe github.com/google/pprof v0.0.0-20210601050228-01bbb1931b22/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20210609004039-a478d1d731e9/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/pprof v0.0.0-20210827144239-02619b876842 h1:JCrt5MIE1fHQtdy1825HwJ45oVQaqHE6lgssRhjcg/o= -github.com/google/pprof v0.0.0-20210827144239-02619b876842/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20211008130755-947d60d73cc0 h1:zHs+jv3LO743/zFGcByu2KmpbliCU2AhjcGgrdTwSG4= +github.com/google/pprof v0.0.0-20211008130755-947d60d73cc0/go.mod h1:KgnwoLYCZ8IQu3XUZ8Nc/bM9CCZFOyjUNOSygVozoDg= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= @@ -907,8 +913,9 @@ github.com/googleapis/gax-go v2.0.2+incompatible h1:silFMLAnr330+NRuag/VjIGF7TLp github.com/googleapis/gax-go v2.0.2+incompatible/go.mod h1:SFVmujtThgffbyetf+mdk2eWhX2bMyUtNHzFKcPA9HY= github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= -github.com/googleapis/gax-go/v2 v2.1.0 h1:6DWmvNpomjL1+3liNSZbVns3zsYzzCjm6pRBO1tLeso= github.com/googleapis/gax-go/v2 v2.1.0/go.mod h1:Q3nei7sK6ybPYH7twZdmQpAd1MKb7pfu6SK+H1/DsU0= +github.com/googleapis/gax-go/v2 v2.1.1 h1:dp3bWCh+PPO1zjRRiCSczJav13sBvG4UhNyVTa1KqdU= +github.com/googleapis/gax-go/v2 v2.1.1/go.mod h1:hddJymUZASv3XPyGkUpKj8pPO47Rmb0eJc8R6ouapiM= github.com/googleapis/gnostic v0.0.0-20170729233727-0c5108395e2d/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY= github.com/googleapis/gnostic v0.3.1/go.mod h1:on+2t9HRStVgn95RSsFWFz+6Q0Snyqv1awfrALZdbtU= github.com/googleapis/gnostic v0.4.1/go.mod h1:LRhVm6pbyptWbWbuZ38d1eyptfvIytN3ir6b65WBswg= @@ -918,8 +925,8 @@ github.com/googleapis/gnostic v0.5.5/go.mod h1:7+EbHbldMins07ALC74bsA81Ovc97Dwqy github.com/gophercloud/gophercloud v0.6.0/go.mod h1:GICNByuaEBibcjmjvI7QvYJSZEbGkcYwAR7EZK2WMqM= github.com/gophercloud/gophercloud v0.12.0/go.mod 
h1:gmC5oQqMDOMO1t1gq5DquX/yAU808e/4mzjjDA76+Ss= github.com/gophercloud/gophercloud v0.13.0/go.mod h1:VX0Ibx85B60B5XOrZr6kaNwrmPUzcmMpwxvQ1WQIIWM= -github.com/gophercloud/gophercloud v0.20.0 h1:1+4jrsjVhdX5omlAo4jkmFc6ftLbuXLzgFo4i6lH+Gk= -github.com/gophercloud/gophercloud v0.20.0/go.mod h1:wRtmUelyIIv3CSSDI47aUwbs075O6i+LY+pXsKCBsb4= +github.com/gophercloud/gophercloud v0.22.0 h1:9lFISNLafZcecT0xUveIMt3IafexC6DIV9ek1SZdSMw= +github.com/gophercloud/gophercloud v0.22.0/go.mod h1:wRtmUelyIIv3CSSDI47aUwbs075O6i+LY+pXsKCBsb4= github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= github.com/gopherjs/gopherjs v0.0.0-20191106031601-ce3c9ade29de h1:F7WD09S8QB4LrkEpka0dFPLSotH11HRpCsLIbIcJ7sU= github.com/gopherjs/gopherjs v0.0.0-20191106031601-ce3c9ade29de/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= @@ -966,8 +973,8 @@ github.com/hashicorp/consul/api v1.5.0/go.mod h1:LqwrLNW876eYSuUOo4ZLHBcdKc038tx github.com/hashicorp/consul/api v1.7.0/go.mod h1:1NSuaUUkFaJzMasbfq/11wKYWSR67Xn6r2DXKhuDNFg= github.com/hashicorp/consul/api v1.8.1/go.mod h1:sDjTOq0yUyv5G4h+BqSea7Fn6BU+XbolEz1952UB+mk= github.com/hashicorp/consul/api v1.9.1/go.mod h1:XjsvQN+RJGWI2TWy1/kqaE16HrR2J/FWgkYjdZQsX9M= -github.com/hashicorp/consul/api v1.10.1 h1:MwZJp86nlnL+6+W1Zly4JUuVn9YHhMggBirMpHGD7kw= -github.com/hashicorp/consul/api v1.10.1/go.mod h1:XjsvQN+RJGWI2TWy1/kqaE16HrR2J/FWgkYjdZQsX9M= +github.com/hashicorp/consul/api v1.11.0 h1:Hw/G8TtRvOElqxVIhBzXciiSTbapq8hZ2XKZsXk5ZCE= +github.com/hashicorp/consul/api v1.11.0/go.mod h1:XjsvQN+RJGWI2TWy1/kqaE16HrR2J/FWgkYjdZQsX9M= github.com/hashicorp/consul/sdk v0.1.1/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8= github.com/hashicorp/consul/sdk v0.3.0/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8= github.com/hashicorp/consul/sdk v0.5.0/go.mod h1:fY08Y9z5SvJqevyZNy6WWPXiG3KwBPAvlcdx16zZ0fM= @@ -1039,22 +1046,24 @@ github.com/hudl/fargo v1.3.0/go.mod h1:y3CKSmjA+wD2gak7sUSXTAoopbhU08POFhmITJgmK github.com/iancoleman/strcase v0.0.0-20180726023541-3605ed457bf7/go.mod h1:SK73tn/9oHe+/Y0h39VT4UCxmurVJkR5NA7kMEAOgSE= github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= +github.com/ianlancetaylor/demangle v0.0.0-20210905161508-09a460cdf81d/go.mod h1:aYm2/VgdVmcIU8iMfdMvDMsRAQjcfZSKFby6HOFvi/w= github.com/imdario/mergo v0.3.4/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= github.com/imdario/mergo v0.3.8/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= github.com/imdario/mergo v0.3.10/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= -github.com/imdario/mergo v0.3.11 h1:3tnifQM4i+fbajXKBHXWEH+KvNHqojZ778UH75j3bGA= github.com/imdario/mergo v0.3.11/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= +github.com/imdario/mergo v0.3.12 h1:b6R2BslTbIEToALKP7LxUvijTsNI9TAe80pLWN2g/HU= +github.com/imdario/mergo v0.3.12/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= -github.com/influxdata/flux v0.120.1/go.mod h1:pGSAvyAA5d3et7SSzajaYShWYXmnRnJJq2qWi+WWZ2I= +github.com/influxdata/flux v0.131.0/go.mod h1:CKvnYe6FHpTj/E0YGI7TcOZdGiYHoToOPSnoa12RtKI= github.com/influxdata/httprouter 
v1.3.1-0.20191122104820-ee83e2772f69/go.mod h1:pwymjR6SrP3gD3pRj9RJwdl1j5s3doEEV8gS4X9qSzA= -github.com/influxdata/influxdb v1.9.3/go.mod h1:xD4ZjAgEJQO9/bX3NhFrssKtdNPi+ki1kjrttJRDhGc= +github.com/influxdata/influxdb v1.9.5/go.mod h1:4uPVvcry9KWQVWLxyT9641qpkRXUBN+xa0MJFFNNLKo= github.com/influxdata/influxdb-client-go/v2 v2.3.1-0.20210518120617-5d1fff431040/go.mod h1:vLNHdxTJkIf2mSLvGrpj8TCcISApPoXkaxP8g9uRlW8= github.com/influxdata/influxdb1-client v0.0.0-20191209144304-8bf82d3c094d/go.mod h1:qj24IKcXYK6Iy9ceXlo3Tc+vtHo9lIhSX5JddghvEPo= github.com/influxdata/influxdb1-client v0.0.0-20200827194710-b269163b24ab/go.mod h1:qj24IKcXYK6Iy9ceXlo3Tc+vtHo9lIhSX5JddghvEPo= github.com/influxdata/influxql v1.1.1-0.20210223160523-b6ab99450c93/go.mod h1:gHp9y86a/pxhjJ+zMjNXiQAA197Xk9wLxaz+fGG+kWk= github.com/influxdata/line-protocol v0.0.0-20200327222509-2487e7298839/go.mod h1:xaLFMmpvUxqXtVkUJfg9QmT88cDaCJ3ZKgdZ78oO8Qo= -github.com/influxdata/pkg-config v0.2.7/go.mod h1:EMS7Ll0S4qkzDk53XS3Z72/egBsPInt+BeRxb0WeSwk= +github.com/influxdata/pkg-config v0.2.8/go.mod h1:EMS7Ll0S4qkzDk53XS3Z72/egBsPInt+BeRxb0WeSwk= github.com/influxdata/promql/v2 v2.12.0/go.mod h1:fxOPu+DY0bqCTCECchSRtWfc+0X19ybifQhZoQNF5D8= github.com/influxdata/roaring v0.4.13-0.20180809181101-fc520f41fab6/go.mod h1:bSgUQ7q5ZLSO+bKBGqJiCBGAl+9DxyW63zLTujjUlOE= github.com/influxdata/tdigest v0.0.2-0.20210216194612-fc98d27c9e8b/go.mod h1:Z0kXnxzbTC2qrx4NaIzYkE1k66+6oEDQTvL95hQFh5Y= @@ -1093,8 +1102,9 @@ github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/u github.com/json-iterator/go v1.1.8/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= -github.com/json-iterator/go v1.1.11 h1:uVUAXhF2To8cbw/3xN3pxj6kk7TYKs98NIrTqPlMWAQ= github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= +github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= github.com/jsternberg/zap-logfmt v1.2.0/go.mod h1:kz+1CUmCutPWABnNkOu9hOHKdT2q3TDYCcsFy9hpqb0= @@ -1124,6 +1134,8 @@ github.com/klauspost/cpuid v1.3.1/go.mod h1:bYW4mA6ZgKPob1/Dlai2LviZJO7KGI3uoWLd github.com/klauspost/pgzip v1.0.2-0.20170402124221-0bf5dcad4ada/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs= github.com/knq/sysutil v0.0.0-20191005231841-15668db23d08 h1:V0an7KRw92wmJysvFvtqtKMAPmvS5O0jtB0nYo6t+gs= github.com/knq/sysutil v0.0.0-20191005231841-15668db23d08/go.mod h1:dFWs1zEqDjFtnBXsd1vPOZaLsESovai349994nHx3e0= +github.com/kolo/xmlrpc v0.0.0-20201022064351-38db28db192b h1:iNjcivnc6lhbvJA3LD622NPrUponluJrBWPIwGG/3Bg= +github.com/kolo/xmlrpc v0.0.0-20201022064351-38db28db192b/go.mod h1:pcaDhQK0/NJZEvtCO0qQPPropqV0sJOJ6YW7X+9kRwM= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= @@ -1158,8 +1170,8 @@ 
github.com/lightstep/lightstep-tracer-common/golang/gogo v0.0.0-20190605223551-b github.com/lightstep/lightstep-tracer-go v0.18.0/go.mod h1:jlF1pusYV4pidLvZ+XD0UBX0ZE6WURAspgAczcDHrL4= github.com/lightstep/lightstep-tracer-go v0.18.1 h1:vi1F1IQ8N7hNWytK9DpJsUfQhGuNSc19z330K6vl4zk= github.com/lightstep/lightstep-tracer-go v0.18.1/go.mod h1:jlF1pusYV4pidLvZ+XD0UBX0ZE6WURAspgAczcDHrL4= -github.com/linode/linodego v0.32.0 h1:IK04cx2b/IwAAd6XLruf1Dl/n3dRXj87Uw/5qo6afVU= -github.com/linode/linodego v0.32.0/go.mod h1:BR0gVkCJffEdIGJSl6bHR80Ty+Uvg/2jkjmrWaFectM= +github.com/linode/linodego v1.1.0 h1:ZiFVUptlzuExtUbHZtXiN7I0dAOFQAyirBKb/6/n9n4= +github.com/linode/linodego v1.1.0/go.mod h1:x/7+BoaKd4unViBmS2umdjYyVAmpFtBtEXZ0wou7FYQ= github.com/lovoo/gcloud-opentracing v0.3.0 h1:nAeKG70rIsog0TelcEtt6KU0Y1s5qXtsDLnHp0urPLU= github.com/lovoo/gcloud-opentracing v0.3.0/go.mod h1:ZFqk2y38kMDDikZPAK7ynTTGuyt17nSPdS3K5e+ZTBY= github.com/lufia/iostat v1.1.0/go.mod h1:rEPNA0xXgjHQjuI5Cy05sLlS2oRcSlWHRLrvh/AQ+Pg= @@ -1264,15 +1276,16 @@ github.com/moby/sys/mountinfo v0.4.0/go.mod h1:rEr8tzG/lsIZHBtN/JjGG+LMYx9eXgW2J github.com/moby/sys/mountinfo v0.4.1/go.mod h1:rEr8tzG/lsIZHBtN/JjGG+LMYx9eXgW2JI+6q0qou+A= github.com/moby/sys/symlink v0.1.0/go.mod h1:GGDODQmbFOjFsXvfLVn3+ZRxkch54RkSiGqsZeMYowQ= github.com/moby/term v0.0.0-20200312100748-672ec06f55cd/go.mod h1:DdlQx2hp0Ss5/fLikoLlEeIYiATotOjgB//nb973jeo= -github.com/moby/term v0.0.0-20201216013528-df9cb8a40635 h1:rzf0wL0CHVc8CEsgyygG0Mn9CNCCPZqOPaz8RiiHYQk= -github.com/moby/term v0.0.0-20201216013528-df9cb8a40635/go.mod h1:FBS0z0QWA44HXygs7VXDUOGoN/1TV3RuWkLO04am3wc= +github.com/moby/term v0.0.0-20210619224110-3f7ff695adc6 h1:dcztxKSvZ4Id8iPpHERQBbIJfabdt4wUm5qy3wOL2Zc= +github.com/moby/term v0.0.0-20210619224110-3f7ff695adc6/go.mod h1:E2VnQOmVuvZB6UYnnDB0qG5Nq/1tD9acaOpo6xmt0Kw= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/reflect2 v0.0.0-20180320133207-05fbef0ca5da/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= -github.com/modern-go/reflect2 v1.0.1 h1:9f412s+6RmYXLWZSEzVVgPGK7C2PphHj5RJrvfx9AWI= github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= +github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= github.com/montanaflynn/stats v0.0.0-20171201202039-1bf9dbcd8cbe/go.mod h1:wL8QJuTMNUDYhXwkmfOly8iTdp5TEcJFWZD2D7SIkUc= github.com/morikuni/aec v0.0.0-20170113033406-39771216ff4c/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc= github.com/morikuni/aec v1.0.0 h1:nP9CBfwrvYnBRgY6qfDQkygYDmYwOilePFkwzv4dU8A= @@ -1358,15 +1371,18 @@ github.com/opencontainers/runc v0.1.1/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59P github.com/opencontainers/runc v1.0.0-rc8.0.20190926000215-3e425f80a8c9/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U= github.com/opencontainers/runc v1.0.0-rc9/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U= github.com/opencontainers/runc v1.0.0-rc93/go.mod h1:3NOsor4w32B2tC0Zbl8Knk4Wg84SM2ImC1fxBuqJ/H0= 
+github.com/opencontainers/runc v1.0.2/go.mod h1:aTaHFFwQXuA71CiyxOdFFIorAoemI04suvGRQFzWTD0= github.com/opencontainers/runtime-spec v0.1.2-0.20190507144316-5b71a03e2700/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= github.com/opencontainers/runtime-spec v1.0.1/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= github.com/opencontainers/runtime-spec v1.0.2-0.20190207185410-29686dbc5559/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= github.com/opencontainers/runtime-spec v1.0.2/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= -github.com/opencontainers/runtime-spec v1.0.3-0.20200929063507-e6143ca7d51d h1:pNa8metDkwZjb9g4T8s+krQ+HRgZAkqnXml+wNir/+s= github.com/opencontainers/runtime-spec v1.0.3-0.20200929063507-e6143ca7d51d/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= +github.com/opencontainers/runtime-spec v1.0.3-0.20210326190908-1c3f411f0417 h1:3snG66yBm59tKhhSPQrQ/0bCrv1LQbKt40LnUPiUxdc= +github.com/opencontainers/runtime-spec v1.0.3-0.20210326190908-1c3f411f0417/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= github.com/opencontainers/runtime-tools v0.0.0-20181011054405-1d69bd0f9c39/go.mod h1:r3f7wjNzSs2extwzU3Y+6pKfobzPh+kKFJ3ofN+3nfs= github.com/opencontainers/selinux v1.6.0/go.mod h1:VVGKuOLlE7v4PJyT6h7mNWvq1rzqiriPsEqVhc+svHE= github.com/opencontainers/selinux v1.8.0/go.mod h1:RScLhm78qiWa2gbVCcGkC7tCGdgk3ogry1nUQF8Evvo= +github.com/opencontainers/selinux v1.8.2/go.mod h1:MUIHuUEvKB1wtJjQdOyYRgOnLD2xAPP8dBsCoU0KuF8= github.com/opentracing-contrib/go-grpc v0.0.0-20180928155321-4b5a12d3ff02/go.mod h1:JNdpVEzCpXBgIiv4ds+TzhN1hrtxq6ClLrTlT9OQRSc= github.com/opentracing-contrib/go-grpc v0.0.0-20210225150812-73cb765af46e h1:4cPxUYdgaGzZIT5/j0IfqOrrXmq6bG8AwvwisMXpdrg= github.com/opentracing-contrib/go-grpc v0.0.0-20210225150812-73cb765af46e/go.mod h1:DYR5Eij8rJl8h7gblRrOZ8g0kW1umSpKqYIBTgeDtLo= @@ -1475,14 +1491,16 @@ github.com/prometheus/common v0.21.0/go.mod h1:U+gB1OBLb1lF3O42bTCL+FK18tX9Oar16 github.com/prometheus/common v0.23.0/go.mod h1:H6QK/N6XVT42whUeIdI3dp36w49c+/iMDk7UAI2qm7Q= github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc= github.com/prometheus/common v0.29.0/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls= -github.com/prometheus/common v0.30.0 h1:JEkYlQnpzrzQFxi6gnukFPdQ+ac82oRhzMcIduJu/Ug= github.com/prometheus/common v0.30.0/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls= +github.com/prometheus/common v0.32.1 h1:hWIdL3N2HoUx3B8j3YN9mWor0qhY/NlEKZEaXxuIRh4= +github.com/prometheus/common v0.32.1/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls= github.com/prometheus/common/sigv4 v0.1.0 h1:qoVebwtwwEhS85Czm2dSROY5fTo2PAPEVdDeppTwGX4= github.com/prometheus/common/sigv4 v0.1.0/go.mod h1:2Jkxxk9yYvCkE5G1sQT7GuEXm57JrvHu9k5YwTjsNtI= github.com/prometheus/exporter-toolkit v0.5.0/go.mod h1:OCkM4805mmisBhLmVFw858QYi3v0wKdY6/UxrT0pZVg= github.com/prometheus/exporter-toolkit v0.5.1/go.mod h1:OCkM4805mmisBhLmVFw858QYi3v0wKdY6/UxrT0pZVg= -github.com/prometheus/exporter-toolkit v0.6.1 h1:Aqk75wQD92N9CqmTlZwjKwq6272nOGrWIbc8Z7+xQO0= github.com/prometheus/exporter-toolkit v0.6.1/go.mod h1:ZUBIj498ePooX9t/2xtDjeQYwvRpiPP2lh5u4iblj2g= +github.com/prometheus/exporter-toolkit v0.7.0 h1:XtYeVeeC5daG4txbc9+mieKq+/AK4gtIBLl9Mulrjnk= +github.com/prometheus/exporter-toolkit v0.7.0/go.mod h1:ZUBIj498ePooX9t/2xtDjeQYwvRpiPP2lh5u4iblj2g= github.com/prometheus/node_exporter v1.0.0-rc.0.0.20200428091818-01054558c289 h1:dTUS1vaLWq+Y6XKOTnrFpoVsQKLCbCp1OLj24TDi7oM= github.com/prometheus/node_exporter 
v1.0.0-rc.0.0.20200428091818-01054558c289/go.mod h1:FGbBv5OPKjch+jNUJmEQpMZytIdyW0NdBtWFcfSKusc= github.com/prometheus/procfs v0.0.0-20180125133057-cb4147076ac7/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= @@ -1503,8 +1521,8 @@ github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4O github.com/prometheus/procfs v0.2.0/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= github.com/prometheus/procfs v0.6.0 h1:mxy4L2jP6qMonqmq+aTtOx1ifVWUgG/TAmntgbh3xv4= github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= -github.com/prometheus/prometheus v1.8.2-0.20210914090109-37468d88dce8 h1:U8ZpFGP11pZi0ZavWWGeAqck3dVT9AY6zbr4fbBftjA= -github.com/prometheus/prometheus v1.8.2-0.20210914090109-37468d88dce8/go.mod h1:02eURgmH1YsgJ2TtWNUGMQMCnLxmtHH9nOgvYxIjGAo= +github.com/prometheus/prometheus v1.8.2-0.20211101135822-b862218389fc h1:CYE+toBs8lmn7i8J4gWqb2CVn4qvvhiMHtr35VCGOWI= +github.com/prometheus/prometheus v1.8.2-0.20211101135822-b862218389fc/go.mod h1:07FWuvRzfovrwH/yP4gxJesTNGOj1RWoBDIkgWfthjk= github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU= github.com/rafaeljusto/redigomock v0.0.0-20190202135759-257e089e14a1/go.mod h1:JaY6n2sDr+z2WTsXkOmNRUfDy6FN0L6Nk7x06ndm4tY= github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= @@ -1567,7 +1585,7 @@ github.com/smartystreets/assertions v1.0.1/go.mod h1:kHHU4qYBaI3q23Pp3VPrmWhuIUr github.com/smartystreets/goconvey v0.0.0-20190330032615-68dc04aab96a/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= github.com/smartystreets/goconvey v1.6.4 h1:fv0U8FUIMPNf1L9lnHLvLhgicrIVChEkdzIKYqbNC9s= github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= -github.com/snowflakedb/gosnowflake v1.3.4/go.mod h1:NsRq2QeiMUuoNUJhp5Q6xGC4uBrsS9g6LwZVEkTWgsE= +github.com/snowflakedb/gosnowflake v1.3.13/go.mod h1:6nfka9aTXkUNha1p1cjeeyjDvcyh7jfjp0l8kGpDBok= github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= github.com/soheilhy/cmux v0.1.5-0.20210205191134-5ec6847320e5/go.mod h1:T7TcVDs9LWfQgPlPsdngu6I6QIoyIFZDDC6sNE1GqG0= github.com/soheilhy/cmux v0.1.5 h1:jjzc5WVemNEDTLwv9tlmemhC73tI08BNOIGwBOo10Js= @@ -1719,6 +1737,7 @@ github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9de github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= +github.com/yuin/goldmark v1.4.0/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= github.com/yuin/gopher-lua v0.0.0-20200816102855-ee81675732da h1:NimzV1aGyq29m5ukMK0AMWEhFaL/lrEOaephfuoiARg= github.com/yuin/gopher-lua v0.0.0-20200816102855-ee81675732da/go.mod h1:E1AXubJBdNmFERAOucpDIxNzeGfLzg0mYh+UfMWdChA= github.com/yvasiyarov/go-metrics v0.0.0-20140926110328-57bccd1ccd43/go.mod h1:aX5oPXxHm3bOH+xeAttToC8pqch2ScQN/JoXYupl6xs= @@ -1826,8 +1845,9 @@ go.uber.org/atomic v1.9.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= go.uber.org/automaxprocs v1.2.0/go.mod h1:YfO3fm683kQpzETxlTGZhGIVmXAhaw3gxeBADbpZtnU= go.uber.org/automaxprocs v1.4.0 h1:CpDZl6aOlLhReez+8S3eEotD7Jx0Os++lemPlMULQP0= go.uber.org/automaxprocs v1.4.0/go.mod h1:/mTEdr7LvHhs0v7mjdxDreTz1OG5zdZGqgOnhWiR/+Q= -go.uber.org/goleak v1.1.10 h1:z+mqJhf6ss6BSfSM671tgKyZBFPTTJM+HLxnhPC3wu0= 
go.uber.org/goleak v1.1.10/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A= +go.uber.org/goleak v1.1.12 h1:gZAh5/EyT/HQwlpkCy6wTpqfH9H8Lz8zbm3dZh+OyzA= +go.uber.org/goleak v1.1.12/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ= go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= go.uber.org/multierr v1.3.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4= go.uber.org/multierr v1.4.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4= @@ -1866,9 +1886,9 @@ golang.org/x/crypto v0.0.0-20190923035154-9ee001bba392/go.mod h1:/lpIB1dKB+9EgE3 golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20191112222119-e1110fd1c708/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20191202143827-86a70503ff7e/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20191206172530-e9b2fee46413/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200302210943-78000ba7a073/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200323165209-0ec3e9974c59/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20200510223506-06a226fb4e37/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200709230013-948cd5f35899/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200728195943-123391ffb6de/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= @@ -1996,8 +2016,10 @@ golang.org/x/net v0.0.0-20210520170846-37e1c6afe023/go.mod h1:9nx3DQGgdP8bBQD5qx golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20210614182718-04defd469f4e/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20210726213435-c6fcb2dbf985/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.0.0-20210903162142-ad29c8ab022f h1:w6wWR0H+nyVpbSAQbzVEIACVyr/h8l/BEkY6Sokc7Eg= +golang.org/x/net v0.0.0-20210805182204-aaa1db679c0d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20210903162142-ad29c8ab022f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20211020060615-d418f374d309 h1:A0lJIi+hcTR6aajJH4YqKWwohY4aW9RO7oRMcdv+HKI= +golang.org/x/net v0.0.0-20211020060615-d418f374d309/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20181106182150-f42d05182288/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -2016,8 +2038,9 @@ golang.org/x/oauth2 v0.0.0-20210427180440-81ed05c6b58c/go.mod h1:KelEdhl1UZF7XfJ golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20210628180205-a41e5a781914/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20210805134026-6f1e6394065a/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f h1:Qmd2pbz05z7z6lm0DrgQVVPuBm92jqujBKMHMOlOQEw= golang.org/x/oauth2 
v0.0.0-20210819190943-2bc19b11175f/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20211005180243-6b3c2da341f1 h1:B333XXssMuKQeBwiNODx4TupZy7bf4sxFZnN2ZOcvUE= +golang.org/x/oauth2 v0.0.0-20211005180243-6b3c2da341f1/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -2117,7 +2140,6 @@ golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20200817155316-9781c653f443/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200826173525-f9321e4c35a6/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200828194041-157a740278f4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200831180312-196b9ba8737a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200909081042-eff7692f9009/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200916030750-2334cc1a136f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -2147,6 +2169,7 @@ golang.org/x/sys v0.0.0-20210324051608-47abb6519492/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210403161142-5e06dd20ab57/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210426230700-d19ff857e887/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210503080704-8803ae5d1324/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210514084401-e8d321eab015/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= @@ -2156,9 +2179,12 @@ golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210806184541-e5e7981a1069/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210809222454-d867a43fc93e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210823070655-63515b42dcdf/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210906170528-6f6e22806c34 h1:GkvMjFtXUmahfDtashnc1mnrCtuBVcwse5QV2lUk/tI= -golang.org/x/sys v0.0.0-20210906170528-6f6e22806c34/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210908233432-aa78b53d3365/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211007075335-d3039528d8ac/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211020174200-9d6173849985 h1:LOlKVhfDyahgmqa97awczplwkjzNaELFg3zRIJ13RYo= +golang.org/x/sys 
v0.0.0-20211020174200-9d6173849985/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210220032956-6a3ed077a48d h1:SZxvLBoTP5yHO3Frd4z4vrF+DBX9vMVanchswa69toE= @@ -2269,8 +2295,9 @@ golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.3/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.4/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= -golang.org/x/tools v0.1.5 h1:ouewzE6p+/VEB31YYnTbEJdi8pFqKp4P4n85vwo3DHA= golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.7 h1:6j8CgantCy3yc8JGBqkDLMKWqZ0RDU2g1HVgacojGWQ= +golang.org/x/tools v0.1.7/go.mod h1:LGqMHiF4EqQNHR1JncWGqT5BVaXmza+X+BDGol+dOxo= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -2317,8 +2344,11 @@ google.golang.org/api v0.48.0/go.mod h1:71Pr1vy+TAZRPkPs/xlCf5SsU8WjuAWv1Pfjbtuk google.golang.org/api v0.50.0/go.mod h1:4bNT5pAuq5ji4SRZm+5QIkjny9JAyVD/3gaSihNefaw= google.golang.org/api v0.51.0/go.mod h1:t4HdrdoNgyN5cbEfm7Lum0lcLDLiise1F8qDKX00sOU= google.golang.org/api v0.54.0/go.mod h1:7C4bFFOvVDGXjfDTAsgGwDgAxRDeQ4X8NvUedIt6z3k= -google.golang.org/api v0.56.0 h1:08F9XVYTLOGeSQb3xI9C0gXMuQanhdGed0cWFhDozbI= +google.golang.org/api v0.55.0/go.mod h1:38yMfeP1kfjsl8isn0tliTjIb1rJXcQi4UXlbqivdVE= google.golang.org/api v0.56.0/go.mod h1:38yMfeP1kfjsl8isn0tliTjIb1rJXcQi4UXlbqivdVE= +google.golang.org/api v0.57.0/go.mod h1:dVPlbZyBo2/OjBpmvNdpn2GRm6rPy75jyU7bmhdrMgI= +google.golang.org/api v0.59.0 h1:fPfFO7gttlXYo2ALuD3HxJzh8vaF++4youI0BkFL6GE= +google.golang.org/api v0.59.0/go.mod h1:sT2boj7M9YJxZzgeZqXogmhfmRWDtPzT31xkieUbuZU= google.golang.org/appengine v1.2.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.3.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= @@ -2398,8 +2428,13 @@ google.golang.org/genproto v0.0.0-20210805201207-89edb61ffb67/go.mod h1:ob2IJxKr google.golang.org/genproto v0.0.0-20210813162853-db860fec028c/go.mod h1:cFeNkxwySK631ADgubI+/XFU/xp8FD5KIVV4rj8UC5w= google.golang.org/genproto v0.0.0-20210821163610-241b8fcbd6c8/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= google.golang.org/genproto v0.0.0-20210828152312-66f60bf46e71/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= -google.golang.org/genproto v0.0.0-20210903162649-d08c68adba83 h1:3V2dxSZpz4zozWWUq36vUxXEKnSYitEH2LdsAx+RUmg= +google.golang.org/genproto v0.0.0-20210831024726-fe130286e0e2/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= google.golang.org/genproto v0.0.0-20210903162649-d08c68adba83/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= +google.golang.org/genproto v0.0.0-20210909211513-a8c4777a87af/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= +google.golang.org/genproto v0.0.0-20210924002016-3dee208752a0/go.mod 
h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20211008145708-270636b82663/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20211020151524-b7c3a969101a h1:8maMHMQp9NroHXhc3HelFX9Ay2lWlXLcdH5mw5Biz0s= +google.golang.org/genproto v0.0.0-20211020151524-b7c3a969101a/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= google.golang.org/grpc v1.29.1 h1:EC2SB8S04d2r73uptxphDSUG+kTKVgjRPF+N3xpxRB4= google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw= @@ -2483,22 +2518,22 @@ k8s.io/api v0.0.0-20191115095533-47f6de673b26/go.mod h1:iA/8arsvelvo4IDqIhX4IbjT k8s.io/api v0.20.1/go.mod h1:KqwcCVogGxQY3nBlRpwt+wpAMF/KjaCc7RpywacvqUo= k8s.io/api v0.20.4/go.mod h1:++lNL1AJMkDymriNniQsWRkMDzRaX2Y/POTUi8yvqYQ= k8s.io/api v0.20.6/go.mod h1:X9e8Qag6JV/bL5G6bU8sdVRltWKmdHsFUGS3eVndqE8= -k8s.io/api v0.22.1 h1:ISu3tD/jRhYfSW8jI/Q1e+lRxkR7w9UwQEZ7FgslrwY= -k8s.io/api v0.22.1/go.mod h1:bh13rkTp3F1XEaLGykbyRD2QaTTzPm0e/BMd8ptFONY= +k8s.io/api v0.22.2 h1:M8ZzAD0V6725Fjg53fKeTJxGsJvRbk4TEm/fexHMtfw= +k8s.io/api v0.22.2/go.mod h1:y3ydYpLJAaDI+BbSe2xmGcqxiWHmWjkEeIbiwHvnPR8= k8s.io/apimachinery v0.0.0-20191115015347-3c7067801da2/go.mod h1:dXFS2zaQR8fyzuvRdJDHw2Aerij/yVGJSre0bZQSVJA= k8s.io/apimachinery v0.20.1/go.mod h1:WlLqWAHZGg07AeltaI0MV5uk1Omp8xaN0JGLY6gkRpU= k8s.io/apimachinery v0.20.4/go.mod h1:WlLqWAHZGg07AeltaI0MV5uk1Omp8xaN0JGLY6gkRpU= k8s.io/apimachinery v0.20.6/go.mod h1:ejZXtW1Ra6V1O5H8xPBGz+T3+4gfkTCeExAHKU57MAc= -k8s.io/apimachinery v0.22.1 h1:DTARnyzmdHMz7bFWFDDm22AM4pLWTQECMpRTFu2d2OM= -k8s.io/apimachinery v0.22.1/go.mod h1:O3oNtNadZdeOMxHFVxOreoznohCpy0z6mocxbZr7oJ0= +k8s.io/apimachinery v0.22.2 h1:ejz6y/zNma8clPVfNDLnPbleBo6MpoFy/HBiBqCouVk= +k8s.io/apimachinery v0.22.2/go.mod h1:O3oNtNadZdeOMxHFVxOreoznohCpy0z6mocxbZr7oJ0= k8s.io/apiserver v0.20.1/go.mod h1:ro5QHeQkgMS7ZGpvf4tSMx6bBOgPfE+f52KwvXfScaU= k8s.io/apiserver v0.20.4/go.mod h1:Mc80thBKOyy7tbvFtB4kJv1kbdD0eIH8k8vianJcbFM= k8s.io/apiserver v0.20.6/go.mod h1:QIJXNt6i6JB+0YQRNcS0hdRHJlMhflFmsBDeSgT1r8Q= k8s.io/client-go v0.20.1/go.mod h1:/zcHdt1TeWSd5HoUe6elJmHSQ6uLLgp4bIJHVEuy+/Y= k8s.io/client-go v0.20.4/go.mod h1:LiMv25ND1gLUdBeYxBIwKpkSC5IsozMMmOOeSJboP+k= k8s.io/client-go v0.20.6/go.mod h1:nNQMnOvEUEsOzRRFIIkdmYOjAZrC8bgq0ExboWSU1I0= -k8s.io/client-go v0.22.1 h1:jW0ZSHi8wW260FvcXHkIa0NLxFBQszTlhiAVsU5mopw= -k8s.io/client-go v0.22.1/go.mod h1:BquC5A4UOo4qVDUtoc04/+Nxp1MeHcVc1HJm1KmG8kk= +k8s.io/client-go v0.22.2 h1:DaSQgs02aCC1QcwUdkKZWOeaVsQjYvWv8ZazcZ6JcHc= +k8s.io/client-go v0.22.2/go.mod h1:sAlhrkVDf50ZHx6z4K0S40wISNTarf1r800F+RlCF6U= k8s.io/component-base v0.20.1/go.mod h1:guxkoJnNoh8LNrbtiQOlyp2Y2XFCZQmrcg2n/DeYNLk= k8s.io/component-base v0.20.4/go.mod h1:t4p9EdiagbVCJKrQ1RsA5/V4rFQNDfRlevJajlGwgjI= k8s.io/component-base v0.20.6/go.mod h1:6f1MPBAeI+mvuts3sIdtpjljHWBQ2cIy38oBIWMYnrM= @@ -2515,8 +2550,8 @@ k8s.io/kube-openapi v0.0.0-20210421082810-95288971da7e/go.mod h1:vHXdDvt9+2spS2R k8s.io/kubernetes v1.13.0/go.mod h1:ocZa8+6APFNC2tX1DZASIbocyYT5jHzqFVsY5aoB7Jk= k8s.io/utils v0.0.0-20191114200735-6ca3b61696b6/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew= k8s.io/utils v0.0.0-20201110183641-67b214c5f920/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= -k8s.io/utils v0.0.0-20210707171843-4b05e18ac7d9 h1:imL9YgXQ9p7xmPzHFm/vVd/cF78jad+n4wK1ABwYtMM= -k8s.io/utils v0.0.0-20210707171843-4b05e18ac7d9/go.mod 
h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= +k8s.io/utils v0.0.0-20210819203725-bdf08cb9a70a h1:8dYfu/Fc9Gz2rNJKB9IQRGgQOh2clmRzNIPPY1xLY5g= +k8s.io/utils v0.0.0-20210819203725-bdf08cb9a70a/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= rsc.io/pdf v0.1.1/go.mod h1:n8OzWcQ6Sp37PL01nO98y4iUCRdTGarVfzxY20ICaU4= rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= diff --git a/pkg/rules/remotewrite/remotewrite.go b/pkg/rules/remotewrite/remotewrite.go deleted file mode 100644 index c23e4b10f1..0000000000 --- a/pkg/rules/remotewrite/remotewrite.go +++ /dev/null @@ -1,44 +0,0 @@ -// Copyright (c) The Thanos Authors. -// Licensed under the Apache License 2.0. - -package remotewrite - -import ( - "time" - - "github.com/pkg/errors" - - "github.com/go-kit/kit/log" - "github.com/prometheus/client_golang/prometheus" - "github.com/prometheus/prometheus/config" - "github.com/prometheus/prometheus/storage" - "github.com/prometheus/prometheus/storage/remote" - "gopkg.in/yaml.v2" -) - -// LoadRemoteWriteConfig prepares a RemoteWriteConfig instance from a given YAML config. -func LoadRemoteWriteConfig(configYAML []byte) (config.RemoteWriteConfig, error) { - var cfg config.RemoteWriteConfig - if err := yaml.Unmarshal(configYAML, &cfg); err != nil { - return cfg, err - } - return cfg, nil -} - -// NewFanoutStorage creates a storage that fans-out to both the WAL and a configured remote storage. -// The remote storage tails the WAL and sends the metrics it reads using Prometheus' remote_write. -func NewFanoutStorage(logger log.Logger, reg prometheus.Registerer, walDir string, rwConfig *config.RemoteWriteConfig) (storage.Storage, error) { - walStore, err := NewStorage(logger, reg, walDir) - if err != nil { - return nil, err - } - // flushDeadline is set to 1m, but it is for metadata watcher only so not used here. - remoteStore := remote.NewStorage(logger, reg, walStore.StartTime, walStore.Directory(), 1*time.Minute, nil) - if err := remoteStore.ApplyConfig(&config.Config{ - GlobalConfig: config.DefaultGlobalConfig, - RemoteWriteConfigs: []*config.RemoteWriteConfig{rwConfig}, - }); err != nil { - return nil, errors.Wrap(err, "applying config to remote storage") - } - return storage.NewFanout(logger, walStore, remoteStore), nil -} diff --git a/pkg/rules/remotewrite/series.go b/pkg/rules/remotewrite/series.go deleted file mode 100644 index 02063d8bfc..0000000000 --- a/pkg/rules/remotewrite/series.go +++ /dev/null @@ -1,259 +0,0 @@ -// Copyright (c) The Thanos Authors. -// Licensed under the Apache License 2.0. - -// This is copied from https://github.com/grafana/agent/blob/a23bd5cf27c2ac99695b7449d38fb12444941a1c/pkg/prom/wal/series.go -// TODO(idoqo): Migrate to prometheus package when https://github.com/prometheus/prometheus/pull/8785 is ready. -package remotewrite - -import ( - "sync" - - "github.com/prometheus/prometheus/pkg/labels" -) - -type memSeries struct { - sync.Mutex - - ref uint64 - lset labels.Labels - lastTs int64 - - // TODO(rfratto): this solution below isn't perfect, and there's still - // the possibility for a series to be deleted before it's - // completely gone from the WAL. Rather, we should have gc return - // a "should delete" map and be given a "deleted" map. - // If a series that is going to be marked for deletion is in the - // "deleted" map, then it should be deleted instead. - // - // The "deleted" map will be populated by the Truncate function. 
- // It will be cleared with every call to gc. - - // willDelete marks a series as to be deleted on the next garbage - // collection. If it receives a write, willDelete is disabled. - willDelete bool - - // Whether this series has samples waiting to be committed to the WAL - pendingCommit bool -} - -func (s *memSeries) updateTs(ts int64) { - s.lastTs = ts - s.willDelete = false - s.pendingCommit = true -} - -// seriesHashmap is a simple hashmap for memSeries by their label set. It is -// built on top of a regular hashmap and holds a slice of series to resolve -// hash collisions. Its methods require the hash to be submitted with it to -// avoid re-computations throughout the code. -// -// This code is copied from the Prometheus TSDB. -type seriesHashmap map[uint64][]*memSeries - -func (m seriesHashmap) get(hash uint64, lset labels.Labels) *memSeries { - for _, s := range m[hash] { - if labels.Equal(s.lset, lset) { - return s - } - } - return nil -} - -func (m seriesHashmap) set(hash uint64, s *memSeries) { - l := m[hash] - for i, prev := range l { - if labels.Equal(prev.lset, s.lset) { - l[i] = s - return - } - } - m[hash] = append(l, s) -} - -func (m seriesHashmap) del(hash uint64, ref uint64) { - var rem []*memSeries - for _, s := range m[hash] { - if s.ref != ref { - rem = append(rem, s) - } - } - if len(rem) == 0 { - delete(m, hash) - } else { - m[hash] = rem - } -} - -const ( - // defaultStripeSize is the default number of entries to allocate in the - // stripeSeries hash map. - defaultStripeSize = 1 << 14 -) - -// stripeSeries locks modulo ranges of IDs and hashes to reduce lock contention. -// The locks are padded to not be on the same cache line. Filling the padded space -// with the maps was profiled to be slower – likely due to the additional pointer -// dereferences. -// -// This code is copied from the Prometheus TSDB. -type stripeSeries struct { - size int - series []map[uint64]*memSeries - hashes []seriesHashmap - locks []stripeLock -} - -type stripeLock struct { - sync.RWMutex - // Padding to avoid multiple locks being on the same cache line. - _ [40]byte -} - -func newStripeSeries() *stripeSeries { - stripeSize := defaultStripeSize - s := &stripeSeries{ - size: stripeSize, - series: make([]map[uint64]*memSeries, stripeSize), - hashes: make([]seriesHashmap, stripeSize), - locks: make([]stripeLock, stripeSize), - } - - for i := range s.series { - s.series[i] = map[uint64]*memSeries{} - } - for i := range s.hashes { - s.hashes[i] = seriesHashmap{} - } - return s -} - -// gc garbage collects old chunks that are strictly before mint and removes -// series entirely that have no chunks left. -func (s *stripeSeries) gc(mint int64) map[uint64]struct{} { - var ( - deleted = map[uint64]struct{}{} - ) - - // Run through all series and find series that haven't been written to - // since mint. Mark those series as deleted and store their ID. - for i := 0; i < s.size; i++ { - s.locks[i].Lock() - - for _, series := range s.series[i] { - series.Lock() - seriesHash := series.lset.Hash() - - // If the series has received a write after mint, there's still - // data and it's not completely gone yet. - if series.lastTs >= mint || series.pendingCommit { - series.willDelete = false - series.Unlock() - continue - } - - // The series hasn't received any data and *might* be gone, but - // we want to give it an opportunity to come back before marking - // it as deleted, so we wait one more GC cycle. 
- if !series.willDelete { - series.willDelete = true - series.Unlock() - continue - } - - // The series is gone entirely. We'll need to delete the label - // hash (if one exists) so we'll obtain a lock for that too. - j := int(seriesHash) & (s.size - 1) - if i != j { - s.locks[j].Lock() - } - - deleted[series.ref] = struct{}{} - delete(s.series[i], series.ref) - s.hashes[j].del(seriesHash, series.ref) - - if i != j { - s.locks[j].Unlock() - } - - series.Unlock() - } - - s.locks[i].Unlock() - } - - return deleted -} - -func (s *stripeSeries) getByID(id uint64) *memSeries { - i := id & uint64(s.size-1) - - s.locks[i].RLock() - series := s.series[i][id] - s.locks[i].RUnlock() - - return series -} - -func (s *stripeSeries) getByHash(hash uint64, lset labels.Labels) *memSeries { - i := hash & uint64(s.size-1) - - s.locks[i].RLock() - series := s.hashes[i].get(hash, lset) - s.locks[i].RUnlock() - - return series -} - -func (s *stripeSeries) set(hash uint64, series *memSeries) { - i := hash & uint64(s.size-1) - s.locks[i].Lock() - s.hashes[i].set(hash, series) - s.locks[i].Unlock() - - i = series.ref & uint64(s.size-1) - s.locks[i].Lock() - s.series[i][series.ref] = series - s.locks[i].Unlock() -} - -func (s *stripeSeries) iterator() *stripeSeriesIterator { - return &stripeSeriesIterator{s} -} - -// stripeSeriesIterator allows to iterate over series through a channel. -// The channel should always be completely consumed to not leak. -type stripeSeriesIterator struct { - s *stripeSeries -} - -func (it *stripeSeriesIterator) Channel() <-chan *memSeries { - ret := make(chan *memSeries) - - go func() { - for i := 0; i < it.s.size; i++ { - it.s.locks[i].RLock() - - for _, series := range it.s.series[i] { - series.Lock() - - j := int(series.lset.Hash()) & (it.s.size - 1) - if i != j { - it.s.locks[j].RLock() - } - - ret <- series - - if i != j { - it.s.locks[j].RUnlock() - } - series.Unlock() - } - - it.s.locks[i].RUnlock() - } - - close(ret) - }() - - return ret -} diff --git a/pkg/rules/remotewrite/util.go b/pkg/rules/remotewrite/util.go deleted file mode 100644 index 86f3ac87bc..0000000000 --- a/pkg/rules/remotewrite/util.go +++ /dev/null @@ -1,14 +0,0 @@ -// Copyright (c) The Thanos Authors. -// Licensed under the Apache License 2.0. - -package remotewrite - -import ( - "path/filepath" -) - -// SubDirectory returns the subdirectory within a Storage directory used for -// the Prometheus WAL. -func SubDirectory(base string) string { - return filepath.Join(base, "wal") -} diff --git a/pkg/rules/remotewrite/wal.go b/pkg/rules/remotewrite/wal.go deleted file mode 100644 index bf2c0f033a..0000000000 --- a/pkg/rules/remotewrite/wal.go +++ /dev/null @@ -1,697 +0,0 @@ -// Copyright (c) The Thanos Authors. -// Licensed under the Apache License 2.0. - -// This is copied from https://github.com/grafana/agent/blob/a23bd5cf27c2ac99695b7449d38fb12444941a1c/pkg/prom/wal/wal.go -// TODO(idoqo): Migrate to prometheus package when https://github.com/prometheus/prometheus/pull/8785 is ready. 
- -package remotewrite - -import ( - "context" - "fmt" - "math" - "sync" - "time" - - "github.com/go-kit/kit/log" - "github.com/go-kit/kit/log/level" - "github.com/pkg/errors" - "github.com/prometheus/client_golang/prometheus" - "github.com/prometheus/client_golang/prometheus/promauto" - "github.com/prometheus/prometheus/pkg/exemplar" - "github.com/prometheus/prometheus/pkg/labels" - "github.com/prometheus/prometheus/pkg/timestamp" - "github.com/prometheus/prometheus/pkg/value" - "github.com/prometheus/prometheus/storage" - "github.com/prometheus/prometheus/tsdb" - "github.com/prometheus/prometheus/tsdb/record" - "github.com/prometheus/prometheus/tsdb/wal" -) - -// ErrWALClosed is an error returned when a WAL operation can't run because the -// storage has already been closed. -var ErrWALClosed = fmt.Errorf("WAL storage closed") - -type storageMetrics struct { - r prometheus.Registerer - - numActiveSeries prometheus.Gauge - numDeletedSeries prometheus.Gauge - totalCreatedSeries prometheus.Counter - totalRemovedSeries prometheus.Counter - totalAppendedSamples prometheus.Counter -} - -func newStorageMetrics(r prometheus.Registerer) *storageMetrics { - m := storageMetrics{r: r} - m.numActiveSeries = promauto.With(r).NewGauge(prometheus.GaugeOpts{ - Name: "thanos_wal_storage_active_series", - Help: "Current number of active series being tracked by the WAL storage", - }) - - m.numDeletedSeries = promauto.With(r).NewGauge(prometheus.GaugeOpts{ - Name: "thanos_wal_storage_deleted_series", - Help: "Current number of series marked for deletion from memory", - }) - - m.totalCreatedSeries = promauto.With(r).NewCounter(prometheus.CounterOpts{ - Name: "thanos_wal_storage_created_series_total", - Help: "Total number of created series appended to the WAL", - }) - - m.totalRemovedSeries = promauto.With(r).NewCounter(prometheus.CounterOpts{ - Name: "thanos_wal_storage_removed_series_total", - Help: "Total number of created series removed from the WAL", - }) - - m.totalAppendedSamples = promauto.With(r).NewCounter(prometheus.CounterOpts{ - Name: "thanos_wal_samples_appended_total", - Help: "Total number of samples appended to the WAL", - }) - - return &m -} - -func (m *storageMetrics) Unregister() { - if m.r == nil { - return - } - cs := []prometheus.Collector{ - m.numActiveSeries, - m.numDeletedSeries, - m.totalCreatedSeries, - m.totalRemovedSeries, - } - for _, c := range cs { - m.r.Unregister(c) - } -} - -// Storage implements storage.Storage, and just writes to the WAL. -type Storage struct { - // Embed Queryable/ChunkQueryable for compatibility, but don't actually implement it. - storage.Queryable - storage.ChunkQueryable - - // Operations against the WAL must be protected by a mutex so it doesn't get - // closed in the middle of an operation. Other operations are concurrency-safe, so we - // use a RWMutex to allow multiple usages of the WAL at once. If the WAL is closed, all - // operations that change the WAL must fail. - walMtx sync.RWMutex - walClosed bool - - path string - wal *wal.WAL - logger log.Logger - - appenderPool sync.Pool - bufPool sync.Pool - - mtx sync.RWMutex - nextRef uint64 - series *stripeSeries - - deletedMtx sync.Mutex - deleted map[uint64]int // Deleted series, and what WAL segment they must be kept until. - - metrics *storageMetrics -} - -// NewStorage makes a new Storage. 
-func NewStorage(logger log.Logger, registerer prometheus.Registerer, path string) (*Storage, error) { - w, err := wal.NewSize(logger, registerer, SubDirectory(path), wal.DefaultSegmentSize, true) - if err != nil { - return nil, err - } - - storage := &Storage{ - path: path, - wal: w, - logger: logger, - deleted: map[uint64]int{}, - series: newStripeSeries(), - metrics: newStorageMetrics(registerer), - - // The first ref ID must be non-zero, as the scraping code treats 0 as a - // non-existent ID and won't cache it. - nextRef: 1, - } - - storage.bufPool.New = func() interface{} { - b := make([]byte, 0, 1024) - return b - } - - storage.appenderPool.New = func() interface{} { - return &appender{ - w: storage, - series: make([]record.RefSeries, 0, 100), - samples: make([]record.RefSample, 0, 100), - } - } - - if err := storage.replayWAL(); err != nil { - level.Warn(storage.logger).Log("msg", "encountered WAL read error, attempting repair", "err", err) - if err := w.Repair(err); err != nil { - return nil, errors.Wrap(err, "repair corrupted WAL") - } - } - - return storage, nil -} - -func (w *Storage) replayWAL() error { - w.walMtx.RLock() - defer w.walMtx.RUnlock() - - if w.walClosed { - return ErrWALClosed - } - - level.Info(w.logger).Log("msg", "replaying WAL, this may take a while", "dir", w.wal.Dir()) - dir, startFrom, err := wal.LastCheckpoint(w.wal.Dir()) - if err != nil && err != record.ErrNotFound { - return errors.Wrap(err, "find last checkpoint") - } - - if err == nil { - sr, err := wal.NewSegmentsReader(dir) - if err != nil { - return errors.Wrap(err, "open checkpoint") - } - defer func() { - if err := sr.Close(); err != nil { - level.Warn(w.logger).Log("msg", "error while closing the wal segments reader", "err", err) - } - }() - - // A corrupted checkpoint is a hard error for now and requires user - // intervention. There's likely little data that can be recovered anyway. - if err := w.loadWAL(wal.NewReader(sr)); err != nil { - return errors.Wrap(err, "backfill checkpoint") - } - startFrom++ - level.Info(w.logger).Log("msg", "WAL checkpoint loaded") - } - - // Find the last segment. - _, last, err := wal.Segments(w.wal.Dir()) - if err != nil { - return errors.Wrap(err, "finding WAL segments") - } - - // Backfill segments from the most recent checkpoint onwards. 
- for i := startFrom; i <= last; i++ { - s, err := wal.OpenReadSegment(wal.SegmentName(w.wal.Dir(), i)) - if err != nil { - return errors.Wrap(err, fmt.Sprintf("open WAL segment: %d", i)) - } - - sr := wal.NewSegmentBufReader(s) - err = w.loadWAL(wal.NewReader(sr)) - if err := sr.Close(); err != nil { - level.Warn(w.logger).Log("msg", "error while closing the wal segments reader", "err", err) - } - if err != nil { - return err - } - level.Info(w.logger).Log("msg", "WAL segment loaded", "segment", i, "maxSegment", last) - } - - return nil -} - -func (w *Storage) loadWAL(r *wal.Reader) (err error) { - var ( - dec record.Decoder - ) - - var ( - decoded = make(chan interface{}, 10) - errCh = make(chan error, 1) - seriesPool = sync.Pool{ - New: func() interface{} { - return []record.RefSeries{} - }, - } - samplesPool = sync.Pool{ - New: func() interface{} { - return []record.RefSample{} - }, - } - ) - - go func() { - defer close(decoded) - for r.Next() { - rec := r.Record() - switch dec.Type(rec) { - case record.Series: - series := seriesPool.Get().([]record.RefSeries)[:0] - series, err = dec.Series(rec, series) - if err != nil { - errCh <- &wal.CorruptionErr{ - Err: errors.Wrap(err, "decode series"), - Segment: r.Segment(), - Offset: r.Offset(), - } - return - } - decoded <- series - case record.Samples: - samples := samplesPool.Get().([]record.RefSample)[:0] - samples, err = dec.Samples(rec, samples) - if err != nil { - errCh <- &wal.CorruptionErr{ - Err: errors.Wrap(err, "decode samples"), - Segment: r.Segment(), - Offset: r.Offset(), - } - } - decoded <- samples - case record.Tombstones: - // We don't care about tombstones - continue - default: - errCh <- &wal.CorruptionErr{ - Err: errors.Errorf("invalid record type %v", dec.Type(rec)), - Segment: r.Segment(), - Offset: r.Offset(), - } - return - } - } - }() - - for d := range decoded { - switch v := d.(type) { - case []record.RefSeries: - for _, s := range v { - // If this is a new series, create it in memory without a timestamp. - // If we read in a sample for it, we'll use the timestamp of the latest - // sample. Otherwise, the series is stale and will be deleted once - // the truncation is performed. - if w.series.getByID(s.Ref) == nil { - series := &memSeries{ref: s.Ref, lset: s.Labels, lastTs: 0} - w.series.set(s.Labels.Hash(), series) - - w.metrics.numActiveSeries.Inc() - w.metrics.totalCreatedSeries.Inc() - - w.mtx.Lock() - if w.nextRef <= s.Ref { - w.nextRef = s.Ref + 1 - } - w.mtx.Unlock() - } - } - - //nolint:staticcheck - seriesPool.Put(v) - case []record.RefSample: - for _, s := range v { - // Update the lastTs for the series based - series := w.series.getByID(s.Ref) - if series == nil { - level.Warn(w.logger).Log("msg", "found sample referencing non-existing series, skipping") - continue - } - - series.Lock() - if s.T > series.lastTs { - series.lastTs = s.T - } - series.Unlock() - } - - //nolint:staticcheck - samplesPool.Put(v) - default: - panic(fmt.Errorf("unexpected decoded type: %T", d)) - } - } - - select { - case err := <-errCh: - return err - default: - } - - if r.Err() != nil { - return errors.Wrap(r.Err(), "read records") - } - - return nil -} - -// Directory returns the path where the WAL storage is held. -func (w *Storage) Directory() string { - return w.path -} - -// Appender returns a new appender against the storage. 
-func (w *Storage) Appender(_ context.Context) storage.Appender { - return w.appenderPool.Get().(storage.Appender) -} - -func (w *Storage) Querier(ctx context.Context, mint, maxt int64) (storage.Querier, error) { - return &remoteWriteQueryable{}, nil -} - -// StartTime always returns 0, nil. It is implemented for compatibility with -// Prometheus, but is unused in the agent. -func (*Storage) StartTime() (int64, error) { - return 0, nil -} - -// Truncate removes all data from the WAL prior to the timestamp specified by -// mint. -func (w *Storage) Truncate(mint int64) error { - w.walMtx.RLock() - defer w.walMtx.RUnlock() - - if w.walClosed { - return ErrWALClosed - } - - start := time.Now() - - // Garbage collect series that haven't received an update since mint. - w.gc(mint) - level.Info(w.logger).Log("msg", "series GC completed", "duration", time.Since(start)) - - first, last, err := wal.Segments(w.wal.Dir()) - if err != nil { - return errors.Wrap(err, "get segment range") - } - - // Start a new segment, so low ingestion volume instance don't have more WAL - // than needed. - err = w.wal.NextSegment() - if err != nil { - return errors.Wrap(err, "next segment") - } - - last-- // Never consider last segment for checkpoint. - if last < 0 { - return nil // no segments yet. - } - - // The lower two thirds of segments should contain mostly obsolete samples. - // If we have less than two segments, it's not worth checkpointing yet. - last = first + (last-first)*2/3 - if last <= first { - return nil - } - - keep := func(id uint64) bool { - if w.series.getByID(id) != nil { - return true - } - - w.deletedMtx.Lock() - _, ok := w.deleted[id] - w.deletedMtx.Unlock() - return ok - } - if _, err = wal.Checkpoint(w.logger, w.wal, first, last, keep, mint); err != nil { - return errors.Wrap(err, "create checkpoint") - } - if err := w.wal.Truncate(last + 1); err != nil { - // If truncating fails, we'll just try again at the next checkpoint. - // Leftover segments will just be ignored in the future if there's a checkpoint - // that supersedes them. - level.Error(w.logger).Log("msg", "truncating segments failed", "err", err) - } - - // The checkpoint is written and segments before it is truncated, so we no - // longer need to track deleted series that are before it. - w.deletedMtx.Lock() - for ref, segment := range w.deleted { - if segment < first { - delete(w.deleted, ref) - w.metrics.totalRemovedSeries.Inc() - } - } - w.metrics.numDeletedSeries.Set(float64(len(w.deleted))) - w.deletedMtx.Unlock() - - if err := wal.DeleteCheckpoints(w.wal.Dir(), last); err != nil { - // Leftover old checkpoints do not cause problems down the line beyond - // occupying disk space. - // They will just be ignored since a higher checkpoint exists. - level.Error(w.logger).Log("msg", "delete old checkpoints", "err", err) - } - - level.Info(w.logger).Log("msg", "WAL checkpoint complete", - "first", first, "last", last, "duration", time.Since(start)) - return nil -} - -// gc removes data before the minimum timestamp from the head. -func (w *Storage) gc(mint int64) { - deleted := w.series.gc(mint) - w.metrics.numActiveSeries.Sub(float64(len(deleted))) - - _, last, _ := wal.Segments(w.wal.Dir()) - w.deletedMtx.Lock() - defer w.deletedMtx.Unlock() - - // We want to keep series records for any newly deleted series - // until we've passed the last recorded segment. The WAL will - // still contain samples records with all of the ref IDs until - // the segment's samples has been deleted from the checkpoint. 
- // - // If the series weren't kept on startup when the WAL was replied, - // the samples wouldn't be able to be used since there wouldn't - // be any labels for that ref ID. - for ref := range deleted { - w.deleted[ref] = last - } - - w.metrics.numDeletedSeries.Set(float64(len(w.deleted))) -} - -// WriteStalenessMarkers appends a staleness sample for all active series. -func (w *Storage) WriteStalenessMarkers(remoteTsFunc func() int64) error { - var lastErr error - var lastTs int64 - - app := w.Appender(context.Background()) - it := w.series.iterator() - for series := range it.Channel() { - var ( - ref = series.ref - lset = series.lset - ) - - ts := timestamp.FromTime(time.Now()) - _, err := app.Append(ref, lset, ts, math.Float64frombits(value.StaleNaN)) - if err != nil { - lastErr = err - } - - // Remove millisecond precision; the remote write timestamp we get - // only has second precision. - lastTs = (ts / 1000) * 1000 - } - - if lastErr == nil { - if err := app.Commit(); err != nil { - return fmt.Errorf("failed to commit staleness markers: %w", err) - } - - // Wait for remote write to write the lastTs, but give up after 1m - level.Info(w.logger).Log("msg", "waiting for remote write to write staleness markers...") - - stopCh := time.After(1 * time.Minute) - start := time.Now() - - Outer: - for { - select { - case <-stopCh: - level.Error(w.logger).Log("msg", "timed out waiting for staleness markers to be written") - break Outer - default: - writtenTs := remoteTsFunc() - if writtenTs >= lastTs { - duration := time.Since(start) - level.Info(w.logger).Log("msg", "remote write wrote staleness markers", "duration", duration) - break Outer - } - - level.Info(w.logger).Log("msg", "remote write hasn't written staleness markers yet", "remoteTs", writtenTs, "lastTs", lastTs) - - // Wait a bit before reading again - time.Sleep(5 * time.Second) - } - } - } - - return lastErr -} - -// Close closes the storage and all its underlying resources. -func (w *Storage) Close() error { - w.walMtx.Lock() - defer w.walMtx.Unlock() - - if w.walClosed { - return fmt.Errorf("already closed") - } - w.walClosed = true - - if w.metrics != nil { - w.metrics.Unregister() - } - return w.wal.Close() -} - -type appender struct { - w *Storage - series []record.RefSeries - samples []record.RefSample -} - -func (a *appender) Append(ref uint64, l labels.Labels, t int64, v float64) (uint64, error) { - if ref == 0 { - return a.Add(l, t, v) - } - return ref, a.AddFast(ref, t, v) -} - -func (a *appender) Add(l labels.Labels, t int64, v float64) (uint64, error) { - hash := l.Hash() - series := a.w.series.getByHash(hash, l) - if series != nil { - return series.ref, a.AddFast(series.ref, t, v) - } - - // Ensure no empty or duplicate labels have gotten through. This mirrors the - // equivalent validation code in the TSDB's headAppender. 
- l = l.WithoutEmpty() - if len(l) == 0 { - return 0, errors.Wrap(tsdb.ErrInvalidSample, "empty labelset") - } - - if lbl, dup := l.HasDuplicateLabelNames(); dup { - return 0, errors.Wrap(tsdb.ErrInvalidSample, fmt.Sprintf(`label name "%s" is not unique`, lbl)) - } - - a.w.mtx.Lock() - ref := a.w.nextRef - a.w.nextRef++ - a.w.mtx.Unlock() - - series = &memSeries{ref: ref, lset: l} - series.updateTs(t) - - a.series = append(a.series, record.RefSeries{ - Ref: ref, - Labels: l, - }) - - a.w.series.set(hash, series) - - a.w.metrics.numActiveSeries.Inc() - a.w.metrics.totalCreatedSeries.Inc() - a.w.metrics.totalAppendedSamples.Inc() - - return series.ref, a.AddFast(series.ref, t, v) -} - -func (a *appender) AddFast(ref uint64, t int64, v float64) error { - series := a.w.series.getByID(ref) - if series == nil { - return storage.ErrNotFound - } - series.Lock() - defer series.Unlock() - - // Update last recorded timestamp. Used by Storage.gc to determine if a - // series is dead. - series.updateTs(t) - - a.samples = append(a.samples, record.RefSample{ - Ref: ref, - T: t, - V: v, - }) - - a.w.metrics.totalAppendedSamples.Inc() - return nil -} - -func (a *appender) AppendExemplar(ref uint64, l labels.Labels, e exemplar.Exemplar) (uint64, error) { - // remote_write doesn't support exemplars yet, so do nothing here. - return 0, nil -} - -// Commit submits the collected samples and purges the batch. -func (a *appender) Commit() error { - a.w.walMtx.RLock() - defer a.w.walMtx.RUnlock() - - if a.w.walClosed { - return ErrWALClosed - } - - var encoder record.Encoder - buf := a.w.bufPool.Get().([]byte) - - if len(a.series) > 0 { - buf = encoder.Series(a.series, buf) - if err := a.w.wal.Log(buf); err != nil { - return err - } - buf = buf[:0] - } - - if len(a.samples) > 0 { - buf = encoder.Samples(a.samples, buf) - if err := a.w.wal.Log(buf); err != nil { - return err - } - buf = buf[:0] - } - - //nolint:staticcheck - a.w.bufPool.Put(buf) - - for _, sample := range a.samples { - series := a.w.series.getByID(sample.Ref) - if series != nil { - series.Lock() - series.pendingCommit = false - series.Unlock() - } - } - - return a.Rollback() -} - -func (a *appender) Rollback() error { - a.series = a.series[:0] - a.samples = a.samples[:0] - a.w.appenderPool.Put(a) - return nil -} - -type remoteWriteQueryable struct{} - -func (r *remoteWriteQueryable) LabelValues(name string, matchers ...*labels.Matcher) ([]string, storage.Warnings, error) { - return nil, nil, nil -} - -func (r *remoteWriteQueryable) LabelNames(matchers ...*labels.Matcher) ([]string, storage.Warnings, error) { - return nil, nil, nil -} - -func (r *remoteWriteQueryable) Close() error { - return nil -} - -func (r *remoteWriteQueryable) Select(sortSeries bool, hints *storage.SelectHints, matchers ...*labels.Matcher) storage.SeriesSet { - return storage.EmptySeriesSet() -} diff --git a/pkg/rules/remotewrite/wal_test.go b/pkg/rules/remotewrite/wal_test.go deleted file mode 100644 index 7ab724b486..0000000000 --- a/pkg/rules/remotewrite/wal_test.go +++ /dev/null @@ -1,53 +0,0 @@ -// Copyright (c) The Thanos Authors. -// Licensed under the Apache License 2.0. - -// This is copied from https://github.com/grafana/agent/blob/a23bd5cf27c2ac99695b7449d38fb12444941a1c/pkg/prom/wal/wal_test.go -// TODO(idoqo): Migrate to prometheus package when https://github.com/prometheus/prometheus/pull/8785 is ready. 
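For orientation, a minimal sketch of how the storage deleted above is driven, mirroring what the test file below exercises. It assumes the NewStorage/Appender API shown in this hunk and the package's existing imports (go-kit log, context, time, and the Prometheus labels and timestamp packages); appendOneSample is a hypothetical helper, not part of the patch.

// Hypothetical helper, written as if it lived in the remotewrite package.
func appendOneSample(dir string) error {
	// Open the WAL under dir, replaying any existing checkpoint and segments.
	s, err := NewStorage(log.NewNopLogger(), nil, dir)
	if err != nil {
		return err
	}
	defer s.Close()

	// Append one sample and commit it, which logs a series record and a
	// samples record to the WAL.
	app := s.Appender(context.Background())
	ts := timestamp.FromTime(time.Now())
	if _, err := app.Append(0, labels.FromStrings("foo", "bar"), ts, 1); err != nil {
		_ = app.Rollback()
		return err
	}
	return app.Commit()
}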
-package remotewrite - -import ( - "context" - "io/ioutil" - "os" - "testing" - - "github.com/go-kit/kit/log" - "github.com/prometheus/prometheus/pkg/labels" - "github.com/stretchr/testify/require" -) - -func TestStorage_InvalidSeries(t *testing.T) { - walDir, err := ioutil.TempDir(os.TempDir(), "wal") - require.NoError(t, err) - defer os.RemoveAll(walDir) - - s, err := NewStorage(log.NewNopLogger(), nil, walDir) - require.NoError(t, err) - defer func() { - require.NoError(t, s.Close()) - }() - - app := s.Appender(context.Background()) - - _, err = app.Append(0, labels.Labels{}, 0, 0) - require.Error(t, err, "should reject empty labels") - - _, err = app.Append(0, labels.Labels{{Name: "a", Value: "1"}, {Name: "a", Value: "2"}}, 0, 0) - require.Error(t, err, "should reject duplicate labels") - - // Sanity check: valid series - _, err = app.Append(0, labels.Labels{{Name: "a", Value: "1"}}, 0, 0) - require.NoError(t, err, "should not reject valid series") -} - -func TestStoraeg_TruncateAfterClose(t *testing.T) { - walDir, err := ioutil.TempDir(os.TempDir(), "wal") - require.NoError(t, err) - defer os.RemoveAll(walDir) - - s, err := NewStorage(log.NewNopLogger(), nil, walDir) - require.NoError(t, err) - - require.NoError(t, s.Close()) - require.Error(t, ErrWALClosed, s.Truncate(0)) -} From 46acaeee0b11e79221e04081235fce6b247985fc Mon Sep 17 00:00:00 2001 From: Ben Ye Date: Mon, 8 Nov 2021 09:53:39 -0800 Subject: [PATCH 26/26] address comments Signed-off-by: Ben Ye --- cmd/thanos/rule.go | 20 ++++++++++---------- test/e2e/rule_test.go | 7 ++++--- 2 files changed, 14 insertions(+), 13 deletions(-) diff --git a/cmd/thanos/rule.go b/cmd/thanos/rule.go index 4e2e046572..b92ab02f58 100644 --- a/cmd/thanos/rule.go +++ b/cmd/thanos/rule.go @@ -336,7 +336,8 @@ func runRule( var ( appendable storage.Appendable queryable storage.Queryable - db *tsdb.DB + tsdbDB *tsdb.DB + agentDB *agent.DB ) rwCfgYAML, err := conf.rwConfig.Content() @@ -361,15 +362,15 @@ func runRule( return errors.Wrap(err, "applying config to remote storage") } - db, err := agent.Open(logger, reg, remoteStore, walDir, agentOpts) + agentDB, err = agent.Open(logger, reg, remoteStore, walDir, agentOpts) if err != nil { return errors.Wrap(err, "start remote write agent db") } - fanoutStore := storage.NewFanout(logger, db, remoteStore) + fanoutStore := storage.NewFanout(logger, agentDB, remoteStore) appendable = fanoutStore queryable = fanoutStore } else { - db, err = tsdb.Open(conf.dataDir, log.With(logger, "component", "tsdb"), reg, tsdbOpts, nil) + tsdbDB, err = tsdb.Open(conf.dataDir, log.With(logger, "component", "tsdb"), reg, tsdbOpts, nil) if err != nil { return errors.Wrap(err, "open TSDB") } @@ -383,13 +384,13 @@ func runRule( done := make(chan struct{}) g.Add(func() error { <-done - return db.Close() + return tsdbDB.Close() }, func(error) { close(done) }) } - appendable = db - queryable = db + appendable = tsdbDB + queryable = tsdbDB } // Build the Alertmanager clients. @@ -585,11 +586,10 @@ func runRule( grpcserver.WithGracePeriod(time.Duration(conf.grpc.gracePeriod)), grpcserver.WithTLSConfig(tlsCfg), } - if db != nil { - tsdbStore := store.NewTSDBStore(logger, db, component.Rule, conf.lset) + if tsdbDB != nil { + tsdbStore := store.NewTSDBStore(logger, tsdbDB, component.Rule, conf.lset) options = append(options, grpcserver.WithServer(store.RegisterStoreServer(tsdbStore))) } - // TODO: Add rules API implementation when ready. s := grpcserver.New(logger, reg, tracer, grpcLogOpts, tagOpts, comp, grpcProbe, options...) 
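For illustration, a minimal sketch of what the fanout wired up above means for writes: the primary (the agent WAL) and every secondary (the remote-write storage) receive each appended sample, and Commit/Rollback are forwarded the same way. It assumes the Prometheus storage, labels, and timestamp packages; appendViaFanout and the "up" sample are hypothetical, not part of the patch.

func appendViaFanout(ctx context.Context, fanout storage.Appendable) error {
	app := fanout.Appender(ctx)
	// The fanout forwards the append to the agent WAL and queues it for
	// remote write.
	if _, err := app.Append(0, labels.FromStrings("__name__", "up"), timestamp.FromTime(time.Now()), 1); err != nil {
		_ = app.Rollback()
		return err
	}
	return app.Commit()
}

The rule manager appends through this storage.Appendable, which is why the stateless branch above needs no local TSDB.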
g.Add(func() error { diff --git a/test/e2e/rule_test.go b/test/e2e/rule_test.go index ce02ae52d1..dd2d6a927b 100644 --- a/test/e2e/rule_test.go +++ b/test/e2e/rule_test.go @@ -507,11 +507,12 @@ func TestRule_CanRemoteWriteData(t *testing.T) { testutil.Ok(t, err) testutil.Ok(t, e2e.StartAndWaitReady(r)) + // Wait until remote write samples are written to receivers successfully. + testutil.Ok(t, r.WaitSumMetricsWithOptions(e2e.GreaterOrEqual(1), []string{"prometheus_remote_storage_samples_total"}, e2e.WaitMissingMetrics())) + t.Run("can fetch remote-written samples from receiver", func(t *testing.T) { testRecordedSamples := "test_absent_metric" - queryAndAssertSeries(t, ctx, q.Endpoint("http"), testRecordedSamples, func() time.Time { - return time.Now() - }, promclient.QueryOptions{ + queryAndAssertSeries(t, ctx, q.Endpoint("http"), testRecordedSamples, time.Now, promclient.QueryOptions{ Deduplicate: false, }, []model.Metric{ {