From ed5533022741d3e29f9f86012cfbd0a4a3b47f3c Mon Sep 17 00:00:00 2001 From: Owen Diehl Date: Wed, 27 May 2020 15:52:14 -0400 Subject: [PATCH 01/40] begins speccing out ruler ruler memhistory encapsulates metricsHistory removes mutex from memhistory ruler module --- pkg/loki/loki.go | 5 + pkg/loki/modules.go | 43 ++++++++ pkg/ruler/compat.go | 63 +++++++++++ pkg/ruler/memhistory.go | 228 ++++++++++++++++++++++++++++++++++++++++ 4 files changed, 339 insertions(+) create mode 100644 pkg/ruler/compat.go create mode 100644 pkg/ruler/memhistory.go diff --git a/pkg/loki/loki.go b/pkg/loki/loki.go index 104e0cbead318..2d547e76c176d 100644 --- a/pkg/loki/loki.go +++ b/pkg/loki/loki.go @@ -15,6 +15,7 @@ import ( "github.com/cortexproject/cortex/pkg/querier/frontend" "github.com/cortexproject/cortex/pkg/ring" "github.com/cortexproject/cortex/pkg/ring/kv/memberlist" + "github.com/cortexproject/cortex/pkg/ruler" "github.com/cortexproject/cortex/pkg/util" "github.com/cortexproject/cortex/pkg/util/runtimeconfig" "github.com/cortexproject/cortex/pkg/util/services" @@ -56,6 +57,7 @@ type Config struct { TableManager chunk.TableManagerConfig `yaml:"table_manager,omitempty"` Worker frontend.WorkerConfig `yaml:"frontend_worker,omitempty"` Frontend lokifrontend.Config `yaml:"frontend,omitempty"` + Ruler ruler.Config `yaml:"ruler,omitempty"` QueryRange queryrange.Config `yaml:"query_range,omitempty"` RuntimeConfig runtimeconfig.ManagerConfig `yaml:"runtime_config,omitempty"` MemberlistKV memberlist.KVConfig `yaml:"memberlist"` @@ -123,6 +125,7 @@ type Loki struct { store storage.Store tableManager *chunk.TableManager frontend *frontend.Frontend + ruler *ruler.Ruler stopper queryrange.Stopper runtimeConfig *runtimeconfig.Manager memberlistKV *memberlist.KVInitService @@ -295,6 +298,7 @@ func (t *Loki) setupModuleManager() error { mm.RegisterModule(Ingester, t.initIngester) mm.RegisterModule(Querier, t.initQuerier) mm.RegisterModule(QueryFrontend, t.initQueryFrontend) + mm.RegisterModule(Ruler, t.initRuler) mm.RegisterModule(TableManager, t.initTableManager) mm.RegisterModule(All, nil) @@ -307,6 +311,7 @@ func (t *Loki) setupModuleManager() error { Ingester: {Store, Server, MemberlistKV}, Querier: {Store, Ring, Server}, QueryFrontend: {Server, Overrides}, + Ruler: {Server, Ring, Store, Overrides}, TableManager: {Server}, All: {Querier, Ingester, Distributor, TableManager}, } diff --git a/pkg/loki/modules.go b/pkg/loki/modules.go index 4c29194068113..6ef738b588d46 100644 --- a/pkg/loki/modules.go +++ b/pkg/loki/modules.go @@ -19,6 +19,7 @@ import ( "github.com/cortexproject/cortex/pkg/ring" "github.com/cortexproject/cortex/pkg/ring/kv/codec" "github.com/cortexproject/cortex/pkg/ring/kv/memberlist" + cortex_ruler "github.com/cortexproject/cortex/pkg/ruler" "github.com/cortexproject/cortex/pkg/util" "github.com/cortexproject/cortex/pkg/util/runtimeconfig" "github.com/cortexproject/cortex/pkg/util/services" @@ -34,8 +35,10 @@ import ( "github.com/grafana/loki/pkg/distributor" "github.com/grafana/loki/pkg/ingester" "github.com/grafana/loki/pkg/logproto" + "github.com/grafana/loki/pkg/logql" "github.com/grafana/loki/pkg/querier" "github.com/grafana/loki/pkg/querier/queryrange" + "github.com/grafana/loki/pkg/ruler" loki_storage "github.com/grafana/loki/pkg/storage" "github.com/grafana/loki/pkg/storage/stores/shipper" serverutil "github.com/grafana/loki/pkg/util/server" @@ -54,6 +57,7 @@ const ( Ingester string = "ingester" Querier string = "querier" QueryFrontend string = "query-frontend" + Ruler string = "ruler" Store 
string = "store" TableManager string = "table-manager" MemberlistKV string = "memberlist-kv" @@ -350,6 +354,45 @@ func (t *Loki) initQueryFrontend() (_ services.Service, err error) { }), nil } +func (t *Loki) initRuler() (_ services.Service, err error) { + t.cfg.Ruler.Ring.ListenPort = t.cfg.Server.GRPCListenPort + t.cfg.Ruler.Ring.KVStore.MemberlistKV = t.memberlistKV.GetMemberlistKV + q, err := querier.New(t.cfg.Querier, t.cfg.IngesterClient, t.ring, t.store, t.overrides) + if err != nil { + return nil, err + } + + engine := logql.NewEngine(t.cfg.Querier.Engine, q) + + t.ruler, err = cortex_ruler.NewRuler( + t.cfg.Ruler, + ruler.LokiDelayedQueryFunc(engine), + ruler.InMemoryAppendableHistory, + prometheus.DefaultRegisterer, + util.Logger, + ) + + if err != nil { + return + } + + // Expose HTTP endpoints. + if t.cfg.Ruler.EnableAPI { + + t.server.HTTP.Handle("/ruler/ring", t.ruler) + cortex_ruler.RegisterRulerServer(t.server.GRPC, t.ruler) + + // Ruler API Routes + t.server.HTTP.Path("/api/v1/rules").Methods("GET").Handler(t.httpAuthMiddleware.Wrap(http.HandlerFunc(t.ruler.ListRules))) + t.server.HTTP.Path("/api/v1/rules/{namespace}").Methods("GET").Handler(t.httpAuthMiddleware.Wrap(http.HandlerFunc(t.ruler.ListRules))) + t.server.HTTP.Path("/api/v1/rules/{namespace}/{groupName}").Methods("GET").Handler(t.httpAuthMiddleware.Wrap(http.HandlerFunc(t.ruler.GetRuleGroup))) + t.server.HTTP.Path("/api/v1/rules/{namespace}").Methods("POST").Handler(t.httpAuthMiddleware.Wrap(http.HandlerFunc(t.ruler.CreateRuleGroup))) + t.server.HTTP.Path("/api/v1/rules/{namespace}/{groupName}").Methods("DELETE").Handler(t.httpAuthMiddleware.Wrap(http.HandlerFunc(t.ruler.DeleteRuleGroup))) + } + + return t.ruler, nil +} + func (t *Loki) initMemberlistKV() (services.Service, error) { t.cfg.MemberlistKV.MetricsRegisterer = prometheus.DefaultRegisterer t.cfg.MemberlistKV.Codecs = []codec.Codec{ diff --git a/pkg/ruler/compat.go b/pkg/ruler/compat.go new file mode 100644 index 0000000000000..a14cbf956607d --- /dev/null +++ b/pkg/ruler/compat.go @@ -0,0 +1,63 @@ +package ruler + +import ( + "context" + "errors" + "time" + + "github.com/cortexproject/cortex/pkg/ruler" + "github.com/cortexproject/cortex/pkg/ruler/rules" + "github.com/prometheus/prometheus/pkg/labels" + "github.com/prometheus/prometheus/promql" + "github.com/prometheus/prometheus/storage" + + "github.com/grafana/loki/pkg/logproto" + "github.com/grafana/loki/pkg/logql" +) + +func LokiDelayedQueryFunc(engine *logql.Engine) ruler.DelayedQueryFunc { + return func(delay time.Duration) rules.QueryFunc { + return func(ctx context.Context, qs string, t time.Time) (promql.Vector, error) { + adjusted := t.Add(-delay) + q := engine.Query(logql.NewLiteralParams( + qs, + adjusted, + adjusted, + 0, + 0, + logproto.BACKWARD, + 0, + nil, + )) + + res, err := q.Exec(ctx) + if err != nil { + return nil, err + } + switch v := res.Data.(type) { + case promql.Vector: + return v, nil + case promql.Scalar: + return promql.Vector{promql.Sample{ + Point: promql.Point(v), + Metric: labels.Labels{}, + }}, nil + default: + return nil, errors.New("rule result is not a vector or scalar") + } + } + } +} + +func InMemoryAppendableHistory(userID string, opts *rules.ManagerOptions) (rules.Appendable, rules.AlertHistory) { + hist := NewMemHistory(userID, opts) + return hist, hist +} + +type NoopAppender struct{} + +func (a NoopAppender) Appender() (storage.Appender, error) { return a, nil } +func (a NoopAppender) Add(l labels.Labels, t int64, v float64) (uint64, error) { return 0, nil } 
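+// Like Add above, the remaining storage.Appender methods are deliberate no-ops: a NoopAppender
+// simply discards whatever the rule evaluation hands it.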
+func (a NoopAppender) AddFast(l labels.Labels, ref uint64, t int64, v float64) error { return nil } +func (a NoopAppender) Commit() error { return nil } +func (a NoopAppender) Rollback() error { return nil } diff --git a/pkg/ruler/memhistory.go b/pkg/ruler/memhistory.go new file mode 100644 index 0000000000000..8010095e7f1b9 --- /dev/null +++ b/pkg/ruler/memhistory.go @@ -0,0 +1,228 @@ +package ruler + +import ( + "context" + "errors" + "time" + + "github.com/cortexproject/cortex/pkg/querier/series" + "github.com/cortexproject/cortex/pkg/ruler/rules" + "github.com/go-kit/kit/log/level" + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/common/model" + "github.com/prometheus/prometheus/pkg/labels" + "github.com/prometheus/prometheus/storage" +) + +type Metrics struct { + groupMetrics *rules.Metrics +} + +func NewMetrics(registerer prometheus.Registerer, groupMetrics *rules.Metrics) *Metrics { + if groupMetrics == nil { + groupMetrics = rules.NewGroupMetrics(registerer) + } + return &Metrics{ + groupMetrics: groupMetrics, + } +} + +type MemHistory struct { + userId string + opts *rules.ManagerOptions + appenders map[*rules.AlertingRule]*ForStateAppender + metrics *Metrics +} + +func NewMemHistory(userId string, opts *rules.ManagerOptions) *MemHistory { + return &MemHistory{ + userId: userId, + opts: opts, + appenders: make(map[*rules.AlertingRule]*ForStateAppender), + metrics: NewMetrics(opts.Registerer, opts.Metrics), + } +} + +// Implement rules.Appendable +func (m *MemHistory) Appender(rule rules.Rule) (storage.Appender, error) { + if rule == nil { + return NoopAppender{}, nil + } + + alertRule, ok := rule.(*rules.AlertingRule) + if !ok { + return nil, errors.New("unimplemented: MemHistory only accepts AlertingRules") + } + + if app, ok := m.appenders[alertRule]; ok { + return app, nil + } + + app := NewForStateAppender(alertRule) + m.appenders[alertRule] = app + return app, nil +} + +func (m *MemHistory) RestoreForState(ts time.Time, alertRule *rules.AlertingRule) { + appender, err := m.Appender(alertRule) + if err != nil { + level.Error(m.opts.Logger).Log("msg", "Could not find an Appender for rule", "err", err) + return + } + + app := appender.(*ForStateAppender) + + // Here we artificially populate the evaluation at `now-forDuration`. + // Note: We lose granularity here across restarts because we don't persist series. This is an approximation + // of whether the alert condition was positive during this period. This means after restarts, we may lose up + // to the ForDuration in alert granularity. + // TODO: Do we want this to instead evaluate forDuration/interval times? + vec, err := m.opts.QueryFunc(m.opts.Context, alertRule.Query().String(), ts.Add(-alertRule.Duration())) + if err != nil { + alertRule.SetHealth(rules.HealthBad) + alertRule.SetLastError(err) + m.metrics.groupMetrics.FailedEvaluate() + } + + for _, smpl := range vec { + if _, err := app.Add(smpl.Metric, smpl.T, smpl.V); err != nil { + level.Error(m.opts.Logger).Log("msg", "error appending to MemHistory", "err", err) + return + } + } + + // Now that we've evaluated the rule and written the results to our in memory appender, + // delegate to the default implementation. 
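+	// The delegate reads back the ALERTS_FOR_STATE samples appended above to recover each alert's
+	// ActiveAt, which is how `for` timers survive a ruler restart.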
+ rules.NewMetricsHistory(app, m.opts).RestoreForState(ts, alertRule) + +} + +type ForStateAppender struct { + rule *rules.AlertingRule + data map[uint64]*series.ConcreteSeries +} + +func NewForStateAppender(rule *rules.AlertingRule) *ForStateAppender { + return &ForStateAppender{ + rule: rule, + data: make(map[uint64]*series.ConcreteSeries), + } +} + +func (m *ForStateAppender) Add(ls labels.Labels, t int64, v float64) (uint64, error) { + for _, l := range ls { + if l.Name == labels.MetricName && l.Value != rules.AlertForStateMetricName { + // This is not an ALERTS_FOR_STATE metric, skip + return 0, nil + } + } + + fp := ls.Hash() + + if s, ok := m.data[fp]; ok { + s.Add(model.SamplePair{ + Timestamp: model.Time(t), + Value: model.SampleValue(v), + }) + + // release all older references that are no longer needed + s.TrimStart(time.Now().Add(-m.rule.Duration())) + return 0, nil + } + m.data[fp] = series.NewConcreteSeries(ls, []model.SamplePair{{Timestamp: model.Time(t), Value: model.SampleValue(v)}}) + return 0, nil + +} + +func (m *ForStateAppender) AddFast(ls labels.Labels, _ uint64, t int64, v float64) error { + _, err := m.Add(ls, t, v) + return err + +} + +func (m *ForStateAppender) Commit() error { return nil } + +func (m *ForStateAppender) Rollback() error { return nil } + +// implement storage.Queryable +func (m *ForStateAppender) Querier(ctx context.Context, mint, _ int64) (storage.Querier, error) { + // These are never realisticallly bounded by maxt, so we omit it. + return ForStateAppenderQuerier{ + mint: mint, + ForStateAppender: m, + }, nil + +} + +// TimeFromMillis is a helper to turn milliseconds -> time.Time +func TimeFromMillis(ms int64) time.Time { + return time.Unix(0, ms*int64(time.Millisecond/time.Nanosecond)) +} + +// ForStateAppenderQuerier wraps a **ForStateAppender and implements storage.Querier +type ForStateAppenderQuerier struct { + mint int64 + *ForStateAppender +} + +// Select returns a set of series that matches the given label matchers. +func (q ForStateAppenderQuerier) Select(params *storage.SelectParams, matchers ...*labels.Matcher) (storage.SeriesSet, storage.Warnings, error) { + var filtered []storage.Series +outer: + for _, s := range q.data { + for _, matcher := range matchers { + if !matcher.Matches(s.Labels().Get(matcher.Name)) { + continue outer + } + + iter := s.Iterator() + + seekTo := q.mint + if seekTo < params.Start { + seekTo = params.Start + } + if !iter.Seek(seekTo) { + continue + } + + var samples []model.SamplePair + + for iter.Next() { + t, v := iter.At() + if t > params.End { + break + } + + samples = append(samples, model.SamplePair{ + Timestamp: model.Time(t), + Value: model.SampleValue(v), + }) + + } + + if len(samples) != 0 { + filtered = append(filtered, series.NewConcreteSeries(s.Labels(), samples)) + } + } + } + + return series.NewConcreteSeriesSet(filtered), nil, nil +} + +// SelectSorted returns a sorted set of series that matches the given label matchers. +func (ForStateAppenderQuerier) SelectSorted(params *storage.SelectParams, matchers ...*labels.Matcher) (storage.SeriesSet, storage.Warnings, error) { + return nil, nil, errors.New("unimplemented") +} + +// LabelValues returns all potential values for a label name. +func (ForStateAppenderQuerier) LabelValues(name string) ([]string, storage.Warnings, error) { + return nil, nil, errors.New("unimplemented") +} + +// LabelNames returns all the unique label names present in the block in sorted order. 
+func (ForStateAppenderQuerier) LabelNames() ([]string, storage.Warnings, error) { + return nil, nil, errors.New("unimplemented") +} + +// Close releases the resources of the Querier. +func (ForStateAppenderQuerier) Close() error { return nil } From 1f261a7a2f0984bcaba18afdd45b36338453898f Mon Sep 17 00:00:00 2001 From: Owen Diehl Date: Tue, 9 Jun 2020 17:02:33 -0400 Subject: [PATCH 02/40] upstream conflicts --- pkg/ruler/compat.go | 12 +++++++----- pkg/ruler/memhistory.go | 18 ++++++++---------- 2 files changed, 15 insertions(+), 15 deletions(-) diff --git a/pkg/ruler/compat.go b/pkg/ruler/compat.go index a14cbf956607d..7a643f84a979b 100644 --- a/pkg/ruler/compat.go +++ b/pkg/ruler/compat.go @@ -56,8 +56,10 @@ func InMemoryAppendableHistory(userID string, opts *rules.ManagerOptions) (rules type NoopAppender struct{} -func (a NoopAppender) Appender() (storage.Appender, error) { return a, nil } -func (a NoopAppender) Add(l labels.Labels, t int64, v float64) (uint64, error) { return 0, nil } -func (a NoopAppender) AddFast(l labels.Labels, ref uint64, t int64, v float64) error { return nil } -func (a NoopAppender) Commit() error { return nil } -func (a NoopAppender) Rollback() error { return nil } +func (a NoopAppender) Appender() (storage.Appender, error) { return a, nil } +func (a NoopAppender) Add(l labels.Labels, t int64, v float64) (uint64, error) { return 0, nil } +func (a NoopAppender) AddFast(ref uint64, t int64, v float64) error { + return errors.New("unimplemented") +} +func (a NoopAppender) Commit() error { return nil } +func (a NoopAppender) Rollback() error { return nil } diff --git a/pkg/ruler/memhistory.go b/pkg/ruler/memhistory.go index 8010095e7f1b9..83bdab0307fa3 100644 --- a/pkg/ruler/memhistory.go +++ b/pkg/ruler/memhistory.go @@ -134,10 +134,8 @@ func (m *ForStateAppender) Add(ls labels.Labels, t int64, v float64) (uint64, er } -func (m *ForStateAppender) AddFast(ls labels.Labels, _ uint64, t int64, v float64) error { - _, err := m.Add(ls, t, v) - return err - +func (m *ForStateAppender) AddFast(ref uint64, t int64, v float64) error { + return errors.New("unimplemented") } func (m *ForStateAppender) Commit() error { return nil } @@ -166,7 +164,12 @@ type ForStateAppenderQuerier struct { } // Select returns a set of series that matches the given label matchers. -func (q ForStateAppenderQuerier) Select(params *storage.SelectParams, matchers ...*labels.Matcher) (storage.SeriesSet, storage.Warnings, error) { +func (q ForStateAppenderQuerier) Select(sortSeries bool, params *storage.SelectHints, matchers ...*labels.Matcher) (storage.SeriesSet, storage.Warnings, error) { + if sortSeries { + return nil, nil, errors.New("ForStateAppenderQuerier does not support sorted selects") + + } + var filtered []storage.Series outer: for _, s := range q.data { @@ -209,11 +212,6 @@ outer: return series.NewConcreteSeriesSet(filtered), nil, nil } -// SelectSorted returns a sorted set of series that matches the given label matchers. -func (ForStateAppenderQuerier) SelectSorted(params *storage.SelectParams, matchers ...*labels.Matcher) (storage.SeriesSet, storage.Warnings, error) { - return nil, nil, errors.New("unimplemented") -} - // LabelValues returns all potential values for a label name. 
func (ForStateAppenderQuerier) LabelValues(name string) ([]string, storage.Warnings, error) { return nil, nil, errors.New("unimplemented") From 1157df5159bc0417bd075c631759a0a8f7e3a63c Mon Sep 17 00:00:00 2001 From: Owen Diehl Date: Tue, 9 Jun 2020 17:43:55 -0400 Subject: [PATCH 03/40] implicit ast impls, parser for ruler --- pkg/logql/ast.go | 24 ++++++++++-------------- pkg/loki/modules.go | 1 + 2 files changed, 11 insertions(+), 14 deletions(-) diff --git a/pkg/logql/ast.go b/pkg/logql/ast.go index 17dab242647e0..183df49ff069a 100644 --- a/pkg/logql/ast.go +++ b/pkg/logql/ast.go @@ -28,6 +28,11 @@ type QueryParams interface { GetShards() []string } +// implicit holds default implementations +type implicit struct{} + +func (implicit) logQLExpr() {} + // SelectParams specifies parameters passed to data selections. type SelectLogParams struct { *logproto.QueryRequest @@ -75,6 +80,7 @@ type LogSelectorExpr interface { type matchersExpr struct { matchers []*labels.Matcher + implicit } func newMatcherExpr(matchers []*labels.Matcher) LogSelectorExpr { @@ -102,13 +108,11 @@ func (e *matchersExpr) Filter() (LineFilter, error) { return nil, nil } -// impl Expr -func (e *matchersExpr) logQLExpr() {} - type filterExpr struct { left LogSelectorExpr ty labels.MatchType match string + implicit } // NewFilterExpr wraps an existing Expr with a next filter expression. @@ -163,9 +167,6 @@ func (e *filterExpr) Filter() (LineFilter, error) { return f, nil } -// impl Expr -func (e *filterExpr) logQLExpr() {} - func mustNewMatcher(t labels.MatchType, n, v string) *labels.Matcher { m, err := labels.NewMatcher(t, n, v) if err != nil { @@ -275,6 +276,7 @@ type SampleExpr interface { type rangeAggregationExpr struct { left *logRange operation string + implicit } func newRangeAggregationExpr(left *logRange, operation string) SampleExpr { @@ -288,9 +290,6 @@ func (e *rangeAggregationExpr) Selector() LogSelectorExpr { return e.left.left } -// impl Expr -func (e *rangeAggregationExpr) logQLExpr() {} - // impls Stringer func (e *rangeAggregationExpr) String() string { return formatOperation(e.operation, nil, e.left.String()) @@ -330,6 +329,7 @@ type vectorAggregationExpr struct { grouping *grouping params int operation string + implicit } func mustNewVectorAggregationExpr(left SampleExpr, operation string, gr *grouping, params *string) SampleExpr { @@ -368,9 +368,6 @@ func (e *vectorAggregationExpr) Extractor() (SampleExtractor, error) { return e.left.Extractor() } -// impl Expr -func (e *vectorAggregationExpr) logQLExpr() {} - func (e *vectorAggregationExpr) String() string { var params []string if e.params != 0 { @@ -479,6 +476,7 @@ func reduceBinOp(op string, left, right *literalExpr) *literalExpr { type literalExpr struct { value float64 + implicit } func mustNewLiteralExpr(s string, invert bool) *literalExpr { @@ -496,8 +494,6 @@ func mustNewLiteralExpr(s string, invert bool) *literalExpr { } } -func (e *literalExpr) logQLExpr() {} - func (e *literalExpr) String() string { return fmt.Sprintf("%f", e.value) } diff --git a/pkg/loki/modules.go b/pkg/loki/modules.go index 6ef738b588d46..a8d3d7e57132d 100644 --- a/pkg/loki/modules.go +++ b/pkg/loki/modules.go @@ -369,6 +369,7 @@ func (t *Loki) initRuler() (_ services.Service, err error) { ruler.LokiDelayedQueryFunc(engine), ruler.InMemoryAppendableHistory, prometheus.DefaultRegisterer, + func(s string) (fmt.Stringer, error) { return logql.ParseExpr(s) }, util.Logger, ) From 6beb78dde5dab4a8bd40144ea39df0e642be9456 Mon Sep 17 00:00:00 2001 From: Owen Diehl Date: Wed, 
10 Jun 2020 10:14:42 -0400 Subject: [PATCH 04/40] /api/prom ruler routes, ruler enabled in single binary --- pkg/loki/loki.go | 2 +- pkg/loki/modules.go | 7 +++++++ 2 files changed, 8 insertions(+), 1 deletion(-) diff --git a/pkg/loki/loki.go b/pkg/loki/loki.go index 2d547e76c176d..901dc316735d9 100644 --- a/pkg/loki/loki.go +++ b/pkg/loki/loki.go @@ -313,7 +313,7 @@ func (t *Loki) setupModuleManager() error { QueryFrontend: {Server, Overrides}, Ruler: {Server, Ring, Store, Overrides}, TableManager: {Server}, - All: {Querier, Ingester, Distributor, TableManager}, + All: {Querier, Ingester, Distributor, TableManager, Ruler}, } for mod, targets := range deps { diff --git a/pkg/loki/modules.go b/pkg/loki/modules.go index a8d3d7e57132d..388bf21eca836 100644 --- a/pkg/loki/modules.go +++ b/pkg/loki/modules.go @@ -383,6 +383,13 @@ func (t *Loki) initRuler() (_ services.Service, err error) { t.server.HTTP.Handle("/ruler/ring", t.ruler) cortex_ruler.RegisterRulerServer(t.server.GRPC, t.ruler) + // Ruler Legacy API Routes + t.server.HTTP.Path("/api/prom/rules").Methods("GET").Handler(t.httpAuthMiddleware.Wrap(http.HandlerFunc(t.ruler.ListRules))) + t.server.HTTP.Path("/api/prom/rules/{namespace}").Methods("GET").Handler(t.httpAuthMiddleware.Wrap(http.HandlerFunc(t.ruler.ListRules))) + t.server.HTTP.Path("/api/prom/rules/{namespace}/{groupName}").Methods("GET").Handler(t.httpAuthMiddleware.Wrap(http.HandlerFunc(t.ruler.GetRuleGroup))) + t.server.HTTP.Path("/api/prom/rules/{namespace}").Methods("POST").Handler(t.httpAuthMiddleware.Wrap(http.HandlerFunc(t.ruler.CreateRuleGroup))) + t.server.HTTP.Path("/api/prom/rules/{namespace}/{groupName}").Methods("DELETE").Handler(t.httpAuthMiddleware.Wrap(http.HandlerFunc(t.ruler.DeleteRuleGroup))) + // Ruler API Routes t.server.HTTP.Path("/api/v1/rules").Methods("GET").Handler(t.httpAuthMiddleware.Wrap(http.HandlerFunc(t.ruler.ListRules))) t.server.HTTP.Path("/api/v1/rules/{namespace}").Methods("GET").Handler(t.httpAuthMiddleware.Wrap(http.HandlerFunc(t.ruler.ListRules))) From 9f785dbc1c38ab2ebcbd8f3f75e47ecbc253dc47 Mon Sep 17 00:00:00 2001 From: Owen Diehl Date: Wed, 10 Jun 2020 14:41:39 -0400 Subject: [PATCH 05/40] registers ruler flags, doesnt double instantiate metrics --- pkg/loki/loki.go | 1 + pkg/ruler/compat.go | 7 ++++--- pkg/ruler/memhistory.go | 18 +----------------- 3 files changed, 6 insertions(+), 20 deletions(-) diff --git a/pkg/loki/loki.go b/pkg/loki/loki.go index 901dc316735d9..4e142ce793f85 100644 --- a/pkg/loki/loki.go +++ b/pkg/loki/loki.go @@ -83,6 +83,7 @@ func (c *Config) RegisterFlags(f *flag.FlagSet) { c.LimitsConfig.RegisterFlags(f) c.TableManager.RegisterFlags(f) c.Frontend.RegisterFlags(f) + c.Ruler.RegisterFlags(f) c.Worker.RegisterFlags(f) c.QueryRange.RegisterFlags(f) c.RuntimeConfig.RegisterFlags(f) diff --git a/pkg/ruler/compat.go b/pkg/ruler/compat.go index 7a643f84a979b..aae6f2a3c2cd7 100644 --- a/pkg/ruler/compat.go +++ b/pkg/ruler/compat.go @@ -19,16 +19,17 @@ func LokiDelayedQueryFunc(engine *logql.Engine) ruler.DelayedQueryFunc { return func(delay time.Duration) rules.QueryFunc { return func(ctx context.Context, qs string, t time.Time) (promql.Vector, error) { adjusted := t.Add(-delay) - q := engine.Query(logql.NewLiteralParams( + params := logql.NewLiteralParams( qs, adjusted, adjusted, 0, 0, - logproto.BACKWARD, + logproto.FORWARD, 0, nil, - )) + ) + q := engine.Query(params) res, err := q.Exec(ctx) if err != nil { diff --git a/pkg/ruler/memhistory.go b/pkg/ruler/memhistory.go index 83bdab0307fa3..713d827f36de1 100644 
--- a/pkg/ruler/memhistory.go +++ b/pkg/ruler/memhistory.go @@ -8,30 +8,15 @@ import ( "github.com/cortexproject/cortex/pkg/querier/series" "github.com/cortexproject/cortex/pkg/ruler/rules" "github.com/go-kit/kit/log/level" - "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/model" "github.com/prometheus/prometheus/pkg/labels" "github.com/prometheus/prometheus/storage" ) -type Metrics struct { - groupMetrics *rules.Metrics -} - -func NewMetrics(registerer prometheus.Registerer, groupMetrics *rules.Metrics) *Metrics { - if groupMetrics == nil { - groupMetrics = rules.NewGroupMetrics(registerer) - } - return &Metrics{ - groupMetrics: groupMetrics, - } -} - type MemHistory struct { userId string opts *rules.ManagerOptions appenders map[*rules.AlertingRule]*ForStateAppender - metrics *Metrics } func NewMemHistory(userId string, opts *rules.ManagerOptions) *MemHistory { @@ -39,7 +24,6 @@ func NewMemHistory(userId string, opts *rules.ManagerOptions) *MemHistory { userId: userId, opts: opts, appenders: make(map[*rules.AlertingRule]*ForStateAppender), - metrics: NewMetrics(opts.Registerer, opts.Metrics), } } @@ -81,7 +65,7 @@ func (m *MemHistory) RestoreForState(ts time.Time, alertRule *rules.AlertingRule if err != nil { alertRule.SetHealth(rules.HealthBad) alertRule.SetLastError(err) - m.metrics.groupMetrics.FailedEvaluate() + m.opts.Metrics.FailedEvaluate() } for _, smpl := range vec { From 728436b23a3197ed65207a95dd79b892a30004ff Mon Sep 17 00:00:00 2001 From: Owen Diehl Date: Tue, 23 Jun 2020 15:33:07 -0400 Subject: [PATCH 06/40] cleanup for old samples in ruler --- pkg/ruler/memhistory.go | 55 ++++++++++++++++++++++++++++++++++++++--- 1 file changed, 52 insertions(+), 3 deletions(-) diff --git a/pkg/ruler/memhistory.go b/pkg/ruler/memhistory.go index 713d827f36de1..08dd255bcc689 100644 --- a/pkg/ruler/memhistory.go +++ b/pkg/ruler/memhistory.go @@ -3,6 +3,7 @@ package ruler import ( "context" "errors" + "sync" "time" "github.com/cortexproject/cortex/pkg/querier/series" @@ -14,17 +15,39 @@ import ( ) type MemHistory struct { + mtx sync.RWMutex userId string opts *rules.ManagerOptions appenders map[*rules.AlertingRule]*ForStateAppender + + done chan struct{} + cleanupInterval time.Duration } func NewMemHistory(userId string, opts *rules.ManagerOptions) *MemHistory { - return &MemHistory{ + hist := &MemHistory{ userId: userId, opts: opts, appenders: make(map[*rules.AlertingRule]*ForStateAppender), + + cleanupInterval: 5 * time.Minute, // TODO: make configurable + } + hist.run() + return hist +} + +func (m *MemHistory) run() { + for range time.NewTicker(m.cleanupInterval).C { + m.mtx.Lock() + defer m.mtx.Unlock() + for rule, app := range m.appenders { + if rem := app.CleanupOldSamples(); rem == 0 { + delete(m.appenders, rule) + } + + } } + } // Implement rules.Appendable @@ -38,6 +61,9 @@ func (m *MemHistory) Appender(rule rules.Rule) (storage.Appender, error) { return nil, errors.New("unimplemented: MemHistory only accepts AlertingRules") } + m.mtx.Lock() + defer m.mtx.Unlock() + if app, ok := m.appenders[alertRule]; ok { return app, nil } @@ -82,6 +108,7 @@ func (m *MemHistory) RestoreForState(ts time.Time, alertRule *rules.AlertingRule } type ForStateAppender struct { + mtx sync.Mutex rule *rules.AlertingRule data map[uint64]*series.ConcreteSeries } @@ -101,6 +128,9 @@ func (m *ForStateAppender) Add(ls labels.Labels, t int64, v float64) (uint64, er } } + m.mtx.Lock() + defer m.mtx.Unlock() + fp := ls.Hash() if s, ok := m.data[fp]; ok { @@ -109,8 +139,6 @@ func (m 
*ForStateAppender) Add(ls labels.Labels, t int64, v float64) (uint64, er Value: model.SampleValue(v), }) - // release all older references that are no longer needed - s.TrimStart(time.Now().Add(-m.rule.Duration())) return 0, nil } m.data[fp] = series.NewConcreteSeries(ls, []model.SamplePair{{Timestamp: model.Time(t), Value: model.SampleValue(v)}}) @@ -118,6 +146,27 @@ func (m *ForStateAppender) Add(ls labels.Labels, t int64, v float64) (uint64, er } +// CleanupOldSamples removes samples that are outside of the rule's `For` duration. +func (m *ForStateAppender) CleanupOldSamples() (seriesRemaining int) { + m.mtx.Lock() + defer m.mtx.Unlock() + + // TODO: make this factor configurable? + // Basically, buffer samples in memory up to ruleDuration * oldEvaluationFactor. + oldEvaluationFactor := time.Duration(2) + + for fp, s := range m.data { + // release all older references that are no longer needed. + s.TrimStart(time.Now().Add(-m.rule.Duration() * oldEvaluationFactor)) + if s.Len() == 0 { + delete(m.data, fp) + } + } + + return len(m.data) + +} + func (m *ForStateAppender) AddFast(ref uint64, t int64, v float64) error { return errors.New("unimplemented") } From 22f764b076cc987ed59e3e61c2c3026b8d83e392 Mon Sep 17 00:00:00 2001 From: Owen Diehl Date: Tue, 23 Jun 2020 16:34:52 -0400 Subject: [PATCH 07/40] begins ruler tests --- pkg/ruler/memhistory.go | 7 +- pkg/ruler/memhistory_test.go | 194 +++++++++++++++++++++++++++++++++++ 2 files changed, 195 insertions(+), 6 deletions(-) create mode 100644 pkg/ruler/memhistory_test.go diff --git a/pkg/ruler/memhistory.go b/pkg/ruler/memhistory.go index 08dd255bcc689..37737b375fe9a 100644 --- a/pkg/ruler/memhistory.go +++ b/pkg/ruler/memhistory.go @@ -32,7 +32,7 @@ func NewMemHistory(userId string, opts *rules.ManagerOptions) *MemHistory { cleanupInterval: 5 * time.Minute, // TODO: make configurable } - hist.run() + go hist.run() return hist } @@ -185,11 +185,6 @@ func (m *ForStateAppender) Querier(ctx context.Context, mint, _ int64) (storage. 
} -// TimeFromMillis is a helper to turn milliseconds -> time.Time -func TimeFromMillis(ms int64) time.Time { - return time.Unix(0, ms*int64(time.Millisecond/time.Nanosecond)) -} - // ForStateAppenderQuerier wraps a **ForStateAppender and implements storage.Querier type ForStateAppenderQuerier struct { mint int64 diff --git a/pkg/ruler/memhistory_test.go b/pkg/ruler/memhistory_test.go new file mode 100644 index 0000000000000..2effd95c276d8 --- /dev/null +++ b/pkg/ruler/memhistory_test.go @@ -0,0 +1,194 @@ +package ruler + +import ( + "fmt" + "testing" + "time" + + "github.com/cortexproject/cortex/pkg/querier/series" + "github.com/cortexproject/cortex/pkg/ruler/rules" + "github.com/cortexproject/cortex/pkg/util" + "github.com/go-kit/kit/log" + "github.com/prometheus/common/model" + "github.com/prometheus/prometheus/pkg/labels" + "github.com/prometheus/prometheus/promql/parser" + "github.com/prometheus/prometheus/storage" + "github.com/stretchr/testify/require" +) + +func TestNewMemHistory(t *testing.T) { + userID := "abc" + expected := &MemHistory{ + userId: userID, + appenders: make(map[*rules.AlertingRule]*ForStateAppender), + + cleanupInterval: 5 * time.Minute, + } + require.Equal(t, expected, NewMemHistory(userID, nil)) + +} + +func TestMemHistoryAppender(t *testing.T) { + + for _, tc := range []struct { + desc string + err bool + expected storage.Appender + rule rules.Rule + }{ + { + desc: "nil rule returns NoopAppender", + err: false, + expected: NoopAppender{}, + rule: nil, + }, + { + desc: "recording rule errors", + err: true, + expected: nil, + rule: &rules.RecordingRule{}, + }, + { + desc: "alerting rule returns ForStateAppender", + err: false, + expected: NewForStateAppender(rules.NewAlertingRule("foo", nil, 0, nil, nil, nil, true, nil)), + rule: rules.NewAlertingRule("foo", nil, 0, nil, nil, nil, true, nil), + }, + } { + t.Run(tc.desc, func(t *testing.T) { + hist := NewMemHistory("abc", nil) + + app, err := hist.Appender(tc.rule) + if tc.err { + require.NotNil(t, err) + } + require.Equal(t, tc.expected, app) + }) + } +} + +func TestMemHistoryRestoreForState(t *testing.T) {} + +type stringer string + +func (s stringer) String() string { return string(s) } + +func mustParseLabels(s string) labels.Labels { + labels, err := parser.ParseMetric(s) + if err != nil { + panic(fmt.Sprintf("failed to parse %s", s)) + } + + return labels +} + +func newRule(name, qry, ls string, forDur time.Duration) *rules.AlertingRule { + return rules.NewAlertingRule(name, stringer(qry), forDur, mustParseLabels(ls), nil, nil, false, log.NewNopLogger()) +} + +func TestForStateAppenderAdd(t *testing.T) { + app := NewForStateAppender(newRule("foo", "query", `{foo="bar"}`, time.Minute)) + require.Equal(t, map[uint64]*series.ConcreteSeries{}, app.data) + + // create first series + first := mustParseLabels(`{foo="bar", bazz="buzz", __name__="ALERTS_FOR_STATE"}`) + _, err := app.Add(first, 1, 1) + require.Nil(t, err) + require.Equal(t, map[uint64]*series.ConcreteSeries{ + first.Hash(): series.NewConcreteSeries( + first, []model.SamplePair{{Timestamp: model.Time(1), Value: model.SampleValue(1)}}, + ), + }, app.data) + + // create second series + second := mustParseLabels(`{foo="bar", bazz="barf", __name__="ALERTS_FOR_STATE"}`) + _, err = app.Add(second, 1, 1) + require.Nil(t, err) + + require.Equal(t, map[uint64]*series.ConcreteSeries{ + first.Hash(): series.NewConcreteSeries( + first, []model.SamplePair{{Timestamp: model.Time(1), Value: model.SampleValue(1)}}, + ), + second.Hash(): series.NewConcreteSeries( + 
second, []model.SamplePair{{Timestamp: model.Time(1), Value: model.SampleValue(1)}}, + ), + }, app.data) + + // append first series + _, err = app.Add(first, 3, 3) + require.Nil(t, err) + + require.Equal(t, map[uint64]*series.ConcreteSeries{ + first.Hash(): series.NewConcreteSeries( + first, []model.SamplePair{ + {Timestamp: model.Time(1), Value: model.SampleValue(1)}, + {Timestamp: model.Time(3), Value: model.SampleValue(3)}, + }, + ), + second.Hash(): series.NewConcreteSeries( + second, []model.SamplePair{{Timestamp: model.Time(1), Value: model.SampleValue(1)}}, + ), + }, app.data) + + // insert new points at correct position + _, err = app.Add(first, 2, 2) + require.Nil(t, err) + + require.Equal(t, map[uint64]*series.ConcreteSeries{ + first.Hash(): series.NewConcreteSeries( + first, []model.SamplePair{ + {Timestamp: model.Time(1), Value: model.SampleValue(1)}, + {Timestamp: model.Time(2), Value: model.SampleValue(2)}, + {Timestamp: model.Time(3), Value: model.SampleValue(3)}, + }, + ), + second.Hash(): series.NewConcreteSeries( + second, []model.SamplePair{{Timestamp: model.Time(1), Value: model.SampleValue(1)}}, + ), + }, app.data) + + // ignore non ALERTS_FOR_STATE metrics + _, err = app.Add(mustParseLabels(`{foo="bar", bazz="barf", __name__="test"}`), 1, 1) + require.Nil(t, err) + + require.Equal(t, map[uint64]*series.ConcreteSeries{ + first.Hash(): series.NewConcreteSeries( + first, []model.SamplePair{ + {Timestamp: model.Time(1), Value: model.SampleValue(1)}, + {Timestamp: model.Time(2), Value: model.SampleValue(2)}, + {Timestamp: model.Time(3), Value: model.SampleValue(3)}, + }, + ), + second.Hash(): series.NewConcreteSeries( + second, []model.SamplePair{{Timestamp: model.Time(1), Value: model.SampleValue(1)}}, + ), + }, app.data) +} + +func TestForStateAppenderCleanup(t *testing.T) { + app := NewForStateAppender(newRule("foo", "query", `{foo="bar"}`, time.Minute)) + now := time.Now() + + // create ls series + ls := mustParseLabels(`{foo="bar", bazz="buzz", __name__="ALERTS_FOR_STATE"}`) + _, err := app.Add(ls, util.TimeToMillis(now.Add(-3*time.Minute)), 1) + require.Nil(t, err) + _, err = app.Add(ls, util.TimeToMillis(now.Add(-1*time.Minute)), 2) + require.Nil(t, err) + + rem := app.CleanupOldSamples() + require.Equal(t, 1, rem) + + require.Equal(t, map[uint64]*series.ConcreteSeries{ + ls.Hash(): series.NewConcreteSeries( + ls, []model.SamplePair{ + {Timestamp: model.Time(util.TimeToMillis(now.Add(-1 * time.Minute))), Value: model.SampleValue(2)}, + }, + ), + }, app.data) + +} + +func TestForStateAppenderQuerier(t *testing.T) { + +} From 4e2f97b664bbc69426da2a31f22c19a5f262a09c Mon Sep 17 00:00:00 2001 From: Owen Diehl Date: Tue, 23 Jun 2020 17:20:13 -0400 Subject: [PATCH 08/40] ForStateAppenderQuerier tests --- pkg/ruler/memhistory.go | 32 +++++++++-------- pkg/ruler/memhistory_test.go | 70 ++++++++++++++++++++++++++++++++++++ 2 files changed, 87 insertions(+), 15 deletions(-) diff --git a/pkg/ruler/memhistory.go b/pkg/ruler/memhistory.go index 37737b375fe9a..5de995d99dda4 100644 --- a/pkg/ruler/memhistory.go +++ b/pkg/ruler/memhistory.go @@ -176,10 +176,10 @@ func (m *ForStateAppender) Commit() error { return nil } func (m *ForStateAppender) Rollback() error { return nil } // implement storage.Queryable -func (m *ForStateAppender) Querier(ctx context.Context, mint, _ int64) (storage.Querier, error) { - // These are never realisticallly bounded by maxt, so we omit it. 
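+// Querier now records both ends of the requested range so selects can be clipped to [mint, maxt].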
+func (m *ForStateAppender) Querier(ctx context.Context, mint, maxt int64) (storage.Querier, error) { return ForStateAppenderQuerier{ mint: mint, + maxt: maxt, ForStateAppender: m, }, nil @@ -187,7 +187,7 @@ func (m *ForStateAppender) Querier(ctx context.Context, mint, _ int64) (storage. // ForStateAppenderQuerier wraps a **ForStateAppender and implements storage.Querier type ForStateAppenderQuerier struct { - mint int64 + mint, maxt int64 *ForStateAppender } @@ -197,6 +197,18 @@ func (q ForStateAppenderQuerier) Select(sortSeries bool, params *storage.SelectH return nil, nil, errors.New("ForStateAppenderQuerier does not support sorted selects") } + q.mtx.Lock() + defer q.mtx.Unlock() + + seekTo := q.mint + if params != nil && seekTo < params.Start { + seekTo = params.Start + } + + maxt := q.maxt + if params != nil && params.End < maxt { + maxt = params.End + } var filtered []storage.Series outer: @@ -207,20 +219,10 @@ outer: } iter := s.Iterator() - - seekTo := q.mint - if seekTo < params.Start { - seekTo = params.Start - } - if !iter.Seek(seekTo) { - continue - } - var samples []model.SamplePair - - for iter.Next() { + for ok := iter.Seek(seekTo); ok; ok = iter.Next() { t, v := iter.At() - if t > params.End { + if t > maxt { break } diff --git a/pkg/ruler/memhistory_test.go b/pkg/ruler/memhistory_test.go index 2effd95c276d8..5a170e0550ecc 100644 --- a/pkg/ruler/memhistory_test.go +++ b/pkg/ruler/memhistory_test.go @@ -1,6 +1,7 @@ package ruler import ( + "context" "fmt" "testing" "time" @@ -190,5 +191,74 @@ func TestForStateAppenderCleanup(t *testing.T) { } func TestForStateAppenderQuerier(t *testing.T) { + app := NewForStateAppender(newRule("foo", "query", `{foo="bar"}`, time.Minute)) + now := time.Now() + + // create ls series + ls := mustParseLabels(`{foo="bar", bazz="buzz", __name__="ALERTS_FOR_STATE"}`) + _, err := app.Add(ls, util.TimeToMillis(now.Add(-3*time.Minute)), 1) + require.Nil(t, err) + _, err = app.Add(ls, util.TimeToMillis(now.Add(-1*time.Minute)), 2) + require.Nil(t, err) + _, err = app.Add(ls, util.TimeToMillis(now.Add(1*time.Minute)), 3) + require.Nil(t, err) + + // never included due to bounds + _, err = app.Add(mustParseLabels(`{foo="bar", bazz="blip", __name__="ALERTS_FOR_STATE"}`), util.TimeToMillis(now.Add(-2*time.Hour)), 3) + require.Nil(t, err) + + // should succeed with nil selecthints + q, err := app.Querier(context.Background(), util.TimeToMillis(now.Add(-2*time.Minute)), util.TimeToMillis(now)) + require.Nil(t, err) + + set, _, err := q.Select( + false, + nil, + labels.MustNewMatcher(labels.MatchEqual, labels.MetricName, rules.AlertForStateMetricName), + ) + require.Nil(t, err) + require.Equal( + t, + series.NewConcreteSeriesSet( + []storage.Series{ + series.NewConcreteSeries(ls, []model.SamplePair{ + {Timestamp: model.Time(util.TimeToMillis(now.Add(-1 * time.Minute))), Value: model.SampleValue(2)}, + }), + }, + ), + set, + ) + + // // should be able to minimize selection window via hints + q, err = app.Querier(context.Background(), util.TimeToMillis(now.Add(-time.Hour)), util.TimeToMillis(now.Add(time.Hour))) + require.Nil(t, err) + set2, _, err := q.Select( + false, + &storage.SelectHints{ + Start: util.TimeToMillis(now.Add(-2 * time.Minute)), + End: util.TimeToMillis(now), + }, + labels.MustNewMatcher(labels.MatchEqual, labels.MetricName, rules.AlertForStateMetricName), + ) + require.Nil(t, err) + require.Equal( + t, + series.NewConcreteSeriesSet( + []storage.Series{ + series.NewConcreteSeries(ls, []model.SamplePair{ + {Timestamp: 
model.Time(util.TimeToMillis(now.Add(-1 * time.Minute))), Value: model.SampleValue(2)}, + }), + }, + ), + set2, + ) + + // requiring sorted results should err (unsupported) + _, _, err = q.Select( + true, + nil, + labels.MustNewMatcher(labels.MatchEqual, labels.MetricName, rules.AlertForStateMetricName), + ) + require.NotNil(t, err) } From 26fe29388d252df8f2a4837950983db0dc45fc86 Mon Sep 17 00:00:00 2001 From: Owen Diehl Date: Wed, 24 Jun 2020 10:16:45 -0400 Subject: [PATCH 09/40] memhistory stop --- pkg/ruler/compat.go | 5 +++-- pkg/ruler/memhistory.go | 39 +++++++++++++++++++++++++++--------- pkg/ruler/memhistory_test.go | 22 +++++++++++++++++--- 3 files changed, 51 insertions(+), 15 deletions(-) diff --git a/pkg/ruler/compat.go b/pkg/ruler/compat.go index aae6f2a3c2cd7..562214cf35fc1 100644 --- a/pkg/ruler/compat.go +++ b/pkg/ruler/compat.go @@ -50,8 +50,9 @@ func LokiDelayedQueryFunc(engine *logql.Engine) ruler.DelayedQueryFunc { } } -func InMemoryAppendableHistory(userID string, opts *rules.ManagerOptions) (rules.Appendable, rules.AlertHistory) { - hist := NewMemHistory(userID, opts) +func InMemoryAppendableHistory(userID string, opts *rules.ManagerOptions) (rules.Appendable, rules.TenantAlertHistory) { + // TODO: expose cleanup interval + hist := NewMemHistory(userID, 5*time.Minute, opts) return hist, hist } diff --git a/pkg/ruler/memhistory.go b/pkg/ruler/memhistory.go index 5de995d99dda4..9ceeded640ddf 100644 --- a/pkg/ruler/memhistory.go +++ b/pkg/ruler/memhistory.go @@ -24,30 +24,49 @@ type MemHistory struct { cleanupInterval time.Duration } -func NewMemHistory(userId string, opts *rules.ManagerOptions) *MemHistory { +func NewMemHistory(userId string, cleanupInterval time.Duration, opts *rules.ManagerOptions) *MemHistory { hist := &MemHistory{ userId: userId, opts: opts, appenders: make(map[*rules.AlertingRule]*ForStateAppender), - cleanupInterval: 5 * time.Minute, // TODO: make configurable + cleanupInterval: cleanupInterval, + done: make(chan struct{}), } go hist.run() return hist } +func (m *MemHistory) Stop() { + select { + // ensures Stop() is idempotent + case <-m.done: + return + default: + close(m.done) + return + } +} + +// run periodically cleans up old series/samples to ensure memory consumption doesn't grow unbounded. 
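+// Appenders whose rules hold no samples after trimming are removed from the map entirely,
+// so idle rules eventually cost nothing.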
func (m *MemHistory) run() { - for range time.NewTicker(m.cleanupInterval).C { - m.mtx.Lock() - defer m.mtx.Unlock() - for rule, app := range m.appenders { - if rem := app.CleanupOldSamples(); rem == 0 { - delete(m.appenders, rule) - } + t := time.NewTicker(m.cleanupInterval) + for { + select { + case <-m.done: + t.Stop() + return + case <-t.C: + m.mtx.Lock() + for rule, app := range m.appenders { + if rem := app.CleanupOldSamples(); rem == 0 { + delete(m.appenders, rule) + } + } + m.mtx.Unlock() } } - } // Implement rules.Appendable diff --git a/pkg/ruler/memhistory_test.go b/pkg/ruler/memhistory_test.go index 5a170e0550ecc..01854f2aa503b 100644 --- a/pkg/ruler/memhistory_test.go +++ b/pkg/ruler/memhistory_test.go @@ -25,7 +25,7 @@ func TestNewMemHistory(t *testing.T) { cleanupInterval: 5 * time.Minute, } - require.Equal(t, expected, NewMemHistory(userID, nil)) + require.Equal(t, expected, NewMemHistory(userID, time.Minute, nil)) } @@ -57,7 +57,7 @@ func TestMemHistoryAppender(t *testing.T) { }, } { t.Run(tc.desc, func(t *testing.T) { - hist := NewMemHistory("abc", nil) + hist := NewMemHistory("abc", time.Minute, nil) app, err := hist.Appender(tc.rule) if tc.err { @@ -68,7 +68,23 @@ func TestMemHistoryAppender(t *testing.T) { } } -func TestMemHistoryRestoreForState(t *testing.T) {} +// func TestMemHistoryRestoreForState(t *testing.T) {} +// func TestMemHistoryRestoreForState(t *testing.T) {} + +func TestMemHistoryStop(t *testing.T) { + hist := NewMemHistory("abc", time.Millisecond, nil) + <-time.After(2 * time.Millisecond) // allow it to start ticking (not strictly required for this test) + hist.Stop() + // ensure idempotency + hist.Stop() + + // ensure ticker is cleaned up + select { + case <-time.After(10 * time.Millisecond): + t.Fatalf("done channel not closed") + case <-hist.done: + } +} type stringer string From f753eb49b02eb9fe055580701e8a1e87f2d00b13 Mon Sep 17 00:00:00 2001 From: Owen Diehl Date: Wed, 24 Jun 2020 12:03:45 -0400 Subject: [PATCH 10/40] RestoreForState test --- pkg/ruler/memhistory.go | 13 +++++++- pkg/ruler/memhistory_test.go | 65 ++++++++++++++++++++++++++++-------- 2 files changed, 64 insertions(+), 14 deletions(-) diff --git a/pkg/ruler/memhistory.go b/pkg/ruler/memhistory.go index 9ceeded640ddf..922531675cd0f 100644 --- a/pkg/ruler/memhistory.go +++ b/pkg/ruler/memhistory.go @@ -8,6 +8,7 @@ import ( "github.com/cortexproject/cortex/pkg/querier/series" "github.com/cortexproject/cortex/pkg/ruler/rules" + "github.com/cortexproject/cortex/pkg/util" "github.com/go-kit/kit/log/level" "github.com/prometheus/common/model" "github.com/prometheus/prometheus/pkg/labels" @@ -114,7 +115,17 @@ func (m *MemHistory) RestoreForState(ts time.Time, alertRule *rules.AlertingRule } for _, smpl := range vec { - if _, err := app.Add(smpl.Metric, smpl.T, smpl.V); err != nil { + forStateSample := alertRule.ForStateSample( + &rules.Alert{ + Labels: smpl.Metric, + ActiveAt: ts, + Value: smpl.V, + }, + util.TimeFromMillis(smpl.T), + smpl.V, + ) + + if _, err := app.Add(forStateSample.Metric, forStateSample.T, forStateSample.V); err != nil { level.Error(m.opts.Logger).Log("msg", "error appending to MemHistory", "err", err) return } diff --git a/pkg/ruler/memhistory_test.go b/pkg/ruler/memhistory_test.go index 01854f2aa503b..490f094ef3872 100644 --- a/pkg/ruler/memhistory_test.go +++ b/pkg/ruler/memhistory_test.go @@ -12,23 +12,13 @@ import ( "github.com/go-kit/kit/log" "github.com/prometheus/common/model" "github.com/prometheus/prometheus/pkg/labels" + 
"github.com/prometheus/prometheus/promql" "github.com/prometheus/prometheus/promql/parser" "github.com/prometheus/prometheus/storage" "github.com/stretchr/testify/require" + "github.com/weaveworks/common/user" ) -func TestNewMemHistory(t *testing.T) { - userID := "abc" - expected := &MemHistory{ - userId: userID, - appenders: make(map[*rules.AlertingRule]*ForStateAppender), - - cleanupInterval: 5 * time.Minute, - } - require.Equal(t, expected, NewMemHistory(userID, time.Minute, nil)) - -} - func TestMemHistoryAppender(t *testing.T) { for _, tc := range []struct { @@ -69,7 +59,56 @@ func TestMemHistoryAppender(t *testing.T) { } // func TestMemHistoryRestoreForState(t *testing.T) {} -// func TestMemHistoryRestoreForState(t *testing.T) {} + +func TestMemHistoryRestoreForState(t *testing.T) { + opts := &rules.ManagerOptions{ + QueryFunc: rules.QueryFunc(func(ctx context.Context, q string, t time.Time) (promql.Vector, error) { + // always return the requested time + return promql.Vector{promql.Sample{ + Point: promql.Point{ + T: util.TimeToMillis(t), + V: float64(util.TimeToMillis(t)), + }, + Metric: mustParseLabels(`{foo="bar", __name__="something"}`), + }}, nil + }), + Context: user.InjectOrgID(context.Background(), "abc"), + Logger: log.NewNopLogger(), + Metrics: rules.NewGroupMetrics(nil), + } + + ts := time.Now().Round(time.Millisecond) + rule := newRule("rule1", "query", `{foo="bar"}`, time.Minute) + + hist := NewMemHistory("abc", time.Minute, opts) + hist.RestoreForState(ts, rule) + + app, err := hist.Appender(rule) + require.Nil(t, err) + casted := app.(*ForStateAppender) + + q, err := casted.Querier(context.Background(), 0, util.TimeToMillis(ts)) + require.Nil(t, err) + set, _, err := q.Select( + false, + nil, + labels.MustNewMatcher(labels.MatchEqual, labels.MetricName, rules.AlertForStateMetricName), + labels.MustNewMatcher(labels.MatchEqual, "foo", "bar"), + ) + require.Nil(t, err) + require.Equal(t, true, set.Next()) + s := set.At() + require.Equal(t, `{__name__="ALERTS_FOR_STATE", alertname="rule1", foo="bar"}`, s.Labels().String()) + iter := s.Iterator() + require.Equal(t, true, iter.Next()) + x, y := iter.At() + adjusted := ts.Add(-rule.Duration()) // Adjusted for the forDuration lookback. + require.Equal(t, util.TimeToMillis(adjusted), x) + require.Equal(t, float64(util.TimeToMillis(adjusted)), y) + require.Equal(t, false, iter.Next()) + + // TODO: ensure extra labels are propagated? +} func TestMemHistoryStop(t *testing.T) { hist := NewMemHistory("abc", time.Millisecond, nil) From 70cd160f1944a5976545d7e6ec964df512a4618e Mon Sep 17 00:00:00 2001 From: Owen Diehl Date: Wed, 24 Jun 2020 12:20:58 -0400 Subject: [PATCH 11/40] upstream querier ifc --- pkg/ruler/memhistory.go | 7 ++++--- pkg/ruler/memhistory_test.go | 15 ++++++--------- 2 files changed, 10 insertions(+), 12 deletions(-) diff --git a/pkg/ruler/memhistory.go b/pkg/ruler/memhistory.go index 922531675cd0f..d93f6285a8587 100644 --- a/pkg/ruler/memhistory.go +++ b/pkg/ruler/memhistory.go @@ -222,9 +222,10 @@ type ForStateAppenderQuerier struct { } // Select returns a set of series that matches the given label matchers. -func (q ForStateAppenderQuerier) Select(sortSeries bool, params *storage.SelectHints, matchers ...*labels.Matcher) (storage.SeriesSet, storage.Warnings, error) { +func (q ForStateAppenderQuerier) Select(sortSeries bool, params *storage.SelectHints, matchers ...*labels.Matcher) storage.SeriesSet { + // TODO: implement sorted selects (currently unused). 
if sortSeries { - return nil, nil, errors.New("ForStateAppenderQuerier does not support sorted selects") + return storage.NoopSeriesSet() } q.mtx.Lock() @@ -269,7 +270,7 @@ outer: } } - return series.NewConcreteSeriesSet(filtered), nil, nil + return series.NewConcreteSeriesSet(filtered) } // LabelValues returns all potential values for a label name. diff --git a/pkg/ruler/memhistory_test.go b/pkg/ruler/memhistory_test.go index 490f094ef3872..608d709054657 100644 --- a/pkg/ruler/memhistory_test.go +++ b/pkg/ruler/memhistory_test.go @@ -89,13 +89,12 @@ func TestMemHistoryRestoreForState(t *testing.T) { q, err := casted.Querier(context.Background(), 0, util.TimeToMillis(ts)) require.Nil(t, err) - set, _, err := q.Select( + set := q.Select( false, nil, labels.MustNewMatcher(labels.MatchEqual, labels.MetricName, rules.AlertForStateMetricName), labels.MustNewMatcher(labels.MatchEqual, "foo", "bar"), ) - require.Nil(t, err) require.Equal(t, true, set.Next()) s := set.At() require.Equal(t, `{__name__="ALERTS_FOR_STATE", alertname="rule1", foo="bar"}`, s.Labels().String()) @@ -266,12 +265,11 @@ func TestForStateAppenderQuerier(t *testing.T) { q, err := app.Querier(context.Background(), util.TimeToMillis(now.Add(-2*time.Minute)), util.TimeToMillis(now)) require.Nil(t, err) - set, _, err := q.Select( + set := q.Select( false, nil, labels.MustNewMatcher(labels.MatchEqual, labels.MetricName, rules.AlertForStateMetricName), ) - require.Nil(t, err) require.Equal( t, series.NewConcreteSeriesSet( @@ -287,7 +285,7 @@ func TestForStateAppenderQuerier(t *testing.T) { // // should be able to minimize selection window via hints q, err = app.Querier(context.Background(), util.TimeToMillis(now.Add(-time.Hour)), util.TimeToMillis(now.Add(time.Hour))) require.Nil(t, err) - set2, _, err := q.Select( + set2 := q.Select( false, &storage.SelectHints{ Start: util.TimeToMillis(now.Add(-2 * time.Minute)), @@ -295,7 +293,6 @@ func TestForStateAppenderQuerier(t *testing.T) { }, labels.MustNewMatcher(labels.MatchEqual, labels.MetricName, rules.AlertForStateMetricName), ) - require.Nil(t, err) require.Equal( t, series.NewConcreteSeriesSet( @@ -308,12 +305,12 @@ func TestForStateAppenderQuerier(t *testing.T) { set2, ) - // requiring sorted results should err (unsupported) - _, _, err = q.Select( + // requiring sorted results return nothing (unsupported) + empty := q.Select( true, nil, labels.MustNewMatcher(labels.MatchEqual, labels.MetricName, rules.AlertForStateMetricName), ) - require.NotNil(t, err) + require.Equal(t, false, empty.Next()) } From 68eab85f8351795ebbc01c7cd0b9b5d36d04029f Mon Sep 17 00:00:00 2001 From: Owen Diehl Date: Wed, 24 Jun 2020 15:05:08 -0400 Subject: [PATCH 12/40] introducing loki ruler metrics --- pkg/loki/modules.go | 2 +- pkg/ruler/compat.go | 11 ++++--- pkg/ruler/memhistory.go | 56 ++++++++++++++++++++++++++++++------ pkg/ruler/memhistory_test.go | 19 ++++++------ 4 files changed, 67 insertions(+), 21 deletions(-) diff --git a/pkg/loki/modules.go b/pkg/loki/modules.go index 388bf21eca836..fdfb966dcffec 100644 --- a/pkg/loki/modules.go +++ b/pkg/loki/modules.go @@ -367,7 +367,7 @@ func (t *Loki) initRuler() (_ services.Service, err error) { t.ruler, err = cortex_ruler.NewRuler( t.cfg.Ruler, ruler.LokiDelayedQueryFunc(engine), - ruler.InMemoryAppendableHistory, + ruler.InMemoryAppendableHistory(prometheus.DefaultRegisterer), prometheus.DefaultRegisterer, func(s string) (fmt.Stringer, error) { return logql.ParseExpr(s) }, util.Logger, diff --git a/pkg/ruler/compat.go b/pkg/ruler/compat.go index 
562214cf35fc1..113796cc66601 100644 --- a/pkg/ruler/compat.go +++ b/pkg/ruler/compat.go @@ -7,6 +7,7 @@ import ( "github.com/cortexproject/cortex/pkg/ruler" "github.com/cortexproject/cortex/pkg/ruler/rules" + "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/prometheus/pkg/labels" "github.com/prometheus/prometheus/promql" "github.com/prometheus/prometheus/storage" @@ -50,10 +51,12 @@ func LokiDelayedQueryFunc(engine *logql.Engine) ruler.DelayedQueryFunc { } } -func InMemoryAppendableHistory(userID string, opts *rules.ManagerOptions) (rules.Appendable, rules.TenantAlertHistory) { - // TODO: expose cleanup interval - hist := NewMemHistory(userID, 5*time.Minute, opts) - return hist, hist +func InMemoryAppendableHistory(r prometheus.Registerer) func(string, *rules.ManagerOptions) (rules.Appendable, rules.TenantAlertHistory) { + metrics := NewMetrics(r) + return func(userID string, opts *rules.ManagerOptions) (rules.Appendable, rules.TenantAlertHistory) { + hist := NewMemHistory(userID, 5*time.Minute, opts, metrics) + return hist, hist + } } type NoopAppender struct{} diff --git a/pkg/ruler/memhistory.go b/pkg/ruler/memhistory.go index d93f6285a8587..4b751c6704b4f 100644 --- a/pkg/ruler/memhistory.go +++ b/pkg/ruler/memhistory.go @@ -10,26 +10,54 @@ import ( "github.com/cortexproject/cortex/pkg/ruler/rules" "github.com/cortexproject/cortex/pkg/util" "github.com/go-kit/kit/log/level" + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/client_golang/prometheus/promauto" "github.com/prometheus/common/model" "github.com/prometheus/prometheus/pkg/labels" "github.com/prometheus/prometheus/storage" ) +type Metrics struct { + Series prometheus.Gauge // in memory series + Samples prometheus.Gauge // in memory samples + RuleGranularity prometheus.Histogram // Resolution of evaluations used in RestoreForState when recomputing past entries +} + +func NewMetrics(r prometheus.Registerer) *Metrics { + return &Metrics{ + Series: promauto.With(r).NewGauge(prometheus.GaugeOpts{ + Namespace: "loki", + Name: "ruler_memory_series_total", + }), + Samples: promauto.With(r).NewGauge(prometheus.GaugeOpts{ + Namespace: "loki", + Name: "ruler_memory_samples_total", + }), + RuleGranularity: promauto.With(r).NewHistogram(prometheus.HistogramOpts{ + Name: "loki", + Namespace: "ruler_memory_for_state_resolution", + Buckets: []float64{1, 2, 4, 8, 16}, + }), + } +} + type MemHistory struct { mtx sync.RWMutex userId string opts *rules.ManagerOptions appenders map[*rules.AlertingRule]*ForStateAppender + metrics *Metrics done chan struct{} cleanupInterval time.Duration } -func NewMemHistory(userId string, cleanupInterval time.Duration, opts *rules.ManagerOptions) *MemHistory { +func NewMemHistory(userId string, cleanupInterval time.Duration, opts *rules.ManagerOptions, metrics *Metrics) *MemHistory { hist := &MemHistory{ userId: userId, opts: opts, appenders: make(map[*rules.AlertingRule]*ForStateAppender), + metrics: metrics, cleanupInterval: cleanupInterval, done: make(chan struct{}), @@ -88,7 +116,7 @@ func (m *MemHistory) Appender(rule rules.Rule) (storage.Appender, error) { return app, nil } - app := NewForStateAppender(alertRule) + app := NewForStateAppender(alertRule, m.metrics) m.appenders[alertRule] = app return app, nil } @@ -107,7 +135,9 @@ func (m *MemHistory) RestoreForState(ts time.Time, alertRule *rules.AlertingRule // of whether the alert condition was positive during this period. This means after restarts, we may lose up // to the ForDuration in alert granularity. 
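 	// For example, a rule with a 10m `for` duration is re-evaluated once at now-10m after a restart,
 	// not at every evaluation interval inside that window.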
// TODO: Do we want this to instead evaluate forDuration/interval times? + start := time.Now() vec, err := m.opts.QueryFunc(m.opts.Context, alertRule.Query().String(), ts.Add(-alertRule.Duration())) + m.opts.Metrics.IncrementEvaluations() if err != nil { alertRule.SetHealth(rules.HealthBad) alertRule.SetLastError(err) @@ -130,6 +160,7 @@ func (m *MemHistory) RestoreForState(ts time.Time, alertRule *rules.AlertingRule return } } + m.opts.Metrics.EvalDuration(time.Since(start)) // Now that we've evaluated the rule and written the results to our in memory appender, // delegate to the default implementation. @@ -138,15 +169,17 @@ func (m *MemHistory) RestoreForState(ts time.Time, alertRule *rules.AlertingRule } type ForStateAppender struct { - mtx sync.Mutex - rule *rules.AlertingRule - data map[uint64]*series.ConcreteSeries + mtx sync.Mutex + metrics *Metrics + rule *rules.AlertingRule + data map[uint64]*series.ConcreteSeries } -func NewForStateAppender(rule *rules.AlertingRule) *ForStateAppender { +func NewForStateAppender(rule *rules.AlertingRule, metrics *Metrics) *ForStateAppender { return &ForStateAppender{ - rule: rule, - data: make(map[uint64]*series.ConcreteSeries), + rule: rule, + data: make(map[uint64]*series.ConcreteSeries), + metrics: metrics, } } @@ -164,14 +197,18 @@ func (m *ForStateAppender) Add(ls labels.Labels, t int64, v float64) (uint64, er fp := ls.Hash() if s, ok := m.data[fp]; ok { + priorLn := s.Len() s.Add(model.SamplePair{ Timestamp: model.Time(t), Value: model.SampleValue(v), }) + m.metrics.Samples.Add(float64(s.Len() - priorLn)) return 0, nil } m.data[fp] = series.NewConcreteSeries(ls, []model.SamplePair{{Timestamp: model.Time(t), Value: model.SampleValue(v)}}) + m.metrics.Series.Inc() + m.metrics.Samples.Inc() return 0, nil } @@ -187,8 +224,11 @@ func (m *ForStateAppender) CleanupOldSamples() (seriesRemaining int) { for fp, s := range m.data { // release all older references that are no longer needed. 
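+		// Capture the length before trimming so the samples gauge below can be adjusted by the delta.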
+ priorLn := s.Len() s.TrimStart(time.Now().Add(-m.rule.Duration() * oldEvaluationFactor)) + m.metrics.Samples.Add(float64(s.Len() - priorLn)) if s.Len() == 0 { + m.metrics.Series.Dec() delete(m.data, fp) } } diff --git a/pkg/ruler/memhistory_test.go b/pkg/ruler/memhistory_test.go index 608d709054657..015df1b21646b 100644 --- a/pkg/ruler/memhistory_test.go +++ b/pkg/ruler/memhistory_test.go @@ -42,18 +42,21 @@ func TestMemHistoryAppender(t *testing.T) { { desc: "alerting rule returns ForStateAppender", err: false, - expected: NewForStateAppender(rules.NewAlertingRule("foo", nil, 0, nil, nil, nil, true, nil)), + expected: NewForStateAppender(rules.NewAlertingRule("foo", nil, 0, nil, nil, nil, true, nil), NewMetrics(nil)), rule: rules.NewAlertingRule("foo", nil, 0, nil, nil, nil, true, nil), }, } { t.Run(tc.desc, func(t *testing.T) { - hist := NewMemHistory("abc", time.Minute, nil) + hist := NewMemHistory("abc", time.Minute, nil, NewMetrics(nil)) app, err := hist.Appender(tc.rule) if tc.err { require.NotNil(t, err) } - require.Equal(t, tc.expected, app) + + if tc.expected != nil { + require.IsTypef(t, tc.expected, app, "expected ForStateAppender") + } }) } } @@ -80,7 +83,7 @@ func TestMemHistoryRestoreForState(t *testing.T) { ts := time.Now().Round(time.Millisecond) rule := newRule("rule1", "query", `{foo="bar"}`, time.Minute) - hist := NewMemHistory("abc", time.Minute, opts) + hist := NewMemHistory("abc", time.Minute, opts, NewMetrics(nil)) hist.RestoreForState(ts, rule) app, err := hist.Appender(rule) @@ -110,7 +113,7 @@ func TestMemHistoryRestoreForState(t *testing.T) { } func TestMemHistoryStop(t *testing.T) { - hist := NewMemHistory("abc", time.Millisecond, nil) + hist := NewMemHistory("abc", time.Millisecond, nil, NewMetrics(nil)) <-time.After(2 * time.Millisecond) // allow it to start ticking (not strictly required for this test) hist.Stop() // ensure idempotency @@ -142,7 +145,7 @@ func newRule(name, qry, ls string, forDur time.Duration) *rules.AlertingRule { } func TestForStateAppenderAdd(t *testing.T) { - app := NewForStateAppender(newRule("foo", "query", `{foo="bar"}`, time.Minute)) + app := NewForStateAppender(newRule("foo", "query", `{foo="bar"}`, time.Minute), NewMetrics(nil)) require.Equal(t, map[uint64]*series.ConcreteSeries{}, app.data) // create first series @@ -221,7 +224,7 @@ func TestForStateAppenderAdd(t *testing.T) { } func TestForStateAppenderCleanup(t *testing.T) { - app := NewForStateAppender(newRule("foo", "query", `{foo="bar"}`, time.Minute)) + app := NewForStateAppender(newRule("foo", "query", `{foo="bar"}`, time.Minute), NewMetrics(nil)) now := time.Now() // create ls series @@ -245,7 +248,7 @@ func TestForStateAppenderCleanup(t *testing.T) { } func TestForStateAppenderQuerier(t *testing.T) { - app := NewForStateAppender(newRule("foo", "query", `{foo="bar"}`, time.Minute)) + app := NewForStateAppender(newRule("foo", "query", `{foo="bar"}`, time.Minute), NewMetrics(nil)) now := time.Now() // create ls series From 9c83bae3a21c5666270199e6ba70f6d867a5a6ef Mon Sep 17 00:00:00 2001 From: Owen Diehl Date: Wed, 24 Jun 2020 15:15:17 -0400 Subject: [PATCH 13/40] removes rule granularity metric -- to be discussed in pr --- pkg/ruler/memhistory.go | 10 ++-------- 1 file changed, 2 insertions(+), 8 deletions(-) diff --git a/pkg/ruler/memhistory.go b/pkg/ruler/memhistory.go index 4b751c6704b4f..e4ace3affb9dd 100644 --- a/pkg/ruler/memhistory.go +++ b/pkg/ruler/memhistory.go @@ -18,9 +18,8 @@ import ( ) type Metrics struct { - Series prometheus.Gauge // in memory series - 
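The test change above swaps a deep-equality assertion for a type assertion: once the appender carries a *Metrics pointer and internal state, two independently constructed instances are no longer Equal, but the concrete type is still the property the test cares about. A small illustration of that distinction using testify, with a placeholder type rather than the real ForStateAppender:

package example

import (
    "testing"

    "github.com/stretchr/testify/require"
)

type forStateAppender struct{ data map[uint64]int }

func TestAppenderType(t *testing.T) {
    expected := &forStateAppender{data: map[uint64]int{1: 1}}
    got := &forStateAppender{data: map[uint64]int{}}

    // Deep equality fails because the internal state differs between instances...
    require.NotEqual(t, expected, got)
    // ...but the type assertion still holds, which is what the test asserts now.
    require.IsTypef(t, expected, got, "expected *forStateAppender")
}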
Samples prometheus.Gauge // in memory samples - RuleGranularity prometheus.Histogram // Resolution of evaluations used in RestoreForState when recomputing past entries + Series prometheus.Gauge // in memory series + Samples prometheus.Gauge // in memory samples } func NewMetrics(r prometheus.Registerer) *Metrics { @@ -33,11 +32,6 @@ func NewMetrics(r prometheus.Registerer) *Metrics { Namespace: "loki", Name: "ruler_memory_samples_total", }), - RuleGranularity: promauto.With(r).NewHistogram(prometheus.HistogramOpts{ - Name: "loki", - Namespace: "ruler_memory_for_state_resolution", - Buckets: []float64{1, 2, 4, 8, 16}, - }), } } From 5e90c13d565657ebc2c5d850d2292eed04ddc936 Mon Sep 17 00:00:00 2001 From: Owen Diehl Date: Thu, 25 Jun 2020 12:07:14 -0400 Subject: [PATCH 14/40] validates ruler cfg --- pkg/loki/loki.go | 5 ++++- pkg/loki/modules.go | 2 ++ 2 files changed, 6 insertions(+), 1 deletion(-) diff --git a/pkg/loki/loki.go b/pkg/loki/loki.go index 4e142ce793f85..f723b12471d67 100644 --- a/pkg/loki/loki.go +++ b/pkg/loki/loki.go @@ -106,6 +106,9 @@ func (c *Config) Validate(log log.Logger) error { if err := c.TableManager.Validate(); err != nil { return errors.Wrap(err, "invalid tablemanager config") } + if err := c.Ruler.Validate(); err != nil { + return errors.Wrap(err, "invalid ruler config") + } return nil } @@ -312,7 +315,7 @@ func (t *Loki) setupModuleManager() error { Ingester: {Store, Server, MemberlistKV}, Querier: {Store, Ring, Server}, QueryFrontend: {Server, Overrides}, - Ruler: {Server, Ring, Store, Overrides}, + Ruler: {Distributor, Store}, TableManager: {Server}, All: {Querier, Ingester, Distributor, TableManager, Ruler}, } diff --git a/pkg/loki/modules.go b/pkg/loki/modules.go index fdfb966dcffec..dab4207f0f087 100644 --- a/pkg/loki/modules.go +++ b/pkg/loki/modules.go @@ -415,6 +415,8 @@ func (t *Loki) initMemberlistKV() (services.Service, error) { // Note: Another PeriodicConfig might be applicable for future logs which can change index type. 
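The Validate addition follows the pattern already used for the other sub-configs: each section validates itself and any failure is wrapped with context, so a bad ruler block surfaces as "invalid ruler config: ...". A self-contained sketch of that chaining with a made-up sub-config (pkg/errors is assumed here because the surrounding code wraps with errors.Wrap):

package main

import (
    "fmt"

    "github.com/pkg/errors"
)

type rulerConfig struct{ PollInterval int }

func (c rulerConfig) Validate() error {
    if c.PollInterval <= 0 {
        return errors.New("poll interval must be positive")
    }
    return nil
}

type config struct{ Ruler rulerConfig }

// Validate delegates to each sub-config and wraps failures with the section name.
func (c config) Validate() error {
    if err := c.Ruler.Validate(); err != nil {
        return errors.Wrap(err, "invalid ruler config")
    }
    return nil
}

func main() {
    fmt.Println(config{}.Validate()) // invalid ruler config: poll interval must be positive
}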
func activePeriodConfig(cfg chunk.SchemaConfig) int { now := model.Now() + fmt.Println("cfg", cfg) + fmt.Println("now", now) i := sort.Search(len(cfg.Configs), func(i int) bool { return cfg.Configs[i].From.Time > now }) From e575b15cf924376279545554d21343a77798136e Mon Sep 17 00:00:00 2001 From: Owen Diehl Date: Thu, 25 Jun 2020 14:25:26 -0400 Subject: [PATCH 15/40] renames gauge metrics to not use total --- pkg/ruler/memhistory.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pkg/ruler/memhistory.go b/pkg/ruler/memhistory.go index e4ace3affb9dd..35eccce2d9417 100644 --- a/pkg/ruler/memhistory.go +++ b/pkg/ruler/memhistory.go @@ -26,11 +26,11 @@ func NewMetrics(r prometheus.Registerer) *Metrics { return &Metrics{ Series: promauto.With(r).NewGauge(prometheus.GaugeOpts{ Namespace: "loki", - Name: "ruler_memory_series_total", + Name: "ruler_memory_series", }), Samples: promauto.With(r).NewGauge(prometheus.GaugeOpts{ Namespace: "loki", - Name: "ruler_memory_samples_total", + Name: "ruler_memory_samples", }), } } From 4f3081731aaa77513b336661da54c862c12ebd69 Mon Sep 17 00:00:00 2001 From: Owen Diehl Date: Thu, 25 Jun 2020 15:29:22 -0400 Subject: [PATCH 16/40] removes unnecessary logs --- pkg/loki/modules.go | 2 -- 1 file changed, 2 deletions(-) diff --git a/pkg/loki/modules.go b/pkg/loki/modules.go index dab4207f0f087..fdfb966dcffec 100644 --- a/pkg/loki/modules.go +++ b/pkg/loki/modules.go @@ -415,8 +415,6 @@ func (t *Loki) initMemberlistKV() (services.Service, error) { // Note: Another PeriodicConfig might be applicable for future logs which can change index type. func activePeriodConfig(cfg chunk.SchemaConfig) int { now := model.Now() - fmt.Println("cfg", cfg) - fmt.Println("now", now) i := sort.Search(len(cfg.Configs), func(i int) bool { return cfg.Configs[i].From.Time > now }) From 30e9b521788999afc6bfa752a7b6edd4d933bad1 Mon Sep 17 00:00:00 2001 From: Owen Diehl Date: Fri, 26 Jun 2020 12:51:21 -0400 Subject: [PATCH 17/40] logs synthetic restoreforstate --- pkg/ruler/memhistory.go | 16 +++++++++++++++- 1 file changed, 15 insertions(+), 1 deletion(-) diff --git a/pkg/ruler/memhistory.go b/pkg/ruler/memhistory.go index 35eccce2d9417..008b0ba570d41 100644 --- a/pkg/ruler/memhistory.go +++ b/pkg/ruler/memhistory.go @@ -130,7 +130,16 @@ func (m *MemHistory) RestoreForState(ts time.Time, alertRule *rules.AlertingRule // to the ForDuration in alert granularity. // TODO: Do we want this to instead evaluate forDuration/interval times? 
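activePeriodConfig relies on sort.Search over the schema periods (sorted by From ascending): it finds the first period whose From is still in the future, and the entry just before it is the period currently in effect. The tail of the original function is not shown in this hunk, so the step back in the sketch below is an assumption about how the lookup is completed; the types are simplified (time.Time instead of model.Time):

package main

import (
    "fmt"
    "sort"
    "time"
)

type periodConfig struct {
    From   time.Time
    Schema string
}

// activePeriod returns the index of the period in effect at now,
// assuming configs are sorted by From in ascending order.
func activePeriod(configs []periodConfig, now time.Time) int {
    i := sort.Search(len(configs), func(i int) bool {
        return configs[i].From.After(now)
    })
    if i > 0 {
        i-- // step back to the last period that has already started (assumed behavior)
    }
    return i
}

func main() {
    now := time.Now()
    configs := []periodConfig{
        {From: now.Add(-48 * time.Hour), Schema: "v10"},
        {From: now.Add(24 * time.Hour), Schema: "v11"}, // not yet active
    }
    fmt.Println(configs[activePeriod(configs, now)].Schema) // v10
}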
start := time.Now() - vec, err := m.opts.QueryFunc(m.opts.Context, alertRule.Query().String(), ts.Add(-alertRule.Duration())) + adjusted := ts.Add(-alertRule.Duration()) + + level.Info(m.opts.Logger).Log( + "msg", "restoring synthetic for state", + "adjusted_ts", adjusted, + "rule", alertRule.Name(), + "query", alertRule.Query().String(), + "rule_duration", alertRule.Duration(), + ) + vec, err := m.opts.QueryFunc(m.opts.Context, alertRule.Query().String(), adjusted) m.opts.Metrics.IncrementEvaluations() if err != nil { alertRule.SetHealth(rules.HealthBad) @@ -154,6 +163,11 @@ func (m *MemHistory) RestoreForState(ts time.Time, alertRule *rules.AlertingRule return } } + level.Info(m.opts.Logger).Log( + "msg", "resolved synthetic for_state", + "rule", alertRule.Name(), + "n_samples", len(vec), + ) m.opts.Metrics.EvalDuration(time.Since(start)) // Now that we've evaluated the rule and written the results to our in memory appender, From 272043d82cd21e2369cec7747e98da6fad2218d3 Mon Sep 17 00:00:00 2001 From: Owen Diehl Date: Fri, 26 Jun 2020 16:00:19 -0400 Subject: [PATCH 18/40] logs tenant in ruler --- pkg/ruler/memhistory.go | 2 ++ 1 file changed, 2 insertions(+) diff --git a/pkg/ruler/memhistory.go b/pkg/ruler/memhistory.go index 008b0ba570d41..1321fdf1fdb20 100644 --- a/pkg/ruler/memhistory.go +++ b/pkg/ruler/memhistory.go @@ -138,6 +138,7 @@ func (m *MemHistory) RestoreForState(ts time.Time, alertRule *rules.AlertingRule "rule", alertRule.Name(), "query", alertRule.Query().String(), "rule_duration", alertRule.Duration(), + "tenant", m.userId, ) vec, err := m.opts.QueryFunc(m.opts.Context, alertRule.Query().String(), adjusted) m.opts.Metrics.IncrementEvaluations() @@ -167,6 +168,7 @@ func (m *MemHistory) RestoreForState(ts time.Time, alertRule *rules.AlertingRule "msg", "resolved synthetic for_state", "rule", alertRule.Name(), "n_samples", len(vec), + "tenant", m.userId, ) m.opts.Metrics.EvalDuration(time.Since(start)) From 9d299e21a217319162091e2be8b1ff8ac51fdd08 Mon Sep 17 00:00:00 2001 From: Owen Diehl Date: Mon, 29 Jun 2020 16:06:48 -0400 Subject: [PATCH 19/40] sets cortex to owen's unmerged fork --- go.sum | 193 ++++----------------------------------------- vendor/modules.txt | 2 + 2 files changed, 16 insertions(+), 179 deletions(-) diff --git a/go.sum b/go.sum index c81fb4d72144c..ac90601292801 100644 --- a/go.sum +++ b/go.sum @@ -8,7 +8,6 @@ cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6A cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc= cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0= -cloud.google.com/go v0.49.0 h1:CH+lkubJzcPYB1Ggupcq0+k8Ni2ILdG2lYjDIgavDBQ= cloud.google.com/go v0.49.0/go.mod h1:hGvAdzcWNbyuxS3nWhD7H2cIJxjRRTRLQVB0bdputVY= cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To= cloud.google.com/go v0.51.0/go.mod h1:hWtGJ6gnXH+KgDv+V0zFGDvpi07n3z8ZNj3T1RW0Gcw= @@ -21,7 +20,6 @@ cloud.google.com/go/bigquery v1.3.0 h1:sAbMqjY1PEQKZBWfbu6Y6bsupJ9c4QdHnzg/VvYTL cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= cloud.google.com/go/bigquery v1.4.0 h1:xE3CPsOgttP4ACBePh79zTKALtXwn/Edhcr16R5hMWU= cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= -cloud.google.com/go/bigtable v1.1.0 h1:+IakvK2mFz1FbfA9Ti0JoKRPiJkORngh9xhfMbVkJqw= cloud.google.com/go/bigtable v1.1.0/go.mod 
h1:B6ByKcIdYmhoyDzmOnQxyOhN6r05qnewYIxxG6L0/b4= cloud.google.com/go/bigtable v1.2.0 h1:F4cCmA4nuV84V5zYQ3MKY+M1Cw1avHDuf3S/LcZPA9c= cloud.google.com/go/bigtable v1.2.0/go.mod h1:JcVAOl45lrTmQfLj7T6TxyMzIN/3FGGcFm+2xVAli2o= @@ -35,13 +33,11 @@ cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+ cloud.google.com/go/pubsub v1.2.0 h1:Lpy6hKgdcl7a3WGSfJIFmxmcdjSpP6OmBEfcOv1Y680= cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= -cloud.google.com/go/storage v1.3.0 h1:2Ze/3nQD5F+HfL0xOPM2EeawDWs+NPRtzgcre+17iZU= cloud.google.com/go/storage v1.3.0/go.mod h1:9IAwXhoyBJ7z9LcAwkj0/7NnPzYaPeZxxVp3zm+5IqA= cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos= cloud.google.com/go/storage v1.6.0 h1:UDpwYIwla4jHGzZJaEJYx1tOejbgSoNqsAfHAUYe2r8= cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= collectd.org v0.3.0/go.mod h1:A/8DzQBkF6abtvrT2j/AU/4tiBgJWYyh0y/oB/4MlWE= -contrib.go.opencensus.io/exporter/ocagent v0.6.0 h1:Z1n6UAyr0QwM284yUuh5Zd8JlvxUGAhFZcgMJkMPrGM= contrib.go.opencensus.io/exporter/ocagent v0.6.0/go.mod h1:zmKjrJcdo0aYcVS7bmEeSEBLPA9YJp5bjrofdU3pIXs= dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= github.com/Azure/azure-pipeline-go v0.2.1 h1:OLBdZJ3yvOn2MezlWvbrBMTEUQC72zAftRZOMdj5HYo= @@ -58,42 +54,36 @@ github.com/Azure/go-autorest v11.2.8+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSW github.com/Azure/go-autorest v14.2.0+incompatible h1:V5VMDjClD3GiElqLWO7mz2MxNAK/vTfRHdAubSIPRgs= github.com/Azure/go-autorest v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24= github.com/Azure/go-autorest/autorest v0.9.0/go.mod h1:xyHB1BMZT0cuDHU7I0+g046+BFDTQ8rEZB0s4Yfa6bI= -github.com/Azure/go-autorest/autorest v0.9.3-0.20191028180845-3492b2aff503 h1:uUhdsDMg2GbFLF5GfQPtLMWd5vdDZSfqvqQp3waafxQ= github.com/Azure/go-autorest/autorest v0.9.3-0.20191028180845-3492b2aff503/go.mod h1:xyHB1BMZT0cuDHU7I0+g046+BFDTQ8rEZB0s4Yfa6bI= -github.com/Azure/go-autorest/autorest v0.10.2 h1:NuSF3gXetiHyUbVdneJMEVyPUYAe5wh+aN08JYAf1tI= github.com/Azure/go-autorest/autorest v0.10.2/go.mod h1:/FALq9T/kS7b5J5qsQ+RSTUdAmGFqi0vUdVNNx8q630= github.com/Azure/go-autorest/autorest v0.11.2 h1:BR5GoSGobeiMwGOOIxXuvNKNPy+HMGdteKB8kJUDnBE= github.com/Azure/go-autorest/autorest v0.11.2/go.mod h1:JFgpikqFJ/MleTTxwepExTKnFUKKszPS8UavbQYUMuw= github.com/Azure/go-autorest/autorest/adal v0.5.0/go.mod h1:8Z9fGy2MpX0PvDjB1pEgQTmVqjGhiHBW7RJJEciWzS0= -github.com/Azure/go-autorest/autorest/adal v0.8.1-0.20191028180845-3492b2aff503 h1:Hxqlh1uAA8aGpa1dFhDNhll7U/rkWtG8ZItFvRMr7l0= github.com/Azure/go-autorest/autorest/adal v0.8.1-0.20191028180845-3492b2aff503/go.mod h1:Z6vX6WXXuyieHAXwMj0S6HY6e6wcHn37qQMBQlvY3lc= github.com/Azure/go-autorest/autorest/adal v0.8.2/go.mod h1:ZjhuQClTqx435SRJ2iMlOxPYt3d2C/T/7TiQCVZSn3Q= -github.com/Azure/go-autorest/autorest/adal v0.8.3 h1:O1AGG9Xig71FxdX9HO5pGNyZ7TbSyHaVg+5eJO/jSGw= github.com/Azure/go-autorest/autorest/adal v0.8.3/go.mod h1:ZjhuQClTqx435SRJ2iMlOxPYt3d2C/T/7TiQCVZSn3Q= github.com/Azure/go-autorest/autorest/adal v0.9.0 h1:SigMbuFNuKgc1xcGhaeapbh+8fgsu+GxgDRFyg7f5lM= github.com/Azure/go-autorest/autorest/adal v0.9.0/go.mod h1:/c022QCutn2P7uY+/oQWWNcK9YU+MH96NgK+jErpbcg= github.com/Azure/go-autorest/autorest/date v0.1.0/go.mod h1:plvfp3oPSKwf2DNjlBjWF/7vwR+cUD/ELuzDCXwHUVA= 
-github.com/Azure/go-autorest/autorest/date v0.2.0 h1:yW+Zlqf26583pE43KhfnhFcdmSWlm5Ew6bxipnr/tbM= github.com/Azure/go-autorest/autorest/date v0.2.0/go.mod h1:vcORJHLJEh643/Ioh9+vPmf1Ij9AEBM5FuBIXLmIy0g= github.com/Azure/go-autorest/autorest/date v0.3.0 h1:7gUk1U5M/CQbp9WoqinNzJar+8KY+LPI6wiWrP/myHw= github.com/Azure/go-autorest/autorest/date v0.3.0/go.mod h1:BI0uouVdmngYNUzGWeSYnokU+TrmwEsOqdt8Y6sso74= github.com/Azure/go-autorest/autorest/mocks v0.1.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0= github.com/Azure/go-autorest/autorest/mocks v0.2.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0= -github.com/Azure/go-autorest/autorest/mocks v0.3.0 h1:qJumjCaCudz+OcqE9/XtEPfvtOjOmKaui4EOpFI6zZc= github.com/Azure/go-autorest/autorest/mocks v0.3.0/go.mod h1:a8FDP3DYzQ4RYfVAxAN3SVSiiO77gL2j2ronKKP0syM= github.com/Azure/go-autorest/autorest/mocks v0.4.0 h1:z20OWOSG5aCye0HEkDp6TPmP17ZcfeMxPi6HnSALa8c= github.com/Azure/go-autorest/autorest/mocks v0.4.0/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k= +github.com/Azure/go-autorest/autorest/to v0.3.0 h1:zebkZaadz7+wIQYgC7GXaz3Wb28yKYfVkkBKwc38VF8= github.com/Azure/go-autorest/autorest/to v0.3.0/go.mod h1:MgwOyqaIuKdG4TL/2ywSsIWKAfJfgHDo8ObuUk3t5sA= github.com/Azure/go-autorest/autorest/to v0.3.1-0.20191028180845-3492b2aff503 h1:2McfZNaDqGPjv2pddK547PENIk4HV+NT7gvqRq4L0us= github.com/Azure/go-autorest/autorest/to v0.3.1-0.20191028180845-3492b2aff503/go.mod h1:MgwOyqaIuKdG4TL/2ywSsIWKAfJfgHDo8ObuUk3t5sA= +github.com/Azure/go-autorest/autorest/validation v0.2.0 h1:15vMO4y76dehZSq7pAaOLQxC6dZYsSrj2GQpflyM/L4= github.com/Azure/go-autorest/autorest/validation v0.2.0/go.mod h1:3EEqHnBxQGHXRYq3HT1WyXAvT7LLY3tl70hw6tQIbjI= github.com/Azure/go-autorest/autorest/validation v0.2.1-0.20191028180845-3492b2aff503 h1:RBrGlrkPWapMcLp1M6ywCqyYKOAT5ERI6lYFvGKOThE= github.com/Azure/go-autorest/autorest/validation v0.2.1-0.20191028180845-3492b2aff503/go.mod h1:3EEqHnBxQGHXRYq3HT1WyXAvT7LLY3tl70hw6tQIbjI= -github.com/Azure/go-autorest/logger v0.1.0 h1:ruG4BSDXONFRrZZJ2GUXDiUyVpayPmb1GnWeHDdaNKY= github.com/Azure/go-autorest/logger v0.1.0/go.mod h1:oExouG+K6PryycPJfVSxi/koC6LSNgds39diKLz7Vrc= github.com/Azure/go-autorest/logger v0.2.0 h1:e4RVHVZKC5p6UANLJHkM4OfR1UKZPj8Wt8Pcx+3oqrE= github.com/Azure/go-autorest/logger v0.2.0/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8= -github.com/Azure/go-autorest/tracing v0.5.0 h1:TRn4WjSnkcSy5AEG3pnbtFSwNtwzjr4VYyQflFE619k= github.com/Azure/go-autorest/tracing v0.5.0/go.mod h1:r/s2XiOKccPW3HrqB+W0TQzfbtp2fGCgRFtBroKn4Dk= github.com/Azure/go-autorest/tracing v0.6.0 h1:TYi4+3m5t6K48TGI9AUdb+IzbnSxvnvUMfuitfgcfuo= github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU= @@ -113,6 +103,7 @@ github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb0 github.com/NYTimes/gziphandler v1.1.1 h1:ZUDjpQae29j0ryrS0u/B8HZfJBtBQHjqw2rQ2cqUQ3I= github.com/NYTimes/gziphandler v1.1.1/go.mod h1:n/CVRwUEOgIxrgPvAQhUUr9oeUtvrhMomdKFjzJNB0c= github.com/Nvveen/Gotty v0.0.0-20120604004816-cd527374f1e5/go.mod h1:lmUJ/7eu/Q8D7ML55dXQrVaamCz2vxCfdQBasLZfHKk= +github.com/OneOfOne/xxhash v1.2.2 h1:KMrpdQIwFcEqXDklaen+P1axHaj9BSKzvpUUfnHldSE= github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= github.com/OneOfOne/xxhash v1.2.5/go.mod h1:eZbhyaAYD41SGSSsnmcpxVoRiQ/MPUTjUdIIOT9Um7Q= github.com/OneOfOne/xxhash v1.2.6 h1:U68crOE3y3MPttCMQGywZOLrTeF5HHJ3/vDBCJn9/bA= @@ -134,7 +125,6 @@ github.com/alecthomas/template 
v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuy github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751 h1:JYp7IbQjafoB+tBA3gMyHYHrpOtNuDiK/uB5uXxq5wM= github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= -github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4 h1:Hs82Z41s6SdL1CELW+XaDYmOH4hkBN4/N9og/AsOv7E= github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d h1:UQZhZ2O0vMHr2cI+DC1Mbh0TJxzA3RcLoMsFw+aXw7E= github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho= @@ -147,10 +137,8 @@ github.com/apache/arrow/go/arrow v0.0.0-20191024131854-af6fa24be0db/go.mod h1:VT github.com/apache/thrift v0.12.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ= github.com/apache/thrift v0.13.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ= github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o= -github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da h1:8GUt8eRujhVEGZFFEjBj46YV4rDjvGrNxb0KMWYkL2I= github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY= github.com/armon/go-metrics v0.0.0-20190430140413-ec5e00d3c878/go.mod h1:3AMJUQhVx52RsWOnlkpikZr01T/yAVN2gn0861vByNg= -github.com/armon/go-metrics v0.3.0 h1:B7AQgHi8QSEi4uHu7Sbsga+IJDU+CENgjxoo81vDUqU= github.com/armon/go-metrics v0.3.0/go.mod h1:zXjbSimjXTd7vOpY8B0/2LpvNvDoXBuplAD+gJD3GYs= github.com/armon/go-metrics v0.3.3 h1:a9F4rlj7EWWrbj7BYw8J8+x+ZZkJeqzNyRk8hdPF+ro= github.com/armon/go-metrics v0.3.3/go.mod h1:4O98XIr/9W0sxpJ8UaYkvjk10Iff7SnFrb4QAOwNTFc= @@ -159,7 +147,6 @@ github.com/armon/go-radix v1.0.0/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgI github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs= github.com/aryann/difflib v0.0.0-20170710044230-e206f873d14a/go.mod h1:DAHtR1m6lCRdSC2Tm3DSWRPvIPr6xNKyeHdqDQSQT+A= github.com/asaskevich/govalidator v0.0.0-20180720115003-f9ffefc3facf/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= -github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a h1:idn718Q4B6AGu/h5Sxe66HYVdqdGu2l9Iebqhi/AEoA= github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= github.com/asaskevich/govalidator v0.0.0-20200108200545-475eaeb16496 h1:zV3ejI06GQ59hwDQAvmK1qxOQGB3WuVTRoY0okPTAv0= github.com/asaskevich/govalidator v0.0.0-20200108200545-475eaeb16496/go.mod h1:oGkLhpf+kjZl6xBf758TQhh5XrAeiJv/7FRz/2spLIg= @@ -167,12 +154,9 @@ github.com/aws/aws-lambda-go v1.13.3/go.mod h1:4UKl9IzQMoD+QF79YdCuzCwp8VbmG4VAQ github.com/aws/aws-sdk-go v1.15.78/go.mod h1:E3/ieXAlvM0XWO57iftYVDLLvQ824smPP3ATZkfNZeM= github.com/aws/aws-sdk-go v1.17.7/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= github.com/aws/aws-sdk-go v1.22.4/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= -github.com/aws/aws-sdk-go v1.25.48 h1:J82DYDGZHOKHdhx6hD24Tm30c2C3GchYGfN0mf9iKUk= github.com/aws/aws-sdk-go v1.25.48/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= github.com/aws/aws-sdk-go v1.27.0/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= -github.com/aws/aws-sdk-go 
v1.31.9 h1:n+b34ydVfgC30j0Qm69yaapmjejQPW2BoDBX7Uy/tLI= github.com/aws/aws-sdk-go v1.31.9/go.mod h1:5zCpMtNQVjRREroY7sYe8lOMRSxkhG6MZveU8YkpAk0= -github.com/aws/aws-sdk-go v1.33.5 h1:p2fr1ryvNTU6avUWLI+/H7FGv0TBIjzVM5WDgXBBv4U= github.com/aws/aws-sdk-go v1.33.5/go.mod h1:5zCpMtNQVjRREroY7sYe8lOMRSxkhG6MZveU8YkpAk0= github.com/aws/aws-sdk-go v1.33.12 h1:eydMoSwfrSTD9PWKUJOiDL7+/UwDW8AjInUGVE5Llh4= github.com/aws/aws-sdk-go v1.33.12/go.mod h1:5zCpMtNQVjRREroY7sYe8lOMRSxkhG6MZveU8YkpAk0= @@ -203,13 +187,11 @@ github.com/c2h5oh/datasize v0.0.0-20200112174442-28bbd4740fee h1:BnPxIde0gjtTnc9 github.com/c2h5oh/datasize v0.0.0-20200112174442-28bbd4740fee/go.mod h1:S/7n9copUssQ56c7aAgHqftWO4LTf4xY6CGWt8Bc+3M= github.com/casbin/casbin/v2 v2.1.2/go.mod h1:YcPU1XXisHhLzuxH9coDNf2FbKpjGlbCg3n9yuLkIJQ= github.com/cenkalti/backoff v0.0.0-20181003080854-62661b46c409/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM= -github.com/cenkalti/backoff v1.0.0 h1:2XeuDgvPv/6QDyzIuxb6n36ADVocyqTLlOSpYBGYtvM= github.com/cenkalti/backoff v1.0.0/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM= github.com/cenkalti/backoff v2.2.1+incompatible h1:tNowT99t7UNflLxfYYSlKYsBpXdEet03Pg2g16Swow4= github.com/cenkalti/backoff v2.2.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM= github.com/cenkalti/backoff/v4 v4.0.2 h1:JIufpQLbh4DkbQoii76ItQIUFzevQSqOLZca4eamEDs= github.com/cenkalti/backoff/v4 v4.0.2/go.mod h1:eEew/i+1Q6OrCDZh3WiXYv3+nJwBASZ8Bog/87DQnVg= -github.com/census-instrumentation/opencensus-proto v0.2.1 h1:glEXhBS5PSLLv4IXzLA5yPRVX4bilULVyxxbrfOtDAk= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/cespare/xxhash v0.0.0-20181017004759-096ff4a8a059/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko= @@ -240,6 +222,7 @@ github.com/containerd/containerd v1.3.4 h1:3o0smo5SKY7H6AJCmJhsnCjR2/V2T8VmiHt7s github.com/containerd/containerd v1.3.4/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= github.com/containerd/fifo v0.0.0-20190226154929-a9fb20d87448 h1:PUD50EuOMkXVcpBIA/R95d56duJR9VxhwncsFbNnxW4= github.com/containerd/fifo v0.0.0-20190226154929-a9fb20d87448/go.mod h1:ODA38xgv3Kuk8dQz2ZQXpnv/UZZUHUCL7pnLehbXgQI= +github.com/coreos/go-semver v0.2.0 h1:3Jm3tLmsgAYcjC+4Up7hJrFBPr+n7rAqYeSw/SZazuY= github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= github.com/coreos/go-semver v0.3.0 h1:wkHLiw0WNATZnSG7epLsujiMCgPAc9xhjJ4tgnAxmfM= github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= @@ -247,6 +230,7 @@ github.com/coreos/go-systemd v0.0.0-20180511133405-39ca1b05acc7/go.mod h1:F5haX7 github.com/coreos/go-systemd v0.0.0-20181012123002-c6f51f82210d/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= github.com/coreos/go-systemd v0.0.0-20191104093116-d3cd4ed1dbcf h1:iW4rZ826su+pqaw19uhpSCzhj44qo35pNgKFGqzDKkU= github.com/coreos/go-systemd v0.0.0-20191104093116-d3cd4ed1dbcf/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= +github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf h1:CAKfRE2YtTUIjjh1bkBtyYFaUT/WmOqsJjgtihT0vMI= github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f h1:lBNOc5arjvs8E5mO2tbpBpLoyyu8B6e44T7hJy6potg= github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= @@ -277,9 
+261,7 @@ github.com/dgryski/go-bitstream v0.0.0-20180413035011-3522498ce2c8/go.mod h1:VMa github.com/dgryski/go-sip13 v0.0.0-20190329191031-25c5027a8c7b/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no= github.com/dhui/dktest v0.3.0 h1:kwX5a7EkLcjo7VpsPQSYJcKGbXBXdjI9FGjuUj1jn6I= github.com/dhui/dktest v0.3.0/go.mod h1:cyzIUfGsBEbZ6BT7tnXqAShHSXCZhSNmFl70sZ7c1yc= -github.com/digitalocean/godo v1.37.0 h1:NEj5ne2cvLBHo1GJY1DNN/iEt9ipa72CMwwAjKEA530= github.com/digitalocean/godo v1.37.0/go.mod h1:p7dOjjtSBqCTUksqtA5Fd3uaKs9kyTq2xcz76ulEJRU= -github.com/digitalocean/godo v1.38.0 h1:to+pLe5RJqflJiyxhaLJfJgT3YzwHRSg19mOWkKt6A0= github.com/digitalocean/godo v1.38.0/go.mod h1:p7dOjjtSBqCTUksqtA5Fd3uaKs9kyTq2xcz76ulEJRU= github.com/digitalocean/godo v1.42.0 h1:xQlEFLhQ1zZUryJAfiWb8meLPPCWnLO901U5Imhh0Mc= github.com/digitalocean/godo v1.42.0/go.mod h1:p7dOjjtSBqCTUksqtA5Fd3uaKs9kyTq2xcz76ulEJRU= @@ -287,7 +269,6 @@ github.com/docker/distribution v2.7.0+incompatible/go.mod h1:J2gT2udsDAN96Uj4Kfc github.com/docker/distribution v2.7.1+incompatible h1:a5mlkVzth6W5A4fOsS3D2EO5BUmsJpcB+cRlLU7cSug= github.com/docker/distribution v2.7.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= github.com/docker/docker v0.7.3-0.20190103212154-2b7e084dc98b/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= -github.com/docker/docker v0.7.3-0.20190817195342-4760db040282 h1:mzrx39dGtGq0VEnTHjnakmczd4uFbhx2cZU3BJDsLdc= github.com/docker/docker v0.7.3-0.20190817195342-4760db040282/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/docker v17.12.0-ce-rc1.0.20200706150819-a40b877fbb9e+incompatible h1:+mzU0jHyjWpYHiD0StRlsVXkCvecWS2hc55M3OlUJSk= github.com/docker/docker v17.12.0-ce-rc1.0.20200706150819-a40b877fbb9e+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= @@ -325,12 +306,12 @@ github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymF github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= +github.com/evanphx/json-patch v4.2.0+incompatible h1:fUDGZCv/7iAN7u0puUVhvKCcsR6vRfwrJatElLBEf0I= github.com/evanphx/json-patch v4.2.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= github.com/evanphx/json-patch v4.5.0+incompatible h1:ouOWdg56aJriqS0huScTkVXPC5IcNrDCXZ6OoTAWu7M= github.com/evanphx/json-patch v4.5.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= github.com/facette/natsort v0.0.0-20181210072756-2cd4dd1e2dcb h1:IT4JYU7k4ikYg1SCxNI1/Tieq/NFvh6dzLdgi7eu0tM= github.com/facette/natsort v0.0.0-20181210072756-2cd4dd1e2dcb/go.mod h1:bH6Xx7IW64qjjJq8M2u4dxNaBiDfKK+z/3eGDpXEQhc= -github.com/fatih/color v1.7.0 h1:DkWD4oS2D8LGGgTQ6IvwJJXSL5Vp2ffcQg58nFV38Ys= github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= github.com/fatih/color v1.9.0 h1:8xPHl4/q1VyqGIPif1F+1V3Y3lSmrq01EabUW3CoW5s= github.com/fatih/color v1.9.0/go.mod h1:eQcE1qtQxscV5RaZvpXrrb8Drkc3/DdQ+uUYCNjL+zU= @@ -358,12 +339,10 @@ github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9 github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod 
h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= -github.com/go-kit/kit v0.9.0 h1:wDJmvq38kDhkVxi50ni9ykkdUr1PKgqKOoi01fa0Mdk= github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-kit/kit v0.10.0 h1:dXFJfIHVvUcpSgDOV+Ne6t7jXri8Tfv2uOLHUZ2XNuo= github.com/go-kit/kit v0.10.0/go.mod h1:xUsJbQ/Fp4kEt7AFgCuvyX4a71u8h9jB8tj/ORgOZ7o= github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= -github.com/go-logfmt/logfmt v0.4.0 h1:MP4Eh7ZCb31lleYCFuwm0oe4/YGak+5l1vA2NOE80nA= github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= github.com/go-logfmt/logfmt v0.5.0 h1:TrB8swr/68K7m9CcGut2g3UOihhbcbiMAYiuTXdEih4= github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A= @@ -374,7 +353,6 @@ github.com/go-openapi/analysis v0.17.0/go.mod h1:IowGgpVeD0vNm45So8nr+IcQ3pxVtpR github.com/go-openapi/analysis v0.17.2/go.mod h1:IowGgpVeD0vNm45So8nr+IcQ3pxVtpRoBWb8PVZO0ik= github.com/go-openapi/analysis v0.18.0/go.mod h1:IowGgpVeD0vNm45So8nr+IcQ3pxVtpRoBWb8PVZO0ik= github.com/go-openapi/analysis v0.19.2/go.mod h1:3P1osvZa9jKjb8ed2TPng3f0i/UY9snX6gxi44djMjk= -github.com/go-openapi/analysis v0.19.4 h1:1TjOzrWkj+9BrjnM1yPAICbaoC0FyfD49oVkTBrSSa0= github.com/go-openapi/analysis v0.19.4/go.mod h1:3P1osvZa9jKjb8ed2TPng3f0i/UY9snX6gxi44djMjk= github.com/go-openapi/analysis v0.19.5/go.mod h1:hkEAkxagaIvIP7VTn8ygJNkd4kAYON2rCu0v0ObL0AU= github.com/go-openapi/analysis v0.19.10 h1:5BHISBAXOc/aJK25irLZnx2D3s6WyYaY9D4gmuz9fdE= @@ -382,7 +360,6 @@ github.com/go-openapi/analysis v0.19.10/go.mod h1:qmhS3VNFxBlquFJ0RGoDtylO9y4pgT github.com/go-openapi/errors v0.17.0/go.mod h1:LcZQpmvG4wyF5j4IhA73wkLFQg+QJXOQHVjmcZxhka0= github.com/go-openapi/errors v0.17.2/go.mod h1:LcZQpmvG4wyF5j4IhA73wkLFQg+QJXOQHVjmcZxhka0= github.com/go-openapi/errors v0.18.0/go.mod h1:LcZQpmvG4wyF5j4IhA73wkLFQg+QJXOQHVjmcZxhka0= -github.com/go-openapi/errors v0.19.2 h1:a2kIyV3w+OS3S97zxUndRVD46+FhGOUBDFY7nmu4CsY= github.com/go-openapi/errors v0.19.2/go.mod h1:qX0BLWsyaKfvhluLejVpVNwNRdXZhEbTA4kxxpKBC94= github.com/go-openapi/errors v0.19.3/go.mod h1:qX0BLWsyaKfvhluLejVpVNwNRdXZhEbTA4kxxpKBC94= github.com/go-openapi/errors v0.19.4 h1:fSGwO1tSYHFu70NKaWJt5Qh0qoBRtCm/mXS1yhf+0W0= @@ -391,7 +368,6 @@ github.com/go-openapi/jsonpointer v0.0.0-20160704185906-46af16f9f7b1/go.mod h1:+ github.com/go-openapi/jsonpointer v0.17.0/go.mod h1:cOnomiV+CVVwFLk0A/MExoFMjwdsUdVpsRhURCKh+3M= github.com/go-openapi/jsonpointer v0.17.2/go.mod h1:cOnomiV+CVVwFLk0A/MExoFMjwdsUdVpsRhURCKh+3M= github.com/go-openapi/jsonpointer v0.18.0/go.mod h1:cOnomiV+CVVwFLk0A/MExoFMjwdsUdVpsRhURCKh+3M= -github.com/go-openapi/jsonpointer v0.19.2 h1:A9+F4Dc/MCNB5jibxf6rRvOvR/iFgQdyNx9eIhnGqq0= github.com/go-openapi/jsonpointer v0.19.2/go.mod h1:3akKfEdA7DF1sugOqz1dVQHBcuDBPKZGEoHC/NkiQRg= github.com/go-openapi/jsonpointer v0.19.3 h1:gihV7YNZK1iK6Tgwwsxo2rJbD1GTbdm72325Bq8FI3w= github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= @@ -399,7 +375,6 @@ github.com/go-openapi/jsonreference v0.0.0-20160704190145-13c6e3589ad9/go.mod h1 github.com/go-openapi/jsonreference v0.17.0/go.mod h1:g4xxGn04lDIRh0GJb5QlpE3HfopLOL6uZrK/VgnsK9I= github.com/go-openapi/jsonreference v0.17.2/go.mod h1:g4xxGn04lDIRh0GJb5QlpE3HfopLOL6uZrK/VgnsK9I= github.com/go-openapi/jsonreference v0.18.0/go.mod h1:g4xxGn04lDIRh0GJb5QlpE3HfopLOL6uZrK/VgnsK9I= 
-github.com/go-openapi/jsonreference v0.19.2 h1:o20suLFB4Ri0tuzpWtyHlh7E7HnkqTNLq6aR6WVNS1w= github.com/go-openapi/jsonreference v0.19.2/go.mod h1:jMjeRr2HHw6nAVajTXJ4eiUwohSTlpa0o73RUL1owJc= github.com/go-openapi/jsonreference v0.19.3 h1:5cxNfTy0UVC3X8JL5ymxzyoUZmo8iZb+jeTWn7tUa8o= github.com/go-openapi/jsonreference v0.19.3/go.mod h1:rjx6GuL8TTa9VaixXglHmQmIL98+wF9xc8zWvFonSJ8= @@ -407,7 +382,6 @@ github.com/go-openapi/loads v0.17.0/go.mod h1:72tmFy5wsWx89uEVddd0RjRWPZm92WRLhf github.com/go-openapi/loads v0.17.2/go.mod h1:72tmFy5wsWx89uEVddd0RjRWPZm92WRLhf7AC+0+OOU= github.com/go-openapi/loads v0.18.0/go.mod h1:72tmFy5wsWx89uEVddd0RjRWPZm92WRLhf7AC+0+OOU= github.com/go-openapi/loads v0.19.0/go.mod h1:72tmFy5wsWx89uEVddd0RjRWPZm92WRLhf7AC+0+OOU= -github.com/go-openapi/loads v0.19.2 h1:rf5ArTHmIJxyV5Oiks+Su0mUens1+AjpkPoWr5xFRcI= github.com/go-openapi/loads v0.19.2/go.mod h1:QAskZPMX5V0C2gvfkGZzJlINuP7Hx/4+ix5jWFxsNPs= github.com/go-openapi/loads v0.19.3/go.mod h1:YVfqhUCdahYwR3f3iiwQLhicVRvLlU/WO5WPaZvcvSI= github.com/go-openapi/loads v0.19.4/go.mod h1:zZVHonKd8DXyxyw4yfnVjPzBjIQcLt0CCsn0N0ZrQsk= @@ -417,7 +391,6 @@ github.com/go-openapi/runtime v0.0.0-20180920151709-4f900dc2ade9/go.mod h1:6v9a6 github.com/go-openapi/runtime v0.18.0/go.mod h1:uI6pHuxWYTy94zZxgcwJkUWa9wbIlhteGfloI10GD4U= github.com/go-openapi/runtime v0.19.0/go.mod h1:OwNfisksmmaZse4+gpV3Ne9AyMOlP1lt4sK4FXt0O64= github.com/go-openapi/runtime v0.19.3/go.mod h1:X277bwSUBxVlCYR3r7xgZZGKVvBd/29gLDlFGtJ8NL4= -github.com/go-openapi/runtime v0.19.4 h1:csnOgcgAiuGoM/Po7PEpKDoNulCcF3FGbSnbHfxgjMI= github.com/go-openapi/runtime v0.19.4/go.mod h1:X277bwSUBxVlCYR3r7xgZZGKVvBd/29gLDlFGtJ8NL4= github.com/go-openapi/runtime v0.19.15 h1:2GIefxs9Rx1vCDNghRtypRq+ig8KSLrjHbAYI/gCLCM= github.com/go-openapi/runtime v0.19.15/go.mod h1:dhGWCTKRXlAfGnQG0ONViOZpjfg0m2gUt9nTQPQZuoo= @@ -425,11 +398,9 @@ github.com/go-openapi/spec v0.0.0-20160808142527-6aced65f8501/go.mod h1:J8+jY1nA github.com/go-openapi/spec v0.17.0/go.mod h1:XkF/MOi14NmjsfZ8VtAKf8pIlbZzyoTvZsdfssdxcBI= github.com/go-openapi/spec v0.17.2/go.mod h1:XkF/MOi14NmjsfZ8VtAKf8pIlbZzyoTvZsdfssdxcBI= github.com/go-openapi/spec v0.18.0/go.mod h1:XkF/MOi14NmjsfZ8VtAKf8pIlbZzyoTvZsdfssdxcBI= -github.com/go-openapi/spec v0.19.2 h1:SStNd1jRcYtfKCN7R0laGNs80WYYvn5CbBjM2sOmCrE= github.com/go-openapi/spec v0.19.2/go.mod h1:sCxk3jxKgioEJikev4fgkNmwS+3kuYdJtcsZsD5zxMY= github.com/go-openapi/spec v0.19.3/go.mod h1:FpwSN1ksY1eteniUU7X0N/BgJ7a4WvBFVA8Lj9mJglo= github.com/go-openapi/spec v0.19.6/go.mod h1:Hm2Jr4jv8G1ciIAo+frC/Ft+rR2kQDh8JHKHb3gWUSk= -github.com/go-openapi/spec v0.19.7 h1:0xWSeMd35y5avQAThZR2PkEuqSosoS5t6gDH4L8n11M= github.com/go-openapi/spec v0.19.7/go.mod h1:Hm2Jr4jv8G1ciIAo+frC/Ft+rR2kQDh8JHKHb3gWUSk= github.com/go-openapi/spec v0.19.8 h1:qAdZLh1r6QF/hI/gTq+TJTvsQUodZsM7KLqkAJdiJNg= github.com/go-openapi/spec v0.19.8/go.mod h1:Hm2Jr4jv8G1ciIAo+frC/Ft+rR2kQDh8JHKHb3gWUSk= @@ -437,7 +408,6 @@ github.com/go-openapi/strfmt v0.17.0/go.mod h1:P82hnJI0CXkErkXi8IKjPbNBM6lV6+5pL github.com/go-openapi/strfmt v0.17.2/go.mod h1:P82hnJI0CXkErkXi8IKjPbNBM6lV6+5pLP5l494TcyU= github.com/go-openapi/strfmt v0.18.0/go.mod h1:P82hnJI0CXkErkXi8IKjPbNBM6lV6+5pLP5l494TcyU= github.com/go-openapi/strfmt v0.19.0/go.mod h1:+uW+93UVvGGq2qGaZxdDeJqSAqBqBdl+ZPMF/cC8nDY= -github.com/go-openapi/strfmt v0.19.2 h1:clPGfBnJohokno0e+d7hs6Yocrzjlgz6EsQSDncCRnE= github.com/go-openapi/strfmt v0.19.2/go.mod h1:0yX7dbo8mKIvc3XSKp7MNfxw4JytCfCD6+bY1AVL9LU= github.com/go-openapi/strfmt v0.19.3/go.mod 
h1:0yX7dbo8mKIvc3XSKp7MNfxw4JytCfCD6+bY1AVL9LU= github.com/go-openapi/strfmt v0.19.4/go.mod h1:eftuHTlB/dI8Uq8JJOyRlieZf+WkkxUuk0dgdHXr2Qk= @@ -449,14 +419,12 @@ github.com/go-openapi/swag v0.17.2/go.mod h1:AByQ+nYG6gQg71GINrmuDXCPWdL640yX49/ github.com/go-openapi/swag v0.18.0/go.mod h1:AByQ+nYG6gQg71GINrmuDXCPWdL640yX49/kXLo40Tg= github.com/go-openapi/swag v0.19.2/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= github.com/go-openapi/swag v0.19.4/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= -github.com/go-openapi/swag v0.19.5 h1:lTz6Ys4CmqqCQmZPBlbQENR1/GucA2bzYTE12Pw4tFY= github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= github.com/go-openapi/swag v0.19.7/go.mod h1:ao+8BpOPyKdpQz3AOJfbeEVpLmWAvlT1IfTe5McPyhY= github.com/go-openapi/swag v0.19.9 h1:1IxuqvBUU3S2Bi4YC7tlP9SJF1gVpCvqN0T2Qof4azE= github.com/go-openapi/swag v0.19.9/go.mod h1:ao+8BpOPyKdpQz3AOJfbeEVpLmWAvlT1IfTe5McPyhY= github.com/go-openapi/validate v0.17.2/go.mod h1:Uh4HdOzKt19xGIGm1qHf/ofbX1YQ4Y+MYsct2VUrAJ4= github.com/go-openapi/validate v0.18.0/go.mod h1:Uh4HdOzKt19xGIGm1qHf/ofbX1YQ4Y+MYsct2VUrAJ4= -github.com/go-openapi/validate v0.19.2 h1:ky5l57HjyVRrsJfd2+Ro5Z9PjGuKbsmftwyMtk8H7js= github.com/go-openapi/validate v0.19.2/go.mod h1:1tRCw7m3jtI8eNWEEliiAqUIcBztB2KDnRCRMUi7GTA= github.com/go-openapi/validate v0.19.3/go.mod h1:90Vh6jjkTn+OT1Eefm0ZixWNFjhtOH7vS9k0lo6zwJo= github.com/go-openapi/validate v0.19.8 h1:YFzsdWIDfVuLvIOF+ZmKjVg1MbPJ1QgY9PihMwei1ys= @@ -503,7 +471,6 @@ github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7a github.com/gogo/protobuf v1.2.0/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4= github.com/gogo/protobuf v1.2.2-0.20190723190241-65acae22fc9d/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= -github.com/gogo/protobuf v1.2.2-0.20190730201129-28a6bbf47e48 h1:X+zN6RZXsvnrSJaAIQhZezPfAfvsqihKKR8oiLHid34= github.com/gogo/protobuf v1.2.2-0.20190730201129-28a6bbf47e48/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= github.com/gogo/protobuf v1.3.0/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= github.com/gogo/protobuf v1.3.1 h1:DqDEcV5aeaTmdFBePNpYsp3FlcVH/2ISVVM9Qf8PSls= @@ -514,12 +481,9 @@ github.com/golang-migrate/migrate/v4 v4.7.0 h1:gONcHxHApDTKXDyLH/H97gEHmpu1zcnnb github.com/golang-migrate/migrate/v4 v4.7.0/go.mod h1:Qvut3N4xKWjoH3sokBccML6WyHSnggXm/DvMMnTsQIc= github.com/golang/freetype v0.0.0-20170609003504-e2365dfdc4a0/go.mod h1:E/TSTwGwJL78qG/PmXZO1EjYhfJinVAhrmmHX6Z8B9k= github.com/golang/geo v0.0.0-20190916061304-5b978397cfec/go.mod h1:QZ0nwyI2jOfgRAoBvP+ab5aRr7c9x7lhGEJrKvBwjWI= -github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6 h1:ZgQEtGgCBiWRM39fZuwSd1LwSqqSW0hOdXCYYDX0R3I= github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/groupcache v0.0.0-20191027212112-611e8accdfc9 h1:uHTyIjqVhYRhLbJ8nIiOJHkEZZ+5YoOsAbD3sk82NiE= github.com/golang/groupcache v0.0.0-20191027212112-611e8accdfc9/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache 
v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e h1:1r7pUrabqp18hOBcwBwiTsbnFeTZHV9eER/QT5JVZxY= @@ -533,7 +497,6 @@ github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt github.com/golang/protobuf v0.0.0-20161109072736-4bd1920723d7/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.2 h1:6nsPYzhq5kReh6QImI3k5qWzO4PEbvbIW2cwSfR/6xs= github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk= @@ -557,7 +520,6 @@ github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ github.com/google/flatbuffers v1.11.0/go.mod h1:1AeVuKshWv4vARoZatz6mlQ0JxURH0Kv5+zNeJKJCa8= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= -github.com/google/go-cmp v0.3.1 h1:Xye71clBPdm5HgqGwUkwhbynsUJZhDbS20FvLhQ2izg= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.4.0 h1:xsAVV57WRhGj6kEIi8ReJzQlHHqcBYCElAvkovg3B/4= github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= @@ -565,7 +527,6 @@ github.com/google/go-github v17.0.0+incompatible/go.mod h1:zLgOLi98H3fifZn+44m+u github.com/google/go-querystring v1.0.0 h1:Xkwi/a1rcvNg1PPYe5vI8GbeBY/jrVuDX5ASuANWTrk= github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck= github.com/google/gofuzz v0.0.0-20161122191042-44d81051d367/go.mod h1:HP5RmnzzSNb993RKQDq4+1A4ia9nllfqcQFTQJedwGI= -github.com/google/gofuzz v1.0.0 h1:A8PeW59pxE9IoFRqBp37U+mSNaQoZ46F1f0f863XSXw= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/gofuzz v1.1.0 h1:Hsa8mG0dQ46ij8Sl2AYJDUv1oA9/d6Vk+3LG99Oe02g= github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= @@ -593,27 +554,21 @@ github.com/googleapis/gnostic v0.0.0-20170426233943-68f4ded48ba9/go.mod h1:sJBsC github.com/googleapis/gnostic v0.0.0-20170729233727-0c5108395e2d/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY= github.com/googleapis/gnostic v0.1.0/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY= github.com/googleapis/gnostic v0.3.0/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY= -github.com/googleapis/gnostic v0.3.1 h1:WeAefnSUHlBb0iJKwxFDZdbfGwkd7xRNuV+IpXMJhYk= github.com/googleapis/gnostic v0.3.1/go.mod h1:on+2t9HRStVgn95RSsFWFz+6Q0Snyqv1awfrALZdbtU= github.com/googleapis/gnostic v0.4.0 h1:BXDUo8p/DaxC+4FJY/SSx3gvnx9C1VdHNgaUkiEL5mk= github.com/googleapis/gnostic v0.4.0/go.mod h1:on+2t9HRStVgn95RSsFWFz+6Q0Snyqv1awfrALZdbtU= github.com/gophercloud/gophercloud v0.1.0/go.mod h1:vxM41WHh5uqHVBMZHzuwNOHh8XEoIEcSTewFxm1c5g8= -github.com/gophercloud/gophercloud v0.3.0 h1:6sjpKIpVwRIIwmcEGp+WwNovNsem+c+2vm6oxshRpL8= github.com/gophercloud/gophercloud v0.3.0/go.mod h1:vxM41WHh5uqHVBMZHzuwNOHh8XEoIEcSTewFxm1c5g8= -github.com/gophercloud/gophercloud v0.6.0 h1:Xb2lcqZtml1XjgYZxbeayEemq7ASbeTp09m36gQFpEU= 
github.com/gophercloud/gophercloud v0.6.0/go.mod h1:GICNByuaEBibcjmjvI7QvYJSZEbGkcYwAR7EZK2WMqM= -github.com/gophercloud/gophercloud v0.11.0 h1:pYMP9UZBdQa3lsfIZ1tZor4EbtxiuB6BHhocenkiH/E= github.com/gophercloud/gophercloud v0.11.0/go.mod h1:gmC5oQqMDOMO1t1gq5DquX/yAU808e/4mzjjDA76+Ss= github.com/gophercloud/gophercloud v0.12.0 h1:mZrie07npp6ODiwHZolTicr5jV8Ogn43AvAsSMm6Ork= github.com/gophercloud/gophercloud v0.12.0/go.mod h1:gmC5oQqMDOMO1t1gq5DquX/yAU808e/4mzjjDA76+Ss= +github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1 h1:EGx4pi6eqNxGaHF6qqu48+N2wcFQ5qg5FXgOdqsJ5d8= github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= github.com/gopherjs/gopherjs v0.0.0-20191106031601-ce3c9ade29de h1:F7WD09S8QB4LrkEpka0dFPLSotH11HRpCsLIbIcJ7sU= github.com/gopherjs/gopherjs v0.0.0-20191106031601-ce3c9ade29de/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= -github.com/gorilla/context v1.1.1 h1:AWwleXJkX/nhcU9bZSnZoi3h/qGYqQAGhq6zZe/aQW8= github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg= -github.com/gorilla/mux v1.6.2 h1:Pgr17XVTNXAk3q/r4CpKzC5xBM/qW1uVLV+IhRZpIIk= github.com/gorilla/mux v1.6.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= -github.com/gorilla/mux v1.7.1 h1:Dw4jY2nghMMRsh1ol8dv1axHkDwMQK2DHerMNJsIpJU= github.com/gorilla/mux v1.7.1/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= github.com/gorilla/mux v1.7.3 h1:gnP5JzjVOuiZD07fKKToCAOjS0yOpj/qPETTXCCS6hw= github.com/gorilla/mux v1.7.3/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= @@ -625,18 +580,15 @@ github.com/grafana/gocql v0.0.0-20200605141915-ba5dc39ece85/go.mod h1:crI9WX6p0I github.com/grafana/tail v0.0.0-20191024143944-0b54ddf21fe7 h1:eeBhshivxpgHEX78QxJkoL251Pjr0B2GL59ZsivnplU= github.com/grafana/tail v0.0.0-20191024143944-0b54ddf21fe7/go.mod h1:aS6CMYGLEIABOzX3OL8SqZ3zAZCGN7nmBnqgnyJGxyA= github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= -github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4 h1:z53tR0945TRRQO/fLEVPI6SMv7ZflF0TEaTAoU7tOzg= github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= github.com/grpc-ecosystem/go-grpc-middleware v1.1.0 h1:THDBEeQ9xZ8JEaCLyLQqXMMdRqNr0QAUJTIkQAUtFjg= github.com/grpc-ecosystem/go-grpc-middleware v1.1.0/go.mod h1:f5nM7jw/oeRSadq3xCzHAvxcr8HZnzsqU6ILg/0NiiE= -github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 h1:Ovs26xHkKqVztRpIrF/92BcuyuQ/YW4NSIpoGtfXNho= github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= github.com/grpc-ecosystem/go-grpc-prometheus v1.2.1-0.20191002090509-6af20e3a5340 h1:uGoIog/wiQHI9GAxXO5TJbT0wWKH3O9HhOJW1F9c3fY= github.com/grpc-ecosystem/go-grpc-prometheus v1.2.1-0.20191002090509-6af20e3a5340/go.mod h1:3bDW6wMZJB7tiONtC/1Xpicra6Wp5GgbTbQWCbI5fkc= github.com/grpc-ecosystem/grpc-gateway v1.4.1/go.mod h1:RSKVYQBd5MCa4OVpNdGskqpgL2+G+NZTnrVHpWWfpdw= github.com/grpc-ecosystem/grpc-gateway v1.9.4/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= -github.com/grpc-ecosystem/grpc-gateway v1.12.1 h1:zCy2xE9ablevUOrUZc3Dl72Dt+ya2FNAvC2yLYMHzi4= github.com/grpc-ecosystem/grpc-gateway v1.12.1/go.mod h1:8XEsbTttt/W+VvjtQhLACqCisSPWTxCZ7sBRjU6iH9c= github.com/grpc-ecosystem/grpc-gateway v1.14.6 
h1:8ERzHx8aj1Sc47mu9n/AksaKCSWrMchFtkdrS4BIj5o= github.com/grpc-ecosystem/grpc-gateway v1.14.6/go.mod h1:zdiPV4Yse/1gnckTHtghG4GkDEdKCRJduHpTxT3/jcw= @@ -644,19 +596,13 @@ github.com/grpc-ecosystem/grpc-opentracing v0.0.0-20180507213350-8e809c8a8645 h1 github.com/grpc-ecosystem/grpc-opentracing v0.0.0-20180507213350-8e809c8a8645/go.mod h1:6iZfnjpejD4L/4DwD7NryNaJyCQdzwWwH2MWhCA90Kw= github.com/hailocab/go-hostpool v0.0.0-20160125115350-e80d13ce29ed h1:5upAirOpQc1Q53c0bnx2ufif5kANL7bfZWcc6VJWJd8= github.com/hailocab/go-hostpool v0.0.0-20160125115350-e80d13ce29ed/go.mod h1:tMWxXQ9wFIaZeTI9F+hmhFiGpFmhOHzyShyFUhRm0H4= -github.com/hashicorp/consul/api v1.1.0 h1:BNQPM9ytxj6jbjjdRPioQ94T6YXriSopn0i8COv6SRA= github.com/hashicorp/consul/api v1.1.0/go.mod h1:VmuI/Lkw1nC05EYQWNKwWGbkg+FbDBtguAZLlVdkD9Q= -github.com/hashicorp/consul/api v1.3.0 h1:HXNYlRkkM/t+Y/Yhxtwcy02dlYwIaoxzvxPnS+cqy78= github.com/hashicorp/consul/api v1.3.0/go.mod h1:MmDNSzIMUjNpY/mQ398R4bk2FnqQLoPndWW5VkKPlCE= -github.com/hashicorp/consul/api v1.4.0 h1:jfESivXnO5uLdH650JU/6AnjRoHrLhULq0FnC3Kp9EY= github.com/hashicorp/consul/api v1.4.0/go.mod h1:xc8u05kyMa3Wjr9eEAsIAo3dg8+LywT5E/Cl7cNS5nU= github.com/hashicorp/consul/api v1.5.0 h1:Yo2bneoGy68A7aNwmuETFnPhjyBEm7n3vzRacEVMjvI= github.com/hashicorp/consul/api v1.5.0/go.mod h1:LqwrLNW876eYSuUOo4ZLHBcdKc038txr/IMfbLPATa4= -github.com/hashicorp/consul/sdk v0.1.1 h1:LnuDWGNsoajlhGyHJvuWW6FVqRl8JOTPqS6CPTsYjhY= github.com/hashicorp/consul/sdk v0.1.1/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8= -github.com/hashicorp/consul/sdk v0.3.0 h1:UOxjlb4xVNF93jak1mzzoBatyFju9nrkxpVwIp/QqxQ= github.com/hashicorp/consul/sdk v0.3.0/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8= -github.com/hashicorp/consul/sdk v0.4.0 h1:zBtCfKJZcJDBvSCkQJch4ulp59m1rATFLKwNo/LYY30= github.com/hashicorp/consul/sdk v0.4.0/go.mod h1:fY08Y9z5SvJqevyZNy6WWPXiG3KwBPAvlcdx16zZ0fM= github.com/hashicorp/consul/sdk v0.5.0 h1:WC4594Wp/LkEeML/OdQKEC1yqBmEYkRp6i7X5u0zDAs= github.com/hashicorp/consul/sdk v0.5.0/go.mod h1:fY08Y9z5SvJqevyZNy6WWPXiG3KwBPAvlcdx16zZ0fM= @@ -668,21 +614,18 @@ github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtng github.com/hashicorp/go-hclog v0.12.0/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ= github.com/hashicorp/go-hclog v0.12.2 h1:F1fdYblUEsxKiailtkhCCG2g4bipEgaHiDc8vffNpD4= github.com/hashicorp/go-hclog v0.12.2/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ= -github.com/hashicorp/go-immutable-radix v1.0.0 h1:AKDB1HM5PWEA7i4nhcpwOrO2byshxBjXVn/J/3+z5/0= github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= -github.com/hashicorp/go-immutable-radix v1.1.0 h1:vN9wG1D6KG6YHRTWr8512cxGOVgTMEfgEdSj/hr8MPc= github.com/hashicorp/go-immutable-radix v1.1.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= github.com/hashicorp/go-immutable-radix v1.2.0 h1:l6UW37iCXwZkZoAbEYnptSHVE/cQ5bOTPYG5W3vf9+8= github.com/hashicorp/go-immutable-radix v1.2.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= +github.com/hashicorp/go-msgpack v0.5.3 h1:zKjpN5BK/P5lMYrLmBHdBULWbJ0XpYR+7NGzqkZzoD4= github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM= github.com/hashicorp/go-msgpack v0.5.5 h1:i9R9JSrqIz0QVLz3sz+i3YJdT7TTSLcfLLzJi9aZTuI= github.com/hashicorp/go-msgpack v0.5.5/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM= github.com/hashicorp/go-multierror v1.0.0 h1:iVjPR7a6H0tWELX5NxNe7bYopibicUzc7uPribsnS6o= github.com/hashicorp/go-multierror v1.0.0/go.mod 
h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk= github.com/hashicorp/go-retryablehttp v0.5.3/go.mod h1:9B5zBasrRhHXnJnui7y6sL7es7NDiJgTc6Er0maI1Xs= -github.com/hashicorp/go-rootcerts v1.0.0 h1:Rqb66Oo1X/eSV1x66xbDccZjhJigjg0+e82kpwzSwCI= github.com/hashicorp/go-rootcerts v1.0.0/go.mod h1:K6zTfqpRlCUIjkwsN4Z+hiSfzSTQa6eBIzfwKfwNnHU= -github.com/hashicorp/go-rootcerts v1.0.1 h1:DMo4fmknnz0E0evoNYnV48RjWndOsmd6OW+09R3cEP8= github.com/hashicorp/go-rootcerts v1.0.1/go.mod h1:pqUvnprVnM5bf7AOirdbb01K4ccR319Vf4pU3K5EGc8= github.com/hashicorp/go-rootcerts v1.0.2 h1:jzhAVGtqPKbwpyCPELlgNWhE1znq+qwJtW5Oi2viEzc= github.com/hashicorp/go-rootcerts v1.0.2/go.mod h1:pqUvnprVnM5bf7AOirdbb01K4ccR319Vf4pU3K5EGc8= @@ -697,27 +640,20 @@ github.com/hashicorp/go-version v1.2.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09 github.com/hashicorp/go.net v0.0.1/go.mod h1:hjKkEWcCURg++eb33jQU7oqQcI9XDCnUzHA0oac0k90= github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= -github.com/hashicorp/golang-lru v0.5.3 h1:YPkqC67at8FYaadspW/6uE0COsBxS2656RLEr8Bppgk= github.com/hashicorp/golang-lru v0.5.3/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= github.com/hashicorp/golang-lru v0.5.4 h1:YDjusn29QI/Das2iO9M0BHnIbxPeyuCHsjMW+lJfyTc= github.com/hashicorp/golang-lru v0.5.4/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64= github.com/hashicorp/mdns v1.0.0/go.mod h1:tL+uN++7HEJ6SQLQ2/p+z2pH24WQKWjBPkE0mNTz8vQ= github.com/hashicorp/mdns v1.0.1/go.mod h1:4gW7WsVCke5TE7EPeYliwHlRUyBtfCwuFwuMg2DmyNY= -github.com/hashicorp/memberlist v0.1.3 h1:EmmoJme1matNzb+hMpDuR/0sbJSUisxyqBGG676r31M= github.com/hashicorp/memberlist v0.1.3/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I= -github.com/hashicorp/memberlist v0.1.4 h1:gkyML/r71w3FL8gUi74Vk76avkj/9lYAY9lvg0OcoGs= github.com/hashicorp/memberlist v0.1.4/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I= -github.com/hashicorp/memberlist v0.1.5 h1:AYBsgJOW9gab/toO5tEB8lWetVgDKZycqkebJ8xxpqM= github.com/hashicorp/memberlist v0.1.5/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I= -github.com/hashicorp/memberlist v0.2.0 h1:WeeNspppWi5s1OFefTviPQueC/Bq8dONfvNjPhiEQKE= github.com/hashicorp/memberlist v0.2.0/go.mod h1:MS2lj3INKhZjWNqd3N0m3J+Jxf3DAOnAH9VT3Sh9MUE= github.com/hashicorp/memberlist v0.2.2 h1:5+RffWKwqJ71YPu9mWsF7ZOscZmwfasdA8kbdC7AO2g= github.com/hashicorp/memberlist v0.2.2/go.mod h1:MS2lj3INKhZjWNqd3N0m3J+Jxf3DAOnAH9VT3Sh9MUE= -github.com/hashicorp/serf v0.8.2 h1:YZ7UKsJv+hKjqGVUUbtE3HNj79Eln2oQ75tniF6iPt0= github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc= github.com/hashicorp/serf v0.8.3/go.mod h1:UpNcs7fFbpKIyZaUuSW6EPiH+eZC7OuyFD+wc1oal+k= -github.com/hashicorp/serf v0.8.5 h1:ZynDUIQiA8usmRgPdGPHFdPnb1wgGI9tK3mO9hcAJjc= github.com/hashicorp/serf v0.8.5/go.mod h1:UpNcs7fFbpKIyZaUuSW6EPiH+eZC7OuyFD+wc1oal+k= github.com/hashicorp/serf v0.9.0 h1:+Zd/16AJ9lxk9RzfTDyv/TLhZ8UerqYS0/+JGCIDaa0= github.com/hashicorp/serf v0.9.0/go.mod h1:YL0HO+FifKOW2u1ke99DGVu1zhcpZzNwrLIqBC7vbYU= @@ -747,7 +683,6 @@ github.com/jessevdk/go-flags v0.0.0-20180331124232-1c38ed7ad0cc/go.mod h1:4FA24M github.com/jessevdk/go-flags v1.4.0 h1:4IU2WS7AumrZ/40jfhf4QVDMsQwqA7VEHozFRrGARJA= github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= github.com/jmespath/go-jmespath 
v0.0.0-20160202185014-0b12d6b521d8/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= -github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af h1:pmfjZENx5imkbgOkpRUYLnmbU7UEFbjtDA2hxJ1ichM= github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= github.com/jmespath/go-jmespath v0.3.0 h1:OS12ieG61fsCg5+qLJ+SsW9NicxNkg3b25OyT2yCeUc= github.com/jmespath/go-jmespath v0.3.0/go.mod h1:9QtRXoHjLGCJ5IBSaohpXITPlowMeeYCZ7fLUTSywik= @@ -766,14 +701,11 @@ github.com/jsimonetti/rtnetlink v0.0.0-20200117123717-f846d4f6c1f4/go.mod h1:WGu github.com/json-iterator/go v0.0.0-20180612202835-f2b4162afba3/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= github.com/json-iterator/go v1.1.5/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= -github.com/json-iterator/go v1.1.7 h1:KfgG9LzI+pYjr4xvmz/5H4FXjokeP+rlHLhv3iH62Fo= github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/json-iterator/go v1.1.8/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= -github.com/json-iterator/go v1.1.9 h1:9yzud/Ht36ygwatGx56VwCZtlI/2AD15T1X2sjSuGns= github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/json-iterator/go v1.1.10 h1:Kz6Cvnvv2wGdaG/V8yMvfkmNiXq9Ya2KUv4rouJJr68= github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= -github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024 h1:rBMNdlhTLzJjJSDIjNEXX1Pz3Hmwmz91v+zycvx9PJc= github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= github.com/jstemmer/go-junit-report v0.9.1 h1:6QPYqodiu3GuPL+7mfx+NwDdp2eTkp9IfEUpgAwUN0o= github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= @@ -801,9 +733,7 @@ github.com/knq/sysutil v0.0.0-20191005231841-15668db23d08/go.mod h1:dFWs1zEqDjFt github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/konsorten/go-windows-terminal-sequences v1.0.2 h1:DB17ag19krx9CFsz4o3enTrPXyIXCl+2iCXH/aMAp9s= github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= -github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515 h1:T+h1c/A9Gawja4Y9mFVWj2vyii2bbUNDw3kt9VxK2EY= github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= -github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pretty v0.2.0 h1:s5hAObm+yFO5uHYt5dYjxi2rXrsnmRpJx4OYvIWUaQs= github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= @@ -814,7 +744,6 @@ github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kshvakov/clickhouse v1.3.5/go.mod h1:DMzX7FxRymoNkVgizH0DWAL8Cur7wHLgx3MUnGwJqpE= github.com/kylelemons/godebug v0.0.0-20160406211939-eadb3ce320cb/go.mod h1:B69LEHPfb2qLo0BaaOLcbitczOKLWTsrBG9LczfCD4k= -github.com/kylelemons/godebug v0.0.0-20170820004349-d65d576e9348 h1:MtvEpTB6LX3vkb4ax0b5D2DHbNAUsen0Gx5wZoq3lV4= github.com/kylelemons/godebug v0.0.0-20170820004349-d65d576e9348/go.mod h1:B69LEHPfb2qLo0BaaOLcbitczOKLWTsrBG9LczfCD4k= 
github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc= github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= @@ -839,24 +768,22 @@ github.com/mailru/easyjson v0.0.0-20160728113105-d5b7844b561a/go.mod h1:C1wdFJiN github.com/mailru/easyjson v0.0.0-20180823135443-60711f1a8329/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mailru/easyjson v0.0.0-20190312143242-1de009706dbe/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= -github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e h1:hB2xlXdHp/pmPZq0y3QnmWAArdw9PqbmotexnWx/FU8= github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mailru/easyjson v0.7.0/go.mod h1:KAzv3t3aY1NaHWoQz1+4F1ccyAH66Jk7yos7ldAVICs= github.com/mailru/easyjson v0.7.1 h1:mdxE1MF9o53iCb2Ghj1VfWvh7ZOwHpnVG/xwXrV90U8= github.com/mailru/easyjson v0.7.1/go.mod h1:KAzv3t3aY1NaHWoQz1+4F1ccyAH66Jk7yos7ldAVICs= github.com/markbates/oncer v0.0.0-20181203154359-bf2de49a0be2/go.mod h1:Ld9puTsIW75CHf65OeIOkyKbteujpZVXDpWK6YGZbxE= github.com/markbates/safe v1.0.1/go.mod h1:nAqgmRi7cY2nqMc92/bSEeQA+R4OheNU2T1kNSCBdG0= -github.com/mattn/go-colorable v0.0.9 h1:UVL0vNpWh04HeJXV0KLcaT7r06gOH2l4OW6ddYRUIY4= github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= github.com/mattn/go-colorable v0.1.6 h1:6Su7aK7lXmJ/U79bYtBjLNaha4Fs1Rg9plHpcH+vvnE= github.com/mattn/go-colorable v0.1.6/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= github.com/mattn/go-ieproxy v0.0.0-20190610004146-91bb50d98149/go.mod h1:31jz6HNzdxOmlERGGEc4v/dMssOfmp2p5bT/okiKFFc= +github.com/mattn/go-ieproxy v0.0.0-20190702010315-6dee0af9227d h1:oNAwILwmgWKFpuU+dXvI6dl9jG2mAWAZLX3r9s0PPiw= github.com/mattn/go-ieproxy v0.0.0-20190702010315-6dee0af9227d/go.mod h1:31jz6HNzdxOmlERGGEc4v/dMssOfmp2p5bT/okiKFFc= github.com/mattn/go-ieproxy v0.0.0-20191113090002-7c0f6868bffe h1:YioO2TiJyAHWHyCRQCP8jk5IzTqmsbGc5qQPIhHo6xs= github.com/mattn/go-ieproxy v0.0.0-20191113090002-7c0f6868bffe/go.mod h1:pYabZ6IHcRpFh7vIaLfK7rdcWgFEb3SFJ6/gNWuh88E= github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= -github.com/mattn/go-isatty v0.0.4 h1:bnP0vzxcAdeI1zdubAl5PjU6zsERjGZb7raWodagDYs= github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= github.com/mattn/go-isatty v0.0.10/go.mod h1:qgIWMr58cqv1PHHyhnkY9lrL7etaEgOFcMEpPG5Rm84= @@ -882,12 +809,9 @@ github.com/mdlayher/netlink v1.1.0/go.mod h1:H4WCitaheIsdF9yOYu8CFmCgQthAPIWZmcK github.com/mdlayher/wifi v0.0.0-20190303161829-b1436901ddee/go.mod h1:Evt/EIne46u9PtQbeTx2NTcqURpr5K4SvKtGmBuDPN8= github.com/mgutz/ansi v0.0.0-20170206155736-9520e82c474b/go.mod h1:01TrycV0kFyexm33Z7vhZRXopbI8J3TDReVlkTgMUxE= github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= -github.com/miekg/dns v1.1.15 h1:CSSIDtllwGLMoA6zjdKnaE6Tx6eVUxQ29LUgGetiDCI= github.com/miekg/dns v1.1.15/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= -github.com/miekg/dns v1.1.22 h1:Jm64b3bO9kP43ddLjL2EY3Io6bmy1qGb9Xxz6TqS6rc= github.com/miekg/dns v1.1.22/go.mod h1:bPDLeHnStXmXAq1m/Ch/hvfNHr14JKNPMBo3VZKjuso= github.com/miekg/dns 
v1.1.26/go.mod h1:bPDLeHnStXmXAq1m/Ch/hvfNHr14JKNPMBo3VZKjuso= -github.com/miekg/dns v1.1.29 h1:xHBEhR+t5RzcFJjBLJlax2daXOrTYtr9z4WdKEfWFzg= github.com/miekg/dns v1.1.29/go.mod h1:KNUDUusw/aVsxyTYZM1oqvCicbwhgbNgztCETuNZ7xM= github.com/miekg/dns v1.1.30 h1:Qww6FseFn8PRfw07jueqIXqodm0JKiiKuK0DeXSqfyo= github.com/miekg/dns v1.1.30/go.mod h1:KNUDUusw/aVsxyTYZM1oqvCicbwhgbNgztCETuNZ7xM= @@ -897,7 +821,6 @@ github.com/minio/minio-go/v6 v6.0.56/go.mod h1:KQMM+/44DSlSGSQWSfRrAZ12FVMmpWNuX github.com/minio/sha256-simd v0.1.1 h1:5QHSlgo3nt5yKOJrC7W8w7X+NFl8cMPZm96iu8kKUJU= github.com/minio/sha256-simd v0.1.1/go.mod h1:B5e1o+1/KgNmWrSQK08Y6Z1Vb5pwIktudl0J58iy0KM= github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc= -github.com/mitchellh/go-homedir v1.0.0 h1:vKb8ShqSby24Yrqr/yDYkuFz8d0WUjys40rvnGC8aR0= github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= @@ -907,7 +830,6 @@ github.com/mitchellh/go-wordwrap v1.0.0/go.mod h1:ZXFpozHsX6DPmq2I0TCekCxypsnAUb github.com/mitchellh/gox v0.4.0/go.mod h1:Sd9lOJ0+aimLBi73mGofS1ycjY8lL3uZM3JPS42BGNg= github.com/mitchellh/iochan v1.0.0/go.mod h1:JwYml1nuB7xOzsp52dPpHFffvOCDupsG0QubkSMEySY= github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= -github.com/mitchellh/mapstructure v1.1.2 h1:fmNYVwqnSfB9mZU6OS2O6GsXM+wcskZDuKQzvN1EDeE= github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= github.com/mitchellh/mapstructure v1.2.2 h1:dxe5oCinTXiTIcfgmZecdCzPmAJKd46KsCWc35r0TV4= github.com/mitchellh/mapstructure v1.2.2/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= @@ -943,7 +865,6 @@ github.com/nats-io/nuid v1.0.1/go.mod h1:19wcPz3Ph3q0Jbyiqsd0kePYG7A95tJPxeL+1OS github.com/ncw/swift v1.0.50 h1:E01b5bVIssNhx2KnzAjMWEXkKrb8ytTqCDWY7lqmWjA= github.com/ncw/swift v1.0.50/go.mod h1:23YIA4yWVnGwv2dQlN4bB7egfYX6YLn0Yo/S6zZO/ZM= github.com/oklog/oklog v0.3.2/go.mod h1:FCV+B7mhrz4o+ueLpx+KqkyXRGMWOYEvfiXtdGtbWGs= -github.com/oklog/run v1.0.0 h1:Ru7dDtJNOyC66gQ5dQmaCa0qIsAUFY3sFpK1Xk8igrw= github.com/oklog/run v1.0.0/go.mod h1:dlhp/R75TPv97u0XWUtDeV/lRKWPKSdTuV0TZvrmrQA= github.com/oklog/run v1.1.0 h1:GEenZ1cK0+q0+wsJew9qUg/DyD8k3JzYsZAi5gYi2mA= github.com/oklog/run v1.1.0/go.mod h1:sVPdnTZT1zYwAJeCMu2Th4T21pA3FPOQRfWjQlk7DVU= @@ -956,23 +877,20 @@ github.com/olekukonko/tablewriter v0.0.2/go.mod h1:rSAaSIOAGT9odnlyGlUfAJaoc5w2f github.com/onsi/ginkgo v0.0.0-20170829012221-11459a886d9c/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= -github.com/onsi/ginkgo v1.8.0 h1:VkHVNpR4iVnU8XQR6DBm8BqYjN7CRzw+xKUbVVbbW9w= github.com/onsi/ginkgo v1.8.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.10.1/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= -github.com/onsi/ginkgo v1.10.3 h1:OoxbjfXVZyod1fmWYhI7SEyaD8B00ynP3T+D5GiyHOY= github.com/onsi/ginkgo v1.10.3/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.11.0 h1:JAKSXpt1YjtLA7YpPiqO9ss6sNXEsPfSGdwN0UHqzrw= github.com/onsi/ginkgo v1.11.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/gomega 
v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= github.com/onsi/gomega v1.4.2/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= -github.com/onsi/gomega v1.5.0 h1:izbySO9zDPmjJ8rDjLvkA2zJHIo+HkYXHnf7eN7SSyo= github.com/onsi/gomega v1.5.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= +github.com/onsi/gomega v1.7.0 h1:XPnZz8VVBHjVsy1vzJmRwIcSwiUO+JFfrv/xGiigmME= github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/onsi/gomega v1.7.1 h1:K0jcRCwNQM3vFGh1ppMtDh/+7ApJrjldlX8fA0jDTLQ= github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= github.com/op/go-logging v0.0.0-20160315200505-970db520ece7/go.mod h1:HzydrMdWErDVzsI23lYNej1Htcns9BCg93Dk0bBINWk= -github.com/opencontainers/go-digest v1.0.0-rc1 h1:WzifXhOVOEOuFYOJAW6aQqW0TooG2iki3E3Ii+WN7gQ= github.com/opencontainers/go-digest v1.0.0-rc1/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s= github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= @@ -981,16 +899,13 @@ github.com/opencontainers/image-spec v1.0.1/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zM github.com/opentracing-contrib/go-grpc v0.0.0-20180928155321-4b5a12d3ff02 h1:0R5mDLI66Qw13qN80TRz85zthQ2nf2+uDyiV23w6c3Q= github.com/opentracing-contrib/go-grpc v0.0.0-20180928155321-4b5a12d3ff02/go.mod h1:JNdpVEzCpXBgIiv4ds+TzhN1hrtxq6ClLrTlT9OQRSc= github.com/opentracing-contrib/go-observer v0.0.0-20170622124052-a52f23424492/go.mod h1:Ngi6UdF0k5OKD5t5wlmGhe/EDKPoUM3BXZSSfIuJbis= -github.com/opentracing-contrib/go-stdlib v0.0.0-20190519235532-cf7a6c988dc9 h1:QsgXACQhd9QJhEmRumbsMQQvBtmdS0mafoVEBplWXEg= github.com/opentracing-contrib/go-stdlib v0.0.0-20190519235532-cf7a6c988dc9/go.mod h1:PLldrQSroqzH70Xl+1DQcGnefIbqsKR7UDaiux3zV+w= github.com/opentracing-contrib/go-stdlib v1.0.0 h1:TBS7YuVotp8myLon4Pv7BtCBzOTo1DeZCld0Z63mW2w= github.com/opentracing-contrib/go-stdlib v1.0.0/go.mod h1:qtI1ogk+2JhVPIXVc6q+NHziSmy2W5GbdQZFUHADCBU= github.com/opentracing/basictracer-go v1.0.0/go.mod h1:QfBfYuafItcjQuMwinw9GhYKwFXS9KnPs5lxoYwgW74= github.com/opentracing/opentracing-go v1.0.2/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= github.com/opentracing/opentracing-go v1.0.3-0.20180606204148-bd9c31933947/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= -github.com/opentracing/opentracing-go v1.1.0 h1:pWlfV3Bxv7k65HYwkikxat0+s3pV4bsqf19k25Ur8rU= github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= -github.com/opentracing/opentracing-go v1.1.1-0.20200124165624-2876d2018785 h1:Oi9nYnU9jbiUVyoRTQfMpSdGzNVmEI+/9fija3lcnjU= github.com/opentracing/opentracing-go v1.1.1-0.20200124165624-2876d2018785/go.mod h1:C+iumr2ni468+1jvcHXLCdqP9uQnoQbdX93F3aWahWU= github.com/opentracing/opentracing-go v1.2.0 h1:uEJPy/1a5RIPAJ0Ov+OIO8OxWu77jEv+1B0VhjKrZUs= github.com/opentracing/opentracing-go v1.2.0/go.mod h1:GxEUsuufX4nBwe+T+Wl9TAgYrxe9dPLANfrWvHYVTgc= @@ -999,7 +914,6 @@ github.com/openzipkin/zipkin-go v0.1.6/go.mod h1:QgAqvLzwWbR/WpD4A3cGpPtJrZXNIiJ github.com/openzipkin/zipkin-go v0.2.1/go.mod h1:NaW6tEwdmWMaCDZzg8sh+IBNOxHMPnhQw8ySjnjRyN4= github.com/openzipkin/zipkin-go v0.2.2/go.mod h1:NaW6tEwdmWMaCDZzg8sh+IBNOxHMPnhQw8ySjnjRyN4= github.com/pact-foundation/pact-go v1.0.4/go.mod h1:uExwJY4kCzNPcHRj+hCR/HBbOOIwwtUjcrb0b5/5kLM= 
-github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c h1:Lgl0gzECD8GnQ5QCWA8o6BtfL6mDH5rQgM4/fX3avOs= github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= github.com/pascaldekloe/goe v0.1.0 h1:cBOtyMzM9HTpWjXfbbunk26uA6nG3a8n06Wieeh0MwY= github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= @@ -1015,7 +929,6 @@ github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi github.com/pierrec/lz4 v2.5.3-0.20200429092203-e876bbd321b3+incompatible h1:wPraQD8xUZ14zNJcKn9cz/+n3r6H2NklrGqq7J+c5qY= github.com/pierrec/lz4 v2.5.3-0.20200429092203-e876bbd321b3+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= @@ -1027,7 +940,6 @@ github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZN github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI= github.com/prometheus/alertmanager v0.18.0/go.mod h1:WcxHBl40VSPuOaqWae6l6HpnEOVRIycEJ7i9iYkadEE= github.com/prometheus/alertmanager v0.19.0/go.mod h1:Eyp94Yi/T+kdeb2qvq66E3RGuph5T/jm/RBVh4yz1xo= -github.com/prometheus/alertmanager v0.20.0 h1:PBMNY7oyIvYMBBIag35/C0hO7xn8+35p4V5rNAph5N8= github.com/prometheus/alertmanager v0.20.0/go.mod h1:9g2i48FAyZW6BtbsnvHtMHQXl2aVtrORKwKVCQ+nbrg= github.com/prometheus/alertmanager v0.21.0 h1:qK51JcUR9l/unhawGA9F9B64OCYfcGewhPNprem/Acc= github.com/prometheus/alertmanager v0.21.0/go.mod h1:h7tJ81NA0VLWvWEayi1QltevFkLF3KxmC/malTcT8Go= @@ -1038,13 +950,11 @@ github.com/prometheus/client_golang v0.9.3-0.20190127221311-3c4408c8b829/go.mod github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= github.com/prometheus/client_golang v1.1.0/go.mod h1:I1FGZT9+L76gKKOs5djB6ezCbFQP1xR9D75/vuwEF3g= github.com/prometheus/client_golang v1.2.0/go.mod h1:XMU6Z2MjaRKVu/dC1qupJI9SiNkDYzz3xecMgSW/F+U= -github.com/prometheus/client_golang v1.2.1 h1:JnMpQc6ppsNgw9QPAGF6Dod479itz7lvlsMzzNayLOI= github.com/prometheus/client_golang v1.2.1/go.mod h1:XMU6Z2MjaRKVu/dC1qupJI9SiNkDYzz3xecMgSW/F+U= github.com/prometheus/client_golang v1.3.0/go.mod h1:hJaj2vgQTGQmVCsAACORcieXFeDPbaTKGT+JTgUa3og= github.com/prometheus/client_golang v1.4.0/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU= github.com/prometheus/client_golang v1.4.1/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU= github.com/prometheus/client_golang v1.6.0/go.mod h1:ZLOG9ck3JLRdB5MgO8f+lLTe83AXG6ro35rLTxvnIl4= -github.com/prometheus/client_golang v1.6.1-0.20200604110148-03575cad4e55 h1:ADHic9K/n5JQDFx/OAQbxkwR+uPJ9oYmN0taBMyYrBo= github.com/prometheus/client_golang v1.6.1-0.20200604110148-03575cad4e55/go.mod h1:25h+Uz1WvXDBZYwqGX8PAb71RBkcjxEVV/R5wGnsq4I= github.com/prometheus/client_golang v1.7.1 h1:NTGy1Ja9pByO+xAeH/qiWnLrKtr3hJPNjaVUwnjpdpA= github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M= @@ -1052,7 +962,6 @@ github.com/prometheus/client_model v0.0.0-20170216185247-6f3806018612/go.mod h1: github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod 
h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190115171406-56726106282f/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4 h1:gQz4mCbXsO+nc9n1hCxHcGA3Zx3Eo+UHZoInFGUIXNM= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.1.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.2.0 h1:uq5h0d+GuxiXLJLNABMgp2qUWDPiLvgCzz2dUR+/W/M= @@ -1062,10 +971,8 @@ github.com/prometheus/common v0.0.0-20181126121408-4724e9255275/go.mod h1:daVV7q github.com/prometheus/common v0.2.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.6.0/go.mod h1:eBmuwkDJBwy6iBfxCBob6t6dR6ENT/y+J+Zk0j9GMYc= -github.com/prometheus/common v0.7.0 h1:L+1lyG48J1zAQXA3RBX/nG/B3gjlHq0zTt2tlbJLyCY= github.com/prometheus/common v0.7.0/go.mod h1:DjGbpBbp5NYNiECxcL/VnbXCCaQpKd3tt26CguLLsqA= github.com/prometheus/common v0.8.0/go.mod h1:PC/OgXc+UN7B4ALwvn1yzVZmVwvhXp5JsbBv6wSv6i0= -github.com/prometheus/common v0.9.1 h1:KOMtN28tlbam3/7ZKEYKHhKoJZYYj3gMH4uc62x7X7U= github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8bs7vj7HSQ4= github.com/prometheus/common v0.10.0 h1:RyRA7RzGXQZiW+tGMr7sxa85G1z0yOpM1qq5c8lNawc= github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo= @@ -1077,24 +984,18 @@ github.com/prometheus/procfs v0.0.0-20181204211112-1dc9a6cbc91a/go.mod h1:c3At6R github.com/prometheus/procfs v0.0.0-20190117184657-bf6a532e95b1/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20190425082905-87a4384529e0/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= -github.com/prometheus/procfs v0.0.3 h1:CTwfnzjQ+8dS6MhHHu4YswVAD99sL2wjPqP+VkURmKE= github.com/prometheus/procfs v0.0.3/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDaekg4FpcdQ= github.com/prometheus/procfs v0.0.5/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDaekg4FpcdQ= -github.com/prometheus/procfs v0.0.6 h1:0qbH+Yqu/cj1ViVLvEWCP6qMQ4efWUj6bQqOEA0V0U4= github.com/prometheus/procfs v0.0.6/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A= -github.com/prometheus/procfs v0.0.8 h1:+fpWZdT24pJBiqJdAwYBjPSk+5YmQzYNPYzQsdzLkt8= github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A= -github.com/prometheus/procfs v0.0.11 h1:DhHlBtkHWPYi8O2y31JkK0TF+DGM+51OopZjH/Ia5qI= github.com/prometheus/procfs v0.0.11/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= github.com/prometheus/procfs v0.1.3 h1:F0+tqvhOksq22sc6iCHF5WGlWjdwj92p0udFh1VFBS8= github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= github.com/prometheus/prometheus v0.0.0-20180315085919-58e2a31db8de/go.mod h1:oAIUtOny2rjMX0OWN5vPR5/q/twIROJvdqnQKDdil/s= github.com/prometheus/prometheus v0.0.0-20190818123050-43acd0e2e93f/go.mod h1:rMTlmxGCvukf2KMu3fClMDKLLoJ5hl61MhcJ7xKakf0= github.com/prometheus/prometheus v1.8.2-0.20200107122003-4708915ac6ef/go.mod h1:7U90zPoLkWjEIQcy/rweQla82OCTUzxVHE51G3OhJbI= -github.com/prometheus/prometheus 
v1.8.2-0.20200213233353-b90be6f32a33 h1:HBYrMJj5iosUjUkAK9L5GO+5eEQXbcrzdjkqY9HV5W4= github.com/prometheus/prometheus v1.8.2-0.20200213233353-b90be6f32a33/go.mod h1:fkIPPkuZnkXyopYHmXPxf9rgiPkVgZCN8w9o8+UgBlY= github.com/prometheus/prometheus v1.8.2-0.20200619100132-74207c04655e/go.mod h1:QV6T0PPQi5UFmqcLBJw3JiyIR8r1O7KEv9qlVw4VV40= -github.com/prometheus/prometheus v1.8.2-0.20200722151933-4a8531a64b32 h1:GcJMaFu1uu6rSueToTRZuVS3AiORbFtLEDMUfp4GA9Q= github.com/prometheus/prometheus v1.8.2-0.20200722151933-4a8531a64b32/go.mod h1:+/y4DzJ62qmhy0o/H4PtXegRXw+80E8RVRHhLbv+bkM= github.com/prometheus/prometheus v1.8.2-0.20200727090838-6f296594a852 h1:aRBuOcI/bN5f/UqmIGn8CajY6W0mPBEajK8q+SFgNZY= github.com/prometheus/prometheus v1.8.2-0.20200727090838-6f296594a852/go.mod h1:yzkxU+U4d5ZgVH/ywg/zONKN91UPLKsKCYkcyGOBH18= @@ -1107,16 +1008,13 @@ github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6L github.com/rogpeppe/go-internal v1.1.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/rogpeppe/go-internal v1.2.2/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= -github.com/rs/cors v1.6.0 h1:G9tHG9lebljV9mfp9SNPDL36nCDxmo3zTlAf1YgvzmI= github.com/rs/cors v1.6.0/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU= github.com/rs/cors v1.7.0 h1:+88SsELBHx5r+hZ8TCkggzSstaWNbDvThkVK8H6f9ik= github.com/rs/cors v1.7.0/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU= github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= github.com/ryanuber/columnize v2.1.0+incompatible/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= -github.com/samuel/go-zookeeper v0.0.0-20190810000440-0ceca61e4d75 h1:cA+Ubq9qEVIQhIWvP2kNuSZ2CmnfBJFSRq+kO1pu2cc= github.com/samuel/go-zookeeper v0.0.0-20190810000440-0ceca61e4d75/go.mod h1:gi+0XIa01GRL2eRQVjQkKGqKF3SF9vZR/HnPullcV2E= -github.com/samuel/go-zookeeper v0.0.0-20190923202752-2cc03de413da h1:p3Vo3i64TCLY7gIfzeQaUJ+kppEO5WQG3cL8iE8tGHU= github.com/samuel/go-zookeeper v0.0.0-20190923202752-2cc03de413da/go.mod h1:gi+0XIa01GRL2eRQVjQkKGqKF3SF9vZR/HnPullcV2E= github.com/samuel/go-zookeeper v0.0.0-20200724154423-2164a8ac840e h1:CGjiMQ0wMH4wtNWrlj6kiTbkPt2F3rbYnhGX6TWLfco= github.com/samuel/go-zookeeper v0.0.0-20200724154423-2164a8ac840e/go.mod h1:gi+0XIa01GRL2eRQVjQkKGqKF3SF9vZR/HnPullcV2E= @@ -1125,13 +1023,11 @@ github.com/satori/go.uuid v1.2.0 h1:0uYX9dsZ2yD7q2RtLRtPSdGDWzjeM3TbMJP9utgA0ww= github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0= github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529 h1:nn5Wsu0esKSJiIVhscUtVbo7ada43DJhG55ua/hjS5I= github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc= -github.com/segmentio/fasthash v0.0.0-20180216231524-a72b379d632e h1:uO75wNGioszjmIzcY/tvdDYKRLVvzggtAmmJkn9j4GQ= github.com/segmentio/fasthash v0.0.0-20180216231524-a72b379d632e/go.mod h1:tm/wZFQ8e24NYaBGIlnO2WGCAi67re4HHuOm0sftE/M= github.com/segmentio/fasthash v1.0.2 h1:86fGDl2hB+iSHYlccB/FP9qRGvLNuH/fhEEFn6gnQUs= github.com/segmentio/fasthash v1.0.2/go.mod h1:waKX8l2N8yckOgmSsXJi7x1ZfdKZ4x7KRMzBtS3oedY= github.com/segmentio/kafka-go v0.1.0/go.mod h1:X6itGqS9L4jDletMsxZ7Dz+JFWxM6JHfPOCvTvk+EJo= github.com/segmentio/kafka-go v0.2.0/go.mod h1:X6itGqS9L4jDletMsxZ7Dz+JFWxM6JHfPOCvTvk+EJo= 
-github.com/sercand/kuberesolver v2.1.0+incompatible h1:iJ1oCzPQ/aacsbCWLfJW1hPKkHMvCEgNSA9kvWcb9MY= github.com/sercand/kuberesolver v2.1.0+incompatible/go.mod h1:lWF3GL0xptCB/vCiJPl/ZshwPsX/n4Y7u0CW9E7aQIQ= github.com/sercand/kuberesolver v2.4.0+incompatible h1:WE2OlRf6wjLxHwNkkFLQGaZcVLEXjMjBPjjEU5vksH8= github.com/sercand/kuberesolver v2.4.0+incompatible/go.mod h1:lWF3GL0xptCB/vCiJPl/ZshwPsX/n4Y7u0CW9E7aQIQ= @@ -1142,7 +1038,6 @@ github.com/shurcooL/httpfs v0.0.0-20190707220628-8d4bc4ba7749 h1:bUGsEnyNbVPw06B github.com/shurcooL/httpfs v0.0.0-20190707220628-8d4bc4ba7749/go.mod h1:ZY1cvUeJuFPAdZ/B6v7RHavJWZn2YPVFQ1OSXhCGOkg= github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= github.com/shurcooL/vfsgen v0.0.0-20180825020608-02ddb050ef6b/go.mod h1:TrYk7fJVaAttu97ZZKrO9UbRa8izdowaMIZcxYMbVaw= -github.com/shurcooL/vfsgen v0.0.0-20181202132449-6a9ea43bcacd h1:ug7PpSOB5RBPK1Kg6qskGBoP3Vnj/aNYFTznWvlkGo0= github.com/shurcooL/vfsgen v0.0.0-20181202132449-6a9ea43bcacd/go.mod h1:TrYk7fJVaAttu97ZZKrO9UbRa8izdowaMIZcxYMbVaw= github.com/shurcooL/vfsgen v0.0.0-20200627165143-92b8a710ab6c h1:XLPw6rny9Vrrvrzhw8pNLrC2+x/kH0a/3gOx5xWDa6Y= github.com/shurcooL/vfsgen v0.0.0-20200627165143-92b8a710ab6c/go.mod h1:TrYk7fJVaAttu97ZZKrO9UbRa8izdowaMIZcxYMbVaw= @@ -1151,10 +1046,10 @@ github.com/sirupsen/logrus v1.0.5/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjM github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= github.com/sirupsen/logrus v1.4.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q= -github.com/sirupsen/logrus v1.4.2 h1:SPIRibHv4MatM3XXNO2BJeFLZwZ2LvZgfQ5+UNI2im4= github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= github.com/sirupsen/logrus v1.5.0 h1:1N5EYkVAPEywqZRJd7cwnRtCb6xJx7NH3T3WUTF980Q= github.com/sirupsen/logrus v1.5.0/go.mod h1:+F7Ogzej0PZc/94MaYx/nvG9jOFMD2osvC3s+Squfpo= +github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d h1:zE9ykElWQ6/NYmHa3jpm/yHnI4xSofP+UP6SpjHcSeM= github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= github.com/smartystreets/assertions v1.0.1 h1:voD4ITNjPL5jjBfgR/r8fPIIBrliWrWHeiJApdr3r4w= github.com/smartystreets/assertions v1.0.1/go.mod h1:kHHU4qYBaI3q23Pp3VPrmWhuIUrLW/7eUrw0BU5VaoM= @@ -1165,6 +1060,7 @@ github.com/soheilhy/cmux v0.1.4 h1:0HKaf1o97UwFjHH9o5XsHUOF+tqmdA7KEzXLpiyaw0E= github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= github.com/sony/gobreaker v0.4.1/go.mod h1:ZKptC7FHNvhBz7dN2LGjPVBz2sZJmc0/PkyDJOjmxWY= github.com/soundcloud/go-runit v0.0.0-20150630195641-06ad41a06c4a/go.mod h1:LeFCbQYJ3KJlPs/FvPz2dy1tkpxyeNESVyCNNzRXFR0= +github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72 h1:qLC7fQah7D6K1B0ujays3HV9gkFtllcxhzImRR7ArPQ= github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= github.com/spaolacci/murmur3 v1.1.0 h1:7c1g84S4BPRrfL5Xrdp6fOJ206sU9y293DDHaoy0bLI= github.com/spaolacci/murmur3 v1.1.0/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= @@ -1174,7 +1070,6 @@ github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkU github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= github.com/spf13/pflag v0.0.0-20170130214245-9ff6c6923cff/go.mod 
h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/pflag v1.0.1/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= -github.com/spf13/pflag v1.0.3 h1:zPAT6CGy6wXeQ7NtTnaTerfKOsV6V6F8agHXFiazDkg= github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= @@ -1188,9 +1083,7 @@ github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoH github.com/stretchr/testify v0.0.0-20151208002404-e3a8ff8ce365/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.2.0/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= -github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= -github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= github.com/stretchr/testify v1.5.1 h1:nOGnQDM7FYENwehXlg/kFVnos3rEvtKTjRvOWSzb6H4= github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= @@ -1201,6 +1094,7 @@ github.com/tidwall/pretty v0.0.0-20180105212114-65a9db5fad51/go.mod h1:XNkn88O1C github.com/tidwall/pretty v1.0.0 h1:HsD+QiTn7sK6flMKIvNmpqz1qrpP3Ps6jOKIKMooyg4= github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk= github.com/tinylib/msgp v1.0.2/go.mod h1:+d+yLhGm8mzTaHzB+wgMYrodPfmZrzkirds8fDWklFE= +github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8 h1:ndzgwNDnKIqyCvHTXaCqh9KlOWKvBry6nuXMJmonVsE= github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5 h1:LnC5Kc/wtumK+WB441p7ynQJzVuNRJiqddSIE3IlSEQ= github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= @@ -1208,18 +1102,14 @@ github.com/tonistiigi/fifo v0.0.0-20190226154929-a9fb20d87448 h1:hbyjqt5UnyKeOT3 github.com/tonistiigi/fifo v0.0.0-20190226154929-a9fb20d87448/go.mod h1:Q5IRRDY+cjIaiOjTAnXN5LKQV5MPqVx5ofQn85Jy5Yw= github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926/go.mod h1:9ESjWnEqriFuLhtthL60Sar/7RFoluCcXsuvEwTV5KM= github.com/uber/jaeger-client-go v2.15.0+incompatible/go.mod h1:WVhlPFC8FDjOFMMWRy2pZqQJSXxYSwNYOkTr/Z6d3Kk= -github.com/uber/jaeger-client-go v2.20.1+incompatible h1:HgqpYBng0n7tLJIlyT4kPCIv5XgCsF+kai1NnnrJzEU= github.com/uber/jaeger-client-go v2.20.1+incompatible/go.mod h1:WVhlPFC8FDjOFMMWRy2pZqQJSXxYSwNYOkTr/Z6d3Kk= -github.com/uber/jaeger-client-go v2.23.1+incompatible h1:uArBYHQR0HqLFFAypI7RsWTzPSj/bDpmZZuQjMLSg1A= github.com/uber/jaeger-client-go v2.23.1+incompatible/go.mod h1:WVhlPFC8FDjOFMMWRy2pZqQJSXxYSwNYOkTr/Z6d3Kk= -github.com/uber/jaeger-client-go v2.24.0+incompatible h1:CGchgJcHsDd2jWnaL4XngByMrXoGHh3n8oCqAKx0uMo= github.com/uber/jaeger-client-go v2.24.0+incompatible/go.mod h1:WVhlPFC8FDjOFMMWRy2pZqQJSXxYSwNYOkTr/Z6d3Kk= github.com/uber/jaeger-client-go v2.25.0+incompatible h1:IxcNZ7WRY1Y3G4poYlx24szfsn/3LvK9QHCq9oQw8+U= github.com/uber/jaeger-client-go v2.25.0+incompatible/go.mod h1:WVhlPFC8FDjOFMMWRy2pZqQJSXxYSwNYOkTr/Z6d3Kk= github.com/uber/jaeger-lib 
v1.5.1-0.20181102163054-1fc5c315e03c/go.mod h1:ComeNDZlWwrWnDv8aPp0Ba6+uUTzImX/AauajbLI56U= github.com/uber/jaeger-lib v2.2.0+incompatible h1:MxZXOiR2JuoANZ3J6DE/U0kSFv/eJ/GfSYVCjK7dyaw= github.com/uber/jaeger-lib v2.2.0+incompatible/go.mod h1:ComeNDZlWwrWnDv8aPp0Ba6+uUTzImX/AauajbLI56U= -github.com/ugorji/go v1.1.4 h1:j4s+tAvLfL3bZyefP2SEWmhBzmuIlH/eqNuPdFPgngw= github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGrc= github.com/ugorji/go v1.1.7 h1:/68gy2h+1mWMrwZFeD1kQialdSzAb432dtpeJ42ovdo= github.com/ugorji/go v1.1.7/go.mod h1:kZn38zHttfInRq0xu/PH0az30d+z6vm202qpg1oXVMw= @@ -1250,7 +1140,6 @@ go.elastic.co/apm v1.5.0/go.mod h1:OdB9sPtM6Vt7oz3VXt7+KR96i9li74qrxBGHTQygFvk= go.elastic.co/apm/module/apmhttp v1.5.0/go.mod h1:1FbmNuyD3ddauwzgVwFB0fqY6KbZt3JkV187tGCYYhY= go.elastic.co/apm/module/apmot v1.5.0/go.mod h1:d2KYwhJParTpyw2WnTNy8geNlHKKFX+4oK3YLlsesWE= go.elastic.co/fastjson v1.0.0/go.mod h1:PmeUOMMtLHQr9ZS9J9owrAVg0FkaZDRZJEFTTGHtchs= -go.etcd.io/bbolt v1.3.3 h1:MUGmc65QhB3pIlaQ5bB4LwqSj6GIonVJXpZiaKNyaKk= go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= go.etcd.io/bbolt v1.3.5-0.20200615073812-232d8fc87f50 h1:ASw9n1EHMftwnP3Az4XW6e308+gNsrHzmdhd0Olz9Hs= go.etcd.io/bbolt v1.3.5-0.20200615073812-232d8fc87f50/go.mod h1:G5EMThwa9y8QZGBClrRx5EY+Yw9kAhnjy3bSjsnlVTQ= @@ -1260,7 +1149,6 @@ go.etcd.io/etcd v0.5.0-alpha.5.0.20200520232829-54ba9589114f h1:pBCD+Z7cy5WPTq+R go.etcd.io/etcd v0.5.0-alpha.5.0.20200520232829-54ba9589114f/go.mod h1:skWido08r9w6Lq/w70DO5XYIKMu4QFu1+4VsqLQuJy8= go.mongodb.org/mongo-driver v1.0.3/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM= go.mongodb.org/mongo-driver v1.0.4/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM= -go.mongodb.org/mongo-driver v1.1.0 h1:aeOqSrhl9eDRAap/3T5pCfMBEBxZ0vuXBP+RMtp2KX8= go.mongodb.org/mongo-driver v1.1.0/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM= go.mongodb.org/mongo-driver v1.1.1/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM= go.mongodb.org/mongo-driver v1.1.2/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM= @@ -1271,14 +1159,11 @@ go.opencensus.io v0.20.1/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk= go.opencensus.io v0.20.2/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk= go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= -go.opencensus.io v0.22.2 h1:75k/FF0Q2YM8QYo07VPddOLBslDt1MZOdEslOHvmzAs= go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.3 h1:8sGtKOrtQqkN1bp2AtX+misvLIlOmsEsNd+9NIcPEm8= go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= -go.uber.org/atomic v1.4.0 h1:cxzIVoETapQEqDhQu3QfnvXAV4AlzcvUCxkVUFw3+EU= go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= -go.uber.org/atomic v1.5.0 h1:OI5t8sDa1Or+q8AeE+yKeB/SDYioSHAgcVljj9JIETY= go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= go.uber.org/atomic v1.5.1/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= go.uber.org/atomic v1.6.0 h1:Ezj3JGmsOnG1MoRWQkPBsKLe9DwWD9QeXzTRzzldNVk= @@ -1286,7 +1171,6 @@ go.uber.org/atomic v1.6.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= go.uber.org/automaxprocs v1.2.0/go.mod h1:YfO3fm683kQpzETxlTGZhGIVmXAhaw3gxeBADbpZtnU= go.uber.org/goleak v1.0.0 
h1:qsup4IcBdlmsnGfqyLl4Ntn3C2XCCuKAE7DwHpScyUo= go.uber.org/goleak v1.0.0/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A= -go.uber.org/multierr v1.1.0 h1:HoEmRHQPVSqub6w2z2d2EOVs2fjyFRGyofhKuyDq0QI= go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= go.uber.org/multierr v1.3.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4= go.uber.org/multierr v1.4.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4= @@ -1295,7 +1179,6 @@ go.uber.org/multierr v1.5.0/go.mod h1:FeouvMocqHpRaaGuG9EjoKcStLC43Zu/fmqdUMPcKY go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee h1:0mgffUl7nfd+FpvXMVz4IDEaUSmT1ysygQC7qYo7sG4= go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9Ejo0C68/HhF8uaILCdgjnY+goOA= go.uber.org/zap v1.9.1/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= -go.uber.org/zap v1.10.0 h1:ORx85nbTijNz8ljznvCMR1ZBIPKFn3jQrag10X2AsuM= go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= go.uber.org/zap v1.13.0/go.mod h1:zwrFLgMcdUuIBviXEYEH1YKNaOBnKXsx2IPda5bBwHM= go.uber.org/zap v1.14.1 h1:nYDKopTbvAPq/NrUVZwT15y2lpROBiLLyoRTbXOYWOo= @@ -1318,12 +1201,10 @@ golang.org/x/crypto v0.0.0-20190617133340-57b3e21c3d56/go.mod h1:yigFU9vqHzYiE8U golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190923035154-9ee001bba392/go.mod h1:/lpIB1dKB+9EgE3H3cr1v9wB50oz8l4C4h62xy7jSTY= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20191112222119-e1110fd1c708 h1:pXVtWnwHkrWD9ru3sDxY/qFK/bfc0egRovX91EjWjf4= golang.org/x/crypto v0.0.0-20191112222119-e1110fd1c708/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20191202143827-86a70503ff7e/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20191206172530-e9b2fee46413/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200220183623-bac4c82f6975/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20200422194213-44a606286825 h1:dSChiwOTvzwbHFTMq2l6uRardHH7/E6SqEkqccinS/o= golang.org/x/crypto v0.0.0-20200422194213-44a606286825/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9 h1:psW17arqaxU48Z5kZ0CQnkZWQJsqcURM6tKiBApRjXI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= @@ -1335,7 +1216,6 @@ golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek= golang.org/x/exp v0.0.0-20191029154019-8994fa331a53/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY= -golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136 h1:A1gGSx58LAGVHUUsOf7IiR0u8Xb6W51gRwfDBhkdcaw= golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY= golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= @@ -1351,7 +1231,6 @@ golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTk golang.org/x/lint 
v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/lint v0.0.0-20190930215403-16217165b5de h1:5hukYrvBGR8/eNkX5mdUezrA6JiaEZDtJb9Ei+1LlBs= golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs= golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= @@ -1363,7 +1242,6 @@ golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKG golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY= golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= -golang.org/x/mod v0.2.0 h1:KU7oHjnv3XNWfa5COkzUifxZmxp1TyI7ImMXqFxLwvQ= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0 h1:RM4zey1++hCTbCVQfnWeKs9/IEsaBLA8vTkd0WVtmH4= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= @@ -1394,23 +1272,19 @@ golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLL golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190923162816-aa69164e4478 h1:l5EDrHhldLYb3ZRHDUhXF7Om7MvYXnkV9/iQNo1lX6g= golang.org/x/net v0.0.0-20190923162816-aa69164e4478/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20191002035440-2ec189313ef0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20191004110552-13f9640d40b9/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20191007182048-72f939374954/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20191112182307-2180aed22343 h1:00ohfJ4K98s3m6BGUoBd8nyfp4Yl0GoIKvw5abItTjI= golang.org/x/net v0.0.0-20191112182307-2180aed22343/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20191126235420-ef20fe5d7933/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200226121028-0de0cce0169b h1:0mm1VjtFUOIlE1SbDlwjYaDxZVDP2S5ou6y0gSgXHu8= golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= 
-golang.org/x/net v0.0.0-20200602114024-627f9648deb9 h1:pNX+40auqi2JqRfOP1akLGtYcn15TUbkhwuCO3foqqM= golang.org/x/net v0.0.0-20200602114024-627f9648deb9/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20200707034311-ab3426394381 h1:VXak5I6aEWmAXeQjA+QSZzlgNrpq9mjcfDemuexIKsU= @@ -1419,7 +1293,6 @@ golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAG golang.org/x/oauth2 v0.0.0-20181106182150-f42d05182288/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190402181905-9f3314589c9a/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45 h1:SVwTIAaPC2U/AvvLNZ2a7OVsmBpC8L5BlwK1whH3hm0= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d h1:TzXSXBo42m9gQenoE3b9BGiEpg5IG2JkU5FkPIawgtw= @@ -1430,9 +1303,7 @@ golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190412183630-56d357773e84/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e h1:vcxGaoTs7kV8m5Np9uUNQin4BrLOthgV7252N8V+FwY= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a h1:WXEvlFVvvGxCJLG6REjsT03iWnKLEWinaScsxF2Vm2o= golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208 h1:qwRHBd0NqMbJxfbotnDhm2ByMI1Shq4Y6oRJo21SGJA= golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -1467,13 +1338,10 @@ golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20190712062909-fae7ac547cb7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190801041406-cbf593c0f2f3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190813064441-fde4db37ae7a h1:aYOabOQFp6Vj6W1F80affTUvO9UxmJRx8K0gsfABByQ= golang.org/x/sys v0.0.0-20190813064441-fde4db37ae7a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456 h1:ng0gs1AKnRRuEMZoTLLlbOd+C17zUDepwGQBb/n+JVg= golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190902133755-9109b7679e13/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190922100055-0a153f010e69/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190924154521-2837fb4f24fe h1:6fAMxZRR6sl1Uq8U61gxU+kPTs2tR8uOySCbBP7BN/M= golang.org/x/sys v0.0.0-20190924154521-2837fb4f24fe/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191008105621-543471e840be/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -1491,7 +1359,6 @@ golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20200107162124-548cf772de50/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200122134326-e047566fdf82 h1:ywK/j/KkyTHcdyYSZNXGjMwgmDSfjglYZ3vStQ/gSCU= golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200124204421-9fbb57f87de9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -1502,9 +1369,7 @@ golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200420163511-1957bb5e6d1f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200602225109-6fdc65e7d980/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1 h1:ogLJMz+qpzav7lGMh10LMvAkM/fAoGlaiiHYiFYdm80= golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae h1:Ih9Yo4hSPImZOpfGuA4bR/ORKTAbhZo2AbWNRCnevdo= golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200724161237-0e2f3a69832c h1:UIcGWL6/wpCfyGuJnRFJRurA+yj8RrW7Q6x2YMCXt6c= golang.org/x/sys v0.0.0-20200724161237-0e2f3a69832c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -1518,9 +1383,7 @@ golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20191024005414-555d28b269f0 h1:/5xXl8Y5W96D+TtHSlonuFqGHIWVuyCkGJLwGh9JJFs= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20200416051211-89c76fbcd5d1 h1:NusfzzA6yGQ+ua51ck7E3omNUX/JuqbFSaRGqU8CcLI= golang.org/x/time v0.0.0-20200416051211-89c76fbcd5d1/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20200630173020-3af7569d3a1e h1:EHBhcS0mlXEAVwNyO2dLfjToGsyY4j24pTs2ScHnX7s= golang.org/x/time v0.0.0-20200630173020-3af7569d3a1e/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= @@ -1563,7 +1426,6 @@ golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5/go.mod h1:b+2E5dAYhXwXZwtn golang.org/x/tools v0.0.0-20191108193012-7d206e10da11/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191111182352-50fa39b762bc/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools 
v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2 h1:EtTFh6h4SAKemS+CURDMTDIANuduG5zKEXShyy18bGA= golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= @@ -1584,9 +1446,7 @@ golang.org/x/tools v0.0.0-20200216192241-b320d3a0f5a2/go.mod h1:TB2adYChydJhpapK golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= golang.org/x/tools v0.0.0-20200513201620-d5fe73897c97/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200603131246-cc40288be839 h1:SxYgZ5FbVts/fm9UsuLycOG8MRWJPm7krdhgPQSayUs= golang.org/x/tools v0.0.0-20200603131246-cc40288be839/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200710042808-f1c4188a97a1 h1:rD1FcWVsRaMY+l8biE9jbWP5MS/CJJ/90a9TMkMgNrM= golang.org/x/tools v0.0.0-20200710042808-f1c4188a97a1/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= golang.org/x/tools v0.0.0-20200725200936-102e7d357031 h1:VtIxiVHWPhnny2ZTi4f9/2diZKqyLaq3FUTuud5+khA= golang.org/x/tools v0.0.0-20200725200936-102e7d357031/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= @@ -1604,17 +1464,14 @@ google.golang.org/api v0.3.1/go.mod h1:6wY9I6uQWHQ8EM57III9mq/AjF+i8G65rmVagqKMt google.golang.org/api v0.3.2/go.mod h1:6wY9I6uQWHQ8EM57III9mq/AjF+i8G65rmVagqKMtkk= google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= -google.golang.org/api v0.8.0 h1:VGGbLNyPF7dvYHhcUGYBBGCRDDK0RRJAI6KCvo0CL+E= google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= -google.golang.org/api v0.14.0 h1:uMf5uLi4eQMRrMKhCplNik4U4H8Z6C1br3zOtAa/aDE= google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= google.golang.org/api v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.26.0 h1:VJZ8h6E8ip82FRpQl848c5vAadxlTXrUh8RzQzSRm08= google.golang.org/api v0.26.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= google.golang.org/api v0.29.0 h1:BaiDisFir8O4IJxvAabCGGkQ6yCJegNQqSVoYUNAnbk= google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM= @@ -1624,7 +1481,6 @@ google.golang.org/appengine v1.3.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7 google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= -google.golang.org/appengine v1.6.5 
h1:tycE03LOZYQNhDpS27tcQdAzLCVMaj7QT2SXxebnpCM= google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= google.golang.org/appengine v1.6.6 h1:lMO5rYAqUxkmaj76jAkRUvt5JZgFymx/+Q5Mzfivuhc= google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= @@ -1643,7 +1499,6 @@ google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBr google.golang.org/genproto v0.0.0-20190927181202-20e1ac93f88c/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= google.golang.org/genproto v0.0.0-20191028173616-919d9bdd9fe6/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9 h1:6XzpBoANz1NqMNfDXzc2QmHmbb1vyMsvRfoP5rM+K1I= google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= @@ -1656,9 +1511,7 @@ google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfG google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= -google.golang.org/genproto v0.0.0-20200603110839-e855014d5736 h1:+IE3xTD+6Eb7QWG5JFp+dQr/XjKpjmrNkh4pdjTdHEs= google.golang.org/genproto v0.0.0-20200603110839-e855014d5736/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA= -google.golang.org/genproto v0.0.0-20200710124503-20a17af7bd0e h1:k+p/u26/lVeNEpdxSeUrm7rTvoFckBKaf7gTzgmHyDA= google.golang.org/genproto v0.0.0-20200710124503-20a17af7bd0e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20200724131911-43cab4749ae7 h1:AWgNCmk2V5HZp9AiCDRBExX/b9I0Ey9F8STHDZlhCC4= google.golang.org/genproto v0.0.0-20200724131911-43cab4749ae7/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= @@ -1675,7 +1528,6 @@ google.golang.org/grpc v1.22.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyac google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.23.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.24.0/go.mod h1:XDChyiUovWa60DnaeDeZmSW86xtLtjtZbwvSiRnRtcA= -google.golang.org/grpc v1.25.1 h1:wdKvqQk7IttEw92GoRyKG2IDrUIpgpj6H6m81yfeMW0= google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= @@ -1711,6 +1563,7 @@ gopkg.in/gemnasium/logrus-airbrake-hook.v2 v2.1.2/go.mod h1:Xk6kEKp8OKb+X14hQBKW gopkg.in/inf.v0 v0.9.0/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= +gopkg.in/ini.v1 v1.42.0 h1:7N3gPTt50s8GuLortA00n8AqRTk75qOP98+mTPpgzRk= gopkg.in/ini.v1 v1.42.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= 
gopkg.in/ini.v1 v1.51.0 h1:AQvPpx3LzTDM0AjnIRlVFwFFGC+npRopjZxLJj6gdno= gopkg.in/ini.v1 v1.51.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= @@ -1720,20 +1573,15 @@ gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWD gopkg.in/warnings.v0 v0.1.2/go.mod h1:jksf8JmL6Qr/oQM2OXTHunEvvTAsrWBLb6OOjuVWRNI= gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74= gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.5 h1:ymVxjfMaHvXD8RqPRmzHHsB3VvucivSkIAvJFDI5O3c= gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.7 h1:VUgggvou5XRW9mHwD/yXxIYSMtY0zoKQf/v226p2nyo= gopkg.in/yaml.v2 v2.2.7/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.3.0 h1:clyUAQHOM3G0M3f5vQj7LuJrETvjVot3Z5el9nffUtU= gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v3 v3.0.0-20191120175047-4206685974f2 h1:XZx7nhd5GMaZpmDaEHFVafUZC7ya0fuo7cSJ3UCKYmM= gopkg.in/yaml.v3 v3.0.0-20191120175047-4206685974f2/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gopkg.in/yaml.v3 v3.0.0-20200603094226-e3079894b1e8 h1:jL/vaozO53FMfZLySWM+4nulF3gQEC6q5jH90LPomDo= gopkg.in/yaml.v3 v3.0.0-20200603094226-e3079894b1e8/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776 h1:tQIYjPdBoyREyB9XMu+nnTclpTYkz2zFM+lzLJFO4gQ= gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= @@ -1744,28 +1592,19 @@ honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWh honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.1-2019.2.3 h1:3JgtbtFHMiCmsznwGVTUWbgGov+pVqnlf1dEJTNAXeM= honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= honnef.co/go/tools v0.0.1-2020.1.3 h1:sXmLre5bzIR6ypkjXCDI3jHPssRhc8KD/Ome589sc3U= honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= howett.net/plist v0.0.0-20181124034731-591f970eefbb/go.mod h1:vMygbs4qMhSZSc4lCUl2OEE+rDiIIJAIdR4m7MiMcm0= -k8s.io/api v0.0.0-20190813020757-36bff7324fb7 h1:4uJOjRn9kWq4AqJRE8+qzmAy+lJd9rh8TY455dNef4U= k8s.io/api v0.0.0-20190813020757-36bff7324fb7/go.mod h1:3Iy+myeAORNCLgjd/Xu9ebwN7Vh59Bw0vh9jhoX+V58= -k8s.io/api v0.0.0-20191115095533-47f6de673b26 h1:6L7CEQVcduEr9eUPN3r3RliLvDrvcaniFOE5B5zRfmc= k8s.io/api v0.0.0-20191115095533-47f6de673b26/go.mod h1:iA/8arsvelvo4IDqIhX4IbjTEKBGgvsf2OraTuRtLFU= -k8s.io/api v0.18.3 h1:2AJaUQdgUZLoDZHrun21PW2Nx9+ll6cUzvn3IKhSIn0= k8s.io/api v0.18.3/go.mod h1:UOaMwERbqJMfeeeHc8XJKawj4P9TgDRnViIqqBeH2QA= -k8s.io/api v0.18.5 h1:fKbCxr+U3fu7k6jB+QeYPD/c6xKYeSJ2KVWmyUypuWM= k8s.io/api v0.18.5/go.mod h1:tN+e/2nbdGKOAH55NMV8oGrMG+3uRlA9GaRfvnCCSNk= 
k8s.io/api v0.18.6 h1:osqrAXbOQjkKIWDTjrqxWQ3w0GkKb1KA1XkUGHHYpeE= k8s.io/api v0.18.6/go.mod h1:eeyxr+cwCjMdLAmr2W3RyDI0VvTawSg/3RFFBEnmZGI= -k8s.io/apimachinery v0.0.0-20190809020650-423f5d784010 h1:pyoq062NftC1y/OcnbSvgolyZDJ8y4fmUPWMkdA6gfU= k8s.io/apimachinery v0.0.0-20190809020650-423f5d784010/go.mod h1:Waf/xTS2FGRrgXCkO5FP3XxTOWh0qLf2QhL1qFZZ/R8= -k8s.io/apimachinery v0.0.0-20191115015347-3c7067801da2 h1:TSH6UZ+y3etc/aDbVqow1NT8o7SJXkxhLKbp3Ywhyvg= k8s.io/apimachinery v0.0.0-20191115015347-3c7067801da2/go.mod h1:dXFS2zaQR8fyzuvRdJDHw2Aerij/yVGJSre0bZQSVJA= -k8s.io/apimachinery v0.18.3 h1:pOGcbVAhxADgUYnjS08EFXs9QMl8qaH5U4fr5LGUrSk= k8s.io/apimachinery v0.18.3/go.mod h1:OaXp26zu/5J7p0f92ASynJa1pZo06YlV9fG7BoWbCko= -k8s.io/apimachinery v0.18.5 h1:Lh6tgsM9FMkC12K5T5QjRm7rDs6aQN5JHkA0JomULDM= k8s.io/apimachinery v0.18.5/go.mod h1:OaXp26zu/5J7p0f92ASynJa1pZo06YlV9fG7BoWbCko= k8s.io/apimachinery v0.18.6 h1:RtFHnfGNfd1N0LeSrKCUznz5xtUP1elRGvHJbL3Ntag= k8s.io/apimachinery v0.18.6/go.mod h1:OaXp26zu/5J7p0f92ASynJa1pZo06YlV9fG7BoWbCko= @@ -1775,7 +1614,6 @@ k8s.io/gengo v0.0.0-20190128074634-0689ccc1d7d6/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8 k8s.io/klog v0.0.0-20181102134211-b9b56d5dfc92/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk= k8s.io/klog v0.3.0/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk= k8s.io/klog v0.3.1/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk= -k8s.io/klog v0.4.0 h1:lCJCxf/LIowc2IGS9TPjWDyXY4nOmdGdfcwwDQCOURQ= k8s.io/klog v0.4.0/go.mod h1:4Bi6QPql/J/LkTDqv7R/cd3hPo4k2DG6Ptcz060Ez5I= k8s.io/klog v1.0.0 h1:Pt+yjF5aB1xDSVbau4VsWe+dQNzA0qv1LlXdC2dF6Q8= k8s.io/klog v1.0.0/go.mod h1:4Bi6QPql/J/LkTDqv7R/cd3hPo4k2DG6Ptcz060Ez5I= @@ -1783,12 +1621,10 @@ k8s.io/klog/v2 v2.0.0 h1:Foj74zO6RbjjP4hBEKjnYtjjAhGg4jNynUdYF6fJrok= k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE= k8s.io/kube-openapi v0.0.0-20190709113604-33be087ad058/go.mod h1:nfDlWeOsu3pUf4yWGL+ERqohP4YsZcBJXWMK+gkzOA4= k8s.io/kube-openapi v0.0.0-20190722073852-5e22f3d471e6/go.mod h1:RZvgC8MSN6DjiMV6oIfEE9pDL9CYXokkfaCKZeHm3nc= -k8s.io/kube-openapi v0.0.0-20191107075043-30be4d16710a h1:UcxjrRMyNx/i/y8G7kPvLyy7rfbeuf1PYyBf973pgyU= k8s.io/kube-openapi v0.0.0-20191107075043-30be4d16710a/go.mod h1:1TqjTSzOxsLGIKfj0lK8EeCP7K1iUG65v09OM0/WG5E= k8s.io/kube-openapi v0.0.0-20200410145947-61e04a5be9a6 h1:Oh3Mzx5pJ+yIumsAD0MOECPVeXsVot0UkiaCGVyfGQY= k8s.io/kube-openapi v0.0.0-20200410145947-61e04a5be9a6/go.mod h1:GRQhZsXIAJ1xR0C9bd8UpWHZ5plfAS9fzPjJuQ6JL3E= k8s.io/utils v0.0.0-20190809000727-6c36bc71fc4a/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew= -k8s.io/utils v0.0.0-20191114200735-6ca3b61696b6 h1:p0Ai3qVtkbCG/Af26dBmU0E1W58NID3hSSh7cMyylpM= k8s.io/utils v0.0.0-20191114200735-6ca3b61696b6/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew= k8s.io/utils v0.0.0-20200324210504-a9aa75ae1b89/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew= k8s.io/utils v0.0.0-20200414100711-2df71ebbae66 h1:Ly1Oxdu5p5ZFmiVT71LFgeZETvMfZ1iBIGeOenT2JeM= @@ -1803,7 +1639,6 @@ sigs.k8s.io/structured-merge-diff v0.0.0-20190525122527-15d366b2352e/go.mod h1:w sigs.k8s.io/structured-merge-diff/v3 v3.0.0-20200116222232-67a7b8c61874/go.mod h1:PlARxl6Hbt/+BC80dRLi1qAmnMqwqDg62YvvVkZjemw= sigs.k8s.io/structured-merge-diff/v3 v3.0.0 h1:dOmIZBMfhcHS09XZkMyUgkq5trg3/jRyJYFZUiaOp8E= sigs.k8s.io/structured-merge-diff/v3 v3.0.0/go.mod h1:PlARxl6Hbt/+BC80dRLi1qAmnMqwqDg62YvvVkZjemw= -sigs.k8s.io/yaml v1.1.0 h1:4A07+ZFc2wgJwo8YNlQpr1rVlgUDlxXHhPJciaPY5gs= sigs.k8s.io/yaml v1.1.0/go.mod 
h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o= sigs.k8s.io/yaml v1.2.0 h1:kr/MCeFWJWTwyaHoR9c8EjH9OumOmoF9YGiZd7lFm/Q= sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc= diff --git a/vendor/modules.txt b/vendor/modules.txt index 77acf5edd5ffe..d9023d515998c 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -65,6 +65,8 @@ github.com/armon/go-metrics github.com/armon/go-metrics/prometheus # github.com/asaskevich/govalidator v0.0.0-20200108200545-475eaeb16496 github.com/asaskevich/govalidator +# github.com/aws/aws-lambda-go v1.17.0 +## explicit # github.com/aws/aws-sdk-go v1.33.12 github.com/aws/aws-sdk-go/aws github.com/aws/aws-sdk-go/aws/arn From 026abb9f1bd8f75c60b0bfb0f65f208576a70320 Mon Sep 17 00:00:00 2001 From: Owen Diehl Date: Tue, 7 Jul 2020 11:35:14 -0400 Subject: [PATCH 20/40] begins porting rules pkg --- pkg/ruler/compat.go | 41 - pkg/ruler/rules/alerting.go | 556 ++++++++++++ pkg/ruler/rules/alerting_test.go | 329 +++++++ pkg/ruler/rules/fixtures/rules.yaml | 6 + pkg/ruler/rules/fixtures/rules2.yaml | 5 + pkg/ruler/rules/fixtures/rules2_copy.yaml | 5 + pkg/ruler/rules/manager.go | 1004 +++++++++++++++++++++ pkg/ruler/rules/manager_test.go | 959 ++++++++++++++++++++ pkg/ruler/rules/query.go | 46 + 9 files changed, 2910 insertions(+), 41 deletions(-) create mode 100644 pkg/ruler/rules/alerting.go create mode 100644 pkg/ruler/rules/alerting_test.go create mode 100644 pkg/ruler/rules/fixtures/rules.yaml create mode 100644 pkg/ruler/rules/fixtures/rules2.yaml create mode 100644 pkg/ruler/rules/fixtures/rules2_copy.yaml create mode 100644 pkg/ruler/rules/manager.go create mode 100644 pkg/ruler/rules/manager_test.go create mode 100644 pkg/ruler/rules/query.go diff --git a/pkg/ruler/compat.go b/pkg/ruler/compat.go index 113796cc66601..6cb285fe46968 100644 --- a/pkg/ruler/compat.go +++ b/pkg/ruler/compat.go @@ -1,56 +1,15 @@ package ruler import ( - "context" "errors" "time" - "github.com/cortexproject/cortex/pkg/ruler" "github.com/cortexproject/cortex/pkg/ruler/rules" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/prometheus/pkg/labels" - "github.com/prometheus/prometheus/promql" "github.com/prometheus/prometheus/storage" - - "github.com/grafana/loki/pkg/logproto" - "github.com/grafana/loki/pkg/logql" ) -func LokiDelayedQueryFunc(engine *logql.Engine) ruler.DelayedQueryFunc { - return func(delay time.Duration) rules.QueryFunc { - return func(ctx context.Context, qs string, t time.Time) (promql.Vector, error) { - adjusted := t.Add(-delay) - params := logql.NewLiteralParams( - qs, - adjusted, - adjusted, - 0, - 0, - logproto.FORWARD, - 0, - nil, - ) - q := engine.Query(params) - - res, err := q.Exec(ctx) - if err != nil { - return nil, err - } - switch v := res.Data.(type) { - case promql.Vector: - return v, nil - case promql.Scalar: - return promql.Vector{promql.Sample{ - Point: promql.Point(v), - Metric: labels.Labels{}, - }}, nil - default: - return nil, errors.New("rule result is not a vector or scalar") - } - } - } -} - func InMemoryAppendableHistory(r prometheus.Registerer) func(string, *rules.ManagerOptions) (rules.Appendable, rules.TenantAlertHistory) { metrics := NewMetrics(r) return func(userID string, opts *rules.ManagerOptions) (rules.Appendable, rules.TenantAlertHistory) { diff --git a/pkg/ruler/rules/alerting.go b/pkg/ruler/rules/alerting.go new file mode 100644 index 0000000000000..85ce868cc74b1 --- /dev/null +++ b/pkg/ruler/rules/alerting.go @@ -0,0 +1,556 @@ +// Copyright 2013 The Prometheus Authors +// 
Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package rules + +import ( + "context" + "fmt" + "net/url" + "strings" + "sync" + "time" + + html_template "html/template" + + yaml "gopkg.in/yaml.v2" + + "github.com/go-kit/kit/log" + "github.com/go-kit/kit/log/level" + "github.com/pkg/errors" + "github.com/prometheus/common/model" + + "github.com/prometheus/prometheus/pkg/labels" + "github.com/prometheus/prometheus/pkg/rulefmt" + "github.com/prometheus/prometheus/pkg/timestamp" + "github.com/prometheus/prometheus/promql" + "github.com/prometheus/prometheus/promql/parser" + "github.com/prometheus/prometheus/rules" + "github.com/prometheus/prometheus/template" + "github.com/prometheus/prometheus/util/strutil" +) + +const ( + // AlertMetricName is the metric name for synthetic alert timeseries. + alertMetricName = "ALERTS" + // AlertForStateMetricName is the metric name for 'for' state of alert. + AlertForStateMetricName = "ALERTS_FOR_STATE" + + // AlertNameLabel is the label name indicating the name of an alert. + alertNameLabel = "alertname" + // AlertStateLabel is the label name indicating the state of an alert. + alertStateLabel = "alertstate" +) + +// AlertState denotes the state of an active alert. +type AlertState int + +const ( + // StateInactive is the state of an alert that is neither firing nor pending. + StateInactive AlertState = iota + // StatePending is the state of an alert that has been active for less than + // the configured threshold duration. + StatePending + // StateFiring is the state of an alert that has been active for longer than + // the configured threshold duration. + StateFiring +) + +func (s AlertState) String() string { + switch s { + case StateInactive: + return "inactive" + case StatePending: + return "pending" + case StateFiring: + return "firing" + } + panic(errors.Errorf("unknown alert state: %s", s.String())) +} + +// Alert is the user-level representation of a single instance of an alerting rule. +type Alert struct { + State AlertState + + Labels labels.Labels + Annotations labels.Labels + + // The value at the last evaluation of the alerting expression. + Value float64 + // The interval during which the condition of this alert held true. + // ResolvedAt will be 0 to indicate a still active alert. + ActiveAt time.Time + FiredAt time.Time + ResolvedAt time.Time + LastSentAt time.Time + ValidUntil time.Time +} + +func (a *Alert) needsSending(ts time.Time, resendDelay time.Duration) bool { + if a.State == StatePending { + return false + } + + // if an alert has been resolved since the last send, resend it + if a.ResolvedAt.After(a.LastSentAt) { + return true + } + + return a.LastSentAt.Add(resendDelay).Before(ts) +} + +// An AlertingRule generates alerts from its vector expression. +type AlertingRule struct { + // The name of the alert. + name string + // The vector expression from which to generate alerts. 
+ vector fmt.Stringer + // The duration for which a labelset needs to persist in the expression + // output vector before an alert transitions from Pending to Firing state. + holdDuration time.Duration + // Extra labels to attach to the resulting alert sample vectors. + labels labels.Labels + // Non-identifying key/value pairs. + annotations labels.Labels + // External labels from the global config. + externalLabels map[string]string + // true if old state has been restored. We start persisting samples for ALERT_FOR_STATE + // only after the restoration. + restored bool + // Protects the below. + mtx sync.Mutex + // Time in seconds taken to evaluate rule. + evaluationDuration time.Duration + // Timestamp of last evaluation of rule. + evaluationTimestamp time.Time + // The health of the alerting rule. + health RuleHealth + // The last error seen by the alerting rule. + lastError error + // A map of alerts which are currently active (Pending or Firing), keyed by + // the fingerprint of the labelset they correspond to. + active map[uint64]*Alert + + logger log.Logger +} + +// NewAlertingRule constructs a new AlertingRule. +func NewAlertingRule( + name string, vec parser.Expr, hold time.Duration, + labels, annotations, externalLabels labels.Labels, + restored bool, logger log.Logger, +) *AlertingRule { + el := make(map[string]string, len(externalLabels)) + for _, lbl := range externalLabels { + el[lbl.Name] = lbl.Value + } + + return &AlertingRule{ + name: name, + vector: vec, + holdDuration: hold, + labels: labels, + annotations: annotations, + externalLabels: el, + health: rules.HealthUnknown, + active: map[uint64]*Alert{}, + logger: logger, + restored: restored, + } +} + +// Name returns the name of the alerting rule. +func (r *AlertingRule) Name() string { + return r.name +} + +// SetLastError sets the current error seen by the alerting rule. +func (r *AlertingRule) SetLastError(err error) { + r.mtx.Lock() + defer r.mtx.Unlock() + r.lastError = err +} + +// LastError returns the last error seen by the alerting rule. +func (r *AlertingRule) LastError() error { + r.mtx.Lock() + defer r.mtx.Unlock() + return r.lastError +} + +// SetHealth sets the current health of the alerting rule. +func (r *AlertingRule) SetHealth(health RuleHealth) { + r.mtx.Lock() + defer r.mtx.Unlock() + r.health = health +} + +// Health returns the current health of the alerting rule. +func (r *AlertingRule) Health() RuleHealth { + r.mtx.Lock() + defer r.mtx.Unlock() + return r.health +} + +// Query returns the query expression of the alerting rule. +func (r *AlertingRule) Query() fmt.Stringer { + return r.vector +} + +// Duration returns the hold duration of the alerting rule. +func (r *AlertingRule) Duration() time.Duration { + return r.holdDuration +} + +// Labels returns the labels of the alerting rule. +func (r *AlertingRule) Labels() labels.Labels { + return r.labels +} + +// Annotations returns the annotations of the alerting rule. +func (r *AlertingRule) Annotations() labels.Labels { + return r.annotations +} + +func (r *AlertingRule) sample(alert *Alert, ts time.Time) promql.Sample { + lb := labels.NewBuilder(r.labels) + + for _, l := range alert.Labels { + lb.Set(l.Name, l.Value) + } + + lb.Set(labels.MetricName, alertMetricName) + lb.Set(labels.AlertName, r.name) + lb.Set(alertStateLabel, alert.State.String()) + + s := promql.Sample{ + Metric: lb.Labels(), + Point: promql.Point{T: timestamp.FromTime(ts), V: 1}, + } + return s +} + +// forStateSample returns the sample for ALERTS_FOR_STATE. 
+func (r *AlertingRule) forStateSample(alert *Alert, ts time.Time, v float64) promql.Sample { + lb := labels.NewBuilder(r.labels) + + for _, l := range alert.Labels { + lb.Set(l.Name, l.Value) + } + + lb.Set(labels.MetricName, AlertForStateMetricName) + lb.Set(labels.AlertName, r.name) + + s := promql.Sample{ + Metric: lb.Labels(), + Point: promql.Point{T: timestamp.FromTime(ts), V: v}, + } + return s +} + +// SetEvaluationDuration updates evaluationDuration to the duration it took to evaluate the rule on its last evaluation. +func (r *AlertingRule) SetEvaluationDuration(dur time.Duration) { + r.mtx.Lock() + defer r.mtx.Unlock() + r.evaluationDuration = dur +} + +// GetEvaluationDuration returns the time in seconds it took to evaluate the alerting rule. +func (r *AlertingRule) GetEvaluationDuration() time.Duration { + r.mtx.Lock() + defer r.mtx.Unlock() + return r.evaluationDuration +} + +// SetEvaluationTimestamp updates evaluationTimestamp to the timestamp of when the rule was last evaluated. +func (r *AlertingRule) SetEvaluationTimestamp(ts time.Time) { + r.mtx.Lock() + defer r.mtx.Unlock() + r.evaluationTimestamp = ts +} + +// GetEvaluationTimestamp returns the time the evaluation took place. +func (r *AlertingRule) GetEvaluationTimestamp() time.Time { + r.mtx.Lock() + defer r.mtx.Unlock() + return r.evaluationTimestamp +} + +// SetRestored updates the restoration state of the alerting rule. +func (r *AlertingRule) SetRestored(restored bool) { + r.restored = restored +} + +// resolvedRetention is the duration for which a resolved alert instance +// is kept in memory state and consequently repeatedly sent to the AlertManager. +const resolvedRetention = 15 * time.Minute + +// Eval evaluates the rule expression and then creates pending alerts and fires +// or removes previously pending alerts accordingly. +func (r *AlertingRule) Eval(ctx context.Context, ts time.Time, query rules.QueryFunc, externalURL *url.URL) (promql.Vector, error) { + res, err := query(ctx, r.vector.String(), ts) + if err != nil { + r.SetHealth(rules.HealthBad) + r.SetLastError(err) + return nil, err + } + + r.mtx.Lock() + defer r.mtx.Unlock() + + // Create pending alerts for any new vector elements in the alert expression + // or update the expression value for existing elements. + resultFPs := map[uint64]struct{}{} + + var vec promql.Vector + var alerts = make(map[uint64]*Alert, len(res)) + for _, smpl := range res { + // Provide the alert information to the template. + l := make(map[string]string, len(smpl.Metric)) + for _, lbl := range smpl.Metric { + l[lbl.Name] = lbl.Value + } + + tmplData := template.AlertTemplateData(l, r.externalLabels, smpl.V) + // Inject some convenience variables that are easier to remember for users + // who are not used to Go's templating system. 
+ defs := []string{ + "{{$labels := .Labels}}", + "{{$externalLabels := .ExternalLabels}}", + "{{$value := .Value}}", + } + + expand := func(text string) string { + tmpl := template.NewTemplateExpander( + ctx, + strings.Join(append(defs, text), ""), + "__alert_"+r.Name(), + tmplData, + model.Time(timestamp.FromTime(ts)), + template.QueryFunc(query), + externalURL, + ) + result, err := tmpl.Expand() + if err != nil { + result = fmt.Sprintf("", err) + level.Warn(r.logger).Log("msg", "Expanding alert template failed", "err", err, "data", tmplData) + } + return result + } + + lb := labels.NewBuilder(smpl.Metric).Del(labels.MetricName) + + for _, l := range r.labels { + lb.Set(l.Name, expand(l.Value)) + } + lb.Set(labels.AlertName, r.Name()) + + annotations := make(labels.Labels, 0, len(r.annotations)) + for _, a := range r.annotations { + annotations = append(annotations, labels.Label{Name: a.Name, Value: expand(a.Value)}) + } + + lbs := lb.Labels() + h := lbs.Hash() + resultFPs[h] = struct{}{} + + if _, ok := alerts[h]; ok { + err = fmt.Errorf("vector contains metrics with the same labelset after applying alert labels") + // We have already acquired the lock above hence using SetHealth and + // SetLastError will deadlock. + r.health = rules.HealthBad + r.lastError = err + return nil, err + } + + alerts[h] = &Alert{ + Labels: lbs, + Annotations: annotations, + ActiveAt: ts, + State: StatePending, + Value: smpl.V, + } + } + + for h, a := range alerts { + // Check whether we already have alerting state for the identifying label set. + // Update the last value and annotations if so, create a new alert entry otherwise. + if alert, ok := r.active[h]; ok && alert.State != StateInactive { + alert.Value = a.Value + alert.Annotations = a.Annotations + continue + } + + r.active[h] = a + } + + // Check if any pending alerts should be removed or fire now. Write out alert timeseries. + for fp, a := range r.active { + if _, ok := resultFPs[fp]; !ok { + // If the alert was previously firing, keep it around for a given + // retention time so it is reported as resolved to the AlertManager. + if a.State == StatePending || (!a.ResolvedAt.IsZero() && ts.Sub(a.ResolvedAt) > resolvedRetention) { + delete(r.active, fp) + } + if a.State != StateInactive { + a.State = StateInactive + a.ResolvedAt = ts + } + continue + } + + if a.State == StatePending && ts.Sub(a.ActiveAt) >= r.holdDuration { + a.State = StateFiring + a.FiredAt = ts + } + + if r.restored { + vec = append(vec, r.sample(a, ts)) + vec = append(vec, r.forStateSample(a, ts, float64(a.ActiveAt.Unix()))) + } + } + + // We have already acquired the lock above hence using SetHealth and + // SetLastError will deadlock. + r.health = rules.HealthGood + r.lastError = err + return vec, nil +} + +// State returns the maximum state of alert instances for this rule. +// StateFiring > StatePending > StateInactive +func (r *AlertingRule) State() AlertState { + r.mtx.Lock() + defer r.mtx.Unlock() + + maxState := StateInactive + for _, a := range r.active { + if a.State > maxState { + maxState = a.State + } + } + return maxState +} + +// ActiveAlerts returns a slice of active alerts. +func (r *AlertingRule) ActiveAlerts() []*Alert { + var res []*Alert + for _, a := range r.currentAlerts() { + if a.ResolvedAt.IsZero() { + res = append(res, a) + } + } + return res +} + +// currentAlerts returns all instances of alerts for this rule. This may include +// inactive alerts that were previously firing. 
+func (r *AlertingRule) currentAlerts() []*Alert { + r.mtx.Lock() + defer r.mtx.Unlock() + + alerts := make([]*Alert, 0, len(r.active)) + + for _, a := range r.active { + anew := *a + alerts = append(alerts, &anew) + } + return alerts +} + +// ForEachActiveAlert runs the given function on each alert. +// This should be used when you want to use the actual alerts from the AlertingRule +// and not on its copy. +// If you want to run on a copy of alerts then don't use this, get the alerts from 'ActiveAlerts()'. +func (r *AlertingRule) ForEachActiveAlert(f func(*Alert)) { + r.mtx.Lock() + defer r.mtx.Unlock() + + for _, a := range r.active { + f(a) + } +} + +func (r *AlertingRule) sendAlerts(ctx context.Context, ts time.Time, resendDelay time.Duration, interval time.Duration, notifyFunc NotifyFunc) { + alerts := []*Alert{} + r.ForEachActiveAlert(func(alert *Alert) { + if alert.needsSending(ts, resendDelay) { + alert.LastSentAt = ts + // Allow for a couple Eval or Alertmanager send failures + delta := resendDelay + if interval > resendDelay { + delta = interval + } + alert.ValidUntil = ts.Add(3 * delta) + anew := *alert + alerts = append(alerts, &anew) + } + }) + notifyFunc(ctx, r.vector.String(), alerts...) +} + +func (r *AlertingRule) String() string { + ar := rulefmt.Rule{ + Alert: r.name, + Expr: r.vector.String(), + For: model.Duration(r.holdDuration), + Labels: r.labels.Map(), + Annotations: r.annotations.Map(), + } + + byt, err := yaml.Marshal(ar) + if err != nil { + return fmt.Sprintf("error marshaling alerting rule: %s", err.Error()) + } + + return string(byt) +} + +// HTMLSnippet returns an HTML snippet representing this alerting rule. The +// resulting snippet is expected to be presented in a
<pre> element, so that
+// line breaks and other returned whitespace is respected.
+func (r *AlertingRule) HTMLSnippet(pathPrefix string) html_template.HTML {
+	alertMetric := model.Metric{
+		model.MetricNameLabel: alertMetricName,
+		alertNameLabel:        model.LabelValue(r.name),
+	}
+
+	labelsMap := make(map[string]string, len(r.labels))
+	for _, l := range r.labels {
+		labelsMap[l.Name] = html_template.HTMLEscapeString(l.Value)
+	}
+
+	annotationsMap := make(map[string]string, len(r.annotations))
+	for _, l := range r.annotations {
+		annotationsMap[l.Name] = html_template.HTMLEscapeString(l.Value)
+	}
+
+	ar := rulefmt.Rule{
+		Alert:       fmt.Sprintf("<a href=%q>%s</a>", pathPrefix+strutil.TableLinkForExpression(alertMetric.String()), r.name),
+		Expr:        fmt.Sprintf("<a href=%q>%s</a>", pathPrefix+strutil.TableLinkForExpression(r.vector.String()), html_template.HTMLEscapeString(r.vector.String())),
+		For:         model.Duration(r.holdDuration),
+		Labels:      labelsMap,
+		Annotations: annotationsMap,
+	}
+
+	byt, err := yaml.Marshal(ar)
+	if err != nil {
+		return html_template.HTML(fmt.Sprintf("error marshaling alerting rule: %q", html_template.HTMLEscapeString(err.Error())))
+	}
+	return html_template.HTML(byt)
+}
+
+// HoldDuration returns the holdDuration of the alerting rule.
+func (r *AlertingRule) HoldDuration() time.Duration {
+	return r.holdDuration
+}
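
For orientation, a minimal sketch of driving the ported rule type directly (not part of the patch): it assumes a caller-supplied Prometheus rules.QueryFunc (in the ruler this would be backed by a LogQL engine via the compat layer), and the helper name exampleEvalOnce is hypothetical.

package rules

import (
	"context"
	"time"

	"github.com/go-kit/kit/log"
	"github.com/prometheus/prometheus/pkg/labels"
	"github.com/prometheus/prometheus/promql"
	"github.com/prometheus/prometheus/promql/parser"
	promRules "github.com/prometheus/prometheus/rules"
)

// exampleEvalOnce (hypothetical) builds one alerting rule from a LogQL expression,
// kept as an opaque StringLiteral exactly as LoadGroups does in manager.go, and
// evaluates it once with the supplied query function.
func exampleEvalOnce(ctx context.Context, queryFn promRules.QueryFunc) (promql.Vector, error) {
	rule := NewAlertingRule(
		"HighErrorRate",
		&parser.StringLiteral{Val: `sum(rate({job="app"} |= "error" [5m])) > 10`},
		5*time.Minute,                          // 'for' hold duration
		labels.FromStrings("severity", "page"), // extra alert labels
		nil,                                    // annotations
		nil,                                    // external labels
		true,                                   // restored: emit ALERTS/ALERTS_FOR_STATE samples immediately
		log.NewNopLogger(),
	)
	return rule.Eval(ctx, time.Now(), queryFn, nil)
}
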
diff --git a/pkg/ruler/rules/alerting_test.go b/pkg/ruler/rules/alerting_test.go
new file mode 100644
index 0000000000000..5387cdd731d38
--- /dev/null
+++ b/pkg/ruler/rules/alerting_test.go
@@ -0,0 +1,329 @@
+// Copyright 2016 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package rules
+
+import (
+	"context"
+	"fmt"
+	"testing"
+	"time"
+
+	"github.com/go-kit/kit/log"
+	"github.com/prometheus/prometheus/pkg/labels"
+	"github.com/prometheus/prometheus/pkg/timestamp"
+	"github.com/prometheus/prometheus/promql"
+	"github.com/prometheus/prometheus/promql/parser"
+	"github.com/prometheus/prometheus/rules"
+	"github.com/prometheus/prometheus/util/teststorage"
+	"github.com/prometheus/prometheus/util/testutil"
+)
+
+func TestAlertingRuleHTMLSnippet(t *testing.T) {
+	expr, err := parser.ParseExpr(`foo{html="BOLD"}`)
+	testutil.Ok(t, err)
+	rule := NewAlertingRule("testrule", expr, 0, labels.FromStrings("html", "BOLD"), labels.FromStrings("html", "BOLD"), nil, false, nil)
+
+	const want = `alert: testrule
+expr: foo{html="<b>BOLD<b>"}
+labels:
+  html: '<b>BOLD</b>'
+annotations:
+  html: '<b>BOLD</b>'
+`
+
+	got := rule.HTMLSnippet("/test/prefix")
+	testutil.Assert(t, want == got, "incorrect HTML snippet; want:\n\n|%v|\n\ngot:\n\n|%v|", want, got)
+}
+
+func TestAlertingRuleLabelsUpdate(t *testing.T) {
+	suite, err := promql.NewTest(t, `
+		load 1m
+			http_requests{job="app-server", instance="0"}	75 85 70 70
+	`)
+	testutil.Ok(t, err)
+	defer suite.Close()
+
+	testutil.Ok(t, suite.Run())
+
+	expr, err := parser.ParseExpr(`http_requests < 100`)
+	testutil.Ok(t, err)
+
+	rule := NewAlertingRule(
+		"HTTPRequestRateLow",
+		expr,
+		time.Minute,
+		// Basing alerting rule labels off of a value that can change is a very bad idea.
+		// If an alert is going back and forth between two label values it will never fire.
+		// Instead, you should write two alerts with constant labels.
+		labels.FromStrings("severity", "{{ if lt $value 80.0 }}critical{{ else }}warning{{ end }}"),
+		nil, nil, true, nil,
+	)
+
+	results := []promql.Vector{
+		{
+			{
+				Metric: labels.FromStrings(
+					"__name__", "ALERTS",
+					"alertname", "HTTPRequestRateLow",
+					"alertstate", "pending",
+					"instance", "0",
+					"job", "app-server",
+					"severity", "critical",
+				),
+				Point: promql.Point{V: 1},
+			},
+		},
+		{
+			{
+				Metric: labels.FromStrings(
+					"__name__", "ALERTS",
+					"alertname", "HTTPRequestRateLow",
+					"alertstate", "pending",
+					"instance", "0",
+					"job", "app-server",
+					"severity", "warning",
+				),
+				Point: promql.Point{V: 1},
+			},
+		},
+		{
+			{
+				Metric: labels.FromStrings(
+					"__name__", "ALERTS",
+					"alertname", "HTTPRequestRateLow",
+					"alertstate", "pending",
+					"instance", "0",
+					"job", "app-server",
+					"severity", "critical",
+				),
+				Point: promql.Point{V: 1},
+			},
+		},
+		{
+			{
+				Metric: labels.FromStrings(
+					"__name__", "ALERTS",
+					"alertname", "HTTPRequestRateLow",
+					"alertstate", "firing",
+					"instance", "0",
+					"job", "app-server",
+					"severity", "critical",
+				),
+				Point: promql.Point{V: 1},
+			},
+		},
+	}
+
+	baseTime := time.Unix(0, 0)
+	for i, result := range results {
+		t.Logf("case %d", i)
+		evalTime := baseTime.Add(time.Duration(i) * time.Minute)
+		result[0].Point.T = timestamp.FromTime(evalTime)
+		res, err := rule.Eval(suite.Context(), evalTime, rules.EngineQueryFunc(suite.QueryEngine(), suite.Storage()), nil)
+		testutil.Ok(t, err)
+
+		var filteredRes promql.Vector // After removing 'ALERTS_FOR_STATE' samples.
+		for _, smpl := range res {
+			smplName := smpl.Metric.Get("__name__")
+			if smplName == "ALERTS" {
+				filteredRes = append(filteredRes, smpl)
+			} else {
+				// If not 'ALERTS', it has to be 'ALERTS_FOR_STATE'.
+				testutil.Equals(t, "ALERTS_FOR_STATE", smplName)
+			}
+		}
+
+		testutil.Equals(t, result, filteredRes)
+	}
+}
+
+func TestAlertingRuleExternalLabelsInTemplate(t *testing.T) {
+	suite, err := promql.NewTest(t, `
+		load 1m
+			http_requests{job="app-server", instance="0"}	75 85 70 70
+	`)
+	testutil.Ok(t, err)
+	defer suite.Close()
+
+	testutil.Ok(t, suite.Run())
+
+	expr, err := parser.ParseExpr(`http_requests < 100`)
+	testutil.Ok(t, err)
+
+	ruleWithoutExternalLabels := NewAlertingRule(
+		"ExternalLabelDoesNotExist",
+		expr,
+		time.Minute,
+		labels.FromStrings("templated_label", "There are {{ len $externalLabels }} external Labels, of which foo is {{ $externalLabels.foo }}."),
+		nil,
+		nil,
+		true, log.NewNopLogger(),
+	)
+	ruleWithExternalLabels := NewAlertingRule(
+		"ExternalLabelExists",
+		expr,
+		time.Minute,
+		labels.FromStrings("templated_label", "There are {{ len $externalLabels }} external Labels, of which foo is {{ $externalLabels.foo }}."),
+		nil,
+		labels.FromStrings("foo", "bar", "dings", "bums"),
+		true, log.NewNopLogger(),
+	)
+	result := promql.Vector{
+		{
+			Metric: labels.FromStrings(
+				"__name__", "ALERTS",
+				"alertname", "ExternalLabelDoesNotExist",
+				"alertstate", "pending",
+				"instance", "0",
+				"job", "app-server",
+				"templated_label", "There are 0 external Labels, of which foo is .",
+			),
+			Point: promql.Point{V: 1},
+		},
+		{
+			Metric: labels.FromStrings(
+				"__name__", "ALERTS",
+				"alertname", "ExternalLabelExists",
+				"alertstate", "pending",
+				"instance", "0",
+				"job", "app-server",
+				"templated_label", "There are 2 external Labels, of which foo is bar.",
+			),
+			Point: promql.Point{V: 1},
+		},
+	}
+
+	evalTime := time.Unix(0, 0)
+	result[0].Point.T = timestamp.FromTime(evalTime)
+	result[1].Point.T = timestamp.FromTime(evalTime)
+
+	var filteredRes promql.Vector // After removing 'ALERTS_FOR_STATE' samples.
+	res, err := ruleWithoutExternalLabels.Eval(
+		suite.Context(), evalTime, rules.EngineQueryFunc(suite.QueryEngine(), suite.Storage()), nil,
+	)
+	testutil.Ok(t, err)
+	for _, smpl := range res {
+		smplName := smpl.Metric.Get("__name__")
+		if smplName == "ALERTS" {
+			filteredRes = append(filteredRes, smpl)
+		} else {
+			// If not 'ALERTS', it has to be 'ALERTS_FOR_STATE'.
+			testutil.Equals(t, "ALERTS_FOR_STATE", smplName)
+		}
+	}
+
+	res, err = ruleWithExternalLabels.Eval(
+		suite.Context(), evalTime, rules.EngineQueryFunc(suite.QueryEngine(), suite.Storage()), nil,
+	)
+	testutil.Ok(t, err)
+	for _, smpl := range res {
+		smplName := smpl.Metric.Get("__name__")
+		if smplName == "ALERTS" {
+			filteredRes = append(filteredRes, smpl)
+		} else {
+			// If not 'ALERTS', it has to be 'ALERTS_FOR_STATE'.
+			testutil.Equals(t, "ALERTS_FOR_STATE", smplName)
+		}
+	}
+
+	testutil.Equals(t, result, filteredRes)
+}
+
+func TestAlertingRuleEmptyLabelFromTemplate(t *testing.T) {
+	suite, err := promql.NewTest(t, `
+		load 1m
+			http_requests{job="app-server", instance="0"}	75 85 70 70
+	`)
+	testutil.Ok(t, err)
+	defer suite.Close()
+
+	testutil.Ok(t, suite.Run())
+
+	expr, err := parser.ParseExpr(`http_requests < 100`)
+	testutil.Ok(t, err)
+
+	rule := NewAlertingRule(
+		"EmptyLabel",
+		expr,
+		time.Minute,
+		labels.FromStrings("empty_label", ""),
+		nil,
+		nil,
+		true, log.NewNopLogger(),
+	)
+	result := promql.Vector{
+		{
+			Metric: labels.FromStrings(
+				"__name__", "ALERTS",
+				"alertname", "EmptyLabel",
+				"alertstate", "pending",
+				"instance", "0",
+				"job", "app-server",
+			),
+			Point: promql.Point{V: 1},
+		},
+	}
+
+	evalTime := time.Unix(0, 0)
+	result[0].Point.T = timestamp.FromTime(evalTime)
+
+	var filteredRes promql.Vector // After removing 'ALERTS_FOR_STATE' samples.
+	res, err := rule.Eval(
+		suite.Context(), evalTime, rules.EngineQueryFunc(suite.QueryEngine(), suite.Storage()), nil,
+	)
+	testutil.Ok(t, err)
+	for _, smpl := range res {
+		smplName := smpl.Metric.Get("__name__")
+		if smplName == "ALERTS" {
+			filteredRes = append(filteredRes, smpl)
+		} else {
+			// If not 'ALERTS', it has to be 'ALERTS_FOR_STATE'.
+			testutil.Equals(t, "ALERTS_FOR_STATE", smplName)
+		}
+	}
+	testutil.Equals(t, result, filteredRes)
+}
+
+func TestAlertingRuleDuplicate(t *testing.T) {
+	storage := teststorage.New(t)
+	defer storage.Close()
+
+	opts := promql.EngineOpts{
+		Logger:     nil,
+		Reg:        nil,
+		MaxSamples: 10,
+		Timeout:    10 * time.Second,
+	}
+
+	engine := promql.NewEngine(opts)
+	ctx, cancelCtx := context.WithCancel(context.Background())
+	defer cancelCtx()
+
+	now := time.Now()
+
+	expr, _ := parser.ParseExpr(`vector(0) or label_replace(vector(0),"test","x","","")`)
+	rule := NewAlertingRule(
+		"foo",
+		expr,
+		time.Minute,
+		labels.FromStrings("test", "test"),
+		nil,
+		nil,
+		true, log.NewNopLogger(),
+	)
+	_, err := rule.Eval(ctx, now, rules.EngineQueryFunc(engine, storage), nil)
+	testutil.NotOk(t, err)
+	e := fmt.Errorf("vector contains metrics with the same labelset after applying alert labels")
+	testutil.ErrorEqual(t, e, err)
+}
diff --git a/pkg/ruler/rules/fixtures/rules.yaml b/pkg/ruler/rules/fixtures/rules.yaml
new file mode 100644
index 0000000000000..38fe21cb8d047
--- /dev/null
+++ b/pkg/ruler/rules/fixtures/rules.yaml
@@ -0,0 +1,6 @@
+groups:
+  - name: test
+    rules:
+    - record: job:http_requests:rate5m
+      expr: sum by (job)(rate({job="http"}[5m]))
+
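
As an aside (not part of the patch), the expression above is LogQL rather than PromQL, so its validity is checked with Loki's parser; a minimal sketch of that check, mirroring what LoadGroups does in manager.go below:

package main

import (
	"fmt"

	"github.com/grafana/loki/pkg/logql"
)

func main() {
	// Parse the fixture's recording-rule expression with the LogQL parser.
	expr, err := logql.ParseExpr(`sum by (job)(rate({job="http"}[5m]))`)
	if err != nil {
		panic(err)
	}
	// The string form is what gets wrapped in a parser.StringLiteral for the rule.
	fmt.Println(expr.String())
}
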
diff --git a/pkg/ruler/rules/fixtures/rules2.yaml b/pkg/ruler/rules/fixtures/rules2.yaml
new file mode 100644
index 0000000000000..e405138f8af5c
--- /dev/null
+++ b/pkg/ruler/rules/fixtures/rules2.yaml
@@ -0,0 +1,5 @@
+groups:
+  - name: test_2
+    rules:
+    - record: test_2
+      expr: vector(2)
diff --git a/pkg/ruler/rules/fixtures/rules2_copy.yaml b/pkg/ruler/rules/fixtures/rules2_copy.yaml
new file mode 100644
index 0000000000000..dd74b65116f35
--- /dev/null
+++ b/pkg/ruler/rules/fixtures/rules2_copy.yaml
@@ -0,0 +1,5 @@
+groups:
+  - name: test_2 copy
+    rules:
+    - record: test_2
+      expr: vector(2)
diff --git a/pkg/ruler/rules/manager.go b/pkg/ruler/rules/manager.go
new file mode 100644
index 0000000000000..26961d727c098
--- /dev/null
+++ b/pkg/ruler/rules/manager.go
@@ -0,0 +1,1004 @@
+package rules
+
+import (
+	"context"
+	"math"
+	"net/url"
+	"sort"
+	"sync"
+	"time"
+
+	"github.com/cortexproject/cortex/pkg/ruler"
+	"github.com/go-kit/kit/log"
+	"github.com/go-kit/kit/log/level"
+	"github.com/grafana/loki/pkg/logql"
+	"github.com/opentracing/opentracing-go"
+	"github.com/pkg/errors"
+	"github.com/prometheus/client_golang/prometheus"
+	"github.com/prometheus/common/model"
+	"github.com/prometheus/prometheus/pkg/labels"
+	"github.com/prometheus/prometheus/pkg/rulefmt"
+	"github.com/prometheus/prometheus/pkg/timestamp"
+	"github.com/prometheus/prometheus/pkg/value"
+	"github.com/prometheus/prometheus/promql"
+	"github.com/prometheus/prometheus/promql/parser"
+	promRules "github.com/prometheus/prometheus/rules"
+	"github.com/prometheus/prometheus/storage"
+)
+
+// RuleHealth describes the health state of a rule. It is an alias of the
+// upstream Prometheus rules package type.
+type RuleHealth = promRules.RuleHealth
+
+// Constants for instrumentation.
+const namespace = "prometheus"
+
+// Metrics bundles the instrumentation collectors shared by rule groups.
+type Metrics struct {
+	evalDuration        prometheus.Summary
+	iterationDuration   prometheus.Summary
+	iterationsMissed    prometheus.Counter
+	iterationsScheduled prometheus.Counter
+	evalTotal           *prometheus.CounterVec
+	evalFailures        *prometheus.CounterVec
+	groupInterval       *prometheus.GaugeVec
+	groupLastEvalTime   *prometheus.GaugeVec
+	groupLastDuration   *prometheus.GaugeVec
+	groupRules          *prometheus.GaugeVec
+}
+
+// NewGroupMetrics creates a new instance of Metrics and registers it with the provided registerer,
+// if not nil.
+func NewGroupMetrics(reg prometheus.Registerer) *Metrics {
+	m := &Metrics{
+		evalDuration: prometheus.NewSummary(
+			prometheus.SummaryOpts{
+				Namespace:  namespace,
+				Name:       "rule_evaluation_duration_seconds",
+				Help:       "The duration for a rule to execute.",
+				Objectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001},
+			}),
+		iterationDuration: prometheus.NewSummary(prometheus.SummaryOpts{
+			Namespace:  namespace,
+			Name:       "rule_group_duration_seconds",
+			Help:       "The duration of rule group evaluations.",
+			Objectives: map[float64]float64{0.01: 0.001, 0.05: 0.005, 0.5: 0.05, 0.90: 0.01, 0.99: 0.001},
+		}),
+		iterationsMissed: prometheus.NewCounter(prometheus.CounterOpts{
+			Namespace: namespace,
+			Name:      "rule_group_iterations_missed_total",
+			Help:      "The total number of rule group evaluations missed due to slow rule group evaluation.",
+		}),
+		iterationsScheduled: prometheus.NewCounter(prometheus.CounterOpts{
+			Namespace: namespace,
+			Name:      "rule_group_iterations_total",
+			Help:      "The total number of scheduled rule group evaluations, whether executed or missed.",
+		}),
+		evalTotal: prometheus.NewCounterVec(
+			prometheus.CounterOpts{
+				Namespace: namespace,
+				Name:      "rule_evaluations_total",
+				Help:      "The total number of rule evaluations.",
+			},
+			[]string{"rule_group"},
+		),
+		evalFailures: prometheus.NewCounterVec(
+			prometheus.CounterOpts{
+				Namespace: namespace,
+				Name:      "rule_evaluation_failures_total",
+				Help:      "The total number of rule evaluation failures.",
+			},
+			[]string{"rule_group"},
+		),
+		groupInterval: prometheus.NewGaugeVec(
+			prometheus.GaugeOpts{
+				Namespace: namespace,
+				Name:      "rule_group_interval_seconds",
+				Help:      "The interval of a rule group.",
+			},
+			[]string{"rule_group"},
+		),
+		groupLastEvalTime: prometheus.NewGaugeVec(
+			prometheus.GaugeOpts{
+				Namespace: namespace,
+				Name:      "rule_group_last_evaluation_timestamp_seconds",
+				Help:      "The timestamp of the last rule group evaluation in seconds.",
+			},
+			[]string{"rule_group"},
+		),
+		groupLastDuration: prometheus.NewGaugeVec(
+			prometheus.GaugeOpts{
+				Namespace: namespace,
+				Name:      "rule_group_last_duration_seconds",
+				Help:      "The duration of the last rule group evaluation.",
+			},
+			[]string{"rule_group"},
+		),
+		groupRules: prometheus.NewGaugeVec(
+			prometheus.GaugeOpts{
+				Namespace: namespace,
+				Name:      "rule_group_rules",
+				Help:      "The number of rules.",
+			},
+			[]string{"rule_group"},
+		),
+	}
+
+	if reg != nil {
+		reg.MustRegister(
+			m.evalDuration,
+			m.iterationDuration,
+			m.iterationsMissed,
+			m.iterationsScheduled,
+			m.evalTotal,
+			m.evalFailures,
+			m.groupInterval,
+			m.groupLastEvalTime,
+			m.groupLastDuration,
+			m.groupRules,
+		)
+	}
+
+	return m
+}
+
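
An aside (not part of the patch): because ManagerOptions carries a Metrics field, the collectors are registered once per registerer and shared by every group; a minimal sketch, assuming one registry per tenant and a hypothetical helper name:

package rules

import "github.com/prometheus/client_golang/prometheus"

// exampleSharedMetrics (hypothetical) registers the group collectors once on a
// per-tenant registry and hands them to the manager via ManagerOptions.Metrics,
// so NewManager/NewGroup reuse them instead of registering again.
func exampleSharedMetrics() *ManagerOptions {
	reg := prometheus.NewRegistry() // assumed per-tenant registry
	return &ManagerOptions{
		Registerer: reg,
		Metrics:    NewGroupMetrics(reg),
	}
}
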
+// Group is a set of rules that have a logical relation.
+type Group struct {
+	name                 string
+	file                 string
+	interval             time.Duration
+	rules                []promRules.Rule
+	seriesInPreviousEval []map[string]labels.Labels // One per Rule.
+	staleSeries          []labels.Labels
+	opts                 *ManagerOptions
+	mtx                  sync.Mutex
+	evaluationDuration   time.Duration
+	evaluationTimestamp  time.Time
+
+	shouldRestore bool
+
+	markStale   bool
+	done        chan struct{}
+	terminated  chan struct{}
+	managerDone chan struct{}
+
+	logger log.Logger
+
+	metrics *Metrics
+}
+
+// GroupOptions bundles the options used to construct a Group.
+type GroupOptions struct {
+	Name, File    string
+	Interval      time.Duration
+	Rules         []promRules.Rule
+	ShouldRestore bool
+	Opts          *ManagerOptions
+	done          chan struct{}
+}
+
+// NewGroup makes a new Group with the given name, options, and rules.
+func NewGroup(o GroupOptions) *Group {
+	metrics := o.Opts.Metrics
+	if metrics == nil {
+		metrics = NewGroupMetrics(o.Opts.Registerer)
+	}
+
+	key := groupKey(o.File, o.Name)
+	metrics.evalTotal.WithLabelValues(key)
+	metrics.evalFailures.WithLabelValues(key)
+	metrics.groupLastEvalTime.WithLabelValues(key)
+	metrics.groupLastDuration.WithLabelValues(key)
+	metrics.groupRules.WithLabelValues(key).Set(float64(len(o.Rules)))
+	metrics.groupInterval.WithLabelValues(key).Set(o.Interval.Seconds())
+
+	return &Group{
+		name:                 o.Name,
+		file:                 o.File,
+		interval:             o.Interval,
+		rules:                o.Rules,
+		shouldRestore:        o.ShouldRestore,
+		opts:                 o.Opts,
+		seriesInPreviousEval: make([]map[string]labels.Labels, len(o.Rules)),
+		done:                 make(chan struct{}),
+		managerDone:          o.done,
+		terminated:           make(chan struct{}),
+		logger:               log.With(o.Opts.Logger, "group", o.Name),
+		metrics:              metrics,
+	}
+}
+
+// Name returns the group name.
+func (g *Group) Name() string { return g.name }
+
+// File returns the group's file.
+func (g *Group) File() string { return g.file }
+
+// Rules returns the group's rules.
+func (g *Group) Rules() []promRules.Rule { return g.rules }
+
+// Interval returns the group's interval.
+func (g *Group) Interval() time.Duration { return g.interval }
+
+func (g *Group) run(ctx context.Context) {
+	defer close(g.terminated)
+
+	// Wait an initial amount to have consistently slotted intervals.
+	evalTimestamp := g.evalTimestamp().Add(g.interval)
+	select {
+	case <-time.After(time.Until(evalTimestamp)):
+	case <-g.done:
+		return
+	}
+
+	ctx = promql.NewOriginContext(ctx, map[string]interface{}{
+		"ruleGroup": map[string]string{
+			"file": g.File(),
+			"name": g.Name(),
+		},
+	})
+
+	iter := func() {
+		g.metrics.iterationsScheduled.Inc()
+
+		start := time.Now()
+		g.Eval(ctx, evalTimestamp)
+		timeSinceStart := time.Since(start)
+
+		g.metrics.iterationDuration.Observe(timeSinceStart.Seconds())
+		g.setEvaluationDuration(timeSinceStart)
+		g.setEvaluationTimestamp(start)
+	}
+
+	// The assumption here is that since the ticker was started after having
+	// waited for `evalTimestamp` to pass, the ticks will trigger soon
+	// after each `evalTimestamp + N * g.interval` occurrence.
+	tick := time.NewTicker(g.interval)
+	defer tick.Stop()
+
+	defer func() {
+		if !g.markStale {
+			return
+		}
+		go func(now time.Time) {
+			for _, rule := range g.seriesInPreviousEval {
+				for _, r := range rule {
+					g.staleSeries = append(g.staleSeries, r)
+				}
+			}
+			// The old series map can be garbage collected at this point.
+			g.seriesInPreviousEval = nil
+			// Wait for 2 intervals to give renamed rules the opportunity to
+			// insert new series into the TSDB. At this point, if a rule was
+			// renamed, its new group should already be running.
+			select {
+			case <-g.managerDone:
+			case <-time.After(2 * g.interval):
+				g.cleanupStaleSeries(now)
+			}
+		}(time.Now())
+	}()
+
+	iter()
+	if g.shouldRestore {
+		// If we have to restore, we wait for another Eval to finish.
+		// The reason is that during the first eval (or before it) we may not
+		// have enough data available yet, so recording rules would not have
+		// produced the latest values that some alerts depend on.
+		select {
+		case <-g.done:
+			return
+		case <-tick.C:
+			missed := (time.Since(evalTimestamp) / g.interval) - 1
+			if missed > 0 {
+				g.metrics.iterationsMissed.Add(float64(missed))
+				g.metrics.iterationsScheduled.Add(float64(missed))
+			}
+			evalTimestamp = evalTimestamp.Add((missed + 1) * g.interval)
+			iter()
+		}
+
+		g.RestoreForState(time.Now())
+		g.shouldRestore = false
+	}
+
+	for {
+		select {
+		case <-g.done:
+			return
+		default:
+			select {
+			case <-g.done:
+				return
+			case <-tick.C:
+				missed := (time.Since(evalTimestamp) / g.interval) - 1
+				if missed > 0 {
+					g.metrics.iterationsMissed.Add(float64(missed))
+					g.metrics.iterationsScheduled.Add(float64(missed))
+				}
+				evalTimestamp = evalTimestamp.Add((missed + 1) * g.interval)
+				iter()
+			}
+		}
+	}
+}
+
+func (g *Group) stop() {
+	close(g.done)
+	<-g.terminated
+}
+
+func (g *Group) hash() uint64 {
+	l := labels.New(
+		labels.Label{Name: "name", Value: g.name},
+		labels.Label{Name: "file", Value: g.file},
+	)
+	return l.Hash()
+}
+
+// AlertingRules returns the list of the group's alerting rules.
+func (g *Group) AlertingRules() []*AlertingRule {
+	g.mtx.Lock()
+	defer g.mtx.Unlock()
+
+	var alerts []*AlertingRule
+	for _, rule := range g.rules {
+		if alertingRule, ok := rule.(*AlertingRule); ok {
+			alerts = append(alerts, alertingRule)
+		}
+	}
+	sort.Slice(alerts, func(i, j int) bool {
+		return alerts[i].State() > alerts[j].State() ||
+			(alerts[i].State() == alerts[j].State() &&
+				alerts[i].Name() < alerts[j].Name())
+	})
+	return alerts
+}
+
+// HasAlertingRules returns true if the group contains at least one AlertingRule.
+func (g *Group) HasAlertingRules() bool {
+	g.mtx.Lock()
+	defer g.mtx.Unlock()
+
+	for _, rule := range g.rules {
+		if _, ok := rule.(*AlertingRule); ok {
+			return true
+		}
+	}
+	return false
+}
+
+// GetEvaluationDuration returns the time in seconds it took to evaluate the rule group.
+func (g *Group) GetEvaluationDuration() time.Duration {
+	g.mtx.Lock()
+	defer g.mtx.Unlock()
+	return g.evaluationDuration
+}
+
+// setEvaluationDuration sets the time in seconds the last evaluation took.
+func (g *Group) setEvaluationDuration(dur time.Duration) {
+	g.metrics.groupLastDuration.WithLabelValues(groupKey(g.file, g.name)).Set(dur.Seconds())
+
+	g.mtx.Lock()
+	defer g.mtx.Unlock()
+	g.evaluationDuration = dur
+}
+
+// GetEvaluationTimestamp returns the time the last evaluation of the rule group took place.
+func (g *Group) GetEvaluationTimestamp() time.Time {
+	g.mtx.Lock()
+	defer g.mtx.Unlock()
+	return g.evaluationTimestamp
+}
+
+// setEvaluationTimestamp updates evaluationTimestamp to the timestamp of when the rule group was last evaluated.
+func (g *Group) setEvaluationTimestamp(ts time.Time) {
+	g.metrics.groupLastEvalTime.WithLabelValues(groupKey(g.file, g.name)).Set(float64(ts.UnixNano()) / 1e9)
+
+	g.mtx.Lock()
+	defer g.mtx.Unlock()
+	g.evaluationTimestamp = ts
+}
+
+// evalTimestamp returns the immediately preceding consistently slotted evaluation time.
+func (g *Group) evalTimestamp() time.Time {
+	var (
+		offset = int64(g.hash() % uint64(g.interval))
+		now    = time.Now().UnixNano()
+		adjNow = now - offset
+		base   = adjNow - (adjNow % int64(g.interval))
+	)
+
+	return time.Unix(0, base+offset).UTC()
+}
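
A worked example (not part of the patch) of the slotting arithmetic above, using made-up numbers: with a 1m interval and a hash-derived 15s offset, evaluations are pinned to second 15 of every minute, so the slot preceding 12:00:40 is 12:00:15.

package rules

import "time"

// exampleSlot (hypothetical) mirrors evalTimestamp with concrete values.
func exampleSlot() time.Time {
	interval := int64(time.Minute)
	offset := int64(15 * time.Second) // stands in for int64(g.hash() % uint64(g.interval))
	now := time.Date(2020, 7, 7, 12, 0, 40, 0, time.UTC).UnixNano()
	adjNow := now - offset                 // shift so slots align to multiples of the interval
	base := adjNow - adjNow%interval       // 2020-07-07 12:00:00 UTC in nanoseconds
	return time.Unix(0, base+offset).UTC() // 2020-07-07 12:00:15 UTC
}
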
+
+func nameAndLabels(rule promRules.Rule) string {
+	return rule.Name() + rule.Labels().String()
+}
+
+// CopyState copies the alerting rule and staleness related state from the given group.
+//
+// Rules are matched based on their name and labels. If there are duplicates, the
+// first is matched with the first, second with the second etc.
+func (g *Group) CopyState(from *Group) {
+	g.evaluationDuration = from.evaluationDuration
+
+	ruleMap := make(map[string][]int, len(from.rules))
+
+	for fi, fromRule := range from.rules {
+		nameAndLabels := nameAndLabels(fromRule)
+		l := ruleMap[nameAndLabels]
+		ruleMap[nameAndLabels] = append(l, fi)
+	}
+
+	for i, rule := range g.rules {
+		nameAndLabels := nameAndLabels(rule)
+		indexes := ruleMap[nameAndLabels]
+		if len(indexes) == 0 {
+			continue
+		}
+		fi := indexes[0]
+		g.seriesInPreviousEval[i] = from.seriesInPreviousEval[fi]
+		ruleMap[nameAndLabels] = indexes[1:]
+
+		ar, ok := rule.(*AlertingRule)
+		if !ok {
+			continue
+		}
+		far, ok := from.rules[fi].(*AlertingRule)
+		if !ok {
+			continue
+		}
+
+		for fp, a := range far.active {
+			ar.active[fp] = a
+		}
+	}
+
+	// Handle deleted and unmatched duplicate rules.
+	g.staleSeries = from.staleSeries
+	for fi, fromRule := range from.rules {
+		nameAndLabels := nameAndLabels(fromRule)
+		l := ruleMap[nameAndLabels]
+		if len(l) != 0 {
+			for _, series := range from.seriesInPreviousEval[fi] {
+				g.staleSeries = append(g.staleSeries, series)
+			}
+		}
+	}
+}
+
+// Eval runs a single evaluation cycle in which all rules are evaluated sequentially.
+func (g *Group) Eval(ctx context.Context, ts time.Time) {
+	for i, rule := range g.rules {
+		select {
+		case <-g.done:
+			return
+		default:
+		}
+
+		func(i int, rule promRules.Rule) {
+			sp, ctx := opentracing.StartSpanFromContext(ctx, "rule")
+			sp.SetTag("name", rule.Name())
+			defer func(t time.Time) {
+				sp.Finish()
+
+				since := time.Since(t)
+				g.metrics.evalDuration.Observe(since.Seconds())
+				rule.SetEvaluationDuration(since)
+				rule.SetEvaluationTimestamp(t)
+			}(time.Now())
+
+			g.metrics.evalTotal.WithLabelValues(groupKey(g.File(), g.Name())).Inc()
+
+			vector, err := rule.Eval(ctx, ts, g.opts.QueryFunc, g.opts.ExternalURL)
+			if err != nil {
+				// Canceled queries are intentional termination of queries. This normally
+				// happens on shutdown and thus we skip logging of any errors here.
+				if _, ok := err.(promql.ErrQueryCanceled); !ok {
+					level.Warn(g.logger).Log("msg", "Evaluating rule failed", "rule", rule, "err", err)
+				}
+				g.metrics.evalFailures.WithLabelValues(groupKey(g.File(), g.Name())).Inc()
+				return
+			}
+
+			if ar, ok := rule.(*AlertingRule); ok {
+				ar.sendAlerts(ctx, ts, g.opts.ResendDelay, g.interval, g.opts.NotifyFunc)
+			}
+			var (
+				numOutOfOrder = 0
+				numDuplicates = 0
+			)
+
+			app := g.opts.Appendable.Appender()
+			seriesReturned := make(map[string]labels.Labels, len(g.seriesInPreviousEval[i]))
+			defer func() {
+				if err := app.Commit(); err != nil {
+					level.Warn(g.logger).Log("msg", "Rule sample appending failed", "err", err)
+					return
+				}
+				g.seriesInPreviousEval[i] = seriesReturned
+			}()
+			for _, s := range vector {
+				if _, err := app.Add(s.Metric, s.T, s.V); err != nil {
+					switch errors.Cause(err) {
+					case storage.ErrOutOfOrderSample:
+						numOutOfOrder++
+						level.Debug(g.logger).Log("msg", "Rule evaluation result discarded", "err", err, "sample", s)
+					case storage.ErrDuplicateSampleForTimestamp:
+						numDuplicates++
+						level.Debug(g.logger).Log("msg", "Rule evaluation result discarded", "err", err, "sample", s)
+					default:
+						level.Warn(g.logger).Log("msg", "Rule evaluation result discarded", "err", err, "sample", s)
+					}
+				} else {
+					seriesReturned[s.Metric.String()] = s.Metric
+				}
+			}
+			if numOutOfOrder > 0 {
+				level.Warn(g.logger).Log("msg", "Error on ingesting out-of-order result from rule evaluation", "numDropped", numOutOfOrder)
+			}
+			if numDuplicates > 0 {
+				level.Warn(g.logger).Log("msg", "Error on ingesting results from rule evaluation with different value but same timestamp", "numDropped", numDuplicates)
+			}
+
+			for metric, lset := range g.seriesInPreviousEval[i] {
+				if _, ok := seriesReturned[metric]; !ok {
+					// Series no longer exposed, mark it stale.
+					_, err = app.Add(lset, timestamp.FromTime(ts), math.Float64frombits(value.StaleNaN))
+					switch errors.Cause(err) {
+					case nil:
+					case storage.ErrOutOfOrderSample, storage.ErrDuplicateSampleForTimestamp:
+						// Do not count these in logging, as this is expected if series
+						// is exposed from a different rule.
+					default:
+						level.Warn(g.logger).Log("msg", "Adding stale sample failed", "sample", metric, "err", err)
+					}
+				}
+			}
+		}(i, rule)
+	}
+	g.cleanupStaleSeries(ts)
+}
+
+func (g *Group) cleanupStaleSeries(ts time.Time) {
+	if len(g.staleSeries) == 0 {
+		return
+	}
+	app := g.opts.Appendable.Appender()
+	for _, s := range g.staleSeries {
+		// Rule that produced series no longer configured, mark it stale.
+		_, err := app.Add(s, timestamp.FromTime(ts), math.Float64frombits(value.StaleNaN))
+		switch errors.Cause(err) {
+		case nil:
+		case storage.ErrOutOfOrderSample, storage.ErrDuplicateSampleForTimestamp:
+			// Do not count these in logging, as this is expected if series
+			// is exposed from a different rule.
+		default:
+			level.Warn(g.logger).Log("msg", "Adding stale sample for previous configuration failed", "sample", s, "err", err)
+		}
+	}
+	if err := app.Commit(); err != nil {
+		level.Warn(g.logger).Log("msg", "Stale sample appending for previous configuration failed", "err", err)
+	} else {
+		g.staleSeries = nil
+	}
+}
+
+// RestoreForState restores the 'for' state of the alerts
+// by looking up last ActiveAt from storage.
+func (g *Group) RestoreForState(ts time.Time) {
+	maxtMS := int64(model.TimeFromUnixNano(ts.UnixNano()))
+	// We allow restoration only for alerts that were active within the outage tolerance window.
+	mint := ts.Add(-g.opts.OutageTolerance)
+	mintMS := int64(model.TimeFromUnixNano(mint.UnixNano()))
+	q, err := g.opts.Queryable.Querier(g.opts.Context, mintMS, maxtMS)
+	if err != nil {
+		level.Error(g.logger).Log("msg", "Failed to get Querier", "err", err)
+		return
+	}
+	defer func() {
+		if err := q.Close(); err != nil {
+			level.Error(g.logger).Log("msg", "Failed to close Querier", "err", err)
+		}
+	}()
+
+	for _, rule := range g.Rules() {
+		alertRule, ok := rule.(*AlertingRule)
+		if !ok {
+			continue
+		}
+
+		alertHoldDuration := alertRule.HoldDuration()
+		if alertHoldDuration < g.opts.ForGracePeriod {
+			// If alertHoldDuration is already less than grace period, we would not
+			// like to make it wait for `g.opts.ForGracePeriod` time before firing.
+			// Hence we skip restoration, which will make it wait for alertHoldDuration.
+			alertRule.SetRestored(true)
+			continue
+		}
+
+		alertRule.ForEachActiveAlert(func(a *Alert) {
+			smpl := alertRule.forStateSample(a, time.Now(), 0)
+			var matchers []*labels.Matcher
+			for _, l := range smpl.Metric {
+				mt, err := labels.NewMatcher(labels.MatchEqual, l.Name, l.Value)
+				if err != nil {
+					panic(err)
+				}
+				matchers = append(matchers, mt)
+			}
+
+			sset := q.Select(false, nil, matchers...)
+
+			seriesFound := false
+			var s storage.Series
+			for sset.Next() {
+				// Query assures that smpl.Metric is included in sset.At().Labels(),
+				// hence just checking the length would act like equality.
+				// (This is faster than calling labels.Compare again as we already have some info).
+				if len(sset.At().Labels()) == len(smpl.Metric) {
+					s = sset.At()
+					seriesFound = true
+					break
+				}
+			}
+
+			if err := sset.Err(); err != nil {
+				// Querier Warnings are ignored. We do not care unless we have an error.
+				level.Error(g.logger).Log(
+					"msg", "Failed to restore 'for' state",
+					labels.AlertName, alertRule.Name(),
+					"stage", "Select",
+					"err", err,
+				)
+				return
+			}
+
+			if !seriesFound {
+				return
+			}
+
+			// Series found for the 'for' state.
+			var t int64
+			var v float64
+			it := s.Iterator()
+			for it.Next() {
+				t, v = it.At()
+			}
+			if it.Err() != nil {
+				level.Error(g.logger).Log("msg", "Failed to restore 'for' state",
+					labels.AlertName, alertRule.Name(), "stage", "Iterator", "err", it.Err())
+				return
+			}
+			if value.IsStaleNaN(v) { // Alert was not active.
+				return
+			}
+
+			downAt := time.Unix(t/1000, 0).UTC()
+			restoredActiveAt := time.Unix(int64(v), 0).UTC()
+			timeSpentPending := downAt.Sub(restoredActiveAt)
+			timeRemainingPending := alertHoldDuration - timeSpentPending
+
+			if timeRemainingPending <= 0 {
+				// It means that the alert was firing when Prometheus went down.
+				// In the next Eval, the state of this alert will be set back to
+				// firing again if it's still firing in that Eval.
+				// Nothing to be done in this case.
+			} else if timeRemainingPending < g.opts.ForGracePeriod {
+				// (new) restoredActiveAt = (ts + m.opts.ForGracePeriod) - alertHoldDuration
+				//                            /* new firing time */      /* moving back by hold duration */
+				//
+				// Proof of correctness:
+				// firingTime = restoredActiveAt.Add(alertHoldDuration)
+				//            = ts + m.opts.ForGracePeriod - alertHoldDuration + alertHoldDuration
+				//            = ts + m.opts.ForGracePeriod
+				//
+				// Time remaining to fire = firingTime.Sub(ts)
+				//                        = (ts + m.opts.ForGracePeriod) - ts
+				//                        = m.opts.ForGracePeriod
+				restoredActiveAt = ts.Add(g.opts.ForGracePeriod).Add(-alertHoldDuration)
+			} else {
+				// By shifting ActiveAt to the future (ActiveAt + some_duration),
+				// the total pending time from the original ActiveAt
+				// would be `alertHoldDuration + some_duration`.
+				// Here, some_duration = downDuration.
+				downDuration := ts.Sub(downAt)
+				restoredActiveAt = restoredActiveAt.Add(downDuration)
+			}
+
+			a.ActiveAt = restoredActiveAt
+			level.Debug(g.logger).Log("msg", "'for' state restored",
+				labels.AlertName, alertRule.Name(), "restored_time", a.ActiveAt.Format(time.RFC850),
+				"labels", a.Labels.String())
+
+		})
+
+		alertRule.SetRestored(true)
+	}
+
+}
+
+// Equals reports whether two groups are the same.
+func (g *Group) Equals(ng *Group) bool {
+	if g.name != ng.name {
+		return false
+	}
+
+	if g.file != ng.file {
+		return false
+	}
+
+	if g.interval != ng.interval {
+		return false
+	}
+
+	if len(g.rules) != len(ng.rules) {
+		return false
+	}
+
+	for i, gr := range g.rules {
+		if gr.String() != ng.rules[i].String() {
+			return false
+		}
+	}
+
+	return true
+}
+
+// The Manager manages recording and alerting rules.
+type Manager struct {
+	opts     *ManagerOptions
+	groups   map[string]*Group
+	mtx      sync.RWMutex
+	block    chan struct{}
+	done     chan struct{}
+	restored bool
+
+	logger log.Logger
+}
+
+// NotifyFunc sends notifications about a set of alerts generated by the given expression.
+type NotifyFunc func(ctx context.Context, expr string, alerts ...*Alert)
+
+// ManagerOptions bundles options for the Manager.
+type ManagerOptions struct {
+	ExternalURL     *url.URL
+	QueryFunc       promRules.QueryFunc
+	NotifyFunc      NotifyFunc
+	Context         context.Context
+	Appendable      storage.Appendable
+	Queryable       storage.Queryable
+	Logger          log.Logger
+	Registerer      prometheus.Registerer
+	OutageTolerance time.Duration
+	ForGracePeriod  time.Duration
+	ResendDelay     time.Duration
+
+	Metrics *Metrics
+}
+
+// NewManager returns an implementation of Manager, ready to be started
+// by calling the Run method.
+func NewManager(o *ManagerOptions) *Manager {
+	if o.Metrics == nil {
+		o.Metrics = NewGroupMetrics(o.Registerer)
+	}
+
+	m := &Manager{
+		groups: map[string]*Group{},
+		opts:   o,
+		block:  make(chan struct{}),
+		done:   make(chan struct{}),
+		logger: o.Logger,
+	}
+
+	o.Metrics.iterationsMissed.Inc()
+	return m
+}
+
+// Run starts processing of the rule manager.
+func (m *Manager) Run() {
+	close(m.block)
+}
+
+// Stop the rule manager's rule evaluation cycles.
+func (m *Manager) Stop() {
+	m.mtx.Lock()
+	defer m.mtx.Unlock()
+
+	level.Info(m.logger).Log("msg", "Stopping rule manager...")
+
+	for _, eg := range m.groups {
+		eg.stop()
+	}
+
+	// Stop any group goroutines that are still waiting out extra evaluation
+	// intervals before writing their staleness markers.
+	close(m.done)
+
+	level.Info(m.logger).Log("msg", "Rule manager stopped")
+}
+
+// Update the rule manager's state as the config requires. If
+// loading the new rules fails, the old rule set is restored.
+func (m *Manager) Update(interval time.Duration, files []string, externalLabels labels.Labels) error {
+	m.mtx.Lock()
+	defer m.mtx.Unlock()
+
+	groups, errs := m.LoadGroups(interval, externalLabels, files...)
+	if errs != nil {
+		for _, e := range errs {
+			level.Error(m.logger).Log("msg", "loading groups failed", "err", e)
+		}
+		return errors.New("error loading rules, previous rule set restored")
+	}
+	m.restored = true
+
+	var wg sync.WaitGroup
+	for _, newg := range groups {
+		// If there is an old group with the same identifier, check whether the
+		// new group equals the old one; if so, reuse the old group. Otherwise,
+		// stop the old group, wait for it to finish its current iteration, and
+		// copy its state into the new group.
+		gn := groupKey(newg.file, newg.name)
+		oldg, ok := m.groups[gn]
+		delete(m.groups, gn)
+
+		if ok && oldg.Equals(newg) {
+			groups[gn] = oldg
+			continue
+		}
+
+		wg.Add(1)
+		go func(newg *Group) {
+			if ok {
+				oldg.stop()
+				newg.CopyState(oldg)
+			}
+			go func() {
+				// Wait with starting evaluation until the rule manager
+				// is told to run. This is necessary to avoid running
+				// queries against a bootstrapping storage.
+				<-m.block
+				newg.run(m.opts.Context)
+			}()
+			wg.Done()
+		}(newg)
+	}
+
+	// Stop remaining old groups.
+	wg.Add(len(m.groups))
+	for n, oldg := range m.groups {
+		go func(n string, g *Group) {
+			g.markStale = true
+			g.stop()
+			if m := g.metrics; m != nil {
+				m.evalTotal.DeleteLabelValues(n)
+				m.evalFailures.DeleteLabelValues(n)
+				m.groupInterval.DeleteLabelValues(n)
+				m.groupLastEvalTime.DeleteLabelValues(n)
+				m.groupLastDuration.DeleteLabelValues(n)
+				m.groupRules.DeleteLabelValues(n)
+			}
+			wg.Done()
+		}(n, oldg)
+	}
+
+	wg.Wait()
+	m.groups = groups
+
+	return nil
+}
+
+// LoadGroups reads groups from a list of files.
+func (m *Manager) LoadGroups(
+	interval time.Duration, externalLabels labels.Labels, filenames ...string,
+) (map[string]*Group, []error) {
+	groups := make(map[string]*Group)
+
+	shouldRestore := !m.restored
+
+	for _, fn := range filenames {
+		rgs, errs := rulefmt.ParseFile(fn)
+		if errs != nil {
+			return nil, errs
+		}
+
+		for _, rg := range rgs.Groups {
+			itv := interval
+			if rg.Interval != 0 {
+				itv = time.Duration(rg.Interval)
+			}
+
+			rules := make([]promRules.Rule, 0, len(rg.Rules))
+			for _, r := range rg.Rules {
+				expr, err := logql.ParseExpr(r.Expr.Value)
+				if err != nil {
+					return nil, []error{errors.Wrap(err, fn)}
+				}
+
+				if r.Alert.Value != "" {
+					rules = append(rules, NewAlertingRule(
+						r.Alert.Value,
+						&parser.StringLiteral{Val: expr.String()},
+						time.Duration(r.For),
+						labels.FromMap(r.Labels),
+						labels.FromMap(r.Annotations),
+						externalLabels,
+						m.restored,
+						log.With(m.logger, "alert", r.Alert),
+					))
+					continue
+				}
+				rules = append(rules, promRules.NewRecordingRule(
+					r.Record.Value,
+					&parser.StringLiteral{Val: expr.String()},
+					labels.FromMap(r.Labels),
+				))
+			}
+
+			groups[groupKey(fn, rg.Name)] = NewGroup(GroupOptions{
+				Name:          rg.Name,
+				File:          fn,
+				Interval:      itv,
+				Rules:         rules,
+				ShouldRestore: shouldRestore,
+				Opts:          m.opts,
+				done:          m.done,
+			})
+		}
+	}
+
+	return groups, nil
+}
+
+// Group names need not be unique across filenames.
+func groupKey(file, name string) string {
+	return file + ";" + name
+}
+
+// RuleGroups returns the list of manager's rule groups.
+func (m *Manager) RuleGroups() []ruler.Group {
+	m.mtx.RLock()
+	defer m.mtx.RUnlock()
+
+	rgs := make([]ruler.Group, 0, len(m.groups))
+	for _, g := range m.groups {
+		rgs = append(rgs, g)
+	}
+
+	sort.Slice(rgs, func(i, j int) bool {
+		if rgs[i].File() != rgs[j].File() {
+			return rgs[i].File() < rgs[j].File()
+		}
+		return rgs[i].Name() < rgs[j].Name()
+	})
+
+	return rgs
+}
+
+// Rules returns the list of the manager's rules.
+func (m *Manager) Rules() []promRules.Rule {
+	m.mtx.RLock()
+	defer m.mtx.RUnlock()
+
+	var rules []promRules.Rule
+	for _, g := range m.groups {
+		rules = append(rules, g.rules...)
+	}
+
+	return rules
+}
+
+// AlertingRules returns the list of the manager's alerting rules.
+func (m *Manager) AlertingRules() []*AlertingRule {
+	m.mtx.RLock()
+	defer m.mtx.RUnlock()
+
+	alerts := []*AlertingRule{}
+	for _, rule := range m.Rules() {
+		if alertingRule, ok := rule.(*AlertingRule); ok {
+			alerts = append(alerts, alertingRule)
+		}
+	}
+
+	return alerts
+}
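
The Manager above mirrors the upstream Prometheus rules manager, except that LoadGroups parses each rule expression with logql.ParseExpr and hands it to the alerting/recording rules as a string literal. The following is a minimal, in-package usage sketch; the query function, appendable sink, and "rules.yaml" path are assumptions for illustration and are not defined in this patch.

// Sketch only: queryFunc and appendable are assumed to be provided by the
// caller (e.g. a Loki-backed QueryFunc and a sample sink).
func runRuleManager(queryFunc promRules.QueryFunc, appendable storage.Appendable) error {
	opts := &ManagerOptions{
		QueryFunc:  queryFunc,
		Appendable: appendable,
		Context:    context.Background(),
		Logger:     log.NewNopLogger(),
		NotifyFunc: func(ctx context.Context, expr string, alerts ...*Alert) {},
	}
	mgr := NewManager(opts)
	mgr.Run() // unblock evaluation once storage is ready

	// Load (or reload) rule files; on failure the previous rule set is kept.
	if err := mgr.Update(time.Minute, []string{"rules.yaml"}, nil); err != nil {
		return err
	}
	// mgr.Stop() should be called when the ruler shuts down.
	return nil
}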
diff --git a/pkg/ruler/rules/manager_test.go b/pkg/ruler/rules/manager_test.go
new file mode 100644
index 0000000000000..ee091d2b11306
--- /dev/null
+++ b/pkg/ruler/rules/manager_test.go
@@ -0,0 +1,959 @@
+// Copyright 2013 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package rules
+
+import (
+	"context"
+	"fmt"
+	"io/ioutil"
+	"math"
+	"os"
+	"sort"
+	"testing"
+	"time"
+
+	"github.com/go-kit/kit/log"
+	"github.com/prometheus/client_golang/prometheus"
+	"github.com/prometheus/common/model"
+	yaml "gopkg.in/yaml.v2"
+
+	"github.com/prometheus/prometheus/pkg/labels"
+	"github.com/prometheus/prometheus/pkg/rulefmt"
+	"github.com/prometheus/prometheus/pkg/timestamp"
+	"github.com/prometheus/prometheus/pkg/value"
+	"github.com/prometheus/prometheus/promql"
+	"github.com/prometheus/prometheus/promql/parser"
+	"github.com/prometheus/prometheus/rules"
+	"github.com/prometheus/prometheus/storage"
+	"github.com/prometheus/prometheus/util/teststorage"
+	"github.com/prometheus/prometheus/util/testutil"
+)
+
+type AppendableAdapter struct{ storage.Storage }
+
+func (a AppendableAdapter) Appender(_ rules.Rule) (storage.Appender, error) {
+	return a.Storage.Appender(), nil
+}
+
+func TestAlertingRule(t *testing.T) {
+	suite, err := promql.NewTest(t, `
+		load 5m
+			http_requests{job="app-server", instance="0", group="canary", severity="overwrite-me"}	75 85  95 105 105  95  85
+			http_requests{job="app-server", instance="1", group="canary", severity="overwrite-me"}	80 90 100 110 120 130 140
+	`)
+	testutil.Ok(t, err)
+	defer suite.Close()
+
+	err = suite.Run()
+	testutil.Ok(t, err)
+
+	expr, err := parser.ParseExpr(`http_requests{group="canary", job="app-server"} < 100`)
+	testutil.Ok(t, err)
+
+	rule := NewAlertingRule(
+		"HTTPRequestRateLow",
+		expr,
+		time.Minute,
+		labels.FromStrings("severity", "{{\"c\"}}ritical"),
+		nil, nil, true, nil,
+	)
+	result := promql.Vector{
+		{
+			Metric: labels.FromStrings(
+				"__name__", "ALERTS",
+				"alertname", "HTTPRequestRateLow",
+				"alertstate", "pending",
+				"group", "canary",
+				"instance", "0",
+				"job", "app-server",
+				"severity", "critical",
+			),
+			Point: promql.Point{V: 1},
+		},
+		{
+			Metric: labels.FromStrings(
+				"__name__", "ALERTS",
+				"alertname", "HTTPRequestRateLow",
+				"alertstate", "pending",
+				"group", "canary",
+				"instance", "1",
+				"job", "app-server",
+				"severity", "critical",
+			),
+			Point: promql.Point{V: 1},
+		},
+		{
+			Metric: labels.FromStrings(
+				"__name__", "ALERTS",
+				"alertname", "HTTPRequestRateLow",
+				"alertstate", "firing",
+				"group", "canary",
+				"instance", "0",
+				"job", "app-server",
+				"severity", "critical",
+			),
+			Point: promql.Point{V: 1},
+		},
+		{
+			Metric: labels.FromStrings(
+				"__name__", "ALERTS",
+				"alertname", "HTTPRequestRateLow",
+				"alertstate", "firing",
+				"group", "canary",
+				"instance", "1",
+				"job", "app-server",
+				"severity", "critical",
+			),
+			Point: promql.Point{V: 1},
+		},
+	}
+
+	baseTime := time.Unix(0, 0)
+
+	var tests = []struct {
+		time   time.Duration
+		result promql.Vector
+	}{
+		{
+			time:   0,
+			result: result[:2],
+		}, {
+			time:   5 * time.Minute,
+			result: result[2:],
+		}, {
+			time:   10 * time.Minute,
+			result: result[2:3],
+		},
+		{
+			time:   15 * time.Minute,
+			result: nil,
+		},
+		{
+			time:   20 * time.Minute,
+			result: nil,
+		},
+		{
+			time:   25 * time.Minute,
+			result: result[:1],
+		},
+		{
+			time:   30 * time.Minute,
+			result: result[2:3],
+		},
+	}
+
+	for i, test := range tests {
+		t.Logf("case %d", i)
+
+		evalTime := baseTime.Add(test.time)
+
+		res, err := rule.Eval(suite.Context(), evalTime, EngineQueryFunc(suite.QueryEngine(), suite.Storage()), nil)
+		testutil.Ok(t, err)
+
+		var filteredRes promql.Vector // After removing 'ALERTS_FOR_STATE' samples.
+		for _, smpl := range res {
+			smplName := smpl.Metric.Get("__name__")
+			if smplName == "ALERTS" {
+				filteredRes = append(filteredRes, smpl)
+			} else {
+				// If not 'ALERTS', it has to be 'ALERTS_FOR_STATE'.
+				testutil.Equals(t, smplName, "ALERTS_FOR_STATE")
+			}
+		}
+		for i := range test.result {
+			test.result[i].T = timestamp.FromTime(evalTime)
+		}
+		testutil.Assert(t, len(test.result) == len(filteredRes), "%d. Number of samples in expected and actual output don't match (%d vs. %d)", i, len(test.result), len(res))
+
+		sort.Slice(filteredRes, func(i, j int) bool {
+			return labels.Compare(filteredRes[i].Metric, filteredRes[j].Metric) < 0
+		})
+		testutil.Equals(t, test.result, filteredRes)
+
+		for _, aa := range rule.ActiveAlerts() {
+			testutil.Assert(t, aa.Labels.Get(model.MetricNameLabel) == "", "%s label set on active alert: %s", model.MetricNameLabel, aa.Labels)
+		}
+	}
+}
+
+func TestForStateAddSamples(t *testing.T) {
+	suite, err := promql.NewTest(t, `
+		load 5m
+			http_requests{job="app-server", instance="0", group="canary", severity="overwrite-me"}	75 85  95 105 105  95  85
+			http_requests{job="app-server", instance="1", group="canary", severity="overwrite-me"}	80 90 100 110 120 130 140
+	`)
+	testutil.Ok(t, err)
+	defer suite.Close()
+
+	err = suite.Run()
+	testutil.Ok(t, err)
+
+	expr, err := parser.ParseExpr(`http_requests{group="canary", job="app-server"} < 100`)
+	testutil.Ok(t, err)
+
+	rule := NewAlertingRule(
+		"HTTPRequestRateLow",
+		expr,
+		time.Minute,
+		labels.FromStrings("severity", "{{\"c\"}}ritical"),
+		nil, nil, true, nil,
+	)
+	result := promql.Vector{
+		{
+			Metric: labels.FromStrings(
+				"__name__", "ALERTS_FOR_STATE",
+				"alertname", "HTTPRequestRateLow",
+				"group", "canary",
+				"instance", "0",
+				"job", "app-server",
+				"severity", "critical",
+			),
+			Point: promql.Point{V: 1},
+		},
+		{
+			Metric: labels.FromStrings(
+				"__name__", "ALERTS_FOR_STATE",
+				"alertname", "HTTPRequestRateLow",
+				"group", "canary",
+				"instance", "1",
+				"job", "app-server",
+				"severity", "critical",
+			),
+			Point: promql.Point{V: 1},
+		},
+		{
+			Metric: labels.FromStrings(
+				"__name__", "ALERTS_FOR_STATE",
+				"alertname", "HTTPRequestRateLow",
+				"group", "canary",
+				"instance", "0",
+				"job", "app-server",
+				"severity", "critical",
+			),
+			Point: promql.Point{V: 1},
+		},
+		{
+			Metric: labels.FromStrings(
+				"__name__", "ALERTS_FOR_STATE",
+				"alertname", "HTTPRequestRateLow",
+				"group", "canary",
+				"instance", "1",
+				"job", "app-server",
+				"severity", "critical",
+			),
+			Point: promql.Point{V: 1},
+		},
+	}
+
+	baseTime := time.Unix(0, 0)
+
+	var tests = []struct {
+		time            time.Duration
+		result          promql.Vector
+		persistThisTime bool // If true, it means this 'time' is persisted for 'for'.
+	}{
+		{
+			time:            0,
+			result:          append(promql.Vector{}, result[:2]...),
+			persistThisTime: true,
+		},
+		{
+			time:   5 * time.Minute,
+			result: append(promql.Vector{}, result[2:]...),
+		},
+		{
+			time:   10 * time.Minute,
+			result: append(promql.Vector{}, result[2:3]...),
+		},
+		{
+			time:   15 * time.Minute,
+			result: nil,
+		},
+		{
+			time:   20 * time.Minute,
+			result: nil,
+		},
+		{
+			time:            25 * time.Minute,
+			result:          append(promql.Vector{}, result[:1]...),
+			persistThisTime: true,
+		},
+		{
+			time:   30 * time.Minute,
+			result: append(promql.Vector{}, result[2:3]...),
+		},
+	}
+
+	var forState float64
+	for i, test := range tests {
+		t.Logf("case %d", i)
+		evalTime := baseTime.Add(test.time)
+
+		if test.persistThisTime {
+			forState = float64(evalTime.Unix())
+		}
+		if test.result == nil {
+			forState = float64(value.StaleNaN)
+		}
+
+		res, err := rule.Eval(suite.Context(), evalTime, EngineQueryFunc(suite.QueryEngine(), suite.Storage()), nil)
+		testutil.Ok(t, err)
+
+		var filteredRes promql.Vector // After removing 'ALERTS' samples.
+		for _, smpl := range res {
+			smplName := smpl.Metric.Get("__name__")
+			if smplName == "ALERTS_FOR_STATE" {
+				filteredRes = append(filteredRes, smpl)
+			} else {
+				// If not 'ALERTS_FOR_STATE', it has to be 'ALERTS'.
+				testutil.Equals(t, smplName, "ALERTS")
+			}
+		}
+		for i := range test.result {
+			test.result[i].T = timestamp.FromTime(evalTime)
+			// Updating the expected 'for' state.
+			if test.result[i].V >= 0 {
+				test.result[i].V = forState
+			}
+		}
+		testutil.Assert(t, len(test.result) == len(filteredRes), "%d. Number of samples in expected and actual output don't match (%d vs. %d)", i, len(test.result), len(res))
+
+		sort.Slice(filteredRes, func(i, j int) bool {
+			return labels.Compare(filteredRes[i].Metric, filteredRes[j].Metric) < 0
+		})
+		testutil.Equals(t, test.result, filteredRes)
+
+		for _, aa := range rule.ActiveAlerts() {
+			testutil.Assert(t, aa.Labels.Get(model.MetricNameLabel) == "", "%s label set on active alert: %s", model.MetricNameLabel, aa.Labels)
+		}
+
+	}
+}
+
+// sortAlerts sorts `[]*Alert` w.r.t. the Labels.
+func sortAlerts(items []*Alert) {
+	sort.Slice(items, func(i, j int) bool {
+		return labels.Compare(items[i].Labels, items[j].Labels) <= 0
+	})
+}
+
+func TestForStateRestore(t *testing.T) {
+	suite, err := promql.NewTest(t, `
+		load 5m
+		http_requests{job="app-server", instance="0", group="canary", severity="overwrite-me"}	75  85 50 0 0 25 0 0 40 0 120
+		http_requests{job="app-server", instance="1", group="canary", severity="overwrite-me"}	125 90 60 0 0 25 0 0 40 0 130
+	`)
+	testutil.Ok(t, err)
+	defer suite.Close()
+
+	err = suite.Run()
+	testutil.Ok(t, err)
+
+	expr, err := parser.ParseExpr(`http_requests{group="canary", job="app-server"} < 100`)
+	testutil.Ok(t, err)
+
+	opts := &ManagerOptions{
+		QueryFunc:       EngineQueryFunc(suite.QueryEngine(), suite.Storage()),
+		Appendable:      AppendableAdapter{suite.Storage()},
+		Context:         context.Background(),
+		Logger:          log.NewNopLogger(),
+		NotifyFunc:      func(ctx context.Context, expr string, alerts ...*Alert) {},
+		OutageTolerance: 30 * time.Minute,
+		ForGracePeriod:  10 * time.Minute,
+	}
+	opts.AlertHistory = NewMetricsHistory(suite.Storage(), opts)
+
+	alertForDuration := 25 * time.Minute
+	// Initial run before prometheus goes down.
+	rule := NewAlertingRule(
+		"HTTPRequestRateLow",
+		expr,
+		alertForDuration,
+		labels.FromStrings("severity", "critical"),
+		nil, nil, true, nil,
+	)
+
+	group := NewGroup("default", "", time.Second, []Rule{rule}, true, opts)
+	groups := make(map[string]*Group)
+	groups["default;"] = group
+
+	initialRuns := []time.Duration{0, 5 * time.Minute}
+
+	baseTime := time.Unix(0, 0)
+	for _, duration := range initialRuns {
+		evalTime := baseTime.Add(duration)
+		group.Eval(suite.Context(), evalTime)
+	}
+
+	exp := rule.ActiveAlerts()
+	for _, aa := range exp {
+		testutil.Assert(t, aa.Labels.Get(model.MetricNameLabel) == "", "%s label set on active alert: %s", model.MetricNameLabel, aa.Labels)
+	}
+	sort.Slice(exp, func(i, j int) bool {
+		return labels.Compare(exp[i].Labels, exp[j].Labels) < 0
+	})
+
+	// Prometheus goes down here. We create new rules and groups.
+	type testInput struct {
+		restoreDuration time.Duration
+		alerts          []*Alert
+
+		num          int
+		noRestore    bool
+		gracePeriod  bool
+		downDuration time.Duration
+	}
+
+	tests := []testInput{
+		{
+			// Normal restore (alerts were not firing).
+			restoreDuration: 15 * time.Minute,
+			alerts:          rule.ActiveAlerts(),
+			downDuration:    10 * time.Minute,
+		},
+		{
+			// Testing Outage Tolerance.
+			restoreDuration: 40 * time.Minute,
+			noRestore:       true,
+			num:             2,
+		},
+		{
+			// No active alerts.
+			restoreDuration: 50 * time.Minute,
+			alerts:          []*Alert{},
+		},
+	}
+
+	testFunc := func(tst testInput) {
+		newRule := NewAlertingRule(
+			"HTTPRequestRateLow",
+			expr,
+			alertForDuration,
+			labels.FromStrings("severity", "critical"),
+			nil, nil, false, nil,
+		)
+		newGroup := NewGroup("default", "", time.Second, []Rule{newRule}, true, opts)
+
+		newGroups := make(map[string]*Group)
+		newGroups["default;"] = newGroup
+
+		restoreTime := baseTime.Add(tst.restoreDuration)
+		// First eval before restoration.
+		newGroup.Eval(suite.Context(), restoreTime)
+		// Restore happens here.
+		newGroup.RestoreForState(restoreTime)
+
+		got := newRule.ActiveAlerts()
+		for _, aa := range got {
+			testutil.Assert(t, aa.Labels.Get(model.MetricNameLabel) == "", "%s label set on active alert: %s", model.MetricNameLabel, aa.Labels)
+		}
+		sort.Slice(got, func(i, j int) bool {
+			return labels.Compare(got[i].Labels, got[j].Labels) < 0
+		})
+
+		// Checking if we have restored it correctly.
+		if tst.noRestore {
+			testutil.Equals(t, tst.num, len(got))
+			for _, e := range got {
+				testutil.Equals(t, e.ActiveAt, restoreTime)
+			}
+		} else if tst.gracePeriod {
+			testutil.Equals(t, tst.num, len(got))
+			for _, e := range got {
+				testutil.Equals(t, opts.ForGracePeriod, e.ActiveAt.Add(alertForDuration).Sub(restoreTime))
+			}
+		} else {
+			exp := tst.alerts
+			testutil.Equals(t, len(exp), len(got))
+			sortAlerts(exp)
+			sortAlerts(got)
+			for i, e := range exp {
+				testutil.Equals(t, e.Labels, got[i].Labels)
+
+				// Difference in time should be within 1e6 ns, i.e. 1ms
+				// (due to conversion between ns & ms, float64 & int64).
+				activeAtDiff := float64(e.ActiveAt.Unix() + int64(tst.downDuration/time.Second) - got[i].ActiveAt.Unix())
+				testutil.Assert(t, math.Abs(activeAtDiff) == 0, "'for' state restored time is wrong")
+			}
+		}
+	}
+
+	for _, tst := range tests {
+		testFunc(tst)
+	}
+
+	// Testing the grace period.
+	for _, duration := range []time.Duration{10 * time.Minute, 15 * time.Minute, 20 * time.Minute} {
+		evalTime := baseTime.Add(duration)
+		group.Eval(suite.Context(), evalTime)
+	}
+	testFunc(testInput{
+		restoreDuration: 25 * time.Minute,
+		alerts:          []*Alert{},
+		gracePeriod:     true,
+		num:             2,
+	})
+}
+
+func TestStaleness(t *testing.T) {
+	storage := teststorage.New(t)
+	defer storage.Close()
+	engineOpts := promql.EngineOpts{
+		Logger:     nil,
+		Reg:        nil,
+		MaxSamples: 10,
+		Timeout:    10 * time.Second,
+	}
+	engine := promql.NewEngine(engineOpts)
+	opts := &ManagerOptions{
+		QueryFunc:  EngineQueryFunc(engine, storage),
+		Appendable: AppendableAdapter{storage},
+		Context:    context.Background(),
+		Logger:     log.NewNopLogger(),
+	}
+	opts.AlertHistory = NewMetricsHistory(storage, opts)
+
+	expr, err := parser.ParseExpr("a + 1")
+	testutil.Ok(t, err)
+	rule := NewRecordingRule("a_plus_one", expr, labels.Labels{})
+	group := NewGroup("default", "", time.Second, []Rule{rule}, true, opts)
+
+	// A time series that has two samples and then goes stale.
+	app := storage.Appender()
+	app.Add(labels.FromStrings(model.MetricNameLabel, "a"), 0, 1)
+	app.Add(labels.FromStrings(model.MetricNameLabel, "a"), 1000, 2)
+	app.Add(labels.FromStrings(model.MetricNameLabel, "a"), 2000, math.Float64frombits(value.StaleNaN))
+
+	err = app.Commit()
+	testutil.Ok(t, err)
+
+	ctx := context.Background()
+
+	// Execute 3 times, 1 second apart.
+	group.Eval(ctx, time.Unix(0, 0))
+	group.Eval(ctx, time.Unix(1, 0))
+	group.Eval(ctx, time.Unix(2, 0))
+
+	querier, err := storage.Querier(context.Background(), 0, 2000)
+	testutil.Ok(t, err)
+	defer querier.Close()
+
+	matcher, err := labels.NewMatcher(labels.MatchEqual, model.MetricNameLabel, "a_plus_one")
+	testutil.Ok(t, err)
+
+	set := querier.Select(false, nil, matcher)
+
+	samples, err := readSeriesSet(set)
+	testutil.Ok(t, err)
+
+	metric := labels.FromStrings(model.MetricNameLabel, "a_plus_one").String()
+	metricSample, ok := samples[metric]
+
+	testutil.Assert(t, ok, "Series %s not returned.", metric)
+	testutil.Assert(t, value.IsStaleNaN(metricSample[2].V), "Appended second sample not as expected. Wanted: stale NaN Got: %x", math.Float64bits(metricSample[2].V))
+	metricSample[2].V = 42 // reflect.DeepEqual cannot handle NaN.
+
+	want := map[string][]promql.Point{
+		metric: {{T: 0, V: 2}, {T: 1000, V: 3}, {T: 2000, V: 42}},
+	}
+
+	testutil.Equals(t, want, samples)
+}
+
+// Convert a SeriesSet into a form usable with reflect.DeepEqual.
+func readSeriesSet(ss storage.SeriesSet) (map[string][]promql.Point, error) {
+	result := map[string][]promql.Point{}
+
+	for ss.Next() {
+		series := ss.At()
+
+		points := []promql.Point{}
+		it := series.Iterator()
+		for it.Next() {
+			t, v := it.At()
+			points = append(points, promql.Point{T: t, V: v})
+		}
+
+		name := series.Labels().String()
+		result[name] = points
+	}
+	return result, ss.Err()
+}
+
+func TestCopyState(t *testing.T) {
+	oldGroup := &Group{
+		rules: []Rule{
+			NewAlertingRule("alert", nil, 0, nil, nil, nil, true, nil),
+			NewRecordingRule("rule1", nil, nil),
+			NewRecordingRule("rule2", nil, nil),
+			NewRecordingRule("rule3", nil, labels.Labels{{Name: "l1", Value: "v1"}}),
+			NewRecordingRule("rule3", nil, labels.Labels{{Name: "l1", Value: "v2"}}),
+			NewRecordingRule("rule3", nil, labels.Labels{{Name: "l1", Value: "v3"}}),
+			NewAlertingRule("alert2", nil, 0, labels.Labels{{Name: "l2", Value: "v1"}}, nil, nil, true, nil),
+		},
+		seriesInPreviousEval: []map[string]labels.Labels{
+			{},
+			{},
+			{},
+			{"r3a": labels.Labels{{Name: "l1", Value: "v1"}}},
+			{"r3b": labels.Labels{{Name: "l1", Value: "v2"}}},
+			{"r3c": labels.Labels{{Name: "l1", Value: "v3"}}},
+			{"a2": labels.Labels{{Name: "l2", Value: "v1"}}},
+		},
+		evaluationDuration: time.Second,
+	}
+	oldGroup.rules[0].(*AlertingRule).active[42] = nil
+	newGroup := &Group{
+		rules: []Rule{
+			NewRecordingRule("rule3", nil, labels.Labels{{Name: "l1", Value: "v0"}}),
+			NewRecordingRule("rule3", nil, labels.Labels{{Name: "l1", Value: "v1"}}),
+			NewRecordingRule("rule3", nil, labels.Labels{{Name: "l1", Value: "v2"}}),
+			NewAlertingRule("alert", nil, 0, nil, nil, nil, true, nil),
+			NewRecordingRule("rule1", nil, nil),
+			NewAlertingRule("alert2", nil, 0, labels.Labels{{Name: "l2", Value: "v0"}}, nil, nil, true, nil),
+			NewAlertingRule("alert2", nil, 0, labels.Labels{{Name: "l2", Value: "v1"}}, nil, nil, true, nil),
+			NewRecordingRule("rule4", nil, nil),
+		},
+		seriesInPreviousEval: make([]map[string]labels.Labels, 8),
+	}
+	newGroup.CopyState(oldGroup)
+
+	want := []map[string]labels.Labels{
+		nil,
+		{"r3a": labels.Labels{{Name: "l1", Value: "v1"}}},
+		{"r3b": labels.Labels{{Name: "l1", Value: "v2"}}},
+		{},
+		{},
+		nil,
+		{"a2": labels.Labels{{Name: "l2", Value: "v1"}}},
+		nil,
+	}
+	testutil.Equals(t, want, newGroup.seriesInPreviousEval)
+	testutil.Equals(t, oldGroup.rules[0], newGroup.rules[3])
+	testutil.Equals(t, oldGroup.evaluationDuration, newGroup.evaluationDuration)
+	testutil.Equals(t, []labels.Labels{labels.Labels{{Name: "l1", Value: "v3"}}}, newGroup.staleSeries)
+}
+
+func TestDeletedRuleMarkedStale(t *testing.T) {
+	storage := teststorage.New(t)
+	defer storage.Close()
+	oldGroup := &Group{
+		rules: []Rule{
+			NewRecordingRule("rule1", nil, labels.Labels{{Name: "l1", Value: "v1"}}),
+		},
+		seriesInPreviousEval: []map[string]labels.Labels{
+			{"r1": labels.Labels{{Name: "l1", Value: "v1"}}},
+		},
+	}
+	newGroup := &Group{
+		rules:                []Rule{},
+		seriesInPreviousEval: []map[string]labels.Labels{},
+		opts: &ManagerOptions{
+			Appendable: AppendableAdapter{storage},
+		},
+	}
+	newGroup.CopyState(oldGroup)
+
+	newGroup.Eval(context.Background(), time.Unix(0, 0))
+
+	querier, err := storage.Querier(context.Background(), 0, 2000)
+	testutil.Ok(t, err)
+	defer querier.Close()
+
+	matcher, err := labels.NewMatcher(labels.MatchEqual, "l1", "v1")
+	testutil.Ok(t, err)
+
+	set := querier.Select(false, nil, matcher)
+
+	samples, err := readSeriesSet(set)
+	testutil.Ok(t, err)
+
+	metric := labels.FromStrings("l1", "v1").String()
+	metricSample, ok := samples[metric]
+
+	testutil.Assert(t, ok, "Series %s not returned.", metric)
+	testutil.Assert(t, value.IsStaleNaN(metricSample[0].V), "Appended sample not as expected. Wanted: stale NaN Got: %x", math.Float64bits(metricSample[0].V))
+}
+
+func TestUpdate(t *testing.T) {
+	files := []string{"fixtures/rules.yaml"}
+	expected := map[string]labels.Labels{
+		"test": labels.FromStrings("name", "value"),
+	}
+	storage := teststorage.New(t)
+	defer storage.Close()
+	opts := promql.EngineOpts{
+		Logger:     nil,
+		Reg:        nil,
+		MaxSamples: 10,
+		Timeout:    10 * time.Second,
+	}
+	engine := promql.NewEngine(opts)
+	mo := &ManagerOptions{
+		Appendable: AppendableAdapter{storage},
+		QueryFunc:  EngineQueryFunc(engine, storage),
+		Context:    context.Background(),
+		Logger:     log.NewNopLogger(),
+	}
+	mo.AlertHistory = NewMetricsHistory(storage, mo)
+	ruleManager := NewManager(mo)
+
+	ruleManager.Run()
+	defer ruleManager.Stop()
+
+	err := ruleManager.Update(10*time.Second, files, nil)
+	testutil.Ok(t, err)
+	testutil.Assert(t, len(ruleManager.groups) > 0, "expected non-empty rule groups")
+	ogs := map[string]*Group{}
+	for h, g := range ruleManager.groups {
+		g.seriesInPreviousEval = []map[string]labels.Labels{
+			expected,
+		}
+		ogs[h] = g
+	}
+
+	err = ruleManager.Update(10*time.Second, files, nil)
+	testutil.Ok(t, err)
+	for h, g := range ruleManager.groups {
+		for _, actual := range g.seriesInPreviousEval {
+			testutil.Equals(t, expected, actual)
+		}
+		// Groups are unchanged because there were no updates.
+		testutil.Equals(t, ogs[h], g)
+	}
+
+	// Groups will be recreated if updated.
+	rgs, errs := rulefmt.ParseFile("fixtures/rules.yaml")
+	testutil.Assert(t, len(errs) == 0, "file parsing failures")
+
+	tmpFile, err := ioutil.TempFile("", "rules.test.*.yaml")
+	testutil.Ok(t, err)
+	defer os.Remove(tmpFile.Name())
+	defer tmpFile.Close()
+
+	err = ruleManager.Update(10*time.Second, []string{tmpFile.Name()}, nil)
+	testutil.Ok(t, err)
+
+	for h, g := range ruleManager.groups {
+		ogs[h] = g
+	}
+
+	// Update interval and reload.
+	for i, g := range rgs.Groups {
+		if g.Interval != 0 {
+			rgs.Groups[i].Interval = g.Interval * 2
+		} else {
+			rgs.Groups[i].Interval = model.Duration(10)
+		}
+
+	}
+	reloadAndValidate(rgs, t, tmpFile, ruleManager, expected, ogs)
+
+	// Change group rules and reload.
+	for i, g := range rgs.Groups {
+		for j, r := range g.Rules {
+			rgs.Groups[i].Rules[j].Expr.SetString(fmt.Sprintf("%s * 0", r.Expr.Value))
+		}
+	}
+	reloadAndValidate(rgs, t, tmpFile, ruleManager, expected, ogs)
+}
+
+// ruleGroupsTest for running tests over rules.
+type ruleGroupsTest struct {
+	Groups []ruleGroupTest `yaml:"groups"`
+}
+
+// ruleGroupTest forms a testing struct for running tests over rules.
+type ruleGroupTest struct {
+	Name     string         `yaml:"name"`
+	Interval model.Duration `yaml:"interval,omitempty"`
+	Rules    []rulefmt.Rule `yaml:"rules"`
+}
+
+func formatRules(r *rulefmt.RuleGroups) ruleGroupsTest {
+	grps := r.Groups
+	tmp := []ruleGroupTest{}
+	for _, g := range grps {
+		rtmp := []rulefmt.Rule{}
+		for _, r := range g.Rules {
+			rtmp = append(rtmp, rulefmt.Rule{
+				Record:      r.Record.Value,
+				Alert:       r.Alert.Value,
+				Expr:        r.Expr.Value,
+				For:         r.For,
+				Labels:      r.Labels,
+				Annotations: r.Annotations,
+			})
+		}
+		tmp = append(tmp, ruleGroupTest{
+			Name:     g.Name,
+			Interval: g.Interval,
+			Rules:    rtmp,
+		})
+	}
+	return ruleGroupsTest{
+		Groups: tmp,
+	}
+}
+
+func reloadAndValidate(rgs *rulefmt.RuleGroups, t *testing.T, tmpFile *os.File, ruleManager *Manager, expected map[string]labels.Labels, ogs map[string]*Group) {
+	bs, err := yaml.Marshal(formatRules(rgs))
+	testutil.Ok(t, err)
+	tmpFile.Seek(0, 0)
+	_, err = tmpFile.Write(bs)
+	testutil.Ok(t, err)
+	err = ruleManager.Update(10*time.Second, []string{tmpFile.Name()}, nil)
+	testutil.Ok(t, err)
+	for h, g := range ruleManager.groups {
+		if ogs[h] == g {
+			t.Fail()
+		}
+		ogs[h] = g
+	}
+}
+
+func TestNotify(t *testing.T) {
+	storage := teststorage.New(t)
+	defer storage.Close()
+	engineOpts := promql.EngineOpts{
+		Logger:     nil,
+		Reg:        nil,
+		MaxSamples: 10,
+		Timeout:    10 * time.Second,
+	}
+	engine := promql.NewEngine(engineOpts)
+	var lastNotified []*Alert
+	notifyFunc := func(ctx context.Context, expr string, alerts ...*Alert) {
+		lastNotified = alerts
+	}
+	opts := &ManagerOptions{
+		QueryFunc:   EngineQueryFunc(engine, storage),
+		Appendable:  AppendableAdapter{storage},
+		Context:     context.Background(),
+		Logger:      log.NewNopLogger(),
+		NotifyFunc:  notifyFunc,
+		ResendDelay: 2 * time.Second,
+	}
+	opts.AlertHistory = NewMetricsHistory(storage, opts)
+
+	expr, err := parser.ParseExpr("a > 1")
+	testutil.Ok(t, err)
+	rule := NewAlertingRule("aTooHigh", expr, 0, labels.Labels{}, labels.Labels{}, nil, true, log.NewNopLogger())
+	group := NewGroup("alert", "", time.Second, []Rule{rule}, true, opts)
+
+	app := storage.Appender()
+	app.Add(labels.FromStrings(model.MetricNameLabel, "a"), 1000, 2)
+	app.Add(labels.FromStrings(model.MetricNameLabel, "a"), 2000, 3)
+	app.Add(labels.FromStrings(model.MetricNameLabel, "a"), 5000, 3)
+	app.Add(labels.FromStrings(model.MetricNameLabel, "a"), 6000, 0)
+
+	err = app.Commit()
+	testutil.Ok(t, err)
+
+	ctx := context.Background()
+
+	// Alert sent right away
+	group.Eval(ctx, time.Unix(1, 0))
+	testutil.Equals(t, 1, len(lastNotified))
+	testutil.Assert(t, !lastNotified[0].ValidUntil.IsZero(), "ValidUntil should not be zero")
+
+	// Alert is not sent 1s later
+	group.Eval(ctx, time.Unix(2, 0))
+	testutil.Equals(t, 0, len(lastNotified))
+
+	// Alert is resent at t=5s
+	group.Eval(ctx, time.Unix(5, 0))
+	testutil.Equals(t, 1, len(lastNotified))
+
+	// Resolution alert sent right away
+	group.Eval(ctx, time.Unix(6, 0))
+	testutil.Equals(t, 1, len(lastNotified))
+}
+
+func TestMetricsUpdate(t *testing.T) {
+	files := []string{"fixtures/rules.yaml", "fixtures/rules2.yaml"}
+	metricNames := []string{
+		"prometheus_rule_group_interval_seconds",
+		"prometheus_rule_group_last_duration_seconds",
+		"prometheus_rule_group_last_evaluation_timestamp_seconds",
+		"prometheus_rule_group_rules",
+	}
+
+	storage := teststorage.New(t)
+	registry := prometheus.NewRegistry()
+	defer storage.Close()
+	opts := promql.EngineOpts{
+		Logger:     nil,
+		Reg:        nil,
+		MaxSamples: 10,
+		Timeout:    10 * time.Second,
+	}
+	engine := promql.NewEngine(opts)
+	mo := &ManagerOptions{
+		Appendable: AppendableAdapter{storage},
+		QueryFunc:  EngineQueryFunc(engine, storage),
+		Context:    context.Background(),
+		Logger:     log.NewNopLogger(),
+		Registerer: registry,
+	}
+	mo.AlertHistory = NewMetricsHistory(storage, mo)
+	ruleManager := NewManager(mo)
+	ruleManager.Run()
+	defer ruleManager.Stop()
+
+	countMetrics := func() int {
+		ms, err := registry.Gather()
+		testutil.Ok(t, err)
+		var metrics int
+		for _, m := range ms {
+			s := m.GetName()
+			for _, n := range metricNames {
+				if s == n {
+					metrics += len(m.Metric)
+					break
+				}
+			}
+		}
+		return metrics
+	}
+
+	cases := []struct {
+		files   []string
+		metrics int
+	}{
+		{
+			files:   files,
+			metrics: 8,
+		},
+		{
+			files:   files[:1],
+			metrics: 4,
+		},
+		{
+			files:   files[:0],
+			metrics: 0,
+		},
+		{
+			files:   files[1:],
+			metrics: 4,
+		},
+	}
+
+	for i, c := range cases {
+		err := ruleManager.Update(time.Second, c.files, nil)
+		testutil.Ok(t, err)
+		time.Sleep(2 * time.Second)
+		testutil.Equals(t, c.metrics, countMetrics(), "test %d: invalid count of metrics", i)
+	}
+}
diff --git a/pkg/ruler/rules/query.go b/pkg/ruler/rules/query.go
new file mode 100644
index 0000000000000..279239955be32
--- /dev/null
+++ b/pkg/ruler/rules/query.go
@@ -0,0 +1,46 @@
+package rules
+
+import (
+	"context"
+	"time"
+
+	"github.com/grafana/loki/pkg/logproto"
+	"github.com/grafana/loki/pkg/logql"
+	"github.com/pkg/errors"
+	"github.com/prometheus/prometheus/pkg/labels"
+	"github.com/prometheus/prometheus/promql"
+	"github.com/prometheus/prometheus/rules"
+)
+
+func LokiDelayedQueryFunc(engine *logql.Engine, delay time.Duration) rules.QueryFunc {
+	return rules.QueryFunc(func(ctx context.Context, qs string, t time.Time) (promql.Vector, error) {
+		adjusted := t.Add(-delay)
+		params := logql.NewLiteralParams(
+			qs,
+			adjusted,
+			adjusted,
+			0,
+			0,
+			logproto.FORWARD,
+			0,
+			nil,
+		)
+		q := engine.Query(params)
+
+		res, err := q.Exec(ctx)
+		if err != nil {
+			return nil, err
+		}
+		switch v := res.Data.(type) {
+		case promql.Vector:
+			return v, nil
+		case promql.Scalar:
+			return promql.Vector{promql.Sample{
+				Point:  promql.Point(v),
+				Metric: labels.Labels{},
+			}}, nil
+		default:
+			return nil, errors.New("rule result is not a vector or scalar")
+		}
+	})
+}
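
LokiDelayedQueryFunc evaluates the rule expression as a LogQL instant query at t minus the configured delay and coerces the result into a promql.Vector (promoting scalars to single-sample vectors) so the Prometheus rules engine can consume it. A hedged, in-package sketch of calling it follows; the engine and the example query string are assumptions, not part of this patch.

// Sketch only: engine is assumed to be a configured *logql.Engine and the
// query is an illustrative LogQL metric query.
func evalRuleExpr(ctx context.Context, engine *logql.Engine) (promql.Vector, error) {
	qf := LokiDelayedQueryFunc(engine, time.Minute) // evaluate one minute behind the rule timestamp
	return qf(ctx, `sum(rate({app="foo"} |= "error" [5m]))`, time.Now())
}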

From 2d9ce8b24bde9a9fee10a78b72d955d28c538731 Mon Sep 17 00:00:00 2001
From: Owen Diehl 
Date: Wed, 8 Jul 2020 17:28:32 -0400
Subject: [PATCH 21/40] memstore work

---
 pkg/ruler/compat.go                |   2 +-
 pkg/ruler/manager/appender.go      | 100 ++++++++++++
 pkg/ruler/manager/memstore.go      | 248 +++++++++++++++++++++++++++++
 pkg/ruler/manager/memstore_test.go | 220 +++++++++++++++++++++++++
 pkg/ruler/memhistory.go            |   2 +-
 5 files changed, 570 insertions(+), 2 deletions(-)
 create mode 100644 pkg/ruler/manager/appender.go
 create mode 100644 pkg/ruler/manager/memstore.go
 create mode 100644 pkg/ruler/manager/memstore_test.go

diff --git a/pkg/ruler/compat.go b/pkg/ruler/compat.go
index 6cb285fe46968..c367cc9d7374f 100644
--- a/pkg/ruler/compat.go
+++ b/pkg/ruler/compat.go
@@ -4,9 +4,9 @@ import (
 	"errors"
 	"time"
 
-	"github.com/cortexproject/cortex/pkg/ruler/rules"
 	"github.com/prometheus/client_golang/prometheus"
 	"github.com/prometheus/prometheus/pkg/labels"
+	"github.com/prometheus/prometheus/rules"
 	"github.com/prometheus/prometheus/storage"
 )
 
diff --git a/pkg/ruler/manager/appender.go b/pkg/ruler/manager/appender.go
new file mode 100644
index 0000000000000..1c791c6ddcc2d
--- /dev/null
+++ b/pkg/ruler/manager/appender.go
@@ -0,0 +1,100 @@
+package manager
+
+import (
+	"context"
+	"errors"
+
+	"github.com/prometheus/prometheus/pkg/labels"
+	"github.com/prometheus/prometheus/storage"
+)
+
+const AlertForStateMetricName = "ALERTS_FOR_STATE"
+
+func (m *MemStore) Appender() storage.Appender { return m }
+
+// Add implements storage.Appender by filtering for the ALERTS_FOR_STATE series and mapping each one to a rule-specific appender.
+// This is used when a distinct rule group is loaded to see if it had been firing previously.
+func (m *MemStore) Add(ls labels.Labels, t int64, v float64) (uint64, error) {
+	var name string
+
+	for _, l := range ls {
+		if l.Name == labels.AlertName {
+			name = l.Value
+		}
+		if l.Name == labels.MetricName && l.Value != AlertForStateMetricName {
+			// This is not an ALERTS_FOR_STATE metric, skip
+			return 0, nil
+		}
+	}
+
+	m.mtx.Lock()
+	defer m.mtx.Unlock()
+
+	app, ok := m.appenders[name]
+	if !ok {
+		app = NewForStateAppender(m.metrics)
+		m.appenders[name] = app
+	}
+
+	return app.Add(ls, t, v)
+}
+
+func (m *MemStore) AddFast(ref uint64, t int64, v float64) error {
+	return errors.New("unimplemented")
+}
+
+func (m *MemStore) Commit() error { return nil }
+
+func (m *MemStore) Rollback() error { return nil }
+
+// implement storage.Queryable
+func (m *MemStore) Querier(ctx context.Context, mint, maxt int64) (storage.Querier, error) {
+	return &MemStoreQuerier{
+		mint:     mint,
+		maxt:     maxt,
+		MemStore: m,
+		ctx:      ctx,
+	}, nil
+
+}
+
+type MemStoreQuerier struct {
+	mint, maxt int64
+	ctx        context.Context
+	*MemStore
+}
+
+func (m *MemStoreQuerier) Select(sortSeries bool, params *storage.SelectHints, matchers ...*labels.Matcher) storage.SeriesSet {
+	var ruleKey string
+	for _, matcher := range matchers {
+		if matcher.Name == labels.AlertName && matcher.Type == labels.MatchEqual {
+			ruleKey = matcher.Value
+		}
+	}
+	if ruleKey == "" {
+		return storage.NoopSeriesSet()
+	}
+
+	m.MemStore.mtx.Lock()
+	defer m.MemStore.mtx.Unlock()
+
+	app, ok := m.MemStore.appenders[ruleKey]
+	if !ok {
+		return storage.NoopSeriesSet()
+	}
+
+	return app.Querier(m.ctx, m.mint, m.maxt).Select(sortSeries, params, matchers...)
+}
+
+// LabelValues returns all potential values for a label name.
+func (*MemStoreQuerier) LabelValues(name string) ([]string, storage.Warnings, error) {
+	return nil, nil, errors.New("unimplemented")
+}
+
+// LabelNames returns all the unique label names present in the block in sorted order.
+func (*MemStoreQuerier) LabelNames() ([]string, storage.Warnings, error) {
+	return nil, nil, errors.New("unimplemented")
+}
+
+// Close releases the resources of the Querier.
+func (*MemStoreQuerier) Close() error { return nil }
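
The appender above discards everything except ALERTS_FOR_STATE samples and buckets them by alertname, so a newly loaded rule group can check whether its alerts were already firing. Below is a rough round-trip sketch in the same package; the alert name and labels are made up for illustration.

// Sketch only: add an ALERTS_FOR_STATE sample and read it back through the querier.
func restoreStateExample(store *MemStore) (storage.SeriesSet, error) {
	ls := labels.FromStrings(
		labels.MetricName, AlertForStateMetricName,
		labels.AlertName, "HighErrorRate", // hypothetical alert name
		"severity", "critical",
	)
	nowMs := time.Now().Unix() * 1000

	// Samples for any other metric name are silently dropped by Add.
	if _, err := store.Add(ls, nowMs, float64(nowMs)); err != nil {
		return nil, err
	}

	q, err := store.Querier(context.Background(), 0, nowMs)
	if err != nil {
		return nil, err
	}
	defer q.Close()

	// The alertname matcher routes the select to the per-rule appender.
	return q.Select(false, nil,
		labels.MustNewMatcher(labels.MatchEqual, labels.AlertName, "HighErrorRate"),
	), nil
}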
diff --git a/pkg/ruler/manager/memstore.go b/pkg/ruler/manager/memstore.go
new file mode 100644
index 0000000000000..004c1786f9efa
--- /dev/null
+++ b/pkg/ruler/manager/memstore.go
@@ -0,0 +1,248 @@
+package manager
+
+import (
+	"context"
+	"errors"
+	"sync"
+	"time"
+
+	"github.com/cortexproject/cortex/pkg/querier/series"
+	"github.com/prometheus/client_golang/prometheus"
+	"github.com/prometheus/client_golang/prometheus/promauto"
+	"github.com/prometheus/common/model"
+	"github.com/prometheus/prometheus/pkg/labels"
+	"github.com/prometheus/prometheus/rules"
+	"github.com/prometheus/prometheus/storage"
+)
+
+type Metrics struct {
+	Series  prometheus.Gauge // in memory series
+	Samples prometheus.Gauge // in memory samples
+}
+
+func NewMetrics(r prometheus.Registerer) *Metrics {
+	return &Metrics{
+		Series: promauto.With(r).NewGauge(prometheus.GaugeOpts{
+			Namespace: "loki",
+			Name:      "ruler_memory_series",
+		}),
+		Samples: promauto.With(r).NewGauge(prometheus.GaugeOpts{
+			Namespace: "loki",
+			Name:      "ruler_memory_samples",
+		}),
+	}
+}
+
+type MemStore struct {
+	mtx       sync.Mutex
+	queryFunc rules.QueryFunc
+	appenders map[string]*ForStateAppender
+	metrics   *Metrics
+	mgr       *rules.Manager
+
+	cleanupInterval time.Duration
+	done            chan struct{}
+}
+
+func NewMemStore(mgr *rules.Manager, cleanupInterval time.Duration, metrics *Metrics) *MemStore {
+	s := &MemStore{
+		mgr:       mgr,
+		appenders: make(map[string]*ForStateAppender),
+		metrics:   metrics,
+
+		cleanupInterval: cleanupInterval,
+		done:            make(chan struct{}),
+	}
+	go s.run()
+	return s
+}
+
+func (m *MemStore) Stop() {
+	// Need to nil all series & decrement gauges
+	m.mtx.Lock()
+	defer m.mtx.Unlock()
+
+	select {
+	// ensures Stop() is idempotent
+	case <-m.done:
+		return
+	default:
+		for ruleKey, app := range m.appenders {
+			// Force cleanup of all samples older than time.Now (all of them).
+			_ = app.CleanupOldSamples(0)
+			delete(m.appenders, ruleKey)
+		}
+		close(m.done)
+	}
+}
+
+// run periodically cleans up old series/samples to ensure memory consumption doesn't grow unbounded.
+func (m *MemStore) run() {
+	t := time.NewTicker(m.cleanupInterval)
+	for {
+		select {
+		case <-m.done:
+			t.Stop()
+			return
+		case <-t.C:
+			m.mtx.Lock()
+			holdDurs := make(map[string]time.Duration)
+			for _, rule := range m.mgr.AlertingRules() {
+				holdDurs[rule.Name()] = rule.HoldDuration()
+			}
+
+			for ruleKey, app := range m.appenders {
+				dur, ok := holdDurs[ruleKey]
+
+				// rule is no longer being tracked, remove it
+				if !ok {
+					_ = app.CleanupOldSamples(0)
+					delete(m.appenders, ruleKey)
+					continue
+				}
+
+				// trim older samples out of tracking bounds, doubled to buffer.
+				if rem := app.CleanupOldSamples(2 * dur); rem == 0 {
+					delete(m.appenders, ruleKey)
+				}
+
+			}
+
+			m.mtx.Unlock()
+		}
+	}
+}
+
+type ForStateAppender struct {
+	mtx     sync.Mutex
+	metrics *Metrics
+	data    map[uint64]*series.ConcreteSeries
+}
+
+func NewForStateAppender(metrics *Metrics) *ForStateAppender {
+	return &ForStateAppender{
+		data:    make(map[uint64]*series.ConcreteSeries),
+		metrics: metrics,
+	}
+}
+
+func (m *ForStateAppender) Add(ls labels.Labels, t int64, v float64) (uint64, error) {
+	for _, l := range ls {
+		if l.Name == labels.MetricName && l.Value != AlertForStateMetricName {
+			// This is not an ALERTS_FOR_STATE metric, skip
+			return 0, nil
+		}
+	}
+
+	m.mtx.Lock()
+	defer m.mtx.Unlock()
+
+	fp := ls.Hash()
+
+	if s, ok := m.data[fp]; ok {
+		priorLn := s.Len()
+		s.Add(model.SamplePair{
+			Timestamp: model.Time(t),
+			Value:     model.SampleValue(v),
+		})
+		m.metrics.Samples.Add(float64(s.Len() - priorLn))
+
+		return 0, nil
+	}
+	m.data[fp] = series.NewConcreteSeries(ls, []model.SamplePair{{Timestamp: model.Time(t), Value: model.SampleValue(v)}})
+	m.metrics.Series.Inc()
+	m.metrics.Samples.Inc()
+	return 0, nil
+
+}
+
+// CleanupOldSamples removes samples that are outside of the rule's `For` duration.
+func (m *ForStateAppender) CleanupOldSamples(lookback time.Duration) (seriesRemaining int) {
+	m.mtx.Lock()
+	defer m.mtx.Unlock()
+
+	for fp, s := range m.data {
+		// release all older references that are no longer needed.
+		priorLn := s.Len()
+		s.TrimStart(time.Now().Add(-lookback))
+		m.metrics.Samples.Add(float64(s.Len() - priorLn))
+		if s.Len() == 0 {
+			m.metrics.Series.Dec()
+			delete(m.data, fp)
+		}
+	}
+
+	return len(m.data)
+
+}
+
+func (m *ForStateAppender) AddFast(ref uint64, t int64, v float64) error {
+	return errors.New("unimplemented")
+}
+
+func (m *ForStateAppender) Commit() error { return nil }
+
+func (m *ForStateAppender) Rollback() error { return nil }
+
+// implement storage.Queryable
+func (m *ForStateAppender) Querier(ctx context.Context, mint, maxt int64) ForStateAppenderQuerier {
+	return ForStateAppenderQuerier{
+		mint:             mint,
+		maxt:             maxt,
+		ForStateAppender: m,
+	}
+}
+
+// ForStateAppenderQuerier wraps a *ForStateAppender and implements storage.Querier
+type ForStateAppenderQuerier struct {
+	mint, maxt int64
+	*ForStateAppender
+}
+
+// Select returns a set of series that matches the given label matchers.
+func (q ForStateAppenderQuerier) Select(sortSeries bool, params *storage.SelectHints, matchers ...*labels.Matcher) storage.SeriesSet {
+	// TODO: implement sorted selects (currently unused/ignored).
+	q.mtx.Lock()
+	defer q.mtx.Unlock()
+
+	seekTo := q.mint
+	if params != nil && seekTo < params.Start {
+		seekTo = params.Start
+	}
+
+	maxt := q.maxt
+	if params != nil && params.End < maxt {
+		maxt = params.End
+	}
+
+	var filtered []storage.Series
+outer:
+	for _, s := range q.data {
+		for _, matcher := range matchers {
+			if !matcher.Matches(s.Labels().Get(matcher.Name)) {
+				continue outer
+			}
+
+			iter := s.Iterator()
+			var samples []model.SamplePair
+			for ok := iter.Seek(seekTo); ok; ok = iter.Next() {
+				t, v := iter.At()
+				if t > maxt {
+					break
+				}
+
+				samples = append(samples, model.SamplePair{
+					Timestamp: model.Time(t),
+					Value:     model.SampleValue(v),
+				})
+
+			}
+
+			if len(samples) != 0 {
+				filtered = append(filtered, series.NewConcreteSeries(s.Labels(), samples))
+			}
+		}
+	}
+
+	return series.NewConcreteSeriesSet(filtered)
+}
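
MemStore ties these per-rule appenders to the rule manager: its cleanup loop drops series for rules that no longer exist and trims samples older than twice each rule's hold duration. A hedged construction sketch against the signature as defined here; mgr, reg, and the five-minute interval are illustrative assumptions.

// Sketch only: mgr is assumed to be a configured *rules.Manager whose
// alerting rules drive the cleanup loop.
func newRuleStateStore(mgr *rules.Manager, reg prometheus.Registerer) *MemStore {
	store := NewMemStore(mgr, 5*time.Minute, NewMetrics(reg))
	// Call store.Stop() on shutdown; it is idempotent and releases all
	// retained series and samples.
	return store
}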
diff --git a/pkg/ruler/manager/memstore_test.go b/pkg/ruler/manager/memstore_test.go
new file mode 100644
index 0000000000000..3c9b8b00c115e
--- /dev/null
+++ b/pkg/ruler/manager/memstore_test.go
@@ -0,0 +1,220 @@
+package manager
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"testing"
+	"time"
+
+	"github.com/cortexproject/cortex/pkg/querier/series"
+	"github.com/cortexproject/cortex/pkg/util"
+	"github.com/go-kit/kit/log"
+	"github.com/prometheus/common/model"
+	"github.com/prometheus/prometheus/pkg/labels"
+	"github.com/prometheus/prometheus/promql/parser"
+	"github.com/prometheus/prometheus/rules"
+	"github.com/prometheus/prometheus/storage"
+	"github.com/stretchr/testify/require"
+)
+
+type NoopAppender struct{}
+
+func (a NoopAppender) Appender() (storage.Appender, error)                     { return a, nil }
+func (a NoopAppender) Add(l labels.Labels, t int64, v float64) (uint64, error) { return 0, nil }
+func (a NoopAppender) AddFast(ref uint64, t int64, v float64) error {
+	return errors.New("unimplemented")
+}
+func (a NoopAppender) Commit() error   { return nil }
+func (a NoopAppender) Rollback() error { return nil }
+
+func TestMemStoreStop(t *testing.T) {
+	hist := NewMemStore(&rules.Manager{}, time.Millisecond, NewMetrics(nil))
+	<-time.After(2 * time.Millisecond) // allow it to start ticking (not strictly required for this test)
+	hist.Stop()
+	// ensure idempotency
+	hist.Stop()
+
+	// ensure ticker is cleaned up
+	select {
+	case <-time.After(10 * time.Millisecond):
+		t.Fatalf("done channel not closed")
+	case <-hist.done:
+	}
+}
+
+func mustParseLabels(s string) labels.Labels {
+	labels, err := parser.ParseMetric(s)
+	if err != nil {
+		panic(fmt.Sprintf("failed to parse %s", s))
+	}
+
+	return labels
+}
+
+func newRule(name, qry, ls string, forDur time.Duration) *rules.AlertingRule {
+	return rules.NewAlertingRule(name, &parser.StringLiteral{Val: qry}, forDur, mustParseLabels(ls), nil, nil, false, log.NewNopLogger())
+}
+
+func TestForStateAppenderAdd(t *testing.T) {
+	app := NewForStateAppender(NewMetrics(nil))
+	require.Equal(t, map[uint64]*series.ConcreteSeries{}, app.data)
+
+	// create first series
+	first := mustParseLabels(`{foo="bar", bazz="buzz", __name__="ALERTS_FOR_STATE"}`)
+	_, err := app.Add(first, 1, 1)
+	require.Nil(t, err)
+	require.Equal(t, map[uint64]*series.ConcreteSeries{
+		first.Hash(): series.NewConcreteSeries(
+			first, []model.SamplePair{{Timestamp: model.Time(1), Value: model.SampleValue(1)}},
+		),
+	}, app.data)
+
+	// create second series
+	second := mustParseLabels(`{foo="bar", bazz="barf", __name__="ALERTS_FOR_STATE"}`)
+	_, err = app.Add(second, 1, 1)
+	require.Nil(t, err)
+
+	require.Equal(t, map[uint64]*series.ConcreteSeries{
+		first.Hash(): series.NewConcreteSeries(
+			first, []model.SamplePair{{Timestamp: model.Time(1), Value: model.SampleValue(1)}},
+		),
+		second.Hash(): series.NewConcreteSeries(
+			second, []model.SamplePair{{Timestamp: model.Time(1), Value: model.SampleValue(1)}},
+		),
+	}, app.data)
+
+	// append first series
+	_, err = app.Add(first, 3, 3)
+	require.Nil(t, err)
+
+	require.Equal(t, map[uint64]*series.ConcreteSeries{
+		first.Hash(): series.NewConcreteSeries(
+			first, []model.SamplePair{
+				{Timestamp: model.Time(1), Value: model.SampleValue(1)},
+				{Timestamp: model.Time(3), Value: model.SampleValue(3)},
+			},
+		),
+		second.Hash(): series.NewConcreteSeries(
+			second, []model.SamplePair{{Timestamp: model.Time(1), Value: model.SampleValue(1)}},
+		),
+	}, app.data)
+
+	// insert new points at correct position
+	_, err = app.Add(first, 2, 2)
+	require.Nil(t, err)
+
+	require.Equal(t, map[uint64]*series.ConcreteSeries{
+		first.Hash(): series.NewConcreteSeries(
+			first, []model.SamplePair{
+				{Timestamp: model.Time(1), Value: model.SampleValue(1)},
+				{Timestamp: model.Time(2), Value: model.SampleValue(2)},
+				{Timestamp: model.Time(3), Value: model.SampleValue(3)},
+			},
+		),
+		second.Hash(): series.NewConcreteSeries(
+			second, []model.SamplePair{{Timestamp: model.Time(1), Value: model.SampleValue(1)}},
+		),
+	}, app.data)
+
+	// ignore non ALERTS_FOR_STATE metrics
+	_, err = app.Add(mustParseLabels(`{foo="bar", bazz="barf", __name__="test"}`), 1, 1)
+	require.Nil(t, err)
+
+	require.Equal(t, map[uint64]*series.ConcreteSeries{
+		first.Hash(): series.NewConcreteSeries(
+			first, []model.SamplePair{
+				{Timestamp: model.Time(1), Value: model.SampleValue(1)},
+				{Timestamp: model.Time(2), Value: model.SampleValue(2)},
+				{Timestamp: model.Time(3), Value: model.SampleValue(3)},
+			},
+		),
+		second.Hash(): series.NewConcreteSeries(
+			second, []model.SamplePair{{Timestamp: model.Time(1), Value: model.SampleValue(1)}},
+		),
+	}, app.data)
+}
+
+func TestForStateAppenderCleanup(t *testing.T) {
+	app := NewForStateAppender(NewMetrics(nil))
+	now := time.Now()
+
+	// create ls series
+	ls := mustParseLabels(`{foo="bar", bazz="buzz", __name__="ALERTS_FOR_STATE"}`)
+	_, err := app.Add(ls, util.TimeToMillis(now.Add(-3*time.Minute)), 1)
+	require.Nil(t, err)
+	_, err = app.Add(ls, util.TimeToMillis(now.Add(-1*time.Minute)), 2)
+	require.Nil(t, err)
+
+	rem := app.CleanupOldSamples(time.Minute)
+	require.Equal(t, 1, rem)
+
+	require.Equal(t, map[uint64]*series.ConcreteSeries{
+		ls.Hash(): series.NewConcreteSeries(
+			ls, []model.SamplePair{
+				{Timestamp: model.Time(util.TimeToMillis(now.Add(-1 * time.Minute))), Value: model.SampleValue(2)},
+			},
+		),
+	}, app.data)
+
+}
+
+func TestForStateAppenderQuerier(t *testing.T) {
+	app := NewForStateAppender(NewMetrics(nil))
+	now := time.Now()
+
+	// create ls series
+	ls := mustParseLabels(`{foo="bar", bazz="buzz", __name__="ALERTS_FOR_STATE"}`)
+	_, err := app.Add(ls, util.TimeToMillis(now.Add(-3*time.Minute)), 1)
+	require.Nil(t, err)
+	_, err = app.Add(ls, util.TimeToMillis(now.Add(-1*time.Minute)), 2)
+	require.Nil(t, err)
+	_, err = app.Add(ls, util.TimeToMillis(now.Add(1*time.Minute)), 3)
+	require.Nil(t, err)
+
+	// never included due to bounds
+	_, err = app.Add(mustParseLabels(`{foo="bar", bazz="blip", __name__="ALERTS_FOR_STATE"}`), util.TimeToMillis(now.Add(-2*time.Hour)), 3)
+	require.Nil(t, err)
+
+	// should succeed with nil selecthints
+	q := app.Querier(context.Background(), util.TimeToMillis(now.Add(-2*time.Minute)), util.TimeToMillis(now))
+
+	set := q.Select(
+		false,
+		nil,
+		labels.MustNewMatcher(labels.MatchEqual, labels.MetricName, AlertForStateMetricName),
+	)
+	require.Equal(
+		t,
+		series.NewConcreteSeriesSet(
+			[]storage.Series{
+				series.NewConcreteSeries(ls, []model.SamplePair{
+					{Timestamp: model.Time(util.TimeToMillis(now.Add(-1 * time.Minute))), Value: model.SampleValue(2)},
+				}),
+			},
+		),
+		set,
+	)
+
+	// should be able to minimize selection window via hints
+	q = app.Querier(context.Background(), util.TimeToMillis(now.Add(-time.Hour)), util.TimeToMillis(now.Add(time.Hour)))
+	set2 := q.Select(
+		false,
+		&storage.SelectHints{
+			Start: util.TimeToMillis(now.Add(-2 * time.Minute)),
+			End:   util.TimeToMillis(now),
+		},
+		labels.MustNewMatcher(labels.MatchEqual, labels.MetricName, AlertForStateMetricName),
+	)
+	require.Equal(
+		t,
+		series.NewConcreteSeriesSet(
+			[]storage.Series{
+				series.NewConcreteSeries(ls, []model.SamplePair{
+					{Timestamp: model.Time(util.TimeToMillis(now.Add(-1 * time.Minute))), Value: model.SampleValue(2)},
+				}),
+			},
+		),
+		set2,
+	)
+}
diff --git a/pkg/ruler/memhistory.go b/pkg/ruler/memhistory.go
index 1321fdf1fdb20..47dd1f474e203 100644
--- a/pkg/ruler/memhistory.go
+++ b/pkg/ruler/memhistory.go
@@ -7,13 +7,13 @@ import (
 	"time"
 
 	"github.com/cortexproject/cortex/pkg/querier/series"
-	"github.com/cortexproject/cortex/pkg/ruler/rules"
 	"github.com/cortexproject/cortex/pkg/util"
 	"github.com/go-kit/kit/log/level"
 	"github.com/prometheus/client_golang/prometheus"
 	"github.com/prometheus/client_golang/prometheus/promauto"
 	"github.com/prometheus/common/model"
 	"github.com/prometheus/prometheus/pkg/labels"
+	"github.com/prometheus/prometheus/rules"
 	"github.com/prometheus/prometheus/storage"
 )
 

From e825231a41cfc680cb660b767ea7e0500168d750 Mon Sep 17 00:00:00 2001
From: Owen Diehl 
Date: Wed, 8 Jul 2020 21:45:52 -0400
Subject: [PATCH 22/40] work on queryable based in memory series store

---
 pkg/ruler/manager/appender.go      | 100 -------
 pkg/ruler/manager/memstore.go      | 323 +++++++++++++++--------
 pkg/ruler/manager/memstore_test.go | 408 ++++++++++++++---------------
 3 files changed, 399 insertions(+), 432 deletions(-)
 delete mode 100644 pkg/ruler/manager/appender.go

diff --git a/pkg/ruler/manager/appender.go b/pkg/ruler/manager/appender.go
deleted file mode 100644
index 1c791c6ddcc2d..0000000000000
--- a/pkg/ruler/manager/appender.go
+++ /dev/null
@@ -1,100 +0,0 @@
-package manager
-
-import (
-	"context"
-	"errors"
-
-	"github.com/prometheus/prometheus/pkg/labels"
-	"github.com/prometheus/prometheus/storage"
-)
-
-const AlertForStateMetricName = "ALERTS_FOR_STATE"
-
-func (m *MemStore) Appender() storage.Appender { return m }
-
-// Add implements storage.Appender by filtering for the ALERTS_FOR_STATE series and mapping each one to a rule-specific appender.
-// This is used when a distinct rule group is loaded to see if it had been firing previously.
-func (m *MemStore) Add(ls labels.Labels, t int64, v float64) (uint64, error) {
-	var name string
-
-	for _, l := range ls {
-		if l.Name == labels.AlertName {
-			name = l.Value
-		}
-		if l.Name == labels.MetricName && l.Value != AlertForStateMetricName {
-			// This is not an ALERTS_FOR_STATE metric, skip
-			return 0, nil
-		}
-	}
-
-	m.mtx.Lock()
-	defer m.mtx.Unlock()
-
-	app, ok := m.appenders[name]
-	if !ok {
-		app = NewForStateAppender(m.metrics)
-		m.appenders[name] = app
-	}
-
-	return app.Add(ls, t, v)
-}
-
-func (m *MemStore) AddFast(ref uint64, t int64, v float64) error {
-	return errors.New("unimplemented")
-}
-
-func (m *MemStore) Commit() error { return nil }
-
-func (m *MemStore) Rollback() error { return nil }
-
-// implement storage.Queryable
-func (m *MemStore) Querier(ctx context.Context, mint, maxt int64) (storage.Querier, error) {
-	return &MemStoreQuerier{
-		mint:     mint,
-		maxt:     maxt,
-		MemStore: m,
-		ctx:      ctx,
-	}, nil
-
-}
-
-type MemStoreQuerier struct {
-	mint, maxt int64
-	ctx        context.Context
-	*MemStore
-}
-
-func (m *MemStoreQuerier) Select(sortSeries bool, params *storage.SelectHints, matchers ...*labels.Matcher) storage.SeriesSet {
-	var ruleKey string
-	for _, matcher := range matchers {
-		if matcher.Name == labels.AlertName && matcher.Type == labels.MatchEqual {
-			ruleKey = matcher.Value
-		}
-	}
-	if ruleKey == "" {
-		return storage.NoopSeriesSet()
-	}
-
-	m.MemStore.mtx.Lock()
-	defer m.MemStore.mtx.Unlock()
-
-	app, ok := m.MemStore.appenders[ruleKey]
-	if !ok {
-		return storage.NoopSeriesSet()
-	}
-
-	return app.Querier(m.ctx, m.mint, m.maxt).Select(sortSeries, params, matchers...)
-}
-
-// LabelValues returns all potential values for a label name.
-func (*MemStoreQuerier) LabelValues(name string) ([]string, storage.Warnings, error) {
-	return nil, nil, errors.New("unimplemented")
-}
-
-// LabelNames returns all the unique label names present in the block in sorted order.
-func (*MemStoreQuerier) LabelNames() ([]string, storage.Warnings, error) {
-	return nil, nil, errors.New("unimplemented")
-}
-
-// Close releases the resources of the Querier.
-func (*MemStoreQuerier) Close() error { return nil }
diff --git a/pkg/ruler/manager/memstore.go b/pkg/ruler/manager/memstore.go
index 004c1786f9efa..7d0d0715b2564 100644
--- a/pkg/ruler/manager/memstore.go
+++ b/pkg/ruler/manager/memstore.go
@@ -7,25 +7,46 @@ import (
 	"time"
 
 	"github.com/cortexproject/cortex/pkg/querier/series"
+	"github.com/cortexproject/cortex/pkg/util"
+	"github.com/go-kit/kit/log"
+	"github.com/go-kit/kit/log/level"
 	"github.com/prometheus/client_golang/prometheus"
 	"github.com/prometheus/client_golang/prometheus/promauto"
 	"github.com/prometheus/common/model"
 	"github.com/prometheus/prometheus/pkg/labels"
+	"github.com/prometheus/prometheus/promql"
 	"github.com/prometheus/prometheus/rules"
 	"github.com/prometheus/prometheus/storage"
 )
 
+const (
+	AlertForStateMetricName = "ALERTS_FOR_STATE"
+	statusSuccess           = "success"
+	statusFailure           = "failure"
+)
+
+type NoopAppender struct{}
+
+func (a NoopAppender) Appender() (storage.Appender, error)                     { return a, nil }
+func (a NoopAppender) Add(l labels.Labels, t int64, v float64) (uint64, error) { return 0, nil }
+func (a NoopAppender) AddFast(ref uint64, t int64, v float64) error {
+	return errors.New("unimplemented")
+}
+func (a NoopAppender) Commit() error   { return nil }
+func (a NoopAppender) Rollback() error { return nil }
+
 type Metrics struct {
-	Series  prometheus.Gauge // in memory series
-	Samples prometheus.Gauge // in memory samples
+	evaluations *prometheus.CounterVec
+	Series      prometheus.Gauge // in memory series
+	Samples     prometheus.Gauge // in memory samples
 }
 
 func NewMetrics(r prometheus.Registerer) *Metrics {
 	return &Metrics{
-		Series: promauto.With(r).NewGauge(prometheus.GaugeOpts{
+		evaluations: promauto.With(r).NewCounterVec(prometheus.CounterOpts{
 			Namespace: "loki",
-			Name:      "ruler_memory_series",
-		}),
+			Name:      "ruler_memory_for_state_evaluations",
+		}, []string{"status", "tenant"}),
 		Samples: promauto.With(r).NewGauge(prometheus.GaugeOpts{
 			Namespace: "loki",
 			Name:      "ruler_memory_samples",
@@ -35,26 +56,32 @@ func NewMetrics(r prometheus.Registerer) *Metrics {
 
 type MemStore struct {
 	mtx       sync.Mutex
+	userID    string
 	queryFunc rules.QueryFunc
-	appenders map[string]*ForStateAppender
 	metrics   *Metrics
 	mgr       *rules.Manager
+	logger    log.Logger
+	rules     map[string]*RuleCache
 
-	cleanupInterval time.Duration
 	done            chan struct{}
+	cleanupInterval time.Duration
 }
 
-func NewMemStore(mgr *rules.Manager, cleanupInterval time.Duration, metrics *Metrics) *MemStore {
+func NewMemStore(userID string, mgr *rules.Manager, queryFunc rules.QueryFunc, metrics *Metrics, cleanupInterval time.Duration, logger log.Logger) *MemStore {
 	s := &MemStore{
-		mgr:       mgr,
-		appenders: make(map[string]*ForStateAppender),
-		metrics:   metrics,
-
+		userID:          userID,
+		metrics:         metrics,
+		queryFunc:       queryFunc,
+		logger:          logger,
+		mgr:             mgr,
 		cleanupInterval: cleanupInterval,
-		done:            make(chan struct{}),
+		rules:           make(map[string]*RuleCache),
+
+		done: make(chan struct{}),
 	}
 	go s.run()
 	return s
+
 }
 
 func (m *MemStore) Stop() {
@@ -67,10 +94,10 @@ func (m *MemStore) Stop() {
 	case <-m.done:
 		return
 	default:
-		for ruleKey, app := range m.appenders {
+		for ruleKey, cache := range m.rules {
 			// Force cleanup of all samples older than time.Now (all of them).
-			_ = app.CleanupOldSamples(0)
-			delete(m.appenders, ruleKey)
+			_ = cache.CleanupOldSamples(time.Now())
+			delete(m.rules, ruleKey)
 		}
 		close(m.done)
 	}
@@ -91,19 +118,19 @@ func (m *MemStore) run() {
 				holdDurs[rule.Name()] = rule.HoldDuration()
 			}
 
-			for ruleKey, app := range m.appenders {
+			for ruleKey, cache := range m.rules {
 				dur, ok := holdDurs[ruleKey]
 
 				// rule is no longer being tracked, remove it
 				if !ok {
-					_ = app.CleanupOldSamples(0)
-					delete(m.appenders, ruleKey)
+					_ = cache.CleanupOldSamples(time.Now())
+					delete(m.rules, ruleKey)
 					continue
 				}
 
 				// trim older samples out of tracking bounds, doubled to buffer.
-				if rem := app.CleanupOldSamples(2 * dur); rem == 0 {
-					delete(m.appenders, ruleKey)
+				if empty := cache.CleanupOldSamples(time.Now().Add(-2 * dur)); empty {
+					delete(m.rules, ruleKey)
 				}
 
 			}
@@ -113,136 +140,204 @@ func (m *MemStore) run() {
 	}
 }
 
-type ForStateAppender struct {
-	mtx     sync.Mutex
-	metrics *Metrics
-	data    map[uint64]*series.ConcreteSeries
+func (m *MemStore) Appender() storage.Appender { return NoopAppender{} }
+
+// implement storage.Queryable. It is only called with the desired ts as maxtime. Mint is
+// parameterized via the outage tolerance, but since we're synthetically generating these,
+// we only care about the desired time.
+func (m *MemStore) Querier(ctx context.Context, mint, maxt int64) (storage.Querier, error) {
+	return &MemStoreQuerier{
+		ts:       util.TimeFromMillis(maxt),
+		MemStore: m,
+		ctx:      ctx,
+	}, nil
+
 }
 
-func NewForStateAppender(metrics *Metrics) *ForStateAppender {
-	return &ForStateAppender{
-		data:    make(map[uint64]*series.ConcreteSeries),
-		metrics: metrics,
-	}
+type MemStoreQuerier struct {
+	ts  time.Time
+	ctx context.Context
+	*MemStore
 }
 
-func (m *ForStateAppender) Add(ls labels.Labels, t int64, v float64) (uint64, error) {
-	for _, l := range ls {
-		if l.Name == labels.MetricName && l.Value != AlertForStateMetricName {
-			// This is not an ALERTS_FOR_STATE metric, skip
-			return 0, nil
+func (m *MemStoreQuerier) Select(sortSeries bool, params *storage.SelectHints, matchers ...*labels.Matcher) storage.SeriesSet {
+	b := labels.NewBuilder(nil)
+	var ruleKey string
+	for _, matcher := range matchers {
+		b.Set(matcher.Name, matcher.Value)
+		if matcher.Name == labels.AlertName && matcher.Type == labels.MatchEqual {
+			ruleKey = matcher.Value
 		}
 	}
+	ls := b.Labels()
+	if ruleKey == "" {
+		level.Error(m.logger).Log("msg", "MemStoreQuerier.Select called in an unexpected fashion without alertname or ALERTS_FOR_STATE labels")
+		return storage.NoopSeriesSet()
+	}
 
-	m.mtx.Lock()
-	defer m.mtx.Unlock()
-
-	fp := ls.Hash()
-
-	if s, ok := m.data[fp]; ok {
-		priorLn := s.Len()
-		s.Add(model.SamplePair{
-			Timestamp: model.Time(t),
-			Value:     model.SampleValue(v),
-		})
-		m.metrics.Samples.Add(float64(s.Len() - priorLn))
+	var rule *rules.AlertingRule
 
-		return 0, nil
+	// go fetch the rule via the alertname
+	for _, x := range m.mgr.AlertingRules() {
+		if x.Name() == ruleKey {
+			rule = x
+			break
+		}
 	}
-	m.data[fp] = series.NewConcreteSeries(ls, []model.SamplePair{{Timestamp: model.Time(t), Value: model.SampleValue(v)}})
-	m.metrics.Series.Inc()
-	m.metrics.Samples.Inc()
-	return 0, nil
 
-}
+	// should not happen
+	if rule == nil {
+		level.Error(m.logger).Log("msg", "failure trying to restore for state for untracked alerting rule", "name", ruleKey)
+		return storage.NoopSeriesSet()
+	}
 
-// CleanupOldSamples removes samples that are outside of the rule's `For` duration.
-func (m *ForStateAppender) CleanupOldSamples(lookback time.Duration) (seriesRemaining int) {
 	m.mtx.Lock()
 	defer m.mtx.Unlock()
+	cache, ok := m.rules[ruleKey]
+
+	// no timestamp results are cached for this rule at all; create it.
+	if !ok {
+		cache = NewRuleCache(m.metrics)
+		m.rules[ruleKey] = cache
+	} else {
+		smpl, cached := cache.Get(m.ts, ls)
+		if cached {
+			// If the result is cached but the desired series is not in it, the series is not considered active.
+			if smpl == nil {
+				return storage.NoopSeriesSet()
+			}
 
-	for fp, s := range m.data {
-		// release all older references that are no longer needed.
-		priorLn := s.Len()
-		s.TrimStart(time.Now().Add(-lookback))
-		m.metrics.Samples.Add(float64(s.Len() - priorLn))
-		if s.Len() == 0 {
-			m.metrics.Series.Dec()
-			delete(m.data, fp)
+			// If the labelset is cached we can consider it active. Return the for state sample active immediately.
+			return series.NewConcreteSeriesSet(
+				[]storage.Series{
+					series.NewConcreteSeries(smpl.Metric, []model.SamplePair{
+						{Timestamp: model.Time(util.TimeToMillis(m.ts)), Value: model.SampleValue(smpl.V)},
+					}),
+				},
+			)
 		}
 	}
 
-	return len(m.data)
+	// see if alert condition had any inhabitants at ts-forDuration. We can assume it's still firing because
+	// that's the only condition under which this is queried (via RestoreForState).
+	vec, err := m.queryFunc(m.ctx, rule.Query().String(), m.ts.Add(-rule.HoldDuration()))
+	if err != nil {
+		m.metrics.evaluations.WithLabelValues(statusFailure, m.userID).Inc()
+		return storage.NoopSeriesSet()
+	}
+	m.metrics.evaluations.WithLabelValues(statusSuccess, m.userID).Inc()
+
+	// translate the result into the ALERTS_FOR_STATE series for caching,
+	// considered active & written at the timestamp requested
+	forStateVec := make(promql.Vector, 0, len(vec))
+	for _, smpl := range vec {
+		b := labels.NewBuilder(smpl.Metric)
+		b.Set(labels.MetricName, AlertForStateMetricName)
+		b.Set(labels.AlertName, rule.Name())
+
+		ts := util.TimeToMillis(m.ts)
+
+		forStateVec = append(forStateVec, promql.Sample{
+			Metric: b.Labels(),
+			Point: promql.Point{
+				T: ts,
+				V: float64(ts),
+			},
+		})
+
+	}
+
+	// cache the result of the evalauation at this timestamp
+	cache.Set(m.ts, forStateVec)
 
+	// Finally return the series if it exists
+	smpl, ok := cache.Get(m.ts, ls)
+	if !ok || smpl == nil {
+		return storage.NoopSeriesSet()
+	}
+	// If the labelset is cached we can consider it active. Return the for state sample active immediately.
+	return series.NewConcreteSeriesSet(
+		[]storage.Series{
+			series.NewConcreteSeries(smpl.Metric, []model.SamplePair{
+				{Timestamp: model.Time(util.TimeToMillis(m.ts)), Value: model.SampleValue(smpl.V)},
+			}),
+		},
+	)
 }
 
-func (m *ForStateAppender) AddFast(ref uint64, t int64, v float64) error {
-	return errors.New("unimplemented")
+// LabelValues returns all potential values for a label name.
+func (*MemStoreQuerier) LabelValues(name string) ([]string, storage.Warnings, error) {
+	return nil, nil, errors.New("unimplemented")
 }
 
-func (m *ForStateAppender) Commit() error { return nil }
+// LabelNames returns all the unique label names present in the block in sorted order.
+func (*MemStoreQuerier) LabelNames() ([]string, storage.Warnings, error) {
+	return nil, nil, errors.New("unimplemented")
+}
 
-func (m *ForStateAppender) Rollback() error { return nil }
+// Close releases the resources of the Querier.
+func (*MemStoreQuerier) Close() error { return nil }
 
-// implement storage.Queryable
-func (m *ForStateAppender) Querier(ctx context.Context, mint, maxt int64) ForStateAppenderQuerier {
-	return ForStateAppenderQuerier{
-		mint:             mint,
-		maxt:             maxt,
-		ForStateAppender: m,
-	}
+type RuleCache struct {
+	mtx     sync.Mutex
+	metrics *Metrics
+	data    map[int64]map[uint64]promql.Sample
 }
 
-// ForStateAppenderQuerier wraps a *ForStateAppender and implements storage.Querier
-type ForStateAppenderQuerier struct {
-	mint, maxt int64
-	*ForStateAppender
+func NewRuleCache(metrics *Metrics) *RuleCache {
+	return &RuleCache{
+		data:    make(map[int64]map[uint64]promql.Sample),
+		metrics: metrics,
+	}
 }
 
-// Select returns a set of series that matches the given label matchers.
-func (q ForStateAppenderQuerier) Select(sortSeries bool, params *storage.SelectHints, matchers ...*labels.Matcher) storage.SeriesSet {
-	// TODO: implement sorted selects (currently unused/ignored).
-	q.mtx.Lock()
-	defer q.mtx.Unlock()
+func (c *RuleCache) Set(ts time.Time, vec promql.Vector) {
+	c.mtx.Lock()
+	defer c.mtx.Unlock()
+	tsMap, ok := c.data[ts.UnixNano()]
+	if !ok {
+		tsMap = make(map[uint64]promql.Sample)
+		c.data[ts.UnixNano()] = tsMap
+	}
 
-	seekTo := q.mint
-	if params != nil && seekTo < params.Start {
-		seekTo = params.Start
+	for _, sample := range vec {
+		tsMap[sample.Metric.Hash()] = sample
 	}
+	c.metrics.Samples.Add(float64(len(vec)))
+}
+
+// Get returns ok if that timestamp's result is cached.
+func (c *RuleCache) Get(ts time.Time, ls labels.Labels) (pt *promql.Sample, ok bool) {
+	c.mtx.Lock()
+	defer c.mtx.Unlock()
 
-	maxt := q.maxt
-	if params != nil && params.End < maxt {
-		maxt = params.End
+	match, ok := c.data[ts.UnixNano()]
+	if !ok {
+		return pt, false
 	}
 
-	var filtered []storage.Series
-outer:
-	for _, s := range q.data {
-		for _, matcher := range matchers {
-			if !matcher.Matches(s.Labels().Get(matcher.Name)) {
-				continue outer
-			}
+	smp, ok := match[ls.Hash()]
+	if !ok {
+		return nil, true
+	}
+	return &smp, true
 
-			iter := s.Iterator()
-			var samples []model.SamplePair
-			for ok := iter.Seek(seekTo); ok; ok = iter.Next() {
-				t, v := iter.At()
-				if t > maxt {
-					break
-				}
+}
 
-				samples = append(samples, model.SamplePair{
-					Timestamp: model.Time(t),
-					Value:     model.SampleValue(v),
-				})
+// CleanupOldSamples removes samples that are outside of the rule's `For` duration.
+func (c *RuleCache) CleanupOldSamples(olderThan time.Time) (empty bool) {
+	c.mtx.Lock()
+	defer c.mtx.Unlock()
 
-			}
+	ns := olderThan.UnixNano()
 
-			if len(samples) != 0 {
-				filtered = append(filtered, series.NewConcreteSeries(s.Labels(), samples))
-			}
+	// This could be more efficient (logarithmic instead of linear)
+	for ts, tsMap := range c.data {
+		if ts < ns {
+			delete(c.data, ts)
+			c.metrics.Samples.Add(-float64(len(tsMap)))
 		}
-	}
 
-	return series.NewConcreteSeriesSet(filtered)
+	}
+	return len(c.data) == 0
 }
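
Note: the RuleCache introduced above keys cached evaluation results by the evaluation timestamp, holding one sample per series hash, and CleanupOldSamples drops whole timestamp buckets older than a cutoff. Below is a minimal, self-contained sketch of that idea using only the Go standard library; the names (tsCache, Set, Get, Cleanup) and the plain float64 values are illustrative stand-ins, not the actual Loki types.

```go
package main

import (
	"fmt"
	"sync"
	"time"
)

// tsCache maps an evaluation timestamp to the samples produced at that
// timestamp, keyed by series hash.
type tsCache struct {
	mtx  sync.Mutex
	data map[int64]map[uint64]float64
}

func newTSCache() *tsCache {
	return &tsCache{data: make(map[int64]map[uint64]float64)}
}

// Set records the samples produced by one evaluation at ts.
func (c *tsCache) Set(ts time.Time, samples map[uint64]float64) {
	c.mtx.Lock()
	defer c.mtx.Unlock()
	bucket, ok := c.data[ts.UnixNano()]
	if !ok {
		bucket = make(map[uint64]float64)
		c.data[ts.UnixNano()] = bucket
	}
	for h, v := range samples {
		bucket[h] = v
	}
}

// Get reports whether an evaluation at ts is cached and, if so, whether the
// requested series hash was part of its result.
func (c *tsCache) Get(ts time.Time, hash uint64) (v float64, seriesFound, evalCached bool) {
	c.mtx.Lock()
	defer c.mtx.Unlock()
	bucket, ok := c.data[ts.UnixNano()]
	if !ok {
		return 0, false, false
	}
	v, seriesFound = bucket[hash]
	return v, seriesFound, true
}

// Cleanup drops whole evaluation buckets older than the cutoff and reports
// whether the cache is now empty.
func (c *tsCache) Cleanup(olderThan time.Time) bool {
	c.mtx.Lock()
	defer c.mtx.Unlock()
	for ts := range c.data {
		if ts < olderThan.UnixNano() {
			delete(c.data, ts)
		}
	}
	return len(c.data) == 0
}

func main() {
	c := newTSCache()
	now := time.Now()
	c.Set(now, map[uint64]float64{42: float64(now.UnixMilli())})
	if v, found, cached := c.Get(now, 42); cached && found {
		fmt.Println("cached value:", v)
	}
	fmt.Println("empty after cleanup:", c.Cleanup(now.Add(time.Second)))
}
```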
diff --git a/pkg/ruler/manager/memstore_test.go b/pkg/ruler/manager/memstore_test.go
index 3c9b8b00c115e..6ec0ba1e0b246 100644
--- a/pkg/ruler/manager/memstore_test.go
+++ b/pkg/ruler/manager/memstore_test.go
@@ -1,220 +1,192 @@
 package manager
 
-import (
-	"context"
-	"errors"
-	"fmt"
-	"testing"
-	"time"
-
-	"github.com/cortexproject/cortex/pkg/querier/series"
-	"github.com/cortexproject/cortex/pkg/util"
-	"github.com/go-kit/kit/log"
-	"github.com/prometheus/common/model"
-	"github.com/prometheus/prometheus/pkg/labels"
-	"github.com/prometheus/prometheus/promql/parser"
-	"github.com/prometheus/prometheus/rules"
-	"github.com/prometheus/prometheus/storage"
-	"github.com/stretchr/testify/require"
-)
-
-type NoopAppender struct{}
-
-func (a NoopAppender) Appender() (storage.Appender, error)                     { return a, nil }
-func (a NoopAppender) Add(l labels.Labels, t int64, v float64) (uint64, error) { return 0, nil }
-func (a NoopAppender) AddFast(ref uint64, t int64, v float64) error {
-	return errors.New("unimplemented")
-}
-func (a NoopAppender) Commit() error   { return nil }
-func (a NoopAppender) Rollback() error { return nil }
-
-func TestMemStoreStop(t *testing.T) {
-	hist := NewMemStore(&rules.Manager{}, time.Millisecond, NewMetrics(nil))
-	<-time.After(2 * time.Millisecond) // allow it to start ticking (not strictly required for this test)
-	hist.Stop()
-	// ensure idempotency
-	hist.Stop()
-
-	// ensure ticker is cleaned up
-	select {
-	case <-time.After(10 * time.Millisecond):
-		t.Fatalf("done channel not closed")
-	case <-hist.done:
-	}
-}
-
-func mustParseLabels(s string) labels.Labels {
-	labels, err := parser.ParseMetric(s)
-	if err != nil {
-		panic(fmt.Sprintf("failed to parse %s", s))
-	}
-
-	return labels
-}
-
-func newRule(name, qry, ls string, forDur time.Duration) *rules.AlertingRule {
-	return rules.NewAlertingRule(name, &parser.StringLiteral{Val: qry}, forDur, mustParseLabels(ls), nil, nil, false, log.NewNopLogger())
-}
-
-func TestForStateAppenderAdd(t *testing.T) {
-	app := NewForStateAppender(NewMetrics(nil))
-	require.Equal(t, map[uint64]*series.ConcreteSeries{}, app.data)
-
-	// create first series
-	first := mustParseLabels(`{foo="bar", bazz="buzz", __name__="ALERTS_FOR_STATE"}`)
-	_, err := app.Add(first, 1, 1)
-	require.Nil(t, err)
-	require.Equal(t, map[uint64]*series.ConcreteSeries{
-		first.Hash(): series.NewConcreteSeries(
-			first, []model.SamplePair{{Timestamp: model.Time(1), Value: model.SampleValue(1)}},
-		),
-	}, app.data)
-
-	// create second series
-	second := mustParseLabels(`{foo="bar", bazz="barf", __name__="ALERTS_FOR_STATE"}`)
-	_, err = app.Add(second, 1, 1)
-	require.Nil(t, err)
-
-	require.Equal(t, map[uint64]*series.ConcreteSeries{
-		first.Hash(): series.NewConcreteSeries(
-			first, []model.SamplePair{{Timestamp: model.Time(1), Value: model.SampleValue(1)}},
-		),
-		second.Hash(): series.NewConcreteSeries(
-			second, []model.SamplePair{{Timestamp: model.Time(1), Value: model.SampleValue(1)}},
-		),
-	}, app.data)
-
-	// append first series
-	_, err = app.Add(first, 3, 3)
-	require.Nil(t, err)
-
-	require.Equal(t, map[uint64]*series.ConcreteSeries{
-		first.Hash(): series.NewConcreteSeries(
-			first, []model.SamplePair{
-				{Timestamp: model.Time(1), Value: model.SampleValue(1)},
-				{Timestamp: model.Time(3), Value: model.SampleValue(3)},
-			},
-		),
-		second.Hash(): series.NewConcreteSeries(
-			second, []model.SamplePair{{Timestamp: model.Time(1), Value: model.SampleValue(1)}},
-		),
-	}, app.data)
-
-	// insert new points at correct position
-	_, err = app.Add(first, 2, 2)
-	require.Nil(t, err)
-
-	require.Equal(t, map[uint64]*series.ConcreteSeries{
-		first.Hash(): series.NewConcreteSeries(
-			first, []model.SamplePair{
-				{Timestamp: model.Time(1), Value: model.SampleValue(1)},
-				{Timestamp: model.Time(2), Value: model.SampleValue(2)},
-				{Timestamp: model.Time(3), Value: model.SampleValue(3)},
-			},
-		),
-		second.Hash(): series.NewConcreteSeries(
-			second, []model.SamplePair{{Timestamp: model.Time(1), Value: model.SampleValue(1)}},
-		),
-	}, app.data)
-
-	// ignore non ALERTS_FOR_STATE metrics
-	_, err = app.Add(mustParseLabels(`{foo="bar", bazz="barf", __name__="test"}`), 1, 1)
-	require.Nil(t, err)
-
-	require.Equal(t, map[uint64]*series.ConcreteSeries{
-		first.Hash(): series.NewConcreteSeries(
-			first, []model.SamplePair{
-				{Timestamp: model.Time(1), Value: model.SampleValue(1)},
-				{Timestamp: model.Time(2), Value: model.SampleValue(2)},
-				{Timestamp: model.Time(3), Value: model.SampleValue(3)},
-			},
-		),
-		second.Hash(): series.NewConcreteSeries(
-			second, []model.SamplePair{{Timestamp: model.Time(1), Value: model.SampleValue(1)}},
-		),
-	}, app.data)
-}
-
-func TestForStateAppenderCleanup(t *testing.T) {
-	app := NewForStateAppender(NewMetrics(nil))
-	now := time.Now()
-
-	// create ls series
-	ls := mustParseLabels(`{foo="bar", bazz="buzz", __name__="ALERTS_FOR_STATE"}`)
-	_, err := app.Add(ls, util.TimeToMillis(now.Add(-3*time.Minute)), 1)
-	require.Nil(t, err)
-	_, err = app.Add(ls, util.TimeToMillis(now.Add(-1*time.Minute)), 2)
-	require.Nil(t, err)
-
-	rem := app.CleanupOldSamples(time.Minute)
-	require.Equal(t, 1, rem)
-
-	require.Equal(t, map[uint64]*series.ConcreteSeries{
-		ls.Hash(): series.NewConcreteSeries(
-			ls, []model.SamplePair{
-				{Timestamp: model.Time(util.TimeToMillis(now.Add(-1 * time.Minute))), Value: model.SampleValue(2)},
-			},
-		),
-	}, app.data)
-
-}
-
-func TestForStateAppenderQuerier(t *testing.T) {
-	app := NewForStateAppender(NewMetrics(nil))
-	now := time.Now()
-
-	// create ls series
-	ls := mustParseLabels(`{foo="bar", bazz="buzz", __name__="ALERTS_FOR_STATE"}`)
-	_, err := app.Add(ls, util.TimeToMillis(now.Add(-3*time.Minute)), 1)
-	require.Nil(t, err)
-	_, err = app.Add(ls, util.TimeToMillis(now.Add(-1*time.Minute)), 2)
-	require.Nil(t, err)
-	_, err = app.Add(ls, util.TimeToMillis(now.Add(1*time.Minute)), 3)
-	require.Nil(t, err)
-
-	// never included due to bounds
-	_, err = app.Add(mustParseLabels(`{foo="bar", bazz="blip", __name__="ALERTS_FOR_STATE"}`), util.TimeToMillis(now.Add(-2*time.Hour)), 3)
-	require.Nil(t, err)
-
-	// should succeed with nil selecthints
-	q := app.Querier(context.Background(), util.TimeToMillis(now.Add(-2*time.Minute)), util.TimeToMillis(now))
-
-	set := q.Select(
-		false,
-		nil,
-		labels.MustNewMatcher(labels.MatchEqual, labels.MetricName, AlertForStateMetricName),
-	)
-	require.Equal(
-		t,
-		series.NewConcreteSeriesSet(
-			[]storage.Series{
-				series.NewConcreteSeries(ls, []model.SamplePair{
-					{Timestamp: model.Time(util.TimeToMillis(now.Add(-1 * time.Minute))), Value: model.SampleValue(2)},
-				}),
-			},
-		),
-		set,
-	)
-
-	// // should be able to minimize selection window via hints
-	q = app.Querier(context.Background(), util.TimeToMillis(now.Add(-time.Hour)), util.TimeToMillis(now.Add(time.Hour)))
-	set2 := q.Select(
-		false,
-		&storage.SelectHints{
-			Start: util.TimeToMillis(now.Add(-2 * time.Minute)),
-			End:   util.TimeToMillis(now),
-		},
-		labels.MustNewMatcher(labels.MatchEqual, labels.MetricName, AlertForStateMetricName),
-	)
-	require.Equal(
-		t,
-		series.NewConcreteSeriesSet(
-			[]storage.Series{
-				series.NewConcreteSeries(ls, []model.SamplePair{
-					{Timestamp: model.Time(util.TimeToMillis(now.Add(-1 * time.Minute))), Value: model.SampleValue(2)},
-				}),
-			},
-		),
-		set2,
-	)
-}
+// func TestMemStoreStop(t *testing.T) {
+// 	hist := NewMemStore(&rules.Manager{}, time.Millisecond, NewMetrics(nil))
+// 	<-time.After(2 * time.Millisecond) // allow it to start ticking (not strictly required for this test)
+// 	hist.Stop()
+// 	// ensure idempotency
+// 	hist.Stop()
+
+// 	// ensure ticker is cleaned up
+// 	select {
+// 	case <-time.After(10 * time.Millisecond):
+// 		t.Fatalf("done channel not closed")
+// 	case <-hist.done:
+// 	}
+// }
+
+// func mustParseLabels(s string) labels.Labels {
+// 	labels, err := parser.ParseMetric(s)
+// 	if err != nil {
+// 		panic(fmt.Sprintf("failed to parse %s", s))
+// 	}
+
+// 	return labels
+// }
+
+// func newRule(name, qry, ls string, forDur time.Duration) *rules.AlertingRule {
+// 	return rules.NewAlertingRule(name, &parser.StringLiteral{Val: qry}, forDur, mustParseLabels(ls), nil, nil, false, log.NewNopLogger())
+// }
+
+// func TestForStateAppenderAdd(t *testing.T) {
+// 	app := NewForStateAppender(NewMetrics(nil))
+// 	require.Equal(t, map[uint64]*series.ConcreteSeries{}, app.data)
+
+// 	// create first series
+// 	first := mustParseLabels(`{foo="bar", bazz="buzz", __name__="ALERTS_FOR_STATE"}`)
+// 	_, err := app.Add(first, 1, 1)
+// 	require.Nil(t, err)
+// 	require.Equal(t, map[uint64]*series.ConcreteSeries{
+// 		first.Hash(): series.NewConcreteSeries(
+// 			first, []model.SamplePair{{Timestamp: model.Time(1), Value: model.SampleValue(1)}},
+// 		),
+// 	}, app.data)
+
+// 	// create second series
+// 	second := mustParseLabels(`{foo="bar", bazz="barf", __name__="ALERTS_FOR_STATE"}`)
+// 	_, err = app.Add(second, 1, 1)
+// 	require.Nil(t, err)
+
+// 	require.Equal(t, map[uint64]*series.ConcreteSeries{
+// 		first.Hash(): series.NewConcreteSeries(
+// 			first, []model.SamplePair{{Timestamp: model.Time(1), Value: model.SampleValue(1)}},
+// 		),
+// 		second.Hash(): series.NewConcreteSeries(
+// 			second, []model.SamplePair{{Timestamp: model.Time(1), Value: model.SampleValue(1)}},
+// 		),
+// 	}, app.data)
+
+// 	// append first series
+// 	_, err = app.Add(first, 3, 3)
+// 	require.Nil(t, err)
+
+// 	require.Equal(t, map[uint64]*series.ConcreteSeries{
+// 		first.Hash(): series.NewConcreteSeries(
+// 			first, []model.SamplePair{
+// 				{Timestamp: model.Time(1), Value: model.SampleValue(1)},
+// 				{Timestamp: model.Time(3), Value: model.SampleValue(3)},
+// 			},
+// 		),
+// 		second.Hash(): series.NewConcreteSeries(
+// 			second, []model.SamplePair{{Timestamp: model.Time(1), Value: model.SampleValue(1)}},
+// 		),
+// 	}, app.data)
+
+// 	// insert new points at correct position
+// 	_, err = app.Add(first, 2, 2)
+// 	require.Nil(t, err)
+
+// 	require.Equal(t, map[uint64]*series.ConcreteSeries{
+// 		first.Hash(): series.NewConcreteSeries(
+// 			first, []model.SamplePair{
+// 				{Timestamp: model.Time(1), Value: model.SampleValue(1)},
+// 				{Timestamp: model.Time(2), Value: model.SampleValue(2)},
+// 				{Timestamp: model.Time(3), Value: model.SampleValue(3)},
+// 			},
+// 		),
+// 		second.Hash(): series.NewConcreteSeries(
+// 			second, []model.SamplePair{{Timestamp: model.Time(1), Value: model.SampleValue(1)}},
+// 		),
+// 	}, app.data)
+
+// 	// ignore non ALERTS_FOR_STATE metrics
+// 	_, err = app.Add(mustParseLabels(`{foo="bar", bazz="barf", __name__="test"}`), 1, 1)
+// 	require.Nil(t, err)
+
+// 	require.Equal(t, map[uint64]*series.ConcreteSeries{
+// 		first.Hash(): series.NewConcreteSeries(
+// 			first, []model.SamplePair{
+// 				{Timestamp: model.Time(1), Value: model.SampleValue(1)},
+// 				{Timestamp: model.Time(2), Value: model.SampleValue(2)},
+// 				{Timestamp: model.Time(3), Value: model.SampleValue(3)},
+// 			},
+// 		),
+// 		second.Hash(): series.NewConcreteSeries(
+// 			second, []model.SamplePair{{Timestamp: model.Time(1), Value: model.SampleValue(1)}},
+// 		),
+// 	}, app.data)
+// }
+
+// func TestForStateAppenderCleanup(t *testing.T) {
+// 	app := NewForStateAppender(NewMetrics(nil))
+// 	now := time.Now()
+
+// 	// create ls series
+// 	ls := mustParseLabels(`{foo="bar", bazz="buzz", __name__="ALERTS_FOR_STATE"}`)
+// 	_, err := app.Add(ls, util.TimeToMillis(now.Add(-3*time.Minute)), 1)
+// 	require.Nil(t, err)
+// 	_, err = app.Add(ls, util.TimeToMillis(now.Add(-1*time.Minute)), 2)
+// 	require.Nil(t, err)
+
+// 	rem := app.CleanupOldSamples(time.Minute)
+// 	require.Equal(t, 1, rem)
+
+// 	require.Equal(t, map[uint64]*series.ConcreteSeries{
+// 		ls.Hash(): series.NewConcreteSeries(
+// 			ls, []model.SamplePair{
+// 				{Timestamp: model.Time(util.TimeToMillis(now.Add(-1 * time.Minute))), Value: model.SampleValue(2)},
+// 			},
+// 		),
+// 	}, app.data)
+
+// }
+
+// func TestForStateAppenderQuerier(t *testing.T) {
+// 	app := NewForStateAppender(NewMetrics(nil))
+// 	now := time.Now()
+
+// 	// create ls series
+// 	ls := mustParseLabels(`{foo="bar", bazz="buzz", __name__="ALERTS_FOR_STATE"}`)
+// 	_, err := app.Add(ls, util.TimeToMillis(now.Add(-3*time.Minute)), 1)
+// 	require.Nil(t, err)
+// 	_, err = app.Add(ls, util.TimeToMillis(now.Add(-1*time.Minute)), 2)
+// 	require.Nil(t, err)
+// 	_, err = app.Add(ls, util.TimeToMillis(now.Add(1*time.Minute)), 3)
+// 	require.Nil(t, err)
+
+// 	// never included due to bounds
+// 	_, err = app.Add(mustParseLabels(`{foo="bar", bazz="blip", __name__="ALERTS_FOR_STATE"}`), util.TimeToMillis(now.Add(-2*time.Hour)), 3)
+// 	require.Nil(t, err)
+
+// 	// should succeed with nil selecthints
+// 	q := app.Querier(context.Background(), util.TimeToMillis(now.Add(-2*time.Minute)), util.TimeToMillis(now))
+
+// 	set := q.Select(
+// 		false,
+// 		nil,
+// 		labels.MustNewMatcher(labels.MatchEqual, labels.MetricName, AlertForStateMetricName),
+// 	)
+// 	require.Equal(
+// 		t,
+// 		series.NewConcreteSeriesSet(
+// 			[]storage.Series{
+// 				series.NewConcreteSeries(ls, []model.SamplePair{
+// 					{Timestamp: model.Time(util.TimeToMillis(now.Add(-1 * time.Minute))), Value: model.SampleValue(2)},
+// 				}),
+// 			},
+// 		),
+// 		set,
+// 	)
+
+// 	// // should be able to minimize selection window via hints
+// 	q = app.Querier(context.Background(), util.TimeToMillis(now.Add(-time.Hour)), util.TimeToMillis(now.Add(time.Hour)))
+// 	set2 := q.Select(
+// 		false,
+// 		&storage.SelectHints{
+// 			Start: util.TimeToMillis(now.Add(-2 * time.Minute)),
+// 			End:   util.TimeToMillis(now),
+// 		},
+// 		labels.MustNewMatcher(labels.MatchEqual, labels.MetricName, AlertForStateMetricName),
+// 	)
+// 	require.Equal(
+// 		t,
+// 		series.NewConcreteSeriesSet(
+// 			[]storage.Series{
+// 				series.NewConcreteSeries(ls, []model.SamplePair{
+// 					{Timestamp: model.Time(util.TimeToMillis(now.Add(-1 * time.Minute))), Value: model.SampleValue(2)},
+// 				}),
+// 			},
+// 		),
+// 		set2,
+// 	)
+// }
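
Stepping back, the Select path added in this patch implements a lazy restore-for-state: check the per-rule cache for the requested timestamp; on a miss, evaluate the rule's query at ts minus the hold duration, synthesize ALERTS_FOR_STATE-style samples whose value is the requested timestamp, cache them, and answer from the cache. A simplified, standard-library-only sketch of that flow follows; queryFn, restorer, and forState are hypothetical names, not the Loki or Prometheus APIs.

```go
package main

import (
	"fmt"
	"time"
)

// queryFn stands in for the rule evaluation function; it returns the hashes
// of the series that were firing at the given time.
type queryFn func(ts time.Time) map[uint64]struct{}

type restorer struct {
	cache map[int64]map[uint64]float64 // eval timestamp -> series hash -> sample value
	query queryFn
	hold  time.Duration // the rule's `for` duration
}

// forState returns the synthetic for-state value for one series hash at ts,
// evaluating the rule and caching the full result on first use.
func (r *restorer) forState(ts time.Time, hash uint64) (float64, bool) {
	key := ts.UnixNano()
	bucket, cached := r.cache[key]
	if !cached {
		bucket = make(map[uint64]float64)
		// Evaluate at ts-hold: anything active then is assumed still firing,
		// since this path is only hit while restoring alert state.
		for h := range r.query(ts.Add(-r.hold)) {
			bucket[h] = float64(ts.UnixMilli()) // value is the restore timestamp
		}
		r.cache[key] = bucket
	}
	v, ok := bucket[hash]
	return v, ok
}

func main() {
	calls := 0
	r := &restorer{
		cache: make(map[int64]map[uint64]float64),
		hold:  time.Minute,
		query: func(time.Time) map[uint64]struct{} {
			calls++
			return map[uint64]struct{}{1: {}, 2: {}}
		},
	}
	now := time.Now()
	fmt.Println(r.forState(now, 1)) // evaluates the query
	fmt.Println(r.forState(now, 2)) // served from the cache
	fmt.Println("query calls:", calls)
}
```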

From 33570eb76b9fb6bde49ca352f5f4df1cb31aef2c Mon Sep 17 00:00:00 2001
From: Owen Diehl 
Date: Thu, 23 Jul 2020 09:55:04 -0400
Subject: [PATCH 23/40] removes unused pkgs, adds memstore test

---
 pkg/ruler/compat.go                       |   29 -
 pkg/ruler/manager/memstore.go             |   58 +-
 pkg/ruler/manager/memstore_test.go        |  328 +++----
 pkg/ruler/{rules => manager}/query.go     |    2 +-
 pkg/ruler/memhistory.go                   |  337 -------
 pkg/ruler/memhistory_test.go              |  319 -------
 pkg/ruler/rules/alerting.go               |  556 ------------
 pkg/ruler/rules/alerting_test.go          |  329 -------
 pkg/ruler/rules/fixtures/rules.yaml       |    6 -
 pkg/ruler/rules/fixtures/rules2.yaml      |    5 -
 pkg/ruler/rules/fixtures/rules2_copy.yaml |    5 -
 pkg/ruler/rules/manager.go                | 1004 ---------------------
 pkg/ruler/rules/manager_test.go           |  959 --------------------
 13 files changed, 172 insertions(+), 3765 deletions(-)
 delete mode 100644 pkg/ruler/compat.go
 rename pkg/ruler/{rules => manager}/query.go (98%)
 delete mode 100644 pkg/ruler/memhistory.go
 delete mode 100644 pkg/ruler/memhistory_test.go
 delete mode 100644 pkg/ruler/rules/alerting.go
 delete mode 100644 pkg/ruler/rules/alerting_test.go
 delete mode 100644 pkg/ruler/rules/fixtures/rules.yaml
 delete mode 100644 pkg/ruler/rules/fixtures/rules2.yaml
 delete mode 100644 pkg/ruler/rules/fixtures/rules2_copy.yaml
 delete mode 100644 pkg/ruler/rules/manager.go
 delete mode 100644 pkg/ruler/rules/manager_test.go

diff --git a/pkg/ruler/compat.go b/pkg/ruler/compat.go
deleted file mode 100644
index c367cc9d7374f..0000000000000
--- a/pkg/ruler/compat.go
+++ /dev/null
@@ -1,29 +0,0 @@
-package ruler
-
-import (
-	"errors"
-	"time"
-
-	"github.com/prometheus/client_golang/prometheus"
-	"github.com/prometheus/prometheus/pkg/labels"
-	"github.com/prometheus/prometheus/rules"
-	"github.com/prometheus/prometheus/storage"
-)
-
-func InMemoryAppendableHistory(r prometheus.Registerer) func(string, *rules.ManagerOptions) (rules.Appendable, rules.TenantAlertHistory) {
-	metrics := NewMetrics(r)
-	return func(userID string, opts *rules.ManagerOptions) (rules.Appendable, rules.TenantAlertHistory) {
-		hist := NewMemHistory(userID, 5*time.Minute, opts, metrics)
-		return hist, hist
-	}
-}
-
-type NoopAppender struct{}
-
-func (a NoopAppender) Appender() (storage.Appender, error)                     { return a, nil }
-func (a NoopAppender) Add(l labels.Labels, t int64, v float64) (uint64, error) { return 0, nil }
-func (a NoopAppender) AddFast(ref uint64, t int64, v float64) error {
-	return errors.New("unimplemented")
-}
-func (a NoopAppender) Commit() error   { return nil }
-func (a NoopAppender) Rollback() error { return nil }
diff --git a/pkg/ruler/manager/memstore.go b/pkg/ruler/manager/memstore.go
index 7d0d0715b2564..c6ebcdb84ba5b 100644
--- a/pkg/ruler/manager/memstore.go
+++ b/pkg/ruler/manager/memstore.go
@@ -35,6 +35,13 @@ func (a NoopAppender) AddFast(ref uint64, t int64, v float64) error {
 func (a NoopAppender) Commit() error   { return nil }
 func (a NoopAppender) Rollback() error { return nil }
 
+func ForStateMetric(base labels.Labels, alertName string) labels.Labels {
+	b := labels.NewBuilder(base)
+	b.Set(labels.MetricName, AlertForStateMetricName)
+	b.Set(labels.AlertName, alertName)
+	return b.Labels()
+}
+
 type Metrics struct {
 	evaluations *prometheus.CounterVec
 	Series      prometheus.Gauge // in memory series
@@ -54,12 +61,16 @@ func NewMetrics(r prometheus.Registerer) *Metrics {
 	}
 }
 
+type RuleIter interface {
+	AlertingRules() []*rules.AlertingRule
+}
+
 type MemStore struct {
 	mtx       sync.Mutex
 	userID    string
 	queryFunc rules.QueryFunc
 	metrics   *Metrics
-	mgr       *rules.Manager
+	mgr       RuleIter
 	logger    log.Logger
 	rules     map[string]*RuleCache
 
@@ -67,7 +78,7 @@ type MemStore struct {
 	cleanupInterval time.Duration
 }
 
-func NewMemStore(userID string, mgr *rules.Manager, queryFunc rules.QueryFunc, metrics *Metrics, cleanupInterval time.Duration, logger log.Logger) *MemStore {
+func NewMemStore(userID string, mgr RuleIter, queryFunc rules.QueryFunc, metrics *Metrics, cleanupInterval time.Duration, logger log.Logger) *MemStore {
 	s := &MemStore{
 		userID:          userID,
 		metrics:         metrics,
@@ -199,23 +210,23 @@ func (m *MemStoreQuerier) Select(sortSeries bool, params *storage.SelectHints, m
 	if !ok {
 		cache = NewRuleCache(m.metrics)
 		m.rules[ruleKey] = cache
-	} else {
-		smpl, cached := cache.Get(m.ts, ls)
-		if cached {
-			// If the result is cached but the desired series is not in it, the series is not considered active.
-			if smpl == nil {
-				return storage.NoopSeriesSet()
-			}
+	}
 
-			// If the labelset is cached we can consider it active. Return the for state sample active immediately.
-			return series.NewConcreteSeriesSet(
-				[]storage.Series{
-					series.NewConcreteSeries(smpl.Metric, []model.SamplePair{
-						{Timestamp: model.Time(util.TimeToMillis(m.ts)), Value: model.SampleValue(smpl.V)},
-					}),
-				},
-			)
+	smpl, cached := cache.Get(m.ts, ls)
+	if cached {
+		// If the result is cached but the desired series is not in it, the series is not considered active.
+		if smpl == nil {
+			return storage.NoopSeriesSet()
 		}
+
+		// If the labelset is cached we can consider it active. Return the for state sample active immediately.
+		return series.NewConcreteSeriesSet(
+			[]storage.Series{
+				series.NewConcreteSeries(smpl.Metric, []model.SamplePair{
+					{Timestamp: model.Time(util.TimeToMillis(m.ts)), Value: model.SampleValue(smpl.V)},
+				}),
+			},
+		)
 	}
 
 	// see if alert condition had any inhabitants at ts-forDuration. We can assume it's still firing because
@@ -231,14 +242,11 @@ func (m *MemStoreQuerier) Select(sortSeries bool, params *storage.SelectHints, m
 	// considered active & written at the timestamp requested
 	forStateVec := make(promql.Vector, 0, len(vec))
 	for _, smpl := range vec {
-		b := labels.NewBuilder(smpl.Metric)
-		b.Set(labels.MetricName, AlertForStateMetricName)
-		b.Set(labels.AlertName, rule.Name())
 
 		ts := util.TimeToMillis(m.ts)
 
 		forStateVec = append(forStateVec, promql.Sample{
-			Metric: b.Labels(),
+			Metric: ForStateMetric(smpl.Metric, rule.Name()),
 			Point: promql.Point{
 				T: ts,
 				V: float64(ts),
@@ -247,11 +255,11 @@ func (m *MemStoreQuerier) Select(sortSeries bool, params *storage.SelectHints, m
 
 	}
 
-	// cache the result of the evalauation at this timestamp
+	// cache the result of the evaluation at this timestamp
 	cache.Set(m.ts, forStateVec)
 
 	// Finally return the series if it exists
-	smpl, ok := cache.Get(m.ts, ls)
+	smpl, ok = cache.Get(m.ts, ls)
 	if !ok || smpl == nil {
 		return storage.NoopSeriesSet()
 	}
@@ -307,13 +315,13 @@ func (c *RuleCache) Set(ts time.Time, vec promql.Vector) {
 }
 
 // Get returns ok if that timestamp's result is cached.
-func (c *RuleCache) Get(ts time.Time, ls labels.Labels) (pt *promql.Sample, ok bool) {
+func (c *RuleCache) Get(ts time.Time, ls labels.Labels) (*promql.Sample, bool) {
 	c.mtx.Lock()
 	defer c.mtx.Unlock()
 
 	match, ok := c.data[ts.UnixNano()]
 	if !ok {
-		return pt, false
+		return nil, false
 	}
 
 	smp, ok := match[ls.Hash()]
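
The swap from *rules.Manager to the narrow RuleIter interface above is what makes the new memstore test possible: a unit test can hand the store a slice-backed mock instead of a full rule manager. A generic sketch of that pattern, with illustrative stand-in types rather than the Prometheus ones, might look like:

```go
package main

import "fmt"

// alertRule is an illustrative stand-in for *rules.AlertingRule.
type alertRule struct {
	name string
	hold int64 // hold ("for") duration in seconds
}

// ruleIter is the only behaviour the store needs from the rule manager.
type ruleIter interface {
	AlertingRules() []alertRule
}

// mockIter satisfies ruleIter with a plain slice, which is all a unit test needs.
type mockIter []alertRule

func (m mockIter) AlertingRules() []alertRule { return m }

// store depends on the interface, not on a concrete manager type.
type store struct {
	mgr ruleIter
}

func (s *store) holdDurations() map[string]int64 {
	out := make(map[string]int64)
	for _, r := range s.mgr.AlertingRules() {
		out[r.name] = r.hold
	}
	return out
}

func main() {
	s := &store{mgr: mockIter{{name: "HighErrorRate", hold: 300}}}
	fmt.Println(s.holdDurations())
}
```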
diff --git a/pkg/ruler/manager/memstore_test.go b/pkg/ruler/manager/memstore_test.go
index 6ec0ba1e0b246..ec2c327d8b19f 100644
--- a/pkg/ruler/manager/memstore_test.go
+++ b/pkg/ruler/manager/memstore_test.go
@@ -1,192 +1,140 @@
 package manager
 
-// func TestMemStoreStop(t *testing.T) {
-// 	hist := NewMemStore(&rules.Manager{}, time.Millisecond, NewMetrics(nil))
-// 	<-time.After(2 * time.Millisecond) // allow it to start ticking (not strictly required for this test)
-// 	hist.Stop()
-// 	// ensure idempotency
-// 	hist.Stop()
-
-// 	// ensure ticker is cleaned up
-// 	select {
-// 	case <-time.After(10 * time.Millisecond):
-// 		t.Fatalf("done channel not closed")
-// 	case <-hist.done:
-// 	}
-// }
-
-// func mustParseLabels(s string) labels.Labels {
-// 	labels, err := parser.ParseMetric(s)
-// 	if err != nil {
-// 		panic(fmt.Sprintf("failed to parse %s", s))
-// 	}
-
-// 	return labels
-// }
-
-// func newRule(name, qry, ls string, forDur time.Duration) *rules.AlertingRule {
-// 	return rules.NewAlertingRule(name, &parser.StringLiteral{Val: qry}, forDur, mustParseLabels(ls), nil, nil, false, log.NewNopLogger())
-// }
-
-// func TestForStateAppenderAdd(t *testing.T) {
-// 	app := NewForStateAppender(NewMetrics(nil))
-// 	require.Equal(t, map[uint64]*series.ConcreteSeries{}, app.data)
-
-// 	// create first series
-// 	first := mustParseLabels(`{foo="bar", bazz="buzz", __name__="ALERTS_FOR_STATE"}`)
-// 	_, err := app.Add(first, 1, 1)
-// 	require.Nil(t, err)
-// 	require.Equal(t, map[uint64]*series.ConcreteSeries{
-// 		first.Hash(): series.NewConcreteSeries(
-// 			first, []model.SamplePair{{Timestamp: model.Time(1), Value: model.SampleValue(1)}},
-// 		),
-// 	}, app.data)
-
-// 	// create second series
-// 	second := mustParseLabels(`{foo="bar", bazz="barf", __name__="ALERTS_FOR_STATE"}`)
-// 	_, err = app.Add(second, 1, 1)
-// 	require.Nil(t, err)
-
-// 	require.Equal(t, map[uint64]*series.ConcreteSeries{
-// 		first.Hash(): series.NewConcreteSeries(
-// 			first, []model.SamplePair{{Timestamp: model.Time(1), Value: model.SampleValue(1)}},
-// 		),
-// 		second.Hash(): series.NewConcreteSeries(
-// 			second, []model.SamplePair{{Timestamp: model.Time(1), Value: model.SampleValue(1)}},
-// 		),
-// 	}, app.data)
-
-// 	// append first series
-// 	_, err = app.Add(first, 3, 3)
-// 	require.Nil(t, err)
-
-// 	require.Equal(t, map[uint64]*series.ConcreteSeries{
-// 		first.Hash(): series.NewConcreteSeries(
-// 			first, []model.SamplePair{
-// 				{Timestamp: model.Time(1), Value: model.SampleValue(1)},
-// 				{Timestamp: model.Time(3), Value: model.SampleValue(3)},
-// 			},
-// 		),
-// 		second.Hash(): series.NewConcreteSeries(
-// 			second, []model.SamplePair{{Timestamp: model.Time(1), Value: model.SampleValue(1)}},
-// 		),
-// 	}, app.data)
-
-// 	// insert new points at correct position
-// 	_, err = app.Add(first, 2, 2)
-// 	require.Nil(t, err)
-
-// 	require.Equal(t, map[uint64]*series.ConcreteSeries{
-// 		first.Hash(): series.NewConcreteSeries(
-// 			first, []model.SamplePair{
-// 				{Timestamp: model.Time(1), Value: model.SampleValue(1)},
-// 				{Timestamp: model.Time(2), Value: model.SampleValue(2)},
-// 				{Timestamp: model.Time(3), Value: model.SampleValue(3)},
-// 			},
-// 		),
-// 		second.Hash(): series.NewConcreteSeries(
-// 			second, []model.SamplePair{{Timestamp: model.Time(1), Value: model.SampleValue(1)}},
-// 		),
-// 	}, app.data)
-
-// 	// ignore non ALERTS_FOR_STATE metrics
-// 	_, err = app.Add(mustParseLabels(`{foo="bar", bazz="barf", __name__="test"}`), 1, 1)
-// 	require.Nil(t, err)
-
-// 	require.Equal(t, map[uint64]*series.ConcreteSeries{
-// 		first.Hash(): series.NewConcreteSeries(
-// 			first, []model.SamplePair{
-// 				{Timestamp: model.Time(1), Value: model.SampleValue(1)},
-// 				{Timestamp: model.Time(2), Value: model.SampleValue(2)},
-// 				{Timestamp: model.Time(3), Value: model.SampleValue(3)},
-// 			},
-// 		),
-// 		second.Hash(): series.NewConcreteSeries(
-// 			second, []model.SamplePair{{Timestamp: model.Time(1), Value: model.SampleValue(1)}},
-// 		),
-// 	}, app.data)
-// }
-
-// func TestForStateAppenderCleanup(t *testing.T) {
-// 	app := NewForStateAppender(NewMetrics(nil))
-// 	now := time.Now()
-
-// 	// create ls series
-// 	ls := mustParseLabels(`{foo="bar", bazz="buzz", __name__="ALERTS_FOR_STATE"}`)
-// 	_, err := app.Add(ls, util.TimeToMillis(now.Add(-3*time.Minute)), 1)
-// 	require.Nil(t, err)
-// 	_, err = app.Add(ls, util.TimeToMillis(now.Add(-1*time.Minute)), 2)
-// 	require.Nil(t, err)
-
-// 	rem := app.CleanupOldSamples(time.Minute)
-// 	require.Equal(t, 1, rem)
-
-// 	require.Equal(t, map[uint64]*series.ConcreteSeries{
-// 		ls.Hash(): series.NewConcreteSeries(
-// 			ls, []model.SamplePair{
-// 				{Timestamp: model.Time(util.TimeToMillis(now.Add(-1 * time.Minute))), Value: model.SampleValue(2)},
-// 			},
-// 		),
-// 	}, app.data)
-
-// }
-
-// func TestForStateAppenderQuerier(t *testing.T) {
-// 	app := NewForStateAppender(NewMetrics(nil))
-// 	now := time.Now()
-
-// 	// create ls series
-// 	ls := mustParseLabels(`{foo="bar", bazz="buzz", __name__="ALERTS_FOR_STATE"}`)
-// 	_, err := app.Add(ls, util.TimeToMillis(now.Add(-3*time.Minute)), 1)
-// 	require.Nil(t, err)
-// 	_, err = app.Add(ls, util.TimeToMillis(now.Add(-1*time.Minute)), 2)
-// 	require.Nil(t, err)
-// 	_, err = app.Add(ls, util.TimeToMillis(now.Add(1*time.Minute)), 3)
-// 	require.Nil(t, err)
-
-// 	// never included due to bounds
-// 	_, err = app.Add(mustParseLabels(`{foo="bar", bazz="blip", __name__="ALERTS_FOR_STATE"}`), util.TimeToMillis(now.Add(-2*time.Hour)), 3)
-// 	require.Nil(t, err)
-
-// 	// should succeed with nil selecthints
-// 	q := app.Querier(context.Background(), util.TimeToMillis(now.Add(-2*time.Minute)), util.TimeToMillis(now))
-
-// 	set := q.Select(
-// 		false,
-// 		nil,
-// 		labels.MustNewMatcher(labels.MatchEqual, labels.MetricName, AlertForStateMetricName),
-// 	)
-// 	require.Equal(
-// 		t,
-// 		series.NewConcreteSeriesSet(
-// 			[]storage.Series{
-// 				series.NewConcreteSeries(ls, []model.SamplePair{
-// 					{Timestamp: model.Time(util.TimeToMillis(now.Add(-1 * time.Minute))), Value: model.SampleValue(2)},
-// 				}),
-// 			},
-// 		),
-// 		set,
-// 	)
-
-// 	// // should be able to minimize selection window via hints
-// 	q = app.Querier(context.Background(), util.TimeToMillis(now.Add(-time.Hour)), util.TimeToMillis(now.Add(time.Hour)))
-// 	set2 := q.Select(
-// 		false,
-// 		&storage.SelectHints{
-// 			Start: util.TimeToMillis(now.Add(-2 * time.Minute)),
-// 			End:   util.TimeToMillis(now),
-// 		},
-// 		labels.MustNewMatcher(labels.MatchEqual, labels.MetricName, AlertForStateMetricName),
-// 	)
-// 	require.Equal(
-// 		t,
-// 		series.NewConcreteSeriesSet(
-// 			[]storage.Series{
-// 				series.NewConcreteSeries(ls, []model.SamplePair{
-// 					{Timestamp: model.Time(util.TimeToMillis(now.Add(-1 * time.Minute))), Value: model.SampleValue(2)},
-// 				}),
-// 			},
-// 		),
-// 		set2,
-// 	)
-// }
+import (
+	"context"
+	"testing"
+	"time"
+
+	"github.com/cortexproject/cortex/pkg/util"
+	"github.com/go-kit/kit/log"
+	"github.com/prometheus/prometheus/pkg/labels"
+	"github.com/prometheus/prometheus/promql"
+	"github.com/prometheus/prometheus/promql/parser"
+	"github.com/prometheus/prometheus/rules"
+	"github.com/stretchr/testify/require"
+)
+
+var (
+	NilMetrics = NewMetrics(nil)
+	NilLogger  = log.NewNopLogger()
+)
+
+func labelsToMatchers(ls labels.Labels) (res []*labels.Matcher) {
+	for _, l := range ls {
+		res = append(res, labels.MustNewMatcher(labels.MatchEqual, l.Name, l.Value))
+	}
+	return res
+}
+
+type MockRuleIter []*rules.AlertingRule
+
+func (xs MockRuleIter) AlertingRules() []*rules.AlertingRule { return xs }
+
+func testStore(alerts []*rules.AlertingRule, queryFunc rules.QueryFunc, itv time.Duration) *MemStore {
+	return NewMemStore("test", MockRuleIter(alerts), queryFunc, NilMetrics, itv, NilLogger)
+}
+
+func TestIdempotentStop(t *testing.T) {
+	store := testStore(nil, nil, time.Millisecond)
+
+	store.Stop()
+	store.Stop()
+}
+
+func TestSelectRestores(t *testing.T) {
+	ruleName := "testrule"
+	ars := []*rules.AlertingRule{
+		rules.NewAlertingRule(
+			ruleName,
+			&parser.StringLiteral{Val: "unused"},
+			time.Minute,
+			labels.FromMap(map[string]string{"foo": "bar"}),
+			nil,
+			nil,
+			false,
+			NilLogger,
+		),
+	}
+
+	callCount := 0
+	fn := rules.QueryFunc(func(ctx context.Context, qs string, t time.Time) (promql.Vector, error) {
+		callCount++
+		return promql.Vector{
+			promql.Sample{
+				Metric: labels.FromMap(map[string]string{
+					labels.MetricName: "some_metric",
+					"foo":             "bar",  // from the AlertingRule.labels spec
+					"bazz":            "buzz", // an extra label
+				}),
+				Point: promql.Point{
+					T: util.TimeToMillis(t),
+					V: 1,
+				},
+			},
+			promql.Sample{
+				Metric: labels.FromMap(map[string]string{
+					labels.MetricName: "some_metric",
+					"foo":             "bar",  // from the AlertingRule.labels spec
+					"bazz":            "bork", // an extra label (second variant)
+				}),
+				Point: promql.Point{
+					T: util.TimeToMillis(t),
+					V: 1,
+				},
+			},
+		}, nil
+	})
+
+	store := testStore(ars, fn, time.Minute)
+
+	now := util.TimeToMillis(time.Now())
+
+	q, err := store.Querier(context.Background(), 0, now)
+	require.Nil(t, err)
+
+	ls := ForStateMetric(labels.FromMap(map[string]string{
+		"foo":  "bar",
+		"bazz": "buzz",
+	}), ruleName)
+
+	// First call evaluates the rule at ts-ForDuration and populates the cache
+	sset := q.Select(false, nil, labelsToMatchers(ls)...)
+
+	require.Equal(t, true, sset.Next())
+	require.Equal(t, ls, sset.At().Labels())
+	iter := sset.At().Iterator()
+	require.Equal(t, true, iter.Next())
+	ts, v := iter.At()
+	require.Equal(t, now, ts)
+	require.Equal(t, float64(now), v)
+	require.Equal(t, false, iter.Next())
+	require.Equal(t, false, sset.Next())
+
+	// Second call uses cache
+	ls = ForStateMetric(labels.FromMap(map[string]string{
+		"foo":  "bar",
+		"bazz": "bork",
+	}), ruleName)
+
+	sset = q.Select(false, nil, labelsToMatchers(ls)...)
+	require.Equal(t, true, sset.Next())
+	require.Equal(t, ls, sset.At().Labels())
+	iter = sset.At().Iterator()
+	require.Equal(t, true, iter.Next())
+	ts, v = iter.At()
+	require.Equal(t, now, ts)
+	require.Equal(t, float64(now), v)
+	require.Equal(t, false, iter.Next())
+	require.Equal(t, false, sset.Next())
+	require.Equal(t, 1, callCount)
+
+	// Third call uses cache & has no match
+	ls = ForStateMetric(labels.FromMap(map[string]string{
+		"foo":  "bar",
+		"bazz": "unknown",
+	}), ruleName)
+
+	sset = q.Select(false, nil, labelsToMatchers(ls)...)
+	require.Equal(t, false, sset.Next())
+	require.Equal(t, 1, callCount)
+}
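
The test above verifies caching indirectly by counting invocations of the injected query function: the first Select triggers one evaluation, and later Selects at the same timestamp, including ones for series that were never active, must not trigger another. A stripped-down sketch of that call-counting pattern, using plain functions instead of rules.QueryFunc and the testify assertions, could be:

```go
package main

import "fmt"

type queryFn func(q string) []string

// countingQuery wraps a query function so a test can assert how many times
// the underlying evaluation actually ran.
func countingQuery(inner queryFn, calls *int) queryFn {
	return func(q string) []string {
		*calls++
		return inner(q)
	}
}

func main() {
	calls := 0
	q := countingQuery(func(string) []string { return []string{"series-a"} }, &calls)

	// A toy cache in front of the query: only a miss invokes q.
	cache := map[string][]string{}
	lookup := func(query string) []string {
		if res, ok := cache[query]; ok {
			return res
		}
		res := q(query)
		cache[query] = res
		return res
	}

	lookup(`rate({app="foo"}[1m]) > 0`)
	lookup(`rate({app="foo"}[1m]) > 0`) // cache hit, inner query not re-run
	fmt.Println("query calls:", calls)  // 1
}
```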
diff --git a/pkg/ruler/rules/query.go b/pkg/ruler/manager/query.go
similarity index 98%
rename from pkg/ruler/rules/query.go
rename to pkg/ruler/manager/query.go
index 279239955be32..746f8e404193b 100644
--- a/pkg/ruler/rules/query.go
+++ b/pkg/ruler/manager/query.go
@@ -1,4 +1,4 @@
-package rules
+package manager
 
 import (
 	"context"
diff --git a/pkg/ruler/memhistory.go b/pkg/ruler/memhistory.go
deleted file mode 100644
index 47dd1f474e203..0000000000000
--- a/pkg/ruler/memhistory.go
+++ /dev/null
@@ -1,337 +0,0 @@
-package ruler
-
-import (
-	"context"
-	"errors"
-	"sync"
-	"time"
-
-	"github.com/cortexproject/cortex/pkg/querier/series"
-	"github.com/cortexproject/cortex/pkg/util"
-	"github.com/go-kit/kit/log/level"
-	"github.com/prometheus/client_golang/prometheus"
-	"github.com/prometheus/client_golang/prometheus/promauto"
-	"github.com/prometheus/common/model"
-	"github.com/prometheus/prometheus/pkg/labels"
-	"github.com/prometheus/prometheus/rules"
-	"github.com/prometheus/prometheus/storage"
-)
-
-type Metrics struct {
-	Series  prometheus.Gauge // in memory series
-	Samples prometheus.Gauge // in memory samples
-}
-
-func NewMetrics(r prometheus.Registerer) *Metrics {
-	return &Metrics{
-		Series: promauto.With(r).NewGauge(prometheus.GaugeOpts{
-			Namespace: "loki",
-			Name:      "ruler_memory_series",
-		}),
-		Samples: promauto.With(r).NewGauge(prometheus.GaugeOpts{
-			Namespace: "loki",
-			Name:      "ruler_memory_samples",
-		}),
-	}
-}
-
-type MemHistory struct {
-	mtx       sync.RWMutex
-	userId    string
-	opts      *rules.ManagerOptions
-	appenders map[*rules.AlertingRule]*ForStateAppender
-	metrics   *Metrics
-
-	done            chan struct{}
-	cleanupInterval time.Duration
-}
-
-func NewMemHistory(userId string, cleanupInterval time.Duration, opts *rules.ManagerOptions, metrics *Metrics) *MemHistory {
-	hist := &MemHistory{
-		userId:    userId,
-		opts:      opts,
-		appenders: make(map[*rules.AlertingRule]*ForStateAppender),
-		metrics:   metrics,
-
-		cleanupInterval: cleanupInterval,
-		done:            make(chan struct{}),
-	}
-	go hist.run()
-	return hist
-}
-
-func (m *MemHistory) Stop() {
-	select {
-	// ensures Stop() is idempotent
-	case <-m.done:
-		return
-	default:
-		close(m.done)
-		return
-	}
-}
-
-// run periodically cleans up old series/samples to ensure memory consumption doesn't grow unbounded.
-func (m *MemHistory) run() {
-	t := time.NewTicker(m.cleanupInterval)
-	for {
-		select {
-		case <-m.done:
-			t.Stop()
-			return
-		case <-t.C:
-			m.mtx.Lock()
-			for rule, app := range m.appenders {
-				if rem := app.CleanupOldSamples(); rem == 0 {
-					delete(m.appenders, rule)
-				}
-
-			}
-			m.mtx.Unlock()
-		}
-	}
-}
-
-// Implement rules.Appendable
-func (m *MemHistory) Appender(rule rules.Rule) (storage.Appender, error) {
-	if rule == nil {
-		return NoopAppender{}, nil
-	}
-
-	alertRule, ok := rule.(*rules.AlertingRule)
-	if !ok {
-		return nil, errors.New("unimplemented: MemHistory only accepts AlertingRules")
-	}
-
-	m.mtx.Lock()
-	defer m.mtx.Unlock()
-
-	if app, ok := m.appenders[alertRule]; ok {
-		return app, nil
-	}
-
-	app := NewForStateAppender(alertRule, m.metrics)
-	m.appenders[alertRule] = app
-	return app, nil
-}
-
-func (m *MemHistory) RestoreForState(ts time.Time, alertRule *rules.AlertingRule) {
-	appender, err := m.Appender(alertRule)
-	if err != nil {
-		level.Error(m.opts.Logger).Log("msg", "Could not find an Appender for rule", "err", err)
-		return
-	}
-
-	app := appender.(*ForStateAppender)
-
-	// Here we artificially populate the evaluation at `now-forDuration`.
-	// Note: We lose granularity here across restarts because we don't persist series. This is an approximation
-	// of whether the alert condition was positive during this period. This means after restarts, we may lose up
-	// to the ForDuration in alert granularity.
-	// TODO: Do we want this to instead evaluate forDuration/interval times?
-	start := time.Now()
-	adjusted := ts.Add(-alertRule.Duration())
-
-	level.Info(m.opts.Logger).Log(
-		"msg", "restoring synthetic for state",
-		"adjusted_ts", adjusted,
-		"rule", alertRule.Name(),
-		"query", alertRule.Query().String(),
-		"rule_duration", alertRule.Duration(),
-		"tenant", m.userId,
-	)
-	vec, err := m.opts.QueryFunc(m.opts.Context, alertRule.Query().String(), adjusted)
-	m.opts.Metrics.IncrementEvaluations()
-	if err != nil {
-		alertRule.SetHealth(rules.HealthBad)
-		alertRule.SetLastError(err)
-		m.opts.Metrics.FailedEvaluate()
-	}
-
-	for _, smpl := range vec {
-		forStateSample := alertRule.ForStateSample(
-			&rules.Alert{
-				Labels:   smpl.Metric,
-				ActiveAt: ts,
-				Value:    smpl.V,
-			},
-			util.TimeFromMillis(smpl.T),
-			smpl.V,
-		)
-
-		if _, err := app.Add(forStateSample.Metric, forStateSample.T, forStateSample.V); err != nil {
-			level.Error(m.opts.Logger).Log("msg", "error appending to MemHistory", "err", err)
-			return
-		}
-	}
-	level.Info(m.opts.Logger).Log(
-		"msg", "resolved synthetic for_state",
-		"rule", alertRule.Name(),
-		"n_samples", len(vec),
-		"tenant", m.userId,
-	)
-	m.opts.Metrics.EvalDuration(time.Since(start))
-
-	// Now that we've evaluated the rule and written the results to our in memory appender,
-	// delegate to the default implementation.
-	rules.NewMetricsHistory(app, m.opts).RestoreForState(ts, alertRule)
-
-}
-
-type ForStateAppender struct {
-	mtx     sync.Mutex
-	metrics *Metrics
-	rule    *rules.AlertingRule
-	data    map[uint64]*series.ConcreteSeries
-}
-
-func NewForStateAppender(rule *rules.AlertingRule, metrics *Metrics) *ForStateAppender {
-	return &ForStateAppender{
-		rule:    rule,
-		data:    make(map[uint64]*series.ConcreteSeries),
-		metrics: metrics,
-	}
-}
-
-func (m *ForStateAppender) Add(ls labels.Labels, t int64, v float64) (uint64, error) {
-	for _, l := range ls {
-		if l.Name == labels.MetricName && l.Value != rules.AlertForStateMetricName {
-			// This is not an ALERTS_FOR_STATE metric, skip
-			return 0, nil
-		}
-	}
-
-	m.mtx.Lock()
-	defer m.mtx.Unlock()
-
-	fp := ls.Hash()
-
-	if s, ok := m.data[fp]; ok {
-		priorLn := s.Len()
-		s.Add(model.SamplePair{
-			Timestamp: model.Time(t),
-			Value:     model.SampleValue(v),
-		})
-		m.metrics.Samples.Add(float64(s.Len() - priorLn))
-
-		return 0, nil
-	}
-	m.data[fp] = series.NewConcreteSeries(ls, []model.SamplePair{{Timestamp: model.Time(t), Value: model.SampleValue(v)}})
-	m.metrics.Series.Inc()
-	m.metrics.Samples.Inc()
-	return 0, nil
-
-}
-
-// CleanupOldSamples removes samples that are outside of the rule's `For` duration.
-func (m *ForStateAppender) CleanupOldSamples() (seriesRemaining int) {
-	m.mtx.Lock()
-	defer m.mtx.Unlock()
-
-	// TODO: make this factor configurable?
-	// Basically, buffer samples in memory up to ruleDuration * oldEvaluationFactor.
-	oldEvaluationFactor := time.Duration(2)
-
-	for fp, s := range m.data {
-		// release all older references that are no longer needed.
-		priorLn := s.Len()
-		s.TrimStart(time.Now().Add(-m.rule.Duration() * oldEvaluationFactor))
-		m.metrics.Samples.Add(float64(s.Len() - priorLn))
-		if s.Len() == 0 {
-			m.metrics.Series.Dec()
-			delete(m.data, fp)
-		}
-	}
-
-	return len(m.data)
-
-}
-
-func (m *ForStateAppender) AddFast(ref uint64, t int64, v float64) error {
-	return errors.New("unimplemented")
-}
-
-func (m *ForStateAppender) Commit() error { return nil }
-
-func (m *ForStateAppender) Rollback() error { return nil }
-
-// implement storage.Queryable
-func (m *ForStateAppender) Querier(ctx context.Context, mint, maxt int64) (storage.Querier, error) {
-	return ForStateAppenderQuerier{
-		mint:             mint,
-		maxt:             maxt,
-		ForStateAppender: m,
-	}, nil
-
-}
-
-// ForStateAppenderQuerier wraps a **ForStateAppender and implements storage.Querier
-type ForStateAppenderQuerier struct {
-	mint, maxt int64
-	*ForStateAppender
-}
-
-// Select returns a set of series that matches the given label matchers.
-func (q ForStateAppenderQuerier) Select(sortSeries bool, params *storage.SelectHints, matchers ...*labels.Matcher) storage.SeriesSet {
-	// TODO: implement sorted selects (currently unused).
-	if sortSeries {
-		return storage.NoopSeriesSet()
-
-	}
-	q.mtx.Lock()
-	defer q.mtx.Unlock()
-
-	seekTo := q.mint
-	if params != nil && seekTo < params.Start {
-		seekTo = params.Start
-	}
-
-	maxt := q.maxt
-	if params != nil && params.End < maxt {
-		maxt = params.End
-	}
-
-	var filtered []storage.Series
-outer:
-	for _, s := range q.data {
-		for _, matcher := range matchers {
-			if !matcher.Matches(s.Labels().Get(matcher.Name)) {
-				continue outer
-			}
-
-			iter := s.Iterator()
-			var samples []model.SamplePair
-			for ok := iter.Seek(seekTo); ok; ok = iter.Next() {
-				t, v := iter.At()
-				if t > maxt {
-					break
-				}
-
-				samples = append(samples, model.SamplePair{
-					Timestamp: model.Time(t),
-					Value:     model.SampleValue(v),
-				})
-
-			}
-
-			if len(samples) != 0 {
-				filtered = append(filtered, series.NewConcreteSeries(s.Labels(), samples))
-			}
-		}
-	}
-
-	return series.NewConcreteSeriesSet(filtered)
-}
-
-// LabelValues returns all potential values for a label name.
-func (ForStateAppenderQuerier) LabelValues(name string) ([]string, storage.Warnings, error) {
-	return nil, nil, errors.New("unimplemented")
-}
-
-// LabelNames returns all the unique label names present in the block in sorted order.
-func (ForStateAppenderQuerier) LabelNames() ([]string, storage.Warnings, error) {
-	return nil, nil, errors.New("unimplemented")
-}
-
-// Close releases the resources of the Querier.
-func (ForStateAppenderQuerier) Close() error { return nil }
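
The deleted MemHistory took the eager approach: at restore time it ran the query immediately, appended the synthesized samples into a ForStateAppender, and then delegated to the default RestoreForState implementation. The MemStore kept by this series instead synthesizes lazily inside Querier.Select. A schematic, standard-library-only contrast of the two approaches (not the actual Loki code) is:

```go
package main

import (
	"fmt"
	"time"
)

// evalFn stands in for running the rule's query; it returns the names of the
// series that were active at the given time.
type evalFn func(ts time.Time) []string

// eagerRestore (the deleted MemHistory style) evaluates up front and stores
// every synthetic sample, whether or not it is ever queried.
func eagerRestore(eval evalFn, ts time.Time, hold time.Duration) map[string]time.Time {
	out := make(map[string]time.Time)
	for _, s := range eval(ts.Add(-hold)) {
		out[s] = ts
	}
	return out
}

// lazyRestore (the MemStore style) returns a lookup that evaluates only when
// first asked for a timestamp, then serves later lookups from the cache.
func lazyRestore(eval evalFn, hold time.Duration) func(ts time.Time, series string) (time.Time, bool) {
	cache := make(map[int64]map[string]time.Time)
	return func(ts time.Time, series string) (time.Time, bool) {
		bucket, ok := cache[ts.UnixNano()]
		if !ok {
			bucket = make(map[string]time.Time)
			for _, s := range eval(ts.Add(-hold)) {
				bucket[s] = ts
			}
			cache[ts.UnixNano()] = bucket
		}
		at, ok := bucket[series]
		return at, ok
	}
}

func main() {
	eval := func(time.Time) []string { return []string{"alert-a"} }
	now := time.Now()
	fmt.Println(eagerRestore(eval, now, time.Minute))
	lookup := lazyRestore(eval, time.Minute)
	fmt.Println(lookup(now, "alert-a"))
}
```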
diff --git a/pkg/ruler/memhistory_test.go b/pkg/ruler/memhistory_test.go
deleted file mode 100644
index 015df1b21646b..0000000000000
--- a/pkg/ruler/memhistory_test.go
+++ /dev/null
@@ -1,319 +0,0 @@
-package ruler
-
-import (
-	"context"
-	"fmt"
-	"testing"
-	"time"
-
-	"github.com/cortexproject/cortex/pkg/querier/series"
-	"github.com/cortexproject/cortex/pkg/ruler/rules"
-	"github.com/cortexproject/cortex/pkg/util"
-	"github.com/go-kit/kit/log"
-	"github.com/prometheus/common/model"
-	"github.com/prometheus/prometheus/pkg/labels"
-	"github.com/prometheus/prometheus/promql"
-	"github.com/prometheus/prometheus/promql/parser"
-	"github.com/prometheus/prometheus/storage"
-	"github.com/stretchr/testify/require"
-	"github.com/weaveworks/common/user"
-)
-
-func TestMemHistoryAppender(t *testing.T) {
-
-	for _, tc := range []struct {
-		desc     string
-		err      bool
-		expected storage.Appender
-		rule     rules.Rule
-	}{
-		{
-			desc:     "nil rule returns NoopAppender",
-			err:      false,
-			expected: NoopAppender{},
-			rule:     nil,
-		},
-		{
-			desc:     "recording rule errors",
-			err:      true,
-			expected: nil,
-			rule:     &rules.RecordingRule{},
-		},
-		{
-			desc:     "alerting rule returns ForStateAppender",
-			err:      false,
-			expected: NewForStateAppender(rules.NewAlertingRule("foo", nil, 0, nil, nil, nil, true, nil), NewMetrics(nil)),
-			rule:     rules.NewAlertingRule("foo", nil, 0, nil, nil, nil, true, nil),
-		},
-	} {
-		t.Run(tc.desc, func(t *testing.T) {
-			hist := NewMemHistory("abc", time.Minute, nil, NewMetrics(nil))
-
-			app, err := hist.Appender(tc.rule)
-			if tc.err {
-				require.NotNil(t, err)
-			}
-
-			if tc.expected != nil {
-				require.IsTypef(t, tc.expected, app, "expected ForStateAppender")
-			}
-		})
-	}
-}
-
-// func TestMemHistoryRestoreForState(t *testing.T) {}
-
-func TestMemHistoryRestoreForState(t *testing.T) {
-	opts := &rules.ManagerOptions{
-		QueryFunc: rules.QueryFunc(func(ctx context.Context, q string, t time.Time) (promql.Vector, error) {
-			// always return the requested time
-			return promql.Vector{promql.Sample{
-				Point: promql.Point{
-					T: util.TimeToMillis(t),
-					V: float64(util.TimeToMillis(t)),
-				},
-				Metric: mustParseLabels(`{foo="bar", __name__="something"}`),
-			}}, nil
-		}),
-		Context: user.InjectOrgID(context.Background(), "abc"),
-		Logger:  log.NewNopLogger(),
-		Metrics: rules.NewGroupMetrics(nil),
-	}
-
-	ts := time.Now().Round(time.Millisecond)
-	rule := newRule("rule1", "query", `{foo="bar"}`, time.Minute)
-
-	hist := NewMemHistory("abc", time.Minute, opts, NewMetrics(nil))
-	hist.RestoreForState(ts, rule)
-
-	app, err := hist.Appender(rule)
-	require.Nil(t, err)
-	casted := app.(*ForStateAppender)
-
-	q, err := casted.Querier(context.Background(), 0, util.TimeToMillis(ts))
-	require.Nil(t, err)
-	set := q.Select(
-		false,
-		nil,
-		labels.MustNewMatcher(labels.MatchEqual, labels.MetricName, rules.AlertForStateMetricName),
-		labels.MustNewMatcher(labels.MatchEqual, "foo", "bar"),
-	)
-	require.Equal(t, true, set.Next())
-	s := set.At()
-	require.Equal(t, `{__name__="ALERTS_FOR_STATE", alertname="rule1", foo="bar"}`, s.Labels().String())
-	iter := s.Iterator()
-	require.Equal(t, true, iter.Next())
-	x, y := iter.At()
-	adjusted := ts.Add(-rule.Duration()) // Adjusted for the forDuration lookback.
-	require.Equal(t, util.TimeToMillis(adjusted), x)
-	require.Equal(t, float64(util.TimeToMillis(adjusted)), y)
-	require.Equal(t, false, iter.Next())
-
-	// TODO: ensure extra labels are propagated?
-}
-
-func TestMemHistoryStop(t *testing.T) {
-	hist := NewMemHistory("abc", time.Millisecond, nil, NewMetrics(nil))
-	<-time.After(2 * time.Millisecond) // allow it to start ticking (not strictly required for this test)
-	hist.Stop()
-	// ensure idempotency
-	hist.Stop()
-
-	// ensure ticker is cleaned up
-	select {
-	case <-time.After(10 * time.Millisecond):
-		t.Fatalf("done channel not closed")
-	case <-hist.done:
-	}
-}
-
-type stringer string
-
-func (s stringer) String() string { return string(s) }
-
-func mustParseLabels(s string) labels.Labels {
-	labels, err := parser.ParseMetric(s)
-	if err != nil {
-		panic(fmt.Sprintf("failed to parse %s", s))
-	}
-
-	return labels
-}
-
-func newRule(name, qry, ls string, forDur time.Duration) *rules.AlertingRule {
-	return rules.NewAlertingRule(name, stringer(qry), forDur, mustParseLabels(ls), nil, nil, false, log.NewNopLogger())
-}
-
-func TestForStateAppenderAdd(t *testing.T) {
-	app := NewForStateAppender(newRule("foo", "query", `{foo="bar"}`, time.Minute), NewMetrics(nil))
-	require.Equal(t, map[uint64]*series.ConcreteSeries{}, app.data)
-
-	// create first series
-	first := mustParseLabels(`{foo="bar", bazz="buzz", __name__="ALERTS_FOR_STATE"}`)
-	_, err := app.Add(first, 1, 1)
-	require.Nil(t, err)
-	require.Equal(t, map[uint64]*series.ConcreteSeries{
-		first.Hash(): series.NewConcreteSeries(
-			first, []model.SamplePair{{Timestamp: model.Time(1), Value: model.SampleValue(1)}},
-		),
-	}, app.data)
-
-	// create second series
-	second := mustParseLabels(`{foo="bar", bazz="barf", __name__="ALERTS_FOR_STATE"}`)
-	_, err = app.Add(second, 1, 1)
-	require.Nil(t, err)
-
-	require.Equal(t, map[uint64]*series.ConcreteSeries{
-		first.Hash(): series.NewConcreteSeries(
-			first, []model.SamplePair{{Timestamp: model.Time(1), Value: model.SampleValue(1)}},
-		),
-		second.Hash(): series.NewConcreteSeries(
-			second, []model.SamplePair{{Timestamp: model.Time(1), Value: model.SampleValue(1)}},
-		),
-	}, app.data)
-
-	// append first series
-	_, err = app.Add(first, 3, 3)
-	require.Nil(t, err)
-
-	require.Equal(t, map[uint64]*series.ConcreteSeries{
-		first.Hash(): series.NewConcreteSeries(
-			first, []model.SamplePair{
-				{Timestamp: model.Time(1), Value: model.SampleValue(1)},
-				{Timestamp: model.Time(3), Value: model.SampleValue(3)},
-			},
-		),
-		second.Hash(): series.NewConcreteSeries(
-			second, []model.SamplePair{{Timestamp: model.Time(1), Value: model.SampleValue(1)}},
-		),
-	}, app.data)
-
-	// insert new points at correct position
-	_, err = app.Add(first, 2, 2)
-	require.Nil(t, err)
-
-	require.Equal(t, map[uint64]*series.ConcreteSeries{
-		first.Hash(): series.NewConcreteSeries(
-			first, []model.SamplePair{
-				{Timestamp: model.Time(1), Value: model.SampleValue(1)},
-				{Timestamp: model.Time(2), Value: model.SampleValue(2)},
-				{Timestamp: model.Time(3), Value: model.SampleValue(3)},
-			},
-		),
-		second.Hash(): series.NewConcreteSeries(
-			second, []model.SamplePair{{Timestamp: model.Time(1), Value: model.SampleValue(1)}},
-		),
-	}, app.data)
-
-	// ignore non ALERTS_FOR_STATE metrics
-	_, err = app.Add(mustParseLabels(`{foo="bar", bazz="barf", __name__="test"}`), 1, 1)
-	require.Nil(t, err)
-
-	require.Equal(t, map[uint64]*series.ConcreteSeries{
-		first.Hash(): series.NewConcreteSeries(
-			first, []model.SamplePair{
-				{Timestamp: model.Time(1), Value: model.SampleValue(1)},
-				{Timestamp: model.Time(2), Value: model.SampleValue(2)},
-				{Timestamp: model.Time(3), Value: model.SampleValue(3)},
-			},
-		),
-		second.Hash(): series.NewConcreteSeries(
-			second, []model.SamplePair{{Timestamp: model.Time(1), Value: model.SampleValue(1)}},
-		),
-	}, app.data)
-}
-
-func TestForStateAppenderCleanup(t *testing.T) {
-	app := NewForStateAppender(newRule("foo", "query", `{foo="bar"}`, time.Minute), NewMetrics(nil))
-	now := time.Now()
-
-	// create ls series
-	ls := mustParseLabels(`{foo="bar", bazz="buzz", __name__="ALERTS_FOR_STATE"}`)
-	_, err := app.Add(ls, util.TimeToMillis(now.Add(-3*time.Minute)), 1)
-	require.Nil(t, err)
-	_, err = app.Add(ls, util.TimeToMillis(now.Add(-1*time.Minute)), 2)
-	require.Nil(t, err)
-
-	rem := app.CleanupOldSamples()
-	require.Equal(t, 1, rem)
-
-	require.Equal(t, map[uint64]*series.ConcreteSeries{
-		ls.Hash(): series.NewConcreteSeries(
-			ls, []model.SamplePair{
-				{Timestamp: model.Time(util.TimeToMillis(now.Add(-1 * time.Minute))), Value: model.SampleValue(2)},
-			},
-		),
-	}, app.data)
-
-}
-
-func TestForStateAppenderQuerier(t *testing.T) {
-	app := NewForStateAppender(newRule("foo", "query", `{foo="bar"}`, time.Minute), NewMetrics(nil))
-	now := time.Now()
-
-	// create ls series
-	ls := mustParseLabels(`{foo="bar", bazz="buzz", __name__="ALERTS_FOR_STATE"}`)
-	_, err := app.Add(ls, util.TimeToMillis(now.Add(-3*time.Minute)), 1)
-	require.Nil(t, err)
-	_, err = app.Add(ls, util.TimeToMillis(now.Add(-1*time.Minute)), 2)
-	require.Nil(t, err)
-	_, err = app.Add(ls, util.TimeToMillis(now.Add(1*time.Minute)), 3)
-	require.Nil(t, err)
-
-	// never included due to bounds
-	_, err = app.Add(mustParseLabels(`{foo="bar", bazz="blip", __name__="ALERTS_FOR_STATE"}`), util.TimeToMillis(now.Add(-2*time.Hour)), 3)
-	require.Nil(t, err)
-
-	// should succeed with nil selecthints
-	q, err := app.Querier(context.Background(), util.TimeToMillis(now.Add(-2*time.Minute)), util.TimeToMillis(now))
-	require.Nil(t, err)
-
-	set := q.Select(
-		false,
-		nil,
-		labels.MustNewMatcher(labels.MatchEqual, labels.MetricName, rules.AlertForStateMetricName),
-	)
-	require.Equal(
-		t,
-		series.NewConcreteSeriesSet(
-			[]storage.Series{
-				series.NewConcreteSeries(ls, []model.SamplePair{
-					{Timestamp: model.Time(util.TimeToMillis(now.Add(-1 * time.Minute))), Value: model.SampleValue(2)},
-				}),
-			},
-		),
-		set,
-	)
-
-	// should be able to minimize selection window via hints
-	q, err = app.Querier(context.Background(), util.TimeToMillis(now.Add(-time.Hour)), util.TimeToMillis(now.Add(time.Hour)))
-	require.Nil(t, err)
-	set2 := q.Select(
-		false,
-		&storage.SelectHints{
-			Start: util.TimeToMillis(now.Add(-2 * time.Minute)),
-			End:   util.TimeToMillis(now),
-		},
-		labels.MustNewMatcher(labels.MatchEqual, labels.MetricName, rules.AlertForStateMetricName),
-	)
-	require.Equal(
-		t,
-		series.NewConcreteSeriesSet(
-			[]storage.Series{
-				series.NewConcreteSeries(ls, []model.SamplePair{
-					{Timestamp: model.Time(util.TimeToMillis(now.Add(-1 * time.Minute))), Value: model.SampleValue(2)},
-				}),
-			},
-		),
-		set2,
-	)
-
-	// requiring sorted results returns nothing (unsupported)
-	empty := q.Select(
-		true,
-		nil,
-		labels.MustNewMatcher(labels.MatchEqual, labels.MetricName, rules.AlertForStateMetricName),
-	)
-	require.Equal(t, false, empty.Next())
-
-}
diff --git a/pkg/ruler/rules/alerting.go b/pkg/ruler/rules/alerting.go
deleted file mode 100644
index 85ce868cc74b1..0000000000000
--- a/pkg/ruler/rules/alerting.go
+++ /dev/null
@@ -1,556 +0,0 @@
-// Copyright 2013 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package rules
-
-import (
-	"context"
-	"fmt"
-	"net/url"
-	"strings"
-	"sync"
-	"time"
-
-	html_template "html/template"
-
-	yaml "gopkg.in/yaml.v2"
-
-	"github.com/go-kit/kit/log"
-	"github.com/go-kit/kit/log/level"
-	"github.com/pkg/errors"
-	"github.com/prometheus/common/model"
-
-	"github.com/prometheus/prometheus/pkg/labels"
-	"github.com/prometheus/prometheus/pkg/rulefmt"
-	"github.com/prometheus/prometheus/pkg/timestamp"
-	"github.com/prometheus/prometheus/promql"
-	"github.com/prometheus/prometheus/promql/parser"
-	"github.com/prometheus/prometheus/rules"
-	"github.com/prometheus/prometheus/template"
-	"github.com/prometheus/prometheus/util/strutil"
-)
-
-const (
-	// alertMetricName is the metric name for synthetic alert timeseries.
-	alertMetricName = "ALERTS"
-	// AlertForStateMetricName is the metric name for 'for' state of alert.
-	AlertForStateMetricName = "ALERTS_FOR_STATE"
-
-	// alertNameLabel is the label name indicating the name of an alert.
-	alertNameLabel = "alertname"
-	// alertStateLabel is the label name indicating the state of an alert.
-	alertStateLabel = "alertstate"
-)
-
-// AlertState denotes the state of an active alert.
-type AlertState int
-
-const (
-	// StateInactive is the state of an alert that is neither firing nor pending.
-	StateInactive AlertState = iota
-	// StatePending is the state of an alert that has been active for less than
-	// the configured threshold duration.
-	StatePending
-	// StateFiring is the state of an alert that has been active for longer than
-	// the configured threshold duration.
-	StateFiring
-)
-
-func (s AlertState) String() string {
-	switch s {
-	case StateInactive:
-		return "inactive"
-	case StatePending:
-		return "pending"
-	case StateFiring:
-		return "firing"
-	}
-	// avoid calling s.String() here, which would recurse for unknown states
-	panic(errors.Errorf("unknown alert state: %d", s))
-}
-
-// Alert is the user-level representation of a single instance of an alerting rule.
-type Alert struct {
-	State AlertState
-
-	Labels      labels.Labels
-	Annotations labels.Labels
-
-	// The value at the last evaluation of the alerting expression.
-	Value float64
-	// The interval during which the condition of this alert held true.
-	// ResolvedAt will be 0 to indicate a still active alert.
-	ActiveAt   time.Time
-	FiredAt    time.Time
-	ResolvedAt time.Time
-	LastSentAt time.Time
-	ValidUntil time.Time
-}
-
-func (a *Alert) needsSending(ts time.Time, resendDelay time.Duration) bool {
-	if a.State == StatePending {
-		return false
-	}
-
-	// if an alert has been resolved since the last send, resend it
-	if a.ResolvedAt.After(a.LastSentAt) {
-		return true
-	}
-
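-	// otherwise, only resend once resendDelay has elapsed since the last notification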
-	return a.LastSentAt.Add(resendDelay).Before(ts)
-}
-
-// An AlertingRule generates alerts from its vector expression.
-type AlertingRule struct {
-	// The name of the alert.
-	name string
-	// The vector expression from which to generate alerts.
-	vector fmt.Stringer
-	// The duration for which a labelset needs to persist in the expression
-	// output vector before an alert transitions from Pending to Firing state.
-	holdDuration time.Duration
-	// Extra labels to attach to the resulting alert sample vectors.
-	labels labels.Labels
-	// Non-identifying key/value pairs.
-	annotations labels.Labels
-	// External labels from the global config.
-	externalLabels map[string]string
-	// true if old state has been restored. We start persisting samples for ALERTS_FOR_STATE
-	// only after the restoration.
-	restored bool
-	// Protects the below.
-	mtx sync.Mutex
-	// Time in seconds taken to evaluate rule.
-	evaluationDuration time.Duration
-	// Timestamp of last evaluation of rule.
-	evaluationTimestamp time.Time
-	// The health of the alerting rule.
-	health RuleHealth
-	// The last error seen by the alerting rule.
-	lastError error
-	// A map of alerts which are currently active (Pending or Firing), keyed by
-	// the fingerprint of the labelset they correspond to.
-	active map[uint64]*Alert
-
-	logger log.Logger
-}
-
-// NewAlertingRule constructs a new AlertingRule.
-func NewAlertingRule(
-	name string, vec parser.Expr, hold time.Duration,
-	labels, annotations, externalLabels labels.Labels,
-	restored bool, logger log.Logger,
-) *AlertingRule {
-	el := make(map[string]string, len(externalLabels))
-	for _, lbl := range externalLabels {
-		el[lbl.Name] = lbl.Value
-	}
-
-	return &AlertingRule{
-		name:           name,
-		vector:         vec,
-		holdDuration:   hold,
-		labels:         labels,
-		annotations:    annotations,
-		externalLabels: el,
-		health:         rules.HealthUnknown,
-		active:         map[uint64]*Alert{},
-		logger:         logger,
-		restored:       restored,
-	}
-}
-
-// Name returns the name of the alerting rule.
-func (r *AlertingRule) Name() string {
-	return r.name
-}
-
-// SetLastError sets the current error seen by the alerting rule.
-func (r *AlertingRule) SetLastError(err error) {
-	r.mtx.Lock()
-	defer r.mtx.Unlock()
-	r.lastError = err
-}
-
-// LastError returns the last error seen by the alerting rule.
-func (r *AlertingRule) LastError() error {
-	r.mtx.Lock()
-	defer r.mtx.Unlock()
-	return r.lastError
-}
-
-// SetHealth sets the current health of the alerting rule.
-func (r *AlertingRule) SetHealth(health RuleHealth) {
-	r.mtx.Lock()
-	defer r.mtx.Unlock()
-	r.health = health
-}
-
-// Health returns the current health of the alerting rule.
-func (r *AlertingRule) Health() RuleHealth {
-	r.mtx.Lock()
-	defer r.mtx.Unlock()
-	return r.health
-}
-
-// Query returns the query expression of the alerting rule.
-func (r *AlertingRule) Query() fmt.Stringer {
-	return r.vector
-}
-
-// Duration returns the hold duration of the alerting rule.
-func (r *AlertingRule) Duration() time.Duration {
-	return r.holdDuration
-}
-
-// Labels returns the labels of the alerting rule.
-func (r *AlertingRule) Labels() labels.Labels {
-	return r.labels
-}
-
-// Annotations returns the annotations of the alerting rule.
-func (r *AlertingRule) Annotations() labels.Labels {
-	return r.annotations
-}
-
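-// sample returns the ALERTS sample for the given alert at ts, combining the
-// rule labels, the alert's labels, and the alertname/alertstate labels.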
-func (r *AlertingRule) sample(alert *Alert, ts time.Time) promql.Sample {
-	lb := labels.NewBuilder(r.labels)
-
-	for _, l := range alert.Labels {
-		lb.Set(l.Name, l.Value)
-	}
-
-	lb.Set(labels.MetricName, alertMetricName)
-	lb.Set(labels.AlertName, r.name)
-	lb.Set(alertStateLabel, alert.State.String())
-
-	s := promql.Sample{
-		Metric: lb.Labels(),
-		Point:  promql.Point{T: timestamp.FromTime(ts), V: 1},
-	}
-	return s
-}
-
-// forStateSample returns the sample for ALERTS_FOR_STATE.
-func (r *AlertingRule) forStateSample(alert *Alert, ts time.Time, v float64) promql.Sample {
-	lb := labels.NewBuilder(r.labels)
-
-	for _, l := range alert.Labels {
-		lb.Set(l.Name, l.Value)
-	}
-
-	lb.Set(labels.MetricName, AlertForStateMetricName)
-	lb.Set(labels.AlertName, r.name)
-
-	s := promql.Sample{
-		Metric: lb.Labels(),
-		Point:  promql.Point{T: timestamp.FromTime(ts), V: v},
-	}
-	return s
-}
-
-// SetEvaluationDuration updates evaluationDuration to the duration it took to evaluate the rule on its last evaluation.
-func (r *AlertingRule) SetEvaluationDuration(dur time.Duration) {
-	r.mtx.Lock()
-	defer r.mtx.Unlock()
-	r.evaluationDuration = dur
-}
-
-// GetEvaluationDuration returns the time in seconds it took to evaluate the alerting rule.
-func (r *AlertingRule) GetEvaluationDuration() time.Duration {
-	r.mtx.Lock()
-	defer r.mtx.Unlock()
-	return r.evaluationDuration
-}
-
-// SetEvaluationTimestamp updates evaluationTimestamp to the timestamp of when the rule was last evaluated.
-func (r *AlertingRule) SetEvaluationTimestamp(ts time.Time) {
-	r.mtx.Lock()
-	defer r.mtx.Unlock()
-	r.evaluationTimestamp = ts
-}
-
-// GetEvaluationTimestamp returns the time the evaluation took place.
-func (r *AlertingRule) GetEvaluationTimestamp() time.Time {
-	r.mtx.Lock()
-	defer r.mtx.Unlock()
-	return r.evaluationTimestamp
-}
-
-// SetRestored updates the restoration state of the alerting rule.
-func (r *AlertingRule) SetRestored(restored bool) {
-	r.restored = restored
-}
-
-// resolvedRetention is the duration for which a resolved alert instance
-// is kept in memory state and consequently repeatedly sent to the AlertManager.
-const resolvedRetention = 15 * time.Minute
-
-// Eval evaluates the rule expression and then creates pending alerts and fires
-// or removes previously pending alerts accordingly.
-func (r *AlertingRule) Eval(ctx context.Context, ts time.Time, query rules.QueryFunc, externalURL *url.URL) (promql.Vector, error) {
-	res, err := query(ctx, r.vector.String(), ts)
-	if err != nil {
-		r.SetHealth(rules.HealthBad)
-		r.SetLastError(err)
-		return nil, err
-	}
-
-	r.mtx.Lock()
-	defer r.mtx.Unlock()
-
-	// Create pending alerts for any new vector elements in the alert expression
-	// or update the expression value for existing elements.
-	resultFPs := map[uint64]struct{}{}
-
-	var vec promql.Vector
-	var alerts = make(map[uint64]*Alert, len(res))
-	for _, smpl := range res {
-		// Provide the alert information to the template.
-		l := make(map[string]string, len(smpl.Metric))
-		for _, lbl := range smpl.Metric {
-			l[lbl.Name] = lbl.Value
-		}
-
-		tmplData := template.AlertTemplateData(l, r.externalLabels, smpl.V)
-		// Inject some convenience variables that are easier to remember for users
-		// who are not used to Go's templating system.
-		defs := []string{
-			"{{$labels := .Labels}}",
-			"{{$externalLabels := .ExternalLabels}}",
-			"{{$value := .Value}}",
-		}
-
-		expand := func(text string) string {
-			tmpl := template.NewTemplateExpander(
-				ctx,
-				strings.Join(append(defs, text), ""),
-				"__alert_"+r.Name(),
-				tmplData,
-				model.Time(timestamp.FromTime(ts)),
-				template.QueryFunc(query),
-				externalURL,
-			)
-			result, err := tmpl.Expand()
-			if err != nil {
-				result = fmt.Sprintf("<error expanding template: %s>", err)
-				level.Warn(r.logger).Log("msg", "Expanding alert template failed", "err", err, "data", tmplData)
-			}
-			return result
-		}
-
-		lb := labels.NewBuilder(smpl.Metric).Del(labels.MetricName)
-
-		for _, l := range r.labels {
-			lb.Set(l.Name, expand(l.Value))
-		}
-		lb.Set(labels.AlertName, r.Name())
-
-		annotations := make(labels.Labels, 0, len(r.annotations))
-		for _, a := range r.annotations {
-			annotations = append(annotations, labels.Label{Name: a.Name, Value: expand(a.Value)})
-		}
-
-		lbs := lb.Labels()
-		h := lbs.Hash()
-		resultFPs[h] = struct{}{}
-
-		if _, ok := alerts[h]; ok {
-			err = fmt.Errorf("vector contains metrics with the same labelset after applying alert labels")
-			// We have already acquired the lock above hence using SetHealth and
-			// SetLastError will deadlock.
-			r.health = rules.HealthBad
-			r.lastError = err
-			return nil, err
-		}
-
-		alerts[h] = &Alert{
-			Labels:      lbs,
-			Annotations: annotations,
-			ActiveAt:    ts,
-			State:       StatePending,
-			Value:       smpl.V,
-		}
-	}
-
-	for h, a := range alerts {
-		// Check whether we already have alerting state for the identifying label set.
-		// Update the last value and annotations if so, create a new alert entry otherwise.
-		if alert, ok := r.active[h]; ok && alert.State != StateInactive {
-			alert.Value = a.Value
-			alert.Annotations = a.Annotations
-			continue
-		}
-
-		r.active[h] = a
-	}
-
-	// Check if any pending alerts should be removed or fire now. Write out alert timeseries.
-	for fp, a := range r.active {
-		if _, ok := resultFPs[fp]; !ok {
-			// If the alert was previously firing, keep it around for a given
-			// retention time so it is reported as resolved to the AlertManager.
-			if a.State == StatePending || (!a.ResolvedAt.IsZero() && ts.Sub(a.ResolvedAt) > resolvedRetention) {
-				delete(r.active, fp)
-			}
-			if a.State != StateInactive {
-				a.State = StateInactive
-				a.ResolvedAt = ts
-			}
-			continue
-		}
-
-		if a.State == StatePending && ts.Sub(a.ActiveAt) >= r.holdDuration {
-			a.State = StateFiring
-			a.FiredAt = ts
-		}
-
-		if r.restored {
-			vec = append(vec, r.sample(a, ts))
-			vec = append(vec, r.forStateSample(a, ts, float64(a.ActiveAt.Unix())))
-		}
-	}
-
-	// We have already acquired the lock above hence using SetHealth and
-	// SetLastError will deadlock.
-	r.health = rules.HealthGood
-	r.lastError = err
-	return vec, nil
-}
-
-// State returns the maximum state of alert instances for this rule.
-// StateFiring > StatePending > StateInactive
-func (r *AlertingRule) State() AlertState {
-	r.mtx.Lock()
-	defer r.mtx.Unlock()
-
-	maxState := StateInactive
-	for _, a := range r.active {
-		if a.State > maxState {
-			maxState = a.State
-		}
-	}
-	return maxState
-}
-
-// ActiveAlerts returns a slice of active alerts.
-func (r *AlertingRule) ActiveAlerts() []*Alert {
-	var res []*Alert
-	for _, a := range r.currentAlerts() {
-		if a.ResolvedAt.IsZero() {
-			res = append(res, a)
-		}
-	}
-	return res
-}
-
-// currentAlerts returns all instances of alerts for this rule. This may include
-// inactive alerts that were previously firing.
-func (r *AlertingRule) currentAlerts() []*Alert {
-	r.mtx.Lock()
-	defer r.mtx.Unlock()
-
-	alerts := make([]*Alert, 0, len(r.active))
-
-	for _, a := range r.active {
-		anew := *a
-		alerts = append(alerts, &anew)
-	}
-	return alerts
-}
-
-// ForEachActiveAlert runs the given function on each alert.
-// This should be used when you want to use the actual alerts from the AlertingRule
-// and not on its copy.
-// If you want to run on a copy of alerts then don't use this, get the alerts from 'ActiveAlerts()'.
-func (r *AlertingRule) ForEachActiveAlert(f func(*Alert)) {
-	r.mtx.Lock()
-	defer r.mtx.Unlock()
-
-	for _, a := range r.active {
-		f(a)
-	}
-}
-
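-// sendAlerts forwards a copy of each active alert that is due for (re)notification
-// to notifyFunc, stamping LastSentAt and a ValidUntil horizon on the originals.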
-func (r *AlertingRule) sendAlerts(ctx context.Context, ts time.Time, resendDelay time.Duration, interval time.Duration, notifyFunc NotifyFunc) {
-	alerts := []*Alert{}
-	r.ForEachActiveAlert(func(alert *Alert) {
-		if alert.needsSending(ts, resendDelay) {
-			alert.LastSentAt = ts
-			// Allow for a couple Eval or Alertmanager send failures
-			delta := resendDelay
-			if interval > resendDelay {
-				delta = interval
-			}
-			alert.ValidUntil = ts.Add(3 * delta)
-			anew := *alert
-			alerts = append(alerts, &anew)
-		}
-	})
-	notifyFunc(ctx, r.vector.String(), alerts...)
-}
-
-func (r *AlertingRule) String() string {
-	ar := rulefmt.Rule{
-		Alert:       r.name,
-		Expr:        r.vector.String(),
-		For:         model.Duration(r.holdDuration),
-		Labels:      r.labels.Map(),
-		Annotations: r.annotations.Map(),
-	}
-
-	byt, err := yaml.Marshal(ar)
-	if err != nil {
-		return fmt.Sprintf("error marshaling alerting rule: %s", err.Error())
-	}
-
-	return string(byt)
-}
-
-// HTMLSnippet returns an HTML snippet representing this alerting rule. The
-// resulting snippet is expected to be presented in a <pre> element, so that
-// line breaks and other returned whitespace is respected.
-func (r *AlertingRule) HTMLSnippet(pathPrefix string) html_template.HTML {
-	alertMetric := model.Metric{
-		model.MetricNameLabel: alertMetricName,
-		alertNameLabel:        model.LabelValue(r.name),
-	}
-
-	labelsMap := make(map[string]string, len(r.labels))
-	for _, l := range r.labels {
-		labelsMap[l.Name] = html_template.HTMLEscapeString(l.Value)
-	}
-
-	annotationsMap := make(map[string]string, len(r.annotations))
-	for _, l := range r.annotations {
-		annotationsMap[l.Name] = html_template.HTMLEscapeString(l.Value)
-	}
-
-	ar := rulefmt.Rule{
-		Alert:       fmt.Sprintf("<a href=%q>%s</a>", pathPrefix+strutil.TableLinkForExpression(alertMetric.String()), r.name),
-		Expr:        fmt.Sprintf("<a href=%q>%s</a>", pathPrefix+strutil.TableLinkForExpression(r.vector.String()), html_template.HTMLEscapeString(r.vector.String())),
-		For:         model.Duration(r.holdDuration),
-		Labels:      labelsMap,
-		Annotations: annotationsMap,
-	}
-
-	byt, err := yaml.Marshal(ar)
-	if err != nil {
-		return html_template.HTML(fmt.Sprintf("error marshaling alerting rule: %q", html_template.HTMLEscapeString(err.Error())))
-	}
-	return html_template.HTML(byt)
-}
-
-// HoldDuration returns the holdDuration of the alerting rule.
-func (r *AlertingRule) HoldDuration() time.Duration {
-	return r.holdDuration
-}
diff --git a/pkg/ruler/rules/alerting_test.go b/pkg/ruler/rules/alerting_test.go
deleted file mode 100644
index 5387cdd731d38..0000000000000
--- a/pkg/ruler/rules/alerting_test.go
+++ /dev/null
@@ -1,329 +0,0 @@
-// Copyright 2016 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package rules
-
-import (
-	"context"
-	"fmt"
-	"testing"
-	"time"
-
-	"github.com/go-kit/kit/log"
-	"github.com/prometheus/prometheus/pkg/labels"
-	"github.com/prometheus/prometheus/pkg/timestamp"
-	"github.com/prometheus/prometheus/promql"
-	"github.com/prometheus/prometheus/promql/parser"
-	"github.com/prometheus/prometheus/rules"
-	"github.com/prometheus/prometheus/util/teststorage"
-	"github.com/prometheus/prometheus/util/testutil"
-)
-
-func TestAlertingRuleHTMLSnippet(t *testing.T) {
-	expr, err := parser.ParseExpr(`foo{html="<b>BOLD<b>"}`)
-	testutil.Ok(t, err)
-	rule := NewAlertingRule("testrule", expr, 0, labels.FromStrings("html", "<b>BOLD</b>"), labels.FromStrings("html", "<b>BOLD</b>"), nil, false, nil)
-
-	const want = `alert: testrule
-expr: foo{html="<b>BOLD<b>"}
-labels:
-  html: '<b>BOLD</b>'
-annotations:
-  html: '<b>BOLD</b>'
-`
-
-	got := rule.HTMLSnippet("/test/prefix")
-	testutil.Assert(t, want == got, "incorrect HTML snippet; want:\n\n|%v|\n\ngot:\n\n|%v|", want, got)
-}
-
-func TestAlertingRuleLabelsUpdate(t *testing.T) {
-	suite, err := promql.NewTest(t, `
-		load 1m
-			http_requests{job="app-server", instance="0"}	75 85 70 70
-	`)
-	testutil.Ok(t, err)
-	defer suite.Close()
-
-	testutil.Ok(t, suite.Run())
-
-	expr, err := parser.ParseExpr(`http_requests < 100`)
-	testutil.Ok(t, err)
-
-	rule := NewAlertingRule(
-		"HTTPRequestRateLow",
-		expr,
-		time.Minute,
-		// Basing alerting rule labels off of a value that can change is a very bad idea.
-		// If an alert is going back and forth between two label values it will never fire.
-		// Instead, you should write two alerts with constant labels.
-		labels.FromStrings("severity", "{{ if lt $value 80.0 }}critical{{ else }}warning{{ end }}"),
-		nil, nil, true, nil,
-	)
-
-	results := []promql.Vector{
-		{
-			{
-				Metric: labels.FromStrings(
-					"__name__", "ALERTS",
-					"alertname", "HTTPRequestRateLow",
-					"alertstate", "pending",
-					"instance", "0",
-					"job", "app-server",
-					"severity", "critical",
-				),
-				Point: promql.Point{V: 1},
-			},
-		},
-		{
-			{
-				Metric: labels.FromStrings(
-					"__name__", "ALERTS",
-					"alertname", "HTTPRequestRateLow",
-					"alertstate", "pending",
-					"instance", "0",
-					"job", "app-server",
-					"severity", "warning",
-				),
-				Point: promql.Point{V: 1},
-			},
-		},
-		{
-			{
-				Metric: labels.FromStrings(
-					"__name__", "ALERTS",
-					"alertname", "HTTPRequestRateLow",
-					"alertstate", "pending",
-					"instance", "0",
-					"job", "app-server",
-					"severity", "critical",
-				),
-				Point: promql.Point{V: 1},
-			},
-		},
-		{
-			{
-				Metric: labels.FromStrings(
-					"__name__", "ALERTS",
-					"alertname", "HTTPRequestRateLow",
-					"alertstate", "firing",
-					"instance", "0",
-					"job", "app-server",
-					"severity", "critical",
-				),
-				Point: promql.Point{V: 1},
-			},
-		},
-	}
-
-	baseTime := time.Unix(0, 0)
-	for i, result := range results {
-		t.Logf("case %d", i)
-		evalTime := baseTime.Add(time.Duration(i) * time.Minute)
-		result[0].Point.T = timestamp.FromTime(evalTime)
-		res, err := rule.Eval(suite.Context(), evalTime, rules.EngineQueryFunc(suite.QueryEngine(), suite.Storage()), nil)
-		testutil.Ok(t, err)
-
-		var filteredRes promql.Vector // After removing 'ALERTS_FOR_STATE' samples.
-		for _, smpl := range res {
-			smplName := smpl.Metric.Get("__name__")
-			if smplName == "ALERTS" {
-				filteredRes = append(filteredRes, smpl)
-			} else {
-				// If not 'ALERTS', it has to be 'ALERTS_FOR_STATE'.
-				testutil.Equals(t, "ALERTS_FOR_STATE", smplName)
-			}
-		}
-
-		testutil.Equals(t, result, filteredRes)
-	}
-}
-
-func TestAlertingRuleExternalLabelsInTemplate(t *testing.T) {
-	suite, err := promql.NewTest(t, `
-		load 1m
-			http_requests{job="app-server", instance="0"}	75 85 70 70
-	`)
-	testutil.Ok(t, err)
-	defer suite.Close()
-
-	testutil.Ok(t, suite.Run())
-
-	expr, err := parser.ParseExpr(`http_requests < 100`)
-	testutil.Ok(t, err)
-
-	ruleWithoutExternalLabels := NewAlertingRule(
-		"ExternalLabelDoesNotExist",
-		expr,
-		time.Minute,
-		labels.FromStrings("templated_label", "There are {{ len $externalLabels }} external Labels, of which foo is {{ $externalLabels.foo }}."),
-		nil,
-		nil,
-		true, log.NewNopLogger(),
-	)
-	ruleWithExternalLabels := NewAlertingRule(
-		"ExternalLabelExists",
-		expr,
-		time.Minute,
-		labels.FromStrings("templated_label", "There are {{ len $externalLabels }} external Labels, of which foo is {{ $externalLabels.foo }}."),
-		nil,
-		labels.FromStrings("foo", "bar", "dings", "bums"),
-		true, log.NewNopLogger(),
-	)
-	result := promql.Vector{
-		{
-			Metric: labels.FromStrings(
-				"__name__", "ALERTS",
-				"alertname", "ExternalLabelDoesNotExist",
-				"alertstate", "pending",
-				"instance", "0",
-				"job", "app-server",
-				"templated_label", "There are 0 external Labels, of which foo is .",
-			),
-			Point: promql.Point{V: 1},
-		},
-		{
-			Metric: labels.FromStrings(
-				"__name__", "ALERTS",
-				"alertname", "ExternalLabelExists",
-				"alertstate", "pending",
-				"instance", "0",
-				"job", "app-server",
-				"templated_label", "There are 2 external Labels, of which foo is bar.",
-			),
-			Point: promql.Point{V: 1},
-		},
-	}
-
-	evalTime := time.Unix(0, 0)
-	result[0].Point.T = timestamp.FromTime(evalTime)
-	result[1].Point.T = timestamp.FromTime(evalTime)
-
-	var filteredRes promql.Vector // After removing 'ALERTS_FOR_STATE' samples.
-	res, err := ruleWithoutExternalLabels.Eval(
-		suite.Context(), evalTime, rules.EngineQueryFunc(suite.QueryEngine(), suite.Storage()), nil,
-	)
-	testutil.Ok(t, err)
-	for _, smpl := range res {
-		smplName := smpl.Metric.Get("__name__")
-		if smplName == "ALERTS" {
-			filteredRes = append(filteredRes, smpl)
-		} else {
-			// If not 'ALERTS', it has to be 'ALERTS_FOR_STATE'.
-			testutil.Equals(t, "ALERTS_FOR_STATE", smplName)
-		}
-	}
-
-	res, err = ruleWithExternalLabels.Eval(
-		suite.Context(), evalTime, rules.EngineQueryFunc(suite.QueryEngine(), suite.Storage()), nil,
-	)
-	testutil.Ok(t, err)
-	for _, smpl := range res {
-		smplName := smpl.Metric.Get("__name__")
-		if smplName == "ALERTS" {
-			filteredRes = append(filteredRes, smpl)
-		} else {
-			// If not 'ALERTS', it has to be 'ALERTS_FOR_STATE'.
-			testutil.Equals(t, "ALERTS_FOR_STATE", smplName)
-		}
-	}
-
-	testutil.Equals(t, result, filteredRes)
-}
-
-func TestAlertingRuleEmptyLabelFromTemplate(t *testing.T) {
-	suite, err := promql.NewTest(t, `
-		load 1m
-			http_requests{job="app-server", instance="0"}	75 85 70 70
-	`)
-	testutil.Ok(t, err)
-	defer suite.Close()
-
-	testutil.Ok(t, suite.Run())
-
-	expr, err := parser.ParseExpr(`http_requests < 100`)
-	testutil.Ok(t, err)
-
-	rule := NewAlertingRule(
-		"EmptyLabel",
-		expr,
-		time.Minute,
-		labels.FromStrings("empty_label", ""),
-		nil,
-		nil,
-		true, log.NewNopLogger(),
-	)
-	result := promql.Vector{
-		{
-			Metric: labels.FromStrings(
-				"__name__", "ALERTS",
-				"alertname", "EmptyLabel",
-				"alertstate", "pending",
-				"instance", "0",
-				"job", "app-server",
-			),
-			Point: promql.Point{V: 1},
-		},
-	}
-
-	evalTime := time.Unix(0, 0)
-	result[0].Point.T = timestamp.FromTime(evalTime)
-
-	var filteredRes promql.Vector // After removing 'ALERTS_FOR_STATE' samples.
-	res, err := rule.Eval(
-		suite.Context(), evalTime, rules.EngineQueryFunc(suite.QueryEngine(), suite.Storage()), nil,
-	)
-	testutil.Ok(t, err)
-	for _, smpl := range res {
-		smplName := smpl.Metric.Get("__name__")
-		if smplName == "ALERTS" {
-			filteredRes = append(filteredRes, smpl)
-		} else {
-			// If not 'ALERTS', it has to be 'ALERTS_FOR_STATE'.
-			testutil.Equals(t, "ALERTS_FOR_STATE", smplName)
-		}
-	}
-	testutil.Equals(t, result, filteredRes)
-}
-
-func TestAlertingRuleDuplicate(t *testing.T) {
-	storage := teststorage.New(t)
-	defer storage.Close()
-
-	opts := promql.EngineOpts{
-		Logger:     nil,
-		Reg:        nil,
-		MaxSamples: 10,
-		Timeout:    10 * time.Second,
-	}
-
-	engine := promql.NewEngine(opts)
-	ctx, cancelCtx := context.WithCancel(context.Background())
-	defer cancelCtx()
-
-	now := time.Now()
-
-	expr, _ := parser.ParseExpr(`vector(0) or label_replace(vector(0),"test","x","","")`)
-	rule := NewAlertingRule(
-		"foo",
-		expr,
-		time.Minute,
-		labels.FromStrings("test", "test"),
-		nil,
-		nil,
-		true, log.NewNopLogger(),
-	)
-	_, err := rule.Eval(ctx, now, rules.EngineQueryFunc(engine, storage), nil)
-	testutil.NotOk(t, err)
-	e := fmt.Errorf("vector contains metrics with the same labelset after applying alert labels")
-	testutil.ErrorEqual(t, e, err)
-}
diff --git a/pkg/ruler/rules/fixtures/rules.yaml b/pkg/ruler/rules/fixtures/rules.yaml
deleted file mode 100644
index 38fe21cb8d047..0000000000000
--- a/pkg/ruler/rules/fixtures/rules.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
-groups:
-  - name: test
-    rules:
-    - record: job:http_requests:rate5m
-      expr: sum by (job)(rate({job="http"}[5m]))
-
diff --git a/pkg/ruler/rules/fixtures/rules2.yaml b/pkg/ruler/rules/fixtures/rules2.yaml
deleted file mode 100644
index e405138f8af5c..0000000000000
--- a/pkg/ruler/rules/fixtures/rules2.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-groups:
-  - name: test_2
-    rules:
-    - record: test_2
-      expr: vector(2)
diff --git a/pkg/ruler/rules/fixtures/rules2_copy.yaml b/pkg/ruler/rules/fixtures/rules2_copy.yaml
deleted file mode 100644
index dd74b65116f35..0000000000000
--- a/pkg/ruler/rules/fixtures/rules2_copy.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-groups:
-  - name: test_2 copy
-    rules:
-    - record: test_2
-      expr: vector(2)
diff --git a/pkg/ruler/rules/manager.go b/pkg/ruler/rules/manager.go
deleted file mode 100644
index 26961d727c098..0000000000000
--- a/pkg/ruler/rules/manager.go
+++ /dev/null
@@ -1,1004 +0,0 @@
-package rules
-
-import (
-	"context"
-	"math"
-	"net/url"
-	"sort"
-	"sync"
-	"time"
-
-	"github.com/cortexproject/cortex/pkg/ruler"
-	"github.com/go-kit/kit/log"
-	"github.com/go-kit/kit/log/level"
-	"github.com/grafana/loki/pkg/logql"
-	"github.com/opentracing/opentracing-go"
-	"github.com/pkg/errors"
-	"github.com/prometheus/client_golang/prometheus"
-	"github.com/prometheus/common/model"
-	"github.com/prometheus/prometheus/pkg/labels"
-	"github.com/prometheus/prometheus/pkg/rulefmt"
-	"github.com/prometheus/prometheus/pkg/timestamp"
-	"github.com/prometheus/prometheus/pkg/value"
-	"github.com/prometheus/prometheus/promql"
-	"github.com/prometheus/prometheus/promql/parser"
-	promRules "github.com/prometheus/prometheus/rules"
-	"github.com/prometheus/prometheus/storage"
-)
-
-// RuleHealth describes the health state of a rule. Alias to rules pkg.
-type RuleHealth = promRules.RuleHealth
-
-// Constants for instrumentation.
-const namespace = "prometheus"
-
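-// Metrics bundles the per-group and per-evaluation instrumentation exposed by
-// the rule manager.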
-type Metrics struct {
-	evalDuration        prometheus.Summary
-	iterationDuration   prometheus.Summary
-	iterationsMissed    prometheus.Counter
-	iterationsScheduled prometheus.Counter
-	evalTotal           *prometheus.CounterVec
-	evalFailures        *prometheus.CounterVec
-	groupInterval       *prometheus.GaugeVec
-	groupLastEvalTime   *prometheus.GaugeVec
-	groupLastDuration   *prometheus.GaugeVec
-	groupRules          *prometheus.GaugeVec
-}
-
-// NewGroupMetrics creates a new instance of Metrics and registers it with the provided registerer,
-// if not nil.
-func NewGroupMetrics(reg prometheus.Registerer) *Metrics {
-	m := &Metrics{
-		evalDuration: prometheus.NewSummary(
-			prometheus.SummaryOpts{
-				Namespace:  namespace,
-				Name:       "rule_evaluation_duration_seconds",
-				Help:       "The duration for a rule to execute.",
-				Objectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001},
-			}),
-		iterationDuration: prometheus.NewSummary(prometheus.SummaryOpts{
-			Namespace:  namespace,
-			Name:       "rule_group_duration_seconds",
-			Help:       "The duration of rule group evaluations.",
-			Objectives: map[float64]float64{0.01: 0.001, 0.05: 0.005, 0.5: 0.05, 0.90: 0.01, 0.99: 0.001},
-		}),
-		iterationsMissed: prometheus.NewCounter(prometheus.CounterOpts{
-			Namespace: namespace,
-			Name:      "rule_group_iterations_missed_total",
-			Help:      "The total number of rule group evaluations missed due to slow rule group evaluation.",
-		}),
-		iterationsScheduled: prometheus.NewCounter(prometheus.CounterOpts{
-			Namespace: namespace,
-			Name:      "rule_group_iterations_total",
-			Help:      "The total number of scheduled rule group evaluations, whether executed or missed.",
-		}),
-		evalTotal: prometheus.NewCounterVec(
-			prometheus.CounterOpts{
-				Namespace: namespace,
-				Name:      "rule_evaluations_total",
-				Help:      "The total number of rule evaluations.",
-			},
-			[]string{"rule_group"},
-		),
-		evalFailures: prometheus.NewCounterVec(
-			prometheus.CounterOpts{
-				Namespace: namespace,
-				Name:      "rule_evaluation_failures_total",
-				Help:      "The total number of rule evaluation failures.",
-			},
-			[]string{"rule_group"},
-		),
-		groupInterval: prometheus.NewGaugeVec(
-			prometheus.GaugeOpts{
-				Namespace: namespace,
-				Name:      "rule_group_interval_seconds",
-				Help:      "The interval of a rule group.",
-			},
-			[]string{"rule_group"},
-		),
-		groupLastEvalTime: prometheus.NewGaugeVec(
-			prometheus.GaugeOpts{
-				Namespace: namespace,
-				Name:      "rule_group_last_evaluation_timestamp_seconds",
-				Help:      "The timestamp of the last rule group evaluation in seconds.",
-			},
-			[]string{"rule_group"},
-		),
-		groupLastDuration: prometheus.NewGaugeVec(
-			prometheus.GaugeOpts{
-				Namespace: namespace,
-				Name:      "rule_group_last_duration_seconds",
-				Help:      "The duration of the last rule group evaluation.",
-			},
-			[]string{"rule_group"},
-		),
-		groupRules: prometheus.NewGaugeVec(
-			prometheus.GaugeOpts{
-				Namespace: namespace,
-				Name:      "rule_group_rules",
-				Help:      "The number of rules.",
-			},
-			[]string{"rule_group"},
-		),
-	}
-
-	if reg != nil {
-		reg.MustRegister(
-			m.evalDuration,
-			m.iterationDuration,
-			m.iterationsMissed,
-			m.iterationsScheduled,
-			m.evalTotal,
-			m.evalFailures,
-			m.groupInterval,
-			m.groupLastEvalTime,
-			m.groupLastDuration,
-			m.groupRules,
-		)
-	}
-
-	return m
-}
-
-// Group is a set of rules that have a logical relation.
-type Group struct {
-	name                 string
-	file                 string
-	interval             time.Duration
-	rules                []promRules.Rule
-	seriesInPreviousEval []map[string]labels.Labels // One per Rule.
-	staleSeries          []labels.Labels
-	opts                 *ManagerOptions
-	mtx                  sync.Mutex
-	evaluationDuration   time.Duration
-	evaluationTimestamp  time.Time
-
-	shouldRestore bool
-
-	markStale   bool
-	done        chan struct{}
-	terminated  chan struct{}
-	managerDone chan struct{}
-
-	logger log.Logger
-
-	metrics *Metrics
-}
-
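-// GroupOptions holds the configuration used by NewGroup to construct a Group.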
-type GroupOptions struct {
-	Name, File    string
-	Interval      time.Duration
-	Rules         []promRules.Rule
-	ShouldRestore bool
-	Opts          *ManagerOptions
-	done          chan struct{}
-}
-
-// NewGroup makes a new Group with the given name, options, and rules.
-func NewGroup(o GroupOptions) *Group {
-	metrics := o.Opts.Metrics
-	if metrics == nil {
-		metrics = NewGroupMetrics(o.Opts.Registerer)
-	}
-
-	key := groupKey(o.File, o.Name)
-	metrics.evalTotal.WithLabelValues(key)
-	metrics.evalFailures.WithLabelValues(key)
-	metrics.groupLastEvalTime.WithLabelValues(key)
-	metrics.groupLastDuration.WithLabelValues(key)
-	metrics.groupRules.WithLabelValues(key).Set(float64(len(o.Rules)))
-	metrics.groupInterval.WithLabelValues(key).Set(o.Interval.Seconds())
-
-	return &Group{
-		name:                 o.Name,
-		file:                 o.File,
-		interval:             o.Interval,
-		rules:                o.Rules,
-		shouldRestore:        o.ShouldRestore,
-		opts:                 o.Opts,
-		seriesInPreviousEval: make([]map[string]labels.Labels, len(o.Rules)),
-		done:                 make(chan struct{}),
-		managerDone:          o.done,
-		terminated:           make(chan struct{}),
-		logger:               log.With(o.Opts.Logger, "group", o.Name),
-		metrics:              metrics,
-	}
-}
-
-// Name returns the group name.
-func (g *Group) Name() string { return g.name }
-
-// File returns the group's file.
-func (g *Group) File() string { return g.file }
-
-// Rules returns the group's rules.
-func (g *Group) Rules() []promRules.Rule { return g.rules }
-
-// Interval returns the group's interval.
-func (g *Group) Interval() time.Duration { return g.interval }
-
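-// run evaluates the group on its consistently slotted schedule until the done
-// channel is closed, restoring alert 'for' state after the second iteration
-// when requested.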
-func (g *Group) run(ctx context.Context) {
-	defer close(g.terminated)
-
-	// Wait an initial amount to have consistently slotted intervals.
-	evalTimestamp := g.evalTimestamp().Add(g.interval)
-	select {
-	case <-time.After(time.Until(evalTimestamp)):
-	case <-g.done:
-		return
-	}
-
-	ctx = promql.NewOriginContext(ctx, map[string]interface{}{
-		"ruleGroup": map[string]string{
-			"file": g.File(),
-			"name": g.Name(),
-		},
-	})
-
-	iter := func() {
-		g.metrics.iterationsScheduled.Inc()
-
-		start := time.Now()
-		g.Eval(ctx, evalTimestamp)
-		timeSinceStart := time.Since(start)
-
-		g.metrics.iterationDuration.Observe(timeSinceStart.Seconds())
-		g.setEvaluationDuration(timeSinceStart)
-		g.setEvaluationTimestamp(start)
-	}
-
-	// The assumption here is that since the ticker was started after having
-	// waited for `evalTimestamp` to pass, the ticks will trigger soon
-	// after each `evalTimestamp + N * g.interval` occurrence.
-	tick := time.NewTicker(g.interval)
-	defer tick.Stop()
-
-	defer func() {
-		if !g.markStale {
-			return
-		}
-		go func(now time.Time) {
-			for _, rule := range g.seriesInPreviousEval {
-				for _, r := range rule {
-					g.staleSeries = append(g.staleSeries, r)
-				}
-			}
-			// The previous-eval series map can be garbage collected at this point.
-			g.seriesInPreviousEval = nil
-			// Wait for two intervals to give renamed rules the opportunity to
-			// insert new series into the TSDB. At this point any renamed rule
-			// should already have been started.
-			select {
-			case <-g.managerDone:
-			case <-time.After(2 * g.interval):
-				g.cleanupStaleSeries(now)
-			}
-		}(time.Now())
-	}()
-
-	iter()
-	if g.shouldRestore {
-		// If we have to restore, we wait for another Eval to finish.
-		// The reason behind this is, during first eval (or before it)
-		// we might not have enough data scraped, and recording rules would not
-		// have updated the latest values, on which some alerts might depend.
-		select {
-		case <-g.done:
-			return
-		case <-tick.C:
-			missed := (time.Since(evalTimestamp) / g.interval) - 1
-			if missed > 0 {
-				g.metrics.iterationsMissed.Add(float64(missed))
-				g.metrics.iterationsScheduled.Add(float64(missed))
-			}
-			evalTimestamp = evalTimestamp.Add((missed + 1) * g.interval)
-			iter()
-		}
-
-		g.RestoreForState(time.Now())
-		g.shouldRestore = false
-	}
-
-	for {
-		select {
-		case <-g.done:
-			return
-		default:
-			select {
-			case <-g.done:
-				return
-			case <-tick.C:
-				missed := (time.Since(evalTimestamp) / g.interval) - 1
-				if missed > 0 {
-					g.metrics.iterationsMissed.Add(float64(missed))
-					g.metrics.iterationsScheduled.Add(float64(missed))
-				}
-				evalTimestamp = evalTimestamp.Add((missed + 1) * g.interval)
-				iter()
-			}
-		}
-	}
-}
-
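-// stop signals the group to terminate and blocks until the in-flight
-// iteration has finished.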
-func (g *Group) stop() {
-	close(g.done)
-	<-g.terminated
-}
-
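-// hash returns a stable identifier derived from the group's name and file,
-// used to offset its evaluation slot within the interval.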
-func (g *Group) hash() uint64 {
-	l := labels.New(
-		labels.Label{Name: "name", Value: g.name},
-		labels.Label{Name: "file", Value: g.file},
-	)
-	return l.Hash()
-}
-
-// AlertingRules returns the list of the group's alerting rules.
-func (g *Group) AlertingRules() []*AlertingRule {
-	g.mtx.Lock()
-	defer g.mtx.Unlock()
-
-	var alerts []*AlertingRule
-	for _, rule := range g.rules {
-		if alertingRule, ok := rule.(*AlertingRule); ok {
-			alerts = append(alerts, alertingRule)
-		}
-	}
-	sort.Slice(alerts, func(i, j int) bool {
-		return alerts[i].State() > alerts[j].State() ||
-			(alerts[i].State() == alerts[j].State() &&
-				alerts[i].Name() < alerts[j].Name())
-	})
-	return alerts
-}
-
-// HasAlertingRules returns true if the group contains at least one AlertingRule.
-func (g *Group) HasAlertingRules() bool {
-	g.mtx.Lock()
-	defer g.mtx.Unlock()
-
-	for _, rule := range g.rules {
-		if _, ok := rule.(*AlertingRule); ok {
-			return true
-		}
-	}
-	return false
-}
-
-// GetEvaluationDuration returns the time in seconds it took to evaluate the rule group.
-func (g *Group) GetEvaluationDuration() time.Duration {
-	g.mtx.Lock()
-	defer g.mtx.Unlock()
-	return g.evaluationDuration
-}
-
-// setEvaluationDuration sets the time in seconds the last evaluation took.
-func (g *Group) setEvaluationDuration(dur time.Duration) {
-	g.metrics.groupLastDuration.WithLabelValues(groupKey(g.file, g.name)).Set(dur.Seconds())
-
-	g.mtx.Lock()
-	defer g.mtx.Unlock()
-	g.evaluationDuration = dur
-}
-
-// GetEvaluationTimestamp returns the time the last evaluation of the rule group took place.
-func (g *Group) GetEvaluationTimestamp() time.Time {
-	g.mtx.Lock()
-	defer g.mtx.Unlock()
-	return g.evaluationTimestamp
-}
-
-// setEvaluationTimestamp updates evaluationTimestamp to the timestamp of when the rule group was last evaluated.
-func (g *Group) setEvaluationTimestamp(ts time.Time) {
-	g.metrics.groupLastEvalTime.WithLabelValues(groupKey(g.file, g.name)).Set(float64(ts.UnixNano()) / 1e9)
-
-	g.mtx.Lock()
-	defer g.mtx.Unlock()
-	g.evaluationTimestamp = ts
-}
-
-// evalTimestamp returns the immediately preceding consistently slotted evaluation time.
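-// The group hash provides a stable per-group offset within the interval, so
-// groups are spread across the interval and keep the same wall-clock slots
-// across restarts.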
-func (g *Group) evalTimestamp() time.Time {
-	var (
-		offset = int64(g.hash() % uint64(g.interval))
-		now    = time.Now().UnixNano()
-		adjNow = now - offset
-		base   = adjNow - (adjNow % int64(g.interval))
-	)
-
-	return time.Unix(0, base+offset).UTC()
-}
-
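-// nameAndLabels returns a key combining a rule's name and label set, used to
-// match rules across group reloads in CopyState.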
-func nameAndLabels(rule promRules.Rule) string {
-	return rule.Name() + rule.Labels().String()
-}
-
-// CopyState copies the alerting rule and staleness related state from the given group.
-//
-// Rules are matched based on their name and labels. If there are duplicates, the
-// first is matched with the first, second with the second etc.
-func (g *Group) CopyState(from *Group) {
-	g.evaluationDuration = from.evaluationDuration
-
-	ruleMap := make(map[string][]int, len(from.rules))
-
-	for fi, fromRule := range from.rules {
-		nameAndLabels := nameAndLabels(fromRule)
-		l := ruleMap[nameAndLabels]
-		ruleMap[nameAndLabels] = append(l, fi)
-	}
-
-	for i, rule := range g.rules {
-		nameAndLabels := nameAndLabels(rule)
-		indexes := ruleMap[nameAndLabels]
-		if len(indexes) == 0 {
-			continue
-		}
-		fi := indexes[0]
-		g.seriesInPreviousEval[i] = from.seriesInPreviousEval[fi]
-		ruleMap[nameAndLabels] = indexes[1:]
-
-		ar, ok := rule.(*AlertingRule)
-		if !ok {
-			continue
-		}
-		far, ok := from.rules[fi].(*AlertingRule)
-		if !ok {
-			continue
-		}
-
-		for fp, a := range far.active {
-			ar.active[fp] = a
-		}
-	}
-
-	// Handle deleted and unmatched duplicate rules.
-	g.staleSeries = from.staleSeries
-	for fi, fromRule := range from.rules {
-		nameAndLabels := nameAndLabels(fromRule)
-		l := ruleMap[nameAndLabels]
-		if len(l) != 0 {
-			for _, series := range from.seriesInPreviousEval[fi] {
-				g.staleSeries = append(g.staleSeries, series)
-			}
-		}
-	}
-}
-
-// Eval runs a single evaluation cycle in which all rules are evaluated sequentially.
-func (g *Group) Eval(ctx context.Context, ts time.Time) {
-	for i, rule := range g.rules {
-		select {
-		case <-g.done:
-			return
-		default:
-		}
-
-		func(i int, rule promRules.Rule) {
-			sp, ctx := opentracing.StartSpanFromContext(ctx, "rule")
-			sp.SetTag("name", rule.Name())
-			defer func(t time.Time) {
-				sp.Finish()
-
-				since := time.Since(t)
-				g.metrics.evalDuration.Observe(since.Seconds())
-				rule.SetEvaluationDuration(since)
-				rule.SetEvaluationTimestamp(t)
-			}(time.Now())
-
-			g.metrics.evalTotal.WithLabelValues(groupKey(g.File(), g.Name())).Inc()
-
-			vector, err := rule.Eval(ctx, ts, g.opts.QueryFunc, g.opts.ExternalURL)
-			if err != nil {
-				// Canceled queries are intentional termination of queries. This normally
-				// happens on shutdown and thus we skip logging of any errors here.
-				if _, ok := err.(promql.ErrQueryCanceled); !ok {
-					level.Warn(g.logger).Log("msg", "Evaluating rule failed", "rule", rule, "err", err)
-				}
-				g.metrics.evalFailures.WithLabelValues(groupKey(g.File(), g.Name())).Inc()
-				return
-			}
-
-			if ar, ok := rule.(*AlertingRule); ok {
-				ar.sendAlerts(ctx, ts, g.opts.ResendDelay, g.interval, g.opts.NotifyFunc)
-			}
-			var (
-				numOutOfOrder = 0
-				numDuplicates = 0
-			)
-
-			app := g.opts.Appendable.Appender()
-			seriesReturned := make(map[string]labels.Labels, len(g.seriesInPreviousEval[i]))
-			defer func() {
-				if err := app.Commit(); err != nil {
-					level.Warn(g.logger).Log("msg", "Rule sample appending failed", "err", err)
-					return
-				}
-				g.seriesInPreviousEval[i] = seriesReturned
-			}()
-			for _, s := range vector {
-				if _, err := app.Add(s.Metric, s.T, s.V); err != nil {
-					switch errors.Cause(err) {
-					case storage.ErrOutOfOrderSample:
-						numOutOfOrder++
-						level.Debug(g.logger).Log("msg", "Rule evaluation result discarded", "err", err, "sample", s)
-					case storage.ErrDuplicateSampleForTimestamp:
-						numDuplicates++
-						level.Debug(g.logger).Log("msg", "Rule evaluation result discarded", "err", err, "sample", s)
-					default:
-						level.Warn(g.logger).Log("msg", "Rule evaluation result discarded", "err", err, "sample", s)
-					}
-				} else {
-					seriesReturned[s.Metric.String()] = s.Metric
-				}
-			}
-			if numOutOfOrder > 0 {
-				level.Warn(g.logger).Log("msg", "Error on ingesting out-of-order result from rule evaluation", "numDropped", numOutOfOrder)
-			}
-			if numDuplicates > 0 {
-				level.Warn(g.logger).Log("msg", "Error on ingesting results from rule evaluation with different value but same timestamp", "numDropped", numDuplicates)
-			}
-
-			for metric, lset := range g.seriesInPreviousEval[i] {
-				if _, ok := seriesReturned[metric]; !ok {
-					// Series no longer exposed, mark it stale.
-					_, err = app.Add(lset, timestamp.FromTime(ts), math.Float64frombits(value.StaleNaN))
-					switch errors.Cause(err) {
-					case nil:
-					case storage.ErrOutOfOrderSample, storage.ErrDuplicateSampleForTimestamp:
-						// Do not count these in logging, as this is expected if series
-						// is exposed from a different rule.
-					default:
-						level.Warn(g.logger).Log("msg", "Adding stale sample failed", "sample", metric, "err", err)
-					}
-				}
-			}
-		}(i, rule)
-	}
-	g.cleanupStaleSeries(ts)
-}
-
-func (g *Group) cleanupStaleSeries(ts time.Time) {
-	if len(g.staleSeries) == 0 {
-		return
-	}
-	app := g.opts.Appendable.Appender()
-	for _, s := range g.staleSeries {
-		// Rule that produced series no longer configured, mark it stale.
-		_, err := app.Add(s, timestamp.FromTime(ts), math.Float64frombits(value.StaleNaN))
-		switch errors.Cause(err) {
-		case nil:
-		case storage.ErrOutOfOrderSample, storage.ErrDuplicateSampleForTimestamp:
-			// Do not count these in logging, as this is expected if series
-			// is exposed from a different rule.
-		default:
-			level.Warn(g.logger).Log("msg", "Adding stale sample for previous configuration failed", "sample", s, "err", err)
-		}
-	}
-	if err := app.Commit(); err != nil {
-		level.Warn(g.logger).Log("msg", "Stale sample appending for previous configuration failed", "err", err)
-	} else {
-		g.staleSeries = nil
-	}
-}
-
-// RestoreForState restores the 'for' state of the alerts
-// by looking up last ActiveAt from storage.
-func (g *Group) RestoreForState(ts time.Time) {
-	maxtMS := int64(model.TimeFromUnixNano(ts.UnixNano()))
-	// We only restore alerts that were active within the outage tolerance window.
-	mint := ts.Add(-g.opts.OutageTolerance)
-	mintMS := int64(model.TimeFromUnixNano(mint.UnixNano()))
-	q, err := g.opts.Queryable.Querier(g.opts.Context, mintMS, maxtMS)
-	if err != nil {
-		level.Error(g.logger).Log("msg", "Failed to get Querier", "err", err)
-		return
-	}
-	defer func() {
-		if err := q.Close(); err != nil {
-			level.Error(g.logger).Log("msg", "Failed to close Querier", "err", err)
-		}
-	}()
-
-	for _, rule := range g.Rules() {
-		alertRule, ok := rule.(*AlertingRule)
-		if !ok {
-			continue
-		}
-
-		alertHoldDuration := alertRule.HoldDuration()
-		if alertHoldDuration < g.opts.ForGracePeriod {
-			// If alertHoldDuration is already less than grace period, we would not
-			// like to make it wait for `g.opts.ForGracePeriod` time before firing.
-			// Hence we skip restoration, which will make it wait for alertHoldDuration.
-			alertRule.SetRestored(true)
-			continue
-		}
-
-		alertRule.ForEachActiveAlert(func(a *Alert) {
-			smpl := alertRule.forStateSample(a, time.Now(), 0)
-			var matchers []*labels.Matcher
-			for _, l := range smpl.Metric {
-				mt, err := labels.NewMatcher(labels.MatchEqual, l.Name, l.Value)
-				if err != nil {
-					panic(err)
-				}
-				matchers = append(matchers, mt)
-			}
-
-			sset := q.Select(false, nil, matchers...)
-
-			seriesFound := false
-			var s storage.Series
-			for sset.Next() {
-				// Query assures that smpl.Metric is included in sset.At().Labels(),
-				// hence just checking the length would act like equality.
-				// (This is faster than calling labels.Compare again as we already have some info).
-				if len(sset.At().Labels()) == len(smpl.Metric) {
-					s = sset.At()
-					seriesFound = true
-					break
-				}
-			}
-
-			if err := sset.Err(); err != nil {
-				// Querier Warnings are ignored. We do not care unless we have an error.
-				level.Error(g.logger).Log(
-					"msg", "Failed to restore 'for' state",
-					labels.AlertName, alertRule.Name(),
-					"stage", "Select",
-					"err", err,
-				)
-				return
-			}
-
-			if !seriesFound {
-				return
-			}
-
-			// Series found for the 'for' state.
-			var t int64
-			var v float64
-			it := s.Iterator()
-			for it.Next() {
-				t, v = it.At()
-			}
-			if it.Err() != nil {
-				level.Error(g.logger).Log("msg", "Failed to restore 'for' state",
-					labels.AlertName, alertRule.Name(), "stage", "Iterator", "err", it.Err())
-				return
-			}
-			if value.IsStaleNaN(v) { // Alert was not active.
-				return
-			}
-
-			downAt := time.Unix(t/1000, 0).UTC()
-			restoredActiveAt := time.Unix(int64(v), 0).UTC()
-			timeSpentPending := downAt.Sub(restoredActiveAt)
-			timeRemainingPending := alertHoldDuration - timeSpentPending
-
-			if timeRemainingPending <= 0 {
-				// It means that alert was firing when prometheus went down.
-				// In the next Eval, the state of this alert will be set back to
-				// firing again if it's still firing in that Eval.
-				// Nothing to be done in this case.
-			} else if timeRemainingPending < g.opts.ForGracePeriod {
-				// (new) restoredActiveAt = (ts + m.opts.ForGracePeriod) - alertHoldDuration
-				//                            /* new firing time */      /* moving back by hold duration */
-				//
-				// Proof of correctness:
-				// firingTime = restoredActiveAt.Add(alertHoldDuration)
-				//            = ts + m.opts.ForGracePeriod - alertHoldDuration + alertHoldDuration
-				//            = ts + m.opts.ForGracePeriod
-				//
-				// Time remaining to fire = firingTime.Sub(ts)
-				//                        = (ts + m.opts.ForGracePeriod) - ts
-				//                        = m.opts.ForGracePeriod
-				restoredActiveAt = ts.Add(g.opts.ForGracePeriod).Add(-alertHoldDuration)
-			} else {
-				// By shifting ActiveAt to the future (ActiveAt + some_duration),
-				// the total pending time from the original ActiveAt
-				// would be `alertHoldDuration + some_duration`.
-				// Here, some_duration = downDuration.
-				downDuration := ts.Sub(downAt)
-				restoredActiveAt = restoredActiveAt.Add(downDuration)
-			}
-
-			a.ActiveAt = restoredActiveAt
-			level.Debug(g.logger).Log("msg", "'for' state restored",
-				labels.AlertName, alertRule.Name(), "restored_time", a.ActiveAt.Format(time.RFC850),
-				"labels", a.Labels.String())
-
-		})
-
-		alertRule.SetRestored(true)
-	}
-
-}
-
-// Equals return if two groups are the same.
-func (g *Group) Equals(ng *Group) bool {
-	if g.name != ng.name {
-		return false
-	}
-
-	if g.file != ng.file {
-		return false
-	}
-
-	if g.interval != ng.interval {
-		return false
-	}
-
-	if len(g.rules) != len(ng.rules) {
-		return false
-	}
-
-	for i, gr := range g.rules {
-		if gr.String() != ng.rules[i].String() {
-			return false
-		}
-	}
-
-	return true
-}
-
-// The Manager manages recording and alerting rules.
-type Manager struct {
-	opts     *ManagerOptions
-	groups   map[string]*Group
-	mtx      sync.RWMutex
-	block    chan struct{}
-	done     chan struct{}
-	restored bool
-
-	logger log.Logger
-}
-
-// NotifyFunc sends notifications about a set of alerts generated by the given expression.
-type NotifyFunc func(ctx context.Context, expr string, alerts ...*Alert)
-
-// ManagerOptions bundles options for the Manager.
-type ManagerOptions struct {
-	ExternalURL     *url.URL
-	QueryFunc       promRules.QueryFunc
-	NotifyFunc      NotifyFunc
-	Context         context.Context
-	Appendable      storage.Appendable
-	Queryable       storage.Queryable
-	Logger          log.Logger
-	Registerer      prometheus.Registerer
-	OutageTolerance time.Duration
-	ForGracePeriod  time.Duration
-	ResendDelay     time.Duration
-
-	Metrics *Metrics
-}
-
-// NewManager returns an implementation of Manager, ready to be started
-// by calling the Run method.
-func NewManager(o *ManagerOptions) *Manager {
-	if o.Metrics == nil {
-		o.Metrics = NewGroupMetrics(o.Registerer)
-	}
-
-	m := &Manager{
-		groups: map[string]*Group{},
-		opts:   o,
-		block:  make(chan struct{}),
-		done:   make(chan struct{}),
-		logger: o.Logger,
-	}
-
-	o.Metrics.iterationsMissed.Inc()
-	return m
-}
-
-// Run starts processing of the rule manager.
-func (m *Manager) Run() {
-	close(m.block)
-}
-
-// Stop the rule manager's rule evaluation cycles.
-func (m *Manager) Stop() {
-	m.mtx.Lock()
-	defer m.mtx.Unlock()
-
-	level.Info(m.logger).Log("msg", "Stopping rule manager...")
-
-	for _, eg := range m.groups {
-		eg.stop()
-	}
-
-	// Shut down the groups waiting multiple evaluation intervals to write
-	// staleness markers.
-	close(m.done)
-
-	level.Info(m.logger).Log("msg", "Rule manager stopped")
-}
-
-// Update the rule manager's state as the config requires. If
-// loading the new rules failed the old rule set is restored.
-func (m *Manager) Update(interval time.Duration, files []string, externalLabels labels.Labels) error {
-	m.mtx.Lock()
-	defer m.mtx.Unlock()
-
-	groups, errs := m.LoadGroups(interval, externalLabels, files...)
-	if errs != nil {
-		for _, e := range errs {
-			level.Error(m.logger).Log("msg", "loading groups failed", "err", e)
-		}
-		return errors.New("error loading rules, previous rule set restored")
-	}
-	m.restored = true
-
-	var wg sync.WaitGroup
-	for _, newg := range groups {
-		// If there is an old group with the same identifier,
-		// check if new group equals with the old group, if yes then skip it.
-		// If not equals, stop it and wait for it to finish the current iteration.
-		// Then copy it into the new group.
-		gn := groupKey(newg.file, newg.name)
-		oldg, ok := m.groups[gn]
-		delete(m.groups, gn)
-
-		if ok && oldg.Equals(newg) {
-			groups[gn] = oldg
-			continue
-		}
-
-		wg.Add(1)
-		go func(newg *Group) {
-			if ok {
-				oldg.stop()
-				newg.CopyState(oldg)
-			}
-			go func() {
-				// Wait with starting evaluation until the rule manager
-				// is told to run. This is necessary to avoid running
-				// queries against a bootstrapping storage.
-				<-m.block
-				newg.run(m.opts.Context)
-			}()
-			wg.Done()
-		}(newg)
-	}
-
-	// Stop remaining old groups.
-	wg.Add(len(m.groups))
-	for n, oldg := range m.groups {
-		go func(n string, g *Group) {
-			g.markStale = true
-			g.stop()
-			if m := g.metrics; m != nil {
-				m.evalTotal.DeleteLabelValues(n)
-				m.evalFailures.DeleteLabelValues(n)
-				m.groupInterval.DeleteLabelValues(n)
-				m.groupLastEvalTime.DeleteLabelValues(n)
-				m.groupLastDuration.DeleteLabelValues(n)
-				m.groupRules.DeleteLabelValues(n)
-			}
-			wg.Done()
-		}(n, oldg)
-	}
-
-	wg.Wait()
-	m.groups = groups
-
-	return nil
-}
-
-// LoadGroups reads groups from a list of files.
-func (m *Manager) LoadGroups(
-	interval time.Duration, externalLabels labels.Labels, filenames ...string,
-) (map[string]*Group, []error) {
-	groups := make(map[string]*Group)
-
-	shouldRestore := !m.restored
-
-	for _, fn := range filenames {
-		rgs, errs := rulefmt.ParseFile(fn)
-		if errs != nil {
-			return nil, errs
-		}
-
-		for _, rg := range rgs.Groups {
-			itv := interval
-			if rg.Interval != 0 {
-				itv = time.Duration(rg.Interval)
-			}
-
-			rules := make([]promRules.Rule, 0, len(rg.Rules))
-			for _, r := range rg.Rules {
-				expr, err := logql.ParseExpr(r.Expr.Value)
-				if err != nil {
-					return nil, []error{errors.Wrap(err, fn)}
-				}
-
-				if r.Alert.Value != "" {
-					rules = append(rules, NewAlertingRule(
-						r.Alert.Value,
-						&parser.StringLiteral{Val: expr.String()},
-						time.Duration(r.For),
-						labels.FromMap(r.Labels),
-						labels.FromMap(r.Annotations),
-						externalLabels,
-						m.restored,
-						log.With(m.logger, "alert", r.Alert),
-					))
-					continue
-				}
-				rules = append(rules, promRules.NewRecordingRule(
-					r.Record.Value,
-					&parser.StringLiteral{Val: expr.String()},
-					labels.FromMap(r.Labels),
-				))
-			}
-
-			groups[groupKey(fn, rg.Name)] = NewGroup(GroupOptions{
-				Name:          rg.Name,
-				File:          fn,
-				Interval:      itv,
-				Rules:         rules,
-				ShouldRestore: shouldRestore,
-				Opts:          m.opts,
-				done:          m.done,
-			})
-		}
-	}
-
-	return groups, nil
-}
-
-// Group names need not be unique across filenames.
-func groupKey(file, name string) string {
-	return file + ";" + name
-}
-
-// RuleGroups returns the list of manager's rule groups.
-func (m *Manager) RuleGroups() []ruler.Group {
-	m.mtx.RLock()
-	defer m.mtx.RUnlock()
-
-	rgs := make([]ruler.Group, 0, len(m.groups))
-	for _, g := range m.groups {
-		rgs = append(rgs, g)
-	}
-
-	sort.Slice(rgs, func(i, j int) bool {
-		if rgs[i].File() != rgs[j].File() {
-			return rgs[i].File() < rgs[j].File()
-		}
-		return rgs[i].Name() < rgs[j].Name()
-	})
-
-	return rgs
-}
-
-// Rules returns the list of the manager's rules.
-func (m *Manager) Rules() []promRules.Rule {
-	m.mtx.RLock()
-	defer m.mtx.RUnlock()
-
-	var rules []promRules.Rule
-	for _, g := range m.groups {
-		rules = append(rules, g.rules...)
-	}
-
-	return rules
-}
-
-// AlertingRules returns the list of the manager's alerting rules.
-func (m *Manager) AlertingRules() []*AlertingRule {
-	m.mtx.RLock()
-	defer m.mtx.RUnlock()
-
-	alerts := []*AlertingRule{}
-	for _, rule := range m.Rules() {
-		if alertingRule, ok := rule.(*AlertingRule); ok {
-			alerts = append(alerts, alertingRule)
-		}
-	}
-
-	return alerts
-}
diff --git a/pkg/ruler/rules/manager_test.go b/pkg/ruler/rules/manager_test.go
deleted file mode 100644
index ee091d2b11306..0000000000000
--- a/pkg/ruler/rules/manager_test.go
+++ /dev/null
@@ -1,959 +0,0 @@
-// Copyright 2013 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package rules
-
-import (
-	"context"
-	"fmt"
-	"io/ioutil"
-	"math"
-	"os"
-	"sort"
-	"testing"
-	"time"
-
-	"github.com/go-kit/kit/log"
-	"github.com/prometheus/client_golang/prometheus"
-	"github.com/prometheus/common/model"
-	yaml "gopkg.in/yaml.v2"
-
-	"github.com/prometheus/prometheus/pkg/labels"
-	"github.com/prometheus/prometheus/pkg/rulefmt"
-	"github.com/prometheus/prometheus/pkg/timestamp"
-	"github.com/prometheus/prometheus/pkg/value"
-	"github.com/prometheus/prometheus/promql"
-	"github.com/prometheus/prometheus/promql/parser"
-	"github.com/prometheus/prometheus/rules"
-	"github.com/prometheus/prometheus/storage"
-	"github.com/prometheus/prometheus/util/teststorage"
-	"github.com/prometheus/prometheus/util/testutil"
-)
-
-type AppendableAdapter struct{ storage.Storage }
-
-func (a AppendableAdapter) Appender(_ rules.Rule) (storage.Appender, error) {
-	return a.Storage.Appender(), nil
-}
-
-func TestAlertingRule(t *testing.T) {
-	suite, err := promql.NewTest(t, `
-		load 5m
-			http_requests{job="app-server", instance="0", group="canary", severity="overwrite-me"}	75 85  95 105 105  95  85
-			http_requests{job="app-server", instance="1", group="canary", severity="overwrite-me"}	80 90 100 110 120 130 140
-	`)
-	testutil.Ok(t, err)
-	defer suite.Close()
-
-	err = suite.Run()
-	testutil.Ok(t, err)
-
-	expr, err := parser.ParseExpr(`http_requests{group="canary", job="app-server"} < 100`)
-	testutil.Ok(t, err)
-
-	rule := NewAlertingRule(
-		"HTTPRequestRateLow",
-		expr,
-		time.Minute,
-		labels.FromStrings("severity", "{{\"c\"}}ritical"),
-		nil, nil, true, nil,
-	)
-	result := promql.Vector{
-		{
-			Metric: labels.FromStrings(
-				"__name__", "ALERTS",
-				"alertname", "HTTPRequestRateLow",
-				"alertstate", "pending",
-				"group", "canary",
-				"instance", "0",
-				"job", "app-server",
-				"severity", "critical",
-			),
-			Point: promql.Point{V: 1},
-		},
-		{
-			Metric: labels.FromStrings(
-				"__name__", "ALERTS",
-				"alertname", "HTTPRequestRateLow",
-				"alertstate", "pending",
-				"group", "canary",
-				"instance", "1",
-				"job", "app-server",
-				"severity", "critical",
-			),
-			Point: promql.Point{V: 1},
-		},
-		{
-			Metric: labels.FromStrings(
-				"__name__", "ALERTS",
-				"alertname", "HTTPRequestRateLow",
-				"alertstate", "firing",
-				"group", "canary",
-				"instance", "0",
-				"job", "app-server",
-				"severity", "critical",
-			),
-			Point: promql.Point{V: 1},
-		},
-		{
-			Metric: labels.FromStrings(
-				"__name__", "ALERTS",
-				"alertname", "HTTPRequestRateLow",
-				"alertstate", "firing",
-				"group", "canary",
-				"instance", "1",
-				"job", "app-server",
-				"severity", "critical",
-			),
-			Point: promql.Point{V: 1},
-		},
-	}
-
-	baseTime := time.Unix(0, 0)
-
-	var tests = []struct {
-		time   time.Duration
-		result promql.Vector
-	}{
-		{
-			time:   0,
-			result: result[:2],
-		}, {
-			time:   5 * time.Minute,
-			result: result[2:],
-		}, {
-			time:   10 * time.Minute,
-			result: result[2:3],
-		},
-		{
-			time:   15 * time.Minute,
-			result: nil,
-		},
-		{
-			time:   20 * time.Minute,
-			result: nil,
-		},
-		{
-			time:   25 * time.Minute,
-			result: result[:1],
-		},
-		{
-			time:   30 * time.Minute,
-			result: result[2:3],
-		},
-	}
-
-	for i, test := range tests {
-		t.Logf("case %d", i)
-
-		evalTime := baseTime.Add(test.time)
-
-		res, err := rule.Eval(suite.Context(), evalTime, EngineQueryFunc(suite.QueryEngine(), suite.Storage()), nil)
-		testutil.Ok(t, err)
-
-		var filteredRes promql.Vector // After removing 'ALERTS_FOR_STATE' samples.
-		for _, smpl := range res {
-			smplName := smpl.Metric.Get("__name__")
-			if smplName == "ALERTS" {
-				filteredRes = append(filteredRes, smpl)
-			} else {
-				// If not 'ALERTS', it has to be 'ALERTS_FOR_STATE'.
-				testutil.Equals(t, smplName, "ALERTS_FOR_STATE")
-			}
-		}
-		for i := range test.result {
-			test.result[i].T = timestamp.FromTime(evalTime)
-		}
-		testutil.Assert(t, len(test.result) == len(filteredRes), "%d. Number of samples in expected and actual output don't match (%d vs. %d)", i, len(test.result), len(res))
-
-		sort.Slice(filteredRes, func(i, j int) bool {
-			return labels.Compare(filteredRes[i].Metric, filteredRes[j].Metric) < 0
-		})
-		testutil.Equals(t, test.result, filteredRes)
-
-		for _, aa := range rule.ActiveAlerts() {
-			testutil.Assert(t, aa.Labels.Get(model.MetricNameLabel) == "", "%s label set on active alert: %s", model.MetricNameLabel, aa.Labels)
-		}
-	}
-}
-
-func TestForStateAddSamples(t *testing.T) {
-	suite, err := promql.NewTest(t, `
-		load 5m
-			http_requests{job="app-server", instance="0", group="canary", severity="overwrite-me"}	75 85  95 105 105  95  85
-			http_requests{job="app-server", instance="1", group="canary", severity="overwrite-me"}	80 90 100 110 120 130 140
-	`)
-	testutil.Ok(t, err)
-	defer suite.Close()
-
-	err = suite.Run()
-	testutil.Ok(t, err)
-
-	expr, err := parser.ParseExpr(`http_requests{group="canary", job="app-server"} < 100`)
-	testutil.Ok(t, err)
-
-	rule := NewAlertingRule(
-		"HTTPRequestRateLow",
-		expr,
-		time.Minute,
-		labels.FromStrings("severity", "{{\"c\"}}ritical"),
-		nil, nil, true, nil,
-	)
-	result := promql.Vector{
-		{
-			Metric: labels.FromStrings(
-				"__name__", "ALERTS_FOR_STATE",
-				"alertname", "HTTPRequestRateLow",
-				"group", "canary",
-				"instance", "0",
-				"job", "app-server",
-				"severity", "critical",
-			),
-			Point: promql.Point{V: 1},
-		},
-		{
-			Metric: labels.FromStrings(
-				"__name__", "ALERTS_FOR_STATE",
-				"alertname", "HTTPRequestRateLow",
-				"group", "canary",
-				"instance", "1",
-				"job", "app-server",
-				"severity", "critical",
-			),
-			Point: promql.Point{V: 1},
-		},
-		{
-			Metric: labels.FromStrings(
-				"__name__", "ALERTS_FOR_STATE",
-				"alertname", "HTTPRequestRateLow",
-				"group", "canary",
-				"instance", "0",
-				"job", "app-server",
-				"severity", "critical",
-			),
-			Point: promql.Point{V: 1},
-		},
-		{
-			Metric: labels.FromStrings(
-				"__name__", "ALERTS_FOR_STATE",
-				"alertname", "HTTPRequestRateLow",
-				"group", "canary",
-				"instance", "1",
-				"job", "app-server",
-				"severity", "critical",
-			),
-			Point: promql.Point{V: 1},
-		},
-	}
-
-	baseTime := time.Unix(0, 0)
-
-	var tests = []struct {
-		time            time.Duration
-		result          promql.Vector
-		persistThisTime bool // If true, it means this 'time' is persisted for 'for'.
-	}{
-		{
-			time:            0,
-			result:          append(promql.Vector{}, result[:2]...),
-			persistThisTime: true,
-		},
-		{
-			time:   5 * time.Minute,
-			result: append(promql.Vector{}, result[2:]...),
-		},
-		{
-			time:   10 * time.Minute,
-			result: append(promql.Vector{}, result[2:3]...),
-		},
-		{
-			time:   15 * time.Minute,
-			result: nil,
-		},
-		{
-			time:   20 * time.Minute,
-			result: nil,
-		},
-		{
-			time:            25 * time.Minute,
-			result:          append(promql.Vector{}, result[:1]...),
-			persistThisTime: true,
-		},
-		{
-			time:   30 * time.Minute,
-			result: append(promql.Vector{}, result[2:3]...),
-		},
-	}
-
-	var forState float64
-	for i, test := range tests {
-		t.Logf("case %d", i)
-		evalTime := baseTime.Add(test.time)
-
-		if test.persistThisTime {
-			forState = float64(evalTime.Unix())
-		}
-		if test.result == nil {
-			forState = float64(value.StaleNaN)
-		}
-
-		res, err := rule.Eval(suite.Context(), evalTime, EngineQueryFunc(suite.QueryEngine(), suite.Storage()), nil)
-		testutil.Ok(t, err)
-
-		var filteredRes promql.Vector // After removing 'ALERTS' samples.
-		for _, smpl := range res {
-			smplName := smpl.Metric.Get("__name__")
-			if smplName == "ALERTS_FOR_STATE" {
-				filteredRes = append(filteredRes, smpl)
-			} else {
-				// If not 'ALERTS_FOR_STATE', it has to be 'ALERTS'.
-				testutil.Equals(t, smplName, "ALERTS")
-			}
-		}
-		for i := range test.result {
-			test.result[i].T = timestamp.FromTime(evalTime)
-			// Updating the expected 'for' state.
-			if test.result[i].V >= 0 {
-				test.result[i].V = forState
-			}
-		}
-		testutil.Assert(t, len(test.result) == len(filteredRes), "%d. Number of samples in expected and actual output don't match (%d vs. %d)", i, len(test.result), len(res))
-
-		sort.Slice(filteredRes, func(i, j int) bool {
-			return labels.Compare(filteredRes[i].Metric, filteredRes[j].Metric) < 0
-		})
-		testutil.Equals(t, test.result, filteredRes)
-
-		for _, aa := range rule.ActiveAlerts() {
-			testutil.Assert(t, aa.Labels.Get(model.MetricNameLabel) == "", "%s label set on active alert: %s", model.MetricNameLabel, aa.Labels)
-		}
-
-	}
-}
-
-// sortAlerts sorts `[]*Alert` w.r.t. the Labels.
-func sortAlerts(items []*Alert) {
-	sort.Slice(items, func(i, j int) bool {
-		return labels.Compare(items[i].Labels, items[j].Labels) <= 0
-	})
-}
-
-func TestForStateRestore(t *testing.T) {
-	suite, err := promql.NewTest(t, `
-		load 5m
-		http_requests{job="app-server", instance="0", group="canary", severity="overwrite-me"}	75  85 50 0 0 25 0 0 40 0 120
-		http_requests{job="app-server", instance="1", group="canary", severity="overwrite-me"}	125 90 60 0 0 25 0 0 40 0 130
-	`)
-	testutil.Ok(t, err)
-	defer suite.Close()
-
-	err = suite.Run()
-	testutil.Ok(t, err)
-
-	expr, err := parser.ParseExpr(`http_requests{group="canary", job="app-server"} < 100`)
-	testutil.Ok(t, err)
-
-	opts := &ManagerOptions{
-		QueryFunc:       EngineQueryFunc(suite.QueryEngine(), suite.Storage()),
-		Appendable:      AppendableAdapter{suite.Storage()},
-		Context:         context.Background(),
-		Logger:          log.NewNopLogger(),
-		NotifyFunc:      func(ctx context.Context, expr string, alerts ...*Alert) {},
-		OutageTolerance: 30 * time.Minute,
-		ForGracePeriod:  10 * time.Minute,
-	}
-	opts.AlertHistory = NewMetricsHistory(suite.Storage(), opts)
-
-	alertForDuration := 25 * time.Minute
-	// Initial run before prometheus goes down.
-	rule := NewAlertingRule(
-		"HTTPRequestRateLow",
-		expr,
-		alertForDuration,
-		labels.FromStrings("severity", "critical"),
-		nil, nil, true, nil,
-	)
-
-	group := NewGroup("default", "", time.Second, []Rule{rule}, true, opts)
-	groups := make(map[string]*Group)
-	groups["default;"] = group
-
-	initialRuns := []time.Duration{0, 5 * time.Minute}
-
-	baseTime := time.Unix(0, 0)
-	for _, duration := range initialRuns {
-		evalTime := baseTime.Add(duration)
-		group.Eval(suite.Context(), evalTime)
-	}
-
-	exp := rule.ActiveAlerts()
-	for _, aa := range exp {
-		testutil.Assert(t, aa.Labels.Get(model.MetricNameLabel) == "", "%s label set on active alert: %s", model.MetricNameLabel, aa.Labels)
-	}
-	sort.Slice(exp, func(i, j int) bool {
-		return labels.Compare(exp[i].Labels, exp[j].Labels) < 0
-	})
-
-	// Prometheus goes down here. We create new rules and groups.
-	type testInput struct {
-		restoreDuration time.Duration
-		alerts          []*Alert
-
-		num          int
-		noRestore    bool
-		gracePeriod  bool
-		downDuration time.Duration
-	}
-
-	tests := []testInput{
-		{
-			// Normal restore (alerts were not firing).
-			restoreDuration: 15 * time.Minute,
-			alerts:          rule.ActiveAlerts(),
-			downDuration:    10 * time.Minute,
-		},
-		{
-			// Testing Outage Tolerance.
-			restoreDuration: 40 * time.Minute,
-			noRestore:       true,
-			num:             2,
-		},
-		{
-			// No active alerts.
-			restoreDuration: 50 * time.Minute,
-			alerts:          []*Alert{},
-		},
-	}
-
-	testFunc := func(tst testInput) {
-		newRule := NewAlertingRule(
-			"HTTPRequestRateLow",
-			expr,
-			alertForDuration,
-			labels.FromStrings("severity", "critical"),
-			nil, nil, false, nil,
-		)
-		newGroup := NewGroup("default", "", time.Second, []Rule{newRule}, true, opts)
-
-		newGroups := make(map[string]*Group)
-		newGroups["default;"] = newGroup
-
-		restoreTime := baseTime.Add(tst.restoreDuration)
-		// First eval before restoration.
-		newGroup.Eval(suite.Context(), restoreTime)
-		// Restore happens here.
-		newGroup.RestoreForState(restoreTime)
-
-		got := newRule.ActiveAlerts()
-		for _, aa := range got {
-			testutil.Assert(t, aa.Labels.Get(model.MetricNameLabel) == "", "%s label set on active alert: %s", model.MetricNameLabel, aa.Labels)
-		}
-		sort.Slice(got, func(i, j int) bool {
-			return labels.Compare(got[i].Labels, got[j].Labels) < 0
-		})
-
-		// Checking if we have restored it correctly.
-		if tst.noRestore {
-			testutil.Equals(t, tst.num, len(got))
-			for _, e := range got {
-				testutil.Equals(t, e.ActiveAt, restoreTime)
-			}
-		} else if tst.gracePeriod {
-			testutil.Equals(t, tst.num, len(got))
-			for _, e := range got {
-				testutil.Equals(t, opts.ForGracePeriod, e.ActiveAt.Add(alertForDuration).Sub(restoreTime))
-			}
-		} else {
-			exp := tst.alerts
-			testutil.Equals(t, len(exp), len(got))
-			sortAlerts(exp)
-			sortAlerts(got)
-			for i, e := range exp {
-				testutil.Equals(t, e.Labels, got[i].Labels)
-
-				// Difference in time should be within 1e6 ns, i.e. 1ms
-				// (due to conversion between ns & ms, float64 & int64).
-				activeAtDiff := float64(e.ActiveAt.Unix() + int64(tst.downDuration/time.Second) - got[i].ActiveAt.Unix())
-				testutil.Assert(t, math.Abs(activeAtDiff) == 0, "'for' state restored time is wrong")
-			}
-		}
-	}
-
-	for _, tst := range tests {
-		testFunc(tst)
-	}
-
-	// Testing the grace period.
-	for _, duration := range []time.Duration{10 * time.Minute, 15 * time.Minute, 20 * time.Minute} {
-		evalTime := baseTime.Add(duration)
-		group.Eval(suite.Context(), evalTime)
-	}
-	testFunc(testInput{
-		restoreDuration: 25 * time.Minute,
-		alerts:          []*Alert{},
-		gracePeriod:     true,
-		num:             2,
-	})
-}
-
-func TestStaleness(t *testing.T) {
-	storage := teststorage.New(t)
-	defer storage.Close()
-	engineOpts := promql.EngineOpts{
-		Logger:     nil,
-		Reg:        nil,
-		MaxSamples: 10,
-		Timeout:    10 * time.Second,
-	}
-	engine := promql.NewEngine(engineOpts)
-	opts := &ManagerOptions{
-		QueryFunc:  EngineQueryFunc(engine, storage),
-		Appendable: AppendableAdapter{storage},
-		Context:    context.Background(),
-		Logger:     log.NewNopLogger(),
-	}
-	opts.AlertHistory = NewMetricsHistory(storage, opts)
-
-	expr, err := parser.ParseExpr("a + 1")
-	testutil.Ok(t, err)
-	rule := NewRecordingRule("a_plus_one", expr, labels.Labels{})
-	group := NewGroup("default", "", time.Second, []Rule{rule}, true, opts)
-
-	// A time series that has two samples and then goes stale.
-	app := storage.Appender()
-	app.Add(labels.FromStrings(model.MetricNameLabel, "a"), 0, 1)
-	app.Add(labels.FromStrings(model.MetricNameLabel, "a"), 1000, 2)
-	app.Add(labels.FromStrings(model.MetricNameLabel, "a"), 2000, math.Float64frombits(value.StaleNaN))
-
-	err = app.Commit()
-	testutil.Ok(t, err)
-
-	ctx := context.Background()
-
-	// Execute 3 times, 1 second apart.
-	group.Eval(ctx, time.Unix(0, 0))
-	group.Eval(ctx, time.Unix(1, 0))
-	group.Eval(ctx, time.Unix(2, 0))
-
-	querier, err := storage.Querier(context.Background(), 0, 2000)
-	testutil.Ok(t, err)
-	defer querier.Close()
-
-	matcher, err := labels.NewMatcher(labels.MatchEqual, model.MetricNameLabel, "a_plus_one")
-	testutil.Ok(t, err)
-
-	set := querier.Select(false, nil, matcher)
-
-	samples, err := readSeriesSet(set)
-	testutil.Ok(t, err)
-
-	metric := labels.FromStrings(model.MetricNameLabel, "a_plus_one").String()
-	metricSample, ok := samples[metric]
-
-	testutil.Assert(t, ok, "Series %s not returned.", metric)
-	testutil.Assert(t, value.IsStaleNaN(metricSample[2].V), "Appended second sample not as expected. Wanted: stale NaN Got: %x", math.Float64bits(metricSample[2].V))
-	metricSample[2].V = 42 // reflect.DeepEqual cannot handle NaN.
-
-	want := map[string][]promql.Point{
-		metric: {{T: 0, V: 2}, {T: 1000, V: 3}, {T: 2000, V: 42}},
-	}
-
-	testutil.Equals(t, want, samples)
-}
-
-// Convert a SeriesSet into a form usable with reflect.DeepEqual.
-func readSeriesSet(ss storage.SeriesSet) (map[string][]promql.Point, error) {
-	result := map[string][]promql.Point{}
-
-	for ss.Next() {
-		series := ss.At()
-
-		points := []promql.Point{}
-		it := series.Iterator()
-		for it.Next() {
-			t, v := it.At()
-			points = append(points, promql.Point{T: t, V: v})
-		}
-
-		name := series.Labels().String()
-		result[name] = points
-	}
-	return result, ss.Err()
-}
-
-func TestCopyState(t *testing.T) {
-	oldGroup := &Group{
-		rules: []Rule{
-			NewAlertingRule("alert", nil, 0, nil, nil, nil, true, nil),
-			NewRecordingRule("rule1", nil, nil),
-			NewRecordingRule("rule2", nil, nil),
-			NewRecordingRule("rule3", nil, labels.Labels{{Name: "l1", Value: "v1"}}),
-			NewRecordingRule("rule3", nil, labels.Labels{{Name: "l1", Value: "v2"}}),
-			NewRecordingRule("rule3", nil, labels.Labels{{Name: "l1", Value: "v3"}}),
-			NewAlertingRule("alert2", nil, 0, labels.Labels{{Name: "l2", Value: "v1"}}, nil, nil, true, nil),
-		},
-		seriesInPreviousEval: []map[string]labels.Labels{
-			{},
-			{},
-			{},
-			{"r3a": labels.Labels{{Name: "l1", Value: "v1"}}},
-			{"r3b": labels.Labels{{Name: "l1", Value: "v2"}}},
-			{"r3c": labels.Labels{{Name: "l1", Value: "v3"}}},
-			{"a2": labels.Labels{{Name: "l2", Value: "v1"}}},
-		},
-		evaluationDuration: time.Second,
-	}
-	oldGroup.rules[0].(*AlertingRule).active[42] = nil
-	newGroup := &Group{
-		rules: []Rule{
-			NewRecordingRule("rule3", nil, labels.Labels{{Name: "l1", Value: "v0"}}),
-			NewRecordingRule("rule3", nil, labels.Labels{{Name: "l1", Value: "v1"}}),
-			NewRecordingRule("rule3", nil, labels.Labels{{Name: "l1", Value: "v2"}}),
-			NewAlertingRule("alert", nil, 0, nil, nil, nil, true, nil),
-			NewRecordingRule("rule1", nil, nil),
-			NewAlertingRule("alert2", nil, 0, labels.Labels{{Name: "l2", Value: "v0"}}, nil, nil, true, nil),
-			NewAlertingRule("alert2", nil, 0, labels.Labels{{Name: "l2", Value: "v1"}}, nil, nil, true, nil),
-			NewRecordingRule("rule4", nil, nil),
-		},
-		seriesInPreviousEval: make([]map[string]labels.Labels, 8),
-	}
-	newGroup.CopyState(oldGroup)
-
-	want := []map[string]labels.Labels{
-		nil,
-		{"r3a": labels.Labels{{Name: "l1", Value: "v1"}}},
-		{"r3b": labels.Labels{{Name: "l1", Value: "v2"}}},
-		{},
-		{},
-		nil,
-		{"a2": labels.Labels{{Name: "l2", Value: "v1"}}},
-		nil,
-	}
-	testutil.Equals(t, want, newGroup.seriesInPreviousEval)
-	testutil.Equals(t, oldGroup.rules[0], newGroup.rules[3])
-	testutil.Equals(t, oldGroup.evaluationDuration, newGroup.evaluationDuration)
-	testutil.Equals(t, []labels.Labels{labels.Labels{{Name: "l1", Value: "v3"}}}, newGroup.staleSeries)
-}
-
-func TestDeletedRuleMarkedStale(t *testing.T) {
-	storage := teststorage.New(t)
-	defer storage.Close()
-	oldGroup := &Group{
-		rules: []Rule{
-			NewRecordingRule("rule1", nil, labels.Labels{{Name: "l1", Value: "v1"}}),
-		},
-		seriesInPreviousEval: []map[string]labels.Labels{
-			{"r1": labels.Labels{{Name: "l1", Value: "v1"}}},
-		},
-	}
-	newGroup := &Group{
-		rules:                []Rule{},
-		seriesInPreviousEval: []map[string]labels.Labels{},
-		opts: &ManagerOptions{
-			Appendable: AppendableAdapter{storage},
-		},
-	}
-	newGroup.CopyState(oldGroup)
-
-	newGroup.Eval(context.Background(), time.Unix(0, 0))
-
-	querier, err := storage.Querier(context.Background(), 0, 2000)
-	testutil.Ok(t, err)
-	defer querier.Close()
-
-	matcher, err := labels.NewMatcher(labels.MatchEqual, "l1", "v1")
-	testutil.Ok(t, err)
-
-	set := querier.Select(false, nil, matcher)
-
-	samples, err := readSeriesSet(set)
-	testutil.Ok(t, err)
-
-	metric := labels.FromStrings("l1", "v1").String()
-	metricSample, ok := samples[metric]
-
-	testutil.Assert(t, ok, "Series %s not returned.", metric)
-	testutil.Assert(t, value.IsStaleNaN(metricSample[0].V), "Appended sample not as expected. Wanted: stale NaN Got: %x", math.Float64bits(metricSample[0].V))
-}
-
-func TestUpdate(t *testing.T) {
-	files := []string{"fixtures/rules.yaml"}
-	expected := map[string]labels.Labels{
-		"test": labels.FromStrings("name", "value"),
-	}
-	storage := teststorage.New(t)
-	defer storage.Close()
-	opts := promql.EngineOpts{
-		Logger:     nil,
-		Reg:        nil,
-		MaxSamples: 10,
-		Timeout:    10 * time.Second,
-	}
-	engine := promql.NewEngine(opts)
-	mo := &ManagerOptions{
-		Appendable: AppendableAdapter{storage},
-		QueryFunc:  EngineQueryFunc(engine, storage),
-		Context:    context.Background(),
-		Logger:     log.NewNopLogger(),
-	}
-	mo.AlertHistory = NewMetricsHistory(storage, mo)
-	ruleManager := NewManager(mo)
-
-	ruleManager.Run()
-	defer ruleManager.Stop()
-
-	err := ruleManager.Update(10*time.Second, files, nil)
-	testutil.Ok(t, err)
-	testutil.Assert(t, len(ruleManager.groups) > 0, "expected non-empty rule groups")
-	ogs := map[string]*Group{}
-	for h, g := range ruleManager.groups {
-		g.seriesInPreviousEval = []map[string]labels.Labels{
-			expected,
-		}
-		ogs[h] = g
-	}
-
-	err = ruleManager.Update(10*time.Second, files, nil)
-	testutil.Ok(t, err)
-	for h, g := range ruleManager.groups {
-		for _, actual := range g.seriesInPreviousEval {
-			testutil.Equals(t, expected, actual)
-		}
-		// Groups are the same because of no updates.
-		testutil.Equals(t, ogs[h], g)
-	}
-
-	// Groups will be recreated if updated.
-	rgs, errs := rulefmt.ParseFile("fixtures/rules.yaml")
-	testutil.Assert(t, len(errs) == 0, "file parsing failures")
-
-	tmpFile, err := ioutil.TempFile("", "rules.test.*.yaml")
-	testutil.Ok(t, err)
-	defer os.Remove(tmpFile.Name())
-	defer tmpFile.Close()
-
-	err = ruleManager.Update(10*time.Second, []string{tmpFile.Name()}, nil)
-	testutil.Ok(t, err)
-
-	for h, g := range ruleManager.groups {
-		ogs[h] = g
-	}
-
-	// Update interval and reload.
-	for i, g := range rgs.Groups {
-		if g.Interval != 0 {
-			rgs.Groups[i].Interval = g.Interval * 2
-		} else {
-			rgs.Groups[i].Interval = model.Duration(10)
-		}
-
-	}
-	reloadAndValidate(rgs, t, tmpFile, ruleManager, expected, ogs)
-
-	// Change group rules and reload.
-	for i, g := range rgs.Groups {
-		for j, r := range g.Rules {
-			rgs.Groups[i].Rules[j].Expr.SetString(fmt.Sprintf("%s * 0", r.Expr.Value))
-		}
-	}
-	reloadAndValidate(rgs, t, tmpFile, ruleManager, expected, ogs)
-}
-
-// ruleGroupsTest for running tests over rules.
-type ruleGroupsTest struct {
-	Groups []ruleGroupTest `yaml:"groups"`
-}
-
-// ruleGroupTest forms a testing struct for running tests over rules.
-type ruleGroupTest struct {
-	Name     string         `yaml:"name"`
-	Interval model.Duration `yaml:"interval,omitempty"`
-	Rules    []rulefmt.Rule `yaml:"rules"`
-}
-
-func formatRules(r *rulefmt.RuleGroups) ruleGroupsTest {
-	grps := r.Groups
-	tmp := []ruleGroupTest{}
-	for _, g := range grps {
-		rtmp := []rulefmt.Rule{}
-		for _, r := range g.Rules {
-			rtmp = append(rtmp, rulefmt.Rule{
-				Record:      r.Record.Value,
-				Alert:       r.Alert.Value,
-				Expr:        r.Expr.Value,
-				For:         r.For,
-				Labels:      r.Labels,
-				Annotations: r.Annotations,
-			})
-		}
-		tmp = append(tmp, ruleGroupTest{
-			Name:     g.Name,
-			Interval: g.Interval,
-			Rules:    rtmp,
-		})
-	}
-	return ruleGroupsTest{
-		Groups: tmp,
-	}
-}
-
-func reloadAndValidate(rgs *rulefmt.RuleGroups, t *testing.T, tmpFile *os.File, ruleManager *Manager, expected map[string]labels.Labels, ogs map[string]*Group) {
-	bs, err := yaml.Marshal(formatRules(rgs))
-	testutil.Ok(t, err)
-	tmpFile.Seek(0, 0)
-	_, err = tmpFile.Write(bs)
-	testutil.Ok(t, err)
-	err = ruleManager.Update(10*time.Second, []string{tmpFile.Name()}, nil)
-	testutil.Ok(t, err)
-	for h, g := range ruleManager.groups {
-		if ogs[h] == g {
-			t.Fail()
-		}
-		ogs[h] = g
-	}
-}
-
-func TestNotify(t *testing.T) {
-	storage := teststorage.New(t)
-	defer storage.Close()
-	engineOpts := promql.EngineOpts{
-		Logger:     nil,
-		Reg:        nil,
-		MaxSamples: 10,
-		Timeout:    10 * time.Second,
-	}
-	engine := promql.NewEngine(engineOpts)
-	var lastNotified []*Alert
-	notifyFunc := func(ctx context.Context, expr string, alerts ...*Alert) {
-		lastNotified = alerts
-	}
-	opts := &ManagerOptions{
-		QueryFunc:   EngineQueryFunc(engine, storage),
-		Appendable:  AppendableAdapter{storage},
-		Context:     context.Background(),
-		Logger:      log.NewNopLogger(),
-		NotifyFunc:  notifyFunc,
-		ResendDelay: 2 * time.Second,
-	}
-	opts.AlertHistory = NewMetricsHistory(storage, opts)
-
-	expr, err := parser.ParseExpr("a > 1")
-	testutil.Ok(t, err)
-	rule := NewAlertingRule("aTooHigh", expr, 0, labels.Labels{}, labels.Labels{}, nil, true, log.NewNopLogger())
-	group := NewGroup("alert", "", time.Second, []Rule{rule}, true, opts)
-
-	app := storage.Appender()
-	app.Add(labels.FromStrings(model.MetricNameLabel, "a"), 1000, 2)
-	app.Add(labels.FromStrings(model.MetricNameLabel, "a"), 2000, 3)
-	app.Add(labels.FromStrings(model.MetricNameLabel, "a"), 5000, 3)
-	app.Add(labels.FromStrings(model.MetricNameLabel, "a"), 6000, 0)
-
-	err = app.Commit()
-	testutil.Ok(t, err)
-
-	ctx := context.Background()
-
-	// Alert sent right away
-	group.Eval(ctx, time.Unix(1, 0))
-	testutil.Equals(t, 1, len(lastNotified))
-	testutil.Assert(t, !lastNotified[0].ValidUntil.IsZero(), "ValidUntil should not be zero")
-
-	// Alert is not sent 1s later
-	group.Eval(ctx, time.Unix(2, 0))
-	testutil.Equals(t, 0, len(lastNotified))
-
-	// Alert is resent at t=5s
-	group.Eval(ctx, time.Unix(5, 0))
-	testutil.Equals(t, 1, len(lastNotified))
-
-	// Resolution alert sent right away
-	group.Eval(ctx, time.Unix(6, 0))
-	testutil.Equals(t, 1, len(lastNotified))
-}
-
-func TestMetricsUpdate(t *testing.T) {
-	files := []string{"fixtures/rules.yaml", "fixtures/rules2.yaml"}
-	metricNames := []string{
-		"prometheus_rule_group_interval_seconds",
-		"prometheus_rule_group_last_duration_seconds",
-		"prometheus_rule_group_last_evaluation_timestamp_seconds",
-		"prometheus_rule_group_rules",
-	}
-
-	storage := teststorage.New(t)
-	registry := prometheus.NewRegistry()
-	defer storage.Close()
-	opts := promql.EngineOpts{
-		Logger:     nil,
-		Reg:        nil,
-		MaxSamples: 10,
-		Timeout:    10 * time.Second,
-	}
-	engine := promql.NewEngine(opts)
-	mo := &ManagerOptions{
-		Appendable: AppendableAdapter{storage},
-		QueryFunc:  EngineQueryFunc(engine, storage),
-		Context:    context.Background(),
-		Logger:     log.NewNopLogger(),
-		Registerer: registry,
-	}
-	mo.AlertHistory = NewMetricsHistory(storage, mo)
-	ruleManager := NewManager(mo)
-	ruleManager.Run()
-	defer ruleManager.Stop()
-
-	countMetrics := func() int {
-		ms, err := registry.Gather()
-		testutil.Ok(t, err)
-		var metrics int
-		for _, m := range ms {
-			s := m.GetName()
-			for _, n := range metricNames {
-				if s == n {
-					metrics += len(m.Metric)
-					break
-				}
-			}
-		}
-		return metrics
-	}
-
-	cases := []struct {
-		files   []string
-		metrics int
-	}{
-		{
-			files:   files,
-			metrics: 8,
-		},
-		{
-			files:   files[:1],
-			metrics: 4,
-		},
-		{
-			files:   files[:0],
-			metrics: 0,
-		},
-		{
-			files:   files[1:],
-			metrics: 4,
-		},
-	}
-
-	for i, c := range cases {
-		err := ruleManager.Update(time.Second, c.files, nil)
-		testutil.Ok(t, err)
-		time.Sleep(2 * time.Second)
-		testutil.Equals(t, c.metrics, countMetrics(), "test %d: invalid count of metrics", i)
-	}
-}

From 82f2e795c6d879aa65f50bd6d4cd1fe2488f0261 Mon Sep 17 00:00:00 2001
From: Owen Diehl 
Date: Thu, 23 Jul 2020 16:54:06 -0400
Subject: [PATCH 24/40] MemStore must be started after construction
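
Separate construction from startup: NewMemStore no longer receives the RuleIter or spawns its cleanup loop. A dedicated Start(iter) call injects the iterator, closes an internal initiated channel, and launches run(); Querier now blocks on that channel until Start has been called, and Stop is a safe no-op if the store was never started. The snippet below is a minimal, runnable sketch of that gating pattern with illustrative names only, not the real MemStore API:

package main

import (
	"fmt"
	"time"
)

// gate is a self-contained stand-in for the MemStore: construction allocates
// the channel, Start injects dependencies and closes it, and long-running
// methods block on it so they never observe missing state.
type gate struct {
	initiated chan struct{}
	rules     string // stands in for the RuleIter supplied by Start
}

func newGate() *gate {
	return &gate{initiated: make(chan struct{})}
}

func (g *gate) Start(rules string) {
	g.rules = rules
	close(g.initiated) // unblocks run() and any waiting queriers
	go g.run()
}

func (g *gate) run() {
	<-g.initiated // parked until Start is called
	fmt.Println("cleanup loop running with", g.rules)
}

func (g *gate) Stop() {
	select {
	case <-g.initiated:
		// started: real teardown would happen here
	default:
		// never started: nothing to do
	}
}

func main() {
	g := newGate()
	g.Stop() // safe no-op before Start
	g.Start("alerting rules")
	time.Sleep(10 * time.Millisecond)
}

The new TestMemStoreStopBeforeStart and TestMemstoreBlocks tests exercise exactly this close-once behaviour.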

---
 pkg/ruler/manager/compat.go        | 79 ++++++++++++++++++++++++
 pkg/ruler/manager/memstore.go      | 42 +++++++++----
 pkg/ruler/manager/memstore_test.go | 97 +++++++++++++++++++++++++++---
 pkg/ruler/manager/query.go         | 46 --------------
 4 files changed, 197 insertions(+), 67 deletions(-)
 create mode 100644 pkg/ruler/manager/compat.go
 delete mode 100644 pkg/ruler/manager/query.go

diff --git a/pkg/ruler/manager/compat.go b/pkg/ruler/manager/compat.go
new file mode 100644
index 0000000000000..bc106c01e8b3e
--- /dev/null
+++ b/pkg/ruler/manager/compat.go
@@ -0,0 +1,79 @@
+package manager
+
+import (
+	"context"
+	"time"
+
+	"github.com/cortexproject/cortex/pkg/ruler"
+	"github.com/grafana/loki/pkg/logproto"
+	"github.com/grafana/loki/pkg/logql"
+	"github.com/pkg/errors"
+	"github.com/prometheus/prometheus/pkg/labels"
+	"github.com/prometheus/prometheus/promql"
+	"github.com/prometheus/prometheus/rules"
+)
+
+func LokiDelayedQueryFunc(engine *logql.Engine) ruler.DelayedQueryFunc {
+	return func(delay time.Duration) rules.QueryFunc {
+		return rules.QueryFunc(func(ctx context.Context, qs string, t time.Time) (promql.Vector, error) {
+			adjusted := t.Add(-delay)
+			params := logql.NewLiteralParams(
+				qs,
+				adjusted,
+				adjusted,
+				0,
+				0,
+				logproto.FORWARD,
+				0,
+				nil,
+			)
+			q := engine.Query(params)
+
+			res, err := q.Exec(ctx)
+			if err != nil {
+				return nil, err
+			}
+			switch v := res.Data.(type) {
+			case promql.Vector:
+				return v, nil
+			case promql.Scalar:
+				return promql.Vector{promql.Sample{
+					Point:  promql.Point(v),
+					Metric: labels.Labels{},
+				}}, nil
+			default:
+				return nil, errors.New("rule result is not a vector or scalar")
+			}
+		})
+	}
+
+}
+
+// func MemstoreTenantManager(
+// 	cfg ruler.Config,
+// 	queryFunc ruler.DelayedQueryFunc,
+// ) ruler.TenantManagerFunc {
+// 	metrics := NewMetrics()
+
+// 	return ruler.TenantOptionsFunc(func(
+// 		ctx context.Context,
+// 		userID string,
+// 		notifier *notifier.Manager,
+// 		logger log.Logger,
+// 		reg prometheus.Registerer,
+// 	) *rules.Manager {
+// 		return &rules.ManagerOptions{
+// 			Appendable:      NoopAppender{},
+// 			Queryable:       q,
+// 			QueryFunc:       queryFunc(cfg.EvaluationDelay),
+// 			Context:         user.InjectOrgID(ctx, userID),
+// 			ExternalURL:     cfg.ExternalURL.URL,
+// 			NotifyFunc:      sendAlerts(notifier, cfg.ExternalURL.URL.String()),
+// 			Logger:          log.With(logger, "user", userID),
+// 			Registerer:      reg,
+// 			OutageTolerance: cfg.OutageTolerance,
+// 			ForGracePeriod:  cfg.ForGracePeriod,
+// 			ResendDelay:     cfg.ResendDelay,
+// 		}
+// 	})
+// }
diff --git a/pkg/ruler/manager/memstore.go b/pkg/ruler/manager/memstore.go
index c6ebcdb84ba5b..1dfb077d4a0be 100644
--- a/pkg/ruler/manager/memstore.go
+++ b/pkg/ruler/manager/memstore.go
@@ -74,28 +74,46 @@ type MemStore struct {
 	logger    log.Logger
 	rules     map[string]*RuleCache
 
+	initiated       chan struct{}
 	done            chan struct{}
 	cleanupInterval time.Duration
 }
 
-func NewMemStore(userID string, mgr RuleIter, queryFunc rules.QueryFunc, metrics *Metrics, cleanupInterval time.Duration, logger log.Logger) *MemStore {
+func NewMemStore(userID string, queryFunc rules.QueryFunc, metrics *Metrics, cleanupInterval time.Duration, logger log.Logger) *MemStore {
 	s := &MemStore{
 		userID:          userID,
 		metrics:         metrics,
 		queryFunc:       queryFunc,
 		logger:          logger,
-		mgr:             mgr,
 		cleanupInterval: cleanupInterval,
 		rules:           make(map[string]*RuleCache),
 
-		done: make(chan struct{}),
+		initiated: make(chan struct{}), // blocks execution until Start() is called
+		done:      make(chan struct{}),
 	}
-	go s.run()
 	return s
 
 }
 
+// Calling Start will set the RuleIter, unblock the MemStore, and start the run() function in a separate goroutine.
+func (m *MemStore) Start(iter RuleIter) error {
+	if iter == nil {
+		return errors.New("nil RuleIter")
+	}
+	m.mgr = iter
+	close(m.initiated)
+	go m.run()
+	return nil
+}
+
 func (m *MemStore) Stop() {
+	select {
+	case <-m.initiated:
+	default:
+		// If initiated hasn't been closed, the MemStore was never started: nothing to stop.
+		return
+	}
+
 	// Need to nil all series & decrement gauges
 	m.mtx.Lock()
 	defer m.mtx.Unlock()
@@ -116,6 +134,7 @@ func (m *MemStore) Stop() {
 
 // run periodically cleans up old series/samples to ensure memory consumption doesn't grow unbounded.
 func (m *MemStore) run() {
+	<-m.initiated
 	t := time.NewTicker(m.cleanupInterval)
 	for {
 		select {
@@ -151,13 +170,12 @@ func (m *MemStore) run() {
 	}
 }
 
-func (m *MemStore) Appender() storage.Appender { return NoopAppender{} }
-
 // implement storage.Queryable. It is only called with the desired ts as maxtime. Mint is
 // parameterized via the outage tolerance, but since we're synthetically generating these,
 // we only care about the desired time.
 func (m *MemStore) Querier(ctx context.Context, mint, maxt int64) (storage.Querier, error) {
-	return &MemStoreQuerier{
+	<-m.initiated
+	return &memStoreQuerier{
 		ts:       util.TimeFromMillis(maxt),
 		MemStore: m,
 		ctx:      ctx,
@@ -165,13 +183,13 @@ func (m *MemStore) Querier(ctx context.Context, mint, maxt int64) (storage.Queri
 
 }
 
-type MemStoreQuerier struct {
+type memStoreQuerier struct {
 	ts  time.Time
 	ctx context.Context
 	*MemStore
 }
 
-func (m *MemStoreQuerier) Select(sortSeries bool, params *storage.SelectHints, matchers ...*labels.Matcher) storage.SeriesSet {
+func (m *memStoreQuerier) Select(sortSeries bool, params *storage.SelectHints, matchers ...*labels.Matcher) storage.SeriesSet {
 	b := labels.NewBuilder(nil)
 	var ruleKey string
 	for _, matcher := range matchers {
@@ -274,17 +292,17 @@ func (m *MemStoreQuerier) Select(sortSeries bool, params *storage.SelectHints, m
 }
 
 // LabelValues returns all potential values for a label name.
-func (*MemStoreQuerier) LabelValues(name string) ([]string, storage.Warnings, error) {
+func (*memStoreQuerier) LabelValues(name string) ([]string, storage.Warnings, error) {
 	return nil, nil, errors.New("unimplemented")
 }
 
 // LabelNames returns all the unique label names present in the block in sorted order.
-func (*MemStoreQuerier) LabelNames() ([]string, storage.Warnings, error) {
+func (*memStoreQuerier) LabelNames() ([]string, storage.Warnings, error) {
 	return nil, nil, errors.New("unimplemented")
 }
 
 // Close releases the resources of the Querier.
-func (*MemStoreQuerier) Close() error { return nil }
+func (*memStoreQuerier) Close() error { return nil }
 
 type RuleCache struct {
 	mtx     sync.Mutex
diff --git a/pkg/ruler/manager/memstore_test.go b/pkg/ruler/manager/memstore_test.go
index ec2c327d8b19f..1fd1fd5df5ff3 100644
--- a/pkg/ruler/manager/memstore_test.go
+++ b/pkg/ruler/manager/memstore_test.go
@@ -30,15 +30,9 @@ type MockRuleIter []*rules.AlertingRule
 
 func (xs MockRuleIter) AlertingRules() []*rules.AlertingRule { return xs }
 
-func testStore(alerts []*rules.AlertingRule, queryFunc rules.QueryFunc, itv time.Duration) *MemStore {
-	return NewMemStore("test", MockRuleIter(alerts), queryFunc, NilMetrics, itv, NilLogger)
-}
-
-func TestIdempotentStop(t *testing.T) {
-	store := testStore(nil, nil, time.Millisecond)
+func testStore(queryFunc rules.QueryFunc, itv time.Duration) *MemStore {
+	return NewMemStore("test", queryFunc, NilMetrics, itv, NilLogger)
 
-	store.Stop()
-	store.Stop()
 }
 
 func TestSelectRestores(t *testing.T) {
@@ -85,7 +79,9 @@ func TestSelectRestores(t *testing.T) {
 		}, nil
 	})
 
-	store := testStore(ars, fn, time.Minute)
+	store := testStore(fn, time.Minute)
+	err := store.Start(MockRuleIter(ars))
+	require.Nil(t, err)
 
 	now := util.TimeToMillis(time.Now())
 
@@ -138,3 +134,86 @@ func TestSelectRestores(t *testing.T) {
 	require.Equal(t, false, sset.Next())
 	require.Equal(t, 1, callCount)
 }
+
+func TestMemstoreStart(t *testing.T) {
+	ruleName := "testrule"
+	ars := []*rules.AlertingRule{
+		rules.NewAlertingRule(
+			ruleName,
+			&parser.StringLiteral{Val: "unused"},
+			time.Minute,
+			labels.FromMap(map[string]string{"foo": "bar"}),
+			nil,
+			nil,
+			false,
+			NilLogger,
+		),
+	}
+
+	fn := rules.QueryFunc(func(ctx context.Context, qs string, t time.Time) (promql.Vector, error) {
+		return nil, nil
+	})
+
+	store := testStore(fn, time.Minute)
+
+	err := store.Start(nil)
+	require.NotNil(t, err)
+	err = store.Start(MockRuleIter(ars))
+	require.Nil(t, err)
+}
+
+func TestMemStoreStopBeforeStart(t *testing.T) {
+	store := testStore(nil, time.Minute)
+	done := make(chan struct{})
+	go func() {
+		store.Stop()
+		done <- struct{}{}
+	}()
+	select {
+	case <-time.After(time.Millisecond):
+		t.FailNow()
+	case <-done:
+	}
+}
+
+func TestMemstoreBlocks(t *testing.T) {
+	ruleName := "testrule"
+	ars := []*rules.AlertingRule{
+		rules.NewAlertingRule(
+			ruleName,
+			&parser.StringLiteral{Val: "unused"},
+			time.Minute,
+			labels.FromMap(map[string]string{"foo": "bar"}),
+			nil,
+			nil,
+			false,
+			NilLogger,
+		),
+	}
+
+	fn := rules.QueryFunc(func(ctx context.Context, qs string, t time.Time) (promql.Vector, error) {
+		return nil, nil
+	})
+
+	store := testStore(fn, time.Minute)
+
+	done := make(chan struct{})
+	go func() {
+		store.Querier(context.Background(), 0, 1)
+		done <- struct{}{}
+	}()
+
+	select {
+	case <-time.After(time.Millisecond):
+	case <-done:
+		t.FailNow()
+	}
+
+	store.Start(MockRuleIter(ars))
+	select {
+	case <-done:
+	case <-time.After(time.Millisecond):
+		t.FailNow()
+	}
+
+}
diff --git a/pkg/ruler/manager/query.go b/pkg/ruler/manager/query.go
deleted file mode 100644
index 746f8e404193b..0000000000000
--- a/pkg/ruler/manager/query.go
+++ /dev/null
@@ -1,46 +0,0 @@
-package manager
-
-import (
-	"context"
-	"time"
-
-	"github.com/grafana/loki/pkg/logproto"
-	"github.com/grafana/loki/pkg/logql"
-	"github.com/pkg/errors"
-	"github.com/prometheus/prometheus/pkg/labels"
-	"github.com/prometheus/prometheus/promql"
-	"github.com/prometheus/prometheus/rules"
-)
-
-func LokiDelayedQueryFunc(engine *logql.Engine, delay time.Duration) rules.QueryFunc {
-	return rules.QueryFunc(func(ctx context.Context, qs string, t time.Time) (promql.Vector, error) {
-		adjusted := t.Add(-delay)
-		params := logql.NewLiteralParams(
-			qs,
-			adjusted,
-			adjusted,
-			0,
-			0,
-			logproto.FORWARD,
-			0,
-			nil,
-		)
-		q := engine.Query(params)
-
-		res, err := q.Exec(ctx)
-		if err != nil {
-			return nil, err
-		}
-		switch v := res.Data.(type) {
-		case promql.Vector:
-			return v, nil
-		case promql.Scalar:
-			return promql.Vector{promql.Sample{
-				Point:  promql.Point(v),
-				Metric: labels.Labels{},
-			}}, nil
-		default:
-			return nil, errors.New("rule result is not a vector or scalar")
-		}
-	})
-}

From 5f7e57e3a4cf65345825e2a71a889f2531e10c6a Mon Sep 17 00:00:00 2001
From: Owen Diehl 
Date: Thu, 23 Jul 2020 17:08:11 -0400
Subject: [PATCH 25/40] MemstoreTenantManager
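
Uncomment and finish MemstoreTenantManager: every tenant gets its own rules.Manager whose Queryable is a per-tenant MemStore started against that manager, while the Metrics are created lazily on the first call so per-tenant construction never re-registers collectors on the shared Registerer. A rough, self-contained sketch of that lazy-initialization closure, using hypothetical names:

package main

import "fmt"

// ruleMetrics stands in for the shared evaluation metrics (hypothetical type).
type ruleMetrics struct{ registrations int }

// newTenantManagerFunc sketches the lazy shared-metrics pattern used by
// MemstoreTenantManager: the closure captures a nil pointer and fills it on
// the first tenant, so later tenants reuse the already-registered collectors.
func newTenantManagerFunc() func(userID string) *ruleMetrics {
	var shared *ruleMetrics
	return func(userID string) *ruleMetrics {
		if shared == nil {
			shared = &ruleMetrics{registrations: 1} // registered exactly once
		}
		return shared
	}
}

func main() {
	build := newTenantManagerFunc()
	a, b := build("tenant-a"), build("tenant-b")
	fmt.Println(a == b) // true: one metrics instance shared across tenants
}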

---
 pkg/ruler/manager/compat.go        | 73 +++++++++++++++++++-----------
 pkg/ruler/manager/memstore.go      |  8 +---
 pkg/ruler/manager/memstore_test.go |  8 +---
 3 files changed, 50 insertions(+), 39 deletions(-)

diff --git a/pkg/ruler/manager/compat.go b/pkg/ruler/manager/compat.go
index bc106c01e8b3e..304022899019f 100644
--- a/pkg/ruler/manager/compat.go
+++ b/pkg/ruler/manager/compat.go
@@ -5,12 +5,16 @@ import (
 	"time"
 
 	"github.com/cortexproject/cortex/pkg/ruler"
+	"github.com/go-kit/kit/log"
 	"github.com/grafana/loki/pkg/logproto"
 	"github.com/grafana/loki/pkg/logql"
 	"github.com/pkg/errors"
+	"github.com/prometheus/client_golang/prometheus"
+	"github.com/prometheus/prometheus/notifier"
 	"github.com/prometheus/prometheus/pkg/labels"
 	"github.com/prometheus/prometheus/promql"
 	"github.com/prometheus/prometheus/rules"
+	"github.com/weaveworks/common/user"
 )
 
 func LokiDelayedQueryFunc(engine *logql.Engine) ruler.DelayedQueryFunc {
@@ -49,31 +53,46 @@ func LokiDelayedQueryFunc(engine *logql.Engine) ruler.DelayedQueryFunc {
 
 }
 
-// func MemstoreTenantManager(
-// 	cfg ruler.Config,
-// 	queryFunc ruler.DelayedQueryFunc,
-// ) ruler.TenantManagerFunc {
-// 	metrics := NewMetrics()
+func MemstoreTenantManager(
+	cfg ruler.Config,
+	delayedQueryFunc ruler.DelayedQueryFunc,
+) ruler.TenantManagerFunc {
+	var metrics *Metrics
 
-// 	return ruler.TenantOptionsFunc(func(
-// 		ctx context.Context,
-// 		userID string,
-// 		notifier *notifier.Manager,
-// 		logger log.Logger,
-// 		reg prometheus.Registerer,
-// 	) *rules.Manager {
-// 		return &rules.ManagerOptions{
-// 			Appendable:      NoopAppender{},
-// 			Queryable:       q,
-// 			QueryFunc:       queryFunc(cfg.EvaluationDelay),
-// 			Context:         user.InjectOrgID(ctx, userID),
-// 			ExternalURL:     cfg.ExternalURL.URL,
-// 			NotifyFunc:      sendAlerts(notifier, cfg.ExternalURL.URL.String()),
-// 			Logger:          log.With(logger, "user", userID),
-// 			Registerer:      reg,
-// 			OutageTolerance: cfg.OutageTolerance,
-// 			ForGracePeriod:  cfg.ForGracePeriod,
-// 			ResendDelay:     cfg.ResendDelay,
-// 		}
-// 	})
-// }
+	return ruler.TenantManagerFunc(func(
+		ctx context.Context,
+		userID string,
+		notifier *notifier.Manager,
+		logger log.Logger,
+		reg prometheus.Registerer,
+	) *rules.Manager {
+
+		// Note: this currently does not distinguish between managers for different tenants,
+		// but is used solely to prevent re-registering the same metrics.
+		if metrics == nil {
+			metrics = NewMetrics(reg)
+		}
+		logger = log.With(logger, "user", userID)
+		queryFunc := delayedQueryFunc(cfg.EvaluationDelay)
+		memStore := NewMemStore(userID, queryFunc, metrics, 5*time.Minute, log.With(logger, "subcomponent", "MemStore"))
+
+		mgr := rules.NewManager(&rules.ManagerOptions{
+			Appendable:      NoopAppender{},
+			Queryable:       memStore,
+			QueryFunc:       queryFunc,
+			Context:         user.InjectOrgID(ctx, userID),
+			ExternalURL:     cfg.ExternalURL.URL,
+			NotifyFunc:      ruler.SendAlerts(notifier, cfg.ExternalURL.URL.String()),
+			Logger:          logger,
+			Registerer:      reg,
+			OutageTolerance: cfg.OutageTolerance,
+			ForGracePeriod:  cfg.ForGracePeriod,
+			ResendDelay:     cfg.ResendDelay,
+		})
+
+		// initialize memStore, bound to the manager's alerting rules
+		memStore.Start(mgr)
+
+		return mgr
+	})
+}
diff --git a/pkg/ruler/manager/memstore.go b/pkg/ruler/manager/memstore.go
index 1dfb077d4a0be..56b3fdcb98083 100644
--- a/pkg/ruler/manager/memstore.go
+++ b/pkg/ruler/manager/memstore.go
@@ -27,7 +27,7 @@ const (
 
 type NoopAppender struct{}
 
-func (a NoopAppender) Appender() (storage.Appender, error)                     { return a, nil }
+func (a NoopAppender) Appender() storage.Appender                              { return a }
 func (a NoopAppender) Add(l labels.Labels, t int64, v float64) (uint64, error) { return 0, nil }
 func (a NoopAppender) AddFast(ref uint64, t int64, v float64) error {
 	return errors.New("unimplemented")
@@ -96,14 +96,10 @@ func NewMemStore(userID string, queryFunc rules.QueryFunc, metrics *Metrics, cle
 }
 
 // Calling Start will set the RuleIter, unblock the MemStore, and start the run() function in a separate goroutine.
-func (m *MemStore) Start(iter RuleIter) error {
-	if iter == nil {
-		return errors.New("nil RuleIter")
-	}
+func (m *MemStore) Start(iter RuleIter) {
 	m.mgr = iter
 	close(m.initiated)
 	go m.run()
-	return nil
 }
 
 func (m *MemStore) Stop() {
diff --git a/pkg/ruler/manager/memstore_test.go b/pkg/ruler/manager/memstore_test.go
index 1fd1fd5df5ff3..ef3277075e0de 100644
--- a/pkg/ruler/manager/memstore_test.go
+++ b/pkg/ruler/manager/memstore_test.go
@@ -80,8 +80,7 @@ func TestSelectRestores(t *testing.T) {
 	})
 
 	store := testStore(fn, time.Minute)
-	err := store.Start(MockRuleIter(ars))
-	require.Nil(t, err)
+	store.Start(MockRuleIter(ars))
 
 	now := util.TimeToMillis(time.Now())
 
@@ -156,10 +155,7 @@ func TestMemstoreStart(t *testing.T) {
 
 	store := testStore(fn, time.Minute)
 
-	err := store.Start(nil)
-	require.NotNil(t, err)
-	err = store.Start(MockRuleIter(ars))
-	require.Nil(t, err)
+	store.Start(MockRuleIter(ars))
 }
 
 func TestMemStoreStopBeforeStart(t *testing.T) {

From a254a9ade3fd6e1c07b8cfdf13e91eef399acc7b Mon Sep 17 00:00:00 2001
From: Owen Diehl 
Date: Thu, 23 Jul 2020 18:00:56 -0400
Subject: [PATCH 26/40] ruler loading
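
Register rule storage as its own, user-invisible module and make the Ruler depend on it. In single-binary mode an entirely default store config is treated as "not configured": initRulerStorage logs and returns without a service, and initRuler later skips itself when storage is nil. A small sketch of that skip-when-unconfigured guard, with made-up types rather than the actual cortex config:

package main

import "fmt"

// storeConfig is a stand-in for the ruler's rule-store configuration (hypothetical).
type storeConfig struct {
	Type string
	URL  string
}

// isDefaults mirrors the idea behind StoreConfig.IsDefaults(): an all-zero
// configuration is taken to mean "rule storage was never configured".
func (c storeConfig) isDefaults() bool { return c == storeConfig{} }

// initRulerStorage sketches the guard added in this patch: in single-binary
// mode an unconfigured store is skipped instead of failing startup, and the
// ruler module later skips itself when storage is nil.
func initRulerStorage(singleBinary bool, cfg storeConfig) bool {
	if singleBinary && cfg.isDefaults() {
		fmt.Println("ruler storage not configured in single-binary mode; skipping")
		return false
	}
	fmt.Println("starting ruler storage of type", cfg.Type)
	return true
}

func main() {
	initRulerStorage(true, storeConfig{})                              // skipped
	initRulerStorage(true, storeConfig{Type: "s3", URL: "s3://rules"}) // started
}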

---
 pkg/loki/loki.go    |  8 +++++---
 pkg/loki/modules.go | 30 +++++++++++++++++++++++++-----
 2 files changed, 30 insertions(+), 8 deletions(-)

diff --git a/pkg/loki/loki.go b/pkg/loki/loki.go
index f723b12471d67..289297764302c 100644
--- a/pkg/loki/loki.go
+++ b/pkg/loki/loki.go
@@ -15,7 +15,7 @@ import (
 	"github.com/cortexproject/cortex/pkg/querier/frontend"
 	"github.com/cortexproject/cortex/pkg/ring"
 	"github.com/cortexproject/cortex/pkg/ring/kv/memberlist"
-	"github.com/cortexproject/cortex/pkg/ruler"
+	cortex_ruler "github.com/cortexproject/cortex/pkg/ruler"
 	"github.com/cortexproject/cortex/pkg/util"
 	"github.com/cortexproject/cortex/pkg/util/runtimeconfig"
 	"github.com/cortexproject/cortex/pkg/util/services"
@@ -33,6 +33,7 @@ import (
 	"github.com/grafana/loki/pkg/lokifrontend"
 	"github.com/grafana/loki/pkg/querier"
 	"github.com/grafana/loki/pkg/querier/queryrange"
+	"github.com/grafana/loki/pkg/ruler"
 	"github.com/grafana/loki/pkg/storage"
 	"github.com/grafana/loki/pkg/tracing"
 	serverutil "github.com/grafana/loki/pkg/util/server"
@@ -129,7 +130,7 @@ type Loki struct {
 	store         storage.Store
 	tableManager  *chunk.TableManager
 	frontend      *frontend.Frontend
-	ruler         *ruler.Ruler
+	ruler         *cortex_ruler.Ruler
 	stopper       queryrange.Stopper
 	runtimeConfig *runtimeconfig.Manager
 	memberlistKV  *memberlist.KVInitService
@@ -302,6 +303,7 @@ func (t *Loki) setupModuleManager() error {
 	mm.RegisterModule(Ingester, t.initIngester)
 	mm.RegisterModule(Querier, t.initQuerier)
 	mm.RegisterModule(QueryFrontend, t.initQueryFrontend)
+	mm.RegisterModule(RulerStorage, t.initRulerStorage, modules.UserInvisibleModule)
 	mm.RegisterModule(Ruler, t.initRuler)
 	mm.RegisterModule(TableManager, t.initTableManager)
 	mm.RegisterModule(All, nil)
@@ -315,7 +317,7 @@ func (t *Loki) setupModuleManager() error {
 		Ingester:      {Store, Server, MemberlistKV},
 		Querier:       {Store, Ring, Server},
 		QueryFrontend: {Server, Overrides},
-		Ruler:         {Distributor, Store},
+		Ruler:         {Distributor, Store, RulerStorage},
 		TableManager:  {Server},
 		All:           {Querier, Ingester, Distributor, TableManager, Ruler},
 	}
diff --git a/pkg/loki/modules.go b/pkg/loki/modules.go
index fdfb966dcffec..ea2700a40af0e 100644
--- a/pkg/loki/modules.go
+++ b/pkg/loki/modules.go
@@ -57,6 +57,7 @@ const (
 	Ingester      string = "ingester"
 	Querier       string = "querier"
 	QueryFrontend string = "query-frontend"
+	RulerStorage  string = "ruler-storage"
 	Ruler         string = "ruler"
 	Store         string = "store"
 	TableManager  string = "table-manager"
@@ -354,7 +355,27 @@ func (t *Loki) initQueryFrontend() (_ services.Service, err error) {
 	}), nil
 }
 
+func (t *Loki) initRulerStorage() (_ services.Service, err error) {
+	// If the ruler is not configured and we're running in single-binary mode, just log a message and continue.
+	// unfortunately there is no way to generate a "default" config and compare default against actual
+	// to determine if it's unconfigured.  the following check, however, correctly tests this.
+	// Single binary integration tests will break if this ever drifts
+	if t.Cfg.Target == All && t.Cfg.Ruler.StoreConfig.IsDefaults() {
+		level.Info(util.Logger).Log("msg", "RulerStorage is not configured in single binary mode and will not be started.")
+		return
+	}
+
+	t.RulerStorage, err = cortex_ruler.NewRuleStorage(t.Cfg.Ruler.StoreConfig)
+
+	return
+}
+
 func (t *Loki) initRuler() (_ services.Service, err error) {
+	if t.RulerStorage == nil {
+		level.Info(util.Logger).Log("msg", "RulerStorage is nil.  Not starting the ruler.")
+		return nil, nil
+	}
+
 	t.cfg.Ruler.Ring.ListenPort = t.cfg.Server.GRPCListenPort
 	t.cfg.Ruler.Ring.KVStore.MemberlistKV = t.memberlistKV.GetMemberlistKV
 	q, err := querier.New(t.cfg.Querier, t.cfg.IngesterClient, t.ring, t.store, t.overrides)
@@ -364,13 +385,12 @@ func (t *Loki) initRuler() (_ services.Service, err error) {
 
 	engine := logql.NewEngine(t.cfg.Querier.Engine, q)
 
-	t.ruler, err = cortex_ruler.NewRuler(
-		t.cfg.Ruler,
-		ruler.LokiDelayedQueryFunc(engine),
-		ruler.InMemoryAppendableHistory(prometheus.DefaultRegisterer),
+	t.Ruler, err = ruler.NewRuler(
+		t.Cfg.Ruler,
+		engine,
 		prometheus.DefaultRegisterer,
-		func(s string) (fmt.Stringer, error) { return logql.ParseExpr(s) },
 		util.Logger,
+		t.RulerStorage,
 	)
 
 	if err != nil {

From 367e18a2606b7516b77f6dfe5104169f3c2badd9 Mon Sep 17 00:00:00 2001
From: Owen Diehl 
Date: Wed, 29 Jul 2020 10:37:50 -0400
Subject: [PATCH 27/40] ruler instantiation
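
Instantiate the Loki ruler end to end: pkg/ruler/ruler.go wraps the cortex ruler config and constructor, and compat.go gains a groupLoader whose Parse goes through logql.ParseExpr plus an exprAdapter, so the parsed LogQL expression satisfies the prometheus parser.Expr interface and the stock rules manager can carry LogQL rules unchanged. A simplified, self-contained sketch of the adapter idea (this is not the real parser.Expr interface):

package main

import "fmt"

// promExpr is a simplified stand-in for the prometheus parser.Expr interface.
type promExpr interface {
	String() string
	promQLExpr() // marker method, as on parser.Expr implementations
}

// logQLExpr is a stand-in for a parsed LogQL expression.
type logQLExpr struct{ query string }

func (e logQLExpr) String() string { return e.query }

// exprAdapter mirrors the adapter added in this patch: embedding supplies
// String() from the LogQL expression, and the stub marker method lets the
// value flow through APIs typed against PromQL expressions.
type exprAdapter struct{ logQLExpr }

func (exprAdapter) promQLExpr() {}

func main() {
	var e promExpr = exprAdapter{logQLExpr{query: `sum(rate({app="foo"} |= "error" [5m])) > 10`}}
	fmt.Println(e.String())
}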

---
 pkg/loki/loki.go            |  2 ++
 pkg/loki/modules.go         |  8 ++++----
 pkg/ruler/manager/compat.go | 29 +++++++++++++++++++++++++++--
 pkg/ruler/ruler.go          | 29 +++++++++++++++++++++++++++++
 4 files changed, 62 insertions(+), 6 deletions(-)
 create mode 100644 pkg/ruler/ruler.go

diff --git a/pkg/loki/loki.go b/pkg/loki/loki.go
index 289297764302c..7f9ba110b914d 100644
--- a/pkg/loki/loki.go
+++ b/pkg/loki/loki.go
@@ -16,6 +16,7 @@ import (
 	"github.com/cortexproject/cortex/pkg/ring"
 	"github.com/cortexproject/cortex/pkg/ring/kv/memberlist"
 	cortex_ruler "github.com/cortexproject/cortex/pkg/ruler"
+	"github.com/cortexproject/cortex/pkg/ruler/rules"
 	"github.com/cortexproject/cortex/pkg/util"
 	"github.com/cortexproject/cortex/pkg/util/runtimeconfig"
 	"github.com/cortexproject/cortex/pkg/util/services"
@@ -131,6 +132,7 @@ type Loki struct {
 	tableManager  *chunk.TableManager
 	frontend      *frontend.Frontend
 	ruler         *cortex_ruler.Ruler
+	RulerStorage  rules.RuleStore
 	stopper       queryrange.Stopper
 	runtimeConfig *runtimeconfig.Manager
 	memberlistKV  *memberlist.KVInitService
diff --git a/pkg/loki/modules.go b/pkg/loki/modules.go
index ea2700a40af0e..fa4062328859e 100644
--- a/pkg/loki/modules.go
+++ b/pkg/loki/modules.go
@@ -360,12 +360,12 @@ func (t *Loki) initRulerStorage() (_ services.Service, err error) {
 	// unfortunately there is no way to generate a "default" config and compare default against actual
 	// to determine if it's unconfigured.  the following check, however, correctly tests this.
 	// Single binary integration tests will break if this ever drifts
-	if t.Cfg.Target == All && t.Cfg.Ruler.StoreConfig.IsDefaults() {
+	if t.cfg.Target == All && t.cfg.Ruler.StoreConfig.IsDefaults() {
 		level.Info(util.Logger).Log("msg", "RulerStorage is not configured in single binary mode and will not be started.")
 		return
 	}
 
-	t.RulerStorage, err = cortex_ruler.NewRuleStorage(t.Cfg.Ruler.StoreConfig)
+	t.RulerStorage, err = cortex_ruler.NewRuleStorage(t.cfg.Ruler.StoreConfig)
 
 	return
 }
@@ -385,8 +385,8 @@ func (t *Loki) initRuler() (_ services.Service, err error) {
 
 	engine := logql.NewEngine(t.cfg.Querier.Engine, q)
 
-	t.Ruler, err = ruler.NewRuler(
-		t.Cfg.Ruler,
+	t.ruler, err = ruler.NewRuler(
+		t.cfg.Ruler,
 		engine,
 		prometheus.DefaultRegisterer,
 		util.Logger,
diff --git a/pkg/ruler/manager/compat.go b/pkg/ruler/manager/compat.go
index 304022899019f..f22c17b45da57 100644
--- a/pkg/ruler/manager/compat.go
+++ b/pkg/ruler/manager/compat.go
@@ -6,15 +6,17 @@ import (
 
 	"github.com/cortexproject/cortex/pkg/ruler"
 	"github.com/go-kit/kit/log"
-	"github.com/grafana/loki/pkg/logproto"
-	"github.com/grafana/loki/pkg/logql"
 	"github.com/pkg/errors"
 	"github.com/prometheus/client_golang/prometheus"
 	"github.com/prometheus/prometheus/notifier"
 	"github.com/prometheus/prometheus/pkg/labels"
 	"github.com/prometheus/prometheus/promql"
+	"github.com/prometheus/prometheus/promql/parser"
 	"github.com/prometheus/prometheus/rules"
 	"github.com/weaveworks/common/user"
+
+	"github.com/grafana/loki/pkg/logproto"
+	"github.com/grafana/loki/pkg/logql"
 )
 
 func LokiDelayedQueryFunc(engine *logql.Engine) ruler.DelayedQueryFunc {
@@ -88,6 +90,7 @@ func MemstoreTenantManager(
 			OutageTolerance: cfg.OutageTolerance,
 			ForGracePeriod:  cfg.ForGracePeriod,
 			ResendDelay:     cfg.ResendDelay,
+			GroupLoader:     groupLoader{},
 		})
 
 		// initialize memStore, bound to the manager's alerting rules
@@ -96,3 +99,25 @@ func MemstoreTenantManager(
 		return mgr
 	})
 }
+
+type groupLoader struct {
+	rules.FileLoader // embed the default and override the parse method for logql queries
+}
+
+func (groupLoader) Parse(query string) (parser.Expr, error) {
+	expr, err := logql.ParseExpr(query)
+	if err != nil {
+		return nil, err
+	}
+
+	return exprAdapter{expr}, nil
+}
+
+// exprAdapter allows logql expressions to be treated as promql expressions by the prometheus rules pkg.
+type exprAdapter struct {
+	logql.Expr
+}
+
+func (exprAdapter) PositionRange() parser.PositionRange { return parser.PositionRange{} }
+func (exprAdapter) PromQLExpr()                         {}
+func (exprAdapter) Type() parser.ValueType              { return parser.ValueType("unimplemented") }
diff --git a/pkg/ruler/ruler.go b/pkg/ruler/ruler.go
new file mode 100644
index 0000000000000..5954cce277699
--- /dev/null
+++ b/pkg/ruler/ruler.go
@@ -0,0 +1,29 @@
+package ruler
+
+import (
+	"github.com/cortexproject/cortex/pkg/ruler"
+	cRules "github.com/cortexproject/cortex/pkg/ruler/rules"
+	"github.com/go-kit/kit/log"
+	"github.com/grafana/loki/pkg/logql"
+	"github.com/grafana/loki/pkg/ruler/manager"
+	"github.com/prometheus/client_golang/prometheus"
+)
+
+type Config struct {
+	ruler.Config `yaml:",inline"`
+}
+
+func NewRuler(cfg Config, engine *logql.Engine, reg prometheus.Registerer, logger log.Logger, ruleStore cRules.RuleStore) (*ruler.Ruler, error) {
+
+	tenantManager := manager.MemstoreTenantManager(
+		cfg.Config,
+		manager.LokiDelayedQueryFunc(engine),
+	)
+	return ruler.NewRuler(
+		cfg.Config,
+		tenantManager,
+		reg,
+		logger,
+		ruleStore,
+	)
+}
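
Aside: the groupLoader/exprAdapter pair above is what lets rule groups carry LogQL instead of PromQL. A minimal sketch (assuming only the logql.ParseExpr call shown in the diff; the query string is made up for illustration) of the parse step the rules manager will now perform:

package main

import (
	"fmt"

	"github.com/grafana/loki/pkg/logql"
)

func main() {
	// A LogQL metric expression the stock PromQL parser would reject; this is
	// why the custom groupLoader.Parse is plugged into rules.ManagerOptions.
	const q = `count_over_time({app="foo"} |= "error"[5m])`

	expr, err := logql.ParseExpr(q)
	if err != nil {
		fmt.Println("invalid LogQL:", err)
		return
	}
	// In the patch this would be returned as exprAdapter{expr}, which satisfies
	// the promql parser.Expr interface with stub methods.
	fmt.Printf("parsed %T\n", expr)
}
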

From 7c9eca452f9e2af246c4a07a89e88d071fbbe2dd Mon Sep 17 00:00:00 2001
From: Owen Diehl 
Date: Thu, 30 Jul 2020 12:14:42 -0400
Subject: [PATCH 28/40] better metrics & logging in ruler

---
 pkg/ruler/manager/compat.go   | 70 +++++++++++++++++------------------
 pkg/ruler/manager/memstore.go | 22 ++++++++---
 pkg/ruler/ruler.go            |  2 +-
 3 files changed, 52 insertions(+), 42 deletions(-)

diff --git a/pkg/ruler/manager/compat.go b/pkg/ruler/manager/compat.go
index f22c17b45da57..f451890e301d2 100644
--- a/pkg/ruler/manager/compat.go
+++ b/pkg/ruler/manager/compat.go
@@ -19,45 +19,45 @@ import (
 	"github.com/grafana/loki/pkg/logql"
 )
 
-func LokiDelayedQueryFunc(engine *logql.Engine) ruler.DelayedQueryFunc {
-	return func(delay time.Duration) rules.QueryFunc {
-		return rules.QueryFunc(func(ctx context.Context, qs string, t time.Time) (promql.Vector, error) {
-			adjusted := t.Add(-delay)
-			params := logql.NewLiteralParams(
-				qs,
-				adjusted,
-				adjusted,
-				0,
-				0,
-				logproto.FORWARD,
-				0,
-				nil,
-			)
-			q := engine.Query(params)
-
-			res, err := q.Exec(ctx)
-			if err != nil {
-				return nil, err
-			}
-			switch v := res.Data.(type) {
-			case promql.Vector:
-				return v, nil
-			case promql.Scalar:
-				return promql.Vector{promql.Sample{
-					Point:  promql.Point(v),
-					Metric: labels.Labels{},
-				}}, nil
-			default:
-				return nil, errors.New("rule result is not a vector or scalar")
-			}
-		})
-	}
+// engineQueryFunc returns a rules.QueryFunc which evaluates the rule expression
+// through the LogQL engine at a timestamp adjusted by the evaluation delay.
+func engineQueryFunc(engine *logql.Engine, delay time.Duration) rules.QueryFunc {
+	return rules.QueryFunc(func(ctx context.Context, qs string, t time.Time) (promql.Vector, error) {
+		adjusted := t.Add(-delay)
+		params := logql.NewLiteralParams(
+			qs,
+			adjusted,
+			adjusted,
+			0,
+			0,
+			logproto.FORWARD,
+			0,
+			nil,
+		)
+		q := engine.Query(params)
+
+		res, err := q.Exec(ctx)
+		if err != nil {
+			return nil, err
+		}
+		switch v := res.Data.(type) {
+		case promql.Vector:
+			return v, nil
+		case promql.Scalar:
+			return promql.Vector{promql.Sample{
+				Point:  promql.Point(v),
+				Metric: labels.Labels{},
+			}}, nil
+		default:
+			return nil, errors.New("rule result is not a vector or scalar")
+		}
+	})
 
 }
 
 func MemstoreTenantManager(
 	cfg ruler.Config,
-	delayedQueryFunc ruler.DelayedQueryFunc,
+	engine *logql.Engine,
 ) ruler.TenantManagerFunc {
 	var metrics *Metrics
 
@@ -75,7 +75,7 @@ func MemstoreTenantManager(
 			metrics = NewMetrics(reg)
 		}
 		logger = log.With(logger, "user", userID)
-		queryFunc := delayedQueryFunc(cfg.EvaluationDelay)
+		queryFunc := engineQueryFunc(engine, cfg.EvaluationDelay)
 		memStore := NewMemStore(userID, queryFunc, metrics, 5*time.Minute, log.With(logger, "subcomponent", "MemStore"))
 
 		mgr := rules.NewManager(&rules.ManagerOptions{
diff --git a/pkg/ruler/manager/memstore.go b/pkg/ruler/manager/memstore.go
index 56b3fdcb98083..a6504fb12a863 100644
--- a/pkg/ruler/manager/memstore.go
+++ b/pkg/ruler/manager/memstore.go
@@ -43,14 +43,14 @@ func ForStateMetric(base labels.Labels, alertName string) labels.Labels {
 }
 
 type Metrics struct {
-	evaluations *prometheus.CounterVec
-	Series      prometheus.Gauge // in memory series
-	Samples     prometheus.Gauge // in memory samples
+	Evaluations *prometheus.CounterVec
+	Samples     prometheus.Gauge       // in memory samples
+	CacheHits   *prometheus.CounterVec // cache hits on in memory samples
 }
 
 func NewMetrics(r prometheus.Registerer) *Metrics {
 	return &Metrics{
-		evaluations: promauto.With(r).NewCounterVec(prometheus.CounterOpts{
+		Evaluations: promauto.With(r).NewCounterVec(prometheus.CounterOpts{
 			Namespace: "loki",
 			Name:      "ruler_memory_for_state_evaluations",
 		}, []string{"status", "tenant"}),
@@ -58,6 +58,10 @@ func NewMetrics(r prometheus.Registerer) *Metrics {
 			Namespace: "loki",
 			Name:      "ruler_memory_samples",
 		}),
+		CacheHits: promauto.With(r).NewCounterVec(prometheus.CounterOpts{
+			Namespace: "loki",
+			Name:      "ruler_memory_for_state_cache_hits",
+		}, []string{"tenant"}),
 	}
 }
 
@@ -216,6 +220,8 @@ func (m *memStoreQuerier) Select(sortSeries bool, params *storage.SelectHints, m
 		return storage.NoopSeriesSet()
 	}
 
+	level.Debug(m.logger).Log("msg", "restoring for state via evaluation", "rule", ruleKey, "tenant", m.MemStore.userID)
+
 	m.mtx.Lock()
 	defer m.mtx.Unlock()
 	cache, ok := m.rules[ruleKey]
@@ -227,7 +233,9 @@ func (m *memStoreQuerier) Select(sortSeries bool, params *storage.SelectHints, m
 	}
 
 	smpl, cached := cache.Get(m.ts, ls)
+	m.metrics.CacheHits.WithLabelValues(m.userID).Inc()
 	if cached {
+		level.Debug(m.logger).Log("msg", "result cached", "rule", ruleKey, "tenant", m.MemStore.userID)
 		// Assuming the result is cached but the desired series is not in the result, it wouldn't be considered active.
 		if smpl == nil {
 			return storage.NoopSeriesSet()
@@ -247,10 +255,12 @@ func (m *memStoreQuerier) Select(sortSeries bool, params *storage.SelectHints, m
 	// that's the only condition under which this is queried (via RestoreForState).
 	vec, err := m.queryFunc(m.ctx, rule.Query().String(), m.ts.Add(-rule.HoldDuration()))
 	if err != nil {
-		m.metrics.evaluations.WithLabelValues(statusFailure, m.userID).Inc()
+		level.Info(m.logger).Log("msg", "error querying for rule", "rule", ruleKey, "tenant", m.MemStore.userID, "err", err.Error())
+		m.metrics.Evaluations.WithLabelValues(statusFailure, m.userID).Inc()
 		return storage.NoopSeriesSet()
 	}
-	m.metrics.evaluations.WithLabelValues(statusSuccess, m.userID).Inc()
+	m.metrics.Evaluations.WithLabelValues(statusSuccess, m.userID).Inc()
+	level.Debug(m.logger).Log("msg", "rule state successfully restored", "rule", ruleKey, "tenant", m.MemStore.userID, "len", len(vec))
 
 	// translate the result into the ALERTS_FOR_STATE series for caching,
 	// considered active & written at the timetamp requested
diff --git a/pkg/ruler/ruler.go b/pkg/ruler/ruler.go
index 5954cce277699..48c9d65fc8fb7 100644
--- a/pkg/ruler/ruler.go
+++ b/pkg/ruler/ruler.go
@@ -17,7 +17,7 @@ func NewRuler(cfg Config, engine *logql.Engine, reg prometheus.Registerer, logge
 
 	tenantManager := manager.MemstoreTenantManager(
 		cfg.Config,
-		manager.LokiDelayedQueryFunc(engine),
+		engine,
 	)
 	return ruler.NewRuler(
 		cfg.Config,
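
Aside: the promql.Scalar branch in engineQueryFunc exists because a LogQL metric query can evaluate to a single scalar while the rules manager always expects a vector. A self-contained sketch of that lifting, using only the promql types compat.go already imports:

package main

import (
	"fmt"

	"github.com/prometheus/prometheus/pkg/labels"
	"github.com/prometheus/prometheus/promql"
)

// scalarToVector mirrors the promql.Scalar case above: the scalar becomes a
// one-sample vector with an empty label set, carrying the scalar's timestamp.
func scalarToVector(s promql.Scalar) promql.Vector {
	return promql.Vector{promql.Sample{
		Point:  promql.Point(s),
		Metric: labels.Labels{},
	}}
}

func main() {
	v := scalarToVector(promql.Scalar{T: 1000, V: 42})
	fmt.Println(len(v), v[0].V) // 1 42
}
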

From 25ef5aa1cd35585657e92f8cd76c091467aabd1b Mon Sep 17 00:00:00 2001
From: Owen Diehl 
Date: Thu, 30 Jul 2020 12:44:16 -0400
Subject: [PATCH 29/40] grpc cortex compatibility in go.mod

---
 go.mod | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/go.mod b/go.mod
index 6ab023268a763..ea821aab19809 100644
--- a/go.mod
+++ b/go.mod
@@ -73,3 +73,6 @@ replace github.com/satori/go.uuid => github.com/satori/go.uuid v1.2.0
 
 // Use fork of gocql that has gokit logs and Prometheus metrics.
 replace github.com/gocql/gocql => github.com/grafana/gocql v0.0.0-20200605141915-ba5dc39ece85
+
+// cortex compatibility
+replace google.golang.org/grpc => google.golang.org/grpc v1.29.1
\ No newline at end of file

From 94b80caa511be78d8caf98d2233f1fda77b7f034 Mon Sep 17 00:00:00 2001
From: Owen Diehl 
Date: Thu, 30 Jul 2020 12:56:57 -0400
Subject: [PATCH 30/40] cortex vendoring compat

---
 pkg/querier/queryrange/roundtrip.go | 1 +
 pkg/storage/store.go                | 2 +-
 pkg/storage/util_test.go            | 2 +-
 3 files changed, 3 insertions(+), 2 deletions(-)

diff --git a/pkg/querier/queryrange/roundtrip.go b/pkg/querier/queryrange/roundtrip.go
index bb546d1309b4e..fa799fa9ef7be 100644
--- a/pkg/querier/queryrange/roundtrip.go
+++ b/pkg/querier/queryrange/roundtrip.go
@@ -335,6 +335,7 @@ func NewMetricTripperware(
 			codec,
 			extractor,
 			nil,
+			nil,
 		)
 		if err != nil {
 			return nil, nil, err
diff --git a/pkg/storage/store.go b/pkg/storage/store.go
index 7dcd5e8b107fb..381debf160ede 100644
--- a/pkg/storage/store.go
+++ b/pkg/storage/store.go
@@ -51,7 +51,7 @@ type store struct {
 
 // NewStore creates a new Loki Store using configuration supplied.
 func NewStore(cfg Config, storeCfg chunk.StoreConfig, schemaCfg chunk.SchemaConfig, limits storage.StoreLimits, registerer prometheus.Registerer) (Store, error) {
-	s, err := storage.NewStore(cfg.Config, storeCfg, schemaCfg, limits, registerer, nil)
+	s, err := storage.NewStore(cfg.Config, storeCfg, schemaCfg, limits, registerer, nil, nil)
 	if err != nil {
 		return nil, err
 	}
diff --git a/pkg/storage/util_test.go b/pkg/storage/util_test.go
index 15771cd6255ea..e713f83668218 100644
--- a/pkg/storage/util_test.go
+++ b/pkg/storage/util_test.go
@@ -199,7 +199,7 @@ func (m *mockChunkStore) GetChunkRefs(ctx context.Context, userID string, from,
 		refs = append(refs, r)
 	}
 
-	cache, err := cache.New(cache.Config{Prefix: "chunks"})
+	cache, err := cache.New(cache.Config{Prefix: "chunks"}, nil, nil)
 	if err != nil {
 		panic(err)
 	}

From 72dbbd8a82f89951447df08328b0b5912b60789b Mon Sep 17 00:00:00 2001
From: Owen Diehl 
Date: Thu, 30 Jul 2020 13:45:07 -0400
Subject: [PATCH 31/40] increments memory cache hits only if cached

---
 pkg/querier/queryrange/.#roundtrip.go | 1 +
 pkg/ruler/manager/memstore.go         | 2 +-
 2 files changed, 2 insertions(+), 1 deletion(-)
 create mode 120000 pkg/querier/queryrange/.#roundtrip.go

diff --git a/pkg/querier/queryrange/.#roundtrip.go b/pkg/querier/queryrange/.#roundtrip.go
new file mode 120000
index 0000000000000..0f53fe955a53f
--- /dev/null
+++ b/pkg/querier/queryrange/.#roundtrip.go
@@ -0,0 +1 @@
+owendiehl@Owens-MBP.fios-router.home.85533
\ No newline at end of file
diff --git a/pkg/ruler/manager/memstore.go b/pkg/ruler/manager/memstore.go
index a6504fb12a863..fbbcb8b3369bf 100644
--- a/pkg/ruler/manager/memstore.go
+++ b/pkg/ruler/manager/memstore.go
@@ -233,8 +233,8 @@ func (m *memStoreQuerier) Select(sortSeries bool, params *storage.SelectHints, m
 	}
 
 	smpl, cached := cache.Get(m.ts, ls)
-	m.metrics.CacheHits.WithLabelValues(m.userID).Inc()
 	if cached {
+		m.metrics.CacheHits.WithLabelValues(m.userID).Inc()
 		level.Debug(m.logger).Log("msg", "result cached", "rule", ruleKey, "tenant", m.MemStore.userID)
 		// Assuming the result is cached but the desired series is not in the result, it wouldn't be considered active.
 		if smpl == nil {

From 04e6950e2ca28b361997125c74551310b5393f1a Mon Sep 17 00:00:00 2001
From: Owen Diehl 
Date: Thu, 30 Jul 2020 14:52:03 -0400
Subject: [PATCH 32/40] loki in memory metrics use prometheus default
 registerer

---
 pkg/ruler/manager/compat.go | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/pkg/ruler/manager/compat.go b/pkg/ruler/manager/compat.go
index f451890e301d2..2fa394c5da7bb 100644
--- a/pkg/ruler/manager/compat.go
+++ b/pkg/ruler/manager/compat.go
@@ -69,10 +69,10 @@ func MemstoreTenantManager(
 		reg prometheus.Registerer,
 	) *rules.Manager {
 
-		// Note: this currently does not distinguish between different managers between tenants,
-		// but is used solely to prevent re-registering the same metrics.
+		// We'll ignore the passed registerer and use the default registerer to avoid prefix issues and other weirdness.
+		// This closure prevents re-registering.
 		if metrics == nil {
-			metrics = NewMetrics(reg)
+			metrics = NewMetrics(prometheus.DefaultRegisterer)
 		}
 		logger = log.With(logger, "user", userID)
 		queryFunc := engineQueryFunc(engine, cfg.EvaluationDelay)
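
Aside: MemstoreTenantManager returns a factory that runs once per tenant, so the captured metrics variable makes NewMetrics run only on the first call and the collectors register exactly once on the default registerer. A generic sketch of that register-once shape (all names here are hypothetical):

package main

import (
	"fmt"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promauto"
)

type tenantMetrics struct {
	evaluations prometheus.Counter
}

// newFactory returns a per-tenant constructor; the shared metrics value is
// created lazily on the first call and reused, so the collector is registered
// once even though the factory is invoked for every tenant.
func newFactory(reg prometheus.Registerer) func(tenant string) *tenantMetrics {
	var shared *tenantMetrics
	return func(tenant string) *tenantMetrics {
		if shared == nil {
			shared = &tenantMetrics{
				evaluations: promauto.With(reg).NewCounter(prometheus.CounterOpts{
					Name: "example_evaluations_total",
				}),
			}
		}
		return shared
	}
}

func main() {
	factory := newFactory(prometheus.NewRegistry())
	fmt.Println(factory("tenant-a") == factory("tenant-b")) // true: one shared instance
}
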

From 262e3948dcaee6c80e685dc5d7e51d372addc40a Mon Sep 17 00:00:00 2001
From: Owen Diehl 
Date: Thu, 30 Jul 2020 15:12:28 -0400
Subject: [PATCH 33/40] ruler only depends on ring

---
 pkg/loki/loki.go | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/pkg/loki/loki.go b/pkg/loki/loki.go
index 7f9ba110b914d..3766d9eb0d78d 100644
--- a/pkg/loki/loki.go
+++ b/pkg/loki/loki.go
@@ -319,7 +319,7 @@ func (t *Loki) setupModuleManager() error {
 		Ingester:      {Store, Server, MemberlistKV},
 		Querier:       {Store, Ring, Server},
 		QueryFrontend: {Server, Overrides},
-		Ruler:         {Distributor, Store, RulerStorage},
+		Ruler:         {Ring, Server, Store, RulerStorage},
 		TableManager:  {Server},
 		All:           {Querier, Ingester, Distributor, TableManager, Ruler},
 	}

From cabf036ea6d034046aacc6542b7f4c8dd9bad0ec Mon Sep 17 00:00:00 2001
From: Owen Diehl 
Date: Fri, 31 Jul 2020 08:32:06 -0400
Subject: [PATCH 34/40] managerfactory rename

---
 pkg/ruler/manager/compat.go | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/pkg/ruler/manager/compat.go b/pkg/ruler/manager/compat.go
index 2fa394c5da7bb..36bfe674baeae 100644
--- a/pkg/ruler/manager/compat.go
+++ b/pkg/ruler/manager/compat.go
@@ -58,10 +58,10 @@ func engineQueryFunc(engine *logql.Engine, delay time.Duration) rules.QueryFunc
 func MemstoreTenantManager(
 	cfg ruler.Config,
 	engine *logql.Engine,
-) ruler.TenantManagerFunc {
+) ruler.ManagerFactory {
 	var metrics *Metrics
 
-	return ruler.TenantManagerFunc(func(
+	return func(
 		ctx context.Context,
 		userID string,
 		notifier *notifier.Manager,
@@ -97,7 +97,7 @@ func MemstoreTenantManager(
 		memStore.Start(mgr)
 
 		return mgr
-	})
+	}
 }
 
 type groupLoader struct {

From f384d8e3232e6b26986689abf8cfd9daf3f774ca Mon Sep 17 00:00:00 2001
From: Owen Diehl 
Date: Fri, 31 Jul 2020 10:16:17 -0400
Subject: [PATCH 35/40] revendors cortex

---
 go.mod                                        |   6 +-
 go.sum                                        |  42 +----
 .../cortexproject/cortex/pkg/api/api.go       |   2 +-
 .../cortexproject/cortex/pkg/api/queryable.go | 158 ++++++++++++++++++
 .../cortex/pkg/chunk/cache/background.go      |  38 ++---
 .../cortex/pkg/chunk/cache/cache.go           |  19 ++-
 .../cortex/pkg/chunk/cache/fifo_cache.go      | 138 ++++++++-------
 .../cortex/pkg/chunk/cache/instrumented.go    |  75 ++++-----
 .../cortex/pkg/chunk/cache/memcached.go       |  33 ++--
 .../pkg/chunk/cache/memcached_client.go       |  27 +--
 .../cortex/pkg/chunk/cache/redis_cache.go     |  17 +-
 .../cortex/pkg/chunk/cache/snappy.go          |  13 +-
 .../cortex/pkg/chunk/chunk_store_utils.go     |   1 +
 .../cortex/pkg/chunk/schema_config.go         |  28 +++-
 .../pkg/chunk/storage/caching_fixtures.go     |   9 +-
 .../pkg/chunk/storage/caching_index_client.go |  10 +-
 .../cortex/pkg/chunk/storage/factory.go       |  25 ++-
 .../cortex/pkg/compactor/compactor.go         |   6 +-
 .../cortexproject/cortex/pkg/cortex/cortex.go |   6 +-
 .../cortex/pkg/cortex/modules.go              |  33 ++--
 .../cortex/pkg/distributor/query.go           |   9 +-
 .../cortex/pkg/ingester/flush.go              |  39 +++--
 .../cortex/pkg/ingester/ingester.go           |  26 +--
 .../cortex/pkg/ingester/ingester_v2.go        |  44 ++---
 .../cortex/pkg/ingester/mapper.go             |   7 +-
 .../cortex/pkg/ingester/metrics.go            |  13 +-
 .../cortexproject/cortex/pkg/ingester/rate.go |  13 +-
 .../cortex/pkg/ingester/series_map.go         |  10 +-
 .../cortex/pkg/ingester/transfer.go           |  16 +-
 .../cortexproject/cortex/pkg/querier/block.go |   3 +-
 .../pkg/querier/blocks_store_queryable.go     |  18 +-
 .../pkg/querier/chunk_store_queryable.go      |  15 +-
 .../pkg/querier/distributor_queryable.go      |  92 ++++++----
 .../cortex/pkg/querier/frontend/frontend.go   |  10 +-
 .../cortex/pkg/querier/querier.go             |   4 +-
 .../cortex/pkg/querier/queryrange/limits.go   |   2 +-
 .../pkg/querier/queryrange/results_cache.go   |   4 +-
 .../pkg/querier/queryrange/roundtrip.go       |   2 +-
 .../cortex/pkg/ring/basic_lifecycler.go       |  12 +-
 .../cortexproject/cortex/pkg/ring/batch.go    |  25 +--
 .../pkg/ring/kv/memberlist/tcp_transport.go   |   8 +-
 .../cortex/pkg/ring/lifecycler.go             |   3 +
 .../cortexproject/cortex/pkg/ruler/compat.go  |  59 ++++++-
 .../cortex/pkg/ruler/manager_metrics.go       | 149 +++++++++++++++++
 .../cortexproject/cortex/pkg/ruler/ruler.go   |  80 ++++-----
 .../pkg/storage/backend/azure/config.go       |   2 +-
 .../pkg/storage/backend/filesystem/config.go  |   2 +-
 .../cortex/pkg/storage/backend/gcs/config.go  |   2 +-
 .../cortex/pkg/storage/backend/s3/config.go   |   2 +-
 .../cortex/pkg/storage/tsdb/bucket_client.go  |   2 +-
 .../cortex/pkg/storage/tsdb/config.go         | 145 +++++++++-------
 .../cortex/pkg/storage/tsdb/index_cache.go    |   2 +-
 .../cortex/pkg/storage/tsdb/ref_cache.go      |  16 +-
 .../pkg/storegateway/bucket_store_metrics.go  |   7 +
 .../cortex/pkg/storegateway/bucket_stores.go  |  17 +-
 .../cortex/pkg/storegateway/gateway.go        |  13 +-
 .../cortexproject/cortex/pkg/util/events.go   |   6 +-
 .../cortex/pkg/util/metrics_helper.go         |  18 ++
 .../cortex/pkg/util/validation/limits.go      |   6 +-
 .../thanos-io/thanos/pkg/block/fetcher.go     |  18 ++
 .../thanos-io/thanos/pkg/compact/compact.go   |   3 +-
 .../thanos/pkg/discovery/dns/provider.go      |   3 +-
 .../thanos-io/thanos/pkg/gate/gate.go         |   2 +-
 .../thanos/pkg/objstore/swift/swift.go        |  28 ++--
 .../thanos/pkg/rules/rulespb/custom.go        |  15 +-
 .../thanos-io/thanos/pkg/store/bucket.go      |  48 +++---
 .../thanos-io/thanos/pkg/store/limiter.go     |  39 ++++-
 .../thanos-io/thanos/pkg/store/local.go       |  13 +-
 .../thanos-io/thanos/pkg/store/multitsdb.go   |  94 ++++++-----
 .../thanos/pkg/store/postings_codec.go        |  14 +-
 .../thanos-io/thanos/pkg/store/prometheus.go  |  14 +-
 .../thanos-io/thanos/pkg/store/proxy.go       |  57 ++++---
 .../thanos-io/thanos/pkg/store/tsdb.go        |  28 ++--
 vendor/modules.txt                            |   9 +-
 74 files changed, 1252 insertions(+), 722 deletions(-)
 create mode 100644 vendor/github.com/cortexproject/cortex/pkg/api/queryable.go
 create mode 100644 vendor/github.com/cortexproject/cortex/pkg/ruler/manager_metrics.go

diff --git a/go.mod b/go.mod
index ea821aab19809..7a1f4fc90f47e 100644
--- a/go.mod
+++ b/go.mod
@@ -9,7 +9,7 @@ require (
 	github.com/cespare/xxhash/v2 v2.1.1
 	github.com/containerd/fifo v0.0.0-20190226154929-a9fb20d87448 // indirect
 	github.com/coreos/go-systemd v0.0.0-20191104093116-d3cd4ed1dbcf
-	github.com/cortexproject/cortex v1.2.1-0.20200727121049-4cfa4a2978c2
+	github.com/cortexproject/cortex v1.2.1-0.20200731141046-75cc6c827e31
 	github.com/davecgh/go-spew v1.1.1
 	github.com/docker/docker v17.12.0-ce-rc1.0.20200706150819-a40b877fbb9e+incompatible
 	github.com/docker/go-metrics v0.0.0-20181218153428-b84716841b82 // indirect
@@ -55,7 +55,7 @@ require (
 	github.com/weaveworks/common v0.0.0-20200625145055-4b1847531bc9
 	go.etcd.io/bbolt v1.3.5-0.20200615073812-232d8fc87f50
 	golang.org/x/net v0.0.0-20200707034311-ab3426394381
-	google.golang.org/grpc v1.29.1
+	google.golang.org/grpc v1.30.0
 	gopkg.in/alecthomas/kingpin.v2 v2.2.6
 	gopkg.in/fsnotify.v1 v1.4.7
 	gopkg.in/yaml.v2 v2.3.0
@@ -75,4 +75,4 @@ replace github.com/satori/go.uuid => github.com/satori/go.uuid v1.2.0
 replace github.com/gocql/gocql => github.com/grafana/gocql v0.0.0-20200605141915-ba5dc39ece85
 
 // cortex compatibility
-replace google.golang.org/grpc => google.golang.org/grpc v1.29.1
\ No newline at end of file
+replace google.golang.org/grpc => google.golang.org/grpc v1.29.1
diff --git a/go.sum b/go.sum
index ac90601292801..dc6e2f4f65e7a 100644
--- a/go.sum
+++ b/go.sum
@@ -1,4 +1,3 @@
-cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
 cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
 cloud.google.com/go v0.37.4/go.mod h1:NHPJ89PdicEuT9hdPXMROBD91xc5uRDxsMtSB16k7hw=
 cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU=
@@ -208,7 +207,6 @@ github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMn
 github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible/go.mod h1:nmEj6Dob7S7YxXgwXpfOuvO54S+tGdZdw9fuRZt25Ag=
 github.com/circonus-labs/circonusllhist v0.1.3/go.mod h1:kMXHVDlOchFAehlya5ePtbp5jckzBHf4XRpQvBOLI+I=
 github.com/clbanning/x2j v0.0.0-20191024224557-825249438eec/go.mod h1:jMjuTZXRI4dUb/I5gc9Hdhagfvm9+RyrPryS/auMzxE=
-github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
 github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
 github.com/cockroachdb/apd v1.1.0/go.mod h1:8Sl8LxpKi29FqWXR16WEFZRNSz3SoPzUzeMeY4+DwBQ=
 github.com/cockroachdb/cockroach-go v0.0.0-20181001143604-e0a95dfd547c/go.mod h1:XGLbWH/ujMcbPbhZq52Nv6UrCghb1yGn//133kEsvDk=
@@ -235,8 +233,8 @@ github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf/go.mod h1:E3G3o1h8I7cfc
 github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f h1:lBNOc5arjvs8E5mO2tbpBpLoyyu8B6e44T7hJy6potg=
 github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA=
 github.com/cortexproject/cortex v0.6.1-0.20200228110116-92ab6cbe0995/go.mod h1:3Xa3DjJxtpXqxcMGdk850lcIRb81M0fyY1MQ6udY134=
-github.com/cortexproject/cortex v1.2.1-0.20200727121049-4cfa4a2978c2 h1:KseIJ2j4OJ8Vt9B2dpUyAgqgoeoRtFxLydxabmTToDg=
-github.com/cortexproject/cortex v1.2.1-0.20200727121049-4cfa4a2978c2/go.mod h1:zBfkUqePbDsIbPaClWi31N3wC93h76vu0ONPNYQitCs=
+github.com/cortexproject/cortex v1.2.1-0.20200731141046-75cc6c827e31 h1:RDz0AkRqajiaoxi5lxuGRDSLUTpovG+9umHE1YTiUaw=
+github.com/cortexproject/cortex v1.2.1-0.20200731141046-75cc6c827e31/go.mod h1:rruYhu6DND/XxrneQki4iPNJVmgAH3StJ6ThseI4WoM=
 github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
 github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY=
 github.com/cznic/b v0.0.0-20180115125044-35e9bbe41f07/go.mod h1:URriBxXwVq5ijiJ12C7iIZqlA69nTlI+LgI6/pwftG8=
@@ -301,9 +299,6 @@ github.com/elazarl/goproxy v0.0.0-20170405201442-c4fc26588b6e/go.mod h1:/Zj4wYkg
 github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc=
 github.com/ema/qdisc v0.0.0-20190904071900-b82c76788043/go.mod h1:ix4kG2zvdUd8kEKSW0ZTr1XLks0epFpI4j745DXxlNE=
 github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs=
-github.com/envoyproxy/go-control-plane v0.6.9/go.mod h1:SBwIajubJHhxtWwsL9s8ss4safvEdbitLhGGK48rN6g=
-github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
-github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
 github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98=
 github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
 github.com/evanphx/json-patch v4.2.0+incompatible h1:fUDGZCv/7iAN7u0puUVhvKCcsR6vRfwrJatElLBEf0I=
@@ -763,7 +758,6 @@ github.com/lightstep/lightstep-tracer-go v0.18.0/go.mod h1:jlF1pusYV4pidLvZ+XD0U
 github.com/lightstep/lightstep-tracer-go v0.18.1/go.mod h1:jlF1pusYV4pidLvZ+XD0UBX0ZE6WURAspgAczcDHrL4=
 github.com/lovoo/gcloud-opentracing v0.3.0/go.mod h1:ZFqk2y38kMDDikZPAK7ynTTGuyt17nSPdS3K5e+ZTBY=
 github.com/lufia/iostat v1.1.0/go.mod h1:rEPNA0xXgjHQjuI5Cy05sLlS2oRcSlWHRLrvh/AQ+Pg=
-github.com/lyft/protoc-gen-validate v0.0.13/go.mod h1:XbGvPuh87YZc5TdIa2/I4pLk0QoUACkjt2znoq26NVQ=
 github.com/mailru/easyjson v0.0.0-20160728113105-d5b7844b561a/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
 github.com/mailru/easyjson v0.0.0-20180823135443-60711f1a8329/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
 github.com/mailru/easyjson v0.0.0-20190312143242-1de009706dbe/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
@@ -995,7 +989,7 @@ github.com/prometheus/prometheus v0.0.0-20180315085919-58e2a31db8de/go.mod h1:oA
 github.com/prometheus/prometheus v0.0.0-20190818123050-43acd0e2e93f/go.mod h1:rMTlmxGCvukf2KMu3fClMDKLLoJ5hl61MhcJ7xKakf0=
 github.com/prometheus/prometheus v1.8.2-0.20200107122003-4708915ac6ef/go.mod h1:7U90zPoLkWjEIQcy/rweQla82OCTUzxVHE51G3OhJbI=
 github.com/prometheus/prometheus v1.8.2-0.20200213233353-b90be6f32a33/go.mod h1:fkIPPkuZnkXyopYHmXPxf9rgiPkVgZCN8w9o8+UgBlY=
-github.com/prometheus/prometheus v1.8.2-0.20200619100132-74207c04655e/go.mod h1:QV6T0PPQi5UFmqcLBJw3JiyIR8r1O7KEv9qlVw4VV40=
+github.com/prometheus/prometheus v1.8.2-0.20200707115909-30505a202a4c/go.mod h1:/kMSPIRsxr/apyHxlzYMdFnaPXUXXqILU5uzIoNhOvc=
 github.com/prometheus/prometheus v1.8.2-0.20200722151933-4a8531a64b32/go.mod h1:+/y4DzJ62qmhy0o/H4PtXegRXw+80E8RVRHhLbv+bkM=
 github.com/prometheus/prometheus v1.8.2-0.20200727090838-6f296594a852 h1:aRBuOcI/bN5f/UqmIGn8CajY6W0mPBEajK8q+SFgNZY=
 github.com/prometheus/prometheus v1.8.2-0.20200727090838-6f296594a852/go.mod h1:yzkxU+U4d5ZgVH/ywg/zONKN91UPLKsKCYkcyGOBH18=
@@ -1088,8 +1082,8 @@ github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81P
 github.com/stretchr/testify v1.5.1 h1:nOGnQDM7FYENwehXlg/kFVnos3rEvtKTjRvOWSzb6H4=
 github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
 github.com/thanos-io/thanos v0.8.1-0.20200109203923-552ffa4c1a0d/go.mod h1:usT/TxtJQ7DzinTt+G9kinDQmRS5sxwu0unVKZ9vdcw=
-github.com/thanos-io/thanos v0.13.1-0.20200625180332-f078faed1b96 h1:McsluZ8fXVwGbdXsZ20uZNGukmPycDU9m6df64S2bqQ=
-github.com/thanos-io/thanos v0.13.1-0.20200625180332-f078faed1b96/go.mod h1:VuNcGvUE0u57S1XXqYhf0dQzUO3wUnw2B5IKsju+1z4=
+github.com/thanos-io/thanos v0.13.1-0.20200722150410-6485769a1350 h1:yAiiVhWtQuic/JWFU6jSvBE/LR1GbSwBRgjnEYqemFI=
+github.com/thanos-io/thanos v0.13.1-0.20200722150410-6485769a1350/go.mod h1:m/jG5dV9LQ+rcKdCDQgrSxqQz+vDddqa+Zn4FRoXDbo=
 github.com/tidwall/pretty v0.0.0-20180105212114-65a9db5fad51/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk=
 github.com/tidwall/pretty v1.0.0 h1:HsD+QiTn7sK6flMKIvNmpqz1qrpP3Ps6jOKIKMooyg4=
 github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk=
@@ -1225,7 +1219,6 @@ golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMk
 golang.org/x/image v0.0.0-20180708004352-c73c2afc3b81/go.mod h1:ux5Hcp/YLpHSI86hEcLt0YII63i6oz57MZXIpbrjZUs=
 golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js=
 golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
-golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
 golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
 golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
 golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
@@ -1247,7 +1240,6 @@ golang.org/x/mod v0.3.0 h1:RM4zey1++hCTbCVQfnWeKs9/IEsaBLA8vTkd0WVtmH4=
 golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
 golang.org/x/net v0.0.0-20170114055629-f2499483f923/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
 golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
 golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
 golang.org/x/net v0.0.0-20181005035420-146acd28ed58/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
 golang.org/x/net v0.0.0-20181023162649-9b4f9f5ad519/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
@@ -1309,7 +1301,6 @@ golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208 h1:qwRHBd0NqMbJxfbotnDhm2By
 golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sys v0.0.0-20170830134202-bb24a47a89ea/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
 golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
-golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
 golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
 golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
 golang.org/x/sys v0.0.0-20181026203630-95b1ffbd15a5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
@@ -1368,7 +1359,6 @@ golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7w
 golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20200420163511-1957bb5e6d1f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200602225109-6fdc65e7d980/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20200724161237-0e2f3a69832c h1:UIcGWL6/wpCfyGuJnRFJRurA+yj8RrW7Q6x2YMCXt6c=
@@ -1389,11 +1379,9 @@ golang.org/x/time v0.0.0-20200630173020-3af7569d3a1e h1:EHBhcS0mlXEAVwNyO2dLfjTo
 golang.org/x/time v0.0.0-20200630173020-3af7569d3a1e/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
 golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
 golang.org/x/tools v0.0.0-20180525024113-a5b4c53f6e8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
-golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
 golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
 golang.org/x/tools v0.0.0-20181011042414-1f849cf54d09/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
 golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
-golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
 golang.org/x/tools v0.0.0-20190118193359-16909d206f00/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
 golang.org/x/tools v0.0.0-20190125232054-d66bd3c5d5a6/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
 golang.org/x/tools v0.0.0-20190206041539-40960b6deb8e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
@@ -1475,7 +1463,6 @@ google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/
 google.golang.org/api v0.26.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE=
 google.golang.org/api v0.29.0 h1:BaiDisFir8O4IJxvAabCGGkQ6yCJegNQqSVoYUNAnbk=
 google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM=
-google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
 google.golang.org/appengine v1.2.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
 google.golang.org/appengine v1.3.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
 google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
@@ -1515,24 +1502,6 @@ google.golang.org/genproto v0.0.0-20200603110839-e855014d5736/go.mod h1:jDfRM7Fc
 google.golang.org/genproto v0.0.0-20200710124503-20a17af7bd0e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
 google.golang.org/genproto v0.0.0-20200724131911-43cab4749ae7 h1:AWgNCmk2V5HZp9AiCDRBExX/b9I0Ey9F8STHDZlhCC4=
 google.golang.org/genproto v0.0.0-20200724131911-43cab4749ae7/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
-google.golang.org/grpc v1.14.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw=
-google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs=
-google.golang.org/grpc v1.18.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs=
-google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
-google.golang.org/grpc v1.20.0/go.mod h1:chYK+tFQF0nDUGJgXMSgLCQk3phJEuONr2DCgLDdAQM=
-google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=
-google.golang.org/grpc v1.21.0/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
-google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
-google.golang.org/grpc v1.22.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
-google.golang.org/grpc v1.22.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
-google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
-google.golang.org/grpc v1.23.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
-google.golang.org/grpc v1.24.0/go.mod h1:XDChyiUovWa60DnaeDeZmSW86xtLtjtZbwvSiRnRtcA=
-google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY=
-google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
-google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
-google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
-google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60=
 google.golang.org/grpc v1.29.1 h1:EC2SB8S04d2r73uptxphDSUG+kTKVgjRPF+N3xpxRB4=
 google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk=
 google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
@@ -1587,7 +1556,6 @@ gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776 h1:tQIYjPdBoyREyB9XMu+nnTclp
 gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
 gotest.tools v2.2.0+incompatible h1:VsBPFP1AI068pPrMxtb/S8Zkgf9xEmTLJjfM+P5UIEo=
 gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw=
-honnef.co/go/tools v0.0.0-20180728063816-88497007e858/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
 honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
 honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
 honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
diff --git a/vendor/github.com/cortexproject/cortex/pkg/api/api.go b/vendor/github.com/cortexproject/cortex/pkg/api/api.go
index 4238dcad5d326..45db017daf4b9 100644
--- a/vendor/github.com/cortexproject/cortex/pkg/api/api.go
+++ b/vendor/github.com/cortexproject/cortex/pkg/api/api.go
@@ -269,7 +269,7 @@ func (a *API) RegisterQuerier(
 ) http.Handler {
 	api := v1.NewAPI(
 		engine,
-		queryable,
+		errorTranslateQueryable{queryable}, // Translate errors to errors expected by API.
 		func(context.Context) v1.TargetRetriever { return &querier.DummyTargetRetriever{} },
 		func(context.Context) v1.AlertmanagerRetriever { return &querier.DummyAlertmanagerRetriever{} },
 		func() config.Config { return config.Config{} },
diff --git a/vendor/github.com/cortexproject/cortex/pkg/api/queryable.go b/vendor/github.com/cortexproject/cortex/pkg/api/queryable.go
new file mode 100644
index 0000000000000..8682872a47e10
--- /dev/null
+++ b/vendor/github.com/cortexproject/cortex/pkg/api/queryable.go
@@ -0,0 +1,158 @@
+package api
+
+import (
+	"context"
+
+	"github.com/gogo/status"
+	"github.com/pkg/errors"
+	"github.com/prometheus/prometheus/pkg/labels"
+	"github.com/prometheus/prometheus/promql"
+	"github.com/prometheus/prometheus/storage"
+
+	"github.com/cortexproject/cortex/pkg/chunk"
+)
+
+func translateError(err error) error {
+	if err == nil {
+		return err
+	}
+
+	// vendor/github.com/prometheus/prometheus/web/api/v1/api.go, respondError function only accepts
+	// *apiError types.
+	// Translation of error to *apiError happens in vendor/github.com/prometheus/prometheus/web/api/v1/api.go, returnAPIError method.
+	// It only supports:
+	// promql.ErrQueryCanceled, mapped to 503
+	// promql.ErrQueryTimeout, mapped to 503
+	// promql.ErrStorage mapped to 500
+	// anything else is mapped to 422
+
+	switch errors.Cause(err).(type) {
+	case promql.ErrStorage, promql.ErrTooManySamples, promql.ErrQueryCanceled, promql.ErrQueryTimeout:
+		// Don't translate those, just in case we use them internally.
+		return err
+	case chunk.QueryError:
+		// This will be returned with status code 422 by Prometheus API.
+		return err
+	default:
+		if errors.Is(err, context.Canceled) {
+			return err // 422
+		}
+
+		s, ok := status.FromError(err)
+		if ok {
+			code := s.Code()
+
+			// Treat these as HTTP status codes, even though they are supposed to be grpc codes.
+			if code >= 400 && code < 500 {
+				// Return directly, will be mapped to 422
+				return err
+			} else if code >= 500 && code < 599 {
+				// Wrap into ErrStorage for mapping to 500
+				return promql.ErrStorage{Err: err}
+			}
+		}
+
+		// All other errors will be returned as 500.
+		return promql.ErrStorage{Err: err}
+	}
+}
+
+type errorTranslateQueryable struct {
+	q storage.SampleAndChunkQueryable
+}
+
+func (e errorTranslateQueryable) Querier(ctx context.Context, mint, maxt int64) (storage.Querier, error) {
+	q, err := e.q.Querier(ctx, mint, maxt)
+	return errorTranslateQuerier{q: q}, translateError(err)
+}
+
+func (e errorTranslateQueryable) ChunkQuerier(ctx context.Context, mint, maxt int64) (storage.ChunkQuerier, error) {
+	q, err := e.q.ChunkQuerier(ctx, mint, maxt)
+	return errorTranslateChunkQuerier{q: q}, translateError(err)
+}
+
+type errorTranslateQuerier struct {
+	q storage.Querier
+}
+
+func (e errorTranslateQuerier) LabelValues(name string) ([]string, storage.Warnings, error) {
+	values, warnings, err := e.q.LabelValues(name)
+	return values, warnings, translateError(err)
+}
+
+func (e errorTranslateQuerier) LabelNames() ([]string, storage.Warnings, error) {
+	values, warnings, err := e.q.LabelNames()
+	return values, warnings, translateError(err)
+}
+
+func (e errorTranslateQuerier) Close() error {
+	return translateError(e.q.Close())
+}
+
+func (e errorTranslateQuerier) Select(sortSeries bool, hints *storage.SelectHints, matchers ...*labels.Matcher) storage.SeriesSet {
+	s := e.q.Select(sortSeries, hints, matchers...)
+	return errorTranslateSeriesSet{s}
+}
+
+type errorTranslateChunkQuerier struct {
+	q storage.ChunkQuerier
+}
+
+func (e errorTranslateChunkQuerier) LabelValues(name string) ([]string, storage.Warnings, error) {
+	values, warnings, err := e.q.LabelValues(name)
+	return values, warnings, translateError(err)
+}
+
+func (e errorTranslateChunkQuerier) LabelNames() ([]string, storage.Warnings, error) {
+	values, warnings, err := e.q.LabelNames()
+	return values, warnings, translateError(err)
+}
+
+func (e errorTranslateChunkQuerier) Close() error {
+	return translateError(e.q.Close())
+}
+
+func (e errorTranslateChunkQuerier) Select(sortSeries bool, hints *storage.SelectHints, matchers ...*labels.Matcher) storage.ChunkSeriesSet {
+	s := e.q.Select(sortSeries, hints, matchers...)
+	return errorTranslateChunkSeriesSet{s}
+}
+
+type errorTranslateSeriesSet struct {
+	s storage.SeriesSet
+}
+
+func (e errorTranslateSeriesSet) Next() bool {
+	return e.s.Next()
+}
+
+func (e errorTranslateSeriesSet) At() storage.Series {
+	return e.s.At()
+}
+
+func (e errorTranslateSeriesSet) Err() error {
+	return translateError(e.s.Err())
+}
+
+func (e errorTranslateSeriesSet) Warnings() storage.Warnings {
+	return e.s.Warnings()
+}
+
+type errorTranslateChunkSeriesSet struct {
+	s storage.ChunkSeriesSet
+}
+
+func (e errorTranslateChunkSeriesSet) Next() bool {
+	return e.s.Next()
+}
+
+func (e errorTranslateChunkSeriesSet) At() storage.ChunkSeries {
+	return e.s.At()
+}
+
+func (e errorTranslateChunkSeriesSet) Err() error {
+	return translateError(e.s.Err())
+}
+
+func (e errorTranslateChunkSeriesSet) Warnings() storage.Warnings {
+	return e.s.Warnings()
+}
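
Aside: the point of translateError above is that the Prometheus API maps only a handful of error types to HTTP status codes, so storage and gRPC failures must be coerced into one of them. A self-contained sketch of the same mapping using the standard grpc status package (codes and messages are made up):

package main

import (
	"fmt"

	"github.com/prometheus/prometheus/promql"
	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

// translate mimics the shape of translateError: codes in the 4xx range pass
// through (the API answers 422), everything else is wrapped in
// promql.ErrStorage so the API answers 500.
func translate(err error) error {
	if s, ok := status.FromError(err); ok {
		if c := int(s.Code()); c >= 400 && c < 500 {
			return err
		}
	}
	return promql.ErrStorage{Err: err}
}

func main() {
	limited := status.Error(codes.Code(429), "query too large")   // hypothetical error
	failing := status.Error(codes.Code(500), "store unavailable") // hypothetical error
	fmt.Printf("%T %T\n", translate(limited), translate(failing))
}
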
diff --git a/vendor/github.com/cortexproject/cortex/pkg/chunk/cache/background.go b/vendor/github.com/cortexproject/cortex/pkg/chunk/cache/background.go
index 994c49b745239..bfdfb748d894b 100644
--- a/vendor/github.com/cortexproject/cortex/pkg/chunk/cache/background.go
+++ b/vendor/github.com/cortexproject/cortex/pkg/chunk/cache/background.go
@@ -11,19 +11,6 @@ import (
 	"github.com/prometheus/client_golang/prometheus/promauto"
 )
 
-var (
-	droppedWriteBack = promauto.NewCounterVec(prometheus.CounterOpts{
-		Namespace: "cortex",
-		Name:      "cache_dropped_background_writes_total",
-		Help:      "Total count of dropped write backs to cache.",
-	}, []string{"name"})
-	queueLength = promauto.NewGaugeVec(prometheus.GaugeOpts{
-		Namespace: "cortex",
-		Name:      "cache_background_queue_length",
-		Help:      "Length of the cache background write queue.",
-	}, []string{"name"})
-)
-
 // BackgroundConfig is config for a Background Cache.
 type BackgroundConfig struct {
 	WriteBackGoroutines int `yaml:"writeback_goroutines"`
@@ -54,14 +41,25 @@ type backgroundWrite struct {
 }
 
 // NewBackground returns a new Cache that does stores on background goroutines.
-func NewBackground(name string, cfg BackgroundConfig, cache Cache) Cache {
+func NewBackground(name string, cfg BackgroundConfig, cache Cache, reg prometheus.Registerer) Cache {
 	c := &backgroundCache{
-		Cache:            cache,
-		quit:             make(chan struct{}),
-		bgWrites:         make(chan backgroundWrite, cfg.WriteBackBuffer),
-		name:             name,
-		droppedWriteBack: droppedWriteBack.WithLabelValues(name),
-		queueLength:      queueLength.WithLabelValues(name),
+		Cache:    cache,
+		quit:     make(chan struct{}),
+		bgWrites: make(chan backgroundWrite, cfg.WriteBackBuffer),
+		name:     name,
+		droppedWriteBack: promauto.With(reg).NewCounter(prometheus.CounterOpts{
+			Namespace:   "cortex",
+			Name:        "cache_dropped_background_writes_total",
+			Help:        "Total count of dropped write backs to cache.",
+			ConstLabels: prometheus.Labels{"name": name},
+		}),
+
+		queueLength: promauto.With(reg).NewGauge(prometheus.GaugeOpts{
+			Namespace:   "cortex",
+			Name:        "cache_background_queue_length",
+			Help:        "Length of the cache background write queue.",
+			ConstLabels: prometheus.Labels{"name": name},
+		}),
 	}
 
 	c.wg.Add(cfg.WriteBackGoroutines)
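
Aside: the change above is the recurring pattern in this revendor: package-level *CounterVec/*GaugeVec globals are replaced by per-instance collectors built with promauto.With(reg), and the former label value becomes a ConstLabel. A minimal before/after sketch of that pattern with a hypothetical metric:

package main

import (
	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promauto"
)

// Before: one global vector, registered on the default registerer at init time.
var droppedWrites = promauto.NewCounterVec(prometheus.CounterOpts{
	Name: "example_dropped_writes_total",
}, []string{"name"})

// After: a per-instance counter on the caller's registerer; the old "name"
// label is folded into ConstLabels so every cache instance owns its collector.
func newDroppedWrites(name string, reg prometheus.Registerer) prometheus.Counter {
	return promauto.With(reg).NewCounter(prometheus.CounterOpts{
		Name:        "example_dropped_writes_total",
		ConstLabels: prometheus.Labels{"name": name},
	})
}

func main() {
	droppedWrites.WithLabelValues("chunks").Inc()
	newDroppedWrites("chunks", prometheus.NewRegistry()).Inc()
}
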
diff --git a/vendor/github.com/cortexproject/cortex/pkg/chunk/cache/cache.go b/vendor/github.com/cortexproject/cortex/pkg/chunk/cache/cache.go
index e400e88a32b27..dbbc6b2e8c4fa 100644
--- a/vendor/github.com/cortexproject/cortex/pkg/chunk/cache/cache.go
+++ b/vendor/github.com/cortexproject/cortex/pkg/chunk/cache/cache.go
@@ -6,6 +6,7 @@ import (
 	"flag"
 	"time"
 
+	"github.com/go-kit/kit/log"
 	"github.com/prometheus/client_golang/prometheus"
 )
 
@@ -60,7 +61,7 @@ func (cfg *Config) Validate() error {
 }
 
 // New creates a new Cache using Config.
-func New(cfg Config) (Cache, error) {
+func New(cfg Config, reg prometheus.Registerer, logger log.Logger) (Cache, error) {
 	if cfg.Cache != nil {
 		return cfg.Cache, nil
 	}
@@ -72,8 +73,8 @@ func New(cfg Config) (Cache, error) {
 			cfg.Fifocache.Validity = cfg.DefaultValidity
 		}
 
-		if cache := NewFifoCache(cfg.Prefix+"fifocache", cfg.Fifocache); cache != nil {
-			caches = append(caches, Instrument(cfg.Prefix+"fifocache", cache))
+		if cache := NewFifoCache(cfg.Prefix+"fifocache", cfg.Fifocache, reg, logger); cache != nil {
+			caches = append(caches, Instrument(cfg.Prefix+"fifocache", cache, reg))
 		}
 	}
 
@@ -86,11 +87,11 @@ func New(cfg Config) (Cache, error) {
 			cfg.Memcache.Expiration = cfg.DefaultValidity
 		}
 
-		client := NewMemcachedClient(cfg.MemcacheClient, cfg.Prefix, prometheus.DefaultRegisterer)
-		cache := NewMemcached(cfg.Memcache, client, cfg.Prefix)
+		client := NewMemcachedClient(cfg.MemcacheClient, cfg.Prefix, reg, logger)
+		cache := NewMemcached(cfg.Memcache, client, cfg.Prefix, reg, logger)
 
 		cacheName := cfg.Prefix + "memcache"
-		caches = append(caches, NewBackground(cacheName, cfg.Background, Instrument(cacheName, cache)))
+		caches = append(caches, NewBackground(cacheName, cfg.Background, Instrument(cacheName, cache, reg), reg))
 	}
 
 	if cfg.Redis.Endpoint != "" {
@@ -98,13 +99,13 @@ func New(cfg Config) (Cache, error) {
 			cfg.Redis.Expiration = cfg.DefaultValidity
 		}
 		cacheName := cfg.Prefix + "redis"
-		cache := NewRedisCache(cfg.Redis, cacheName, nil)
-		caches = append(caches, NewBackground(cacheName, cfg.Background, Instrument(cacheName, cache)))
+		cache := NewRedisCache(cfg.Redis, cacheName, nil, logger)
+		caches = append(caches, NewBackground(cacheName, cfg.Background, Instrument(cacheName, cache, reg), reg))
 	}
 
 	cache := NewTiered(caches)
 	if len(caches) > 1 {
-		cache = Instrument(cfg.Prefix+"tiered", cache)
+		cache = Instrument(cfg.Prefix+"tiered", cache, reg)
 	}
 	return cache, nil
 }
diff --git a/vendor/github.com/cortexproject/cortex/pkg/chunk/cache/fifo_cache.go b/vendor/github.com/cortexproject/cortex/pkg/chunk/cache/fifo_cache.go
index ca331de77e68d..81432d1a1e5b7 100644
--- a/vendor/github.com/cortexproject/cortex/pkg/chunk/cache/fifo_cache.go
+++ b/vendor/github.com/cortexproject/cortex/pkg/chunk/cache/fifo_cache.go
@@ -9,6 +9,7 @@ import (
 	"unsafe"
 
 	"github.com/dustin/go-humanize"
+	"github.com/go-kit/kit/log"
 	"github.com/go-kit/kit/log/level"
 	"github.com/pkg/errors"
 	"github.com/prometheus/client_golang/prometheus"
@@ -18,64 +19,6 @@ import (
 	"github.com/cortexproject/cortex/pkg/util/flagext"
 )
 
-var (
-	cacheEntriesAdded = promauto.NewCounterVec(prometheus.CounterOpts{
-		Namespace: "querier",
-		Subsystem: "cache",
-		Name:      "added_total",
-		Help:      "The total number of Put calls on the cache",
-	}, []string{"cache"})
-
-	cacheEntriesAddedNew = promauto.NewCounterVec(prometheus.CounterOpts{
-		Namespace: "querier",
-		Subsystem: "cache",
-		Name:      "added_new_total",
-		Help:      "The total number of new entries added to the cache",
-	}, []string{"cache"})
-
-	cacheEntriesEvicted = promauto.NewCounterVec(prometheus.CounterOpts{
-		Namespace: "querier",
-		Subsystem: "cache",
-		Name:      "evicted_total",
-		Help:      "The total number of evicted entries",
-	}, []string{"cache"})
-
-	cacheEntriesCurrent = promauto.NewGaugeVec(prometheus.GaugeOpts{
-		Namespace: "querier",
-		Subsystem: "cache",
-		Name:      "entries",
-		Help:      "The total number of entries",
-	}, []string{"cache"})
-
-	cacheTotalGets = promauto.NewCounterVec(prometheus.CounterOpts{
-		Namespace: "querier",
-		Subsystem: "cache",
-		Name:      "gets_total",
-		Help:      "The total number of Get calls",
-	}, []string{"cache"})
-
-	cacheTotalMisses = promauto.NewCounterVec(prometheus.CounterOpts{
-		Namespace: "querier",
-		Subsystem: "cache",
-		Name:      "misses_total",
-		Help:      "The total number of Get calls that had no valid entry",
-	}, []string{"cache"})
-
-	cacheStaleGets = promauto.NewCounterVec(prometheus.CounterOpts{
-		Namespace: "querier",
-		Subsystem: "cache",
-		Name:      "stale_gets_total",
-		Help:      "The total number of Get calls that had an entry which expired",
-	}, []string{"cache"})
-
-	cacheMemoryBytes = promauto.NewGaugeVec(prometheus.GaugeOpts{
-		Namespace: "querier",
-		Subsystem: "cache",
-		Name:      "memory_bytes",
-		Help:      "The current cache size in bytes",
-	}, []string{"cache"})
-)
-
 const (
 	elementSize    = int(unsafe.Sizeof(list.Element{}))
 	elementPrtSize = int(unsafe.Sizeof(&list.Element{}))
@@ -149,20 +92,19 @@ type cacheEntry struct {
 }
 
 // NewFifoCache returns a new initialised FifoCache of size.
-// TODO(bwplotka): Fix metrics, get them out of globals, separate or allow prefixing.
-func NewFifoCache(name string, cfg FifoCacheConfig) *FifoCache {
+func NewFifoCache(name string, cfg FifoCacheConfig, reg prometheus.Registerer, logger log.Logger) *FifoCache {
 	util.WarnExperimentalUse("In-memory (FIFO) cache")
 
 	if cfg.DeprecatedSize > 0 {
 		flagext.DeprecatedFlagsUsed.Inc()
-		level.Warn(util.Logger).Log("msg", "running with DEPRECATED flag fifocache.size, use fifocache.max-size-items or fifocache.max-size-bytes instead", "cache", name)
+		level.Warn(logger).Log("msg", "running with DEPRECATED flag fifocache.size, use fifocache.max-size-items or fifocache.max-size-bytes instead", "cache", name)
 		cfg.MaxSizeItems = cfg.DeprecatedSize
 	}
 	maxSizeBytes, _ := parsebytes(cfg.MaxSizeBytes)
 
 	if maxSizeBytes == 0 && cfg.MaxSizeItems == 0 {
 		// zero cache capacity - no need to create cache
-		level.Warn(util.Logger).Log("msg", "neither fifocache.max-size-bytes nor fifocache.max-size-items is set", "cache", name)
+		level.Warn(logger).Log("msg", "neither fifocache.max-size-bytes nor fifocache.max-size-items is set", "cache", name)
 		return nil
 	}
 	return &FifoCache{
@@ -172,15 +114,69 @@ func NewFifoCache(name string, cfg FifoCacheConfig) *FifoCache {
 		entries:      make(map[string]*list.Element),
 		lru:          list.New(),
 
-		// TODO(bwplotka): There might be simple cache.Cache wrapper for those.
-		entriesAdded:    cacheEntriesAdded.WithLabelValues(name),
-		entriesAddedNew: cacheEntriesAddedNew.WithLabelValues(name),
-		entriesEvicted:  cacheEntriesEvicted.WithLabelValues(name),
-		entriesCurrent:  cacheEntriesCurrent.WithLabelValues(name),
-		totalGets:       cacheTotalGets.WithLabelValues(name),
-		totalMisses:     cacheTotalMisses.WithLabelValues(name),
-		staleGets:       cacheStaleGets.WithLabelValues(name),
-		memoryBytes:     cacheMemoryBytes.WithLabelValues(name),
+		entriesAdded: promauto.With(reg).NewCounter(prometheus.CounterOpts{
+			Namespace:   "querier",
+			Subsystem:   "cache",
+			Name:        "added_total",
+			Help:        "The total number of Put calls on the cache",
+			ConstLabels: prometheus.Labels{"cache": name},
+		}),
+
+		entriesAddedNew: promauto.With(reg).NewCounter(prometheus.CounterOpts{
+			Namespace:   "querier",
+			Subsystem:   "cache",
+			Name:        "added_new_total",
+			Help:        "The total number of new entries added to the cache",
+			ConstLabels: prometheus.Labels{"cache": name},
+		}),
+
+		entriesEvicted: promauto.With(reg).NewCounter(prometheus.CounterOpts{
+			Namespace:   "querier",
+			Subsystem:   "cache",
+			Name:        "evicted_total",
+			Help:        "The total number of evicted entries",
+			ConstLabels: prometheus.Labels{"cache": name},
+		}),
+
+		entriesCurrent: promauto.With(reg).NewGauge(prometheus.GaugeOpts{
+			Namespace:   "querier",
+			Subsystem:   "cache",
+			Name:        "entries",
+			Help:        "The total number of entries",
+			ConstLabels: prometheus.Labels{"cache": name},
+		}),
+
+		totalGets: promauto.With(reg).NewCounter(prometheus.CounterOpts{
+			Namespace:   "querier",
+			Subsystem:   "cache",
+			Name:        "gets_total",
+			Help:        "The total number of Get calls",
+			ConstLabels: prometheus.Labels{"cache": name},
+		}),
+
+		totalMisses: promauto.With(reg).NewCounter(prometheus.CounterOpts{
+			Namespace:   "querier",
+			Subsystem:   "cache",
+			Name:        "misses_total",
+			Help:        "The total number of Get calls that had no valid entry",
+			ConstLabels: prometheus.Labels{"cache": name},
+		}),
+
+		staleGets: promauto.With(reg).NewCounter(prometheus.CounterOpts{
+			Namespace:   "querier",
+			Subsystem:   "cache",
+			Name:        "stale_gets_total",
+			Help:        "The total number of Get calls that had an entry which expired",
+			ConstLabels: prometheus.Labels{"cache": name},
+		}),
+
+		memoryBytes: promauto.With(reg).NewGauge(prometheus.GaugeOpts{
+			Namespace:   "querier",
+			Subsystem:   "cache",
+			Name:        "memory_bytes",
+			Help:        "The current cache size in bytes",
+			ConstLabels: prometheus.Labels{"cache": name},
+		}),
 	}
 }
 
diff --git a/vendor/github.com/cortexproject/cortex/pkg/chunk/cache/instrumented.go b/vendor/github.com/cortexproject/cortex/pkg/chunk/cache/instrumented.go
index c5c43b21cec18..ca27d4a3b4e40 100644
--- a/vendor/github.com/cortexproject/cortex/pkg/chunk/cache/instrumented.go
+++ b/vendor/github.com/cortexproject/cortex/pkg/chunk/cache/instrumented.go
@@ -6,58 +6,52 @@ import (
 	ot "github.com/opentracing/opentracing-go"
 	otlog "github.com/opentracing/opentracing-go/log"
 	"github.com/prometheus/client_golang/prometheus"
+	"github.com/prometheus/client_golang/prometheus/promauto"
 	instr "github.com/weaveworks/common/instrument"
 )
 
-var (
-	requestDuration = instr.NewHistogramCollector(prometheus.NewHistogramVec(prometheus.HistogramOpts{
-		Namespace: "cortex",
-		Name:      "cache_request_duration_seconds",
-		Help:      "Total time spent in seconds doing cache requests.",
-		// Cache requests are very quick: smallest bucket is 16us, biggest is 1s.
-		Buckets: prometheus.ExponentialBuckets(0.000016, 4, 8),
-	}, []string{"method", "status_code"}))
-
-	fetchedKeys = prometheus.NewCounterVec(prometheus.CounterOpts{
-		Namespace: "cortex",
-		Name:      "cache_fetched_keys",
-		Help:      "Total count of keys requested from cache.",
-	}, []string{"name"})
-
-	hits = prometheus.NewCounterVec(prometheus.CounterOpts{
-		Namespace: "cortex",
-		Name:      "cache_hits",
-		Help:      "Total count of keys found in cache.",
-	}, []string{"name"})
-
-	valueSize = prometheus.NewHistogramVec(prometheus.HistogramOpts{
+// Instrument returns an instrumented cache.
+func Instrument(name string, cache Cache, reg prometheus.Registerer) Cache {
+	valueSize := promauto.With(reg).NewHistogramVec(prometheus.HistogramOpts{
 		Namespace: "cortex",
 		Name:      "cache_value_size_bytes",
 		Help:      "Size of values in the cache.",
 		// Cached chunks are generally in the KBs, but cached index can
 		// get big.  Histogram goes from 1KB to 4MB.
 		// 1024 * 4^(7-1) = 4MB
-		Buckets: prometheus.ExponentialBuckets(1024, 4, 7),
-	}, []string{"name", "method"})
-)
+		Buckets:     prometheus.ExponentialBuckets(1024, 4, 7),
+		ConstLabels: prometheus.Labels{"name": name},
+	}, []string{"method"})
 
-func init() {
-	requestDuration.Register()
-	prometheus.MustRegister(fetchedKeys)
-	prometheus.MustRegister(hits)
-	prometheus.MustRegister(valueSize)
-}
-
-// Instrument returns an instrumented cache.
-func Instrument(name string, cache Cache) Cache {
 	return &instrumentedCache{
 		name:  name,
 		Cache: cache,
 
-		fetchedKeys:      fetchedKeys.WithLabelValues(name),
-		hits:             hits.WithLabelValues(name),
-		storedValueSize:  valueSize.WithLabelValues(name, "store"),
-		fetchedValueSize: valueSize.WithLabelValues(name, "fetch"),
+		requestDuration: instr.NewHistogramCollector(promauto.With(reg).NewHistogramVec(prometheus.HistogramOpts{
+			Namespace: "cortex",
+			Name:      "cache_request_duration_seconds",
+			Help:      "Total time spent in seconds doing cache requests.",
+			// Cache requests are very quick: smallest bucket is 16us, biggest is 1s.
+			Buckets:     prometheus.ExponentialBuckets(0.000016, 4, 8),
+			ConstLabels: prometheus.Labels{"name": name},
+		}, []string{"method", "status_code"})),
+
+		fetchedKeys: promauto.With(reg).NewCounter(prometheus.CounterOpts{
+			Namespace:   "cortex",
+			Name:        "cache_fetched_keys",
+			Help:        "Total count of keys requested from cache.",
+			ConstLabels: prometheus.Labels{"name": name},
+		}),
+
+		hits: promauto.With(reg).NewCounter(prometheus.CounterOpts{
+			Namespace:   "cortex",
+			Name:        "cache_hits",
+			Help:        "Total count of keys found in cache.",
+			ConstLabels: prometheus.Labels{"name": name},
+		}),
+
+		storedValueSize:  valueSize.WithLabelValues("store"),
+		fetchedValueSize: valueSize.WithLabelValues("fetch"),
 	}
 }
 
@@ -67,6 +61,7 @@ type instrumentedCache struct {
 
 	fetchedKeys, hits                 prometheus.Counter
 	storedValueSize, fetchedValueSize prometheus.Observer
+	requestDuration                   *instr.HistogramCollector
 }
 
 func (i *instrumentedCache) Store(ctx context.Context, keys []string, bufs [][]byte) {
@@ -75,7 +70,7 @@ func (i *instrumentedCache) Store(ctx context.Context, keys []string, bufs [][]b
 	}
 
 	method := i.name + ".store"
-	_ = instr.CollectedRequest(ctx, method, requestDuration, instr.ErrorCode, func(ctx context.Context) error {
+	_ = instr.CollectedRequest(ctx, method, i.requestDuration, instr.ErrorCode, func(ctx context.Context) error {
 		sp := ot.SpanFromContext(ctx)
 		sp.LogFields(otlog.Int("keys", len(keys)))
 		i.Cache.Store(ctx, keys, bufs)
@@ -91,7 +86,7 @@ func (i *instrumentedCache) Fetch(ctx context.Context, keys []string) ([]string,
 		method  = i.name + ".fetch"
 	)
 
-	_ = instr.CollectedRequest(ctx, method, requestDuration, instr.ErrorCode, func(ctx context.Context) error {
+	_ = instr.CollectedRequest(ctx, method, i.requestDuration, instr.ErrorCode, func(ctx context.Context) error {
 		sp := ot.SpanFromContext(ctx)
 		sp.LogFields(otlog.Int("keys requested", len(keys)))
 
diff --git a/vendor/github.com/cortexproject/cortex/pkg/chunk/cache/memcached.go b/vendor/github.com/cortexproject/cortex/pkg/chunk/cache/memcached.go
index 0b14180e11f48..c2101e6916836 100644
--- a/vendor/github.com/cortexproject/cortex/pkg/chunk/cache/memcached.go
+++ b/vendor/github.com/cortexproject/cortex/pkg/chunk/cache/memcached.go
@@ -9,6 +9,7 @@ import (
 	"time"
 
 	"github.com/bradfitz/gomemcache/memcache"
+	"github.com/go-kit/kit/log"
 	"github.com/go-kit/kit/log/level"
 	opentracing "github.com/opentracing/opentracing-go"
 	otlog "github.com/opentracing/opentracing-go/log"
@@ -19,16 +20,6 @@ import (
 	"github.com/cortexproject/cortex/pkg/util"
 )
 
-var (
-	memcacheRequestDuration = promauto.NewHistogramVec(prometheus.HistogramOpts{
-		Namespace: "cortex",
-		Name:      "memcache_request_duration_seconds",
-		Help:      "Total time spent in seconds doing memcache requests.",
-		// Memecache requests are very quick: smallest bucket is 16us, biggest is 1s
-		Buckets: prometheus.ExponentialBuckets(0.000016, 4, 8),
-	}, []string{"method", "status_code", "name"})
-)
-
 type observableVecCollector struct {
 	v prometheus.ObserverVec
 }
@@ -64,20 +55,26 @@ type Memcached struct {
 
 	wg      sync.WaitGroup
 	inputCh chan *work
+
+	logger log.Logger
 }
 
 // NewMemcached makes a new Memcache.
-// TODO(bwplotka): Fix metrics, get them out of globals, separate or allow prefixing.
-// TODO(bwplotka): Remove globals & util packages from cache package entirely (e.g util.Logger).
-func NewMemcached(cfg MemcachedConfig, client MemcachedClient, name string) *Memcached {
+func NewMemcached(cfg MemcachedConfig, client MemcachedClient, name string, reg prometheus.Registerer, logger log.Logger) *Memcached {
 	c := &Memcached{
 		cfg:      cfg,
 		memcache: client,
 		name:     name,
+		logger:   logger,
 		requestDuration: observableVecCollector{
-			v: memcacheRequestDuration.MustCurryWith(prometheus.Labels{
-				"name": name,
-			}),
+			v: promauto.With(reg).NewHistogramVec(prometheus.HistogramOpts{
+				Namespace: "cortex",
+				Name:      "memcache_request_duration_seconds",
+				Help:      "Total time spent in seconds doing memcache requests.",
+				// Memcache requests are very quick: smallest bucket is 16us, biggest is 1s
+				Buckets:     prometheus.ExponentialBuckets(0.000016, 4, 8),
+				ConstLabels: prometheus.Labels{"name": name},
+			}, []string{"method", "status_code"}),
 		},
 	}
 
@@ -161,7 +158,7 @@ func (c *Memcached) fetch(ctx context.Context, keys []string) (found []string, b
 		// Memcached returns partial results even on error.
 		if err != nil {
 			sp.LogFields(otlog.Error(err))
-			level.Error(util.Logger).Log("msg", "Failed to get keys from memcached", "err", err)
+			level.Error(c.logger).Log("msg", "Failed to get keys from memcached", "err", err)
 		}
 		return err
 	})
@@ -234,7 +231,7 @@ func (c *Memcached) Store(ctx context.Context, keys []string, bufs [][]byte) {
 			return c.memcache.Set(&item)
 		})
 		if err != nil {
-			level.Error(util.Logger).Log("msg", "failed to put to memcached", "name", c.name, "err", err)
+			level.Error(c.logger).Log("msg", "failed to put to memcached", "name", c.name, "err", err)
 		}
 	}
 }
diff --git a/vendor/github.com/cortexproject/cortex/pkg/chunk/cache/memcached_client.go b/vendor/github.com/cortexproject/cortex/pkg/chunk/cache/memcached_client.go
index df0969dc78f3b..6a0b52a0ff5cd 100644
--- a/vendor/github.com/cortexproject/cortex/pkg/chunk/cache/memcached_client.go
+++ b/vendor/github.com/cortexproject/cortex/pkg/chunk/cache/memcached_client.go
@@ -11,6 +11,7 @@ import (
 	"time"
 
 	"github.com/bradfitz/gomemcache/memcache"
+	"github.com/go-kit/kit/log"
 	"github.com/go-kit/kit/log/level"
 	"github.com/pkg/errors"
 	"github.com/prometheus/client_golang/prometheus"
@@ -20,14 +21,6 @@ import (
 	"github.com/cortexproject/cortex/pkg/util"
 )
 
-var (
-	memcacheServersDiscovered = promauto.NewGaugeVec(prometheus.GaugeOpts{
-		Namespace: "cortex",
-		Name:      "memcache_client_servers",
-		Help:      "The number of memcache servers discovered.",
-	}, []string{"name"})
-)
-
 // MemcachedClient interface exists for mocking memcacheClient.
 type MemcachedClient interface {
 	GetMulti(keys []string) (map[string]*memcache.Item, error)
@@ -55,6 +48,8 @@ type memcachedClient struct {
 	wait sync.WaitGroup
 
 	numServers prometheus.Gauge
+
+	logger log.Logger
 }
 
 // MemcachedClientConfig defines how a MemcachedClient should be constructed.
@@ -81,7 +76,7 @@ func (cfg *MemcachedClientConfig) RegisterFlagsWithPrefix(prefix, description st
 
 // NewMemcachedClient creates a new MemcacheClient that gets its server list
 // from SRV and updates the server list on a regular basis.
-func NewMemcachedClient(cfg MemcachedClientConfig, name string, r prometheus.Registerer) MemcachedClient {
+func NewMemcachedClient(cfg MemcachedClientConfig, name string, r prometheus.Registerer, logger log.Logger) MemcachedClient {
 	var selector serverSelector
 	if cfg.ConsistentHash {
 		selector = &MemcachedJumpHashSelector{}
@@ -102,10 +97,16 @@ func NewMemcachedClient(cfg MemcachedClientConfig, name string, r prometheus.Reg
 		serverList: selector,
 		hostname:   cfg.Host,
 		service:    cfg.Service,
-		provider:   dns.NewProvider(util.Logger, dnsProviderRegisterer, dns.GolangResolverType),
+		logger:     logger,
+		provider:   dns.NewProvider(logger, dnsProviderRegisterer, dns.GolangResolverType),
 		quit:       make(chan struct{}),
 
-		numServers: memcacheServersDiscovered.WithLabelValues(name),
+		numServers: promauto.With(r).NewGauge(prometheus.GaugeOpts{
+			Namespace:   "cortex",
+			Name:        "memcache_client_servers",
+			Help:        "The number of memcache servers discovered.",
+			ConstLabels: prometheus.Labels{"name": name},
+		}),
 	}
 
 	if len(cfg.Addresses) > 0 {
@@ -115,7 +116,7 @@ func NewMemcachedClient(cfg MemcachedClientConfig, name string, r prometheus.Reg
 
 	err := newClient.updateMemcacheServers()
 	if err != nil {
-		level.Error(util.Logger).Log("msg", "error setting memcache servers to host", "host", cfg.Host, "err", err)
+		level.Error(logger).Log("msg", "error setting memcache servers to host", "host", cfg.Host, "err", err)
 	}
 
 	newClient.wait.Add(1)
@@ -153,7 +154,7 @@ func (c *memcachedClient) updateLoop(updateInterval time.Duration) {
 		case <-ticker.C:
 			err := c.updateMemcacheServers()
 			if err != nil {
-				level.Warn(util.Logger).Log("msg", "error updating memcache servers", "err", err)
+				level.Warn(c.logger).Log("msg", "error updating memcache servers", "err", err)
 			}
 		case <-c.quit:
 			ticker.Stop()
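
With the globals gone, NewMemcachedClient, NewMemcached and Instrument all take an explicit prometheus.Registerer and, where they log, a go-kit Logger. A rough wiring sketch under those signatures; the host, interval and cache name are placeholders, and the UpdateInterval field name is assumed from the vendored config rather than shown in this patch:

    package main

    import (
        "time"

        "github.com/go-kit/kit/log"
        "github.com/prometheus/client_golang/prometheus"

        "github.com/cortexproject/cortex/pkg/chunk/cache"
    )

    func main() {
        reg := prometheus.NewRegistry()
        logger := log.NewNopLogger()

        // The client discovers servers in the background and only logs through
        // the injected logger if the placeholder host does not resolve.
        client := cache.NewMemcachedClient(cache.MemcachedClientConfig{
            Host:           "memcached.example.local", // placeholder
            Service:        "memcache",
            UpdateInterval: time.Minute, // assumed field name
        }, "chunks", reg, logger)

        // The memcached cache and the instrumentation wrapper now register
        // their metrics against the same injected Registerer, labelled by name.
        mc := cache.NewMemcached(cache.MemcachedConfig{}, client, "chunks", reg, logger)
        _ = cache.Instrument("chunks", mc, reg)
    }
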
diff --git a/vendor/github.com/cortexproject/cortex/pkg/chunk/cache/redis_cache.go b/vendor/github.com/cortexproject/cortex/pkg/chunk/cache/redis_cache.go
index fac33bb4589b5..382290e30ba86 100644
--- a/vendor/github.com/cortexproject/cortex/pkg/chunk/cache/redis_cache.go
+++ b/vendor/github.com/cortexproject/cortex/pkg/chunk/cache/redis_cache.go
@@ -5,6 +5,7 @@ import (
 	"flag"
 	"time"
 
+	"github.com/go-kit/kit/log"
 	"github.com/go-kit/kit/log/level"
 	"github.com/gomodule/redigo/redis"
 
@@ -18,6 +19,7 @@ type RedisCache struct {
 	expiration int
 	timeout    time.Duration
 	pool       *redis.Pool
+	logger     log.Logger
 }
 
 // RedisConfig defines how a RedisCache should be constructed.
@@ -49,7 +51,7 @@ func (cfg *RedisConfig) RegisterFlagsWithPrefix(prefix, description string, f *f
 }
 
 // NewRedisCache creates a new RedisCache
-func NewRedisCache(cfg RedisConfig, name string, pool *redis.Pool) *RedisCache {
+func NewRedisCache(cfg RedisConfig, name string, pool *redis.Pool, logger log.Logger) *RedisCache {
 	util.WarnExperimentalUse("Redis cache")
 	// pool != nil only in unit tests
 	if pool == nil {
@@ -82,10 +84,11 @@ func NewRedisCache(cfg RedisConfig, name string, pool *redis.Pool) *RedisCache {
 		timeout:    cfg.Timeout,
 		name:       name,
 		pool:       pool,
+		logger:     logger,
 	}
 
 	if err := cache.ping(context.Background()); err != nil {
-		level.Error(util.Logger).Log("msg", "error connecting to redis", "endpoint", cfg.Endpoint, "err", err)
+		level.Error(logger).Log("msg", "error connecting to redis", "endpoint", cfg.Endpoint, "err", err)
 	}
 
 	return cache
@@ -96,7 +99,7 @@ func (c *RedisCache) Fetch(ctx context.Context, keys []string) (found []string,
 	data, err := c.mget(ctx, keys)
 
 	if err != nil {
-		level.Error(util.Logger).Log("msg", "failed to get from redis", "name", c.name, "err", err)
+		level.Error(c.logger).Log("msg", "failed to get from redis", "name", c.name, "err", err)
 		missed = make([]string, len(keys))
 		copy(missed, keys)
 		return
@@ -116,7 +119,7 @@ func (c *RedisCache) Fetch(ctx context.Context, keys []string) (found []string,
 func (c *RedisCache) Store(ctx context.Context, keys []string, bufs [][]byte) {
 	err := c.mset(ctx, keys, bufs, c.expiration)
 	if err != nil {
-		level.Error(util.Logger).Log("msg", "failed to put to redis", "name", c.name, "err", err)
+		level.Error(c.logger).Log("msg", "failed to put to redis", "name", c.name, "err", err)
 	}
 }
 
@@ -126,7 +129,7 @@ func (c *RedisCache) Stop() {
 }
 
 // mset adds key-value pairs to the cache.
-func (c *RedisCache) mset(ctx context.Context, keys []string, bufs [][]byte, ttl int) error {
+func (c *RedisCache) mset(_ context.Context, keys []string, bufs [][]byte, ttl int) error {
 	conn := c.pool.Get()
 	defer conn.Close()
 
@@ -143,7 +146,7 @@ func (c *RedisCache) mset(ctx context.Context, keys []string, bufs [][]byte, ttl
 }
 
 // mget retrieves values from the cache.
-func (c *RedisCache) mget(ctx context.Context, keys []string) ([][]byte, error) {
+func (c *RedisCache) mget(_ context.Context, keys []string) ([][]byte, error) {
 	intf := make([]interface{}, len(keys))
 	for i, key := range keys {
 		intf[i] = key
@@ -155,7 +158,7 @@ func (c *RedisCache) mget(ctx context.Context, keys []string) ([][]byte, error)
 	return redis.ByteSlices(redis.DoWithTimeout(conn, c.timeout, "MGET", intf...))
 }
 
-func (c *RedisCache) ping(ctx context.Context) error {
+func (c *RedisCache) ping(_ context.Context) error {
 	conn := c.pool.Get()
 	defer conn.Close()
 
diff --git a/vendor/github.com/cortexproject/cortex/pkg/chunk/cache/snappy.go b/vendor/github.com/cortexproject/cortex/pkg/chunk/cache/snappy.go
index 2fc2308f4844f..d2ee606eda279 100644
--- a/vendor/github.com/cortexproject/cortex/pkg/chunk/cache/snappy.go
+++ b/vendor/github.com/cortexproject/cortex/pkg/chunk/cache/snappy.go
@@ -3,20 +3,21 @@ package cache
 import (
 	"context"
 
+	"github.com/go-kit/kit/log"
 	"github.com/go-kit/kit/log/level"
 	"github.com/golang/snappy"
-
-	"github.com/cortexproject/cortex/pkg/util"
 )
 
 type snappyCache struct {
-	next Cache
+	next   Cache
+	logger log.Logger
 }
 
 // NewSnappy makes a new snappy encoding cache wrapper.
-func NewSnappy(next Cache) Cache {
+func NewSnappy(next Cache, logger log.Logger) Cache {
 	return &snappyCache{
-		next: next,
+		next:   next,
+		logger: logger,
 	}
 }
 
@@ -35,7 +36,7 @@ func (s *snappyCache) Fetch(ctx context.Context, keys []string) ([]string, [][]b
 	for _, buf := range bufs {
 		d, err := snappy.Decode(nil, buf)
 		if err != nil {
-			level.Error(util.Logger).Log("msg", "failed to decode cache entry", "err", err)
+			level.Error(s.logger).Log("msg", "failed to decode cache entry", "err", err)
 			return nil, nil, keys
 		}
 		ds = append(ds, d)
diff --git a/vendor/github.com/cortexproject/cortex/pkg/chunk/chunk_store_utils.go b/vendor/github.com/cortexproject/cortex/pkg/chunk/chunk_store_utils.go
index c18a38a167252..061a9b1c638a1 100644
--- a/vendor/github.com/cortexproject/cortex/pkg/chunk/chunk_store_utils.go
+++ b/vendor/github.com/cortexproject/cortex/pkg/chunk/chunk_store_utils.go
@@ -162,6 +162,7 @@ func (c *Fetcher) FetchChunks(ctx context.Context, chunks []Chunk, keys []string
 	}
 
 	if err != nil {
+		// Don't rely on Cortex error translation here.
 		return nil, promql.ErrStorage{Err: err}
 	}
 
diff --git a/vendor/github.com/cortexproject/cortex/pkg/chunk/schema_config.go b/vendor/github.com/cortexproject/cortex/pkg/chunk/schema_config.go
index 612c12c5f97c9..405dac5359e15 100644
--- a/vendor/github.com/cortexproject/cortex/pkg/chunk/schema_config.go
+++ b/vendor/github.com/cortexproject/cortex/pkg/chunk/schema_config.go
@@ -25,9 +25,10 @@ const (
 )
 
 var (
-	errInvalidSchemaVersion = errors.New("invalid schema version")
-	errInvalidTablePeriod   = errors.New("the table period must be a multiple of 24h (1h for schema v1)")
-	errConfigFileNotSet     = errors.New("schema config file needs to be set")
+	errInvalidSchemaVersion    = errors.New("invalid schema version")
+	errInvalidTablePeriod      = errors.New("the table period must be a multiple of 24h (1h for schema v1)")
+	errConfigFileNotSet        = errors.New("schema config file needs to be set")
+	errConfigChunkPrefixNotSet = errors.New("schema config for chunks is missing the 'prefix' setting")
 )
 
 // PeriodConfig defines the schema and tables to use for a period of time
@@ -148,6 +149,22 @@ func (cfg *SchemaConfig) ForEachAfter(t model.Time, f func(config *PeriodConfig)
 	}
 }
 
+func validateChunks(cfg PeriodConfig) error {
+	objectStore := cfg.IndexType
+	if cfg.ObjectType != "" {
+		objectStore = cfg.ObjectType
+	}
+	switch objectStore {
+	case "cassandra", "aws-dynamo", "bigtable-hashed", "gcp", "gcp-columnkey", "bigtable", "grpc-store":
+		if cfg.ChunkTables.Prefix == "" {
+			return errConfigChunkPrefixNotSet
+		}
+		return nil
+	default:
+		return nil
+	}
+}
+
 // CreateSchema returns the schema defined by the PeriodConfig
 func (cfg PeriodConfig) CreateSchema() (BaseSchema, error) {
 	buckets, bucketsPeriod := cfg.createBucketsFunc()
@@ -209,6 +226,11 @@ func (cfg *PeriodConfig) applyDefaults() {
 
 // Validate the period config.
 func (cfg PeriodConfig) validate() error {
+	validateError := validateChunks(cfg)
+	if validateError != nil {
+		return validateError
+	}
+
 	_, err := cfg.CreateSchema()
 	return err
 }
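
The new validateChunks check is wired into PeriodConfig.validate(), so a table-based chunk store configured without a chunk table prefix is now rejected at config time rather than failing later. A minimal in-package sketch (the helper name is illustrative; validateChunks is unexported, so this would have to live in the chunk package):

    package chunk

    // exampleChunkPrefixValidation shows the failure mode the new check catches:
    // a cassandra period config whose ChunkTables.Prefix was never set.
    func exampleChunkPrefixValidation() error {
        cfg := PeriodConfig{
            IndexType: "cassandra",
            // ChunkTables.Prefix deliberately left empty.
        }
        return validateChunks(cfg) // returns errConfigChunkPrefixNotSet
    }
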
diff --git a/vendor/github.com/cortexproject/cortex/pkg/chunk/storage/caching_fixtures.go b/vendor/github.com/cortexproject/cortex/pkg/chunk/storage/caching_fixtures.go
index ee032300b8089..6c14f4962852c 100644
--- a/vendor/github.com/cortexproject/cortex/pkg/chunk/storage/caching_fixtures.go
+++ b/vendor/github.com/cortexproject/cortex/pkg/chunk/storage/caching_fixtures.go
@@ -1,9 +1,12 @@
 package storage
 
 import (
-	io "io"
+	"io"
 	"time"
 
+	"github.com/go-kit/kit/log"
+	"github.com/prometheus/client_golang/prometheus"
+
 	"github.com/cortexproject/cortex/pkg/util/flagext"
 	"github.com/cortexproject/cortex/pkg/util/validation"
 
@@ -25,10 +28,12 @@ func (f fixture) Clients() (chunk.IndexClient, chunk.Client, chunk.TableClient,
 		return nil, nil, nil, chunk.SchemaConfig{}, nil, err
 	}
 	indexClient, chunkClient, tableClient, schemaConfig, closer, err := f.fixture.Clients()
+	reg := prometheus.NewRegistry()
+	logger := log.NewNopLogger()
 	indexClient = newCachingIndexClient(indexClient, cache.NewFifoCache("index-fifo", cache.FifoCacheConfig{
 		MaxSizeItems: 500,
 		Validity:     5 * time.Minute,
-	}), 5*time.Minute, limits)
+	}, reg, logger), 5*time.Minute, limits, logger)
 	return indexClient, chunkClient, tableClient, schemaConfig, closer, err
 }
 
diff --git a/vendor/github.com/cortexproject/cortex/pkg/chunk/storage/caching_index_client.go b/vendor/github.com/cortexproject/cortex/pkg/chunk/storage/caching_index_client.go
index cf9e37f1ca8d0..6408a5c5eab08 100644
--- a/vendor/github.com/cortexproject/cortex/pkg/chunk/storage/caching_index_client.go
+++ b/vendor/github.com/cortexproject/cortex/pkg/chunk/storage/caching_index_client.go
@@ -5,6 +5,7 @@ import (
 	"sync"
 	"time"
 
+	"github.com/go-kit/kit/log"
 	"github.com/go-kit/kit/log/level"
 	"github.com/gogo/protobuf/proto"
 	"github.com/prometheus/client_golang/prometheus"
@@ -14,7 +15,6 @@ import (
 	"github.com/cortexproject/cortex/pkg/chunk"
 	"github.com/cortexproject/cortex/pkg/chunk/cache"
 	chunk_util "github.com/cortexproject/cortex/pkg/chunk/util"
-	"github.com/cortexproject/cortex/pkg/util"
 	"github.com/cortexproject/cortex/pkg/util/spanlogger"
 )
 
@@ -46,18 +46,20 @@ type cachingIndexClient struct {
 	cache    cache.Cache
 	validity time.Duration
 	limits   StoreLimits
+	logger   log.Logger
 }
 
-func newCachingIndexClient(client chunk.IndexClient, c cache.Cache, validity time.Duration, limits StoreLimits) chunk.IndexClient {
+func newCachingIndexClient(client chunk.IndexClient, c cache.Cache, validity time.Duration, limits StoreLimits, logger log.Logger) chunk.IndexClient {
 	if c == nil || cache.IsEmptyTieredCache(c) {
 		return client
 	}
 
 	return &cachingIndexClient{
 		IndexClient: client,
-		cache:       cache.NewSnappy(c),
+		cache:       cache.NewSnappy(c, logger),
 		validity:    validity,
 		limits:      limits,
+		logger:      logger,
 	}
 }
 
@@ -226,7 +228,7 @@ func (s *cachingIndexClient) cacheStore(ctx context.Context, keys []string, batc
 		hashed = append(hashed, cache.HashKey(keys[i]))
 		out, err := proto.Marshal(&batches[i])
 		if err != nil {
-			level.Warn(util.Logger).Log("msg", "error marshalling ReadBatch", "err", err)
+			level.Warn(s.logger).Log("msg", "error marshalling ReadBatch", "err", err)
 			cacheEncodeErrs.Inc()
 			return
 		}
diff --git a/vendor/github.com/cortexproject/cortex/pkg/chunk/storage/factory.go b/vendor/github.com/cortexproject/cortex/pkg/chunk/storage/factory.go
index 1629ead9247fc..de4cc6cc41512 100644
--- a/vendor/github.com/cortexproject/cortex/pkg/chunk/storage/factory.go
+++ b/vendor/github.com/cortexproject/cortex/pkg/chunk/storage/factory.go
@@ -7,6 +7,7 @@ import (
 	"strings"
 	"time"
 
+	"github.com/go-kit/kit/log"
 	"github.com/go-kit/kit/log/level"
 	"github.com/pkg/errors"
 	"github.com/prometheus/client_golang/prometheus"
@@ -28,7 +29,7 @@ import (
 // Supported storage engines
 const (
 	StorageEngineChunks = "chunks"
-	StorageEngineTSDB   = "tsdb"
+	StorageEngineBlocks = "blocks"
 )
 
 type indexStoreFactories struct {
@@ -91,14 +92,14 @@ func (cfg *Config) RegisterFlags(f *flag.FlagSet) {
 	cfg.Swift.RegisterFlags(f)
 	cfg.GrpcConfig.RegisterFlags(f)
 
-	f.StringVar(&cfg.Engine, "store.engine", "chunks", "The storage engine to use: chunks or tsdb. Be aware tsdb is experimental and shouldn't be used in production.")
+	f.StringVar(&cfg.Engine, "store.engine", "chunks", "The storage engine to use: chunks or blocks. Be aware that blocks storage is experimental and shouldn't be used in production.")
 	cfg.IndexQueriesCacheConfig.RegisterFlagsWithPrefix("store.index-cache-read.", "Cache config for index entry reading. ", f)
 	f.DurationVar(&cfg.IndexCacheValidity, "store.index-cache-validity", 5*time.Minute, "Cache validity for active index entries. Should be no higher than -ingester.max-chunk-idle.")
 }
 
 // Validate config and returns error on failure
 func (cfg *Config) Validate() error {
-	if cfg.Engine != StorageEngineChunks && cfg.Engine != StorageEngineTSDB {
+	if cfg.Engine != StorageEngineChunks && cfg.Engine != StorageEngineBlocks {
 		return errors.New("unsupported storage engine")
 	}
 	if err := cfg.CassandraStorageConfig.Validate(); err != nil {
@@ -114,22 +115,30 @@ func (cfg *Config) Validate() error {
 }
 
 // NewStore makes the storage clients based on the configuration.
-func NewStore(cfg Config, storeCfg chunk.StoreConfig, schemaCfg chunk.SchemaConfig, limits StoreLimits, reg prometheus.Registerer, cacheGenNumLoader chunk.CacheGenNumLoader) (chunk.Store, error) {
+func NewStore(
+	cfg Config,
+	storeCfg chunk.StoreConfig,
+	schemaCfg chunk.SchemaConfig,
+	limits StoreLimits,
+	reg prometheus.Registerer,
+	cacheGenNumLoader chunk.CacheGenNumLoader,
+	logger log.Logger,
+) (chunk.Store, error) {
 	chunkMetrics := newChunkClientMetrics(reg)
 
-	indexReadCache, err := cache.New(cfg.IndexQueriesCacheConfig)
+	indexReadCache, err := cache.New(cfg.IndexQueriesCacheConfig, reg, logger)
 	if err != nil {
 		return nil, err
 	}
 
-	writeDedupeCache, err := cache.New(storeCfg.WriteDedupeCacheConfig)
+	writeDedupeCache, err := cache.New(storeCfg.WriteDedupeCacheConfig, reg, logger)
 	if err != nil {
 		return nil, err
 	}
 
 	chunkCacheCfg := storeCfg.ChunkCacheConfig
 	chunkCacheCfg.Prefix = "chunks"
-	chunksCache, err := cache.New(chunkCacheCfg)
+	chunksCache, err := cache.New(chunkCacheCfg, reg, logger)
 	if err != nil {
 		return nil, err
 	}
@@ -160,7 +169,7 @@ func NewStore(cfg Config, storeCfg chunk.StoreConfig, schemaCfg chunk.SchemaConf
 		if err != nil {
 			return nil, errors.Wrap(err, "error creating index client")
 		}
-		index = newCachingIndexClient(index, indexReadCache, cfg.IndexCacheValidity, limits)
+		index = newCachingIndexClient(index, indexReadCache, cfg.IndexCacheValidity, limits, logger)
 
 		objectStoreType := s.ObjectType
 		if objectStoreType == "" {
diff --git a/vendor/github.com/cortexproject/cortex/pkg/compactor/compactor.go b/vendor/github.com/cortexproject/cortex/pkg/compactor/compactor.go
index c4426b1cef850..8d5ec0bbf50e5 100644
--- a/vendor/github.com/cortexproject/cortex/pkg/compactor/compactor.go
+++ b/vendor/github.com/cortexproject/cortex/pkg/compactor/compactor.go
@@ -78,7 +78,7 @@ type Compactor struct {
 	services.Service
 
 	compactorCfg Config
-	storageCfg   cortex_tsdb.Config
+	storageCfg   cortex_tsdb.BlocksStorageConfig
 	logger       log.Logger
 	parentLogger log.Logger
 	registerer   prometheus.Registerer
@@ -118,7 +118,7 @@ type Compactor struct {
 }
 
 // NewCompactor makes a new Compactor.
-func NewCompactor(compactorCfg Config, storageCfg cortex_tsdb.Config, logger log.Logger, registerer prometheus.Registerer) (*Compactor, error) {
+func NewCompactor(compactorCfg Config, storageCfg cortex_tsdb.BlocksStorageConfig, logger log.Logger, registerer prometheus.Registerer) (*Compactor, error) {
 	createBucketClientAndTsdbCompactor := func(ctx context.Context) (objstore.Bucket, tsdb.Compactor, error) {
 		bucketClient, err := cortex_tsdb.NewBucketClient(ctx, storageCfg, "compactor", logger, registerer)
 		if err != nil {
@@ -139,7 +139,7 @@ func NewCompactor(compactorCfg Config, storageCfg cortex_tsdb.Config, logger log
 
 func newCompactor(
 	compactorCfg Config,
-	storageCfg cortex_tsdb.Config,
+	storageCfg cortex_tsdb.BlocksStorageConfig,
 	logger log.Logger,
 	registerer prometheus.Registerer,
 	createBucketClientAndTsdbCompactor func(ctx context.Context) (objstore.Bucket, tsdb.Compactor, error),
diff --git a/vendor/github.com/cortexproject/cortex/pkg/cortex/cortex.go b/vendor/github.com/cortexproject/cortex/pkg/cortex/cortex.go
index 5b06530065b62..21e19d86fb0bb 100644
--- a/vendor/github.com/cortexproject/cortex/pkg/cortex/cortex.go
+++ b/vendor/github.com/cortexproject/cortex/pkg/cortex/cortex.go
@@ -94,7 +94,7 @@ type Config struct {
 	QueryRange     queryrange.Config        `yaml:"query_range"`
 	TableManager   chunk.TableManagerConfig `yaml:"table_manager"`
 	Encoding       encoding.Config          `yaml:"-"` // No yaml for this, it only works with flags.
-	TSDB           tsdb.Config              `yaml:"tsdb"`
+	BlocksStorage  tsdb.BlocksStorageConfig `yaml:"blocks_storage"`
 	Compactor      compactor.Config         `yaml:"compactor"`
 	StoreGateway   storegateway.Config      `yaml:"store_gateway"`
 	PurgerConfig   purger.Config            `yaml:"purger"`
@@ -133,7 +133,7 @@ func (c *Config) RegisterFlags(f *flag.FlagSet) {
 	c.QueryRange.RegisterFlags(f)
 	c.TableManager.RegisterFlags(f)
 	c.Encoding.RegisterFlags(f)
-	c.TSDB.RegisterFlags(f)
+	c.BlocksStorage.RegisterFlags(f)
 	c.Compactor.RegisterFlags(f)
 	c.StoreGateway.RegisterFlags(f)
 	c.PurgerConfig.RegisterFlags(f)
@@ -166,7 +166,7 @@ func (c *Config) Validate(log log.Logger) error {
 	if err := c.Ruler.Validate(); err != nil {
 		return errors.Wrap(err, "invalid ruler config")
 	}
-	if err := c.TSDB.Validate(); err != nil {
+	if err := c.BlocksStorage.Validate(); err != nil {
 		return errors.Wrap(err, "invalid TSDB config")
 	}
 	if err := c.LimitsConfig.Validate(c.Distributor.ShardByAllLabels); err != nil {
diff --git a/vendor/github.com/cortexproject/cortex/pkg/cortex/modules.go b/vendor/github.com/cortexproject/cortex/pkg/cortex/modules.go
index 25f03ae5e5181..a10486102e480 100644
--- a/vendor/github.com/cortexproject/cortex/pkg/cortex/modules.go
+++ b/vendor/github.com/cortexproject/cortex/pkg/cortex/modules.go
@@ -255,14 +255,14 @@ func initQueryableForEngine(engine string, cfg Config, chunkStore chunk.Store, l
 		}
 		return querier.NewChunkStoreQueryable(cfg.Querier, chunkStore), nil
 
-	case storage.StorageEngineTSDB:
+	case storage.StorageEngineBlocks:
 		// When running in single binary, if the blocks sharding is disabled and no custom
 		// store-gateway address has been configured, we can set it to the running process.
 		if cfg.Target == All && !cfg.StoreGateway.ShardingEnabled && cfg.Querier.StoreGatewayAddresses == "" {
 			cfg.Querier.StoreGatewayAddresses = fmt.Sprintf("127.0.0.1:%d", cfg.Server.GRPCListenPort)
 		}
 
-		return querier.NewBlocksStoreQueryableFromConfig(cfg.Querier, cfg.StoreGateway, cfg.TSDB, limits, util.Logger, reg)
+		return querier.NewBlocksStoreQueryableFromConfig(cfg.Querier, cfg.StoreGateway, cfg.BlocksStorage, limits, util.Logger, reg)
 
 	default:
 		return nil, fmt.Errorf("unknown storage engine '%s'", engine)
@@ -270,8 +270,8 @@ func initQueryableForEngine(engine string, cfg Config, chunkStore chunk.Store, l
 }
 
 func (t *Cortex) tsdbIngesterConfig() {
-	t.Cfg.Ingester.TSDBEnabled = t.Cfg.Storage.Engine == storage.StorageEngineTSDB
-	t.Cfg.Ingester.TSDBConfig = t.Cfg.TSDB
+	t.Cfg.Ingester.BlocksStorageEnabled = t.Cfg.Storage.Engine == storage.StorageEngineBlocks
+	t.Cfg.Ingester.BlocksStorageConfig = t.Cfg.BlocksStorage
 }
 
 func (t *Cortex) initIngester() (serv services.Service, err error) {
@@ -316,7 +316,7 @@ func (t *Cortex) initChunkStore() (serv services.Service, err error) {
 		return
 	}
 
-	t.Store, err = storage.NewStore(t.Cfg.Storage, t.Cfg.ChunkStore, t.Cfg.Schema, t.Overrides, prometheus.DefaultRegisterer, t.TombstonesLoader)
+	t.Store, err = storage.NewStore(t.Cfg.Storage, t.Cfg.ChunkStore, t.Cfg.Schema, t.Overrides, prometheus.DefaultRegisterer, t.TombstonesLoader, util.Logger)
 	if err != nil {
 		return
 	}
@@ -407,7 +407,7 @@ func (t *Cortex) initQueryFrontend() (serv services.Service, err error) {
 }
 
 func (t *Cortex) initTableManager() (services.Service, error) {
-	if t.Cfg.Storage.Engine == storage.StorageEngineTSDB {
+	if t.Cfg.Storage.Engine == storage.StorageEngineBlocks {
 		return nil, nil // table manager isn't used in v2
 	}
 
@@ -487,7 +487,18 @@ func (t *Cortex) initRuler() (serv services.Service, err error) {
 	rulerRegisterer := prometheus.WrapRegistererWith(prometheus.Labels{"engine": "ruler"}, prometheus.DefaultRegisterer)
 	queryable, engine := querier.New(t.Cfg.Querier, t.Overrides, t.Distributor, t.StoreQueryables, t.TombstonesLoader, rulerRegisterer)
 
-	t.Ruler, err = ruler.NewRuler(t.Cfg.Ruler, engine, queryable, t.Distributor, prometheus.DefaultRegisterer, util.Logger, t.RulerStorage)
+	t.Ruler, err = ruler.NewRuler(
+		t.Cfg.Ruler,
+		ruler.DefaultTenantManagerFactory(
+			t.Cfg.Ruler,
+			t.Distributor,
+			queryable,
+			engine,
+		),
+		prometheus.DefaultRegisterer,
+		util.Logger,
+		t.RulerStorage,
+	)
 	if err != nil {
 		return
 	}
@@ -526,7 +537,7 @@ func (t *Cortex) initCompactor() (serv services.Service, err error) {
 	t.Cfg.Compactor.ShardingRing.ListenPort = t.Cfg.Server.GRPCListenPort
 	t.Cfg.Compactor.ShardingRing.KVStore.MemberlistKV = t.MemberlistKV.GetMemberlistKV
 
-	t.Compactor, err = compactor.NewCompactor(t.Cfg.Compactor, t.Cfg.TSDB, util.Logger, prometheus.DefaultRegisterer)
+	t.Compactor, err = compactor.NewCompactor(t.Cfg.Compactor, t.Cfg.BlocksStorage, util.Logger, prometheus.DefaultRegisterer)
 	if err != nil {
 		return
 	}
@@ -537,14 +548,14 @@ func (t *Cortex) initCompactor() (serv services.Service, err error) {
 }
 
 func (t *Cortex) initStoreGateway() (serv services.Service, err error) {
-	if t.Cfg.Storage.Engine != storage.StorageEngineTSDB {
+	if t.Cfg.Storage.Engine != storage.StorageEngineBlocks {
 		return nil, nil
 	}
 
 	t.Cfg.StoreGateway.ShardingRing.ListenPort = t.Cfg.Server.GRPCListenPort
 	t.Cfg.StoreGateway.ShardingRing.KVStore.MemberlistKV = t.MemberlistKV.GetMemberlistKV
 
-	t.StoreGateway, err = storegateway.NewStoreGateway(t.Cfg.StoreGateway, t.Cfg.TSDB, t.Cfg.Server.LogLevel, util.Logger, prometheus.DefaultRegisterer)
+	t.StoreGateway, err = storegateway.NewStoreGateway(t.Cfg.StoreGateway, t.Cfg.BlocksStorage, t.Overrides, t.Cfg.Server.LogLevel, util.Logger, prometheus.DefaultRegisterer)
 	if err != nil {
 		return nil, err
 	}
@@ -630,7 +641,7 @@ func (t *Cortex) setupModuleManager() error {
 		Configs:        {API},
 		AlertManager:   {API},
 		Compactor:      {API},
-		StoreGateway:   {API},
+		StoreGateway:   {API, Overrides},
 		Purger:         {Store, DeleteRequestsStore, API},
 		All:            {QueryFrontend, Querier, Ingester, Distributor, TableManager, Purger, StoreGateway, Ruler},
 	}
diff --git a/vendor/github.com/cortexproject/cortex/pkg/distributor/query.go b/vendor/github.com/cortexproject/cortex/pkg/distributor/query.go
index fed48e51af9b9..a7813c6453fcd 100644
--- a/vendor/github.com/cortexproject/cortex/pkg/distributor/query.go
+++ b/vendor/github.com/cortexproject/cortex/pkg/distributor/query.go
@@ -7,7 +7,6 @@ import (
 	"github.com/opentracing/opentracing-go"
 	"github.com/prometheus/common/model"
 	"github.com/prometheus/prometheus/pkg/labels"
-	"github.com/prometheus/prometheus/promql"
 	"github.com/weaveworks/common/instrument"
 	"github.com/weaveworks/common/user"
 
@@ -25,12 +24,12 @@ func (d *Distributor) Query(ctx context.Context, from, to model.Time, matchers .
 	err := instrument.CollectedRequest(ctx, "Distributor.Query", queryDuration, instrument.ErrorCode, func(ctx context.Context) error {
 		replicationSet, req, err := d.queryPrep(ctx, from, to, matchers...)
 		if err != nil {
-			return promql.ErrStorage{Err: err}
+			return err
 		}
 
 		matrix, err = d.queryIngesters(ctx, replicationSet, req)
 		if err != nil {
-			return promql.ErrStorage{Err: err}
+			return err
 		}
 
 		if s := opentracing.SpanFromContext(ctx); s != nil {
@@ -47,12 +46,12 @@ func (d *Distributor) QueryStream(ctx context.Context, from, to model.Time, matc
 	err := instrument.CollectedRequest(ctx, "Distributor.QueryStream", queryDuration, instrument.ErrorCode, func(ctx context.Context) error {
 		replicationSet, req, err := d.queryPrep(ctx, from, to, matchers...)
 		if err != nil {
-			return promql.ErrStorage{Err: err}
+			return err
 		}
 
 		result, err = d.queryIngesterStream(ctx, replicationSet, req)
 		if err != nil {
-			return promql.ErrStorage{Err: err}
+			return err
 		}
 
 		if s := opentracing.SpanFromContext(ctx); s != nil {
diff --git a/vendor/github.com/cortexproject/cortex/pkg/ingester/flush.go b/vendor/github.com/cortexproject/cortex/pkg/ingester/flush.go
index 34e3a11e25ed5..9c619f7877826 100644
--- a/vendor/github.com/cortexproject/cortex/pkg/ingester/flush.go
+++ b/vendor/github.com/cortexproject/cortex/pkg/ingester/flush.go
@@ -24,7 +24,7 @@ const (
 // Flush triggers a flush of all the chunks and closes the flush queues.
 // Called from the Lifecycler as part of the ingester shutdown.
 func (i *Ingester) Flush() {
-	if i.cfg.TSDBEnabled {
+	if i.cfg.BlocksStorageEnabled {
 		i.v2LifecyclerFlush()
 		return
 	}
@@ -45,7 +45,7 @@ func (i *Ingester) Flush() {
 // FlushHandler triggers a flush of all in memory chunks.  Mainly used for
 // local testing.
 func (i *Ingester) FlushHandler(w http.ResponseWriter, r *http.Request) {
-	if i.cfg.TSDBEnabled {
+	if i.cfg.BlocksStorageEnabled {
 		i.v2FlushHandler(w, r)
 		return
 	}
@@ -106,6 +106,12 @@ const (
 	reasonIdle
 	reasonStale
 	reasonSpreadFlush
+	// The following are flush outcomes.
+	noUser
+	noSeries
+	noChunks
+	flushError
+	maxFlushReason // Used for testing String() method. Should be last.
 )
 
 func (f flushReason) String() string {
@@ -124,6 +130,14 @@ func (f flushReason) String() string {
 		return "Stale"
 	case reasonSpreadFlush:
 		return "Spread"
+	case noUser:
+		return "NoUser"
+	case noSeries:
+		return "NoSeries"
+	case noChunks:
+		return "NoChunksToFlush"
+	case flushError:
+		return "FlushError"
 	default:
 		panic("unrecognised flushReason")
 	}
@@ -146,6 +160,7 @@ func (i *Ingester) sweepSeries(userID string, fp model.Fingerprint, series *memo
 
 	flushQueueIndex := int(uint64(fp) % uint64(i.cfg.ConcurrentFlushes))
 	if i.flushQueues[flushQueueIndex].Enqueue(&flushOp{firstTime, userID, fp, immediate}) {
+		i.metrics.seriesEnqueuedForFlush.WithLabelValues(flush.String()).Inc()
 		util.Event().Log("msg", "add to flush queue", "userID", userID, "reason", flush, "firstTime", firstTime, "fp", fp, "series", series.metric, "nlabels", len(series.metric), "queue", flushQueueIndex)
 	}
 }
@@ -217,7 +232,8 @@ func (i *Ingester) flushLoop(j int) {
 		}
 		op := o.(*flushOp)
 
-		err := i.flushUserSeries(j, op.userID, op.fp, op.immediate)
+		outcome, err := i.flushUserSeries(j, op.userID, op.fp, op.immediate)
+		i.metrics.seriesDequeuedOutcome.WithLabelValues(outcome.String()).Inc()
 		if err != nil {
 			level.Error(util.WithUserID(op.userID, util.Logger)).Log("msg", "failed to flush user", "err", err)
 		}
@@ -231,7 +247,8 @@ func (i *Ingester) flushLoop(j int) {
 	}
 }
 
-func (i *Ingester) flushUserSeries(flushQueueIndex int, userID string, fp model.Fingerprint, immediate bool) error {
+// Returns the flush outcome: the original reason if the series was flushed, noFlush if it no longer needs flushing, or one of the error outcomes.
+func (i *Ingester) flushUserSeries(flushQueueIndex int, userID string, fp model.Fingerprint, immediate bool) (flushReason, error) {
 	i.metrics.flushSeriesInProgress.Inc()
 	defer i.metrics.flushSeriesInProgress.Dec()
 
@@ -241,19 +258,19 @@ func (i *Ingester) flushUserSeries(flushQueueIndex int, userID string, fp model.
 
 	userState, ok := i.userStates.get(userID)
 	if !ok {
-		return nil
+		return noUser, nil
 	}
 
 	series, ok := userState.fpToSeries.get(fp)
 	if !ok {
-		return nil
+		return noSeries, nil
 	}
 
 	userState.fpLocker.Lock(fp)
 	reason := i.shouldFlushSeries(series, fp, immediate)
 	if reason == noFlush {
 		userState.fpLocker.Unlock(fp)
-		return nil
+		return noFlush, nil
 	}
 
 	// shouldFlushSeries() has told us we have at least one chunk.
@@ -302,11 +319,9 @@ func (i *Ingester) flushUserSeries(flushQueueIndex int, userID string, fp model.
 	}
 
 	if len(chunks) == 0 {
-		return nil
+		return noChunks, nil
 	}
 
-	i.metrics.flushedSeries.WithLabelValues(reason.String()).Inc()
-
 	// flush the chunks without locking the series, as we don't want to hold the series lock for the duration of the dynamo/s3 rpcs.
 	ctx, cancel := context.WithTimeout(context.Background(), i.cfg.FlushOpTimeout)
 	defer cancel() // releases resources if slowOperation completes before timeout elapses
@@ -318,7 +333,7 @@ func (i *Ingester) flushUserSeries(flushQueueIndex int, userID string, fp model.
 	util.Event().Log("msg", "flush chunks", "userID", userID, "reason", reason, "numChunks", len(chunks), "firstTime", chunks[0].FirstTime, "fp", fp, "series", series.metric, "nlabels", len(series.metric), "queue", flushQueueIndex)
 	err := i.flushChunks(ctx, userID, fp, series.metric, chunks)
 	if err != nil {
-		return err
+		return flushError, err
 	}
 
 	userState.fpLocker.Lock(fp)
@@ -329,7 +344,7 @@ func (i *Ingester) flushUserSeries(flushQueueIndex int, userID string, fp model.
 		chunks[i].LastUpdate = model.Now()
 	}
 	userState.fpLocker.Unlock(fp)
-	return nil
+	return reason, err
 }
 
 // must be called under fpLocker lock
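
flushUserSeries now reports one of these outcomes for every dequeued operation, and maxFlushReason exists so a test can walk every reason/outcome value and confirm String() covers it (the default branch panics on unknown values). A sketch of that kind of test, assuming it sits in the ingester package and using an illustrative test name:

    package ingester

    import "testing"

    // Every value below maxFlushReason should have a String() representation,
    // otherwise the dequeue-outcome metric path would panic on that value.
    func TestFlushReasonString(t *testing.T) {
        for r := flushReason(0); r < maxFlushReason; r++ {
            if r.String() == "" {
                t.Errorf("flushReason %d has an empty string representation", r)
            }
        }
    }
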
diff --git a/vendor/github.com/cortexproject/cortex/pkg/ingester/ingester.go b/vendor/github.com/cortexproject/cortex/pkg/ingester/ingester.go
index 79634f77b16ba..2b6cafaa78ebb 100644
--- a/vendor/github.com/cortexproject/cortex/pkg/ingester/ingester.go
+++ b/vendor/github.com/cortexproject/cortex/pkg/ingester/ingester.go
@@ -71,9 +71,9 @@ type Config struct {
 
 	RateUpdatePeriod time.Duration `yaml:"rate_update_period"`
 
-	// Use tsdb block storage
-	TSDBEnabled bool        `yaml:"-"`
-	TSDBConfig  tsdb.Config `yaml:"-"`
+	// Use blocks storage.
+	BlocksStorageEnabled bool                     `yaml:"-"`
+	BlocksStorageConfig  tsdb.BlocksStorageConfig `yaml:"-"`
 
 	// Injected at runtime and read from the distributor config, required
 	// to accurately apply global limits.
@@ -158,7 +158,7 @@ func New(cfg Config, clientConfig client.Config, limits *validation.Overrides, c
 		cfg.ingesterClientFactory = client.MakeIngesterClient
 	}
 
-	if cfg.TSDBEnabled {
+	if cfg.BlocksStorageEnabled {
 		return NewV2(cfg, clientConfig, limits, registerer)
 	}
 
@@ -263,7 +263,7 @@ func (i *Ingester) startFlushLoops() {
 //   * Always replays the WAL.
 //   * Does not start the lifecycler.
 func NewForFlusher(cfg Config, chunkStore ChunkStore, registerer prometheus.Registerer) (*Ingester, error) {
-	if cfg.TSDBEnabled {
+	if cfg.BlocksStorageEnabled {
 		return NewV2ForFlusher(cfg, registerer)
 	}
 
@@ -379,7 +379,7 @@ func (i *Ingester) Push(ctx context.Context, req *client.WriteRequest) (*client.
 		return nil, err
 	}
 
-	if i.cfg.TSDBEnabled {
+	if i.cfg.BlocksStorageEnabled {
 		return i.v2Push(ctx, req)
 	}
 
@@ -619,7 +619,7 @@ func (i *Ingester) Query(ctx context.Context, req *client.QueryRequest) (*client
 		return nil, err
 	}
 
-	if i.cfg.TSDBEnabled {
+	if i.cfg.BlocksStorageEnabled {
 		return i.v2Query(ctx, req)
 	}
 
@@ -686,7 +686,7 @@ func (i *Ingester) QueryStream(req *client.QueryRequest, stream client.Ingester_
 		return err
 	}
 
-	if i.cfg.TSDBEnabled {
+	if i.cfg.BlocksStorageEnabled {
 		return i.v2QueryStream(req, stream)
 	}
 
@@ -767,7 +767,7 @@ func (i *Ingester) LabelValues(ctx context.Context, req *client.LabelValuesReque
 		return nil, err
 	}
 
-	if i.cfg.TSDBEnabled {
+	if i.cfg.BlocksStorageEnabled {
 		return i.v2LabelValues(ctx, req)
 	}
 
@@ -792,7 +792,7 @@ func (i *Ingester) LabelNames(ctx context.Context, req *client.LabelNamesRequest
 		return nil, err
 	}
 
-	if i.cfg.TSDBEnabled {
+	if i.cfg.BlocksStorageEnabled {
 		return i.v2LabelNames(ctx, req)
 	}
 
@@ -817,7 +817,7 @@ func (i *Ingester) MetricsForLabelMatchers(ctx context.Context, req *client.Metr
 		return nil, err
 	}
 
-	if i.cfg.TSDBEnabled {
+	if i.cfg.BlocksStorageEnabled {
 		return i.v2MetricsForLabelMatchers(ctx, req)
 	}
 
@@ -887,7 +887,7 @@ func (i *Ingester) UserStats(ctx context.Context, req *client.UserStatsRequest)
 		return nil, err
 	}
 
-	if i.cfg.TSDBEnabled {
+	if i.cfg.BlocksStorageEnabled {
 		return i.v2UserStats(ctx, req)
 	}
 
@@ -916,7 +916,7 @@ func (i *Ingester) AllUserStats(ctx context.Context, req *client.UserStatsReques
 		return nil, err
 	}
 
-	if i.cfg.TSDBEnabled {
+	if i.cfg.BlocksStorageEnabled {
 		return i.v2AllUserStats(ctx, req)
 	}
 
diff --git a/vendor/github.com/cortexproject/cortex/pkg/ingester/ingester_v2.go b/vendor/github.com/cortexproject/cortex/pkg/ingester/ingester_v2.go
index da21f5da928e3..80a31d7ee983e 100644
--- a/vendor/github.com/cortexproject/cortex/pkg/ingester/ingester_v2.go
+++ b/vendor/github.com/cortexproject/cortex/pkg/ingester/ingester_v2.go
@@ -193,7 +193,7 @@ func newTSDBState(bucketClient objstore.Bucket, registerer prometheus.Registerer
 // NewV2 returns a new Ingester that uses Cortex block storage instead of chunks storage.
 func NewV2(cfg Config, clientConfig client.Config, limits *validation.Overrides, registerer prometheus.Registerer) (*Ingester, error) {
 	util.WarnExperimentalUse("Blocks storage engine")
-	bucketClient, err := cortex_tsdb.NewBucketClient(context.Background(), cfg.TSDBConfig, "ingester", util.Logger, registerer)
+	bucketClient, err := cortex_tsdb.NewBucketClient(context.Background(), cfg.BlocksStorageConfig, "ingester", util.Logger, registerer)
 	if err != nil {
 		return nil, errors.Wrap(err, "failed to create the bucket client")
 	}
@@ -219,7 +219,7 @@ func NewV2(cfg Config, clientConfig client.Config, limits *validation.Overrides,
 		}, i.numSeriesInTSDB)
 	}
 
-	i.lifecycler, err = ring.NewLifecycler(cfg.LifecyclerConfig, i, "ingester", ring.IngesterRingKey, cfg.TSDBConfig.FlushBlocksOnShutdown, registerer)
+	i.lifecycler, err = ring.NewLifecycler(cfg.LifecyclerConfig, i, "ingester", ring.IngesterRingKey, cfg.BlocksStorageConfig.TSDB.FlushBlocksOnShutdown, registerer)
 	if err != nil {
 		return nil, err
 	}
@@ -240,7 +240,7 @@ func NewV2(cfg Config, clientConfig client.Config, limits *validation.Overrides,
 // on Flush method and flush all opened TSDBs when called.
 func NewV2ForFlusher(cfg Config, registerer prometheus.Registerer) (*Ingester, error) {
 	util.WarnExperimentalUse("Blocks storage engine")
-	bucketClient, err := cortex_tsdb.NewBucketClient(context.Background(), cfg.TSDBConfig, "ingester", util.Logger, registerer)
+	bucketClient, err := cortex_tsdb.NewBucketClient(context.Background(), cfg.BlocksStorageConfig, "ingester", util.Logger, registerer)
 	if err != nil {
 		return nil, errors.Wrap(err, "failed to create the bucket client")
 	}
@@ -288,7 +288,7 @@ func (i *Ingester) startingV2(ctx context.Context) error {
 	compactionService := services.NewBasicService(nil, i.compactionLoop, nil)
 	servs = append(servs, compactionService)
 
-	if i.cfg.TSDBConfig.ShipInterval > 0 {
+	if i.cfg.BlocksStorageConfig.TSDB.ShipInterval > 0 {
 		shippingService := services.NewBasicService(nil, i.shipBlocksLoop, nil)
 		servs = append(servs, shippingService)
 	}
@@ -302,7 +302,7 @@ func (i *Ingester) startingV2(ctx context.Context) error {
 }
 
 func (i *Ingester) stoppingV2ForFlusher(_ error) error {
-	if !i.cfg.TSDBConfig.KeepUserTSDBOpenOnShutdown {
+	if !i.cfg.BlocksStorageConfig.TSDB.KeepUserTSDBOpenOnShutdown {
 		i.closeAllTSDB()
 	}
 	return nil
@@ -323,7 +323,7 @@ func (i *Ingester) stoppingV2(_ error) error {
 		level.Warn(util.Logger).Log("msg", "failed to stop ingester lifecycler", "err", err)
 	}
 
-	if !i.cfg.TSDBConfig.KeepUserTSDBOpenOnShutdown {
+	if !i.cfg.BlocksStorageConfig.TSDB.KeepUserTSDBOpenOnShutdown {
 		i.closeAllTSDB()
 	}
 	return nil
@@ -921,10 +921,10 @@ func (i *Ingester) getOrCreateTSDB(userID string, force bool) (*userTSDB, error)
 // createTSDB creates a TSDB for a given userID, and returns the created db.
 func (i *Ingester) createTSDB(userID string) (*userTSDB, error) {
 	tsdbPromReg := prometheus.NewRegistry()
-	udir := i.cfg.TSDBConfig.BlocksDir(userID)
+	udir := i.cfg.BlocksStorageConfig.TSDB.BlocksDir(userID)
 	userLogger := util.WithUserID(userID, util.Logger)
 
-	blockRanges := i.cfg.TSDBConfig.BlockRanges.ToMilliseconds()
+	blockRanges := i.cfg.BlocksStorageConfig.TSDB.BlockRanges.ToMilliseconds()
 
 	userDB := &userTSDB{
 		userID:              userID,
@@ -937,12 +937,12 @@ func (i *Ingester) createTSDB(userID string) (*userTSDB, error) {
 
 	// Create a new user database
 	db, err := tsdb.Open(udir, userLogger, tsdbPromReg, &tsdb.Options{
-		RetentionDuration:       i.cfg.TSDBConfig.Retention.Milliseconds(),
+		RetentionDuration:       i.cfg.BlocksStorageConfig.TSDB.Retention.Milliseconds(),
 		MinBlockDuration:        blockRanges[0],
 		MaxBlockDuration:        blockRanges[len(blockRanges)-1],
 		NoLockfile:              true,
-		StripeSize:              i.cfg.TSDBConfig.StripeSize,
-		WALCompression:          i.cfg.TSDBConfig.WALCompressionEnabled,
+		StripeSize:              i.cfg.BlocksStorageConfig.TSDB.StripeSize,
+		WALCompression:          i.cfg.BlocksStorageConfig.TSDB.WALCompressionEnabled,
 		SeriesLifecycleCallback: userDB,
 	})
 	if err != nil {
@@ -979,7 +979,7 @@ func (i *Ingester) createTSDB(userID string) (*userTSDB, error) {
 	}
 
 	// Create a new shipper for this database
-	if i.cfg.TSDBConfig.ShipInterval > 0 {
+	if i.cfg.BlocksStorageConfig.TSDB.ShipInterval > 0 {
 		userDB.shipper = shipper.New(
 			userLogger,
 			tsdbPromReg,
@@ -1033,15 +1033,15 @@ func (i *Ingester) closeAllTSDB() {
 func (i *Ingester) openExistingTSDB(ctx context.Context) error {
 	level.Info(util.Logger).Log("msg", "opening existing TSDBs")
 	wg := &sync.WaitGroup{}
-	openGate := gate.New(i.cfg.TSDBConfig.MaxTSDBOpeningConcurrencyOnStartup)
+	openGate := gate.New(i.cfg.BlocksStorageConfig.TSDB.MaxTSDBOpeningConcurrencyOnStartup)
 
-	err := filepath.Walk(i.cfg.TSDBConfig.Dir, func(path string, info os.FileInfo, err error) error {
+	err := filepath.Walk(i.cfg.BlocksStorageConfig.TSDB.Dir, func(path string, info os.FileInfo, err error) error {
 		if err != nil {
 			return filepath.SkipDir
 		}
 
 		// Skip root dir and all other files
-		if path == i.cfg.TSDBConfig.Dir || !info.IsDir() {
+		if path == i.cfg.BlocksStorageConfig.TSDB.Dir || !info.IsDir() {
 			return nil
 		}
 
@@ -1116,7 +1116,7 @@ func (i *Ingester) numSeriesInTSDB() float64 {
 }
 
 func (i *Ingester) shipBlocksLoop(ctx context.Context) error {
-	shipTicker := time.NewTicker(i.cfg.TSDBConfig.ShipInterval)
+	shipTicker := time.NewTicker(i.cfg.BlocksStorageConfig.TSDB.ShipInterval)
 	defer shipTicker.Stop()
 
 	for {
@@ -1153,7 +1153,7 @@ func (i *Ingester) shipBlocks(ctx context.Context) {
 
 	// Number of concurrent workers is limited in order to avoid concurrently syncing a lot
 	// of tenants in a large cluster.
-	i.runConcurrentUserWorkers(ctx, i.cfg.TSDBConfig.ShipConcurrency, func(userID string) {
+	i.runConcurrentUserWorkers(ctx, i.cfg.BlocksStorageConfig.TSDB.ShipConcurrency, func(userID string) {
 		// Get the user's DB. If the user doesn't exist, we skip it.
 		userDB := i.getTSDB(userID)
 		if userDB == nil || userDB.shipper == nil {
@@ -1170,7 +1170,7 @@ func (i *Ingester) shipBlocks(ctx context.Context) {
 }
 
 func (i *Ingester) compactionLoop(ctx context.Context) error {
-	ticker := time.NewTicker(i.cfg.TSDBConfig.HeadCompactionInterval)
+	ticker := time.NewTicker(i.cfg.BlocksStorageConfig.TSDB.HeadCompactionInterval)
 	defer ticker.Stop()
 
 	for {
@@ -1204,7 +1204,7 @@ func (i *Ingester) compactBlocks(ctx context.Context, force bool) {
 		}
 	}
 
-	i.runConcurrentUserWorkers(ctx, i.cfg.TSDBConfig.HeadCompactionConcurrency, func(userID string) {
+	i.runConcurrentUserWorkers(ctx, i.cfg.BlocksStorageConfig.TSDB.HeadCompactionConcurrency, func(userID string) {
 		userDB := i.getTSDB(userID)
 		if userDB == nil {
 			return
@@ -1226,7 +1226,7 @@ func (i *Ingester) compactBlocks(ctx context.Context, force bool) {
 			reason = "forced"
 			err = userDB.CompactHead(tsdb.NewRangeHead(h, h.MinTime(), h.MaxTime()))
 
-		case i.cfg.TSDBConfig.HeadCompactionIdleTimeout > 0 && userDB.isIdle(time.Now(), i.cfg.TSDBConfig.HeadCompactionIdleTimeout):
+		case i.cfg.BlocksStorageConfig.TSDB.HeadCompactionIdleTimeout > 0 && userDB.isIdle(time.Now(), i.cfg.BlocksStorageConfig.TSDB.HeadCompactionIdleTimeout):
 			reason = "idle"
 			level.Info(util.Logger).Log("msg", "TSDB is idle, forcing compaction", "user", userID)
 			err = userDB.CompactHead(tsdb.NewRangeHead(h, h.MinTime(), h.MaxTime()))
@@ -1289,7 +1289,7 @@ func (i *Ingester) v2LifecyclerFlush() {
 	ctx := context.Background()
 
 	i.compactBlocks(ctx, true)
-	if i.cfg.TSDBConfig.ShipInterval > 0 {
+	if i.cfg.BlocksStorageConfig.TSDB.ShipInterval > 0 {
 		i.shipBlocks(ctx)
 	}
 
@@ -1325,7 +1325,7 @@ func (i *Ingester) v2FlushHandler(w http.ResponseWriter, _ *http.Request) {
 			return
 		}
 
-		if i.cfg.TSDBConfig.ShipInterval > 0 {
+		if i.cfg.BlocksStorageConfig.TSDB.ShipInterval > 0 {
 			level.Info(util.Logger).Log("msg", "flushing TSDB blocks: triggering shipping")
 
 			select {
diff --git a/vendor/github.com/cortexproject/cortex/pkg/ingester/mapper.go b/vendor/github.com/cortexproject/cortex/pkg/ingester/mapper.go
index 87c1f622b7d49..18977e7176e7a 100644
--- a/vendor/github.com/cortexproject/cortex/pkg/ingester/mapper.go
+++ b/vendor/github.com/cortexproject/cortex/pkg/ingester/mapper.go
@@ -5,10 +5,10 @@ import (
 	"sort"
 	"strings"
 	"sync"
-	"sync/atomic"
 
 	"github.com/go-kit/kit/log/level"
 	"github.com/prometheus/common/model"
+	"go.uber.org/atomic"
 
 	"github.com/cortexproject/cortex/pkg/util"
 )
@@ -24,8 +24,7 @@ type fpMappings map[model.Fingerprint]map[string]model.Fingerprint
 // fpMapper is used to map fingerprints in order to work around fingerprint
 // collisions.
 type fpMapper struct {
-	// highestMappedFP has to be aligned for atomic operations.
-	highestMappedFP model.Fingerprint
+	highestMappedFP atomic.Uint64
 
 	mtx      sync.RWMutex // Protects mappings.
 	mappings fpMappings
@@ -130,7 +129,7 @@ func (m *fpMapper) maybeAddMapping(
 }
 
 func (m *fpMapper) nextMappedFP() model.Fingerprint {
-	mappedFP := model.Fingerprint(atomic.AddUint64((*uint64)(&m.highestMappedFP), 1))
+	mappedFP := model.Fingerprint(m.highestMappedFP.Inc())
 	if mappedFP > maxMappedFP {
 		panic(fmt.Errorf("more than %v fingerprints mapped in collision detection", maxMappedFP))
 	}
diff --git a/vendor/github.com/cortexproject/cortex/pkg/ingester/metrics.go b/vendor/github.com/cortexproject/cortex/pkg/ingester/metrics.go
index 22e5ebc565426..b052c3b996c4d 100644
--- a/vendor/github.com/cortexproject/cortex/pkg/ingester/metrics.go
+++ b/vendor/github.com/cortexproject/cortex/pkg/ingester/metrics.go
@@ -53,7 +53,8 @@ type ingesterMetrics struct {
 	chunkSize                     prometheus.Histogram
 	chunkAge                      prometheus.Histogram
 	memoryChunks                  prometheus.Gauge
-	flushedSeries                 *prometheus.CounterVec
+	seriesEnqueuedForFlush        *prometheus.CounterVec
+	seriesDequeuedOutcome         *prometheus.CounterVec
 	droppedChunks                 prometheus.Counter
 	oldestUnflushedChunkTimestamp prometheus.Gauge
 }
@@ -192,10 +193,14 @@ func newIngesterMetrics(r prometheus.Registerer, createMetricsConflictingWithTSD
 			Name: "cortex_ingester_memory_chunks",
 			Help: "The total number of chunks in memory.",
 		}),
-		flushedSeries: promauto.With(r).NewCounterVec(prometheus.CounterOpts{
-			Name: "cortex_ingester_series_flushed_total",
-			Help: "Total number of flushed series, with reasons.",
+		seriesEnqueuedForFlush: promauto.With(r).NewCounterVec(prometheus.CounterOpts{
+			Name: "cortex_ingester_flushing_enqueued_series_total",
+			Help: "Total number of series enqueued for flushing, with reasons.",
 		}, []string{"reason"}),
+		seriesDequeuedOutcome: promauto.With(r).NewCounterVec(prometheus.CounterOpts{
+			Name: "cortex_ingester_flushing_dequeued_series_total",
+			Help: "Total number of series dequeued for flushing, with outcome (superset of enqueue reasons).",
+		}, []string{"outcome"}),
 		droppedChunks: promauto.With(r).NewCounter(prometheus.CounterOpts{
 			Name: "cortex_ingester_dropped_chunks_total",
 			Help: "Total number of chunks dropped from flushing because they have too few samples.",
diff --git a/vendor/github.com/cortexproject/cortex/pkg/ingester/rate.go b/vendor/github.com/cortexproject/cortex/pkg/ingester/rate.go
index d0c348677b4ef..ecabd8b783a0b 100644
--- a/vendor/github.com/cortexproject/cortex/pkg/ingester/rate.go
+++ b/vendor/github.com/cortexproject/cortex/pkg/ingester/rate.go
@@ -2,13 +2,14 @@ package ingester
 
 import (
 	"sync"
-	"sync/atomic"
 	"time"
+
+	"go.uber.org/atomic"
 )
 
 // ewmaRate tracks an exponentially weighted moving average of a per-second rate.
 type ewmaRate struct {
-	newEvents int64
+	newEvents atomic.Int64
 	alpha     float64
 	interval  time.Duration
 	lastRate  float64
@@ -32,8 +33,8 @@ func (r *ewmaRate) rate() float64 {
 
 // tick assumes to be called every r.interval.
 func (r *ewmaRate) tick() {
-	newEvents := atomic.LoadInt64(&r.newEvents)
-	atomic.AddInt64(&r.newEvents, -newEvents)
+	newEvents := r.newEvents.Load()
+	r.newEvents.Sub(newEvents)
 	instantRate := float64(newEvents) / r.interval.Seconds()
 
 	r.mutex.Lock()
@@ -49,9 +50,9 @@ func (r *ewmaRate) tick() {
 
 // inc counts one event.
 func (r *ewmaRate) inc() {
-	atomic.AddInt64(&r.newEvents, 1)
+	r.newEvents.Inc()
 }
 
 func (r *ewmaRate) add(delta int64) {
-	atomic.AddInt64(&r.newEvents, delta)
+	r.newEvents.Add(delta)
 }
diff --git a/vendor/github.com/cortexproject/cortex/pkg/ingester/series_map.go b/vendor/github.com/cortexproject/cortex/pkg/ingester/series_map.go
index a8e4ba70613b1..4d4a9a5b6694a 100644
--- a/vendor/github.com/cortexproject/cortex/pkg/ingester/series_map.go
+++ b/vendor/github.com/cortexproject/cortex/pkg/ingester/series_map.go
@@ -2,10 +2,10 @@ package ingester
 
 import (
 	"sync"
-	"sync/atomic"
 	"unsafe"
 
 	"github.com/prometheus/common/model"
+	"go.uber.org/atomic"
 
 	"github.com/cortexproject/cortex/pkg/util"
 )
@@ -16,7 +16,7 @@ const seriesMapShards = 128
 // goroutine-safe. A seriesMap is effectively a goroutine-safe version of
 // map[model.Fingerprint]*memorySeries.
 type seriesMap struct {
-	size   int32
+	size   atomic.Int32
 	shards []shard
 }
 
@@ -65,7 +65,7 @@ func (sm *seriesMap) put(fp model.Fingerprint, s *memorySeries) {
 	shard.mtx.Unlock()
 
 	if !ok {
-		atomic.AddInt32(&sm.size, 1)
+		sm.size.Inc()
 	}
 }
 
@@ -77,7 +77,7 @@ func (sm *seriesMap) del(fp model.Fingerprint) {
 	delete(shard.m, fp)
 	shard.mtx.Unlock()
 	if ok {
-		atomic.AddInt32(&sm.size, -1)
+		sm.size.Dec()
 	}
 }
 
@@ -106,5 +106,5 @@ func (sm *seriesMap) iter() <-chan fingerprintSeriesPair {
 }
 
 func (sm *seriesMap) length() int {
-	return int(atomic.LoadInt32(&sm.size))
+	return int(sm.size.Load())
 }
diff --git a/vendor/github.com/cortexproject/cortex/pkg/ingester/transfer.go b/vendor/github.com/cortexproject/cortex/pkg/ingester/transfer.go
index 5310353914c6b..383f00d78f6a0 100644
--- a/vendor/github.com/cortexproject/cortex/pkg/ingester/transfer.go
+++ b/vendor/github.com/cortexproject/cortex/pkg/ingester/transfer.go
@@ -221,7 +221,7 @@ func (i *Ingester) TransferTSDB(stream client.Ingester_TransferTSDBServer) error
 	xfer := func() error {
 
 		// Validate the final directory is empty, if it exists and is empty delete it so a move can succeed
-		err := removeEmptyDir(i.cfg.TSDBConfig.Dir)
+		err := removeEmptyDir(i.cfg.BlocksStorageConfig.TSDB.Dir)
 		if err != nil {
 			return errors.Wrap(err, "remove existing TSDB directory")
 		}
@@ -304,9 +304,9 @@ func (i *Ingester) TransferTSDB(stream client.Ingester_TransferTSDBServer) error
 		level.Info(util.Logger).Log("msg", "Total xfer", "from_ingester", fromIngesterID, "files", filesXfer, "bytes", bytesXfer)
 
 		// Move the tmpdir to the final location
-		err = os.Rename(tmpDir, i.cfg.TSDBConfig.Dir)
+		err = os.Rename(tmpDir, i.cfg.BlocksStorageConfig.TSDB.Dir)
 		if err != nil {
-			return errors.Wrap(err, fmt.Sprintf("unable to move received TSDB blocks from %s to %s", tmpDir, i.cfg.TSDBConfig.Dir))
+			return errors.Wrap(err, fmt.Sprintf("unable to move received TSDB blocks from %s to %s", tmpDir, i.cfg.BlocksStorageConfig.TSDB.Dir))
 		}
 
 		// At this point all TSDBs have been received, so we can proceed loading TSDBs in memory.
@@ -315,9 +315,9 @@ func (i *Ingester) TransferTSDB(stream client.Ingester_TransferTSDBServer) error
 		// 2. If a query is received on user X, for which the TSDB has been transferred, before
 		//    the first series is ingested, if we don't open the TSDB the query will return an
 		//    empty result (because the TSDB is opened only on first push or transfer)
-		userIDs, err := ioutil.ReadDir(i.cfg.TSDBConfig.Dir)
+		userIDs, err := ioutil.ReadDir(i.cfg.BlocksStorageConfig.TSDB.Dir)
 		if err != nil {
-			return errors.Wrap(err, fmt.Sprintf("unable to list TSDB users in %s", i.cfg.TSDBConfig.Dir))
+			return errors.Wrap(err, fmt.Sprintf("unable to list TSDB users in %s", i.cfg.BlocksStorageConfig.TSDB.Dir))
 		}
 
 		for _, user := range userIDs {
@@ -438,7 +438,7 @@ func (i *Ingester) TransferOut(ctx context.Context) error {
 }
 
 func (i *Ingester) transferOut(ctx context.Context) error {
-	if i.cfg.TSDBEnabled {
+	if i.cfg.BlocksStorageEnabled {
 		return i.v2TransferOut(ctx)
 	}
 
@@ -584,7 +584,7 @@ func (i *Ingester) v2TransferOut(ctx context.Context) error {
 	}
 
 	// Grab a list of all blocks that need to be shipped
-	blocks, err := unshippedBlocks(i.cfg.TSDBConfig.Dir)
+	blocks, err := unshippedBlocks(i.cfg.BlocksStorageConfig.TSDB.Dir)
 	if err != nil {
 		return err
 	}
@@ -592,7 +592,7 @@ func (i *Ingester) v2TransferOut(ctx context.Context) error {
 	for user, blockIDs := range blocks {
 		// Transfer the users TSDB
 		// TODO(thor) transferring users can be done concurrently
-		i.transferUser(ctx, stream, i.cfg.TSDBConfig.Dir, i.lifecycler.ID, user, blockIDs)
+		i.transferUser(ctx, stream, i.cfg.BlocksStorageConfig.TSDB.Dir, i.lifecycler.ID, user, blockIDs)
 	}
 
 	_, err = stream.CloseAndRecv()
diff --git a/vendor/github.com/cortexproject/cortex/pkg/querier/block.go b/vendor/github.com/cortexproject/cortex/pkg/querier/block.go
index 5cb7648b030dd..cfc90fcdb838a 100644
--- a/vendor/github.com/cortexproject/cortex/pkg/querier/block.go
+++ b/vendor/github.com/cortexproject/cortex/pkg/querier/block.go
@@ -6,7 +6,6 @@ import (
 
 	"github.com/pkg/errors"
 	"github.com/prometheus/prometheus/pkg/labels"
-	"github.com/prometheus/prometheus/promql"
 	"github.com/prometheus/prometheus/storage"
 	"github.com/prometheus/prometheus/tsdb/chunkenc"
 	"github.com/thanos-io/thanos/pkg/store/storepb"
@@ -207,7 +206,7 @@ func (it *blockQuerierSeriesIterator) Err() error {
 
 	err := it.iterators[it.i].Err()
 	if err != nil {
-		return promql.ErrStorage{Err: errors.Wrapf(err, "cannot iterate chunk for series: %v", it.labels)}
+		return errors.Wrapf(err, "cannot iterate chunk for series: %v", it.labels)
 	}
 	return nil
 }
diff --git a/vendor/github.com/cortexproject/cortex/pkg/querier/blocks_store_queryable.go b/vendor/github.com/cortexproject/cortex/pkg/querier/blocks_store_queryable.go
index 6c8e392cacbeb..e0c366d7e65c3 100644
--- a/vendor/github.com/cortexproject/cortex/pkg/querier/blocks_store_queryable.go
+++ b/vendor/github.com/cortexproject/cortex/pkg/querier/blocks_store_queryable.go
@@ -16,7 +16,6 @@ import (
 	"github.com/prometheus/client_golang/prometheus"
 	"github.com/prometheus/client_golang/prometheus/promauto"
 	"github.com/prometheus/prometheus/pkg/labels"
-	"github.com/prometheus/prometheus/promql"
 	"github.com/prometheus/prometheus/storage"
 	"github.com/thanos-io/thanos/pkg/block"
 	"github.com/thanos-io/thanos/pkg/block/metadata"
@@ -150,7 +149,7 @@ func NewBlocksStoreQueryable(stores BlocksStoreSet, finder BlocksFinder, consist
 	return q, nil
 }
 
-func NewBlocksStoreQueryableFromConfig(querierCfg Config, gatewayCfg storegateway.Config, storageCfg cortex_tsdb.Config, limits BlocksStoreLimits, logger log.Logger, reg prometheus.Registerer) (*BlocksStoreQueryable, error) {
+func NewBlocksStoreQueryableFromConfig(querierCfg Config, gatewayCfg storegateway.Config, storageCfg cortex_tsdb.BlocksStorageConfig, limits BlocksStoreLimits, logger log.Logger, reg prometheus.Registerer) (*BlocksStoreQueryable, error) {
 	var stores BlocksStoreSet
 
 	bucketClient, err := cortex_tsdb.NewBucketClient(context.Background(), storageCfg, "querier", logger, reg)
@@ -249,12 +248,12 @@ func (q *BlocksStoreQueryable) stopping(_ error) error {
 // Querier returns a new Querier on the storage.
 func (q *BlocksStoreQueryable) Querier(ctx context.Context, mint, maxt int64) (storage.Querier, error) {
 	if s := q.State(); s != services.Running {
-		return nil, promql.ErrStorage{Err: errors.Errorf("BlocksStoreQueryable is not running: %v", s)}
+		return nil, errors.Errorf("BlocksStoreQueryable is not running: %v", s)
 	}
 
 	userID, err := user.ExtractOrgID(ctx)
 	if err != nil {
-		return nil, promql.ErrStorage{Err: err}
+		return nil, err
 	}
 
 	return &blocksStoreQuerier{
@@ -291,14 +290,7 @@ type blocksStoreQuerier struct {
 // Select implements storage.Querier interface.
 // The bool passed is ignored because the series is always sorted.
 func (q *blocksStoreQuerier) Select(_ bool, sp *storage.SelectHints, matchers ...*labels.Matcher) storage.SeriesSet {
-	set := q.selectSorted(sp, matchers...)
-
-	// We need to wrap the error in order to have Prometheus returning a 5xx error.
-	if err := set.Err(); err != nil && !errors.Is(err, context.Canceled) && !errors.Is(err, context.DeadlineExceeded) {
-		set = storage.ErrSeriesSet(promql.ErrStorage{Err: err})
-	}
-
-	return set
+	return q.selectSorted(sp, matchers...)
 }
 
 func (q *blocksStoreQuerier) LabelValues(name string) ([]string, storage.Warnings, error) {
@@ -334,7 +326,7 @@ func (q *blocksStoreQuerier) selectSorted(sp *storage.SelectHints, matchers ...*
 		maxT = util.Min64(maxT, util.TimeToMillis(now.Add(-q.queryStoreAfter)))
 
 		if origMaxT != maxT {
-			level.Debug(spanLog).Log("msg", "query max time has been manipulated", "original", origMaxT, "updated", maxT)
+			level.Debug(spanLog).Log("msg", "the max time of the query to blocks storage has been manipulated", "original", origMaxT, "updated", maxT)
 		}
 
 		if maxT < minT {
diff --git a/vendor/github.com/cortexproject/cortex/pkg/querier/chunk_store_queryable.go b/vendor/github.com/cortexproject/cortex/pkg/querier/chunk_store_queryable.go
index 50fa8573fb642..b36db0a2429df 100644
--- a/vendor/github.com/cortexproject/cortex/pkg/querier/chunk_store_queryable.go
+++ b/vendor/github.com/cortexproject/cortex/pkg/querier/chunk_store_queryable.go
@@ -5,7 +5,6 @@ import (
 
 	"github.com/prometheus/common/model"
 	"github.com/prometheus/prometheus/pkg/labels"
-	"github.com/prometheus/prometheus/promql"
 	"github.com/prometheus/prometheus/storage"
 	"github.com/prometheus/prometheus/tsdb/chunkenc"
 	"github.com/weaveworks/common/user"
@@ -46,19 +45,7 @@ func (q *chunkStoreQuerier) Select(_ bool, sp *storage.SelectHints, matchers ...
 	}
 	chunks, err := q.store.Get(q.ctx, userID, model.Time(sp.Start), model.Time(sp.End), matchers...)
 	if err != nil {
-		switch err.(type) {
-		case promql.ErrStorage, promql.ErrTooManySamples, promql.ErrQueryCanceled, promql.ErrQueryTimeout:
-			// Recognized by Prometheus API, vendor/github.com/prometheus/prometheus/promql/engine.go:91.
-			// Don't translate those, just in case we use them internally.
-			return storage.ErrSeriesSet(err)
-		case chunk.QueryError:
-			// This will be returned with status code 422 by Prometheus API.
-			// vendor/github.com/prometheus/prometheus/web/api/v1/api.go:1393
-			return storage.ErrSeriesSet(err)
-		default:
-			// All other errors will be returned as 500.
-			return storage.ErrSeriesSet(promql.ErrStorage{Err: err})
-		}
+		return storage.ErrSeriesSet(err)
 	}
 
 	return partitionChunks(chunks, q.mint, q.maxt, q.chunkIteratorFunc)
diff --git a/vendor/github.com/cortexproject/cortex/pkg/querier/distributor_queryable.go b/vendor/github.com/cortexproject/cortex/pkg/querier/distributor_queryable.go
index 675946f0e4ccf..4912832a2c680 100644
--- a/vendor/github.com/cortexproject/cortex/pkg/querier/distributor_queryable.go
+++ b/vendor/github.com/cortexproject/cortex/pkg/querier/distributor_queryable.go
@@ -5,9 +5,9 @@ import (
 	"sort"
 	"time"
 
+	"github.com/go-kit/kit/log/level"
 	"github.com/prometheus/common/model"
 	"github.com/prometheus/prometheus/pkg/labels"
-	"github.com/prometheus/prometheus/promql"
 	"github.com/prometheus/prometheus/scrape"
 	"github.com/prometheus/prometheus/storage"
 	"github.com/weaveworks/common/user"
@@ -31,44 +31,46 @@ type Distributor interface {
 	MetricsMetadata(ctx context.Context) ([]scrape.MetricMetadata, error)
 }
 
-func newDistributorQueryable(distributor Distributor, streaming bool, iteratorFn chunkIteratorFunc, queryIngesterWithin time.Duration) QueryableWithFilter {
+func newDistributorQueryable(distributor Distributor, streaming bool, iteratorFn chunkIteratorFunc, queryIngestersWithin time.Duration) QueryableWithFilter {
 	return distributorQueryable{
-		distributor:         distributor,
-		streaming:           streaming,
-		iteratorFn:          iteratorFn,
-		queryIngesterWithin: queryIngesterWithin,
+		distributor:          distributor,
+		streaming:            streaming,
+		iteratorFn:           iteratorFn,
+		queryIngestersWithin: queryIngestersWithin,
 	}
 }
 
 type distributorQueryable struct {
-	distributor         Distributor
-	streaming           bool
-	iteratorFn          chunkIteratorFunc
-	queryIngesterWithin time.Duration
+	distributor          Distributor
+	streaming            bool
+	iteratorFn           chunkIteratorFunc
+	queryIngestersWithin time.Duration
 }
 
 func (d distributorQueryable) Querier(ctx context.Context, mint, maxt int64) (storage.Querier, error) {
 	return &distributorQuerier{
-		distributor: d.distributor,
-		ctx:         ctx,
-		mint:        mint,
-		maxt:        maxt,
-		streaming:   d.streaming,
-		chunkIterFn: d.iteratorFn,
+		distributor:          d.distributor,
+		ctx:                  ctx,
+		mint:                 mint,
+		maxt:                 maxt,
+		streaming:            d.streaming,
+		chunkIterFn:          d.iteratorFn,
+		queryIngestersWithin: d.queryIngestersWithin,
 	}, nil
 }
 
 func (d distributorQueryable) UseQueryable(now time.Time, _, queryMaxT int64) bool {
 	// Include ingester only if maxt is within QueryIngestersWithin w.r.t. current time.
-	return d.queryIngesterWithin == 0 || queryMaxT >= util.TimeToMillis(now.Add(-d.queryIngesterWithin))
+	return d.queryIngestersWithin == 0 || queryMaxT >= util.TimeToMillis(now.Add(-d.queryIngestersWithin))
 }
 
 type distributorQuerier struct {
-	distributor Distributor
-	ctx         context.Context
-	mint, maxt  int64
-	streaming   bool
-	chunkIterFn chunkIteratorFunc
+	distributor          Distributor
+	ctx                  context.Context
+	mint, maxt           int64
+	streaming            bool
+	chunkIterFn          chunkIteratorFunc
+	queryIngestersWithin time.Duration
 }
 
 // Select implements storage.Querier interface.
@@ -77,42 +79,62 @@ func (q *distributorQuerier) Select(_ bool, sp *storage.SelectHints, matchers ..
 	log, ctx := spanlogger.New(q.ctx, "distributorQuerier.Select")
 	defer log.Span.Finish()
 
+	minT, maxT := q.mint, q.maxt
+	if sp != nil {
+		minT, maxT = sp.Start, sp.End
+	}
+
+	// If queryIngestersWithin is enabled, manipulate the query min time so we only query samples up
+	// until now - queryIngestersWithin, because older time ranges are covered by the storage. This
+	// optimization is particularly important for the blocks storage, where the blocks retention in the
+	// ingesters could be much longer than queryIngestersWithin.
+	if q.queryIngestersWithin > 0 {
+		now := time.Now()
+		origMinT := minT
+		minT = util.Max64(minT, util.TimeToMillis(now.Add(-q.queryIngestersWithin)))
+
+		if origMinT != minT {
+			level.Debug(log).Log("msg", "the min time of the query to ingesters has been manipulated", "original", origMinT, "updated", minT)
+		}
+
+		if minT > maxT {
+			level.Debug(log).Log("msg", "empty query time range after min time manipulation")
+			return storage.EmptySeriesSet()
+		}
+	}
+
 	// Kludge: Prometheus passes nil SelectParams if it is doing a 'series' operation,
 	// which needs only metadata.
 	if sp == nil {
-		ms, err := q.distributor.MetricsForLabelMatchers(ctx, model.Time(q.mint), model.Time(q.maxt), matchers...)
+		ms, err := q.distributor.MetricsForLabelMatchers(ctx, model.Time(minT), model.Time(maxT), matchers...)
 		if err != nil {
 			return storage.ErrSeriesSet(err)
 		}
 		return series.MetricsToSeriesSet(ms)
 	}
 
-	mint, maxt := sp.Start, sp.End
-
 	if q.streaming {
-		return q.streamingSelect(*sp, matchers)
+		return q.streamingSelect(minT, maxT, matchers)
 	}
 
-	matrix, err := q.distributor.Query(ctx, model.Time(mint), model.Time(maxt), matchers...)
+	matrix, err := q.distributor.Query(ctx, model.Time(minT), model.Time(maxT), matchers...)
 	if err != nil {
-		return storage.ErrSeriesSet(promql.ErrStorage{Err: err})
+		return storage.ErrSeriesSet(err)
 	}
 
 	// Using MatrixToSeriesSet (and in turn NewConcreteSeriesSet), sorts the series.
 	return series.MatrixToSeriesSet(matrix)
 }
 
-func (q *distributorQuerier) streamingSelect(sp storage.SelectHints, matchers []*labels.Matcher) storage.SeriesSet {
+func (q *distributorQuerier) streamingSelect(minT, maxT int64, matchers []*labels.Matcher) storage.SeriesSet {
 	userID, err := user.ExtractOrgID(q.ctx)
 	if err != nil {
-		return storage.ErrSeriesSet(promql.ErrStorage{Err: err})
+		return storage.ErrSeriesSet(err)
 	}
 
-	mint, maxt := sp.Start, sp.End
-
-	results, err := q.distributor.QueryStream(q.ctx, model.Time(mint), model.Time(maxt), matchers...)
+	results, err := q.distributor.QueryStream(q.ctx, model.Time(minT), model.Time(maxT), matchers...)
 	if err != nil {
-		return storage.ErrSeriesSet(promql.ErrStorage{Err: err})
+		return storage.ErrSeriesSet(err)
 	}
 
 	if len(results.Timeseries) != 0 {
@@ -131,7 +153,7 @@ func (q *distributorQuerier) streamingSelect(sp storage.SelectHints, matchers []
 
 		chunks, err := chunkcompat.FromChunks(userID, ls, result.Chunks)
 		if err != nil {
-			return storage.ErrSeriesSet(promql.ErrStorage{Err: err})
+			return storage.ErrSeriesSet(err)
 		}
 
 		series := &chunkSeries{
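
For reference, a small self-contained sketch of the min-time clamping that the new Select path performs when queryIngestersWithin is set; it uses plain millisecond math instead of the Cortex util helpers, and all names are illustrative:

package main

import (
	"fmt"
	"time"
)

// clampMinT raises minT so ingesters are only asked for the window they still hold.
// It returns the (possibly adjusted) bounds and whether the resulting range is empty.
func clampMinT(minT, maxT int64, queryIngestersWithin time.Duration, now time.Time) (int64, int64, bool) {
	if queryIngestersWithin > 0 {
		lowerBound := now.Add(-queryIngestersWithin).UnixNano() / int64(time.Millisecond)
		if lowerBound > minT {
			minT = lowerBound
		}
	}
	return minT, maxT, minT > maxT
}

func main() {
	now := time.Now()
	minT := now.Add(-24*time.Hour).UnixNano() / int64(time.Millisecond)
	maxT := now.UnixNano() / int64(time.Millisecond)

	newMin, newMax, empty := clampMinT(minT, maxT, 12*time.Hour, now)
	fmt.Println(newMin, newMax, empty) // minT raised to now-12h, range not empty
}
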
diff --git a/vendor/github.com/cortexproject/cortex/pkg/querier/frontend/frontend.go b/vendor/github.com/cortexproject/cortex/pkg/querier/frontend/frontend.go
index f629a0aafe5df..d6beed38322ec 100644
--- a/vendor/github.com/cortexproject/cortex/pkg/querier/frontend/frontend.go
+++ b/vendor/github.com/cortexproject/cortex/pkg/querier/frontend/frontend.go
@@ -71,7 +71,7 @@ type Frontend struct {
 
 	// Metrics.
 	queueDuration prometheus.Histogram
-	queueLength   prometheus.Gauge
+	queueLength   *prometheus.GaugeVec
 }
 
 type request struct {
@@ -96,11 +96,11 @@ func New(cfg Config, log log.Logger, registerer prometheus.Registerer) (*Fronten
 			Help:      "Time spend by requests queued.",
 			Buckets:   prometheus.DefBuckets,
 		}),
-		queueLength: promauto.With(registerer).NewGauge(prometheus.GaugeOpts{
+		queueLength: promauto.With(registerer).NewGaugeVec(prometheus.GaugeOpts{
 			Namespace: "cortex",
 			Name:      "query_frontend_queue_length",
 			Help:      "Number of queries in the queue.",
-		}),
+		}, []string{"user"}),
 		connectedClients: atomic.NewInt32(0),
 	}
 	f.cond = sync.NewCond(&f.mtx)
@@ -363,7 +363,7 @@ func (f *Frontend) queueRequest(ctx context.Context, req *request) error {
 
 	select {
 	case queue <- req:
-		f.queueLength.Add(1)
+		f.queueLength.WithLabelValues(userID).Inc()
 		f.cond.Broadcast()
 		return nil
 	default:
@@ -416,7 +416,7 @@ FindQueue:
 			f.cond.Broadcast()
 
 			f.queueDuration.Observe(time.Since(request.enqueueTime).Seconds())
-			f.queueLength.Add(-1)
+			f.queueLength.WithLabelValues(userID).Dec()
 			request.queueSpan.Finish()
 
 			// Ensure the request has not already expired.
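
For reference, a brief sketch of the per-tenant queue-length metric this hunk introduces: the single Gauge becomes a GaugeVec keyed by a user label, incremented on enqueue and decremented on dequeue. The metric and label names mirror the diff; the enqueue/dequeue helpers are illustrative:

package main

import (
	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promauto"
)

var queueLength = promauto.With(prometheus.DefaultRegisterer).NewGaugeVec(prometheus.GaugeOpts{
	Namespace: "cortex",
	Name:      "query_frontend_queue_length",
	Help:      "Number of queries in the queue.",
}, []string{"user"})

// enqueue and dequeue bump the per-tenant series instead of a global gauge.
func enqueue(userID string) { queueLength.WithLabelValues(userID).Inc() }
func dequeue(userID string) { queueLength.WithLabelValues(userID).Dec() }

func main() {
	enqueue("tenant-1")
	dequeue("tenant-1")
}
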
diff --git a/vendor/github.com/cortexproject/cortex/pkg/querier/querier.go b/vendor/github.com/cortexproject/cortex/pkg/querier/querier.go
index 46e5b1e8dc18f..1fb6915bbb21d 100644
--- a/vendor/github.com/cortexproject/cortex/pkg/querier/querier.go
+++ b/vendor/github.com/cortexproject/cortex/pkg/querier/querier.go
@@ -296,7 +296,7 @@ func (q querier) Select(_ bool, sp *storage.SelectHints, matchers ...*labels.Mat
 
 	userID, err := user.ExtractOrgID(ctx)
 	if err != nil {
-		return storage.ErrSeriesSet(promql.ErrStorage{Err: err})
+		return storage.ErrSeriesSet(err)
 	}
 
 	// Validate query time range.
@@ -308,7 +308,7 @@ func (q querier) Select(_ bool, sp *storage.SelectHints, matchers ...*labels.Mat
 
 	tombstones, err := q.tombstonesLoader.GetPendingTombstonesForInterval(userID, startTime, endTime)
 	if err != nil {
-		return storage.ErrSeriesSet(promql.ErrStorage{Err: err})
+		return storage.ErrSeriesSet(err)
 	}
 
 	if len(q.queriers) == 1 {
diff --git a/vendor/github.com/cortexproject/cortex/pkg/querier/queryrange/limits.go b/vendor/github.com/cortexproject/cortex/pkg/querier/queryrange/limits.go
index 2256be9b1a4e9..54bb46f6d9c74 100644
--- a/vendor/github.com/cortexproject/cortex/pkg/querier/queryrange/limits.go
+++ b/vendor/github.com/cortexproject/cortex/pkg/querier/queryrange/limits.go
@@ -43,7 +43,7 @@ func (l limits) Do(ctx context.Context, r Request) (Response, error) {
 
 	maxQueryLen := l.MaxQueryLength(userid)
 	queryLen := timestamp.Time(r.GetEnd()).Sub(timestamp.Time(r.GetStart()))
-	if maxQueryLen != 0 && queryLen > maxQueryLen {
+	if maxQueryLen > 0 && queryLen > maxQueryLen {
 		return nil, httpgrpc.Errorf(http.StatusBadRequest, validation.ErrQueryTooLong, queryLen, maxQueryLen)
 	}
 	return l.next.Do(ctx, r)
diff --git a/vendor/github.com/cortexproject/cortex/pkg/querier/queryrange/results_cache.go b/vendor/github.com/cortexproject/cortex/pkg/querier/queryrange/results_cache.go
index d766c7b5509a6..30440d0b910dd 100644
--- a/vendor/github.com/cortexproject/cortex/pkg/querier/queryrange/results_cache.go
+++ b/vendor/github.com/cortexproject/cortex/pkg/querier/queryrange/results_cache.go
@@ -14,6 +14,7 @@ import (
 	"github.com/gogo/protobuf/types"
 	"github.com/opentracing/opentracing-go"
 	otlog "github.com/opentracing/opentracing-go/log"
+	"github.com/prometheus/client_golang/prometheus"
 	"github.com/prometheus/common/model"
 	"github.com/uber/jaeger-client-go"
 	"github.com/weaveworks/common/httpgrpc"
@@ -128,8 +129,9 @@ func NewResultsCacheMiddleware(
 	merger Merger,
 	extractor Extractor,
 	cacheGenNumberLoader CacheGenNumberLoader,
+	reg prometheus.Registerer,
 ) (Middleware, cache.Cache, error) {
-	c, err := cache.New(cfg.CacheConfig)
+	c, err := cache.New(cfg.CacheConfig, reg, logger)
 	if err != nil {
 		return nil, nil, err
 	}
diff --git a/vendor/github.com/cortexproject/cortex/pkg/querier/queryrange/roundtrip.go b/vendor/github.com/cortexproject/cortex/pkg/querier/queryrange/roundtrip.go
index dac7de72ccc8f..66c06117bcede 100644
--- a/vendor/github.com/cortexproject/cortex/pkg/querier/queryrange/roundtrip.go
+++ b/vendor/github.com/cortexproject/cortex/pkg/querier/queryrange/roundtrip.go
@@ -159,7 +159,7 @@ func NewTripperware(
 
 	var c cache.Cache
 	if cfg.CacheResults {
-		queryCacheMiddleware, cache, err := NewResultsCacheMiddleware(log, cfg.ResultsCacheConfig, constSplitter(cfg.SplitQueriesByInterval), limits, codec, cacheExtractor, cacheGenNumberLoader)
+		queryCacheMiddleware, cache, err := NewResultsCacheMiddleware(log, cfg.ResultsCacheConfig, constSplitter(cfg.SplitQueriesByInterval), limits, codec, cacheExtractor, cacheGenNumberLoader, registerer)
 		if err != nil {
 			return nil, nil, err
 		}
diff --git a/vendor/github.com/cortexproject/cortex/pkg/ring/basic_lifecycler.go b/vendor/github.com/cortexproject/cortex/pkg/ring/basic_lifecycler.go
index e23d5522bcb38..7334fa9c638eb 100644
--- a/vendor/github.com/cortexproject/cortex/pkg/ring/basic_lifecycler.go
+++ b/vendor/github.com/cortexproject/cortex/pkg/ring/basic_lifecycler.go
@@ -253,13 +253,11 @@ func (l *BasicLifecycler) registerInstance(ctx context.Context) error {
 			return ringDesc, true, nil
 		}
 
-		if instanceDesc.State != state || !tokens.Equals(instanceDesc.Tokens) {
-			instanceDesc = ringDesc.AddIngester(l.cfg.ID, l.cfg.Addr, l.cfg.Zone, tokens, state)
-			return ringDesc, true, nil
-		}
-
-		// We haven't modified the ring, so don't try to store it.
-		return nil, true, nil
+		// Always overwrite the instance in the ring (even if it already exists) because some properties
+		// may have changed (state, tokens, zone, address) and, even if they didn't, the heartbeat at
+		// least did.
+		instanceDesc = ringDesc.AddIngester(l.cfg.ID, l.cfg.Addr, l.cfg.Zone, tokens, state)
+		return ringDesc, true, nil
 	})
 
 	if err != nil {
diff --git a/vendor/github.com/cortexproject/cortex/pkg/ring/batch.go b/vendor/github.com/cortexproject/cortex/pkg/ring/batch.go
index c5e6a00b55939..89a24656aacbd 100644
--- a/vendor/github.com/cortexproject/cortex/pkg/ring/batch.go
+++ b/vendor/github.com/cortexproject/cortex/pkg/ring/batch.go
@@ -4,12 +4,13 @@ import (
 	"context"
 	"fmt"
 	"sync"
-	"sync/atomic"
+
+	"go.uber.org/atomic"
 )
 
 type batchTracker struct {
-	rpcsPending int32
-	rpcsFailed  int32
+	rpcsPending atomic.Int32
+	rpcsFailed  atomic.Int32
 	done        chan struct{}
 	err         chan error
 }
@@ -23,8 +24,8 @@ type ingester struct {
 type itemTracker struct {
 	minSuccess  int
 	maxFailures int
-	succeeded   int32
-	failed      int32
+	succeeded   atomic.Int32
+	failed      atomic.Int32
 }
 
 // DoBatch request against a set of keys in the ring, handling replication and
@@ -70,10 +71,10 @@ func DoBatch(ctx context.Context, r ReadRing, keys []uint32, callback func(Inges
 	}
 
 	tracker := batchTracker{
-		rpcsPending: int32(len(itemTrackers)),
-		done:        make(chan struct{}, 1),
-		err:         make(chan error, 1),
+		done: make(chan struct{}, 1),
+		err:  make(chan error, 1),
 	}
+	tracker.rpcsPending.Store(int32(len(itemTrackers)))
 
 	var wg sync.WaitGroup
 
@@ -115,17 +116,17 @@ func (b *batchTracker) record(sampleTrackers []*itemTracker, err error) {
 	// goroutine will write to either channel.
 	for i := range sampleTrackers {
 		if err != nil {
-			if atomic.AddInt32(&sampleTrackers[i].failed, 1) <= int32(sampleTrackers[i].maxFailures) {
+			if sampleTrackers[i].failed.Inc() <= int32(sampleTrackers[i].maxFailures) {
 				continue
 			}
-			if atomic.AddInt32(&b.rpcsFailed, 1) == 1 {
+			if b.rpcsFailed.Inc() == 1 {
 				b.err <- err
 			}
 		} else {
-			if atomic.AddInt32(&sampleTrackers[i].succeeded, 1) != int32(sampleTrackers[i].minSuccess) {
+			if sampleTrackers[i].succeeded.Inc() != int32(sampleTrackers[i].minSuccess) {
 				continue
 			}
-			if atomic.AddInt32(&b.rpcsPending, -1) == 0 {
+			if b.rpcsPending.Dec() == 0 {
 				b.done <- struct{}{}
 			}
 		}
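
For reference, a condensed sketch of the completion/failure signalling pattern batchTracker keeps, now expressed with go.uber.org/atomic fields: the pending count is Stored after construction (atomic fields cannot be set in a composite literal), the first failure wins the error channel, and the last success signals done. The tracker below is a simplified stand-in that omits the per-item minSuccess/maxFailures accounting:

package main

import (
	"errors"
	"fmt"

	"go.uber.org/atomic"
)

type tracker struct {
	pending atomic.Int32
	failed  atomic.Int32
	done    chan struct{}
	err     chan error
}

func newTracker(n int32) *tracker {
	t := &tracker{done: make(chan struct{}, 1), err: make(chan error, 1)}
	t.pending.Store(n) // set after construction; composite literals can't initialize atomics
	return t
}

// record registers one RPC result; only the first failure and the final success signal.
func (t *tracker) record(err error) {
	if err != nil {
		if t.failed.Inc() == 1 {
			t.err <- err
		}
		return
	}
	if t.pending.Dec() == 0 {
		t.done <- struct{}{}
	}
}

func main() {
	t := newTracker(2)
	t.record(nil)
	t.record(errors.New("boom"))
	select {
	case <-t.done:
		fmt.Println("all succeeded")
	case err := <-t.err:
		fmt.Println("failed:", err)
	}
}
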
diff --git a/vendor/github.com/cortexproject/cortex/pkg/ring/kv/memberlist/tcp_transport.go b/vendor/github.com/cortexproject/cortex/pkg/ring/kv/memberlist/tcp_transport.go
index dc1490787983d..d6af8b2ec9905 100644
--- a/vendor/github.com/cortexproject/cortex/pkg/ring/kv/memberlist/tcp_transport.go
+++ b/vendor/github.com/cortexproject/cortex/pkg/ring/kv/memberlist/tcp_transport.go
@@ -9,7 +9,6 @@ import (
 	"io/ioutil"
 	"net"
 	"sync"
-	"sync/atomic"
 	"time"
 
 	"github.com/go-kit/kit/log"
@@ -17,6 +16,7 @@ import (
 	"github.com/hashicorp/go-sockaddr"
 	"github.com/hashicorp/memberlist"
 	"github.com/prometheus/client_golang/prometheus"
+	"go.uber.org/atomic"
 
 	"github.com/cortexproject/cortex/pkg/util"
 	"github.com/cortexproject/cortex/pkg/util/flagext"
@@ -79,7 +79,7 @@ type TCPTransport struct {
 	wg           sync.WaitGroup
 	tcpListeners []*net.TCPListener
 
-	shutdown int32
+	shutdown atomic.Int32
 
 	advertiseMu   sync.RWMutex
 	advertiseAddr string
@@ -172,7 +172,7 @@ func (t *TCPTransport) tcpListen(tcpLn *net.TCPListener) {
 	for {
 		conn, err := tcpLn.AcceptTCP()
 		if err != nil {
-			if s := atomic.LoadInt32(&t.shutdown); s == 1 {
+			if s := t.shutdown.Load(); s == 1 {
 				break
 			}
 
@@ -503,7 +503,7 @@ func (t *TCPTransport) StreamCh() <-chan net.Conn {
 // transport a chance to clean up any listeners.
 func (t *TCPTransport) Shutdown() error {
 	// This will avoid log spam about errors when we shut down.
-	atomic.StoreInt32(&t.shutdown, 1)
+	t.shutdown.Store(1)
 
 	// Rip through all the connections and shut them down.
 	for _, conn := range t.tcpListeners {
diff --git a/vendor/github.com/cortexproject/cortex/pkg/ring/lifecycler.go b/vendor/github.com/cortexproject/cortex/pkg/ring/lifecycler.go
index a026a27064d0a..fb473d7263718 100644
--- a/vendor/github.com/cortexproject/cortex/pkg/ring/lifecycler.go
+++ b/vendor/github.com/cortexproject/cortex/pkg/ring/lifecycler.go
@@ -227,6 +227,9 @@ func (i *Lifecycler) CheckReady(ctx context.Context) error {
 	}
 
 	if err := ringDesc.Ready(time.Now(), i.cfg.RingConfig.HeartbeatTimeout); err != nil {
+		level.Warn(util.Logger).Log("msg", "found one or more existing ingesters with a problem in the ring; "+
+			"this ingester cannot complete joining and become ready until the problem is resolved. "+
+			"The /ring HTTP endpoint on the distributor (or single binary) provides visibility into the ring.", "err", err)
 		return err
 	}
 
diff --git a/vendor/github.com/cortexproject/cortex/pkg/ruler/compat.go b/vendor/github.com/cortexproject/cortex/pkg/ruler/compat.go
index 597ac27618497..50e0247b780db 100644
--- a/vendor/github.com/cortexproject/cortex/pkg/ruler/compat.go
+++ b/vendor/github.com/cortexproject/cortex/pkg/ruler/compat.go
@@ -4,6 +4,9 @@ import (
 	"context"
 	"time"
 
+	"github.com/go-kit/kit/log"
+	"github.com/prometheus/client_golang/prometheus"
+	"github.com/prometheus/prometheus/notifier"
 	"github.com/prometheus/prometheus/pkg/labels"
 	"github.com/prometheus/prometheus/promql"
 	"github.com/prometheus/prometheus/rules"
@@ -17,14 +20,15 @@ import (
 type Pusher interface {
 	Push(context.Context, *client.WriteRequest) (*client.WriteResponse, error)
 }
-type appendable struct {
+
+type pusherAppender struct {
 	pusher  Pusher
 	labels  []labels.Labels
 	samples []client.Sample
 	userID  string
 }
 
-func (a *appendable) Add(l labels.Labels, t int64, v float64) (uint64, error) {
+func (a *pusherAppender) Add(l labels.Labels, t int64, v float64) (uint64, error) {
 	a.labels = append(a.labels, l)
 	a.samples = append(a.samples, client.Sample{
 		TimestampMs: t,
@@ -33,11 +37,11 @@ func (a *appendable) Add(l labels.Labels, t int64, v float64) (uint64, error) {
 	return 0, nil
 }
 
-func (a *appendable) AddFast(_ uint64, _ int64, _ float64) error {
+func (a *pusherAppender) AddFast(_ uint64, _ int64, _ float64) error {
 	return storage.ErrNotFound
 }
 
-func (a *appendable) Commit() error {
+func (a *pusherAppender) Commit() error {
 	// Since a.pusher is distributor, client.ReuseSlice will be called in a.pusher.Push.
 	// We shouldn't call client.ReuseSlice here.
 	_, err := a.pusher.Push(user.InjectOrgID(context.Background(), a.userID), client.ToWriteRequest(a.labels, a.samples, nil, client.RULE))
@@ -46,21 +50,21 @@ func (a *appendable) Commit() error {
 	return err
 }
 
-func (a *appendable) Rollback() error {
+func (a *pusherAppender) Rollback() error {
 	a.labels = nil
 	a.samples = nil
 	return nil
 }
 
-// appender fulfills the storage.Appendable interface for prometheus manager
-type appender struct {
+// PusherAppendable fulfills the storage.Appendable interface for the Prometheus rules manager.
+type PusherAppendable struct {
 	pusher Pusher
 	userID string
 }
 
 // Appender returns a storage.Appender
-func (t *appender) Appender() storage.Appender {
-	return &appendable{
+func (t *PusherAppendable) Appender() storage.Appender {
+	return &pusherAppender{
 		pusher: t.pusher,
 		userID: t.userID,
 	}
@@ -74,3 +78,40 @@ func engineQueryFunc(engine *promql.Engine, q storage.Queryable, delay time.Dura
 		return orig(ctx, qs, t.Add(-delay))
 	}
 }
+
+type ManagerFactory = func(
+	ctx context.Context,
+	userID string,
+	notifier *notifier.Manager,
+	logger log.Logger,
+	reg prometheus.Registerer,
+) *rules.Manager
+
+func DefaultTenantManagerFactory(
+	cfg Config,
+	p Pusher,
+	q storage.Queryable,
+	engine *promql.Engine,
+) ManagerFactory {
+	return func(
+		ctx context.Context,
+		userID string,
+		notifier *notifier.Manager,
+		logger log.Logger,
+		reg prometheus.Registerer,
+	) *rules.Manager {
+		return rules.NewManager(&rules.ManagerOptions{
+			Appendable:      &PusherAppendable{pusher: p, userID: userID},
+			Queryable:       q,
+			QueryFunc:       engineQueryFunc(engine, q, cfg.EvaluationDelay),
+			Context:         user.InjectOrgID(ctx, userID),
+			ExternalURL:     cfg.ExternalURL.URL,
+			NotifyFunc:      SendAlerts(notifier, cfg.ExternalURL.URL.String()),
+			Logger:          log.With(logger, "user", userID),
+			Registerer:      reg,
+			OutageTolerance: cfg.OutageTolerance,
+			ForGracePeriod:  cfg.ForGracePeriod,
+			ResendDelay:     cfg.ResendDelay,
+		})
+	}
+}
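
For reference, a standalone sketch of the buffer-then-push appender pattern that the renamed PusherAppendable/pusherAppender implement: Add accumulates samples in memory and Commit hands them to a push function in one batch. The types below are simplified stand-ins, not the Cortex client types:

package main

import (
	"context"
	"fmt"
)

type sample struct {
	labels string
	ts     int64
	value  float64
}

// bufferingAppender collects samples and forwards them in one batch on Commit.
type bufferingAppender struct {
	push    func(context.Context, []sample) error
	pending []sample
}

func (a *bufferingAppender) Add(labels string, ts int64, v float64) {
	a.pending = append(a.pending, sample{labels: labels, ts: ts, value: v})
}

func (a *bufferingAppender) Commit(ctx context.Context) error {
	err := a.push(ctx, a.pending)
	a.pending = nil
	return err
}

func (a *bufferingAppender) Rollback() { a.pending = nil }

func main() {
	app := &bufferingAppender{push: func(_ context.Context, s []sample) error {
		fmt.Println("pushed", len(s), "samples")
		return nil
	}}
	app.Add(`{rule="up"}`, 1000, 1)
	_ = app.Commit(context.Background())
}
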
diff --git a/vendor/github.com/cortexproject/cortex/pkg/ruler/manager_metrics.go b/vendor/github.com/cortexproject/cortex/pkg/ruler/manager_metrics.go
new file mode 100644
index 0000000000000..27aa32f654018
--- /dev/null
+++ b/vendor/github.com/cortexproject/cortex/pkg/ruler/manager_metrics.go
@@ -0,0 +1,149 @@
+package ruler
+
+import (
+	"sync"
+
+	"github.com/prometheus/client_golang/prometheus"
+
+	"github.com/cortexproject/cortex/pkg/util"
+)
+
+// ManagerMetrics aggregates metrics exported by the Prometheus
+// rules package and returns them as Cortex metrics
+type ManagerMetrics struct {
+	// Maps userID -> registry
+	regsMu sync.Mutex
+	regs   map[string]*prometheus.Registry
+
+	EvalDuration        *prometheus.Desc
+	IterationDuration   *prometheus.Desc
+	IterationsMissed    *prometheus.Desc
+	IterationsScheduled *prometheus.Desc
+	EvalTotal           *prometheus.Desc
+	EvalFailures        *prometheus.Desc
+	GroupInterval       *prometheus.Desc
+	GroupLastEvalTime   *prometheus.Desc
+	GroupLastDuration   *prometheus.Desc
+	GroupRules          *prometheus.Desc
+}
+
+// NewManagerMetrics returns a ManagerMetrics struct
+func NewManagerMetrics() *ManagerMetrics {
+	return &ManagerMetrics{
+		regs:   map[string]*prometheus.Registry{},
+		regsMu: sync.Mutex{},
+
+		EvalDuration: prometheus.NewDesc(
+			"cortex_prometheus_rule_evaluation_duration_seconds",
+			"The duration for a rule to execute.",
+			[]string{"user"},
+			nil,
+		),
+		IterationDuration: prometheus.NewDesc(
+			"cortex_prometheus_rule_group_duration_seconds",
+			"The duration of rule group evaluations.",
+			[]string{"user"},
+			nil,
+		),
+		IterationsMissed: prometheus.NewDesc(
+			"cortex_prometheus_rule_group_iterations_missed_total",
+			"The total number of rule group evaluations missed due to slow rule group evaluation.",
+			[]string{"user"},
+			nil,
+		),
+		IterationsScheduled: prometheus.NewDesc(
+			"cortex_prometheus_rule_group_iterations_total",
+			"The total number of scheduled rule group evaluations, whether executed or missed.",
+			[]string{"user"},
+			nil,
+		),
+		EvalTotal: prometheus.NewDesc(
+			"cortex_prometheus_rule_evaluations_total",
+			"The total number of rule evaluations.",
+			[]string{"user", "rule_group"},
+			nil,
+		),
+		EvalFailures: prometheus.NewDesc(
+			"cortex_prometheus_rule_evaluation_failures_total",
+			"The total number of rule evaluation failures.",
+			[]string{"user", "rule_group"},
+			nil,
+		),
+		GroupInterval: prometheus.NewDesc(
+			"cortex_prometheus_rule_group_interval_seconds",
+			"The interval of a rule group.",
+			[]string{"user", "rule_group"},
+			nil,
+		),
+		GroupLastEvalTime: prometheus.NewDesc(
+			"cortex_prometheus_rule_group_last_evaluation_timestamp_seconds",
+			"The timestamp of the last rule group evaluation in seconds.",
+			[]string{"user", "rule_group"},
+			nil,
+		),
+		GroupLastDuration: prometheus.NewDesc(
+			"cortex_prometheus_rule_group_last_duration_seconds",
+			"The duration of the last rule group evaluation.",
+			[]string{"user", "rule_group"},
+			nil,
+		),
+		GroupRules: prometheus.NewDesc(
+			"cortex_prometheus_rule_group_rules",
+			"The number of rules.",
+			[]string{"user", "rule_group"},
+			nil,
+		),
+	}
+}
+
+// AddUserRegistry adds a Prometheus registry to the struct
+func (m *ManagerMetrics) AddUserRegistry(user string, reg *prometheus.Registry) {
+	m.regsMu.Lock()
+	m.regs[user] = reg
+	m.regsMu.Unlock()
+}
+
+// Registries returns a map of prometheus registries managed by the struct
+func (m *ManagerMetrics) Registries() map[string]*prometheus.Registry {
+	regs := map[string]*prometheus.Registry{}
+
+	m.regsMu.Lock()
+	defer m.regsMu.Unlock()
+	for uid, r := range m.regs {
+		regs[uid] = r
+	}
+
+	return regs
+}
+
+// Describe implements the Collector interface
+func (m *ManagerMetrics) Describe(out chan<- *prometheus.Desc) {
+	out <- m.EvalDuration
+	out <- m.IterationDuration
+	out <- m.IterationsMissed
+	out <- m.IterationsScheduled
+	out <- m.EvalTotal
+	out <- m.EvalFailures
+	out <- m.GroupInterval
+	out <- m.GroupLastEvalTime
+	out <- m.GroupLastDuration
+	out <- m.GroupRules
+}
+
+// Collect implements the Collector interface
+func (m *ManagerMetrics) Collect(out chan<- prometheus.Metric) {
+	data := util.BuildMetricFamiliesPerUserFromUserRegistries(m.Registries())
+
+	data.SendSumOfSummariesPerUser(out, m.EvalDuration, "prometheus_rule_evaluation_duration_seconds")
+	data.SendSumOfSummariesPerUser(out, m.IterationDuration, "prometheus_rule_group_duration_seconds")
+
+	data.SendSumOfCountersPerUser(out, m.IterationsMissed, "prometheus_rule_group_iterations_missed_total")
+	data.SendSumOfCountersPerUser(out, m.IterationsScheduled, "prometheus_rule_group_iterations_total")
+
+	data.SendSumOfCountersPerUserWithLabels(out, m.EvalTotal, "prometheus_rule_evaluations_total", "rule_group")
+	data.SendSumOfCountersPerUserWithLabels(out, m.EvalFailures, "prometheus_rule_evaluation_failures_total", "rule_group")
+	data.SendSumOfGaugesPerUserWithLabels(out, m.GroupInterval, "prometheus_rule_group_interval_seconds", "rule_group")
+	data.SendSumOfGaugesPerUserWithLabels(out, m.GroupLastEvalTime, "prometheus_rule_group_last_evaluation_timestamp_seconds", "rule_group")
+	data.SendSumOfGaugesPerUserWithLabels(out, m.GroupLastDuration, "prometheus_rule_group_last_duration_seconds", "rule_group")
+	data.SendSumOfGaugesPerUserWithLabels(out, m.GroupRules, "prometheus_rule_group_rules", "rule_group")
+}
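
For reference, a short sketch of how the new per-tenant registry aggregation might be wired up, assuming the vendored package path and the API added in this file: the ManagerMetrics collector is registered once, and each tenant's rules manager gets its own registry tracked under its user ID:

package main

import (
	"github.com/cortexproject/cortex/pkg/ruler"
	"github.com/prometheus/client_golang/prometheus"
)

func main() {
	// One aggregating collector for all tenants.
	managerMetrics := ruler.NewManagerMetrics()
	prometheus.MustRegister(managerMetrics)

	// Each tenant's Prometheus rules manager writes into its own plain registry...
	tenantReg := prometheus.NewRegistry()
	managerMetrics.AddUserRegistry("tenant-1", tenantReg)

	// ...and its prometheus_rule_* series are re-exported as cortex_prometheus_* with a user label.
}
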
diff --git a/vendor/github.com/cortexproject/cortex/pkg/ruler/ruler.go b/vendor/github.com/cortexproject/cortex/pkg/ruler/ruler.go
index 5df91741c30bd..6d35943d1c015 100644
--- a/vendor/github.com/cortexproject/cortex/pkg/ruler/ruler.go
+++ b/vendor/github.com/cortexproject/cortex/pkg/ruler/ruler.go
@@ -20,9 +20,7 @@ import (
 	"github.com/prometheus/client_golang/prometheus/promauto"
 	"github.com/prometheus/prometheus/config"
 	"github.com/prometheus/prometheus/notifier"
-	"github.com/prometheus/prometheus/promql"
 	promRules "github.com/prometheus/prometheus/rules"
-	promStorage "github.com/prometheus/prometheus/storage"
 	"github.com/prometheus/prometheus/util/strutil"
 	"github.com/weaveworks/common/user"
 	"golang.org/x/net/context/ctxhttp"
@@ -155,21 +153,22 @@ func (cfg *Config) RegisterFlags(f *flag.FlagSet) {
 type Ruler struct {
 	services.Service
 
-	cfg         Config
-	engine      *promql.Engine
-	queryable   promStorage.Queryable
-	pusher      Pusher
-	alertURL    *url.URL
-	notifierCfg *config.Config
+	cfg            Config
+	notifierCfg    *config.Config
+	managerFactory ManagerFactory
 
 	lifecycler  *ring.BasicLifecycler
 	ring        *ring.Ring
 	subservices *services.Manager
 
-	store          rules.RuleStore
-	mapper         *mapper
-	userManagerMtx sync.Mutex
-	userManagers   map[string]*promRules.Manager
+	store  rules.RuleStore
+	mapper *mapper
+
+	// Structs for holding per-user Prometheus rules Managers
+	// and a corresponding metrics struct
+	userManagerMtx     sync.Mutex
+	userManagers       map[string]*promRules.Manager
+	userManagerMetrics *ManagerMetrics
 
 	// Per-user notifiers with separate queues.
 	notifiersMtx sync.Mutex
@@ -180,25 +179,29 @@ type Ruler struct {
 }
 
 // NewRuler creates a new ruler from a distributor and chunk store.
-func NewRuler(cfg Config, engine *promql.Engine, queryable promStorage.Queryable, pusher Pusher, reg prometheus.Registerer, logger log.Logger, ruleStore rules.RuleStore) (*Ruler, error) {
+func NewRuler(cfg Config, managerFactory ManagerFactory, reg prometheus.Registerer, logger log.Logger, ruleStore rules.RuleStore) (*Ruler, error) {
 	ncfg, err := buildNotifierConfig(&cfg)
 	if err != nil {
 		return nil, err
 	}
 
+	userManagerMetrics := NewManagerMetrics()
+
+	if reg != nil {
+		reg.MustRegister(userManagerMetrics)
+	}
+
 	ruler := &Ruler{
-		cfg:          cfg,
-		engine:       engine,
-		queryable:    queryable,
-		alertURL:     cfg.ExternalURL.URL,
-		notifierCfg:  ncfg,
-		notifiers:    map[string]*rulerNotifier{},
-		store:        ruleStore,
-		pusher:       pusher,
-		mapper:       newMapper(cfg.RulePath, logger),
-		userManagers: map[string]*promRules.Manager{},
-		registry:     reg,
-		logger:       logger,
+		cfg:                cfg,
+		notifierCfg:        ncfg,
+		managerFactory:     managerFactory,
+		notifiers:          map[string]*rulerNotifier{},
+		store:              ruleStore,
+		mapper:             newMapper(cfg.RulePath, logger),
+		userManagers:       map[string]*promRules.Manager{},
+		userManagerMetrics: userManagerMetrics,
+		registry:           reg,
+		logger:             logger,
 	}
 
 	if cfg.EnableSharding {
@@ -292,11 +295,11 @@ func (r *Ruler) stopping(_ error) error {
 	return nil
 }
 
-// sendAlerts implements a rules.NotifyFunc for a Notifier.
+// SendAlerts implements a rules.NotifyFunc for a Notifier.
 // It filters any non-firing alerts from the input.
 //
 // Copied from Prometheus's main.go.
-func sendAlerts(n *notifier.Manager, externalURL string) promRules.NotifyFunc {
+func SendAlerts(n *notifier.Manager, externalURL string) promRules.NotifyFunc {
 	return func(ctx context.Context, expr string, alerts ...*promRules.Alert) {
 		var res []*notifier.Alert
 
@@ -531,24 +534,13 @@ func (r *Ruler) newManager(ctx context.Context, userID string) (*promRules.Manag
 		return nil, err
 	}
 
-	// Wrap registerer with userID and cortex_ prefix
-	reg := prometheus.WrapRegistererWith(prometheus.Labels{"user": userID}, r.registry)
-	reg = prometheus.WrapRegistererWithPrefix("cortex_", reg)
+	// Create a new Prometheus registry and register it within
+	// our metrics struct for the provided user.
+	reg := prometheus.NewRegistry()
+	r.userManagerMetrics.AddUserRegistry(userID, reg)
+
 	logger := log.With(r.logger, "user", userID)
-	opts := &promRules.ManagerOptions{
-		Appendable:      &appender{pusher: r.pusher, userID: userID},
-		Queryable:       r.queryable,
-		QueryFunc:       engineQueryFunc(r.engine, r.queryable, r.cfg.EvaluationDelay),
-		Context:         user.InjectOrgID(ctx, userID),
-		ExternalURL:     r.alertURL,
-		NotifyFunc:      sendAlerts(notifier, r.alertURL.String()),
-		Logger:          logger,
-		Registerer:      reg,
-		OutageTolerance: r.cfg.OutageTolerance,
-		ForGracePeriod:  r.cfg.ForGracePeriod,
-		ResendDelay:     r.cfg.ResendDelay,
-	}
-	return promRules.NewManager(opts), nil
+	return r.managerFactory(ctx, userID, notifier, logger, reg), nil
 }
 
 // GetRules retrieves the running rules from this ruler and all running rulers in the ring if
diff --git a/vendor/github.com/cortexproject/cortex/pkg/storage/backend/azure/config.go b/vendor/github.com/cortexproject/cortex/pkg/storage/backend/azure/config.go
index 98e721160ac68..10343f522889a 100644
--- a/vendor/github.com/cortexproject/cortex/pkg/storage/backend/azure/config.go
+++ b/vendor/github.com/cortexproject/cortex/pkg/storage/backend/azure/config.go
@@ -17,7 +17,7 @@ type Config struct {
 
 // RegisterFlags registers the flags for TSDB Azure storage
 func (cfg *Config) RegisterFlags(f *flag.FlagSet) {
-	cfg.RegisterFlagsWithPrefix("experimental.tsdb.", f)
+	cfg.RegisterFlagsWithPrefix("experimental.blocks-storage.", f)
 }
 
 // RegisterFlagsWithPrefix registers the flags for TSDB Azure storage
diff --git a/vendor/github.com/cortexproject/cortex/pkg/storage/backend/filesystem/config.go b/vendor/github.com/cortexproject/cortex/pkg/storage/backend/filesystem/config.go
index 4834b5fa23337..28dc109e2030a 100644
--- a/vendor/github.com/cortexproject/cortex/pkg/storage/backend/filesystem/config.go
+++ b/vendor/github.com/cortexproject/cortex/pkg/storage/backend/filesystem/config.go
@@ -9,7 +9,7 @@ type Config struct {
 
 // RegisterFlags registers the flags for TSDB filesystem storage
 func (cfg *Config) RegisterFlags(f *flag.FlagSet) {
-	cfg.RegisterFlagsWithPrefix("experimental.tsdb.", f)
+	cfg.RegisterFlagsWithPrefix("experimental.blocks-storage.", f)
 }
 
 // RegisterFlagsWithPrefix registers the flags for TSDB filesystem storage with the provided prefix
diff --git a/vendor/github.com/cortexproject/cortex/pkg/storage/backend/gcs/config.go b/vendor/github.com/cortexproject/cortex/pkg/storage/backend/gcs/config.go
index d46131ffadcdc..899e706dd91c5 100644
--- a/vendor/github.com/cortexproject/cortex/pkg/storage/backend/gcs/config.go
+++ b/vendor/github.com/cortexproject/cortex/pkg/storage/backend/gcs/config.go
@@ -14,7 +14,7 @@ type Config struct {
 
 // RegisterFlags registers the flags for TSDB GCS storage
 func (cfg *Config) RegisterFlags(f *flag.FlagSet) {
-	cfg.RegisterFlagsWithPrefix("experimental.tsdb.", f)
+	cfg.RegisterFlagsWithPrefix("experimental.blocks-storage.", f)
 }
 
 // RegisterFlagsWithPrefix registers the flags for TSDB GCS storage with the provided prefix
diff --git a/vendor/github.com/cortexproject/cortex/pkg/storage/backend/s3/config.go b/vendor/github.com/cortexproject/cortex/pkg/storage/backend/s3/config.go
index ddd93f3f7f793..f5f396cb0a8f2 100644
--- a/vendor/github.com/cortexproject/cortex/pkg/storage/backend/s3/config.go
+++ b/vendor/github.com/cortexproject/cortex/pkg/storage/backend/s3/config.go
@@ -17,7 +17,7 @@ type Config struct {
 
 // RegisterFlags registers the flags for TSDB s3 storage with the provided prefix
 func (cfg *Config) RegisterFlags(f *flag.FlagSet) {
-	cfg.RegisterFlagsWithPrefix("experimental.tsdb.", f)
+	cfg.RegisterFlagsWithPrefix("experimental.blocks-storage.", f)
 }
 
 // RegisterFlagsWithPrefix registers the flags for TSDB s3 storage with the provided prefix
diff --git a/vendor/github.com/cortexproject/cortex/pkg/storage/tsdb/bucket_client.go b/vendor/github.com/cortexproject/cortex/pkg/storage/tsdb/bucket_client.go
index 9d61dc276e870..cccbe16be840d 100644
--- a/vendor/github.com/cortexproject/cortex/pkg/storage/tsdb/bucket_client.go
+++ b/vendor/github.com/cortexproject/cortex/pkg/storage/tsdb/bucket_client.go
@@ -14,7 +14,7 @@ import (
 )
 
 // NewBucketClient creates a new bucket client based on the configured backend
-func NewBucketClient(ctx context.Context, cfg Config, name string, logger log.Logger, reg prometheus.Registerer) (client objstore.Bucket, err error) {
+func NewBucketClient(ctx context.Context, cfg BlocksStorageConfig, name string, logger log.Logger, reg prometheus.Registerer) (client objstore.Bucket, err error) {
 	switch cfg.Backend {
 	case BackendS3:
 		client, err = s3.NewBucketClient(cfg.S3, name, logger)
diff --git a/vendor/github.com/cortexproject/cortex/pkg/storage/tsdb/config.go b/vendor/github.com/cortexproject/cortex/pkg/storage/tsdb/config.go
index 6049cf54e4fa9..61ff7e3e33ebc 100644
--- a/vendor/github.com/cortexproject/cortex/pkg/storage/tsdb/config.go
+++ b/vendor/github.com/cortexproject/cortex/pkg/storage/tsdb/config.go
@@ -56,34 +56,18 @@ var (
 	errEmptyBlockranges             = errors.New("empty block ranges for TSDB")
 )
 
-// Config holds the config information for TSDB storage
-type Config struct {
-	Dir                       string            `yaml:"dir"`
-	BlockRanges               DurationList      `yaml:"block_ranges_period"`
-	Retention                 time.Duration     `yaml:"retention_period"`
-	ShipInterval              time.Duration     `yaml:"ship_interval"`
-	ShipConcurrency           int               `yaml:"ship_concurrency"`
-	Backend                   string            `yaml:"backend"`
-	BucketStore               BucketStoreConfig `yaml:"bucket_store"`
-	HeadCompactionInterval    time.Duration     `yaml:"head_compaction_interval"`
-	HeadCompactionConcurrency int               `yaml:"head_compaction_concurrency"`
-	HeadCompactionIdleTimeout time.Duration     `yaml:"head_compaction_idle_timeout"`
-	StripeSize                int               `yaml:"stripe_size"`
-	WALCompressionEnabled     bool              `yaml:"wal_compression_enabled"`
-	FlushBlocksOnShutdown     bool              `yaml:"flush_blocks_on_shutdown"`
-
-	// MaxTSDBOpeningConcurrencyOnStartup limits the number of concurrently opening TSDB's during startup
-	MaxTSDBOpeningConcurrencyOnStartup int `yaml:"max_tsdb_opening_concurrency_on_startup"`
+// BlocksStorageConfig holds the config information for the blocks storage.
+//nolint:golint
+type BlocksStorageConfig struct {
+	Backend     string            `yaml:"backend"`
+	BucketStore BucketStoreConfig `yaml:"bucket_store" doc:"description=This configures how the store-gateway synchronizes blocks stored in the bucket."`
+	TSDB        TSDBConfig        `yaml:"tsdb"`
 
 	// Backends
 	S3         s3.Config         `yaml:"s3"`
 	GCS        gcs.Config        `yaml:"gcs"`
 	Azure      azure.Config      `yaml:"azure"`
 	Filesystem filesystem.Config `yaml:"filesystem"`
-
-	// If true, user TSDBs are not closed on shutdown. Only for testing.
-	// If false (default), user TSDBs are closed to make sure all resources are released and closed properly.
-	KeepUserTSDBOpenOnShutdown bool `yaml:"-"`
 }
 
 // DurationList is the block ranges for a tsdb
@@ -124,38 +108,75 @@ func (d *DurationList) ToMilliseconds() []int64 {
 }
 
 // RegisterFlags registers the TSDB flags
-func (cfg *Config) RegisterFlags(f *flag.FlagSet) {
+func (cfg *BlocksStorageConfig) RegisterFlags(f *flag.FlagSet) {
 	cfg.S3.RegisterFlags(f)
 	cfg.GCS.RegisterFlags(f)
 	cfg.Azure.RegisterFlags(f)
 	cfg.BucketStore.RegisterFlags(f)
 	cfg.Filesystem.RegisterFlags(f)
+	cfg.TSDB.RegisterFlags(f)
 
-	if len(cfg.BlockRanges) == 0 {
-		cfg.BlockRanges = []time.Duration{2 * time.Hour} // Default 2h block
-	}
-
-	f.StringVar(&cfg.Dir, "experimental.tsdb.dir", "tsdb", "Local directory to store TSDBs in the ingesters.")
-	f.Var(&cfg.BlockRanges, "experimental.tsdb.block-ranges-period", "TSDB blocks range period.")
-	f.DurationVar(&cfg.Retention, "experimental.tsdb.retention-period", 6*time.Hour, "TSDB blocks retention in the ingester before a block is removed. This should be larger than the block_ranges_period and large enough to give store-gateways and queriers enough time to discover newly uploaded blocks.")
-	f.DurationVar(&cfg.ShipInterval, "experimental.tsdb.ship-interval", 1*time.Minute, "How frequently the TSDB blocks are scanned and new ones are shipped to the storage. 0 means shipping is disabled.")
-	f.IntVar(&cfg.ShipConcurrency, "experimental.tsdb.ship-concurrency", 10, "Maximum number of tenants concurrently shipping blocks to the storage.")
-	f.StringVar(&cfg.Backend, "experimental.tsdb.backend", "s3", fmt.Sprintf("Backend storage to use. Supported backends are: %s.", strings.Join(supportedBackends, ", ")))
-	f.IntVar(&cfg.MaxTSDBOpeningConcurrencyOnStartup, "experimental.tsdb.max-tsdb-opening-concurrency-on-startup", 10, "limit the number of concurrently opening TSDB's on startup")
-	f.DurationVar(&cfg.HeadCompactionInterval, "experimental.tsdb.head-compaction-interval", 1*time.Minute, "How frequently does Cortex try to compact TSDB head. Block is only created if data covers smallest block range. Must be greater than 0 and max 5 minutes.")
-	f.IntVar(&cfg.HeadCompactionConcurrency, "experimental.tsdb.head-compaction-concurrency", 5, "Maximum number of tenants concurrently compacting TSDB head into a new block")
-	f.DurationVar(&cfg.HeadCompactionIdleTimeout, "experimental.tsdb.head-compaction-idle-timeout", 1*time.Hour, "If TSDB head is idle for this duration, it is compacted. 0 means disabled.")
-	f.IntVar(&cfg.StripeSize, "experimental.tsdb.stripe-size", 16384, "The number of shards of series to use in TSDB (must be a power of 2). Reducing this will decrease memory footprint, but can negatively impact performance.")
-	f.BoolVar(&cfg.WALCompressionEnabled, "experimental.tsdb.wal-compression-enabled", false, "True to enable TSDB WAL compression.")
-	f.BoolVar(&cfg.FlushBlocksOnShutdown, "experimental.tsdb.flush-blocks-on-shutdown", false, "If true, and transfer of blocks on shutdown fails or is disabled, incomplete blocks are flushed to storage instead. If false, incomplete blocks will be reused after restart, and uploaded when finished.")
+	f.StringVar(&cfg.Backend, "experimental.blocks-storage.backend", "s3", fmt.Sprintf("Backend storage to use. Supported backends are: %s.", strings.Join(supportedBackends, ", ")))
 }
 
 // Validate the config.
-func (cfg *Config) Validate() error {
+func (cfg *BlocksStorageConfig) Validate() error {
 	if !util.StringsContain(supportedBackends, cfg.Backend) {
 		return errUnsupportedStorageBackend
 	}
 
+	if err := cfg.TSDB.Validate(); err != nil {
+		return err
+	}
+
+	return cfg.BucketStore.Validate()
+}
+
+// TSDBConfig holds the config for TSDB opened in the ingesters.
+//nolint:golint
+type TSDBConfig struct {
+	Dir                       string        `yaml:"dir"`
+	BlockRanges               DurationList  `yaml:"block_ranges_period"`
+	Retention                 time.Duration `yaml:"retention_period"`
+	ShipInterval              time.Duration `yaml:"ship_interval"`
+	ShipConcurrency           int           `yaml:"ship_concurrency"`
+	HeadCompactionInterval    time.Duration `yaml:"head_compaction_interval"`
+	HeadCompactionConcurrency int           `yaml:"head_compaction_concurrency"`
+	HeadCompactionIdleTimeout time.Duration `yaml:"head_compaction_idle_timeout"`
+	StripeSize                int           `yaml:"stripe_size"`
+	WALCompressionEnabled     bool          `yaml:"wal_compression_enabled"`
+	FlushBlocksOnShutdown     bool          `yaml:"flush_blocks_on_shutdown"`
+
+	// MaxTSDBOpeningConcurrencyOnStartup limits the number of concurrently opening TSDB's during startup.
+	MaxTSDBOpeningConcurrencyOnStartup int `yaml:"max_tsdb_opening_concurrency_on_startup"`
+
+	// If true, user TSDBs are not closed on shutdown. Only for testing.
+	// If false (default), user TSDBs are closed to make sure all resources are released and closed properly.
+	KeepUserTSDBOpenOnShutdown bool `yaml:"-"`
+}
+
+// RegisterFlags registers the TSDBConfig flags.
+func (cfg *TSDBConfig) RegisterFlags(f *flag.FlagSet) {
+	if len(cfg.BlockRanges) == 0 {
+		cfg.BlockRanges = []time.Duration{2 * time.Hour} // Default 2h block
+	}
+
+	f.StringVar(&cfg.Dir, "experimental.blocks-storage.tsdb.dir", "tsdb", "Local directory to store TSDBs in the ingesters.")
+	f.Var(&cfg.BlockRanges, "experimental.blocks-storage.tsdb.block-ranges-period", "TSDB blocks range period.")
+	f.DurationVar(&cfg.Retention, "experimental.blocks-storage.tsdb.retention-period", 6*time.Hour, "TSDB blocks retention in the ingester before a block is removed. This should be larger than the block_ranges_period and large enough to give store-gateways and queriers enough time to discover newly uploaded blocks.")
+	f.DurationVar(&cfg.ShipInterval, "experimental.blocks-storage.tsdb.ship-interval", 1*time.Minute, "How frequently the TSDB blocks are scanned and new ones are shipped to the storage. 0 means shipping is disabled.")
+	f.IntVar(&cfg.ShipConcurrency, "experimental.blocks-storage.tsdb.ship-concurrency", 10, "Maximum number of tenants concurrently shipping blocks to the storage.")
+	f.IntVar(&cfg.MaxTSDBOpeningConcurrencyOnStartup, "experimental.blocks-storage.tsdb.max-tsdb-opening-concurrency-on-startup", 10, "limit the number of concurrently opening TSDB's on startup")
+	f.DurationVar(&cfg.HeadCompactionInterval, "experimental.blocks-storage.tsdb.head-compaction-interval", 1*time.Minute, "How frequently does Cortex try to compact TSDB head. Block is only created if data covers smallest block range. Must be greater than 0 and max 5 minutes.")
+	f.IntVar(&cfg.HeadCompactionConcurrency, "experimental.blocks-storage.tsdb.head-compaction-concurrency", 5, "Maximum number of tenants concurrently compacting TSDB head into a new block")
+	f.DurationVar(&cfg.HeadCompactionIdleTimeout, "experimental.blocks-storage.tsdb.head-compaction-idle-timeout", 1*time.Hour, "If TSDB head is idle for this duration, it is compacted. 0 means disabled.")
+	f.IntVar(&cfg.StripeSize, "experimental.blocks-storage.tsdb.stripe-size", 16384, "The number of shards of series to use in TSDB (must be a power of 2). Reducing this will decrease memory footprint, but can negatively impact performance.")
+	f.BoolVar(&cfg.WALCompressionEnabled, "experimental.blocks-storage.tsdb.wal-compression-enabled", false, "True to enable TSDB WAL compression.")
+	f.BoolVar(&cfg.FlushBlocksOnShutdown, "experimental.blocks-storage.tsdb.flush-blocks-on-shutdown", false, "If true, and transfer of blocks on shutdown fails or is disabled, incomplete blocks are flushed to storage instead. If false, incomplete blocks will be reused after restart, and uploaded when finished.")
+}
+
+// Validate the config.
+func (cfg *TSDBConfig) Validate() error {
 	if cfg.ShipInterval > 0 && cfg.ShipConcurrency <= 0 {
 		return errInvalidShipConcurrency
 	}
@@ -176,7 +197,13 @@ func (cfg *Config) Validate() error {
 		return errEmptyBlockranges
 	}
 
-	return cfg.BucketStore.Validate()
+	return nil
+}
+
+// BlocksDir returns the directory path where TSDB blocks and WAL should be
+// stored by the ingester.
+func (cfg *TSDBConfig) BlocksDir(userID string) string {
+	return filepath.Join(cfg.Dir, userID)
 }
 
 // BucketStoreConfig holds the config information for Bucket Stores used by the querier
@@ -204,22 +231,22 @@ type BucketStoreConfig struct {
 
 // RegisterFlags registers the BucketStore flags
 func (cfg *BucketStoreConfig) RegisterFlags(f *flag.FlagSet) {
-	cfg.IndexCache.RegisterFlagsWithPrefix(f, "experimental.tsdb.bucket-store.index-cache.")
-	cfg.ChunksCache.RegisterFlagsWithPrefix(f, "experimental.tsdb.bucket-store.chunks-cache.")
-	cfg.MetadataCache.RegisterFlagsWithPrefix(f, "experimental.tsdb.bucket-store.metadata-cache.")
-
-	f.StringVar(&cfg.SyncDir, "experimental.tsdb.bucket-store.sync-dir", "tsdb-sync", "Directory to store synchronized TSDB index headers.")
-	f.DurationVar(&cfg.SyncInterval, "experimental.tsdb.bucket-store.sync-interval", 5*time.Minute, "How frequently scan the bucket to look for changes (new blocks shipped by ingesters and blocks removed by retention or compaction). 0 disables it.")
-	f.Uint64Var(&cfg.MaxChunkPoolBytes, "experimental.tsdb.bucket-store.max-chunk-pool-bytes", uint64(2*units.Gibibyte), "Max size - in bytes - of a per-tenant chunk pool, used to reduce memory allocations.")
-	f.IntVar(&cfg.MaxConcurrent, "experimental.tsdb.bucket-store.max-concurrent", 100, "Max number of concurrent queries to execute against the long-term storage. The limit is shared across all tenants.")
-	f.IntVar(&cfg.TenantSyncConcurrency, "experimental.tsdb.bucket-store.tenant-sync-concurrency", 10, "Maximum number of concurrent tenants synching blocks.")
-	f.IntVar(&cfg.BlockSyncConcurrency, "experimental.tsdb.bucket-store.block-sync-concurrency", 20, "Maximum number of concurrent blocks synching per tenant.")
-	f.IntVar(&cfg.MetaSyncConcurrency, "experimental.tsdb.bucket-store.meta-sync-concurrency", 20, "Number of Go routines to use when syncing block meta files from object storage per tenant.")
-	f.DurationVar(&cfg.ConsistencyDelay, "experimental.tsdb.bucket-store.consistency-delay", 0, "Minimum age of a block before it's being read. Set it to safe value (e.g 30m) if your object storage is eventually consistent. GCS and S3 are (roughly) strongly consistent.")
-	f.DurationVar(&cfg.IgnoreDeletionMarksDelay, "experimental.tsdb.bucket-store.ignore-deletion-marks-delay", time.Hour*6, "Duration after which the blocks marked for deletion will be filtered out while fetching blocks. "+
+	cfg.IndexCache.RegisterFlagsWithPrefix(f, "experimental.blocks-storage.bucket-store.index-cache.")
+	cfg.ChunksCache.RegisterFlagsWithPrefix(f, "experimental.blocks-storage.bucket-store.chunks-cache.")
+	cfg.MetadataCache.RegisterFlagsWithPrefix(f, "experimental.blocks-storage.bucket-store.metadata-cache.")
+
+	f.StringVar(&cfg.SyncDir, "experimental.blocks-storage.bucket-store.sync-dir", "tsdb-sync", "Directory to store synchronized TSDB index headers.")
+	f.DurationVar(&cfg.SyncInterval, "experimental.blocks-storage.bucket-store.sync-interval", 5*time.Minute, "How frequently to scan the bucket to look for changes (new blocks shipped by ingesters and blocks removed by retention or compaction). 0 disables it.")
+	f.Uint64Var(&cfg.MaxChunkPoolBytes, "experimental.blocks-storage.bucket-store.max-chunk-pool-bytes", uint64(2*units.Gibibyte), "Max size - in bytes - of a per-tenant chunk pool, used to reduce memory allocations.")
+	f.IntVar(&cfg.MaxConcurrent, "experimental.blocks-storage.bucket-store.max-concurrent", 100, "Max number of concurrent queries to execute against the long-term storage. The limit is shared across all tenants.")
+	f.IntVar(&cfg.TenantSyncConcurrency, "experimental.blocks-storage.bucket-store.tenant-sync-concurrency", 10, "Maximum number of concurrent tenants syncing blocks.")
+	f.IntVar(&cfg.BlockSyncConcurrency, "experimental.blocks-storage.bucket-store.block-sync-concurrency", 20, "Maximum number of concurrent blocks syncing per tenant.")
+	f.IntVar(&cfg.MetaSyncConcurrency, "experimental.blocks-storage.bucket-store.meta-sync-concurrency", 20, "Number of Go routines to use when syncing block meta files from object storage per tenant.")
+	f.DurationVar(&cfg.ConsistencyDelay, "experimental.blocks-storage.bucket-store.consistency-delay", 0, "Minimum age of a block before it is read. Set it to a safe value (e.g. 30m) if your object storage is eventually consistent. GCS and S3 are (roughly) strongly consistent.")
+	f.DurationVar(&cfg.IgnoreDeletionMarksDelay, "experimental.blocks-storage.bucket-store.ignore-deletion-marks-delay", time.Hour*6, "Duration after which the blocks marked for deletion will be filtered out while fetching blocks. "+
 		"The idea of ignore-deletion-marks-delay is to ignore blocks that are marked for deletion with some delay. This ensures store can still serve blocks that are meant to be deleted but do not have a replacement yet. "+
 		"Default is 6h, half of the default value for -compactor.deletion-delay.")
-	f.IntVar(&cfg.PostingOffsetsInMemSampling, "experimental.tsdb.bucket-store.posting-offsets-in-mem-sampling", store.DefaultPostingOffsetInMemorySampling, "Controls what is the ratio of postings offsets that the store will hold in memory.")
+	f.IntVar(&cfg.PostingOffsetsInMemSampling, "experimental.blocks-storage.bucket-store.posting-offsets-in-mem-sampling", store.DefaultPostingOffsetInMemorySampling, "Controls the ratio of postings offsets that the store will hold in memory.")
 }
 
 // Validate the config.
@@ -238,9 +265,3 @@ func (cfg *BucketStoreConfig) Validate() error {
 	}
 	return nil
 }
-
-// BlocksDir returns the directory path where TSDB blocks and wal should be
-// stored by the ingester
-func (cfg *Config) BlocksDir(userID string) string {
-	return filepath.Join(cfg.Dir, userID)
-}
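Note on the move above: BlocksDir now hangs off TSDBConfig rather than the top-level Config, but the layout rule is unchanged, one blocks/WAL subdirectory per tenant under the configured data directory. A minimal sketch of that rule (the tsdbConfig type below is illustrative, not the vendored struct):

	package main

	import (
		"fmt"
		"path/filepath"
	)

	// tsdbConfig stands in for the vendored TSDBConfig; only Dir matters here.
	type tsdbConfig struct {
		Dir string
	}

	// BlocksDir mirrors the helper above: each tenant gets its own
	// subdirectory under the configured data directory.
	func (c tsdbConfig) BlocksDir(userID string) string {
		return filepath.Join(c.Dir, userID)
	}

	func main() {
		cfg := tsdbConfig{Dir: "/data/tsdb"}
		fmt.Println(cfg.BlocksDir("tenant-a")) // /data/tsdb/tenant-a
	}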
diff --git a/vendor/github.com/cortexproject/cortex/pkg/storage/tsdb/index_cache.go b/vendor/github.com/cortexproject/cortex/pkg/storage/tsdb/index_cache.go
index 6520d8dae86c0..dde7067be1f11 100644
--- a/vendor/github.com/cortexproject/cortex/pkg/storage/tsdb/index_cache.go
+++ b/vendor/github.com/cortexproject/cortex/pkg/storage/tsdb/index_cache.go
@@ -44,7 +44,7 @@ type IndexCacheConfig struct {
 }
 
 func (cfg *IndexCacheConfig) RegisterFlags(f *flag.FlagSet) {
-	cfg.RegisterFlagsWithPrefix(f, "experimental.tsdb.bucket-store.index-cache.")
+	cfg.RegisterFlagsWithPrefix(f, "experimental.blocks-storage.bucket-store.index-cache.")
 }
 
 func (cfg *IndexCacheConfig) RegisterFlagsWithPrefix(f *flag.FlagSet, prefix string) {
diff --git a/vendor/github.com/cortexproject/cortex/pkg/storage/tsdb/ref_cache.go b/vendor/github.com/cortexproject/cortex/pkg/storage/tsdb/ref_cache.go
index 0c5447c135af7..27f625b24c77d 100644
--- a/vendor/github.com/cortexproject/cortex/pkg/storage/tsdb/ref_cache.go
+++ b/vendor/github.com/cortexproject/cortex/pkg/storage/tsdb/ref_cache.go
@@ -2,11 +2,11 @@ package tsdb
 
 import (
 	"sync"
-	"sync/atomic"
 	"time"
 
 	"github.com/prometheus/common/model"
 	"github.com/prometheus/prometheus/pkg/labels"
+	"go.uber.org/atomic"
 
 	"github.com/cortexproject/cortex/pkg/ingester/client"
 	"github.com/cortexproject/cortex/pkg/util"
@@ -41,7 +41,7 @@ type refCacheStripe struct {
 type refCacheEntry struct {
 	lbs       labels.Labels
 	ref       uint64
-	touchedAt int64 // Unix nano time.
+	touchedAt atomic.Int64 // Unix nano time.
 }
 
 // NewRefCache makes a new RefCache.
@@ -93,7 +93,7 @@ func (s *refCacheStripe) ref(now time.Time, series labels.Labels, fp model.Finge
 	for ix := range entries {
 		if labels.Equal(entries[ix].lbs, series) {
 			// Since we use read-only lock, we need to use atomic update.
-			atomic.StoreInt64(&entries[ix].touchedAt, now.UnixNano())
+			entries[ix].touchedAt.Store(now.UnixNano())
 			return entries[ix].ref, true
 		}
 	}
@@ -112,13 +112,15 @@ func (s *refCacheStripe) setRef(now time.Time, series labels.Labels, fp model.Fi
 		}
 
 		entry.ref = ref
-		entry.touchedAt = now.UnixNano()
+		entry.touchedAt.Store(now.UnixNano())
 		s.refs[fp][ix] = entry
 		return
 	}
 
 	// The entry doesn't exist, so we have to add a new one.
-	s.refs[fp] = append(s.refs[fp], refCacheEntry{lbs: series, ref: ref, touchedAt: now.UnixNano()})
+	refCacheEntry := refCacheEntry{lbs: series, ref: ref}
+	refCacheEntry.touchedAt.Store(now.UnixNano())
+	s.refs[fp] = append(s.refs[fp], refCacheEntry)
 }
 
 func (s *refCacheStripe) purge(keepUntil time.Time) {
@@ -131,7 +133,7 @@ func (s *refCacheStripe) purge(keepUntil time.Time) {
 		// Since we do expect very few fingerprint collisions, we
 		// have an optimized implementation for the common case.
 		if len(entries) == 1 {
-			if entries[0].touchedAt < keepUntilNanos {
+			if entries[0].touchedAt.Load() < keepUntilNanos {
 				delete(s.refs, fp)
 			}
 
@@ -141,7 +143,7 @@ func (s *refCacheStripe) purge(keepUntil time.Time) {
 		// We have more entries, which means there's a collision,
 		// so we have to iterate over the entries.
 		for i := 0; i < len(entries); {
-			if entries[i].touchedAt < keepUntilNanos {
+			if entries[i].touchedAt.Load() < keepUntilNanos {
 				entries = append(entries[:i], entries[i+1:]...)
 			} else {
 				i++
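The ref cache change swaps raw sync/atomic calls on an int64 field for the go.uber.org/atomic wrapper type, so the timestamp can only be read and written atomically and can still be updated while holding only a read lock. A minimal sketch of the new pattern (the entry type is illustrative, not the vendored struct):

	package main

	import (
		"fmt"
		"time"

		"go.uber.org/atomic"
	)

	// entry mirrors the shape of refCacheEntry after the change: the timestamp
	// is an atomic.Int64, so every access goes through Load/Store even when the
	// surrounding map is only guarded by a read lock.
	type entry struct {
		touchedAt atomic.Int64 // Unix nano time.
	}

	func main() {
		e := &entry{}
		e.touchedAt.Store(time.Now().UnixNano())
		fmt.Println(e.touchedAt.Load() > 0) // true
	}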
diff --git a/vendor/github.com/cortexproject/cortex/pkg/storegateway/bucket_store_metrics.go b/vendor/github.com/cortexproject/cortex/pkg/storegateway/bucket_store_metrics.go
index 853478c2517b8..5b2d6ba8c16d5 100644
--- a/vendor/github.com/cortexproject/cortex/pkg/storegateway/bucket_store_metrics.go
+++ b/vendor/github.com/cortexproject/cortex/pkg/storegateway/bucket_store_metrics.go
@@ -30,6 +30,7 @@ type BucketStoreMetrics struct {
 	seriesMergeDuration   *prometheus.Desc
 	seriesRefetches       *prometheus.Desc
 	resultSeriesCount     *prometheus.Desc
+	queriesDropped        *prometheus.Desc
 
 	cachedPostingsCompressions           *prometheus.Desc
 	cachedPostingsCompressionErrors      *prometheus.Desc
@@ -99,6 +100,10 @@ func NewBucketStoreMetrics() *BucketStoreMetrics {
 			"cortex_bucket_store_series_result_series",
 			"Number of series observed in the final result of a query.",
 			nil, nil),
+		queriesDropped: prometheus.NewDesc(
+			"cortex_bucket_store_queries_dropped_total",
+			"Number of queries that were dropped due to the max chunks per query limit.",
+			nil, nil),
 
 		cachedPostingsCompressions: prometheus.NewDesc(
 			"cortex_bucket_store_cached_postings_compressions_total",
@@ -156,6 +161,7 @@ func (m *BucketStoreMetrics) Describe(out chan<- *prometheus.Desc) {
 	out <- m.seriesMergeDuration
 	out <- m.seriesRefetches
 	out <- m.resultSeriesCount
+	out <- m.queriesDropped
 
 	out <- m.cachedPostingsCompressions
 	out <- m.cachedPostingsCompressionErrors
@@ -184,6 +190,7 @@ func (m *BucketStoreMetrics) Collect(out chan<- prometheus.Metric) {
 	data.SendSumOfHistograms(out, m.seriesMergeDuration, "thanos_bucket_store_series_merge_duration_seconds")
 	data.SendSumOfCounters(out, m.seriesRefetches, "thanos_bucket_store_series_refetches_total")
 	data.SendSumOfSummaries(out, m.resultSeriesCount, "thanos_bucket_store_series_result_series")
+	data.SendSumOfCounters(out, m.queriesDropped, "thanos_bucket_store_queries_dropped_total")
 
 	data.SendSumOfCountersWithLabels(out, m.cachedPostingsCompressions, "thanos_bucket_store_cached_postings_compressions_total", "op")
 	data.SendSumOfCountersWithLabels(out, m.cachedPostingsCompressionErrors, "thanos_bucket_store_cached_postings_compression_errors_total", "op")
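queriesDropped follows the same Desc/Collect pattern as the other bucket store metrics: the description is declared once and the underlying Thanos counters are summed into it at collect time. A rough, self-contained sketch of that collector shape (metric name and value below are made up for illustration):

	package main

	import (
		"fmt"

		"github.com/prometheus/client_golang/prometheus"
	)

	// dropped is a toy collector using the same Desc/Collect pattern: the Desc
	// is declared once, and a value gathered elsewhere (here a constant) is
	// emitted for it at collect time.
	type dropped struct{ desc *prometheus.Desc }

	func (d dropped) Describe(ch chan<- *prometheus.Desc) { ch <- d.desc }
	func (d dropped) Collect(ch chan<- prometheus.Metric) {
		ch <- prometheus.MustNewConstMetric(d.desc, prometheus.CounterValue, 42)
	}

	func main() {
		reg := prometheus.NewRegistry()
		reg.MustRegister(dropped{desc: prometheus.NewDesc(
			"example_queries_dropped_total",
			"Number of queries dropped due to the max chunks per query limit.",
			nil, nil,
		)})
		mfs, _ := reg.Gather()
		fmt.Println(mfs[0].GetName()) // example_queries_dropped_total
	}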
diff --git a/vendor/github.com/cortexproject/cortex/pkg/storegateway/bucket_stores.go b/vendor/github.com/cortexproject/cortex/pkg/storegateway/bucket_stores.go
index 381434ca8af3d..8203130d92f36 100644
--- a/vendor/github.com/cortexproject/cortex/pkg/storegateway/bucket_stores.go
+++ b/vendor/github.com/cortexproject/cortex/pkg/storegateway/bucket_stores.go
@@ -29,12 +29,14 @@ import (
 	"github.com/cortexproject/cortex/pkg/storage/tsdb"
 	"github.com/cortexproject/cortex/pkg/util"
 	"github.com/cortexproject/cortex/pkg/util/spanlogger"
+	"github.com/cortexproject/cortex/pkg/util/validation"
 )
 
 // BucketStores is a multi-tenant wrapper of Thanos BucketStore.
 type BucketStores struct {
 	logger             log.Logger
-	cfg                tsdb.Config
+	cfg                tsdb.BlocksStorageConfig
+	limits             *validation.Overrides
 	bucket             objstore.Bucket
 	logLevel           logging.Level
 	bucketStoreMetrics *BucketStoreMetrics
@@ -57,7 +59,7 @@ type BucketStores struct {
 }
 
 // NewBucketStores makes a new BucketStores.
-func NewBucketStores(cfg tsdb.Config, filters []block.MetadataFilter, bucketClient objstore.Bucket, logLevel logging.Level, logger log.Logger, reg prometheus.Registerer) (*BucketStores, error) {
+func NewBucketStores(cfg tsdb.BlocksStorageConfig, filters []block.MetadataFilter, bucketClient objstore.Bucket, limits *validation.Overrides, logLevel logging.Level, logger log.Logger, reg prometheus.Registerer) (*BucketStores, error) {
 	cachingBucket, err := tsdb.CreateCachingBucket(cfg.BucketStore.ChunksCache, cfg.BucketStore.MetadataCache, bucketClient, logger, reg)
 	if err != nil {
 		return nil, errors.Wrapf(err, "create caching bucket")
@@ -74,6 +76,7 @@ func NewBucketStores(cfg tsdb.Config, filters []block.MetadataFilter, bucketClie
 	u := &BucketStores{
 		logger:             logger,
 		cfg:                cfg,
+		limits:             limits,
 		bucket:             cachingBucket,
 		filters:            filters,
 		stores:             map[string]*store.BucketStore{},
@@ -283,7 +286,7 @@ func (u *BucketStores) getOrCreateStore(userID string) (*store.BucketStore, erro
 		u.indexCache,
 		u.queryGate,
 		u.cfg.BucketStore.MaxChunkPoolBytes,
-		0,                              // No max samples limit (it's flawed in Thanos)
+		newChunksLimiterFactory(u.limits, userID),
 		u.logLevel.String() == "debug", // Turn on debug logging, if the log level is set to debug
 		u.cfg.BucketStore.BlockSyncConcurrency,
 		nil,   // Do not limit timerange.
@@ -353,3 +356,11 @@ type spanSeriesServer struct {
 func (s spanSeriesServer) Context() context.Context {
 	return s.ctx
 }
+
+func newChunksLimiterFactory(limits *validation.Overrides, userID string) store.ChunksLimiterFactory {
+	return func(failedCounter prometheus.Counter) store.ChunksLimiter {
+		// Since limit overrides could be live reloaded, we have to get the current user's limit
+		// each time a new limiter is instantiated.
+		return store.NewLimiter(uint64(limits.MaxChunksPerQuery(userID)), failedCounter)
+	}
+}
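Because limit overrides can be live-reloaded, the factory reads the tenant's current limit each time a limiter is built, i.e. once per Series() call, instead of capturing it at startup. A rough sketch of that pattern (the types and names below are illustrative, not the vendored store API):

	package main

	import "fmt"

	// limiter is a stand-in for store.ChunksLimiter: it tracks reservations
	// against a fixed limit.
	type limiter struct {
		limit, reserved uint64
	}

	func (l *limiter) Reserve(num uint64) error {
		l.reserved += num
		if l.limit > 0 && l.reserved > l.limit {
			return fmt.Errorf("limit %d violated (got %d)", l.limit, l.reserved)
		}
		return nil
	}

	// newLimiterFactory mirrors the pattern above: the per-tenant limit is read
	// each time a limiter is created, so live-reloaded overrides take effect on
	// the next query instead of requiring a restart.
	func newLimiterFactory(currentLimit func(userID string) uint64, userID string) func() *limiter {
		return func() *limiter {
			return &limiter{limit: currentLimit(userID)}
		}
	}

	func main() {
		limits := map[string]uint64{"tenant-a": 2}
		factory := newLimiterFactory(func(u string) uint64 { return limits[u] }, "tenant-a")

		l := factory()            // one limiter per Series() call
		fmt.Println(l.Reserve(2)) // <nil>
		fmt.Println(l.Reserve(1)) // limit 2 violated (got 3)
	}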
diff --git a/vendor/github.com/cortexproject/cortex/pkg/storegateway/gateway.go b/vendor/github.com/cortexproject/cortex/pkg/storegateway/gateway.go
index 8ce68c660a6c1..4182a9878981b 100644
--- a/vendor/github.com/cortexproject/cortex/pkg/storegateway/gateway.go
+++ b/vendor/github.com/cortexproject/cortex/pkg/storegateway/gateway.go
@@ -22,6 +22,7 @@ import (
 	"github.com/cortexproject/cortex/pkg/storegateway/storegatewaypb"
 	"github.com/cortexproject/cortex/pkg/util"
 	"github.com/cortexproject/cortex/pkg/util/services"
+	"github.com/cortexproject/cortex/pkg/util/validation"
 )
 
 const (
@@ -58,7 +59,7 @@ type StoreGateway struct {
 	services.Service
 
 	gatewayCfg Config
-	storageCfg cortex_tsdb.Config
+	storageCfg cortex_tsdb.BlocksStorageConfig
 	logger     log.Logger
 	stores     *BucketStores
 
@@ -73,7 +74,7 @@ type StoreGateway struct {
 	bucketSync *prometheus.CounterVec
 }
 
-func NewStoreGateway(gatewayCfg Config, storageCfg cortex_tsdb.Config, logLevel logging.Level, logger log.Logger, reg prometheus.Registerer) (*StoreGateway, error) {
+func NewStoreGateway(gatewayCfg Config, storageCfg cortex_tsdb.BlocksStorageConfig, limits *validation.Overrides, logLevel logging.Level, logger log.Logger, reg prometheus.Registerer) (*StoreGateway, error) {
 	var ringStore kv.Client
 
 	bucketClient, err := createBucketClient(storageCfg, logger, reg)
@@ -92,10 +93,10 @@ func NewStoreGateway(gatewayCfg Config, storageCfg cortex_tsdb.Config, logLevel
 		}
 	}
 
-	return newStoreGateway(gatewayCfg, storageCfg, bucketClient, ringStore, logLevel, logger, reg)
+	return newStoreGateway(gatewayCfg, storageCfg, bucketClient, ringStore, limits, logLevel, logger, reg)
 }
 
-func newStoreGateway(gatewayCfg Config, storageCfg cortex_tsdb.Config, bucketClient objstore.Bucket, ringStore kv.Client, logLevel logging.Level, logger log.Logger, reg prometheus.Registerer) (*StoreGateway, error) {
+func newStoreGateway(gatewayCfg Config, storageCfg cortex_tsdb.BlocksStorageConfig, bucketClient objstore.Bucket, ringStore kv.Client, limits *validation.Overrides, logLevel logging.Level, logger log.Logger, reg prometheus.Registerer) (*StoreGateway, error) {
 	var err error
 	var filters []block.MetadataFilter
 
@@ -147,7 +148,7 @@ func newStoreGateway(gatewayCfg Config, storageCfg cortex_tsdb.Config, bucketCli
 		filters = append(filters, NewShardingMetadataFilter(g.ring, lifecyclerCfg.Addr, logger))
 	}
 
-	g.stores, err = NewBucketStores(storageCfg, filters, bucketClient, logLevel, logger, extprom.WrapRegistererWith(prometheus.Labels{"component": "store-gateway"}, reg))
+	g.stores, err = NewBucketStores(storageCfg, filters, bucketClient, limits, logLevel, logger, extprom.WrapRegistererWith(prometheus.Labels{"component": "store-gateway"}, reg))
 	if err != nil {
 		return nil, errors.Wrap(err, "create bucket stores")
 	}
@@ -305,7 +306,7 @@ func (g *StoreGateway) OnRingInstanceStopping(_ *ring.BasicLifecycler)
 func (g *StoreGateway) OnRingInstanceHeartbeat(_ *ring.BasicLifecycler, _ *ring.Desc, _ *ring.IngesterDesc) {
 }
 
-func createBucketClient(cfg cortex_tsdb.Config, logger log.Logger, reg prometheus.Registerer) (objstore.Bucket, error) {
+func createBucketClient(cfg cortex_tsdb.BlocksStorageConfig, logger log.Logger, reg prometheus.Registerer) (objstore.Bucket, error) {
 	bucketClient, err := cortex_tsdb.NewBucketClient(context.Background(), cfg, "store-gateway", logger, reg)
 	if err != nil {
 		return nil, errors.Wrap(err, "create bucket client")
diff --git a/vendor/github.com/cortexproject/cortex/pkg/util/events.go b/vendor/github.com/cortexproject/cortex/pkg/util/events.go
index ec96106be07e1..dba9ec30df3bf 100644
--- a/vendor/github.com/cortexproject/cortex/pkg/util/events.go
+++ b/vendor/github.com/cortexproject/cortex/pkg/util/events.go
@@ -2,9 +2,9 @@ package util
 
 import (
 	"os"
-	"sync/atomic"
 
 	"github.com/go-kit/kit/log"
+	"go.uber.org/atomic"
 )
 
 // Provide an "event" interface for observability
@@ -43,11 +43,11 @@ func newEventLogger(freq int) log.Logger {
 type samplingFilter struct {
 	next  log.Logger
 	freq  int
-	count int64
+	count atomic.Int64
 }
 
 func (e *samplingFilter) Log(keyvals ...interface{}) error {
-	count := atomic.AddInt64(&e.count, 1)
+	count := e.count.Inc()
 	if count%int64(e.freq) == 0 {
 		return e.next.Log(keyvals...)
 	}
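The sampling filter change is the same int64-to-atomic.Int64 migration: the event logger only forwards every freq-th log call, and the counter must be safe for concurrent callers. A minimal sketch of the idea (simplified; not the vendored logger):

	package main

	import (
		"fmt"

		"go.uber.org/atomic"
	)

	// samplingPrinter is a simplified analogue of the samplingFilter: it
	// forwards only every freq-th call, using an atomic counter so it is safe
	// to share between goroutines.
	type samplingPrinter struct {
		freq  int64
		count atomic.Int64
	}

	func (s *samplingPrinter) Log(msg string) {
		if s.count.Inc()%s.freq == 0 {
			fmt.Println(msg)
		}
	}

	func main() {
		p := &samplingPrinter{freq: 3}
		for i := 1; i <= 9; i++ {
			p.Log(fmt.Sprintf("event %d", i)) // prints events 3, 6 and 9
		}
	}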
diff --git a/vendor/github.com/cortexproject/cortex/pkg/util/metrics_helper.go b/vendor/github.com/cortexproject/cortex/pkg/util/metrics_helper.go
index 0dabf741fc549..20c6119675302 100644
--- a/vendor/github.com/cortexproject/cortex/pkg/util/metrics_helper.go
+++ b/vendor/github.com/cortexproject/cortex/pkg/util/metrics_helper.go
@@ -166,6 +166,17 @@ func (d MetricFamiliesPerUser) SendSumOfCountersPerUser(out chan<- prometheus.Me
 	}
 }
 
+// SendSumOfCountersPerUserWithLabels provides per-user metrics with the provided label names. This function assumes that `user` is the
+// first label on the provided metric Desc.
+func (d MetricFamiliesPerUser) SendSumOfCountersPerUserWithLabels(out chan<- prometheus.Metric, desc *prometheus.Desc, metric string, labelNames ...string) {
+	for user, userMetrics := range d {
+		result := singleValueWithLabelsMap{}
+		userMetrics.sumOfSingleValuesWithLabels(metric, labelNames, counterValue, result.aggregateFn)
+		result.prependUserLabelValue(user)
+		result.WriteToMetricChannel(out, desc, prometheus.CounterValue)
+	}
+}
+
 func (d MetricFamiliesPerUser) GetSumOfGauges(gauge string) float64 {
 	result := float64(0)
 	for _, userMetrics := range d {
@@ -254,6 +265,13 @@ func (d MetricFamiliesPerUser) SendSumOfSummariesWithLabels(out chan<- prometheu
 	}
 }
 
+func (d MetricFamiliesPerUser) SendSumOfSummariesPerUser(out chan<- prometheus.Metric, desc *prometheus.Desc, summaryName string) {
+	for user, userMetrics := range d {
+		data := userMetrics.SumSummaries(summaryName)
+		out <- data.Metric(desc, user)
+	}
+}
+
 func (d MetricFamiliesPerUser) SendSumOfHistograms(out chan<- prometheus.Metric, desc *prometheus.Desc, histogramName string) {
 	hd := HistogramData{}
 	for _, userMetrics := range d {
diff --git a/vendor/github.com/cortexproject/cortex/pkg/util/validation/limits.go b/vendor/github.com/cortexproject/cortex/pkg/util/validation/limits.go
index 62a9029989189..2a7a38aa88635 100644
--- a/vendor/github.com/cortexproject/cortex/pkg/util/validation/limits.go
+++ b/vendor/github.com/cortexproject/cortex/pkg/util/validation/limits.go
@@ -93,17 +93,17 @@ func (l *Limits) RegisterFlags(f *flag.FlagSet) {
 	f.IntVar(&l.MaxLocalSeriesPerMetric, "ingester.max-series-per-metric", 50000, "The maximum number of active series per metric name, per ingester. 0 to disable.")
 	f.IntVar(&l.MaxGlobalSeriesPerUser, "ingester.max-global-series-per-user", 0, "The maximum number of active series per user, across the cluster. 0 to disable. Supported only if -distributor.shard-by-all-labels is true.")
 	f.IntVar(&l.MaxGlobalSeriesPerMetric, "ingester.max-global-series-per-metric", 0, "The maximum number of active series per metric name, across the cluster. 0 to disable.")
-	f.IntVar(&l.MinChunkLength, "ingester.min-chunk-length", 0, "Minimum number of samples in an idle chunk to flush it to the store. Use with care, if chunks are less than this size they will be discarded. This option is ignored when running the Cortex blocks storage.")
+	f.IntVar(&l.MinChunkLength, "ingester.min-chunk-length", 0, "Minimum number of samples in an idle chunk to flush it to the store. Use with care, if chunks are less than this size they will be discarded. This option is ignored when running the Cortex blocks storage. 0 to disable.")
 
 	f.IntVar(&l.MaxLocalMetricsWithMetadataPerUser, "ingester.max-metadata-per-user", 8000, "The maximum number of active metrics with metadata per user, per ingester. 0 to disable.")
 	f.IntVar(&l.MaxLocalMetadataPerMetric, "ingester.max-metadata-per-metric", 10, "The maximum number of metadata per metric, per ingester. 0 to disable.")
 	f.IntVar(&l.MaxGlobalMetricsWithMetadataPerUser, "ingester.max-global-metadata-per-user", 0, "The maximum number of active metrics with metadata per user, across the cluster. 0 to disable. Supported only if -distributor.shard-by-all-labels is true.")
 	f.IntVar(&l.MaxGlobalMetadataPerMetric, "ingester.max-global-metadata-per-metric", 0, "The maximum number of metadata per metric, across the cluster. 0 to disable.")
 
-	f.IntVar(&l.MaxChunksPerQuery, "store.query-chunk-limit", 2e6, "Maximum number of chunks that can be fetched in a single query. This limit is enforced when fetching chunks from the long-term storage.")
+	f.IntVar(&l.MaxChunksPerQuery, "store.query-chunk-limit", 2e6, "Maximum number of chunks that can be fetched in a single query. This limit is enforced when fetching chunks from the long-term storage. When running the Cortex chunks storage, this limit is enforced in the querier, while when running the Cortex blocks storage it is enforced in both the querier and store-gateway. 0 to disable.")
 	f.DurationVar(&l.MaxQueryLength, "store.max-query-length", 0, "Limit the query time range (end - start time). This limit is enforced in the query-frontend (on the received query), in the querier (on the query possibly split by the query-frontend) and in the chunks storage. 0 to disable.")
 	f.IntVar(&l.MaxQueryParallelism, "querier.max-query-parallelism", 14, "Maximum number of queries will be scheduled in parallel by the frontend.")
-	f.IntVar(&l.CardinalityLimit, "store.cardinality-limit", 1e5, "Cardinality limit for index queries. This limit is ignored when running the Cortex blocks storage.")
+	f.IntVar(&l.CardinalityLimit, "store.cardinality-limit", 1e5, "Cardinality limit for index queries. This limit is ignored when running the Cortex blocks storage. 0 to disable.")
 	f.DurationVar(&l.MaxCacheFreshness, "frontend.max-cache-freshness", 1*time.Minute, "Most recent allowed cacheable result per-tenant, to prevent caching very recent results that might still be in flux.")
 
 	f.StringVar(&l.PerTenantOverrideConfig, "limits.per-user-override-config", "", "File name of per-user overrides. [deprecated, use -runtime-config.file instead]")
diff --git a/vendor/github.com/thanos-io/thanos/pkg/block/fetcher.go b/vendor/github.com/thanos-io/thanos/pkg/block/fetcher.go
index 3b071a0695f5e..ed4a209cc6818 100644
--- a/vendor/github.com/thanos-io/thanos/pkg/block/fetcher.go
+++ b/vendor/github.com/thanos-io/thanos/pkg/block/fetcher.go
@@ -31,6 +31,7 @@ import (
 	"github.com/thanos-io/thanos/pkg/objstore"
 	"github.com/thanos-io/thanos/pkg/runutil"
 	"golang.org/x/sync/errgroup"
+	"gopkg.in/yaml.v2"
 )
 
 type fetcherMetrics struct {
@@ -795,3 +796,20 @@ func (f *IgnoreDeletionMarkFilter) Filter(ctx context.Context, metas map[ulid.UL
 	}
 	return nil
 }
+
+// ParseRelabelConfig parses relabel configuration.
+func ParseRelabelConfig(contentYaml []byte) ([]*relabel.Config, error) {
+	var relabelConfig []*relabel.Config
+	if err := yaml.Unmarshal(contentYaml, &relabelConfig); err != nil {
+		return nil, errors.Wrap(err, "parsing relabel configuration")
+	}
+	supportedActions := map[relabel.Action]struct{}{relabel.Keep: {}, relabel.Drop: {}, relabel.HashMod: {}}
+
+	for _, cfg := range relabelConfig {
+		if _, ok := supportedActions[cfg.Action]; !ok {
+			return nil, errors.Errorf("unsupported relabel action: %v", cfg.Action)
+		}
+	}
+
+	return relabelConfig, nil
+}
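ParseRelabelConfig only admits the keep, drop and hashmod actions; anything else is rejected up front. A rough usage sketch, assuming the vendored Thanos block package import path and a purely illustrative __meta_tenant_id label:

	package main

	import (
		"fmt"

		"github.com/thanos-io/thanos/pkg/block"
	)

	func main() {
		// A single drop rule; an unsupported action (e.g. "replace") would make
		// ParseRelabelConfig return an error instead.
		cfg := []byte("- action: drop\n  source_labels: [__meta_tenant_id]\n  regex: tenant-to-skip\n")

		cfgs, err := block.ParseRelabelConfig(cfg)
		if err != nil {
			panic(err)
		}
		fmt.Println(len(cfgs)) // 1
	}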
diff --git a/vendor/github.com/thanos-io/thanos/pkg/compact/compact.go b/vendor/github.com/thanos-io/thanos/pkg/compact/compact.go
index b034248fbe4d4..562a4ca336e44 100644
--- a/vendor/github.com/thanos-io/thanos/pkg/compact/compact.go
+++ b/vendor/github.com/thanos-io/thanos/pkg/compact/compact.go
@@ -22,6 +22,7 @@ import (
 	"github.com/prometheus/prometheus/pkg/labels"
 	"github.com/prometheus/prometheus/tsdb"
 	terrors "github.com/prometheus/prometheus/tsdb/errors"
+
 	"github.com/thanos-io/thanos/pkg/block"
 	"github.com/thanos-io/thanos/pkg/block/metadata"
 	"github.com/thanos-io/thanos/pkg/compact/downsample"
@@ -118,7 +119,7 @@ func UntilNextDownsampling(m *metadata.Meta) (time.Duration, error) {
 	}
 }
 
-// SyncMetas synchronises local state of block metas with what we have in the bucket.
+// SyncMetas synchronizes local state of block metas with what we have in the bucket.
 func (s *Syncer) SyncMetas(ctx context.Context) error {
 	s.mtx.Lock()
 	defer s.mtx.Unlock()
diff --git a/vendor/github.com/thanos-io/thanos/pkg/discovery/dns/provider.go b/vendor/github.com/thanos-io/thanos/pkg/discovery/dns/provider.go
index 153f82769ede4..e80f7aac49f50 100644
--- a/vendor/github.com/thanos-io/thanos/pkg/discovery/dns/provider.go
+++ b/vendor/github.com/thanos-io/thanos/pkg/discovery/dns/provider.go
@@ -14,6 +14,7 @@ import (
 	"github.com/prometheus/client_golang/prometheus"
 	"github.com/prometheus/client_golang/prometheus/promauto"
 	tsdberrors "github.com/prometheus/prometheus/tsdb/errors"
+
 	"github.com/thanos-io/thanos/pkg/discovery/dns/miekgdns"
 	"github.com/thanos-io/thanos/pkg/extprom"
 )
@@ -53,7 +54,7 @@ func (t ResolverType) ToResolver(logger log.Logger) ipLookupResolver {
 }
 
 // NewProvider returns a new empty provider with a given resolver type.
-// If empty resolver type is net.DefaultResolver.w
+// If empty resolver type is net.DefaultResolver.
 func NewProvider(logger log.Logger, reg prometheus.Registerer, resolverType ResolverType) *Provider {
 	p := &Provider{
 		resolver: NewResolver(resolverType.ToResolver(logger)),
diff --git a/vendor/github.com/thanos-io/thanos/pkg/gate/gate.go b/vendor/github.com/thanos-io/thanos/pkg/gate/gate.go
index 4a75df0d5ca66..a87e3af5aa7a5 100644
--- a/vendor/github.com/thanos-io/thanos/pkg/gate/gate.go
+++ b/vendor/github.com/thanos-io/thanos/pkg/gate/gate.go
@@ -12,7 +12,7 @@ import (
 	promgate "github.com/prometheus/prometheus/pkg/gate"
 )
 
-// Gate is an interface that mimics prometheus/pkg/gate behaviour.
+// Gate is an interface that mimics prometheus/pkg/gate behavior.
 type Gate interface {
 	Start(ctx context.Context) error
 	Done()
diff --git a/vendor/github.com/thanos-io/thanos/pkg/objstore/swift/swift.go b/vendor/github.com/thanos-io/thanos/pkg/objstore/swift/swift.go
index c11715ce4fa4e..6214579c68376 100644
--- a/vendor/github.com/thanos-io/thanos/pkg/objstore/swift/swift.go
+++ b/vendor/github.com/thanos-io/thanos/pkg/objstore/swift/swift.go
@@ -19,8 +19,9 @@ import (
 	"github.com/gophercloud/gophercloud/openstack/objectstorage/v1/objects"
 	"github.com/gophercloud/gophercloud/pagination"
 	"github.com/pkg/errors"
-	"github.com/thanos-io/thanos/pkg/objstore"
 	"gopkg.in/yaml.v2"
+
+	"github.com/thanos-io/thanos/pkg/objstore"
 )
 
 // DirDelim is the delimiter used to model a directory structure in an object store bucket.
@@ -55,12 +56,7 @@ func NewContainer(logger log.Logger, conf []byte) (*Container, error) {
 		return nil, err
 	}
 
-	authOpts, err := authOptsFromConfig(sc)
-	if err != nil {
-		return nil, err
-	}
-
-	provider, err := openstack.AuthenticatedClient(authOpts)
+	provider, err := openstack.AuthenticatedClient(authOptsFromConfig(sc))
 	if err != nil {
 		return nil, err
 	}
@@ -93,7 +89,7 @@ func (c *Container) Iter(ctx context.Context, dir string, f func(string) error)
 		dir = strings.TrimSuffix(dir, DirDelim) + DirDelim
 	}
 
-	options := &objects.ListOpts{Full: false, Prefix: dir, Delimiter: DirDelim}
+	options := &objects.ListOpts{Full: true, Prefix: dir, Delimiter: DirDelim}
 	return objects.List(c.client, c.name, options).EachPage(func(page pagination.Page) (bool, error) {
 		objectNames, err := objects.ExtractNames(page)
 		if err != nil {
@@ -120,9 +116,17 @@ func (c *Container) Get(ctx context.Context, name string) (io.ReadCloser, error)
 
 // GetRange returns a new range reader for the given object name and range.
 func (c *Container) GetRange(ctx context.Context, name string, off, length int64) (io.ReadCloser, error) {
+	lowerLimit := ""
+	upperLimit := ""
+	if off >= 0 {
+		lowerLimit = fmt.Sprintf("%d", off)
+	}
+	if length > 0 {
+		upperLimit = fmt.Sprintf("%d", off+length-1)
+	}
 	options := objects.DownloadOpts{
 		Newest: true,
-		Range:  fmt.Sprintf("bytes=%d-%d", off, off+length-1),
+		Range:  fmt.Sprintf("bytes=%s-%s", lowerLimit, upperLimit),
 	}
 	response := objects.Download(c.client, c.name, name, options)
 	return response.Body, response.Err
@@ -185,7 +189,7 @@ func parseConfig(conf []byte) (*SwiftConfig, error) {
 	return &sc, err
 }
 
-func authOptsFromConfig(sc *SwiftConfig) (gophercloud.AuthOptions, error) {
+func authOptsFromConfig(sc *SwiftConfig) gophercloud.AuthOptions {
 	authOpts := gophercloud.AuthOptions{
 		IdentityEndpoint: sc.AuthUrl,
 		Username:         sc.Username,
@@ -229,7 +233,7 @@ func authOptsFromConfig(sc *SwiftConfig) (gophercloud.AuthOptions, error) {
 			authOpts.Scope.ProjectID = sc.ProjectID
 		}
 	}
-	return authOpts, nil
+	return authOpts
 }
 
 func (c *Container) createContainer(name string) error {
@@ -251,7 +255,7 @@ func configFromEnv() SwiftConfig {
 		ProjectName:       os.Getenv("OS_PROJECT_NAME"),
 		UserDomainID:      os.Getenv("OS_USER_DOMAIN_ID"),
 		UserDomainName:    os.Getenv("OS_USER_DOMAIN_NAME"),
-		ProjectDomainID:   os.Getenv("OS_PROJET_DOMAIN_ID"),
+		ProjectDomainID:   os.Getenv("OS_PROJECT_DOMAIN_ID"),
 		ProjectDomainName: os.Getenv("OS_PROJECT_DOMAIN_NAME"),
 	}
 
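The GetRange change makes the Swift client emit open-ended HTTP byte ranges when the offset is negative or the length is non-positive, instead of a malformed header like "bytes=100--1". A small sketch of the header construction it now performs:

	package main

	import "fmt"

	// rangeHeader mirrors the new GetRange logic: a negative offset or a
	// non-positive length leaves the corresponding bound empty, producing
	// open-ended ranges such as "bytes=100-".
	func rangeHeader(off, length int64) string {
		lower, upper := "", ""
		if off >= 0 {
			lower = fmt.Sprintf("%d", off)
		}
		if length > 0 {
			upper = fmt.Sprintf("%d", off+length-1)
		}
		return fmt.Sprintf("bytes=%s-%s", lower, upper)
	}

	func main() {
		fmt.Println(rangeHeader(100, 50)) // bytes=100-149
		fmt.Println(rangeHeader(100, -1)) // bytes=100-
	}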
diff --git a/vendor/github.com/thanos-io/thanos/pkg/rules/rulespb/custom.go b/vendor/github.com/thanos-io/thanos/pkg/rules/rulespb/custom.go
index 98205960e468c..2d49ae72ce314 100644
--- a/vendor/github.com/thanos-io/thanos/pkg/rules/rulespb/custom.go
+++ b/vendor/github.com/thanos-io/thanos/pkg/rules/rulespb/custom.go
@@ -177,6 +177,15 @@ func (r1 *Rule) Compare(r2 *Rule) int {
 	return 0
 }
 
+func (r *RuleGroups) MarshalJSON() ([]byte, error) {
+	if r.Groups == nil {
+		// Ensure that empty slices are marshaled as '[]' and not 'null'.
+		return []byte(`{"groups":[]}`), nil
+	}
+	type plain RuleGroups
+	return json.Marshal((*plain)(r))
+}
+
 func (m *Rule) UnmarshalJSON(entry []byte) error {
 	decider := struct {
 		Type string `json:"type"`
@@ -219,6 +228,10 @@ func (m *Rule) MarshalJSON() ([]byte, error) {
 		})
 	}
 	a := m.GetAlert()
+	if a.Alerts == nil {
+		// Ensure that empty slices are marshaled as '[]' and not 'null'.
+		a.Alerts = make([]*AlertInstance, 0)
+	}
 	return json.Marshal(struct {
 		*Alert
 		Type string `json:"type"`
@@ -247,7 +260,7 @@ func (x *AlertState) UnmarshalJSON(entry []byte) error {
 }
 
 func (x *AlertState) MarshalJSON() ([]byte, error) {
-	return []byte(strconv.Quote(x.String())), nil
+	return []byte(strconv.Quote(strings.ToLower(x.String()))), nil
 }
 
 // Compare compares alert state x and y and returns:
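The MarshalJSON additions ensure that rule and alert listings serialize nil slices as [] rather than null, so JSON consumers never have to special-case empty results. A minimal sketch of the same trick on a simplified type (not the generated RuleGroups proto):

	package main

	import (
		"encoding/json"
		"fmt"
	)

	// groups is a simplified analogue of RuleGroups: without the custom
	// MarshalJSON a nil slice serializes as "null"; with it, clients always
	// see an empty array.
	type groups struct {
		Groups []string `json:"groups"`
	}

	func (g groups) MarshalJSON() ([]byte, error) {
		if g.Groups == nil {
			return []byte(`{"groups":[]}`), nil
		}
		// The "plain" alias drops the MarshalJSON method and avoids recursion.
		type plain groups
		return json.Marshal(plain(g))
	}

	func main() {
		out, _ := json.Marshal(groups{})
		fmt.Println(string(out)) // {"groups":[]}
	}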
diff --git a/vendor/github.com/thanos-io/thanos/pkg/store/bucket.go b/vendor/github.com/thanos-io/thanos/pkg/store/bucket.go
index 33e9d68918019..d49dc51a3d106 100644
--- a/vendor/github.com/thanos-io/thanos/pkg/store/bucket.go
+++ b/vendor/github.com/thanos-io/thanos/pkg/store/bucket.go
@@ -54,13 +54,13 @@ import (
 )
 
 const (
-	// maxSamplesPerChunk is approximately the max number of samples that we may have in any given chunk. This is needed
+	// MaxSamplesPerChunk is approximately the max number of samples that we may have in any given chunk. This is needed
 	// for precalculating the number of samples that we may have to retrieve and decode for any given query
 	// without downloading them. Please take a look at https://github.com/prometheus/tsdb/pull/397 to know
 	// where this number comes from. Long story short: TSDB is made in such a way, and it is made in such a way
 	// because you barely get any improvements in compression when the number of samples is beyond this.
 	// Take a look at Figure 6 in this whitepaper http://www.vldb.org/pvldb/vol8/p1816-teller.pdf.
-	maxSamplesPerChunk = 120
+	MaxSamplesPerChunk = 120
 	maxChunkSize       = 16000
 	maxSeriesSize      = 64 * 1024
 
@@ -240,9 +240,9 @@ type BucketStore struct {
 	// Query gate which limits the maximum amount of concurrent queries.
 	queryGate gate.Gate
 
-	// samplesLimiter limits the number of samples per each Series() call.
-	samplesLimiter SampleLimiter
-	partitioner    partitioner
+	// chunksLimiterFactory creates a new limiter used to limit the number of chunks fetched by each Series() call.
+	chunksLimiterFactory ChunksLimiterFactory
+	partitioner          partitioner
 
 	filterConfig             *FilterConfig
 	advLabelSets             []storepb.LabelSet
@@ -269,7 +269,7 @@ func NewBucketStore(
 	indexCache storecache.IndexCache,
 	queryGate gate.Gate,
 	maxChunkPoolBytes uint64,
-	maxSampleCount uint64,
+	chunksLimiterFactory ChunksLimiterFactory,
 	debugLogging bool,
 	blockSyncConcurrency int,
 	filterConfig *FilterConfig,
@@ -287,7 +287,6 @@ func NewBucketStore(
 		return nil, errors.Wrap(err, "create chunk pool")
 	}
 
-	metrics := newBucketStoreMetrics(reg)
 	s := &BucketStore{
 		logger:                      logger,
 		bkt:                         bkt,
@@ -301,14 +300,14 @@ func NewBucketStore(
 		blockSyncConcurrency:        blockSyncConcurrency,
 		filterConfig:                filterConfig,
 		queryGate:                   queryGate,
-		samplesLimiter:              NewLimiter(maxSampleCount, metrics.queriesDropped),
+		chunksLimiterFactory:        chunksLimiterFactory,
 		partitioner:                 gapBasedPartitioner{maxGapSize: partitionerMaxGapSize},
 		enableCompatibilityLabel:    enableCompatibilityLabel,
 		enablePostingsCompression:   enablePostingsCompression,
 		postingOffsetsInMemSampling: postingOffsetsInMemSampling,
 		enableSeriesResponseHints:   enableSeriesResponseHints,
+		metrics:                     newBucketStoreMetrics(reg),
 	}
-	s.metrics = metrics
 
 	if err := os.MkdirAll(dir, 0777); err != nil {
 		return nil, errors.Wrap(err, "create dir")
@@ -649,7 +648,7 @@ func blockSeries(
 	chunkr *bucketChunkReader,
 	matchers []*labels.Matcher,
 	req *storepb.SeriesRequest,
-	samplesLimiter SampleLimiter,
+	chunksLimiter ChunksLimiter,
 ) (storepb.SeriesSet, *queryStats, error) {
 	ps, err := indexr.ExpandedPostings(matchers)
 	if err != nil {
@@ -722,12 +721,16 @@ func blockSeries(
 			s.refs = append(s.refs, meta.Ref)
 		}
 		if len(s.chks) > 0 {
+			if err := chunksLimiter.Reserve(uint64(len(s.chks))); err != nil {
+				return nil, nil, errors.Wrap(err, "exceeded chunks limit")
+			}
+
 			res = append(res, s)
 		}
 	}
 
 	// Preload all chunks that were marked in the previous stage.
-	if err := chunkr.preload(samplesLimiter); err != nil {
+	if err := chunkr.preload(); err != nil {
 		return nil, nil, errors.Wrap(err, "preload chunks")
 	}
 
@@ -858,6 +861,7 @@ func (s *BucketStore) Series(req *storepb.SeriesRequest, srv storepb.Store_Serie
 		g, gctx          = errgroup.WithContext(ctx)
 		resHints         = &hintspb.SeriesResponseHints{}
 		reqBlockMatchers []*labels.Matcher
+		chunksLimiter    = s.chunksLimiterFactory(s.metrics.queriesDropped)
 	)
 
 	if req.Hints != nil {
@@ -909,7 +913,7 @@ func (s *BucketStore) Series(req *storepb.SeriesRequest, srv storepb.Store_Serie
 					chunkr,
 					blockMatchers,
 					req,
-					s.samplesLimiter,
+					chunksLimiter,
 				)
 				if err != nil {
 					return errors.Wrapf(err, "fetch series for block %s", b.meta.ULID)
@@ -1705,7 +1709,8 @@ func (r *bucketIndexReader) fetchPostings(keys []labels.Label) ([]index.Postings
 					// Errors from corrupted postings will be reported when postings are used.
 					compressions++
 					s := time.Now()
-					data, err := diffVarintSnappyEncode(newBigEndianPostings(pBytes[4:]))
+					bep := newBigEndianPostings(pBytes[4:])
+					data, err := diffVarintSnappyEncode(bep, bep.length())
 					compressionTime = time.Since(s)
 					if err == nil {
 						dataToCache = data
@@ -1803,6 +1808,11 @@ func (it *bigEndianPostings) Err() error {
 	return nil
 }
 
+// length returns the number of remaining postings values.
+func (it *bigEndianPostings) length() int {
+	return len(it.list) / 4
+}
+
 func (r *bucketIndexReader) PreloadSeries(ids []uint64) error {
 	// Load series from cache, overwriting the list of ids to preload
 	// with the missing ones.
@@ -1977,19 +1987,9 @@ func (r *bucketChunkReader) addPreload(id uint64) error {
 }
 
 // preload all added chunk IDs. Must be called before the first call to Chunk is made.
-func (r *bucketChunkReader) preload(samplesLimiter SampleLimiter) error {
+func (r *bucketChunkReader) preload() error {
 	g, ctx := errgroup.WithContext(r.ctx)
 
-	numChunks := uint64(0)
-	for _, offsets := range r.preloads {
-		for range offsets {
-			numChunks++
-		}
-	}
-	if err := samplesLimiter.Check(numChunks * maxSamplesPerChunk); err != nil {
-		return errors.Wrap(err, "exceeded samples limit")
-	}
-
 	for seq, offsets := range r.preloads {
 		sort.Slice(offsets, func(i, j int) bool {
 			return offsets[i] < offsets[j]
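With this refactor, blockSeries reserves the exact number of chunks selected for each series as it goes, instead of estimating samples as chunks x 120 at preload time. A rough sketch of that accounting loop, assuming a Reserve-style callback (names below are illustrative):

	package main

	import "fmt"

	type series struct{ chunks int }

	// selectSeries mirrors the new accounting in blockSeries: every series
	// that carries chunks reserves them against the per-query limit before
	// being added to the result set.
	func selectSeries(all []series, reserve func(uint64) error) ([]series, error) {
		var res []series
		for _, s := range all {
			if s.chunks == 0 {
				continue
			}
			if err := reserve(uint64(s.chunks)); err != nil {
				return nil, fmt.Errorf("exceeded chunks limit: %w", err)
			}
			res = append(res, s)
		}
		return res, nil
	}

	func main() {
		limit, used := uint64(5), uint64(0)
		reserve := func(n uint64) error {
			if used += n; used > limit {
				return fmt.Errorf("limit %d violated", limit)
			}
			return nil
		}
		res, err := selectSeries([]series{{3}, {2}, {1}}, reserve)
		fmt.Println(len(res), err) // 0 exceeded chunks limit: limit 5 violated
	}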
diff --git a/vendor/github.com/thanos-io/thanos/pkg/store/limiter.go b/vendor/github.com/thanos-io/thanos/pkg/store/limiter.go
index 5c23752d73e01..1e354721c23eb 100644
--- a/vendor/github.com/thanos-io/thanos/pkg/store/limiter.go
+++ b/vendor/github.com/thanos-io/thanos/pkg/store/limiter.go
@@ -4,20 +4,32 @@
 package store
 
 import (
+	"sync"
+	"sync/atomic"
+
 	"github.com/pkg/errors"
 	"github.com/prometheus/client_golang/prometheus"
 )
 
-type SampleLimiter interface {
-	Check(num uint64) error
+type ChunksLimiter interface {
+	// Reserve num chunks out of the total number of chunks enforced by the limiter.
+	// Returns an error if the limit has been exceeded. This function must be
+	// goroutine safe.
+	Reserve(num uint64) error
 }
 
+// ChunksLimiterFactory is used to create a new ChunksLimiter. The factory is useful for
+// projects depending on Thanos (e.g. Cortex) which have dynamic limits.
+type ChunksLimiterFactory func(failedCounter prometheus.Counter) ChunksLimiter
+
 // Limiter is a simple mechanism for checking if something has passed a certain threshold.
 type Limiter struct {
-	limit uint64
+	limit    uint64
+	reserved uint64
 
-	// Counter metric which we will increase if Check() fails.
+	// Counter metric which we will increase if limit is exceeded.
 	failedCounter prometheus.Counter
+	failedOnce    sync.Once
 }
 
 // NewLimiter returns a new limiter with a specified limit. 0 disables the limit.
@@ -25,14 +37,23 @@ func NewLimiter(limit uint64, ctr prometheus.Counter) *Limiter {
 	return &Limiter{limit: limit, failedCounter: ctr}
 }
 
-// Check checks if the passed number exceeds the limits or not.
-func (l *Limiter) Check(num uint64) error {
+// Reserve implements ChunksLimiter.
+func (l *Limiter) Reserve(num uint64) error {
 	if l.limit == 0 {
 		return nil
 	}
-	if num > l.limit {
-		l.failedCounter.Inc()
-		return errors.Errorf("limit %v violated (got %v)", l.limit, num)
+	if reserved := atomic.AddUint64(&l.reserved, num); reserved > l.limit {
+		// We need to protect from the counter being incremented twice due to concurrency
+		// while calling Reserve().
+		l.failedOnce.Do(l.failedCounter.Inc)
+		return errors.Errorf("limit %v violated (got %v)", l.limit, reserved)
 	}
 	return nil
 }
+
+// NewChunksLimiterFactory makes a new ChunksLimiterFactory with a static limit.
+func NewChunksLimiterFactory(limit uint64) ChunksLimiterFactory {
+	return func(failedCounter prometheus.Counter) ChunksLimiter {
+		return NewLimiter(limit, failedCounter)
+	}
+}
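Reserve keeps a running total with an atomic add, so concurrent goroutines within one query share the same budget, and sync.Once ensures the failure counter is bumped at most once per limiter. A cut-down, self-contained sketch of that behaviour (a plain int stands in for the Prometheus counter):

	package main

	import (
		"fmt"
		"sync"
		"sync/atomic"
	)

	// reserveLimiter is a reduced version of the Limiter above: Reserve adds
	// to a running total atomically and fails once the total exceeds the
	// limit; sync.Once protects the failure count from double increments.
	type reserveLimiter struct {
		limit    uint64
		reserved uint64
		once     sync.Once
		failures int
	}

	func (l *reserveLimiter) Reserve(num uint64) error {
		if l.limit == 0 {
			return nil
		}
		if reserved := atomic.AddUint64(&l.reserved, num); reserved > l.limit {
			l.once.Do(func() { l.failures++ })
			return fmt.Errorf("limit %d violated (got %d)", l.limit, reserved)
		}
		return nil
	}

	func main() {
		l := &reserveLimiter{limit: 3}
		fmt.Println(l.Reserve(2)) // <nil>
		fmt.Println(l.Reserve(2)) // limit 3 violated (got 4)
		fmt.Println(l.failures)   // 1
	}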
diff --git a/vendor/github.com/thanos-io/thanos/pkg/store/local.go b/vendor/github.com/thanos-io/thanos/pkg/store/local.go
index cac727d9ac7fc..e50bcf6cd0408 100644
--- a/vendor/github.com/thanos-io/thanos/pkg/store/local.go
+++ b/vendor/github.com/thanos-io/thanos/pkg/store/local.go
@@ -17,11 +17,12 @@ import (
 	"github.com/pkg/errors"
 	"github.com/prometheus/prometheus/pkg/labels"
 	"github.com/prometheus/prometheus/tsdb/fileutil"
+	"google.golang.org/grpc/codes"
+	"google.golang.org/grpc/status"
+
 	"github.com/thanos-io/thanos/pkg/component"
 	"github.com/thanos-io/thanos/pkg/runutil"
 	"github.com/thanos-io/thanos/pkg/store/storepb"
-	"google.golang.org/grpc/codes"
-	"google.golang.org/grpc/status"
 )
 
 // LocalStore implements the store API against single file with stream of proto-based SeriesResponses in JSON format.
@@ -86,10 +87,10 @@ func NewLocalStoreFromJSONMmappableFile(
 		content = content[:idx+1]
 	}
 
-	scanner := NewNoCopyScanner(content, split)
+	skanner := NewNoCopyScanner(content, split)
 	resp := &storepb.SeriesResponse{}
-	for scanner.Scan() {
-		if err := jsonpb.Unmarshal(bytes.NewReader(scanner.Bytes()), resp); err != nil {
+	for skanner.Scan() {
+		if err := jsonpb.Unmarshal(bytes.NewReader(skanner.Bytes()), resp); err != nil {
 			return nil, errors.Wrapf(err, "unmarshal storepb.SeriesResponse frame for file %s", path)
 		}
 		series := resp.GetSeries()
@@ -116,7 +117,7 @@ func NewLocalStoreFromJSONMmappableFile(
 		s.sortedChunks = append(s.sortedChunks, chks)
 	}
 
-	if err := scanner.Err(); err != nil {
+	if err := skanner.Err(); err != nil {
 		return nil, errors.Wrapf(err, "scanning file %s", path)
 	}
 	level.Info(logger).Log("msg", "loading JSON file succeeded", "file", path, "info", s.info.String(), "series", len(s.series))
diff --git a/vendor/github.com/thanos-io/thanos/pkg/store/multitsdb.go b/vendor/github.com/thanos-io/thanos/pkg/store/multitsdb.go
index b55535b036646..4c6f43ce3aa7f 100644
--- a/vendor/github.com/thanos-io/thanos/pkg/store/multitsdb.go
+++ b/vendor/github.com/thanos-io/thanos/pkg/store/multitsdb.go
@@ -24,14 +24,15 @@ import (
 )
 
 // MultiTSDBStore implements the Store interface backed by multiple TSDBStore instances.
+// TODO(bwplotka): Remove this and use Proxy instead. Details: https://github.com/thanos-io/thanos/issues/2864
 type MultiTSDBStore struct {
 	logger     log.Logger
 	component  component.SourceStoreAPI
-	tsdbStores func() map[string]*TSDBStore
+	tsdbStores func() map[string]storepb.StoreServer
 }
 
 // NewMultiTSDBStore creates a new MultiTSDBStore.
-func NewMultiTSDBStore(logger log.Logger, _ prometheus.Registerer, component component.SourceStoreAPI, tsdbStores func() map[string]*TSDBStore) *MultiTSDBStore {
+func NewMultiTSDBStore(logger log.Logger, _ prometheus.Registerer, component component.SourceStoreAPI, tsdbStores func() map[string]storepb.StoreServer) *MultiTSDBStore {
 	if logger == nil {
 		logger = log.NewNopLogger()
 	}
@@ -89,59 +90,70 @@ type tenantSeriesSetServer struct {
 
 	ctx context.Context
 
-	warnCh warnSender
-	recv   chan *storepb.Series
-	cur    *storepb.Series
+	directCh directSender
+	recv     chan *storepb.Series
+	cur      *storepb.Series
 
 	err    error
 	tenant string
 }
 
+// TODO(bwplotka): Remove tenant awareness; keep it simple with single functionality.
+// Details https://github.com/thanos-io/thanos/issues/2864.
 func newTenantSeriesSetServer(
 	ctx context.Context,
 	tenant string,
-	warnCh warnSender,
+	directCh directSender,
 ) *tenantSeriesSetServer {
 	return &tenantSeriesSetServer{
-		ctx:    ctx,
-		tenant: tenant,
-		warnCh: warnCh,
-		recv:   make(chan *storepb.Series),
+		ctx:      ctx,
+		tenant:   tenant,
+		directCh: directCh,
+		recv:     make(chan *storepb.Series),
 	}
 }
 
-func (s *tenantSeriesSetServer) Context() context.Context {
-	return s.ctx
-}
+func (s *tenantSeriesSetServer) Context() context.Context { return s.ctx }
 
-func (s *tenantSeriesSetServer) Series(store *TSDBStore, r *storepb.SeriesRequest) {
+func (s *tenantSeriesSetServer) Series(store storepb.StoreServer, r *storepb.SeriesRequest) {
 	var err error
 	tracing.DoInSpan(s.ctx, "multitsdb_tenant_series", func(_ context.Context) {
 		err = store.Series(r, s)
 	})
-
 	if err != nil {
-		if r.PartialResponseDisabled {
+		if r.PartialResponseDisabled || r.PartialResponseStrategy == storepb.PartialResponseStrategy_ABORT {
 			s.err = errors.Wrapf(err, "get series for tenant %s", s.tenant)
 		} else {
 			// Consistently prefix tenant specific warnings as done in various other places.
 			err = errors.New(prefixTenantWarning(s.tenant, err.Error()))
-			s.warnCh.send(storepb.NewWarnSeriesResponse(err))
+			s.directCh.send(storepb.NewWarnSeriesResponse(err))
 		}
 	}
-
 	close(s.recv)
 }
 
 func (s *tenantSeriesSetServer) Send(r *storepb.SeriesResponse) error {
 	series := r.GetSeries()
+	if series == nil {
+		// Proxy non-series responses directly to the client.
+		s.directCh.send(r)
+		return nil
+	}
+
+	// TODO(bwplotka): Consider avoiding the copy / learn why it has to be copied.
 	chunks := make([]storepb.AggrChunk, len(series.Chunks))
 	copy(chunks, series.Chunks)
-	s.recv <- &storepb.Series{
+
+	// For series, pass it to our AggChunkSeriesSet.
+	select {
+	case <-s.ctx.Done():
+		return s.ctx.Err()
+	case s.recv <- &storepb.Series{
 		Labels: series.Labels,
 		Chunks: chunks,
+	}:
+		return nil
 	}
-	return nil
 }
 
 func (s *tenantSeriesSetServer) Next() (ok bool) {
@@ -156,29 +168,31 @@ func (s *tenantSeriesSetServer) At() ([]storepb.Label, []storepb.AggrChunk) {
 	return s.cur.Labels, s.cur.Chunks
 }
 
-func (s *tenantSeriesSetServer) Err() error {
-	return s.err
-}
+func (s *tenantSeriesSetServer) Err() error { return s.err }
 
 // Series returns all series for a requested time range and label matcher. The
 // returned data may exceed the requested time bounds. The data returned may
 // have been read and merged from multiple underlying TSDBStore instances.
 func (s *MultiTSDBStore) Series(r *storepb.SeriesRequest, srv storepb.Store_SeriesServer) error {
+	span, ctx := tracing.StartSpan(srv.Context(), "multitsdb_series")
+	defer span.Finish()
+
 	stores := s.tsdbStores()
 	if len(stores) == 0 {
 		return nil
 	}
 
-	var (
-		g, gctx   = errgroup.WithContext(srv.Context())
-		span, ctx = tracing.StartSpan(gctx, "multitsdb_series")
-		// Allow to buffer max 10 series response.
-		// Each might be quite large (multi chunk long series given by sidecar).
-		respSender, respRecv, closeFn = newRespCh(gctx, 10)
-	)
-	defer span.Finish()
+	g, gctx := errgroup.WithContext(ctx)
+
+	// Allow to buffer max 10 series response.
+	// Each might be quite large (multi chunk long series given by sidecar).
+	respSender, respCh := newCancelableRespChannel(gctx, 10)
 
 	g.Go(func() error {
+		// This goroutine is responsible for calling the store's Series concurrently. Merged results
+		// are passed to respCh and sent concurrently to the client (if the buffer of 10 has room).
+		// When this goroutine finishes or is canceled, the respCh channel is closed.
+
 		var (
 			seriesSet []storepb.SeriesSet
 			wg        = &sync.WaitGroup{}
@@ -186,7 +200,7 @@ func (s *MultiTSDBStore) Series(r *storepb.SeriesRequest, srv storepb.Store_Seri
 
 		defer func() {
 			wg.Wait()
-			closeFn()
+			close(respCh)
 		}()
 
 		for tenant, store := range stores {
@@ -202,7 +216,6 @@ func (s *MultiTSDBStore) Series(r *storepb.SeriesRequest, srv storepb.Store_Seri
 				defer wg.Done()
 				ss.Series(store, r)
 			}()
-
 			seriesSet = append(seriesSet, ss)
 		}
 
@@ -214,13 +227,16 @@ func (s *MultiTSDBStore) Series(r *storepb.SeriesRequest, srv storepb.Store_Seri
 		}
 		return mergedSet.Err()
 	})
-
-	for resp := range respRecv {
-		if err := srv.Send(resp); err != nil {
-			return status.Error(codes.Unknown, errors.Wrap(err, "send series response").Error())
+	g.Go(func() error {
+		// Goroutine for gathering merged responses and sending them over to the client. It stops when
+		// the respCh channel is closed OR on error from the client.
+		for resp := range respCh {
+			if err := srv.Send(resp); err != nil {
+				return status.Error(codes.Unknown, errors.Wrap(err, "send series response").Error())
+			}
 		}
-	}
-
+		return nil
+	})
 	return g.Wait()
 }
 
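The Series rewrite splits the work into two errgroup goroutines: a producer that merges per-tenant results into a bounded channel and closes it when done, and a consumer that drains the channel and forwards responses to the client, so a slow or failing client cancels the producer through the shared group context. A reduced sketch of that producer/consumer shape:

	package main

	import (
		"context"
		"fmt"

		"golang.org/x/sync/errgroup"
	)

	func main() {
		g, gctx := errgroup.WithContext(context.Background())
		respCh := make(chan string, 10) // buffer of 10, as in the real code

		// Producer: merges results into the channel and closes it when done,
		// giving up early if the group context is canceled.
		g.Go(func() error {
			defer close(respCh)
			for i := 0; i < 3; i++ {
				select {
				case respCh <- fmt.Sprintf("series %d", i):
				case <-gctx.Done():
					return gctx.Err()
				}
			}
			return nil
		})
		// Consumer: drains the channel and forwards responses; an error here
		// cancels gctx and therefore stops the producer.
		g.Go(func() error {
			for resp := range respCh {
				fmt.Println(resp)
			}
			return nil
		})
		if err := g.Wait(); err != nil {
			fmt.Println("send failed:", err)
		}
	}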
diff --git a/vendor/github.com/thanos-io/thanos/pkg/store/postings_codec.go b/vendor/github.com/thanos-io/thanos/pkg/store/postings_codec.go
index 7b1aaff477d30..8f5180663282d 100644
--- a/vendor/github.com/thanos-io/thanos/pkg/store/postings_codec.go
+++ b/vendor/github.com/thanos-io/thanos/pkg/store/postings_codec.go
@@ -33,8 +33,9 @@ func isDiffVarintSnappyEncodedPostings(input []byte) bool {
 // diffVarintSnappyEncode encodes postings into diff+varint representation,
 // and applies snappy compression on the result.
 // Returned byte slice starts with codecHeaderSnappy header.
-func diffVarintSnappyEncode(p index.Postings) ([]byte, error) {
-	buf, err := diffVarintEncodeNoHeader(p)
+// The length argument is the expected number of postings, used to preallocate the buffer.
+func diffVarintSnappyEncode(p index.Postings, length int) ([]byte, error) {
+	buf, err := diffVarintEncodeNoHeader(p, length)
 	if err != nil {
 		return nil, err
 	}
@@ -52,9 +53,16 @@ func diffVarintSnappyEncode(p index.Postings) ([]byte, error) {
 
 // diffVarintEncodeNoHeader encodes postings into diff+varint representation.
 // It doesn't add any header to the output bytes.
-func diffVarintEncodeNoHeader(p index.Postings) ([]byte, error) {
+// The length argument is the expected number of postings, used to preallocate the buffer.
+func diffVarintEncodeNoHeader(p index.Postings, length int) ([]byte, error) {
 	buf := encoding.Encbuf{}
 
+	// This encoding uses around 1 byte per posting, but let's use a
+	// conservative 1.25 bytes per posting to avoid extra allocations.
+	if length > 0 {
+		buf.B = make([]byte, 0, 5*length/4)
+	}
+
 	prev := uint64(0)
 	for p.Next() {
 		v := p.At()
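The preallocation uses the expected posting count to size the buffer at roughly 1.25 bytes per posting (5*length/4), which should leave enough headroom that the slice rarely grows. A tiny sketch of the sizing rule:

	package main

	import "fmt"

	// prealloc mirrors the buffer sizing above: roughly 1 byte per posting is
	// expected from the diff+varint encoding, and 5*length/4 (1.25x) leaves
	// headroom so the slice rarely has to grow.
	func prealloc(length int) []byte {
		if length <= 0 {
			return nil
		}
		return make([]byte, 0, 5*length/4)
	}

	func main() {
		fmt.Println(cap(prealloc(1000))) // 1250
	}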
diff --git a/vendor/github.com/thanos-io/thanos/pkg/store/prometheus.go b/vendor/github.com/thanos-io/thanos/pkg/store/prometheus.go
index 882c32fead31b..6d5a737a90d4d 100644
--- a/vendor/github.com/thanos-io/thanos/pkg/store/prometheus.go
+++ b/vendor/github.com/thanos-io/thanos/pkg/store/prometheus.go
@@ -10,7 +10,6 @@ import (
 	"fmt"
 	"io"
 	"io/ioutil"
-	"math"
 	"net/http"
 	"net/url"
 	"path"
@@ -27,6 +26,9 @@ import (
 	"github.com/prometheus/prometheus/pkg/labels"
 	"github.com/prometheus/prometheus/storage/remote"
 	"github.com/prometheus/prometheus/tsdb/chunkenc"
+	"google.golang.org/grpc/codes"
+	"google.golang.org/grpc/status"
+
 	"github.com/thanos-io/thanos/pkg/component"
 	thanoshttp "github.com/thanos-io/thanos/pkg/http"
 	"github.com/thanos-io/thanos/pkg/promclient"
@@ -34,8 +36,6 @@ import (
 	"github.com/thanos-io/thanos/pkg/store/storepb"
 	"github.com/thanos-io/thanos/pkg/store/storepb/prompb"
 	"github.com/thanos-io/thanos/pkg/tracing"
-	"google.golang.org/grpc/codes"
-	"google.golang.org/grpc/status"
 )
 
 // PrometheusStore implements the store node API on top of the Prometheus remote read API.
@@ -239,10 +239,7 @@ func (p *PrometheusStore) handleSampledPrometheusResponse(s storepb.Store_Series
 			continue
 		}
 
-		// XOR encoding supports a max size of 2^16 - 1 samples, so we need
-		// to chunk all samples into groups of no more than 2^16 - 1
-		// See: https://github.com/thanos-io/thanos/pull/718.
-		aggregatedChunks, err := p.chunkSamples(e, math.MaxUint16)
+		aggregatedChunks, err := p.chunkSamples(e, MaxSamplesPerChunk)
 		if err != nil {
 			return err
 		}
@@ -477,7 +474,8 @@ func matchesExternalLabels(ms []storepb.LabelMatcher, externalLabels labels.Labe
 }
 
 // encodeChunk translates the sample pairs into a chunk.
-func (p *PrometheusStore) encodeChunk(ss []prompb.Sample) (storepb.Chunk_Encoding, []byte, error) {
+// TODO(kakkoyun): Linter - result 0 (github.com/thanos-io/thanos/pkg/store/storepb.Chunk_Encoding) is always 0.
+func (p *PrometheusStore) encodeChunk(ss []prompb.Sample) (storepb.Chunk_Encoding, []byte, error) { //nolint:unparam
 	c := chunkenc.NewXORChunk()
 
 	a, err := c.Appender()
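Switching chunkSamples from math.MaxUint16 to MaxSamplesPerChunk means remote-read results are now cut into chunks of at most 120 samples, matching what TSDB itself produces. A back-of-the-envelope sketch of the resulting chunk count:

	package main

	import "fmt"

	const maxSamplesPerChunk = 120 // mirrors the exported constant referenced above

	// chunkCount shows the effect of the change: samples are cut into chunks
	// of at most 120 samples instead of up to 65535.
	func chunkCount(samples int) int {
		return (samples + maxSamplesPerChunk - 1) / maxSamplesPerChunk
	}

	func main() {
		fmt.Println(chunkCount(1000)) // 9
	}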
diff --git a/vendor/github.com/thanos-io/thanos/pkg/store/proxy.go b/vendor/github.com/thanos-io/thanos/pkg/store/proxy.go
index b5c310920edb4..67ae5a908f1b5 100644
--- a/vendor/github.com/thanos-io/thanos/pkg/store/proxy.go
+++ b/vendor/github.com/thanos-io/thanos/pkg/store/proxy.go
@@ -184,18 +184,23 @@ func mergeLabels(a []storepb.Label, b labels.Labels) []storepb.Label {
 	return res
 }
 
-type ctxRespSender struct {
+// cancelableRespSender is a response channel that does need to be exhausted on cancel.
+type cancelableRespSender struct {
 	ctx context.Context
 	ch  chan<- *storepb.SeriesResponse
 }
 
-func newRespCh(ctx context.Context, buffer int) (*ctxRespSender, <-chan *storepb.SeriesResponse, func()) {
+func newCancelableRespChannel(ctx context.Context, buffer int) (*cancelableRespSender, chan *storepb.SeriesResponse) {
 	respCh := make(chan *storepb.SeriesResponse, buffer)
-	return &ctxRespSender{ctx: ctx, ch: respCh}, respCh, func() { close(respCh) }
+	return &cancelableRespSender{ctx: ctx, ch: respCh}, respCh
 }
 
-func (s ctxRespSender) send(r *storepb.SeriesResponse) {
-	s.ch <- r
+// send or return on cancel.
+func (s cancelableRespSender) send(r *storepb.SeriesResponse) {
+	select {
+	case <-s.ctx.Done():
+	case s.ch <- r:
+	}
 }
 
 // Series returns all series for a requested time range and label matcher. Requested series are taken from other
@@ -213,15 +218,17 @@ func (s *ProxyStore) Series(r *storepb.SeriesRequest, srv storepb.Store_SeriesSe
 		return status.Error(codes.InvalidArgument, errors.New("no matchers specified (excluding external labels)").Error())
 	}
 
-	var (
-		g, gctx = errgroup.WithContext(srv.Context())
+	g, gctx := errgroup.WithContext(srv.Context())
 
-		// Allow to buffer max 10 series response.
-		// Each might be quite large (multi chunk long series given by sidecar).
-		respSender, respRecv, closeFn = newRespCh(gctx, 10)
-	)
+	// Allow to buffer max 10 series response.
+	// Each might be quite large (multi chunk long series given by sidecar).
+	respSender, respCh := newCancelableRespChannel(gctx, 10)
 
 	g.Go(func() error {
+		// This goroutine is responsible for calling the store's Series concurrently. Merged results
+		// are passed to respCh and sent concurrently to the client (if the buffer of 10 has room).
+		// When this goroutine finishes or is canceled, the respCh channel is closed.
+
 		var (
 			seriesSet      []storepb.SeriesSet
 			storeDebugMsgs []string
@@ -239,7 +246,7 @@ func (s *ProxyStore) Series(r *storepb.SeriesRequest, srv storepb.Store_SeriesSe
 
 		defer func() {
 			wg.Wait()
-			closeFn()
+			close(respCh)
 		}()
 
 		for _, st := range s.stores() {
@@ -294,6 +301,10 @@ func (s *ProxyStore) Series(r *storepb.SeriesRequest, srv storepb.Store_SeriesSe
 			return nil
 		}
 
+		// TODO(bwplotka): Currently we stream into big frames. Consider ensuring 1MB maximum.
+		// This however does not matter much when used with QueryAPI. Matters for federated Queries a lot.
+		// https://github.com/thanos-io/thanos/issues/2332
+		// Series are not necessarily merged across themselves.
 		mergedSet := storepb.MergeSeriesSets(seriesSet...)
 		for mergedSet.Next() {
 			var series storepb.Series
@@ -302,21 +313,25 @@ func (s *ProxyStore) Series(r *storepb.SeriesRequest, srv storepb.Store_SeriesSe
 		}
 		return mergedSet.Err()
 	})
-
-	for resp := range respRecv {
-		if err := srv.Send(resp); err != nil {
-			return status.Error(codes.Unknown, errors.Wrap(err, "send series response").Error())
+	g.Go(func() error {
+		// Goroutine for gathering merged responses and sending them over to the client. It stops when
+		// the respCh channel is closed OR on error from the client.
+		for resp := range respCh {
+			if err := srv.Send(resp); err != nil {
+				return status.Error(codes.Unknown, errors.Wrap(err, "send series response").Error())
+			}
 		}
-	}
-
+		return nil
+	})
 	if err := g.Wait(); err != nil {
+		// TODO(bwplotka): Replace with request logger.
 		level.Error(s.logger).Log("err", err)
 		return err
 	}
 	return nil
 }
 
-type warnSender interface {
+type directSender interface {
 	send(*storepb.SeriesResponse)
 }
 
@@ -327,7 +342,7 @@ type streamSeriesSet struct {
 	logger log.Logger
 
 	stream storepb.Store_SeriesClient
-	warnCh warnSender
+	warnCh directSender
 
 	currSeries *storepb.Series
 	recvCh     chan *storepb.Series
@@ -363,7 +378,7 @@ func startStreamSeriesSet(
 	closeSeries context.CancelFunc,
 	wg *sync.WaitGroup,
 	stream storepb.Store_SeriesClient,
-	warnCh warnSender,
+	warnCh directSender,
 	name string,
 	partialResponse bool,
 	responseTimeout time.Duration,
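
The proxy.go hunks above replace the unconditional channel send with a context-aware one and split the work across two errgroup goroutines: one merges store responses into a buffered channel, the other drains it toward the client, so an error or cancellation on either side unwinds both. A minimal, self-contained sketch of that pattern follows; the resp and cancelableSender names and the buffer size are illustrative, not the actual Thanos types.

package main

import (
	"context"
	"fmt"

	"golang.org/x/sync/errgroup"
)

type resp struct{ id int }

type cancelableSender struct {
	ctx context.Context
	ch  chan<- *resp
}

// send forwards r unless the context is canceled first.
func (s cancelableSender) send(r *resp) {
	select {
	case <-s.ctx.Done():
	case s.ch <- r:
	}
}

func main() {
	g, gctx := errgroup.WithContext(context.Background())

	ch := make(chan *resp, 10) // allow up to 10 buffered responses
	sender := cancelableSender{ctx: gctx, ch: ch}

	// Producer: generates responses and closes the channel when done so the
	// consumer's range loop can terminate.
	g.Go(func() error {
		defer close(ch)
		for i := 0; i < 5; i++ {
			sender.send(&resp{id: i})
		}
		return nil
	})

	// Consumer: forwards responses until the channel is closed; returning an
	// error here would cancel gctx and unblock the producer's sends.
	g.Go(func() error {
		for r := range ch {
			fmt.Println("forwarded response", r.id)
		}
		return nil
	})

	if err := g.Wait(); err != nil {
		fmt.Println("err:", err)
	}
}
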
diff --git a/vendor/github.com/thanos-io/thanos/pkg/store/tsdb.go b/vendor/github.com/thanos-io/thanos/pkg/store/tsdb.go
index b06cb3ac3efd1..70454a39f5069 100644
--- a/vendor/github.com/thanos-io/thanos/pkg/store/tsdb.go
+++ b/vendor/github.com/thanos-io/thanos/pkg/store/tsdb.go
@@ -12,7 +12,7 @@ import (
 	"github.com/pkg/errors"
 	"github.com/prometheus/client_golang/prometheus"
 	"github.com/prometheus/prometheus/pkg/labels"
-	"github.com/prometheus/prometheus/tsdb"
+	"github.com/prometheus/prometheus/storage"
 	"github.com/prometheus/prometheus/tsdb/chunkenc"
 	"google.golang.org/grpc/codes"
 	"google.golang.org/grpc/status"
@@ -23,12 +23,17 @@ import (
 	"github.com/thanos-io/thanos/pkg/store/storepb"
 )
 
+type TSDBReader interface {
+	storage.Queryable
+	StartTime() (int64, error)
+}
+
 // TSDBStore implements the store API against a local TSDB instance.
 // It attaches the provided external labels to all results. It only responds with raw data
 // and does not support downsampling.
 type TSDBStore struct {
 	logger         log.Logger
-	db             *tsdb.DB
+	db             TSDBReader
 	component      component.StoreAPI
 	externalLabels labels.Labels
 }
@@ -40,7 +45,7 @@ type ReadWriteTSDBStore struct {
 }
 
 // NewTSDBStore creates a new TSDBStore.
-func NewTSDBStore(logger log.Logger, _ prometheus.Registerer, db *tsdb.DB, component component.StoreAPI, externalLabels labels.Labels) *TSDBStore {
+func NewTSDBStore(logger log.Logger, _ prometheus.Registerer, db TSDBReader, component component.StoreAPI, externalLabels labels.Labels) *TSDBStore {
 	if logger == nil {
 		logger = log.NewNopLogger()
 	}
@@ -54,15 +59,17 @@ func NewTSDBStore(logger log.Logger, _ prometheus.Registerer, db *tsdb.DB, compo
 
 // Info returns store information about the Prometheus instance.
 func (s *TSDBStore) Info(_ context.Context, _ *storepb.InfoRequest) (*storepb.InfoResponse, error) {
+	minTime, err := s.db.StartTime()
+	if err != nil {
+		return nil, errors.Wrap(err, "TSDB min Time")
+	}
+
 	res := &storepb.InfoResponse{
 		Labels:    make([]storepb.Label, 0, len(s.externalLabels)),
 		StoreType: s.component.ToProto(),
-		MinTime:   0,
+		MinTime:   minTime,
 		MaxTime:   math.MaxInt64,
 	}
-	if blocks := s.db.Blocks(); len(blocks) > 0 {
-		res.MinTime = blocks[0].Meta().MinTime
-	}
 	for _, l := range s.externalLabels {
 		res.Labels = append(res.Labels, storepb.Label{
 			Name:  l.Name,
@@ -120,12 +127,7 @@ func (s *TSDBStore) Series(r *storepb.SeriesRequest, srv storepb.Store_SeriesSer
 		if !r.SkipChunks {
 			// TODO(fabxc): An improvement over this trivial approach would be to directly
 			// use the chunks provided by TSDB in the response.
-			// But since the sidecar has a similar approach, optimizing here has only
-			// limited benefit for now.
-			// NOTE: XOR encoding supports a max size of 2^16 - 1 samples, so we need
-			// to chunk all samples into groups of no more than 2^16 - 1
-			// See: https://github.com/thanos-io/thanos/pull/1038.
-			c, err := s.encodeChunks(series.Iterator(), math.MaxUint16)
+			c, err := s.encodeChunks(series.Iterator(), MaxSamplesPerChunk)
 			if err != nil {
 				return status.Errorf(codes.Internal, "encode chunk: %s", err)
 			}
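
The tsdb.go hunks above narrow the store's dependency from a concrete *tsdb.DB to the TSDBReader interface (storage.Queryable plus StartTime), which also lets Info report a real minimum time instead of scanning blocks. Below is a sketch of a stub satisfying that contract, assuming the vendored Prometheus storage package of this era exposes NoopQuerier and a context-taking Querier method; stubReader is a hypothetical name.

package main

import (
	"context"
	"fmt"
	"math"

	"github.com/prometheus/prometheus/storage"
)

// stubReader satisfies the TSDBReader contract: storage.Queryable plus StartTime().
type stubReader struct{}

// Querier returns an empty querier; a real implementation would open a
// querier over the backing TSDB for [mint, maxt].
func (stubReader) Querier(_ context.Context, _, _ int64) (storage.Querier, error) {
	return storage.NoopQuerier(), nil
}

// StartTime reports "no data yet" as the largest possible timestamp, which
// Info would then surface as MinTime.
func (stubReader) StartTime() (int64, error) {
	return int64(math.MaxInt64), nil
}

func main() {
	var r stubReader
	minTime, err := r.StartTime()
	fmt.Println(minTime, err)
}
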
diff --git a/vendor/modules.txt b/vendor/modules.txt
index d9023d515998c..910955034f601 100644
--- a/vendor/modules.txt
+++ b/vendor/modules.txt
@@ -65,8 +65,6 @@ github.com/armon/go-metrics
 github.com/armon/go-metrics/prometheus
 # github.com/asaskevich/govalidator v0.0.0-20200108200545-475eaeb16496
 github.com/asaskevich/govalidator
-# github.com/aws/aws-lambda-go v1.17.0
-## explicit
 # github.com/aws/aws-sdk-go v1.33.12
 github.com/aws/aws-sdk-go/aws
 github.com/aws/aws-sdk-go/aws/arn
@@ -153,7 +151,7 @@ github.com/coreos/go-systemd/journal
 github.com/coreos/go-systemd/sdjournal
 # github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f
 github.com/coreos/pkg/capnslog
-# github.com/cortexproject/cortex v1.2.1-0.20200727121049-4cfa4a2978c2
+# github.com/cortexproject/cortex v1.2.1-0.20200731141046-75cc6c827e31
 ## explicit
 github.com/cortexproject/cortex/pkg/alertmanager
 github.com/cortexproject/cortex/pkg/alertmanager/alerts
@@ -785,7 +783,7 @@ github.com/stretchr/objx
 github.com/stretchr/testify/assert
 github.com/stretchr/testify/mock
 github.com/stretchr/testify/require
-# github.com/thanos-io/thanos v0.13.1-0.20200625180332-f078faed1b96
+# github.com/thanos-io/thanos v0.13.1-0.20200722150410-6485769a1350
 github.com/thanos-io/thanos/pkg/block
 github.com/thanos-io/thanos/pkg/block/indexheader
 github.com/thanos-io/thanos/pkg/block/metadata
@@ -1118,7 +1116,7 @@ google.golang.org/genproto/googleapis/rpc/code
 google.golang.org/genproto/googleapis/rpc/status
 google.golang.org/genproto/googleapis/type/expr
 google.golang.org/genproto/protobuf/field_mask
-# google.golang.org/grpc v1.29.1
+# google.golang.org/grpc v1.30.0 => google.golang.org/grpc v1.29.1
 ## explicit
 google.golang.org/grpc
 google.golang.org/grpc/attributes
@@ -1431,3 +1429,4 @@ sigs.k8s.io/yaml
 # k8s.io/client-go => k8s.io/client-go v0.18.3
 # github.com/satori/go.uuid => github.com/satori/go.uuid v1.2.0
 # github.com/gocql/gocql => github.com/grafana/gocql v0.0.0-20200605141915-ba5dc39ece85
+# google.golang.org/grpc => google.golang.org/grpc v1.29.1

From d8721009c25437b9912bff616340fb35362190a8 Mon Sep 17 00:00:00 2001
From: Owen Diehl 
Date: Fri, 31 Jul 2020 11:09:03 -0400
Subject: [PATCH 36/40] ignore emacs stashing

---
 .gitignore                            | 3 +++
 pkg/querier/queryrange/.#roundtrip.go | 1 -
 2 files changed, 3 insertions(+), 1 deletion(-)
 delete mode 120000 pkg/querier/queryrange/.#roundtrip.go

diff --git a/.gitignore b/.gitignore
index d1934999fa6c0..4412645f38e9f 100644
--- a/.gitignore
+++ b/.gitignore
@@ -25,3 +25,6 @@ dist
 coverage.txt
 .DS_Store
 .aws-sam
+
+# emacs
+.#*
\ No newline at end of file
diff --git a/pkg/querier/queryrange/.#roundtrip.go b/pkg/querier/queryrange/.#roundtrip.go
deleted file mode 120000
index 0f53fe955a53f..0000000000000
--- a/pkg/querier/queryrange/.#roundtrip.go
+++ /dev/null
@@ -1 +0,0 @@
-owendiehl@Owens-MBP.fios-router.home.85533
\ No newline at end of file

From 91a71e2f556e2a3ba5889df98c0e34e8c64de58d Mon Sep 17 00:00:00 2001
From: Owen Diehl 
Date: Thu, 20 Aug 2020 14:02:43 -0400
Subject: [PATCH 37/40] adds comments

---
 pkg/ruler/manager/memstore.go | 5 +++++
 1 file changed, 5 insertions(+)

diff --git a/pkg/ruler/manager/memstore.go b/pkg/ruler/manager/memstore.go
index fbbcb8b3369bf..cd1a2a2bbc927 100644
--- a/pkg/ruler/manager/memstore.go
+++ b/pkg/ruler/manager/memstore.go
@@ -189,10 +189,15 @@ type memStoreQuerier struct {
 	*MemStore
 }
 
+// Select implements storage.Querier, but takes advantage of the fact that it is only called when restoring an alert's "for" state,
+// in order to look up & cache previous rule evaluations. This results in a sort of synthetic metric store.
 func (m *memStoreQuerier) Select(sortSeries bool, params *storage.SelectHints, matchers ...*labels.Matcher) storage.SeriesSet {
 	b := labels.NewBuilder(nil)
 	var ruleKey string
 	for _, matcher := range matchers {
+		// Since Select is only called to restore the "for" state of an alert, we can deduce two things:
+		// 1) The matchers will all be in the form {foo="bar"}. This means we can construct the cache entry from these matchers.
+		// 2) The alertname label value can be used to discover the rule this query is associated with.
 		b.Set(matcher.Name, matcher.Value)
 		if matcher.Name == labels.AlertName && matcher.Type == labels.MatchEqual {
 			ruleKey = matcher.Value
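
The comments added above explain that Select is only ever invoked with simple equality matchers while restoring "for" state. A standalone sketch of that matcher handling, rebuilding the label set and extracting the alertname used to find the owning rule; labelsAndRuleKey is a hypothetical helper, not part of MemStore.

package main

import (
	"fmt"

	"github.com/prometheus/prometheus/pkg/labels"
)

// labelsAndRuleKey reconstructs the label set encoded in {foo="bar"} style
// matchers and returns the alertname value used as the rule cache key.
func labelsAndRuleKey(matchers ...*labels.Matcher) (labels.Labels, string) {
	b := labels.NewBuilder(nil)
	var ruleKey string
	for _, m := range matchers {
		b.Set(m.Name, m.Value)
		if m.Name == labels.AlertName && m.Type == labels.MatchEqual {
			ruleKey = m.Value
		}
	}
	return b.Labels(), ruleKey
}

func main() {
	ls, rule := labelsAndRuleKey(
		labels.MustNewMatcher(labels.MatchEqual, labels.AlertName, "HighErrorRate"),
		labels.MustNewMatcher(labels.MatchEqual, "severity", "page"),
	)
	fmt.Println(ls, rule)
}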

From 7bad7315ebd2039c0ef094844d1c3e70a625b949 Mon Sep 17 00:00:00 2001
From: Owen Diehl 
Date: Tue, 25 Aug 2020 14:50:32 -0400
Subject: [PATCH 38/40] ruler /loki/api/v1 prefix

---
 pkg/loki/modules.go | 10 +++++-----
 1 file changed, 5 insertions(+), 5 deletions(-)

diff --git a/pkg/loki/modules.go b/pkg/loki/modules.go
index f775d393b163c..7d48df407fceb 100644
--- a/pkg/loki/modules.go
+++ b/pkg/loki/modules.go
@@ -411,11 +411,11 @@ func (t *Loki) initRuler() (_ services.Service, err error) {
 		t.server.HTTP.Path("/api/prom/rules/{namespace}/{groupName}").Methods("DELETE").Handler(t.httpAuthMiddleware.Wrap(http.HandlerFunc(t.ruler.DeleteRuleGroup)))
 
 		// Ruler API Routes
-		t.server.HTTP.Path("/api/v1/rules").Methods("GET").Handler(t.httpAuthMiddleware.Wrap(http.HandlerFunc(t.ruler.ListRules)))
-		t.server.HTTP.Path("/api/v1/rules/{namespace}").Methods("GET").Handler(t.httpAuthMiddleware.Wrap(http.HandlerFunc(t.ruler.ListRules)))
-		t.server.HTTP.Path("/api/v1/rules/{namespace}/{groupName}").Methods("GET").Handler(t.httpAuthMiddleware.Wrap(http.HandlerFunc(t.ruler.GetRuleGroup)))
-		t.server.HTTP.Path("/api/v1/rules/{namespace}").Methods("POST").Handler(t.httpAuthMiddleware.Wrap(http.HandlerFunc(t.ruler.CreateRuleGroup)))
-		t.server.HTTP.Path("/api/v1/rules/{namespace}/{groupName}").Methods("DELETE").Handler(t.httpAuthMiddleware.Wrap(http.HandlerFunc(t.ruler.DeleteRuleGroup)))
+		t.server.HTTP.Path("/loki/api/v1/rules").Methods("GET").Handler(t.httpAuthMiddleware.Wrap(http.HandlerFunc(t.ruler.ListRules)))
+		t.server.HTTP.Path("/loki/api/v1/rules/{namespace}").Methods("GET").Handler(t.httpAuthMiddleware.Wrap(http.HandlerFunc(t.ruler.ListRules)))
+		t.server.HTTP.Path("/loki/api/v1/rules/{namespace}/{groupName}").Methods("GET").Handler(t.httpAuthMiddleware.Wrap(http.HandlerFunc(t.ruler.GetRuleGroup)))
+		t.server.HTTP.Path("/loki/api/v1/rules/{namespace}").Methods("POST").Handler(t.httpAuthMiddleware.Wrap(http.HandlerFunc(t.ruler.CreateRuleGroup)))
+		t.server.HTTP.Path("/loki/api/v1/rules/{namespace}/{groupName}").Methods("DELETE").Handler(t.httpAuthMiddleware.Wrap(http.HandlerFunc(t.ruler.DeleteRuleGroup)))
 	}
 
 	return t.ruler, nil
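
With the prefix change above, rule-group CRUD is served under /loki/api/v1/ instead of /api/v1/. A hedged usage sketch for listing rule groups against the new prefix; the port, tenant ID, and the X-Scope-OrgID header are assumptions about the deployment rather than anything this patch configures.

package main

import (
	"fmt"
	"io/ioutil"
	"net/http"
)

func main() {
	// List every rule group visible to the tenant via the new prefix.
	req, err := http.NewRequest(http.MethodGet, "http://localhost:3100/loki/api/v1/rules", nil)
	if err != nil {
		panic(err)
	}
	// Multi-tenant setups usually identify the caller through this header
	// (an assumption about the auth middleware sitting in front of the ruler).
	req.Header.Set("X-Scope-OrgID", "tenant-1")

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	body, _ := ioutil.ReadAll(resp.Body)
	fmt.Println(resp.Status)
	fmt.Println(string(body))
}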

From cff0efa837eb0d9b61c6fe0c28160e08f1de9865 Mon Sep 17 00:00:00 2001
From: Owen Diehl 
Date: Tue, 25 Aug 2020 15:24:20 -0400
Subject: [PATCH 39/40] revendoring compat

---
 pkg/ruler/manager/memstore.go                 |  2 +-
 pkg/ruler/ruler.go                            | 16 +++++++++---
 .../pkg/querier/distributor_queryable.go      | 26 +------------------
 .../cortex/pkg/storage/tsdb/config.go         |  1 -
 4 files changed, 15 insertions(+), 30 deletions(-)

diff --git a/pkg/ruler/manager/memstore.go b/pkg/ruler/manager/memstore.go
index cd1a2a2bbc927..16694e5f0fe76 100644
--- a/pkg/ruler/manager/memstore.go
+++ b/pkg/ruler/manager/memstore.go
@@ -27,7 +27,7 @@ const (
 
 type NoopAppender struct{}
 
-func (a NoopAppender) Appender() storage.Appender                              { return a }
+func (a NoopAppender) Appender(_ context.Context) storage.Appender             { return a }
 func (a NoopAppender) Add(l labels.Labels, t int64, v float64) (uint64, error) { return 0, nil }
 func (a NoopAppender) AddFast(ref uint64, t int64, v float64) error {
 	return errors.New("unimplemented")
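
The hunk above only adapts NoopAppender.Appender to the context-taking signature of the re-vendored Prometheus. For context, here is a minimal sketch of a complete no-op appender of that vintage, assuming the remaining storage.Appender methods are Commit and Rollback (as in Prometheus of mid-2020); the noopAppender and appendable names are illustrative.

package main

import (
	"context"
	"errors"

	"github.com/prometheus/prometheus/pkg/labels"
	"github.com/prometheus/prometheus/storage"
)

// appendable mirrors what the rules manager needs from a sample sink after
// the signature change: Appender now receives a context.
type appendable interface {
	Appender(ctx context.Context) storage.Appender
}

// noopAppender discards every sample; handy when rule results should not be
// written anywhere.
type noopAppender struct{}

func (a noopAppender) Appender(_ context.Context) storage.Appender             { return a }
func (a noopAppender) Add(_ labels.Labels, _ int64, _ float64) (uint64, error) { return 0, nil }
func (a noopAppender) AddFast(_ uint64, _ int64, _ float64) error {
	return errors.New("unimplemented")
}
func (a noopAppender) Commit() error   { return nil }
func (a noopAppender) Rollback() error { return nil }

// Compile-time check that noopAppender satisfies the interface above.
var _ appendable = noopAppender{}

func main() {}
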
diff --git a/pkg/ruler/ruler.go b/pkg/ruler/ruler.go
index 48c9d65fc8fb7..13c9dd503ba29 100644
--- a/pkg/ruler/ruler.go
+++ b/pkg/ruler/ruler.go
@@ -15,15 +15,25 @@ type Config struct {
 
 func NewRuler(cfg Config, engine *logql.Engine, reg prometheus.Registerer, logger log.Logger, ruleStore cRules.RuleStore) (*ruler.Ruler, error) {
 
-	tenantManager := manager.MemstoreTenantManager(
+	manager, err := ruler.NewDefaultMultiTenantManager(
 		cfg.Config,
-		engine,
+		manager.MemstoreTenantManager(
+			cfg.Config,
+			engine,
+		),
+		prometheus.DefaultRegisterer,
+		logger,
 	)
+
+	if err != nil {
+		return nil, err
+	}
 	return ruler.NewRuler(
 		cfg.Config,
-		tenantManager,
+		manager,
 		reg,
 		logger,
 		ruleStore,
 	)
+
 }
diff --git a/vendor/github.com/cortexproject/cortex/pkg/querier/distributor_queryable.go b/vendor/github.com/cortexproject/cortex/pkg/querier/distributor_queryable.go
index 2446179b43bb6..4414d7bb04883 100644
--- a/vendor/github.com/cortexproject/cortex/pkg/querier/distributor_queryable.go
+++ b/vendor/github.com/cortexproject/cortex/pkg/querier/distributor_queryable.go
@@ -79,37 +79,13 @@ func (q *distributorQuerier) Select(_ bool, sp *storage.SelectHints, matchers ..
 	log, ctx := spanlogger.New(q.ctx, "distributorQuerier.Select")
 	defer log.Span.Finish()
 
-	minT, maxT := q.mint, q.maxt
-	if sp != nil {
-		minT, maxT = sp.Start, sp.End
-	}
-
-	// If queryIngestersWithin is enabled, we do manipulate the query mint to query samples up until
-	// now - queryIngestersWithin, because older time ranges are covered by the storage. This
-	// optimization is particularly important for the blocks storage where the blocks retention in the
-	// ingesters could be way higher than queryIngestersWithin.
-	if q.queryIngestersWithin > 0 {
-		now := time.Now()
-		origMinT := minT
-		minT = util.Max64(minT, util.TimeToMillis(now.Add(-q.queryIngestersWithin)))
-
-		if origMinT != minT {
-			level.Debug(log).Log("msg", "the min time of the query to ingesters has been manipulated", "original", origMinT, "updated", minT)
-		}
-
-		if minT > maxT {
-			level.Debug(log).Log("msg", "empty query time range after min time manipulation")
-			return storage.EmptySeriesSet()
-		}
-	}
-
 	// Kludge: Prometheus passes nil SelectParams if it is doing a 'series' operation,
 	// which needs only metadata. For this specific case we shouldn't apply the queryIngestersWithin
 	// time range manipulation, otherwise we'll end up returning no series at all for
 	// older time ranges (while in Cortex we do ignore the start/end and always return
 	// series in ingesters).
 	if sp == nil {
-		ms, err := q.distributor.MetricsForLabelMatchers(ctx, model.Time(minT), model.Time(maxT), matchers...)
+		ms, err := q.distributor.MetricsForLabelMatchers(ctx, model.Time(q.mint), model.Time(q.maxt), matchers...)
 		if err != nil {
 			return storage.ErrSeriesSet(err)
 		}
diff --git a/vendor/github.com/cortexproject/cortex/pkg/storage/tsdb/config.go b/vendor/github.com/cortexproject/cortex/pkg/storage/tsdb/config.go
index 66bd3cc7ed48e..56b1f16676eed 100644
--- a/vendor/github.com/cortexproject/cortex/pkg/storage/tsdb/config.go
+++ b/vendor/github.com/cortexproject/cortex/pkg/storage/tsdb/config.go
@@ -117,7 +117,6 @@ func (cfg *BucketConfig) RegisterFlags(f *flag.FlagSet) {
 	cfg.GCS.RegisterFlags(f)
 	cfg.Azure.RegisterFlags(f)
 	cfg.Filesystem.RegisterFlags(f)
-	cfg.TSDB.RegisterFlags(f)
 
 	f.StringVar(&cfg.Backend, "experimental.blocks-storage.backend", "s3", fmt.Sprintf("Backend storage to use. Supported backends are: %s.", strings.Join(supportedBackends, ", ")))
 }

From 2a8662ad0586add7d7243329547570b752135388 Mon Sep 17 00:00:00 2001
From: Owen Diehl 
Date: Tue, 25 Aug 2020 15:48:20 -0400
Subject: [PATCH 40/40] comment

---
 pkg/ruler/manager/memstore.go | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/pkg/ruler/manager/memstore.go b/pkg/ruler/manager/memstore.go
index 16694e5f0fe76..7a0c2797ca475 100644
--- a/pkg/ruler/manager/memstore.go
+++ b/pkg/ruler/manager/memstore.go
@@ -287,7 +287,8 @@ func (m *memStoreQuerier) Select(sortSeries bool, params *storage.SelectHints, m
 	// cache the result of the evaluation at this timestamp
 	cache.Set(m.ts, forStateVec)
 
-	// Finally return the series if it exists
+	// Finally return the series if it exists.
+	// Calling cache.Get leverages the existing code to return only a single sample.
 	smpl, ok = cache.Get(m.ts, ls)
 	if !ok || smpl == nil {
 		return storage.NoopSeriesSet()