From 19d6f0f0a20f8930ff51d4e03a64226e6a896928 Mon Sep 17 00:00:00 2001 From: Radoslaw Lesniewski Date: Wed, 30 Jan 2019 16:38:31 +0100 Subject: [PATCH 1/2] Vendor packages improbable-eng/thanos, prometheus/prometheus and prometheus/tsdb updated Signed-off-by: Radoslaw Lesniewski --- Gopkg.lock | 8 +- runnable/flags.go | 4 +- runnable/receiver.go | 8 +- .../improbable-eng/thanos/pkg/block/block.go | 133 +--- .../improbable-eng/thanos/pkg/block/index.go | 89 ++- .../thanos/pkg/cluster/cluster.go | 57 +- .../pkg/compact/downsample/downsample.go | 24 +- .../thanos/pkg/compact/downsample/pool.go | 5 +- .../thanos/pkg/runutil/runutil.go | 46 +- .../improbable-eng/thanos/pkg/store/bucket.go | 417 ++++++----- .../thanos/pkg/store/prometheus.go | 54 +- .../thanos/pkg/store/prompb/remote.pb.go | 428 +++++++++-- .../improbable-eng/thanos/pkg/store/proxy.go | 227 ++++-- .../thanos/pkg/store/storepb/custom.go | 19 + .../thanos/pkg/store/storepb/rpc.pb.go | 631 ++++++++++++++--- .../thanos/pkg/store/storepb/rpc.proto | 20 +- .../thanos/pkg/store/storepb/types.pb.go | 282 ++++++-- .../prometheus/prometheus/pkg/value/value.go | 2 +- .../prometheus/prometheus/prompb/README.md | 11 +- .../prometheus/prometheus/prompb/remote.pb.go | 357 +++++++--- .../prometheus/prometheus/prompb/remote.proto | 3 +- .../prometheus/prometheus/prompb/rpc.pb.go | 346 +++++++-- .../prometheus/prometheus/prompb/rpc.pb.gw.go | 41 +- .../prometheus/prometheus/prompb/types.pb.go | 428 ++++++++--- .../prometheus/prometheus/prompb/types.proto | 4 +- .../prometheus/prometheus/storage/buffer.go | 38 +- .../prometheus/prometheus/storage/fanout.go | 130 +++- .../prometheus/storage/interface.go | 7 +- .../prometheus/prometheus/storage/noop.go | 8 +- .../prometheus/storage/tsdb/tsdb.go | 66 +- vendor/github.com/prometheus/tsdb/.travis.yml | 21 +- vendor/github.com/prometheus/tsdb/README.md | 11 +- vendor/github.com/prometheus/tsdb/block.go | 133 +++- .../prometheus/tsdb/chunkenc/bstream.go | 4 +- .../prometheus/tsdb/chunkenc/xor.go | 8 +- .../prometheus/tsdb/chunks/chunks.go | 39 +- vendor/github.com/prometheus/tsdb/compact.go | 159 ++++- vendor/github.com/prometheus/tsdb/db.go | 581 +++++++++------ .../prometheus/tsdb/encoding_helpers.go | 13 + .../prometheus/tsdb/fileutil/fileutil.go | 125 ++++ .../prometheus/tsdb/fileutil/mmap.go | 13 + .../prometheus/tsdb/fileutil/mmap_386.go | 13 + .../prometheus/tsdb/fileutil/mmap_amd64.go | 13 + .../prometheus/tsdb/fileutil/sync_linux.go | 2 +- vendor/github.com/prometheus/tsdb/head.go | 667 +++++++++++++----- .../prometheus/tsdb/index/encoding_helpers.go | 69 ++ .../github.com/prometheus/tsdb/index/index.go | 450 ++++++------ .../prometheus/tsdb/index/postings.go | 202 +++--- .../prometheus/tsdb/labels/labels.go | 6 +- .../prometheus/tsdb/labels/selector.go | 20 - vendor/github.com/prometheus/tsdb/querier.go | 113 +-- vendor/github.com/prometheus/tsdb/repair.go | 51 +- .../github.com/prometheus/tsdb/tombstones.go | 102 ++- vendor/github.com/prometheus/tsdb/wal.go | 173 ++++- 54 files changed, 4835 insertions(+), 2046 deletions(-) diff --git a/Gopkg.lock b/Gopkg.lock index 1e4a157..37479af 100644 --- a/Gopkg.lock +++ b/Gopkg.lock @@ -200,7 +200,7 @@ "pkg/strutil", "pkg/tracing" ] - revision = "4980e85d716a4872b1d36fd5b08d3112b5fdae4f" + revision = "5e5e353e2c30dfff9e4504d7a4d884cf39118316" [[projects]] name = "github.com/julienschmidt/httprouter" @@ -334,8 +334,8 @@ "storage", "storage/tsdb" ] - revision = "167a4b4e73a8eca8df648d2d2043e21bdb9a7449" - version = "v2.4.3" + revision = 
"a2ef8cf2f509b58d140acf9e37f1e906b28582cd" + version = "v2.7.0" [[projects]] name = "github.com/prometheus/tsdb" @@ -347,7 +347,7 @@ "index", "labels" ] - revision = "bd832fc8274e8fe63999ac749daaaff9d881241f" + revision = "eb5034d5b0c54651beee345590b17a1040d67f9c" [[projects]] branch = "master" diff --git a/runnable/flags.go b/runnable/flags.go index 356f109..b786d2f 100644 --- a/runnable/flags.go +++ b/runnable/flags.go @@ -23,7 +23,7 @@ func regCommonServerFlags(cmd *kingpin.CmdClause) ( grpcTLSSrvCert *string, grpcTLSSrvKey *string, grpcTLSSrvClientCA *string, - peerFunc func(log.Logger, *prometheus.Registry, bool, string, bool) (*cluster.Peer, error)) { + peerFunc func(log.Logger, *prometheus.Registry, bool, string, bool) (cluster.Peer, error)) { grpcBindAddr = cmd.Flag("grpc-address", "Listen ip:port address for gRPC endpoints (StoreAPI). Make sure this address is routable from other components if you use gossip, 'grpc-advertise-address' is empty and you require cross-node connection."). Default("0.0.0.0:10901").String() @@ -68,7 +68,7 @@ func regCommonServerFlags(cmd *kingpin.CmdClause) ( grpcTLSSrvCert, grpcTLSSrvKey, grpcTLSSrvClientCA, - func(logger log.Logger, reg *prometheus.Registry, waitIfEmpty bool, httpAdvertiseAddr string, queryAPIEnabled bool) (*cluster.Peer, error) { + func(logger log.Logger, reg *prometheus.Registry, waitIfEmpty bool, httpAdvertiseAddr string, queryAPIEnabled bool) (cluster.Peer, error) { host, port, err := cluster.CalculateAdvertiseAddress(*grpcBindAddr, *grpcAdvertiseAddr) if err != nil { return nil, errors.Wrapf(err, "calculate advertise StoreAPI addr for gossip based on bindAddr: %s and advAddr: %s", *grpcBindAddr, *grpcAdvertiseAddr) diff --git a/runnable/receiver.go b/runnable/receiver.go index fb33730..1d53609 100644 --- a/runnable/receiver.go +++ b/runnable/receiver.go @@ -73,7 +73,7 @@ func RunReceiver( httpMetricsBindAddr string, remoteWriteAddress string, dataDir string, - peer *cluster.Peer, + peer cluster.Peer, component string, ) error { level.Info(logger).Log("msg", "setting up receiver") @@ -86,10 +86,10 @@ func RunReceiver( } tsdbCfg := &tsdb.Options{ - Retention: model.Duration(time.Hour * 24 * 15), + Retention: model.Duration(time.Minute * 6), NoLockfile: true, - MinBlockDuration: model.Duration(time.Hour * 2), - MaxBlockDuration: model.Duration(time.Hour * 2), + MinBlockDuration: model.Duration(time.Minute * 3), + MaxBlockDuration: model.Duration(time.Minute * 3), } ctxWeb, cancelWeb := context.WithCancel(context.Background()) diff --git a/vendor/github.com/improbable-eng/thanos/pkg/block/block.go b/vendor/github.com/improbable-eng/thanos/pkg/block/block.go index 118b7ea..cdc52a3 100644 --- a/vendor/github.com/improbable-eng/thanos/pkg/block/block.go +++ b/vendor/github.com/improbable-eng/thanos/pkg/block/block.go @@ -5,11 +5,12 @@ package block import ( "context" "encoding/json" - "io/ioutil" "os" "path" "path/filepath" + "github.com/improbable-eng/thanos/pkg/block/metadata" + "fmt" "github.com/go-kit/kit/log" @@ -17,8 +18,6 @@ import ( "github.com/improbable-eng/thanos/pkg/runutil" "github.com/oklog/ulid" "github.com/pkg/errors" - "github.com/prometheus/tsdb" - "github.com/prometheus/tsdb/fileutil" ) const ( @@ -33,103 +32,6 @@ const ( DebugMetas = "debug/metas" ) -type SourceType string - -const ( - UnknownSource SourceType = "" - SidecarSource SourceType = "sidecar" - CompactorSource SourceType = "compactor" - CompactorRepairSource SourceType = "compactor.repair" - RulerSource SourceType = "ruler" - BucketRepairSource SourceType = 
"bucket.repair" - TestSource SourceType = "test" -) - -// Meta describes the a block's meta. It wraps the known TSDB meta structure and -// extends it by Thanos-specific fields. -type Meta struct { - Version int `json:"version"` - - tsdb.BlockMeta - - Thanos ThanosMeta `json:"thanos"` -} - -// ThanosMeta holds block meta information specific to Thanos. -type ThanosMeta struct { - Labels map[string]string `json:"labels"` - Downsample ThanosDownsampleMeta `json:"downsample"` - - // Source is a real upload source of the block. - Source SourceType `json:"source"` -} - -type ThanosDownsampleMeta struct { - Resolution int64 `json:"resolution"` -} - -// WriteMetaFile writes the given meta into /meta.json. -func WriteMetaFile(logger log.Logger, dir string, meta *Meta) error { - // Make any changes to the file appear atomic. - path := filepath.Join(dir, MetaFilename) - tmp := path + ".tmp" - - f, err := os.Create(tmp) - if err != nil { - return err - } - - enc := json.NewEncoder(f) - enc.SetIndent("", "\t") - - if err := enc.Encode(meta); err != nil { - runutil.CloseWithLogOnErr(logger, f, "close meta") - return err - } - if err := f.Close(); err != nil { - return err - } - return renameFile(logger, tmp, path) -} - -// ReadMetaFile reads the given meta from /meta.json. -func ReadMetaFile(dir string) (*Meta, error) { - b, err := ioutil.ReadFile(filepath.Join(dir, MetaFilename)) - if err != nil { - return nil, err - } - var m Meta - - if err := json.Unmarshal(b, &m); err != nil { - return nil, err - } - if m.Version != 1 { - return nil, errors.Errorf("unexpected meta file version %d", m.Version) - } - return &m, nil -} - -func renameFile(logger log.Logger, from, to string) error { - if err := os.RemoveAll(to); err != nil { - return err - } - if err := os.Rename(from, to); err != nil { - return err - } - - // Directory was renamed; sync parent dir to persist rename. - pdir, err := fileutil.OpenDir(filepath.Dir(to)) - if err != nil { - return err - } - - if err = fileutil.Fsync(pdir); err != nil { - runutil.CloseWithLogOnErr(logger, pdir, "close dir") - return err - } - return pdir.Close() -} - // Download downloads directory that is mean to be block directory. func Download(ctx context.Context, logger log.Logger, bucket objstore.Bucket, id ulid.ULID, dst string) error { if err := objstore.DownloadDir(ctx, logger, bucket, id.String(), dst); err != nil { @@ -169,7 +71,7 @@ func Upload(ctx context.Context, logger log.Logger, bkt objstore.Bucket, bdir st return errors.Wrap(err, "not a block dir") } - meta, err := ReadMetaFile(bdir) + meta, err := metadata.Read(bdir) if err != nil { // No meta or broken meta file. return errors.Wrap(err, "read meta") @@ -216,16 +118,16 @@ func Delete(ctx context.Context, bucket objstore.Bucket, id ulid.ULID) error { } // DownloadMeta downloads only meta file from bucket by block ID. 
-func DownloadMeta(ctx context.Context, logger log.Logger, bkt objstore.Bucket, id ulid.ULID) (Meta, error) { +func DownloadMeta(ctx context.Context, logger log.Logger, bkt objstore.Bucket, id ulid.ULID) (metadata.Meta, error) { rc, err := bkt.Get(ctx, path.Join(id.String(), MetaFilename)) if err != nil { - return Meta{}, errors.Wrapf(err, "meta.json bkt get for %s", id.String()) + return metadata.Meta{}, errors.Wrapf(err, "meta.json bkt get for %s", id.String()) } defer runutil.CloseWithLogOnErr(logger, rc, "download meta bucket client") - var m Meta + var m metadata.Meta if err := json.NewDecoder(rc).Decode(&m); err != nil { - return Meta{}, errors.Wrapf(err, "decode meta.json for block %s", id.String()) + return metadata.Meta{}, errors.Wrapf(err, "decode meta.json for block %s", id.String()) } return m, nil } @@ -234,24 +136,3 @@ func IsBlockDir(path string) (id ulid.ULID, ok bool) { id, err := ulid.Parse(filepath.Base(path)) return id, err == nil } - -// InjectThanosMeta sets Thanos meta to the block meta JSON and saves it to the disk. -// NOTE: It should be used after writing any block by any Thanos component, otherwise we will miss crucial metadata. -func InjectThanosMeta(logger log.Logger, bdir string, meta ThanosMeta, downsampledMeta *tsdb.BlockMeta) (*Meta, error) { - newMeta, err := ReadMetaFile(bdir) - if err != nil { - return nil, errors.Wrap(err, "read new meta") - } - newMeta.Thanos = meta - - // While downsampling we need to copy original compaction. - if downsampledMeta != nil { - newMeta.Compaction = downsampledMeta.Compaction - } - - if err := WriteMetaFile(logger, bdir, newMeta); err != nil { - return nil, errors.Wrap(err, "write new meta") - } - - return newMeta, nil -} diff --git a/vendor/github.com/improbable-eng/thanos/pkg/block/index.go b/vendor/github.com/improbable-eng/thanos/pkg/block/index.go index 2249863..eb0a368 100644 --- a/vendor/github.com/improbable-eng/thanos/pkg/block/index.go +++ b/vendor/github.com/improbable-eng/thanos/pkg/block/index.go @@ -11,6 +11,10 @@ import ( "strings" "time" + "github.com/improbable-eng/thanos/pkg/block/metadata" + + "github.com/prometheus/tsdb/fileutil" + "github.com/go-kit/kit/log" "github.com/improbable-eng/thanos/pkg/runutil" "github.com/oklog/ulid" @@ -36,23 +40,84 @@ type indexCache struct { Postings []postingsRange } +type realByteSlice []byte + +func (b realByteSlice) Len() int { + return len(b) +} + +func (b realByteSlice) Range(start, end int) []byte { + return b[start:end] +} + +func (b realByteSlice) Sub(start, end int) index.ByteSlice { + return b[start:end] +} + +func getSymbolTable(b index.ByteSlice) (map[uint32]string, error) { + version := int(b.Range(4, 5)[0]) + + if version != 1 && version != 2 { + return nil, errors.Errorf("unknown index file version %d", version) + } + + toc, err := index.NewTOCFromByteSlice(b) + if err != nil { + return nil, errors.Wrap(err, "read TOC") + } + + symbolsV2, symbolsV1, err := index.ReadSymbols(b, version, int(toc.Symbols)) + if err != nil { + return nil, errors.Wrap(err, "read symbols") + } + + symbolsTable := make(map[uint32]string, len(symbolsV1)+len(symbolsV2)) + for o, s := range symbolsV1 { + symbolsTable[o] = s + } + for o, s := range symbolsV2 { + symbolsTable[uint32(o)] = s + } + + return symbolsTable, nil +} + // WriteIndexCache writes a cache file containing the first lookup stages // for an index file. 
-func WriteIndexCache(logger log.Logger, fn string, r *index.Reader) error { +func WriteIndexCache(logger log.Logger, indexFn string, fn string) error { + indexFile, err := fileutil.OpenMmapFile(indexFn) + if err != nil { + return errors.Wrapf(err, "open mmap index file %s", indexFn) + } + defer runutil.CloseWithLogOnErr(logger, indexFile, "close index cache mmap file from %s", indexFn) + + b := realByteSlice(indexFile.Bytes()) + indexr, err := index.NewReader(b) + if err != nil { + return errors.Wrap(err, "open index reader") + } + defer runutil.CloseWithLogOnErr(logger, indexr, "load index cache reader") + + // We assume reader verified index already. + symbols, err := getSymbolTable(b) + if err != nil { + return err + } + f, err := os.Create(fn) if err != nil { - return errors.Wrap(err, "create file") + return errors.Wrap(err, "create index cache file") } defer runutil.CloseWithLogOnErr(logger, f, "index cache writer") v := indexCache{ - Version: r.Version(), - Symbols: r.SymbolTable(), + Version: indexr.Version(), + Symbols: symbols, LabelValues: map[string][]string{}, } // Extract label value indices. - lnames, err := r.LabelIndices() + lnames, err := indexr.LabelIndices() if err != nil { return errors.Wrap(err, "read label indices") } @@ -62,7 +127,7 @@ func WriteIndexCache(logger log.Logger, fn string, r *index.Reader) error { } ln := lns[0] - tpls, err := r.LabelValues(ln) + tpls, err := indexr.LabelValues(ln) if err != nil { return errors.Wrap(err, "get label values") } @@ -82,7 +147,7 @@ func WriteIndexCache(logger log.Logger, fn string, r *index.Reader) error { } // Extract postings ranges. - pranges, err := r.PostingsRanges() + pranges, err := indexr.PostingsRanges() if err != nil { return errors.Wrap(err, "read postings ranges") } @@ -346,7 +411,7 @@ type ignoreFnType func(mint, maxt int64, prev *chunks.Meta, curr *chunks.Meta) ( // - removes all near "complete" outside chunks introduced by https://github.com/prometheus/tsdb/issues/347. // Fixable inconsistencies are resolved in the new block. 
// TODO(bplotka): https://github.com/improbable-eng/thanos/issues/378 -func Repair(logger log.Logger, dir string, id ulid.ULID, source SourceType, ignoreChkFns ...ignoreFnType) (resid ulid.ULID, err error) { +func Repair(logger log.Logger, dir string, id ulid.ULID, source metadata.SourceType, ignoreChkFns ...ignoreFnType) (resid ulid.ULID, err error) { if len(ignoreChkFns) == 0 { return resid, errors.New("no ignore chunk function specified") } @@ -355,7 +420,7 @@ func Repair(logger log.Logger, dir string, id ulid.ULID, source SourceType, igno entropy := rand.New(rand.NewSource(time.Now().UnixNano())) resid = ulid.MustNew(ulid.Now(), entropy) - meta, err := ReadMetaFile(bdir) + meta, err := metadata.Read(bdir) if err != nil { return resid, errors.Wrap(err, "read meta file") } @@ -363,7 +428,7 @@ func Repair(logger log.Logger, dir string, id ulid.ULID, source SourceType, igno return resid, errors.New("cannot repair downsampled block") } - b, err := tsdb.OpenBlock(bdir, nil) + b, err := tsdb.OpenBlock(logger, bdir, nil) if err != nil { return resid, errors.Wrap(err, "open block") } @@ -405,7 +470,7 @@ func Repair(logger log.Logger, dir string, id ulid.ULID, source SourceType, igno if err := rewrite(indexr, chunkr, indexw, chunkw, &resmeta, ignoreChkFns); err != nil { return resid, errors.Wrap(err, "rewrite block") } - if err := WriteMetaFile(logger, resdir, &resmeta); err != nil { + if err := metadata.Write(logger, resdir, &resmeta); err != nil { return resid, err } return resid, nil @@ -494,7 +559,7 @@ OUTER: func rewrite( indexr tsdb.IndexReader, chunkr tsdb.ChunkReader, indexw tsdb.IndexWriter, chunkw tsdb.ChunkWriter, - meta *Meta, + meta *metadata.Meta, ignoreChkFns []ignoreFnType, ) error { symbols, err := indexr.Symbols() diff --git a/vendor/github.com/improbable-eng/thanos/pkg/cluster/cluster.go b/vendor/github.com/improbable-eng/thanos/pkg/cluster/cluster.go index ba1ecb1..67470b0 100644 --- a/vendor/github.com/improbable-eng/thanos/pkg/cluster/cluster.go +++ b/vendor/github.com/improbable-eng/thanos/pkg/cluster/cluster.go @@ -21,8 +21,23 @@ import ( "github.com/prometheus/common/model" ) +type PeerStateFetcher interface { + PeerState(id string) (PeerState, bool) +} + +type Peer interface { + PeerStateFetcher + + Name() string + SetLabels(labels []storepb.Label) + SetTimestamps(mint int64, maxt int64) + Join(peerType PeerType, initialMetadata PeerMetadata) error + PeerStates(types ...PeerType) map[string]PeerState + Close(timeout time.Duration) +} + // Peer is a single peer in a gossip cluster. -type Peer struct { +type peer struct { logger log.Logger mlistMtx sync.RWMutex mlist *memberlist.Memberlist @@ -112,7 +127,7 @@ func New( refreshInterval time.Duration, secretKey []byte, networkType string, -) (*Peer, error) { +) (*peer, error) { l = log.With(l, "component", "cluster") bindHost, bindPortStr, err := net.SplitHostPort(bindAddr) @@ -180,7 +195,7 @@ func New( reg.MustRegister(gossipMsgsReceived) reg.MustRegister(gossipClusterMembers) - return &Peer{ + return &peer{ logger: l, knownPeers: knownPeers, cfg: cfg, @@ -196,7 +211,7 @@ func New( } // Join joins to the memberlist gossip cluster using knownPeers and given peerType and initialMetadata. 
-func (p *Peer) Join(peerType PeerType, initialMetadata PeerMetadata) error { +func (p *peer) Join(peerType PeerType, initialMetadata PeerMetadata) error { if p.hasJoined() { return errors.New("peer already joined; close it first to rejoin") } @@ -241,7 +256,7 @@ func (p *Peer) Join(peerType PeerType, initialMetadata PeerMetadata) error { return nil } -func (p *Peer) periodicallyRefresh() { +func (p *peer) periodicallyRefresh() { tick := time.NewTicker(p.refreshInterval) defer tick.Stop() @@ -258,7 +273,7 @@ func (p *Peer) periodicallyRefresh() { } // Refresh renews membership cluster, this will refresh DNS names and join newly added members -func (p *Peer) Refresh() error { +func (p *peer) Refresh() error { p.mlistMtx.Lock() defer p.mlistMtx.Unlock() @@ -302,7 +317,7 @@ func (p *Peer) Refresh() error { return nil } -func (p *Peer) hasJoined() bool { +func (p *peer) hasJoined() bool { p.mlistMtx.RLock() defer p.mlistMtx.RUnlock() @@ -327,7 +342,7 @@ func warnIfAlone(logger log.Logger, d time.Duration, stopc chan struct{}, numNod // SetLabels updates internal metadata's labels stored in PeerState for this peer. // Note that this data will be propagated based on gossipInterval we set. -func (p *Peer) SetLabels(labels []storepb.Label) { +func (p *peer) SetLabels(labels []storepb.Label) { if !p.hasJoined() { return } @@ -339,7 +354,7 @@ func (p *Peer) SetLabels(labels []storepb.Label) { // SetTimestamps updates internal metadata's timestamps stored in PeerState for this peer. // Note that this data will be propagated based on gossipInterval we set. -func (p *Peer) SetTimestamps(mint int64, maxt int64) { +func (p *peer) SetTimestamps(mint int64, maxt int64) { if !p.hasJoined() { return } @@ -352,7 +367,7 @@ func (p *Peer) SetTimestamps(mint int64, maxt int64) { // Close leaves the cluster waiting up to timeout and shutdowns peer if cluster left. // TODO(bplotka): Add this method into run.Group closing logic for each command. This will improve graceful shutdown. -func (p *Peer) Close(timeout time.Duration) { +func (p *peer) Close(timeout time.Duration) { if !p.hasJoined() { return } @@ -368,7 +383,7 @@ func (p *Peer) Close(timeout time.Duration) { } // Name returns the unique ID of this peer in the cluster. -func (p *Peer) Name() string { +func (p *peer) Name() string { if !p.hasJoined() { return "" } @@ -382,7 +397,7 @@ func PeerTypesStoreAPIs() []PeerType { } // PeerStates returns the custom state information for each peer by memberlist peer id (name). -func (p *Peer) PeerStates(types ...PeerType) map[string]PeerState { +func (p *peer) PeerStates(types ...PeerType) map[string]PeerState { if !p.hasJoined() { return nil } @@ -409,7 +424,7 @@ func (p *Peer) PeerStates(types ...PeerType) map[string]PeerState { } // PeerState returns the custom state information by memberlist peer name. -func (p *Peer) PeerState(id string) (PeerState, bool) { +func (p *peer) PeerState(id string) (PeerState, bool) { if !p.hasJoined() { return PeerState{}, false } @@ -423,7 +438,7 @@ func (p *Peer) PeerState(id string) (PeerState, bool) { // Info returns a JSON-serializable dump of cluster state. // Useful for debug. 
-func (p *Peer) Info() map[string]interface{} { +func (p *peer) Info() map[string]interface{} { if !p.hasJoined() { return nil } @@ -538,3 +553,17 @@ func parseNetworkConfig(networkType string) (*memberlist.Config, error) { return mc, nil } + +func NewNoop() Peer { + return noopPeer{} +} + +type noopPeer struct{} + +func (n noopPeer) Name() string { return "no gossip" } +func (n noopPeer) SetLabels(labels []storepb.Label) {} +func (n noopPeer) SetTimestamps(mint int64, maxt int64) {} +func (n noopPeer) PeerState(id string) (PeerState, bool) { return PeerState{}, false } +func (n noopPeer) Join(peerType PeerType, initialMetadata PeerMetadata) error { return nil } +func (n noopPeer) PeerStates(types ...PeerType) map[string]PeerState { return nil } +func (n noopPeer) Close(timeout time.Duration) {} diff --git a/vendor/github.com/improbable-eng/thanos/pkg/compact/downsample/downsample.go b/vendor/github.com/improbable-eng/thanos/pkg/compact/downsample/downsample.go index 305f720..f5afecd 100644 --- a/vendor/github.com/improbable-eng/thanos/pkg/compact/downsample/downsample.go +++ b/vendor/github.com/improbable-eng/thanos/pkg/compact/downsample/downsample.go @@ -5,7 +5,8 @@ import ( "path/filepath" "sort" - "github.com/improbable-eng/thanos/pkg/block" + "github.com/improbable-eng/thanos/pkg/block/metadata" + "github.com/prometheus/prometheus/pkg/value" "github.com/prometheus/tsdb/chunkenc" @@ -31,7 +32,7 @@ const ( // Downsample downsamples the given block. It writes a new block into dir and returns its ID. func Downsample( logger log.Logger, - origMeta *block.Meta, + origMeta *metadata.Meta, b tsdb.BlockReader, dir string, resolution int64, @@ -113,6 +114,7 @@ func Downsample( origMeta.Thanos.Downsample.Resolution, resolution, ) + if err != nil { return id, errors.Wrap(err, "downsample aggregate block") } @@ -125,18 +127,18 @@ func Downsample( if err != nil { return id, errors.Wrap(err, "create compactor") } - id, err = comp.Write(dir, newb, origMeta.MinTime, origMeta.MaxTime) + id, err = comp.Write(dir, newb, origMeta.MinTime, origMeta.MaxTime, &origMeta.BlockMeta) if err != nil { return id, errors.Wrap(err, "compact head") } bdir := filepath.Join(dir, id.String()) - var tmeta block.ThanosMeta + var tmeta metadata.Thanos tmeta = origMeta.Thanos - tmeta.Source = block.CompactorSource + tmeta.Source = metadata.CompactorSource tmeta.Downsample.Resolution = resolution - _, err = block.InjectThanosMeta(logger, bdir, tmeta, &origMeta.BlockMeta) + _, err = metadata.InjectThanos(logger, bdir, tmeta, &origMeta.BlockMeta) if err != nil { return id, errors.Wrapf(err, "failed to finalize the block %s", bdir) } @@ -228,13 +230,20 @@ func (b *memBlock) Chunks() (tsdb.ChunkReader, error) { } func (b *memBlock) Tombstones() (tsdb.TombstoneReader, error) { - return tsdb.EmptyTombstoneReader(), nil + return emptyTombstoneReader{}, nil } func (b *memBlock) Close() error { return nil } +type emptyTombstoneReader struct{} + +func (emptyTombstoneReader) Get(ref uint64) (tsdb.Intervals, error) { return nil, nil } +func (emptyTombstoneReader) Iter(func(uint64, tsdb.Intervals) error) error { return nil } +func (emptyTombstoneReader) Total() uint64 { return 0 } +func (emptyTombstoneReader) Close() error { return nil } + // currentWindow returns the end timestamp of the window that t falls into. func currentWindow(t, r int64) int64 { // The next timestamp is the next number after s.t that's aligned with window. 
@@ -412,6 +421,7 @@ func downsampleRaw(data []sample, resolution int64) []chunks.Meta { chks = append(chks, ab.encode()) } + return chks } diff --git a/vendor/github.com/improbable-eng/thanos/pkg/compact/downsample/pool.go b/vendor/github.com/improbable-eng/thanos/pkg/compact/downsample/pool.go index 9b199e7..17094cd 100644 --- a/vendor/github.com/improbable-eng/thanos/pkg/compact/downsample/pool.go +++ b/vendor/github.com/improbable-eng/thanos/pkg/compact/downsample/pool.go @@ -6,14 +6,14 @@ import ( "github.com/prometheus/tsdb/chunkenc" ) -// Pool is a memory pool of chunk objects, supporting Thanos aggr chunk encoding. +// Pool is a memory pool of chunk objects, supporting Thanos aggregated chunk encoding. // It maintains separate pools for xor and aggr chunks. type pool struct { wrapped chunkenc.Pool aggr sync.Pool } -// TODO(bplotka): Add reasonable limits to our sync pools them to detect OOMs early. +// TODO(bwplotka): Add reasonable limits to our sync pooling them to detect OOMs early. func NewPool() chunkenc.Pool { return &pool{ wrapped: chunkenc.NewPool(), @@ -51,6 +51,7 @@ func (p *pool) Put(c chunkenc.Chunk) error { // Clear []byte. *ac = AggrChunk(nil) p.aggr.Put(ac) + return nil } return p.wrapped.Put(c) diff --git a/vendor/github.com/improbable-eng/thanos/pkg/runutil/runutil.go b/vendor/github.com/improbable-eng/thanos/pkg/runutil/runutil.go index e10732c..d6dbfe7 100644 --- a/vendor/github.com/improbable-eng/thanos/pkg/runutil/runutil.go +++ b/vendor/github.com/improbable-eng/thanos/pkg/runutil/runutil.go @@ -1,13 +1,51 @@ +// Package runutil provides helpers to advanced function scheduling control like repeat or retry. +// +// It's very often the case when you need to excutes some code every fixed intervals or have it retried automatically. +// To make it reliably with proper timeout, you need to carefully arrange some boilerplate for this. +// Below function does it for you. +// +// For repeat executes, use Repeat: +// +// err := runutil.Repeat(10*time.Second, stopc, func() error { +// // ... +// }) +// +// Retry starts executing closure function f until no error is returned from f: +// +// err := runutil.Retry(10*time.Second, stopc, func() error { +// // ... +// }) +// +// For logging an error on each f error, use RetryWithLog: +// +// err := runutil.RetryWithLog(logger, 10*time.Second, stopc, func() error { +// // ... +// }) +// +// Another use case for runutil package is when you want to close a `Closer` interface. As we all know, we should close all implements of `Closer`, such as *os.File. Commonly we will use: +// +// defer closer.Close() +// +// The problem is that Close() usually can return important error e.g for os.File the actual file flush might happen (and fail) on `Close` method. It's important to *always* check error. Thanos provides utility functions to log every error like those, allowing to put them in convenient `defer`: +// +// defer runutil.CloseWithLogOnErr(logger, closer, "log format message") +// +// For capturing error, use CloseWithErrCapture: +// +// var err error +// defer runutil.CloseWithErrCapture(logger, &err, closer, "log format message") +// +// // ... +// +// If Close() returns error, err will capture it and return by argument. 
package runutil import ( + "fmt" + "io" "os" "time" - "io" - - "fmt" - "github.com/go-kit/kit/log" "github.com/go-kit/kit/log/level" "github.com/pkg/errors" diff --git a/vendor/github.com/improbable-eng/thanos/pkg/store/bucket.go b/vendor/github.com/improbable-eng/thanos/pkg/store/bucket.go index cf6301a..3b45756 100644 --- a/vendor/github.com/improbable-eng/thanos/pkg/store/bucket.go +++ b/vendor/github.com/improbable-eng/thanos/pkg/store/bucket.go @@ -19,6 +19,7 @@ import ( "github.com/go-kit/kit/log" "github.com/go-kit/kit/log/level" "github.com/improbable-eng/thanos/pkg/block" + "github.com/improbable-eng/thanos/pkg/block/metadata" "github.com/improbable-eng/thanos/pkg/compact/downsample" "github.com/improbable-eng/thanos/pkg/objstore" "github.com/improbable-eng/thanos/pkg/pool" @@ -30,7 +31,6 @@ import ( "github.com/oklog/ulid" "github.com/pkg/errors" "github.com/prometheus/client_golang/prometheus" - "github.com/prometheus/tsdb" "github.com/prometheus/tsdb/chunkenc" "github.com/prometheus/tsdb/chunks" "github.com/prometheus/tsdb/fileutil" @@ -468,42 +468,20 @@ func (s *BucketStore) blockSeries( matchers []labels.Matcher, req *storepb.SeriesRequest, ) (storepb.SeriesSet, *queryStats, error) { - stats := &queryStats{} - - // The postings to preload are registered within the call to PostingsForMatchers, - // when it invokes indexr.Postings for each underlying postings list. - // They are ready to use ONLY after preloadPostings was called successfully. - lazyPostings, err := tsdb.PostingsForMatchers(indexr, matchers...) - if err != nil { - return nil, stats, errors.Wrap(err, "get postings for matchers") - } - // If the tree was reduced to the empty postings list, don't preload the registered - // leaf postings and return early with an empty result. - if lazyPostings == index.EmptyPostings() { - return storepb.EmptySeriesSet(), stats, nil - } - if err := indexr.preloadPostings(); err != nil { - return nil, stats, errors.Wrap(err, "preload postings") - } - // Get result postings list by resolving the postings tree. - ps, err := index.ExpandPostings(lazyPostings) + ps, err := indexr.ExpandedPostings(matchers) if err != nil { - return nil, stats, errors.Wrap(err, "expand postings") + return nil, nil, errors.Wrap(err, "expanded matching posting") } - // As of version two all series entries are 16 byte padded. All references - // we get have to account for that to get the correct offset. - // We do it right at the beginning as it's easier than doing it more fine-grained - // at the loading level. - if indexr.block.indexVersion >= 2 { - for i, id := range ps { - ps[i] = id * 16 - } + if len(ps) == 0 { + return storepb.EmptySeriesSet(), indexr.stats, nil } - // Preload all series index data - if err := indexr.preloadSeries(ps); err != nil { - return nil, stats, errors.Wrap(err, "preload series") + // Preload all series index data. + // TODO(bwplotka): Consider not keeping all series in memory all the time. + // TODO(bwplotka): Do lazy loading in one step as `ExpandingPostings` method. 
+ if err := indexr.PreloadSeries(ps); err != nil { + return nil, nil, errors.Wrap(err, "preload series") } // Transform all series into the response types and mark their relevant chunks @@ -514,8 +492,8 @@ func (s *BucketStore) blockSeries( chks []chunks.Meta ) for _, id := range ps { - if err := indexr.Series(id, &lset, &chks); err != nil { - return nil, stats, errors.Wrap(err, "read series") + if err := indexr.LoadedSeries(id, &lset, &chks); err != nil { + return nil, nil, errors.Wrap(err, "read series") } s := seriesEntry{ lset: make([]storepb.Label, 0, len(lset)), @@ -552,7 +530,7 @@ func (s *BucketStore) blockSeries( } if err := chunkr.addPreload(meta.Ref); err != nil { - return nil, stats, errors.Wrap(err, "add chunk preload") + return nil, nil, errors.Wrap(err, "add chunk preload") } s.chks = append(s.chks, storepb.AggrChunk{ MinTime: meta.MinTime, @@ -567,7 +545,7 @@ func (s *BucketStore) blockSeries( // Preload all chunks that were marked in the previous stage. if err := chunkr.preload(); err != nil { - return nil, stats, errors.Wrap(err, "preload chunks") + return nil, nil, errors.Wrap(err, "preload chunks") } // Transform all chunks into the response format. @@ -575,18 +553,15 @@ func (s *BucketStore) blockSeries( for i, ref := range s.refs { chk, err := chunkr.Chunk(ref) if err != nil { - return nil, stats, errors.Wrap(err, "get chunk") + return nil, nil, errors.Wrap(err, "get chunk") } if err := populateChunk(&s.chks[i], chk, req.Aggregates); err != nil { - return nil, stats, errors.Wrap(err, "populate chunk") + return nil, nil, errors.Wrap(err, "populate chunk") } } } - stats = stats.merge(indexr.stats) - stats = stats.merge(chunkr.stats) - - return newBucketSeriesSet(res), stats, nil + return newBucketSeriesSet(res), indexr.stats.merge(chunkr.stats), nil } func populateChunk(out *storepb.AggrChunk, in chunkenc.Chunk, aggrs []storepb.Aggr) error { @@ -673,6 +648,9 @@ func debugFoundBlockSetOverview(logger log.Logger, mint, maxt int64, lset labels } // Series implements the storepb.StoreServer interface. +// TODO(bwplotka): It buffers all chunks in memory and only then streams to client. +// 1. Either count chunk sizes and error out too big query. +// 2. Stream posting -> series -> chunk all together. func (s *BucketStore) Series(req *storepb.SeriesRequest, srv storepb.Store_SeriesServer) error { matchers, err := translateMatchers(req.Matchers) if err != nil { @@ -819,7 +797,7 @@ func (s *BucketStore) LabelNames(context.Context, *storepb.LabelNamesRequest) (* // LabelValues implements the storepb.StoreServer interface. func (s *BucketStore) LabelValues(ctx context.Context, req *storepb.LabelValuesRequest) (*storepb.LabelValuesResponse, error) { - var g errgroup.Group + g, gctx := errgroup.WithContext(ctx) s.mtx.RLock() @@ -827,25 +805,13 @@ func (s *BucketStore) LabelValues(ctx context.Context, req *storepb.LabelValuesR var sets [][]string for _, b := range s.blocks { - indexr := b.indexReader(ctx) + indexr := b.indexReader(gctx) // TODO(fabxc): only aggregate chunk metas first and add a subsequent fetch stage // where we consolidate requests. 
g.Go(func() error { defer runutil.CloseWithLogOnErr(s.logger, indexr, "label values") - tpls, err := indexr.LabelValues(req.Label) - if err != nil { - return errors.Wrap(err, "lookup label values") - } - res := make([]string, 0, tpls.Len()) - - for i := 0; i < tpls.Len(); i++ { - e, err := tpls.At(i) - if err != nil { - return errors.Wrap(err, "get string tuple entry") - } - res = append(res, e[0]) - } + res := indexr.LabelValues(req.Label) mtx.Lock() sets = append(sets, res) @@ -995,7 +961,7 @@ func (s *bucketBlockSet) labelMatchers(matchers ...labels.Matcher) ([]labels.Mat type bucketBlock struct { logger log.Logger bucket objstore.BucketReader - meta *block.Meta + meta *metadata.Meta dir string indexCache *indexCache chunkPool *pool.BytesPool @@ -1059,7 +1025,7 @@ func (b *bucketBlock) loadMeta(ctx context.Context, id ulid.ULID) error { } else if err != nil { return err } - meta, err := block.ReadMetaFile(b.dir) + meta, err := metadata.Read(b.dir) if err != nil { return errors.Wrap(err, "read meta.json") } @@ -1089,13 +1055,7 @@ func (b *bucketBlock) loadIndexCache(ctx context.Context) (err error) { } }() - indexr, err := index.NewFileReader(fn) - if err != nil { - return errors.Wrap(err, "open index reader") - } - defer runutil.CloseWithLogOnErr(b.logger, indexr, "load index cache reader") - - if err := block.WriteIndexCache(b.logger, cachefn, indexr); err != nil { + if err := block.WriteIndexCache(b.logger, fn, cachefn); err != nil { return errors.Wrap(err, "write index cache") } @@ -1155,6 +1115,8 @@ func (b *bucketBlock) Close() error { return nil } +// bucketIndexReader is a custom index reader (not conforming index.Reader interface) that gets postings +// by type bucketIndexReader struct { logger log.Logger ctx context.Context @@ -1163,9 +1125,8 @@ type bucketIndexReader struct { stats *queryStats cache *indexCache - mtx sync.Mutex - loadedPostings []*lazyPostings - loadedSeries map[uint64][]byte + mtx sync.Mutex + loadedSeries map[uint64][]byte } func newBucketIndexReader(ctx context.Context, logger log.Logger, block *bucketBlock, cache *indexCache) *bucketIndexReader { @@ -1178,72 +1139,206 @@ func newBucketIndexReader(ctx context.Context, logger log.Logger, block *bucketB cache: cache, loadedSeries: map[uint64][]byte{}, } - r.dec.SetSymbolTable(r.block.symbols) + r.dec.LookupSymbol = r.lookupSymbol return r } -func (r *bucketIndexReader) preloadPostings() error { +func (r *bucketIndexReader) lookupSymbol(o uint32) (string, error) { + s, ok := r.block.symbols[o] + if !ok { + return "", errors.Errorf("bucketIndexReader: unknown symbol offset %d", o) + } + return s, nil +} + +// Postings returns postings in expanded list instead of index.Postings. +// This is because we need to have them buffered anyway to perform efficient lookup +// on object storage. +// Found posting IDs (ps) are not strictly required to point to a valid Series, e.g. during +// background garbage collections. +// +// Reminder: A posting is a reference (represented as a uint64) to a series reference, which in turn points to the first +// chunk where the series contains the matching label-value pair for a given block of data. Postings can be fetched by +// single label name=value. +func (r *bucketIndexReader) ExpandedPostings(ms []labels.Matcher) ([]uint64, error) { + var postingsToIntersect []index.Postings + + // NOTE: Derived from tsdb.PostingsForMatchers. 
+ for _, m := range ms { + matching, err := matchingLabels(r.LabelValues, m) + if err != nil { + return nil, errors.Wrap(err, "match labels") + } + if len(matching) == 0 { + continue + } + + // We need to load all matching postings to tell what postings are intersecting with what. + postings, err := r.fetchPostings(matching) + if err != nil { + return nil, errors.Wrap(err, "get postings") + } + + postingsToIntersect = append(postingsToIntersect, postings) + } + + if len(postingsToIntersect) == 0 { + return nil, nil + } + + ps, err := index.ExpandPostings(index.Intersect(postingsToIntersect...)) + if err != nil { + return nil, errors.Wrap(err, "expand") + } + + // As of version two all series entries are 16 byte padded. All references + // we get have to account for that to get the correct offset. + if r.block.indexVersion >= 2 { + for i, id := range ps { + ps[i] = id * 16 + } + } + + return ps, nil +} + +// NOTE: Derived from tsdb.postingsForMatcher. index.Merge is equivalent to map duplication. +func matchingLabels(lvalsFn func(name string) []string, m labels.Matcher) (labels.Labels, error) { + // If the matcher selects an empty value, it selects all the series which don't + // have the label name set too. See: https://github.com/prometheus/prometheus/issues/3575 + // and https://github.com/prometheus/prometheus/pull/3578#issuecomment-351653555 + if m.Matches("") { + // We don't support tsdb.postingsForUnsetLabelMatcher. + // This is because it requires fetching all postings for index. + // This requires additional logic to avoid fetching big bytes range (todo: how big?). See https://github.com/prometheus/prometheus/pull/3578#issuecomment-351653555 + // to what it blocks. + return nil, errors.Errorf("support for <> != matcher is not implemented; empty matcher for label name %s", m.Name()) + } + + // Fast-path for equal matching. + if em, ok := m.(*labels.EqualMatcher); ok { + return labels.Labels{{Name: em.Name(), Value: em.Value()}}, nil + } + + var matchingLabels labels.Labels + for _, val := range lvalsFn(m.Name()) { + if m.Matches(val) { + matchingLabels = append(matchingLabels, labels.Label{Name: m.Name(), Value: val}) + } + } + + return matchingLabels, nil +} + +type postingPtr struct { + key labels.Label + ptr index.Range +} + +// fetchPostings returns sorted slice of postings that match the selected labels. +func (r *bucketIndexReader) fetchPostings(keys labels.Labels) (index.Postings, error) { const maxGapSize = 512 * 1024 - ps := r.loadedPostings + var ( + ptrs []postingPtr + postings = make([]index.Postings, 0, len(keys)) + ) + + // TODO(bwplotka): sort postings? + + for _, k := range keys { + // Get postings for given key from cache first. + if b, ok := r.cache.postings(r.block.meta.ULID, k); ok { + r.stats.postingsTouched++ + r.stats.postingsTouchedSizeSum += len(b) - sort.Slice(ps, func(i, j int) bool { - return ps[i].ptr.Start < ps[j].ptr.Start + _, l, err := r.dec.Postings(b) + if err != nil { + return nil, errors.Wrap(err, "decode postings") + } + postings = append(postings, l) + continue + } + + // Cache miss; save pointer for actual posting in index stored in object store. + ptr, ok := r.block.postings[k] + if !ok { + // Index malformed? Should not happen. 
+ continue + } + + ptrs = append(ptrs, postingPtr{ptr: ptr, key: k}) + } + + sort.Slice(ptrs, func(i, j int) bool { + return ptrs[i].ptr.Start < ptrs[j].ptr.Start }) - parts := partitionRanges(len(ps), func(i int) (start, end uint64) { - return uint64(ps[i].ptr.Start), uint64(ps[i].ptr.End) + + // TODO(bwplotka): Asses how large in worst case scenario this can be. (e.g fetch for AllPostingsKeys) + // Consider sub split if too big. + parts := partitionRanges(len(ptrs), func(i int) (start, end uint64) { + return uint64(ptrs[i].ptr.Start), uint64(ptrs[i].ptr.End) }, maxGapSize) - var g run.Group + var g run.Group for _, p := range parts { ctx, cancel := context.WithCancel(r.ctx) - i, j := p[0], p[1] + i, j := p.elemRng[0], p.elemRng[1] + + start := int64(p.start) + // We assume index does not have any ptrs that has 0 length. + length := int64(p.end) - start + // Fetch from object storage concurrently and update stats and posting list. g.Add(func() error { - return r.loadPostings(ctx, ps[i:j], ps[i].ptr.Start, ps[j-1].ptr.End) - }, func(err error) { + begin := time.Now() + + b, err := r.block.readIndexRange(ctx, start, length) if err != nil { - cancel() + return errors.Wrap(err, "read postings range") } - }) - } - return g.Run() -} + fetchTime := time.Since(begin) -// loadPostings loads given postings using given start + length. It is expected to have given postings data within given range. -func (r *bucketIndexReader) loadPostings(ctx context.Context, postings []*lazyPostings, start, end int64) error { - begin := time.Now() + r.mtx.Lock() + defer r.mtx.Unlock() - b, err := r.block.readIndexRange(r.ctx, int64(start), int64(end-start)) - if err != nil { - return errors.Wrap(err, "read postings range") - } + r.stats.postingsFetchCount++ + r.stats.postingsFetched += j - i + r.stats.postingsFetchDurationSum += fetchTime + r.stats.postingsFetchedSizeSum += int(length) - r.mtx.Lock() - defer r.mtx.Unlock() + for _, p := range ptrs[i:j] { + c := b[p.ptr.Start-start : p.ptr.End-start] - r.stats.postingsFetchCount++ - r.stats.postingsFetched += len(postings) - r.stats.postingsFetchDurationSum += time.Since(begin) - r.stats.postingsFetchedSizeSum += int(end - start) + _, fetchedPostings, err := r.dec.Postings(c) + if err != nil { + return errors.Wrap(err, "read postings list") + } - for _, p := range postings { - c := b[p.ptr.Start-start : p.ptr.End-start] + // Return postings and fill LRU cache. + postings = append(postings, fetchedPostings) + r.cache.setPostings(r.block.meta.ULID, p.key, c) - _, l, err := r.dec.Postings(c) - if err != nil { - return errors.Wrap(err, "read postings list") - } - p.set(l) - r.cache.setPostings(r.block.meta.ULID, p.key, c) - // If we just fetched it we still have to update the stats for touched postings. - r.stats.postingsTouched++ - r.stats.postingsTouchedSizeSum += len(c) + // If we just fetched it we still have to update the stats for touched postings. 
+ r.stats.postingsTouched++ + r.stats.postingsTouchedSizeSum += len(c) + } + return nil + }, func(err error) { + if err != nil { + cancel() + } + }) } - return nil + + if err := g.Run(); err != nil { + return nil, err + } + + return index.Merge(postings...), nil } -func (r *bucketIndexReader) preloadSeries(ids []uint64) error { +func (r *bucketIndexReader) PreloadSeries(ids []uint64) error { const maxSeriesSize = 64 * 1024 const maxGapSize = 512 * 1024 @@ -1265,10 +1360,10 @@ func (r *bucketIndexReader) preloadSeries(ids []uint64) error { for _, p := range parts { ctx, cancel := context.WithCancel(r.ctx) - i, j := p[0], p[1] + i, j := p.elemRng[0], p.elemRng[1] g.Add(func() error { - return r.loadSeries(ctx, ids[i:j], ids[i], ids[j-1]+maxSeriesSize) + return r.loadSeries(ctx, ids[i:j], p.start, p.end+maxSeriesSize) }, func(err error) { if err != nil { cancel() @@ -1311,90 +1406,50 @@ func (r *bucketIndexReader) loadSeries(ctx context.Context, ids []uint64, start, return nil } +type part struct { + start uint64 + end uint64 + + elemRng [2]int +} + // partitionRanges partitions length entries into n <= length ranges that cover all // input ranges. // It combines entries that are separated by reasonably small gaps. -func partitionRanges(length int, rng func(int) (uint64, uint64), maxGapSize uint64) (parts [][2]int) { +// It supports overlapping ranges. +// NOTE: It expects range to be sorted by start time. +func partitionRanges(length int, rng func(int) (uint64, uint64), maxGapSize uint64) (parts []part) { j := 0 k := 0 for k < length { j = k k++ - _, end := rng(j) + p := part{} + p.start, p.end = rng(j) // Keep growing the range until the end or we encounter a large gap. for ; k < length; k++ { s, e := rng(k) - if end+maxGapSize < s { + if p.end+maxGapSize < s { break } - end = e - } - parts = append(parts, [2]int{j, k}) - } - return parts -} - -func (r *bucketIndexReader) Symbols() (map[string]struct{}, error) { - return nil, errors.New("not implemented") -} - -// LabelValues returns the possible label values. -func (r *bucketIndexReader) LabelValues(names ...string) (index.StringTuples, error) { - if len(names) != 1 { - return nil, errors.New("label value lookups only supported for single name") - } - return index.NewStringTuples(r.block.lvals[names[0]], 1) -} - -type lazyPostings struct { - index.Postings - key labels.Label - ptr index.Range -} -func (p *lazyPostings) set(v index.Postings) { - p.Postings = v -} - -// Postings returns the postings list iterator for the label pair. -// The Postings here contain the offsets to the series inside the index. -// Found IDs are not strictly required to point to a valid Series, e.g. during -// background garbage collections. -func (r *bucketIndexReader) Postings(name, value string) (index.Postings, error) { - l := labels.Label{Name: name, Value: value} - ptr, ok := r.block.postings[l] - if !ok { - return index.EmptyPostings(), nil - } - if b, ok := r.cache.postings(r.block.meta.ULID, l); ok { - r.stats.postingsTouched++ - r.stats.postingsTouchedSizeSum += len(b) - - _, p, err := r.dec.Postings(b) - if err != nil { - return nil, errors.Wrap(err, "decode postings") + if p.end <= e { + p.end = e + } } - return p, nil + p.elemRng = [2]int{j, k} + parts = append(parts, p) } - // The stats for touched postings are updated as they are loaded. 
- p := &lazyPostings{key: l, ptr: ptr} - r.loadedPostings = append(r.loadedPostings, p) - return p, nil -} - -// SortedPostings returns a postings list that is reordered to be sorted -// by the label set of the underlying series. -func (r *bucketIndexReader) SortedPostings(p index.Postings) index.Postings { - return p + return parts } -// Series populates the given labels and chunk metas for the series identified +// LoadedSeries populates the given labels and chunk metas for the series identified // by the reference. // Returns ErrNotFound if the ref does not resolve to a known series. -func (r *bucketIndexReader) Series(ref uint64, lset *labels.Labels, chks *[]chunks.Meta) error { +func (r *bucketIndexReader) LoadedSeries(ref uint64, lset *labels.Labels, chks *[]chunks.Meta) error { b, ok := r.loadedSeries[ref] if !ok { return errors.Errorf("series %d not found", ref) @@ -1406,9 +1461,13 @@ func (r *bucketIndexReader) Series(ref uint64, lset *labels.Labels, chks *[]chun return r.dec.Series(b, lset, chks) } -// LabelIndices returns the label pairs for which indices exist. -func (r *bucketIndexReader) LabelIndices() ([][]string, error) { - return nil, errors.New("not implemented") +// LabelValues returns label values for single name. +func (r *bucketIndexReader) LabelValues(name string) []string { + res := make([]string, 0, len(r.block.lvals[name])) + for _, v := range r.block.lvals[name] { + res = append(res, v) + } + return res } // Close released the underlying resources of the reader. @@ -1473,10 +1532,10 @@ func (r *bucketChunkReader) preload() error { for _, p := range parts { ctx, cancel := context.WithCancel(r.ctx) - m, n := p[0], p[1] + m, n := p.elemRng[0], p.elemRng[1] g.Add(func() error { - return r.loadChunks(ctx, offsets[m:n], seq, offsets[m], offsets[n-1]+maxChunkSize) + return r.loadChunks(ctx, offsets[m:n], seq, uint32(p.start), uint32(p.end)+maxChunkSize) }, func(err error) { if err != nil { cancel() diff --git a/vendor/github.com/improbable-eng/thanos/pkg/store/prometheus.go b/vendor/github.com/improbable-eng/thanos/pkg/store/prometheus.go index c467170..9296636 100644 --- a/vendor/github.com/improbable-eng/thanos/pkg/store/prometheus.go +++ b/vendor/github.com/improbable-eng/thanos/pkg/store/prometheus.go @@ -6,6 +6,7 @@ import ( "encoding/json" "fmt" "io" + "math" "net/http" "net/url" "path" @@ -155,19 +156,16 @@ func (p *PrometheusStore) Series(r *storepb.SeriesRequest, s storepb.Store_Serie continue } - // We generally expect all samples of the requested range to be traversed - // so we just encode all samples into one big chunk regardless of size. 
- enc, cb, err := p.encodeChunk(e.Samples) + // XOR encoding supports a max size of 2^16 - 1 samples, so we need + // to chunk all samples into groups of no more than 2^16 - 1 + aggregatedChunks, err := p.chunkSamples(e, math.MaxUint16) if err != nil { - return status.Error(codes.Unknown, err.Error()) + return err } + resp := storepb.NewSeriesResponse(&storepb.Series{ Labels: lset, - Chunks: []storepb.AggrChunk{{ - MinTime: int64(e.Samples[0].Timestamp), - MaxTime: int64(e.Samples[len(e.Samples)-1].Timestamp), - Raw: &storepb.Chunk{Type: enc, Data: cb}, - }}, + Chunks: aggregatedChunks, }) if err := s.Send(resp); err != nil { return err @@ -176,6 +174,33 @@ func (p *PrometheusStore) Series(r *storepb.SeriesRequest, s storepb.Store_Serie return nil } +func (p *PrometheusStore) chunkSamples(series prompb.TimeSeries, samplesPerChunk int) ([]storepb.AggrChunk, error) { + var aggregatedChunks []storepb.AggrChunk + samples := series.Samples + + for len(samples) > 0 { + chunkSize := len(samples) + if chunkSize > samplesPerChunk { + chunkSize = samplesPerChunk + } + + enc, cb, err := p.encodeChunk(samples[:chunkSize]) + if err != nil { + return nil, status.Error(codes.Unknown, err.Error()) + } + + aggregatedChunks = append(aggregatedChunks, storepb.AggrChunk{ + MinTime: int64(samples[0].Timestamp), + MaxTime: int64(samples[chunkSize-1].Timestamp), + Raw: &storepb.Chunk{Type: enc, Data: cb}, + }) + + samples = samples[chunkSize:] + } + + return aggregatedChunks, nil +} + func (p *PrometheusStore) promSeries(ctx context.Context, q prompb.Query) (*prompb.ReadResponse, error) { span, ctx := tracing.StartSpan(ctx, "query_prometheus") defer span.Finish() @@ -313,9 +338,14 @@ func (p *PrometheusStore) LabelNames(ctx context.Context, r *storepb.LabelNamesR } // LabelValues returns all known label values for a given label name. -func (p *PrometheusStore) LabelValues(ctx context.Context, r *storepb.LabelValuesRequest) ( - *storepb.LabelValuesResponse, error, -) { +func (p *PrometheusStore) LabelValues(ctx context.Context, r *storepb.LabelValuesRequest) (*storepb.LabelValuesResponse, error) { + externalLset := p.externalLabels() + + // First check for matching external label which has priority. + if l := externalLset.Get(r.Label); l != "" { + return &storepb.LabelValuesResponse{Values: []string{l}}, nil + } + u := *p.base u.Path = path.Join(u.Path, "/api/v1/label/", r.Label, "/values") diff --git a/vendor/github.com/improbable-eng/thanos/pkg/store/prompb/remote.pb.go b/vendor/github.com/improbable-eng/thanos/pkg/store/prompb/remote.pb.go index 3c86272..590290c 100644 --- a/vendor/github.com/improbable-eng/thanos/pkg/store/prompb/remote.pb.go +++ b/vendor/github.com/improbable-eng/thanos/pkg/store/prompb/remote.pb.go @@ -1,22 +1,6 @@ // Code generated by protoc-gen-gogo. DO NOT EDIT. // source: remote.proto -/* - Package prompb is a generated protocol buffer package. 
- - It is generated from these files: - remote.proto - - It has these top-level messages: - ReadRequest - ReadResponse - Query - QueryResult - Sample - TimeSeries - Label - LabelMatcher -*/ package prompb import proto "github.com/gogo/protobuf/proto" @@ -24,7 +8,7 @@ import fmt "fmt" import math "math" import _ "github.com/gogo/protobuf/gogoproto" -import binary "encoding/binary" +import encoding_binary "encoding/binary" import io "io" @@ -64,88 +48,338 @@ var LabelMatcher_Type_value = map[string]int32{ func (x LabelMatcher_Type) String() string { return proto.EnumName(LabelMatcher_Type_name, int32(x)) } -func (LabelMatcher_Type) EnumDescriptor() ([]byte, []int) { return fileDescriptorRemote, []int{7, 0} } +func (LabelMatcher_Type) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_remote_5645ea049238b205, []int{7, 0} +} type ReadRequest struct { - Queries []Query `protobuf:"bytes,1,rep,name=queries" json:"queries"` + Queries []Query `protobuf:"bytes,1,rep,name=queries" json:"queries"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ReadRequest) Reset() { *m = ReadRequest{} } +func (m *ReadRequest) String() string { return proto.CompactTextString(m) } +func (*ReadRequest) ProtoMessage() {} +func (*ReadRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_remote_5645ea049238b205, []int{0} +} +func (m *ReadRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ReadRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ReadRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (dst *ReadRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_ReadRequest.Merge(dst, src) +} +func (m *ReadRequest) XXX_Size() int { + return m.Size() +} +func (m *ReadRequest) XXX_DiscardUnknown() { + xxx_messageInfo_ReadRequest.DiscardUnknown(m) } -func (m *ReadRequest) Reset() { *m = ReadRequest{} } -func (m *ReadRequest) String() string { return proto.CompactTextString(m) } -func (*ReadRequest) ProtoMessage() {} -func (*ReadRequest) Descriptor() ([]byte, []int) { return fileDescriptorRemote, []int{0} } +var xxx_messageInfo_ReadRequest proto.InternalMessageInfo type ReadResponse struct { // In same order as the request's queries. 
- Results []QueryResult `protobuf:"bytes,1,rep,name=results" json:"results"` + Results []QueryResult `protobuf:"bytes,1,rep,name=results" json:"results"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } -func (m *ReadResponse) Reset() { *m = ReadResponse{} } -func (m *ReadResponse) String() string { return proto.CompactTextString(m) } -func (*ReadResponse) ProtoMessage() {} -func (*ReadResponse) Descriptor() ([]byte, []int) { return fileDescriptorRemote, []int{1} } +func (m *ReadResponse) Reset() { *m = ReadResponse{} } +func (m *ReadResponse) String() string { return proto.CompactTextString(m) } +func (*ReadResponse) ProtoMessage() {} +func (*ReadResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_remote_5645ea049238b205, []int{1} +} +func (m *ReadResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ReadResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ReadResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (dst *ReadResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_ReadResponse.Merge(dst, src) +} +func (m *ReadResponse) XXX_Size() int { + return m.Size() +} +func (m *ReadResponse) XXX_DiscardUnknown() { + xxx_messageInfo_ReadResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_ReadResponse proto.InternalMessageInfo type Query struct { - StartTimestampMs int64 `protobuf:"varint,1,opt,name=start_timestamp_ms,json=startTimestampMs,proto3" json:"start_timestamp_ms,omitempty"` - EndTimestampMs int64 `protobuf:"varint,2,opt,name=end_timestamp_ms,json=endTimestampMs,proto3" json:"end_timestamp_ms,omitempty"` - Matchers []LabelMatcher `protobuf:"bytes,3,rep,name=matchers" json:"matchers"` + StartTimestampMs int64 `protobuf:"varint,1,opt,name=start_timestamp_ms,json=startTimestampMs,proto3" json:"start_timestamp_ms,omitempty"` + EndTimestampMs int64 `protobuf:"varint,2,opt,name=end_timestamp_ms,json=endTimestampMs,proto3" json:"end_timestamp_ms,omitempty"` + Matchers []LabelMatcher `protobuf:"bytes,3,rep,name=matchers" json:"matchers"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } -func (m *Query) Reset() { *m = Query{} } -func (m *Query) String() string { return proto.CompactTextString(m) } -func (*Query) ProtoMessage() {} -func (*Query) Descriptor() ([]byte, []int) { return fileDescriptorRemote, []int{2} } +func (m *Query) Reset() { *m = Query{} } +func (m *Query) String() string { return proto.CompactTextString(m) } +func (*Query) ProtoMessage() {} +func (*Query) Descriptor() ([]byte, []int) { + return fileDescriptor_remote_5645ea049238b205, []int{2} +} +func (m *Query) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Query) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Query.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (dst *Query) XXX_Merge(src proto.Message) { + xxx_messageInfo_Query.Merge(dst, src) +} +func (m *Query) XXX_Size() int { + return m.Size() +} +func (m *Query) XXX_DiscardUnknown() { + xxx_messageInfo_Query.DiscardUnknown(m) +} + +var xxx_messageInfo_Query proto.InternalMessageInfo type QueryResult struct { - Timeseries []TimeSeries 
`protobuf:"bytes,1,rep,name=timeseries" json:"timeseries"` + Timeseries []TimeSeries `protobuf:"bytes,1,rep,name=timeseries" json:"timeseries"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *QueryResult) Reset() { *m = QueryResult{} } +func (m *QueryResult) String() string { return proto.CompactTextString(m) } +func (*QueryResult) ProtoMessage() {} +func (*QueryResult) Descriptor() ([]byte, []int) { + return fileDescriptor_remote_5645ea049238b205, []int{3} +} +func (m *QueryResult) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *QueryResult) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_QueryResult.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (dst *QueryResult) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueryResult.Merge(dst, src) +} +func (m *QueryResult) XXX_Size() int { + return m.Size() +} +func (m *QueryResult) XXX_DiscardUnknown() { + xxx_messageInfo_QueryResult.DiscardUnknown(m) } -func (m *QueryResult) Reset() { *m = QueryResult{} } -func (m *QueryResult) String() string { return proto.CompactTextString(m) } -func (*QueryResult) ProtoMessage() {} -func (*QueryResult) Descriptor() ([]byte, []int) { return fileDescriptorRemote, []int{3} } +var xxx_messageInfo_QueryResult proto.InternalMessageInfo type Sample struct { - Value float64 `protobuf:"fixed64,1,opt,name=value,proto3" json:"value,omitempty"` - Timestamp int64 `protobuf:"varint,2,opt,name=timestamp,proto3" json:"timestamp,omitempty"` + Value float64 `protobuf:"fixed64,1,opt,name=value,proto3" json:"value,omitempty"` + Timestamp int64 `protobuf:"varint,2,opt,name=timestamp,proto3" json:"timestamp,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } -func (m *Sample) Reset() { *m = Sample{} } -func (m *Sample) String() string { return proto.CompactTextString(m) } -func (*Sample) ProtoMessage() {} -func (*Sample) Descriptor() ([]byte, []int) { return fileDescriptorRemote, []int{4} } +func (m *Sample) Reset() { *m = Sample{} } +func (m *Sample) String() string { return proto.CompactTextString(m) } +func (*Sample) ProtoMessage() {} +func (*Sample) Descriptor() ([]byte, []int) { + return fileDescriptor_remote_5645ea049238b205, []int{4} +} +func (m *Sample) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Sample) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Sample.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (dst *Sample) XXX_Merge(src proto.Message) { + xxx_messageInfo_Sample.Merge(dst, src) +} +func (m *Sample) XXX_Size() int { + return m.Size() +} +func (m *Sample) XXX_DiscardUnknown() { + xxx_messageInfo_Sample.DiscardUnknown(m) +} + +var xxx_messageInfo_Sample proto.InternalMessageInfo type TimeSeries struct { - Labels []Label `protobuf:"bytes,1,rep,name=labels" json:"labels"` - Samples []Sample `protobuf:"bytes,2,rep,name=samples" json:"samples"` + Labels []Label `protobuf:"bytes,1,rep,name=labels" json:"labels"` + Samples []Sample `protobuf:"bytes,2,rep,name=samples" json:"samples"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 
`json:"-"` } -func (m *TimeSeries) Reset() { *m = TimeSeries{} } -func (m *TimeSeries) String() string { return proto.CompactTextString(m) } -func (*TimeSeries) ProtoMessage() {} -func (*TimeSeries) Descriptor() ([]byte, []int) { return fileDescriptorRemote, []int{5} } +func (m *TimeSeries) Reset() { *m = TimeSeries{} } +func (m *TimeSeries) String() string { return proto.CompactTextString(m) } +func (*TimeSeries) ProtoMessage() {} +func (*TimeSeries) Descriptor() ([]byte, []int) { + return fileDescriptor_remote_5645ea049238b205, []int{5} +} +func (m *TimeSeries) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *TimeSeries) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_TimeSeries.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (dst *TimeSeries) XXX_Merge(src proto.Message) { + xxx_messageInfo_TimeSeries.Merge(dst, src) +} +func (m *TimeSeries) XXX_Size() int { + return m.Size() +} +func (m *TimeSeries) XXX_DiscardUnknown() { + xxx_messageInfo_TimeSeries.DiscardUnknown(m) +} + +var xxx_messageInfo_TimeSeries proto.InternalMessageInfo type Label struct { - Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` - Value string `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + Value string `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Label) Reset() { *m = Label{} } +func (m *Label) String() string { return proto.CompactTextString(m) } +func (*Label) ProtoMessage() {} +func (*Label) Descriptor() ([]byte, []int) { + return fileDescriptor_remote_5645ea049238b205, []int{6} +} +func (m *Label) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Label) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Label.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (dst *Label) XXX_Merge(src proto.Message) { + xxx_messageInfo_Label.Merge(dst, src) +} +func (m *Label) XXX_Size() int { + return m.Size() +} +func (m *Label) XXX_DiscardUnknown() { + xxx_messageInfo_Label.DiscardUnknown(m) } -func (m *Label) Reset() { *m = Label{} } -func (m *Label) String() string { return proto.CompactTextString(m) } -func (*Label) ProtoMessage() {} -func (*Label) Descriptor() ([]byte, []int) { return fileDescriptorRemote, []int{6} } +var xxx_messageInfo_Label proto.InternalMessageInfo // Matcher specifies a rule, which can match or set of labels or not. 
type LabelMatcher struct { - Type LabelMatcher_Type `protobuf:"varint,1,opt,name=type,proto3,enum=prometheus.LabelMatcher_Type" json:"type,omitempty"` - Name string `protobuf:"bytes,2,opt,name=name,proto3" json:"name,omitempty"` - Value string `protobuf:"bytes,3,opt,name=value,proto3" json:"value,omitempty"` + Type LabelMatcher_Type `protobuf:"varint,1,opt,name=type,proto3,enum=prometheus.LabelMatcher_Type" json:"type,omitempty"` + Name string `protobuf:"bytes,2,opt,name=name,proto3" json:"name,omitempty"` + Value string `protobuf:"bytes,3,opt,name=value,proto3" json:"value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *LabelMatcher) Reset() { *m = LabelMatcher{} } +func (m *LabelMatcher) String() string { return proto.CompactTextString(m) } +func (*LabelMatcher) ProtoMessage() {} +func (*LabelMatcher) Descriptor() ([]byte, []int) { + return fileDescriptor_remote_5645ea049238b205, []int{7} +} +func (m *LabelMatcher) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *LabelMatcher) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_LabelMatcher.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (dst *LabelMatcher) XXX_Merge(src proto.Message) { + xxx_messageInfo_LabelMatcher.Merge(dst, src) +} +func (m *LabelMatcher) XXX_Size() int { + return m.Size() +} +func (m *LabelMatcher) XXX_DiscardUnknown() { + xxx_messageInfo_LabelMatcher.DiscardUnknown(m) } -func (m *LabelMatcher) Reset() { *m = LabelMatcher{} } -func (m *LabelMatcher) String() string { return proto.CompactTextString(m) } -func (*LabelMatcher) ProtoMessage() {} -func (*LabelMatcher) Descriptor() ([]byte, []int) { return fileDescriptorRemote, []int{7} } +var xxx_messageInfo_LabelMatcher proto.InternalMessageInfo func init() { proto.RegisterType((*ReadRequest)(nil), "prometheus.ReadRequest") @@ -185,6 +419,9 @@ func (m *ReadRequest) MarshalTo(dAtA []byte) (int, error) { i += n } } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } return i, nil } @@ -215,6 +452,9 @@ func (m *ReadResponse) MarshalTo(dAtA []byte) (int, error) { i += n } } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } return i, nil } @@ -255,6 +495,9 @@ func (m *Query) MarshalTo(dAtA []byte) (int, error) { i += n } } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } return i, nil } @@ -285,6 +528,9 @@ func (m *QueryResult) MarshalTo(dAtA []byte) (int, error) { i += n } } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } return i, nil } @@ -306,7 +552,7 @@ func (m *Sample) MarshalTo(dAtA []byte) (int, error) { if m.Value != 0 { dAtA[i] = 0x9 i++ - binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.Value)))) + encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.Value)))) i += 8 } if m.Timestamp != 0 { @@ -314,6 +560,9 @@ func (m *Sample) MarshalTo(dAtA []byte) (int, error) { i++ i = encodeVarintRemote(dAtA, i, uint64(m.Timestamp)) } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } return i, nil } @@ -356,6 +605,9 @@ func (m *TimeSeries) MarshalTo(dAtA []byte) (int, error) { i += n } } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } return i, nil } @@ -386,6 +638,9 @@ func (m 
*Label) MarshalTo(dAtA []byte) (int, error) { i = encodeVarintRemote(dAtA, i, uint64(len(m.Value))) i += copy(dAtA[i:], m.Value) } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } return i, nil } @@ -421,6 +676,9 @@ func (m *LabelMatcher) MarshalTo(dAtA []byte) (int, error) { i = encodeVarintRemote(dAtA, i, uint64(len(m.Value))) i += copy(dAtA[i:], m.Value) } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } return i, nil } @@ -442,6 +700,9 @@ func (m *ReadRequest) Size() (n int) { n += 1 + l + sovRemote(uint64(l)) } } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } return n } @@ -454,6 +715,9 @@ func (m *ReadResponse) Size() (n int) { n += 1 + l + sovRemote(uint64(l)) } } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } return n } @@ -472,6 +736,9 @@ func (m *Query) Size() (n int) { n += 1 + l + sovRemote(uint64(l)) } } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } return n } @@ -484,6 +751,9 @@ func (m *QueryResult) Size() (n int) { n += 1 + l + sovRemote(uint64(l)) } } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } return n } @@ -496,6 +766,9 @@ func (m *Sample) Size() (n int) { if m.Timestamp != 0 { n += 1 + sovRemote(uint64(m.Timestamp)) } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } return n } @@ -514,6 +787,9 @@ func (m *TimeSeries) Size() (n int) { n += 1 + l + sovRemote(uint64(l)) } } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } return n } @@ -528,6 +804,9 @@ func (m *Label) Size() (n int) { if l > 0 { n += 1 + l + sovRemote(uint64(l)) } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } return n } @@ -545,6 +824,9 @@ func (m *LabelMatcher) Size() (n int) { if l > 0 { n += 1 + l + sovRemote(uint64(l)) } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } return n } @@ -633,6 +915,7 @@ func (m *ReadRequest) Unmarshal(dAtA []byte) error { if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) iNdEx += skippy } } @@ -714,6 +997,7 @@ func (m *ReadResponse) Unmarshal(dAtA []byte) error { if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) iNdEx += skippy } } @@ -833,6 +1117,7 @@ func (m *Query) Unmarshal(dAtA []byte) error { if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) iNdEx += skippy } } @@ -914,6 +1199,7 @@ func (m *QueryResult) Unmarshal(dAtA []byte) error { if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) iNdEx += skippy } } @@ -960,7 +1246,7 @@ func (m *Sample) Unmarshal(dAtA []byte) error { if (iNdEx + 8) > l { return io.ErrUnexpectedEOF } - v = uint64(binary.LittleEndian.Uint64(dAtA[iNdEx:])) + v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) iNdEx += 8 m.Value = float64(math.Float64frombits(v)) case 2: @@ -994,6 +1280,7 @@ func (m *Sample) Unmarshal(dAtA []byte) error { if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
iNdEx += skippy } } @@ -1106,6 +1393,7 @@ func (m *TimeSeries) Unmarshal(dAtA []byte) error { if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) iNdEx += skippy } } @@ -1214,6 +1502,7 @@ func (m *Label) Unmarshal(dAtA []byte) error { if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) iNdEx += skippy } } @@ -1341,6 +1630,7 @@ func (m *LabelMatcher) Unmarshal(dAtA []byte) error { if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) iNdEx += skippy } } @@ -1455,9 +1745,9 @@ var ( ErrIntOverflowRemote = fmt.Errorf("proto: integer overflow") ) -func init() { proto.RegisterFile("remote.proto", fileDescriptorRemote) } +func init() { proto.RegisterFile("remote.proto", fileDescriptor_remote_5645ea049238b205) } -var fileDescriptorRemote = []byte{ +var fileDescriptor_remote_5645ea049238b205 = []byte{ // 448 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x74, 0x93, 0xc1, 0x8a, 0x13, 0x41, 0x10, 0x86, 0xd3, 0x33, 0xc9, 0xc4, 0xad, 0x84, 0x65, 0x2c, 0x16, 0x0d, 0xa2, 0x51, 0xe6, 0x94, diff --git a/vendor/github.com/improbable-eng/thanos/pkg/store/proxy.go b/vendor/github.com/improbable-eng/thanos/pkg/store/proxy.go index b1ebc7c..4eab458 100644 --- a/vendor/github.com/improbable-eng/thanos/pkg/store/proxy.go +++ b/vendor/github.com/improbable-eng/thanos/pkg/store/proxy.go @@ -25,7 +25,7 @@ type Client interface { // Client to access the store. storepb.StoreClient - // Labels that apply to all date exposed by the backing store. + // Labels that apply to all data exposed by the backing store. Labels() []storepb.Label // Minimum and maximum time range of data in the store. @@ -75,6 +75,25 @@ func (s *ProxyStore) Info(ctx context.Context, r *storepb.InfoRequest) (*storepb return res, nil } +type ctxRespSender struct { + ctx context.Context + ch chan<- *storepb.SeriesResponse +} + +func newRespCh(ctx context.Context, buffer int) (*ctxRespSender, <-chan *storepb.SeriesResponse, func()) { + respCh := make(chan *storepb.SeriesResponse, buffer) + return &ctxRespSender{ctx: ctx, ch: respCh}, respCh, func() { close(respCh) } +} + +func (s ctxRespSender) send(r *storepb.SeriesResponse) { + select { + case <-s.ctx.Done(): + return + case s.ch <- r: + return + } +} + // Series returns all series for a requested time range and label matcher. Requested series are taken from other // stores and proxied to RPC client. NOTE: Resulted data are not trimmed exactly to min and max time range. func (s *ProxyStore) Series(r *storepb.SeriesRequest, srv storepb.Store_SeriesServer) error { @@ -94,65 +113,82 @@ func (s *ProxyStore) Series(r *storepb.SeriesRequest, srv storepb.Store_SeriesSe } var ( - seriesSet []storepb.SeriesSet - respCh = make(chan *storepb.SeriesResponse, len(stores)+1) - g errgroup.Group + g, gctx = errgroup.WithContext(srv.Context()) + + // Allow to buffer max 10 series response. + // Each might be quite large (multi chunk long series given by sidecar). 
+ respSender, respRecv, closeFn = newRespCh(gctx, 10) ) - var storeDebugMsgs []string + g.Go(func() error { + var ( + seriesSet []storepb.SeriesSet + storeDebugMsgs []string + r = &storepb.SeriesRequest{ + MinTime: r.MinTime, + MaxTime: r.MaxTime, + Matchers: newMatchers, + Aggregates: r.Aggregates, + MaxResolutionWindow: r.MaxResolutionWindow, + PartialResponseDisabled: r.PartialResponseDisabled, + } + wg = &sync.WaitGroup{} + ) + + defer func() { + wg.Wait() + closeFn() + }() + + for _, st := range stores { + // We might be able to skip the store if its meta information indicates + // it cannot have series matching our query. + // NOTE: all matchers are validated in labelsMatches method so we explicitly ignore error. + if ok, _ := storeMatches(st, r.MinTime, r.MaxTime, r.Matchers...); !ok { + storeDebugMsgs = append(storeDebugMsgs, fmt.Sprintf("store %s filtered out", st)) + continue + } + storeDebugMsgs = append(storeDebugMsgs, fmt.Sprintf("store %s queried", st)) - for _, st := range stores { - // We might be able to skip the store if its meta information indicates - // it cannot have series matching our query. - // NOTE: all matchers are validated in labelsMatches method so we explicitly ignore error. - if ok, _ := storeMatches(st, r.MinTime, r.MaxTime, newMatchers...); !ok { - storeDebugMsgs = append(storeDebugMsgs, fmt.Sprintf("store %s filtered out", st)) - continue - } - storeDebugMsgs = append(storeDebugMsgs, fmt.Sprintf("store %s queried", st)) - - sc, err := st.Series(srv.Context(), &storepb.SeriesRequest{ - MinTime: r.MinTime, - MaxTime: r.MaxTime, - Matchers: newMatchers, - Aggregates: r.Aggregates, - MaxResolutionWindow: r.MaxResolutionWindow, - }) - if err != nil { - storeID := fmt.Sprintf("%v", st.Labels()) - if storeID == "" { - storeID = "Store Gateway" + sc, err := st.Series(gctx, r) + if err != nil { + storeID := fmt.Sprintf("%v", storepb.LabelsToString(st.Labels())) + if storeID == "" { + storeID = "Store Gateway" + } + err = errors.Wrapf(err, "fetch series for %s %s", storeID, st) + if r.PartialResponseDisabled { + level.Error(s.logger).Log("err", err, "msg", "partial response disabled; aborting request") + return err + } + respSender.send(storepb.NewWarnSeriesResponse(err)) + continue } - err = errors.Wrapf(err, "fetch series for %s", storeID) - level.Error(s.logger).Log("err", err) - respCh <- storepb.NewWarnSeriesResponse(err) - continue - } - seriesSet = append(seriesSet, startStreamSeriesSet(sc, respCh, 10)) - } - if len(seriesSet) == 0 { - err := errors.New("No store matched for this query") - level.Warn(s.logger).Log("err", err, "stores", strings.Join(storeDebugMsgs, ";")) - respCh <- storepb.NewWarnSeriesResponse(err) - return nil - } + // Schedule streamSeriesSet that translates gRPC streamed response into seriesSet (if series) or respCh if warnings. + seriesSet = append(seriesSet, startStreamSeriesSet(gctx, wg, sc, respSender, st.String(), !r.PartialResponseDisabled)) + } - level.Debug(s.logger).Log("msg", strings.Join(storeDebugMsgs, ";")) + level.Debug(s.logger).Log("msg", strings.Join(storeDebugMsgs, ";")) - g.Go(func() error { - defer close(respCh) + if len(seriesSet) == 0 { + // This is indicates that configured StoreAPIs are not the ones end user expects + err := errors.New("No store matched for this query") + level.Warn(s.logger).Log("err", err, "stores", strings.Join(storeDebugMsgs, ";")) + respSender.send(storepb.NewWarnSeriesResponse(err)) + return nil + } mergedSet := storepb.MergeSeriesSets(seriesSet...) 
for mergedSet.Next() { var series storepb.Series series.Labels, series.Chunks = mergedSet.At() - respCh <- storepb.NewSeriesResponse(&series) + respSender.send(storepb.NewSeriesResponse(&series)) } return mergedSet.Err() }) - for resp := range respCh { + for resp := range respRecv { if err := srv.Send(resp); err != nil { return status.Error(codes.Unknown, errors.Wrap(err, "send series response").Error()) } @@ -166,48 +202,74 @@ func (s *ProxyStore) Series(r *storepb.SeriesRequest, srv storepb.Store_SeriesSe } +type warnSender interface { + send(*storepb.SeriesResponse) +} + // streamSeriesSet iterates over incoming stream of series. // All errors are sent out of band via warning channel. type streamSeriesSet struct { stream storepb.Store_SeriesClient - warnCh chan<- *storepb.SeriesResponse + warnCh warnSender currSeries *storepb.Series recvCh chan *storepb.Series + + errMtx sync.Mutex + err error + + name string } func startStreamSeriesSet( + ctx context.Context, + wg *sync.WaitGroup, stream storepb.Store_SeriesClient, - warnCh chan<- *storepb.SeriesResponse, - bufferSize int, + warnCh warnSender, + name string, + partialResponse bool, ) *streamSeriesSet { s := &streamSeriesSet{ stream: stream, warnCh: warnCh, - recvCh: make(chan *storepb.Series, bufferSize), + recvCh: make(chan *storepb.Series, 10), + name: name, } - go s.fetchLoop() - return s -} -func (s *streamSeriesSet) fetchLoop() { - defer close(s.recvCh) - for { - r, err := s.stream.Recv() - if err == io.EOF { - return - } - if err != nil { - s.warnCh <- storepb.NewWarnSeriesResponse(errors.Wrap(err, "receive series")) - return - } + wg.Add(1) + go func() { + defer wg.Done() + defer close(s.recvCh) + for { + r, err := s.stream.Recv() + if err == io.EOF { + return + } + + if ctx.Err() != nil { + return + } + + if err != nil { + if partialResponse { + s.warnCh.send(storepb.NewWarnSeriesResponse(errors.Wrap(err, "receive series"))) + return + } + + s.errMtx.Lock() + defer s.errMtx.Unlock() + s.err = err + return + } - if w := r.GetWarning(); w != "" { - s.warnCh <- storepb.NewWarnSeriesResponse(errors.New(w)) - continue + if w := r.GetWarning(); w != "" { + s.warnCh.send(storepb.NewWarnSeriesResponse(errors.New(w))) + continue + } + s.recvCh <- r.GetSeries() } - s.recvCh <- r.GetSeries() - } + }() + return s } // Next blocks until new message is received or stream is closed. @@ -223,7 +285,9 @@ func (s *streamSeriesSet) At() ([]storepb.Label, []storepb.AggrChunk) { return s.currSeries.Labels, s.currSeries.Chunks } func (s *streamSeriesSet) Err() error { - return nil + s.errMtx.Lock() + defer s.errMtx.Unlock() + return errors.Wrap(s.err, s.name) } // matchStore returns true if the given store may hold data for the given label matchers. 
@@ -266,24 +330,30 @@ func (s *ProxyStore) LabelValues(ctx context.Context, r *storepb.LabelValuesRequ warnings []string all [][]string mtx sync.Mutex - wg sync.WaitGroup + g, gctx = errgroup.WithContext(ctx) ) + stores, err := s.stores(ctx) if err != nil { return nil, status.Errorf(codes.Unknown, err.Error()) } for _, st := range stores { - wg.Add(1) - go func(s Client) { - defer wg.Done() - resp, err := s.LabelValues(ctx, &storepb.LabelValuesRequest{ + store := st + g.Go(func() error { + resp, err := store.LabelValues(gctx, &storepb.LabelValuesRequest{ Label: r.Label, + PartialResponseDisabled: r.PartialResponseDisabled, }) if err != nil { + err = errors.Wrapf(err, "fetch label values from store %s", store) + if r.PartialResponseDisabled { + return err + } + mtx.Lock() warnings = append(warnings, errors.Wrap(err, "fetch label values").Error()) mtx.Unlock() - return + return nil } mtx.Lock() @@ -291,11 +361,14 @@ func (s *ProxyStore) LabelValues(ctx context.Context, r *storepb.LabelValuesRequ all = append(all, resp.Values) mtx.Unlock() - return - }(st) + return nil + }) + } + + if err := g.Wait(); err != nil { + return nil, err } - wg.Wait() return &storepb.LabelValuesResponse{ Values: strutil.MergeUnsortedSlices(all...), Warnings: warnings, diff --git a/vendor/github.com/improbable-eng/thanos/pkg/store/storepb/custom.go b/vendor/github.com/improbable-eng/thanos/pkg/store/storepb/custom.go index 5447a03..18e2035 100644 --- a/vendor/github.com/improbable-eng/thanos/pkg/store/storepb/custom.go +++ b/vendor/github.com/improbable-eng/thanos/pkg/store/storepb/custom.go @@ -2,6 +2,8 @@ package storepb import ( "strings" + + "github.com/prometheus/prometheus/pkg/labels" ) func NewWarnSeriesResponse(err error) *SeriesResponse { @@ -150,3 +152,20 @@ func (s *mergedSeriesSet) Next() bool { } return true } + +func LabelsToPromLabels(lset []Label) labels.Labels { + ret := make(labels.Labels, len(lset), len(lset)) + for i, l := range lset { + ret[i] = labels.Label{Name: l.Name, Value: l.Value} + } + + return ret +} + +func LabelsToString(lset []Label) string { + var s []string + for _, l := range lset { + s = append(s, l.String()) + } + return "[" + strings.Join(s, ",") + "]" +} diff --git a/vendor/github.com/improbable-eng/thanos/pkg/store/storepb/rpc.pb.go b/vendor/github.com/improbable-eng/thanos/pkg/store/storepb/rpc.pb.go index e1dca6b..432d2ec 100644 --- a/vendor/github.com/improbable-eng/thanos/pkg/store/storepb/rpc.pb.go +++ b/vendor/github.com/improbable-eng/thanos/pkg/store/storepb/rpc.pb.go @@ -1,28 +1,6 @@ // Code generated by protoc-gen-gogo. DO NOT EDIT. // source: rpc.proto -/* - Package storepb is a generated protocol buffer package. 
- - It is generated from these files: - rpc.proto - types.proto - - It has these top-level messages: - InfoRequest - InfoResponse - SeriesRequest - SeriesResponse - LabelNamesRequest - LabelNamesResponse - LabelValuesRequest - LabelValuesResponse - Label - Chunk - Series - AggrChunk - LabelMatcher -*/ package storepb import proto "github.com/gogo/protobuf/proto" @@ -77,51 +55,178 @@ var Aggr_value = map[string]int32{ func (x Aggr) String() string { return proto.EnumName(Aggr_name, int32(x)) } -func (Aggr) EnumDescriptor() ([]byte, []int) { return fileDescriptorRpc, []int{0} } +func (Aggr) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_rpc_6ccafde20b200300, []int{0} +} type InfoRequest struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *InfoRequest) Reset() { *m = InfoRequest{} } +func (m *InfoRequest) String() string { return proto.CompactTextString(m) } +func (*InfoRequest) ProtoMessage() {} +func (*InfoRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_rpc_6ccafde20b200300, []int{0} +} +func (m *InfoRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *InfoRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_InfoRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (dst *InfoRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_InfoRequest.Merge(dst, src) +} +func (m *InfoRequest) XXX_Size() int { + return m.Size() +} +func (m *InfoRequest) XXX_DiscardUnknown() { + xxx_messageInfo_InfoRequest.DiscardUnknown(m) } -func (m *InfoRequest) Reset() { *m = InfoRequest{} } -func (m *InfoRequest) String() string { return proto.CompactTextString(m) } -func (*InfoRequest) ProtoMessage() {} -func (*InfoRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{0} } +var xxx_messageInfo_InfoRequest proto.InternalMessageInfo type InfoResponse struct { - Labels []Label `protobuf:"bytes,1,rep,name=labels" json:"labels"` - MinTime int64 `protobuf:"varint,2,opt,name=min_time,json=minTime,proto3" json:"min_time,omitempty"` - MaxTime int64 `protobuf:"varint,3,opt,name=max_time,json=maxTime,proto3" json:"max_time,omitempty"` + Labels []Label `protobuf:"bytes,1,rep,name=labels" json:"labels"` + MinTime int64 `protobuf:"varint,2,opt,name=min_time,json=minTime,proto3" json:"min_time,omitempty"` + MaxTime int64 `protobuf:"varint,3,opt,name=max_time,json=maxTime,proto3" json:"max_time,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *InfoResponse) Reset() { *m = InfoResponse{} } +func (m *InfoResponse) String() string { return proto.CompactTextString(m) } +func (*InfoResponse) ProtoMessage() {} +func (*InfoResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_rpc_6ccafde20b200300, []int{1} +} +func (m *InfoResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *InfoResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_InfoResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (dst *InfoResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_InfoResponse.Merge(dst, src) +} +func (m *InfoResponse) 
XXX_Size() int { + return m.Size() +} +func (m *InfoResponse) XXX_DiscardUnknown() { + xxx_messageInfo_InfoResponse.DiscardUnknown(m) } -func (m *InfoResponse) Reset() { *m = InfoResponse{} } -func (m *InfoResponse) String() string { return proto.CompactTextString(m) } -func (*InfoResponse) ProtoMessage() {} -func (*InfoResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{1} } +var xxx_messageInfo_InfoResponse proto.InternalMessageInfo type SeriesRequest struct { - MinTime int64 `protobuf:"varint,1,opt,name=min_time,json=minTime,proto3" json:"min_time,omitempty"` - MaxTime int64 `protobuf:"varint,2,opt,name=max_time,json=maxTime,proto3" json:"max_time,omitempty"` - Matchers []LabelMatcher `protobuf:"bytes,3,rep,name=matchers" json:"matchers"` - MaxResolutionWindow int64 `protobuf:"varint,4,opt,name=max_resolution_window,json=maxResolutionWindow,proto3" json:"max_resolution_window,omitempty"` - Aggregates []Aggr `protobuf:"varint,5,rep,packed,name=aggregates,enum=thanos.Aggr" json:"aggregates,omitempty"` + MinTime int64 `protobuf:"varint,1,opt,name=min_time,json=minTime,proto3" json:"min_time,omitempty"` + MaxTime int64 `protobuf:"varint,2,opt,name=max_time,json=maxTime,proto3" json:"max_time,omitempty"` + Matchers []LabelMatcher `protobuf:"bytes,3,rep,name=matchers" json:"matchers"` + MaxResolutionWindow int64 `protobuf:"varint,4,opt,name=max_resolution_window,json=maxResolutionWindow,proto3" json:"max_resolution_window,omitempty"` + Aggregates []Aggr `protobuf:"varint,5,rep,packed,name=aggregates,enum=thanos.Aggr" json:"aggregates,omitempty"` + PartialResponseDisabled bool `protobuf:"varint,6,opt,name=partial_response_disabled,json=partialResponseDisabled,proto3" json:"partial_response_disabled,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *SeriesRequest) Reset() { *m = SeriesRequest{} } +func (m *SeriesRequest) String() string { return proto.CompactTextString(m) } +func (*SeriesRequest) ProtoMessage() {} +func (*SeriesRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_rpc_6ccafde20b200300, []int{2} +} +func (m *SeriesRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *SeriesRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_SeriesRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (dst *SeriesRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_SeriesRequest.Merge(dst, src) +} +func (m *SeriesRequest) XXX_Size() int { + return m.Size() +} +func (m *SeriesRequest) XXX_DiscardUnknown() { + xxx_messageInfo_SeriesRequest.DiscardUnknown(m) } -func (m *SeriesRequest) Reset() { *m = SeriesRequest{} } -func (m *SeriesRequest) String() string { return proto.CompactTextString(m) } -func (*SeriesRequest) ProtoMessage() {} -func (*SeriesRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{2} } +var xxx_messageInfo_SeriesRequest proto.InternalMessageInfo type SeriesResponse struct { // Types that are valid to be assigned to Result: // *SeriesResponse_Series // *SeriesResponse_Warning - Result isSeriesResponse_Result `protobuf_oneof:"result"` + Result isSeriesResponse_Result `protobuf_oneof:"result"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *SeriesResponse) Reset() { *m 
= SeriesResponse{} } +func (m *SeriesResponse) String() string { return proto.CompactTextString(m) } +func (*SeriesResponse) ProtoMessage() {} +func (*SeriesResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_rpc_6ccafde20b200300, []int{3} +} +func (m *SeriesResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *SeriesResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_SeriesResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (dst *SeriesResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_SeriesResponse.Merge(dst, src) +} +func (m *SeriesResponse) XXX_Size() int { + return m.Size() +} +func (m *SeriesResponse) XXX_DiscardUnknown() { + xxx_messageInfo_SeriesResponse.DiscardUnknown(m) } -func (m *SeriesResponse) Reset() { *m = SeriesResponse{} } -func (m *SeriesResponse) String() string { return proto.CompactTextString(m) } -func (*SeriesResponse) ProtoMessage() {} -func (*SeriesResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{3} } +var xxx_messageInfo_SeriesResponse proto.InternalMessageInfo type isSeriesResponse_Result interface { isSeriesResponse_Result() @@ -216,11 +321,11 @@ func _SeriesResponse_OneofSizer(msg proto.Message) (n int) { switch x := m.Result.(type) { case *SeriesResponse_Series: s := proto.Size(x.Series) - n += proto.SizeVarint(1<<3 | proto.WireBytes) + n += 1 // tag and wire n += proto.SizeVarint(uint64(s)) n += s case *SeriesResponse_Warning: - n += proto.SizeVarint(2<<3 | proto.WireBytes) + n += 1 // tag and wire n += proto.SizeVarint(uint64(len(x.Warning))) n += len(x.Warning) case nil: @@ -231,41 +336,167 @@ func _SeriesResponse_OneofSizer(msg proto.Message) (n int) { } type LabelNamesRequest struct { + PartialResponseDisabled bool `protobuf:"varint,1,opt,name=partial_response_disabled,json=partialResponseDisabled,proto3" json:"partial_response_disabled,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *LabelNamesRequest) Reset() { *m = LabelNamesRequest{} } +func (m *LabelNamesRequest) String() string { return proto.CompactTextString(m) } +func (*LabelNamesRequest) ProtoMessage() {} +func (*LabelNamesRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_rpc_6ccafde20b200300, []int{4} +} +func (m *LabelNamesRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *LabelNamesRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_LabelNamesRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (dst *LabelNamesRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_LabelNamesRequest.Merge(dst, src) +} +func (m *LabelNamesRequest) XXX_Size() int { + return m.Size() +} +func (m *LabelNamesRequest) XXX_DiscardUnknown() { + xxx_messageInfo_LabelNamesRequest.DiscardUnknown(m) } -func (m *LabelNamesRequest) Reset() { *m = LabelNamesRequest{} } -func (m *LabelNamesRequest) String() string { return proto.CompactTextString(m) } -func (*LabelNamesRequest) ProtoMessage() {} -func (*LabelNamesRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{4} } +var xxx_messageInfo_LabelNamesRequest proto.InternalMessageInfo type LabelNamesResponse 
struct { - Names []string `protobuf:"bytes,1,rep,name=names" json:"names,omitempty"` - Warnings []string `protobuf:"bytes,2,rep,name=warnings" json:"warnings,omitempty"` + Names []string `protobuf:"bytes,1,rep,name=names" json:"names,omitempty"` + Warnings []string `protobuf:"bytes,2,rep,name=warnings" json:"warnings,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *LabelNamesResponse) Reset() { *m = LabelNamesResponse{} } +func (m *LabelNamesResponse) String() string { return proto.CompactTextString(m) } +func (*LabelNamesResponse) ProtoMessage() {} +func (*LabelNamesResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_rpc_6ccafde20b200300, []int{5} +} +func (m *LabelNamesResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *LabelNamesResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_LabelNamesResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (dst *LabelNamesResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_LabelNamesResponse.Merge(dst, src) +} +func (m *LabelNamesResponse) XXX_Size() int { + return m.Size() +} +func (m *LabelNamesResponse) XXX_DiscardUnknown() { + xxx_messageInfo_LabelNamesResponse.DiscardUnknown(m) } -func (m *LabelNamesResponse) Reset() { *m = LabelNamesResponse{} } -func (m *LabelNamesResponse) String() string { return proto.CompactTextString(m) } -func (*LabelNamesResponse) ProtoMessage() {} -func (*LabelNamesResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{5} } +var xxx_messageInfo_LabelNamesResponse proto.InternalMessageInfo type LabelValuesRequest struct { - Label string `protobuf:"bytes,1,opt,name=label,proto3" json:"label,omitempty"` + Label string `protobuf:"bytes,1,opt,name=label,proto3" json:"label,omitempty"` + PartialResponseDisabled bool `protobuf:"varint,2,opt,name=partial_response_disabled,json=partialResponseDisabled,proto3" json:"partial_response_disabled,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *LabelValuesRequest) Reset() { *m = LabelValuesRequest{} } +func (m *LabelValuesRequest) String() string { return proto.CompactTextString(m) } +func (*LabelValuesRequest) ProtoMessage() {} +func (*LabelValuesRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_rpc_6ccafde20b200300, []int{6} +} +func (m *LabelValuesRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *LabelValuesRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_LabelValuesRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (dst *LabelValuesRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_LabelValuesRequest.Merge(dst, src) +} +func (m *LabelValuesRequest) XXX_Size() int { + return m.Size() +} +func (m *LabelValuesRequest) XXX_DiscardUnknown() { + xxx_messageInfo_LabelValuesRequest.DiscardUnknown(m) } -func (m *LabelValuesRequest) Reset() { *m = LabelValuesRequest{} } -func (m *LabelValuesRequest) String() string { return proto.CompactTextString(m) } -func (*LabelValuesRequest) ProtoMessage() {} -func (*LabelValuesRequest) Descriptor() ([]byte, 
[]int) { return fileDescriptorRpc, []int{6} } +var xxx_messageInfo_LabelValuesRequest proto.InternalMessageInfo type LabelValuesResponse struct { - Values []string `protobuf:"bytes,1,rep,name=values" json:"values,omitempty"` - Warnings []string `protobuf:"bytes,2,rep,name=warnings" json:"warnings,omitempty"` + Values []string `protobuf:"bytes,1,rep,name=values" json:"values,omitempty"` + Warnings []string `protobuf:"bytes,2,rep,name=warnings" json:"warnings,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *LabelValuesResponse) Reset() { *m = LabelValuesResponse{} } +func (m *LabelValuesResponse) String() string { return proto.CompactTextString(m) } +func (*LabelValuesResponse) ProtoMessage() {} +func (*LabelValuesResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_rpc_6ccafde20b200300, []int{7} +} +func (m *LabelValuesResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *LabelValuesResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_LabelValuesResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (dst *LabelValuesResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_LabelValuesResponse.Merge(dst, src) +} +func (m *LabelValuesResponse) XXX_Size() int { + return m.Size() +} +func (m *LabelValuesResponse) XXX_DiscardUnknown() { + xxx_messageInfo_LabelValuesResponse.DiscardUnknown(m) } -func (m *LabelValuesResponse) Reset() { *m = LabelValuesResponse{} } -func (m *LabelValuesResponse) String() string { return proto.CompactTextString(m) } -func (*LabelValuesResponse) ProtoMessage() {} -func (*LabelValuesResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{7} } +var xxx_messageInfo_LabelValuesResponse proto.InternalMessageInfo func init() { proto.RegisterType((*InfoRequest)(nil), "thanos.InfoRequest") @@ -290,10 +521,15 @@ const _ = grpc.SupportPackageIsVersion4 // Client API for Store service type StoreClient interface { - // / Info returns meta information about a store e.g labels that makes that store unique. + // / Info returns meta information about a store e.g labels that makes that store unique as well as time range that is + // / available. Info(ctx context.Context, in *InfoRequest, opts ...grpc.CallOption) (*InfoResponse, error) + // / Series streams each Series (Labels and chunk/downsampling chunk) for given label matchers and time range. Series(ctx context.Context, in *SeriesRequest, opts ...grpc.CallOption) (Store_SeriesClient, error) + // / LabelNames returns all label names that is available. + // / Currently unimplemented in all Thanos implementations, because Query API does not implement this either. LabelNames(ctx context.Context, in *LabelNamesRequest, opts ...grpc.CallOption) (*LabelNamesResponse, error) + // / LabelValues returns all label values for given label name. LabelValues(ctx context.Context, in *LabelValuesRequest, opts ...grpc.CallOption) (*LabelValuesResponse, error) } @@ -307,7 +543,7 @@ func NewStoreClient(cc *grpc.ClientConn) StoreClient { func (c *storeClient) Info(ctx context.Context, in *InfoRequest, opts ...grpc.CallOption) (*InfoResponse, error) { out := new(InfoResponse) - err := grpc.Invoke(ctx, "/thanos.Store/Info", in, out, c.cc, opts...) + err := c.cc.Invoke(ctx, "/thanos.Store/Info", in, out, opts...) 
if err != nil { return nil, err } @@ -315,7 +551,7 @@ func (c *storeClient) Info(ctx context.Context, in *InfoRequest, opts ...grpc.Ca } func (c *storeClient) Series(ctx context.Context, in *SeriesRequest, opts ...grpc.CallOption) (Store_SeriesClient, error) { - stream, err := grpc.NewClientStream(ctx, &_Store_serviceDesc.Streams[0], c.cc, "/thanos.Store/Series", opts...) + stream, err := c.cc.NewStream(ctx, &_Store_serviceDesc.Streams[0], "/thanos.Store/Series", opts...) if err != nil { return nil, err } @@ -348,7 +584,7 @@ func (x *storeSeriesClient) Recv() (*SeriesResponse, error) { func (c *storeClient) LabelNames(ctx context.Context, in *LabelNamesRequest, opts ...grpc.CallOption) (*LabelNamesResponse, error) { out := new(LabelNamesResponse) - err := grpc.Invoke(ctx, "/thanos.Store/LabelNames", in, out, c.cc, opts...) + err := c.cc.Invoke(ctx, "/thanos.Store/LabelNames", in, out, opts...) if err != nil { return nil, err } @@ -357,7 +593,7 @@ func (c *storeClient) LabelNames(ctx context.Context, in *LabelNamesRequest, opt func (c *storeClient) LabelValues(ctx context.Context, in *LabelValuesRequest, opts ...grpc.CallOption) (*LabelValuesResponse, error) { out := new(LabelValuesResponse) - err := grpc.Invoke(ctx, "/thanos.Store/LabelValues", in, out, c.cc, opts...) + err := c.cc.Invoke(ctx, "/thanos.Store/LabelValues", in, out, opts...) if err != nil { return nil, err } @@ -367,10 +603,15 @@ func (c *storeClient) LabelValues(ctx context.Context, in *LabelValuesRequest, o // Server API for Store service type StoreServer interface { - // / Info returns meta information about a store e.g labels that makes that store unique. + // / Info returns meta information about a store e.g labels that makes that store unique as well as time range that is + // / available. Info(context.Context, *InfoRequest) (*InfoResponse, error) + // / Series streams each Series (Labels and chunk/downsampling chunk) for given label matchers and time range. Series(*SeriesRequest, Store_SeriesServer) error + // / LabelNames returns all label names that is available. + // / Currently unimplemented in all Thanos implementations, because Query API does not implement this either. LabelNames(context.Context, *LabelNamesRequest) (*LabelNamesResponse, error) + // / LabelValues returns all label values for given label name. 
LabelValues(context.Context, *LabelValuesRequest) (*LabelValuesResponse, error) } @@ -495,6 +736,9 @@ func (m *InfoRequest) MarshalTo(dAtA []byte) (int, error) { _ = i var l int _ = l + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } return i, nil } @@ -535,6 +779,9 @@ func (m *InfoResponse) MarshalTo(dAtA []byte) (int, error) { i++ i = encodeVarintRpc(dAtA, i, uint64(m.MaxTime)) } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } return i, nil } @@ -597,6 +844,19 @@ func (m *SeriesRequest) MarshalTo(dAtA []byte) (int, error) { i = encodeVarintRpc(dAtA, i, uint64(j1)) i += copy(dAtA[i:], dAtA2[:j1]) } + if m.PartialResponseDisabled { + dAtA[i] = 0x30 + i++ + if m.PartialResponseDisabled { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } return i, nil } @@ -622,6 +882,9 @@ func (m *SeriesResponse) MarshalTo(dAtA []byte) (int, error) { } i += nn3 } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } return i, nil } @@ -662,6 +925,19 @@ func (m *LabelNamesRequest) MarshalTo(dAtA []byte) (int, error) { _ = i var l int _ = l + if m.PartialResponseDisabled { + dAtA[i] = 0x8 + i++ + if m.PartialResponseDisabled { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } return i, nil } @@ -710,6 +986,9 @@ func (m *LabelNamesResponse) MarshalTo(dAtA []byte) (int, error) { i += copy(dAtA[i:], s) } } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } return i, nil } @@ -734,6 +1013,19 @@ func (m *LabelValuesRequest) MarshalTo(dAtA []byte) (int, error) { i = encodeVarintRpc(dAtA, i, uint64(len(m.Label))) i += copy(dAtA[i:], m.Label) } + if m.PartialResponseDisabled { + dAtA[i] = 0x10 + i++ + if m.PartialResponseDisabled { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } return i, nil } @@ -782,6 +1074,9 @@ func (m *LabelValuesResponse) MarshalTo(dAtA []byte) (int, error) { i += copy(dAtA[i:], s) } } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } return i, nil } @@ -797,6 +1092,9 @@ func encodeVarintRpc(dAtA []byte, offset int, v uint64) int { func (m *InfoRequest) Size() (n int) { var l int _ = l + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } return n } @@ -815,6 +1113,9 @@ func (m *InfoResponse) Size() (n int) { if m.MaxTime != 0 { n += 1 + sovRpc(uint64(m.MaxTime)) } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } return n } @@ -843,6 +1144,12 @@ func (m *SeriesRequest) Size() (n int) { } n += 1 + sovRpc(uint64(l)) + l } + if m.PartialResponseDisabled { + n += 2 + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } return n } @@ -852,6 +1159,9 @@ func (m *SeriesResponse) Size() (n int) { if m.Result != nil { n += m.Result.Size() } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } return n } @@ -874,6 +1184,12 @@ func (m *SeriesResponse_Warning) Size() (n int) { func (m *LabelNamesRequest) Size() (n int) { var l int _ = l + if m.PartialResponseDisabled { + n += 2 + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } return n } @@ -892,6 +1208,9 @@ func (m *LabelNamesResponse) Size() (n int) { n += 1 + l + sovRpc(uint64(l)) } } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } return n } @@ -902,6 +1221,12 @@ 
func (m *LabelValuesRequest) Size() (n int) { if l > 0 { n += 1 + l + sovRpc(uint64(l)) } + if m.PartialResponseDisabled { + n += 2 + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } return n } @@ -920,6 +1245,9 @@ func (m *LabelValuesResponse) Size() (n int) { n += 1 + l + sovRpc(uint64(l)) } } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } return n } @@ -977,6 +1305,7 @@ func (m *InfoRequest) Unmarshal(dAtA []byte) error { if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) iNdEx += skippy } } @@ -1096,6 +1425,7 @@ func (m *InfoResponse) Unmarshal(dAtA []byte) error { if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) iNdEx += skippy } } @@ -1284,6 +1614,26 @@ func (m *SeriesRequest) Unmarshal(dAtA []byte) error { } else { return fmt.Errorf("proto: wrong wireType = %d for field Aggregates", wireType) } + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field PartialResponseDisabled", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.PartialResponseDisabled = bool(v != 0) default: iNdEx = preIndex skippy, err := skipRpc(dAtA[iNdEx:]) @@ -1296,6 +1646,7 @@ func (m *SeriesRequest) Unmarshal(dAtA []byte) error { if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) iNdEx += skippy } } @@ -1407,6 +1758,7 @@ func (m *SeriesResponse) Unmarshal(dAtA []byte) error { if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) iNdEx += skippy } } @@ -1445,6 +1797,26 @@ func (m *LabelNamesRequest) Unmarshal(dAtA []byte) error { return fmt.Errorf("proto: LabelNamesRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field PartialResponseDisabled", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.PartialResponseDisabled = bool(v != 0) default: iNdEx = preIndex skippy, err := skipRpc(dAtA[iNdEx:]) @@ -1457,6 +1829,7 @@ func (m *LabelNamesRequest) Unmarshal(dAtA []byte) error { if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) iNdEx += skippy } } @@ -1565,6 +1938,7 @@ func (m *LabelNamesResponse) Unmarshal(dAtA []byte) error { if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
iNdEx += skippy } } @@ -1632,6 +2006,26 @@ func (m *LabelValuesRequest) Unmarshal(dAtA []byte) error { } m.Label = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field PartialResponseDisabled", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.PartialResponseDisabled = bool(v != 0) default: iNdEx = preIndex skippy, err := skipRpc(dAtA[iNdEx:]) @@ -1644,6 +2038,7 @@ func (m *LabelValuesRequest) Unmarshal(dAtA []byte) error { if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) iNdEx += skippy } } @@ -1752,6 +2147,7 @@ func (m *LabelValuesResponse) Unmarshal(dAtA []byte) error { if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) iNdEx += skippy } } @@ -1866,43 +2262,46 @@ var ( ErrIntOverflowRpc = fmt.Errorf("proto: integer overflow") ) -func init() { proto.RegisterFile("rpc.proto", fileDescriptorRpc) } - -var fileDescriptorRpc = []byte{ - // 555 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x7c, 0x54, 0x5d, 0x6f, 0xd2, 0x50, - 0x18, 0xa6, 0x94, 0x16, 0x78, 0x3b, 0x48, 0x3d, 0xb0, 0xa5, 0xd4, 0x04, 0x49, 0xaf, 0xc8, 0x34, - 0xa8, 0x98, 0x98, 0x78, 0x09, 0x8b, 0xcb, 0x48, 0x04, 0x93, 0xc3, 0xe6, 0x8c, 0x37, 0xb3, 0xcc, - 0x63, 0xd7, 0x84, 0xf6, 0xb0, 0x9e, 0x22, 0x78, 0xeb, 0xaf, 0xe3, 0xd2, 0x5f, 0xe0, 0x07, 0xbf, - 0xc4, 0x9c, 0x8f, 0x32, 0x6a, 0xa6, 0x77, 0xef, 0xfb, 0x3c, 0x6f, 0x9f, 0xf7, 0xe3, 0x39, 0x29, - 0x54, 0x93, 0xc5, 0x75, 0x6f, 0x91, 0xd0, 0x94, 0x22, 0x33, 0xbd, 0xf1, 0x63, 0xca, 0x5c, 0x2b, - 0xfd, 0xba, 0x20, 0x4c, 0x82, 0x6e, 0x33, 0xa0, 0x01, 0x15, 0xe1, 0x53, 0x1e, 0x49, 0xd4, 0xab, - 0x81, 0x35, 0x8a, 0x3f, 0x53, 0x4c, 0x6e, 0x97, 0x84, 0xa5, 0xde, 0x2d, 0x1c, 0xc8, 0x94, 0x2d, - 0x68, 0xcc, 0x08, 0x7a, 0x0c, 0xe6, 0xdc, 0x9f, 0x91, 0x39, 0x73, 0xb4, 0x8e, 0xde, 0xb5, 0xfa, - 0xb5, 0x9e, 0x94, 0xee, 0xbd, 0xe1, 0xe8, 0xb0, 0xb4, 0xf9, 0xf1, 0xa8, 0x80, 0x55, 0x09, 0x6a, - 0x41, 0x25, 0x0a, 0xe3, 0xab, 0x34, 0x8c, 0x88, 0x53, 0xec, 0x68, 0x5d, 0x1d, 0x97, 0xa3, 0x30, - 0x3e, 0x0f, 0x23, 0x22, 0x28, 0x7f, 0x2d, 0x29, 0x5d, 0x51, 0xfe, 0x9a, 0x53, 0xde, 0x4f, 0x0d, - 0x6a, 0x53, 0x92, 0x84, 0x84, 0xa9, 0x21, 0x72, 0x3a, 0xda, 0xbf, 0x75, 0x8a, 0x39, 0x1d, 0xf4, - 0x92, 0x53, 0xe9, 0xf5, 0x0d, 0x49, 0x98, 0xa3, 0x8b, 0x61, 0x9b, 0xb9, 0x61, 0xc7, 0x92, 0x54, - 0x33, 0xef, 0x6a, 0x51, 0x1f, 0x0e, 0xb9, 0x64, 0x42, 0x18, 0x9d, 0x2f, 0xd3, 0x90, 0xc6, 0x57, - 0xab, 0x30, 0xfe, 0x44, 0x57, 0x4e, 0x49, 0xe8, 0x37, 0x22, 0x7f, 0x8d, 0x77, 0xdc, 0xa5, 0xa0, - 0xd0, 0x13, 0x00, 0x3f, 0x08, 0x12, 0x12, 0xf8, 0x29, 0x61, 0x8e, 0xd1, 0xd1, 0xbb, 0xf5, 0xfe, - 0x41, 0xd6, 0x6d, 0x10, 0x04, 0x09, 0xde, 0xe3, 0xbd, 0x8f, 0x50, 0xcf, 0x16, 0x54, 0x67, 0xed, - 0x82, 0xc9, 0x04, 0x22, 0xf6, 0xb3, 0xfa, 0xf5, 0xec, 0x5b, 0x59, 0x77, 0x56, 0xc0, 0x8a, 0x47, - 0x2e, 0x94, 0x57, 0x7e, 0x12, 0x87, 0x71, 0x20, 0xf6, 0xad, 0x9e, 0x15, 0x70, 0x06, 0x0c, 0x2b, - 0x60, 0x26, 0x84, 0x2d, 0xe7, 0xa9, 0xd7, 0x80, 0x07, 0x62, 0xc7, 0x89, 0x1f, 0xed, 0xce, 0xe8, - 0x9d, 0x02, 0xda, 0x07, 0x55, 0xeb, 0x26, 0x18, 0x31, 0x07, 0x84, 0xa1, 0x55, 0x2c, 0x13, 0xe4, - 0x42, 0x45, 0xa9, 0x32, 0xa7, 0x28, 
0x88, 0x5d, 0xee, 0x1d, 0x2b, 0x9d, 0x77, 0xfe, 0x7c, 0x79, - 0x67, 0x52, 0x13, 0x0c, 0x61, 0xbb, 0xd8, 0xa0, 0x8a, 0x65, 0xe2, 0x8d, 0xa0, 0x91, 0xab, 0x55, - 0x4d, 0x8f, 0xc0, 0xfc, 0x22, 0x10, 0xd5, 0x55, 0x65, 0xff, 0x6b, 0x7b, 0x3c, 0x84, 0x12, 0xbf, - 0x24, 0x2a, 0x83, 0x8e, 0x07, 0x97, 0x76, 0x01, 0x55, 0xc1, 0x38, 0x79, 0x7b, 0x31, 0x39, 0xb7, - 0x35, 0x8e, 0x4d, 0x2f, 0xc6, 0x76, 0x91, 0x07, 0xe3, 0xd1, 0xc4, 0xd6, 0x45, 0x30, 0x78, 0x6f, - 0x97, 0x90, 0x05, 0x65, 0x51, 0xf5, 0x1a, 0xdb, 0x46, 0xff, 0x5b, 0x11, 0x8c, 0x69, 0x4a, 0x13, - 0x82, 0x9e, 0x43, 0x89, 0x3f, 0x6c, 0xd4, 0xc8, 0x2e, 0xbd, 0xf7, 0xea, 0xdd, 0x66, 0x1e, 0x54, - 0x43, 0xbf, 0x02, 0x53, 0xda, 0x81, 0x0e, 0xf3, 0xf6, 0x64, 0x9f, 0x1d, 0xfd, 0x0d, 0xcb, 0x0f, - 0x9f, 0x69, 0xe8, 0x04, 0xe0, 0xee, 0xf4, 0xa8, 0x95, 0x7b, 0x87, 0xfb, 0x1e, 0xb9, 0xee, 0x7d, - 0x94, 0xea, 0x7f, 0x0a, 0xd6, 0xde, 0x2d, 0x51, 0xbe, 0x34, 0x67, 0x86, 0xfb, 0xf0, 0x5e, 0x4e, - 0xea, 0x0c, 0x5b, 0x9b, 0xdf, 0xed, 0xc2, 0x66, 0xdb, 0xd6, 0xbe, 0x6f, 0xdb, 0xda, 0xaf, 0x6d, - 0x5b, 0xfb, 0x50, 0x66, 0xfc, 0x26, 0x8b, 0xd9, 0xcc, 0x14, 0x3f, 0x81, 0x17, 0x7f, 0x02, 0x00, - 0x00, 0xff, 0xff, 0x4c, 0xf3, 0x8b, 0x29, 0x3c, 0x04, 0x00, 0x00, +func init() { proto.RegisterFile("rpc.proto", fileDescriptor_rpc_6ccafde20b200300) } + +var fileDescriptor_rpc_6ccafde20b200300 = []byte{ + // 602 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x7c, 0x54, 0xcd, 0x6e, 0xd3, 0x40, + 0x10, 0x8e, 0xed, 0xd8, 0x49, 0xc6, 0x6d, 0x65, 0xb6, 0x69, 0x71, 0x8c, 0x14, 0x22, 0x9f, 0x22, + 0x40, 0x05, 0x82, 0x84, 0x04, 0xb7, 0xa6, 0x50, 0xb5, 0x12, 0x6d, 0xa5, 0x6d, 0x4b, 0x11, 0x97, + 0xb2, 0x69, 0xb7, 0xae, 0x25, 0xff, 0xd5, 0xeb, 0xd0, 0x72, 0xe5, 0x35, 0x78, 0xa1, 0x1c, 0x79, + 0x02, 0x04, 0x79, 0x12, 0xb4, 0x3f, 0x4e, 0x62, 0x54, 0x72, 0x9b, 0xf9, 0xbe, 0xf1, 0x37, 0x9f, + 0x66, 0x66, 0x0d, 0xad, 0x3c, 0xbb, 0xd8, 0xca, 0xf2, 0xb4, 0x48, 0x91, 0x55, 0x5c, 0x93, 0x24, + 0x65, 0x9e, 0x5d, 0x7c, 0xcb, 0x28, 0x93, 0xa0, 0xd7, 0x0e, 0xd2, 0x20, 0x15, 0xe1, 0x73, 0x1e, + 0x49, 0xd4, 0x5f, 0x05, 0x7b, 0x3f, 0xb9, 0x4a, 0x31, 0xbd, 0x19, 0x53, 0x56, 0xf8, 0x37, 0xb0, + 0x22, 0x53, 0x96, 0xa5, 0x09, 0xa3, 0xe8, 0x29, 0x58, 0x11, 0x19, 0xd1, 0x88, 0xb9, 0x5a, 0xcf, + 0xe8, 0xdb, 0x83, 0xd5, 0x2d, 0x29, 0xbd, 0xf5, 0x81, 0xa3, 0xc3, 0xfa, 0xe4, 0xd7, 0xe3, 0x1a, + 0x56, 0x25, 0xa8, 0x03, 0xcd, 0x38, 0x4c, 0xce, 0x8b, 0x30, 0xa6, 0xae, 0xde, 0xd3, 0xfa, 0x06, + 0x6e, 0xc4, 0x61, 0x72, 0x12, 0xc6, 0x54, 0x50, 0xe4, 0x4e, 0x52, 0x86, 0xa2, 0xc8, 0x1d, 0xa7, + 0xfc, 0x1f, 0x3a, 0xac, 0x1e, 0xd3, 0x3c, 0xa4, 0x4c, 0x99, 0xa8, 0xe8, 0x68, 0xff, 0xd7, 0xd1, + 0x2b, 0x3a, 0xe8, 0x35, 0xa7, 0x8a, 0x8b, 0x6b, 0x9a, 0x33, 0xd7, 0x10, 0x66, 0xdb, 0x15, 0xb3, + 0x07, 0x92, 0x54, 0x9e, 0x67, 0xb5, 0x68, 0x00, 0x1b, 0x5c, 0x32, 0xa7, 0x2c, 0x8d, 0xc6, 0x45, + 0x98, 0x26, 0xe7, 0xb7, 0x61, 0x72, 0x99, 0xde, 0xba, 0x75, 0xa1, 0xbf, 0x1e, 0x93, 0x3b, 0x3c, + 0xe3, 0xce, 0x04, 0x85, 0x9e, 0x01, 0x90, 0x20, 0xc8, 0x69, 0x40, 0x0a, 0xca, 0x5c, 0xb3, 0x67, + 0xf4, 0xd7, 0x06, 0x2b, 0x65, 0xb7, 0xed, 0x20, 0xc8, 0xf1, 0x02, 0x8f, 0xde, 0x42, 0x27, 0x23, + 0x79, 0x11, 0x92, 0x88, 0x77, 0x11, 0x83, 0x3d, 0xbf, 0x0c, 0x19, 0x19, 0x45, 0xf4, 0xd2, 0xb5, + 0x7a, 0x5a, 0xbf, 0x89, 0x1f, 0xaa, 0x82, 0x72, 0xf0, 0xef, 0x14, 0xed, 0x7f, 0x81, 0xb5, 0x72, + 0x38, 0x6a, 0x25, 0x7d, 0xb0, 0x98, 0x40, 0xc4, 0x6c, 0xec, 0xc1, 0x5a, 0xd9, 0x57, 0xd6, 0xed, + 0xd5, 0xb0, 0xe2, 0x91, 0x07, 0x8d, 0x5b, 0x92, 0x27, 0x61, 0x12, 0x88, 0x59, 0xb5, 0xf6, 0x6a, + 
0xb8, 0x04, 0x86, 0x4d, 0xb0, 0x72, 0xca, 0xc6, 0x51, 0xe1, 0x1f, 0xc1, 0x03, 0x31, 0x9f, 0x43, + 0x12, 0xcf, 0x57, 0xb0, 0xd4, 0xb2, 0xb6, 0xdc, 0xf2, 0x2e, 0xa0, 0x45, 0x41, 0x65, 0xbb, 0x0d, + 0x66, 0xc2, 0x01, 0x71, 0x48, 0x2d, 0x2c, 0x13, 0xe4, 0x41, 0x53, 0x39, 0x62, 0xae, 0x2e, 0x88, + 0x59, 0xee, 0x5f, 0x29, 0x9d, 0x8f, 0x24, 0x1a, 0xcf, 0x9d, 0xb5, 0xc1, 0x14, 0xe7, 0x26, 0x5c, + 0xb4, 0xb0, 0x4c, 0x96, 0xfb, 0xd5, 0x97, 0xfb, 0xdd, 0x87, 0xf5, 0x4a, 0x1f, 0x65, 0x78, 0x13, + 0xac, 0xaf, 0x02, 0x51, 0x8e, 0x55, 0xb6, 0xcc, 0xf2, 0x93, 0x21, 0xd4, 0xf9, 0xf6, 0x51, 0x03, + 0x0c, 0xbc, 0x7d, 0xe6, 0xd4, 0x50, 0x0b, 0xcc, 0x9d, 0xa3, 0xd3, 0xc3, 0x13, 0x47, 0xe3, 0xd8, + 0xf1, 0xe9, 0x81, 0xa3, 0xf3, 0xe0, 0x60, 0xff, 0xd0, 0x31, 0x44, 0xb0, 0xfd, 0xc9, 0xa9, 0x23, + 0x1b, 0x1a, 0xa2, 0xea, 0x3d, 0x76, 0xcc, 0xc1, 0x77, 0x1d, 0xcc, 0xe3, 0x22, 0xcd, 0x29, 0x7a, + 0x09, 0x75, 0xfe, 0x18, 0xd1, 0x7a, 0xb9, 0xe1, 0x85, 0x97, 0xea, 0xb5, 0xab, 0xa0, 0x32, 0xfd, + 0x06, 0x2c, 0x79, 0x06, 0x68, 0xa3, 0x7a, 0x16, 0xe5, 0x67, 0x9b, 0xff, 0xc2, 0xf2, 0xc3, 0x17, + 0x1a, 0xda, 0x01, 0x98, 0xaf, 0x0d, 0x75, 0x2a, 0x6f, 0x67, 0xf1, 0x36, 0x3c, 0xef, 0x3e, 0x4a, + 0xf5, 0xdf, 0x05, 0x7b, 0x61, 0x96, 0xa8, 0x5a, 0x5a, 0x59, 0xa4, 0xf7, 0xe8, 0x5e, 0x4e, 0xea, + 0x0c, 0x3b, 0x93, 0x3f, 0xdd, 0xda, 0x64, 0xda, 0xd5, 0x7e, 0x4e, 0xbb, 0xda, 0xef, 0x69, 0x57, + 0xfb, 0xdc, 0x60, 0x7c, 0x26, 0xd9, 0x68, 0x64, 0x89, 0x1f, 0xd7, 0xab, 0xbf, 0x01, 0x00, 0x00, + 0xff, 0xff, 0x60, 0xf8, 0xf1, 0xea, 0xf0, 0x04, 0x00, 0x00, } diff --git a/vendor/github.com/improbable-eng/thanos/pkg/store/storepb/rpc.proto b/vendor/github.com/improbable-eng/thanos/pkg/store/storepb/rpc.proto index 3a9a2e5..cc95738 100644 --- a/vendor/github.com/improbable-eng/thanos/pkg/store/storepb/rpc.proto +++ b/vendor/github.com/improbable-eng/thanos/pkg/store/storepb/rpc.proto @@ -11,14 +11,24 @@ option (gogoproto.marshaler_all) = true; option (gogoproto.unmarshaler_all) = true; option (gogoproto.goproto_getters_all) = false; +/// Store reprents API against instance that stores XOR encoded values with label set metadata (e.g Prometheus metrics). +/// +/// Partial Response is supported unless `partial_response_disabled` is true. When disabled any error that will result +/// in partial data returned (e.g missing chunk series because of underlying storeAPI is temporarily not available) is +/// failing the request. service Store { - /// Info returns meta information about a store e.g labels that makes that store unique. + /// Info returns meta information about a store e.g labels that makes that store unique as well as time range that is + /// available. rpc Info(InfoRequest) returns (InfoResponse); + /// Series streams each Series (Labels and chunk/downsampling chunk) for given label matchers and time range. rpc Series(SeriesRequest) returns (stream SeriesResponse); + /// LabelNames returns all label names that is available. + /// Currently unimplemented in all Thanos implementations, because Query API does not implement this either. rpc LabelNames(LabelNamesRequest) returns (LabelNamesResponse); + /// LabelValues returns all label values for given label name. 
rpc LabelValues(LabelValuesRequest) returns (LabelValuesResponse); } @@ -38,6 +48,8 @@ message SeriesRequest { int64 max_resolution_window = 4; repeated Aggr aggregates = 5; + + bool partial_response_disabled = 6; } enum Aggr { @@ -52,11 +64,15 @@ enum Aggr { message SeriesResponse { oneof result { Series series = 1; + + /// warning is considered an information piece in place of series for warning purposes. + /// It is used to warn query customer about suspicious cases or partial response (if enabled). string warning = 2; } } message LabelNamesRequest { + bool partial_response_disabled = 1; } message LabelNamesResponse { @@ -66,6 +82,8 @@ message LabelNamesResponse { message LabelValuesRequest { string label = 1; + + bool partial_response_disabled = 2; } message LabelValuesResponse { diff --git a/vendor/github.com/improbable-eng/thanos/pkg/store/storepb/types.pb.go b/vendor/github.com/improbable-eng/thanos/pkg/store/storepb/types.pb.go index a05bbeb..0f6767b 100644 --- a/vendor/github.com/improbable-eng/thanos/pkg/store/storepb/types.pb.go +++ b/vendor/github.com/improbable-eng/thanos/pkg/store/storepb/types.pb.go @@ -15,6 +15,12 @@ var _ = proto.Marshal var _ = fmt.Errorf var _ = math.Inf +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package + type Chunk_Encoding int32 const ( @@ -31,7 +37,9 @@ var Chunk_Encoding_value = map[string]int32{ func (x Chunk_Encoding) String() string { return proto.EnumName(Chunk_Encoding_name, int32(x)) } -func (Chunk_Encoding) EnumDescriptor() ([]byte, []int) { return fileDescriptorTypes, []int{1, 0} } +func (Chunk_Encoding) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_types_60e135d4a4f03620, []int{1, 0} +} type LabelMatcher_Type int32 @@ -58,65 +66,222 @@ var LabelMatcher_Type_value = map[string]int32{ func (x LabelMatcher_Type) String() string { return proto.EnumName(LabelMatcher_Type_name, int32(x)) } -func (LabelMatcher_Type) EnumDescriptor() ([]byte, []int) { return fileDescriptorTypes, []int{4, 0} } +func (LabelMatcher_Type) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_types_60e135d4a4f03620, []int{4, 0} +} type Label struct { - Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` - Value string `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + Value string `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Label) Reset() { *m = Label{} } +func (m *Label) String() string { return proto.CompactTextString(m) } +func (*Label) ProtoMessage() {} +func (*Label) Descriptor() ([]byte, []int) { + return fileDescriptor_types_60e135d4a4f03620, []int{0} +} +func (m *Label) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Label) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Label.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (dst *Label) XXX_Merge(src proto.Message) { + xxx_messageInfo_Label.Merge(dst, src) +} +func (m 
*Label) XXX_Size() int { + return m.Size() +} +func (m *Label) XXX_DiscardUnknown() { + xxx_messageInfo_Label.DiscardUnknown(m) } -func (m *Label) Reset() { *m = Label{} } -func (m *Label) String() string { return proto.CompactTextString(m) } -func (*Label) ProtoMessage() {} -func (*Label) Descriptor() ([]byte, []int) { return fileDescriptorTypes, []int{0} } +var xxx_messageInfo_Label proto.InternalMessageInfo type Chunk struct { - Type Chunk_Encoding `protobuf:"varint,1,opt,name=type,proto3,enum=thanos.Chunk_Encoding" json:"type,omitempty"` - Data []byte `protobuf:"bytes,2,opt,name=data,proto3" json:"data,omitempty"` + Type Chunk_Encoding `protobuf:"varint,1,opt,name=type,proto3,enum=thanos.Chunk_Encoding" json:"type,omitempty"` + Data []byte `protobuf:"bytes,2,opt,name=data,proto3" json:"data,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Chunk) Reset() { *m = Chunk{} } +func (m *Chunk) String() string { return proto.CompactTextString(m) } +func (*Chunk) ProtoMessage() {} +func (*Chunk) Descriptor() ([]byte, []int) { + return fileDescriptor_types_60e135d4a4f03620, []int{1} +} +func (m *Chunk) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Chunk) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Chunk.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (dst *Chunk) XXX_Merge(src proto.Message) { + xxx_messageInfo_Chunk.Merge(dst, src) +} +func (m *Chunk) XXX_Size() int { + return m.Size() +} +func (m *Chunk) XXX_DiscardUnknown() { + xxx_messageInfo_Chunk.DiscardUnknown(m) } -func (m *Chunk) Reset() { *m = Chunk{} } -func (m *Chunk) String() string { return proto.CompactTextString(m) } -func (*Chunk) ProtoMessage() {} -func (*Chunk) Descriptor() ([]byte, []int) { return fileDescriptorTypes, []int{1} } +var xxx_messageInfo_Chunk proto.InternalMessageInfo type Series struct { - Labels []Label `protobuf:"bytes,1,rep,name=labels" json:"labels"` - Chunks []AggrChunk `protobuf:"bytes,2,rep,name=chunks" json:"chunks"` + Labels []Label `protobuf:"bytes,1,rep,name=labels" json:"labels"` + Chunks []AggrChunk `protobuf:"bytes,2,rep,name=chunks" json:"chunks"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } -func (m *Series) Reset() { *m = Series{} } -func (m *Series) String() string { return proto.CompactTextString(m) } -func (*Series) ProtoMessage() {} -func (*Series) Descriptor() ([]byte, []int) { return fileDescriptorTypes, []int{2} } +func (m *Series) Reset() { *m = Series{} } +func (m *Series) String() string { return proto.CompactTextString(m) } +func (*Series) ProtoMessage() {} +func (*Series) Descriptor() ([]byte, []int) { + return fileDescriptor_types_60e135d4a4f03620, []int{2} +} +func (m *Series) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Series) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Series.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (dst *Series) XXX_Merge(src proto.Message) { + xxx_messageInfo_Series.Merge(dst, src) +} +func (m *Series) XXX_Size() int { + return m.Size() +} +func (m *Series) XXX_DiscardUnknown() { + 
xxx_messageInfo_Series.DiscardUnknown(m) +} + +var xxx_messageInfo_Series proto.InternalMessageInfo type AggrChunk struct { - MinTime int64 `protobuf:"varint,1,opt,name=min_time,json=minTime,proto3" json:"min_time,omitempty"` - MaxTime int64 `protobuf:"varint,2,opt,name=max_time,json=maxTime,proto3" json:"max_time,omitempty"` - Raw *Chunk `protobuf:"bytes,3,opt,name=raw" json:"raw,omitempty"` - Count *Chunk `protobuf:"bytes,4,opt,name=count" json:"count,omitempty"` - Sum *Chunk `protobuf:"bytes,5,opt,name=sum" json:"sum,omitempty"` - Min *Chunk `protobuf:"bytes,6,opt,name=min" json:"min,omitempty"` - Max *Chunk `protobuf:"bytes,7,opt,name=max" json:"max,omitempty"` - Counter *Chunk `protobuf:"bytes,8,opt,name=counter" json:"counter,omitempty"` + MinTime int64 `protobuf:"varint,1,opt,name=min_time,json=minTime,proto3" json:"min_time,omitempty"` + MaxTime int64 `protobuf:"varint,2,opt,name=max_time,json=maxTime,proto3" json:"max_time,omitempty"` + Raw *Chunk `protobuf:"bytes,3,opt,name=raw" json:"raw,omitempty"` + Count *Chunk `protobuf:"bytes,4,opt,name=count" json:"count,omitempty"` + Sum *Chunk `protobuf:"bytes,5,opt,name=sum" json:"sum,omitempty"` + Min *Chunk `protobuf:"bytes,6,opt,name=min" json:"min,omitempty"` + Max *Chunk `protobuf:"bytes,7,opt,name=max" json:"max,omitempty"` + Counter *Chunk `protobuf:"bytes,8,opt,name=counter" json:"counter,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *AggrChunk) Reset() { *m = AggrChunk{} } +func (m *AggrChunk) String() string { return proto.CompactTextString(m) } +func (*AggrChunk) ProtoMessage() {} +func (*AggrChunk) Descriptor() ([]byte, []int) { + return fileDescriptor_types_60e135d4a4f03620, []int{3} +} +func (m *AggrChunk) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *AggrChunk) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_AggrChunk.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (dst *AggrChunk) XXX_Merge(src proto.Message) { + xxx_messageInfo_AggrChunk.Merge(dst, src) +} +func (m *AggrChunk) XXX_Size() int { + return m.Size() +} +func (m *AggrChunk) XXX_DiscardUnknown() { + xxx_messageInfo_AggrChunk.DiscardUnknown(m) } -func (m *AggrChunk) Reset() { *m = AggrChunk{} } -func (m *AggrChunk) String() string { return proto.CompactTextString(m) } -func (*AggrChunk) ProtoMessage() {} -func (*AggrChunk) Descriptor() ([]byte, []int) { return fileDescriptorTypes, []int{3} } +var xxx_messageInfo_AggrChunk proto.InternalMessageInfo // Matcher specifies a rule, which can match or set of labels or not. 
type LabelMatcher struct { - Type LabelMatcher_Type `protobuf:"varint,1,opt,name=type,proto3,enum=thanos.LabelMatcher_Type" json:"type,omitempty"` - Name string `protobuf:"bytes,2,opt,name=name,proto3" json:"name,omitempty"` - Value string `protobuf:"bytes,3,opt,name=value,proto3" json:"value,omitempty"` + Type LabelMatcher_Type `protobuf:"varint,1,opt,name=type,proto3,enum=thanos.LabelMatcher_Type" json:"type,omitempty"` + Name string `protobuf:"bytes,2,opt,name=name,proto3" json:"name,omitempty"` + Value string `protobuf:"bytes,3,opt,name=value,proto3" json:"value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } -func (m *LabelMatcher) Reset() { *m = LabelMatcher{} } -func (m *LabelMatcher) String() string { return proto.CompactTextString(m) } -func (*LabelMatcher) ProtoMessage() {} -func (*LabelMatcher) Descriptor() ([]byte, []int) { return fileDescriptorTypes, []int{4} } +func (m *LabelMatcher) Reset() { *m = LabelMatcher{} } +func (m *LabelMatcher) String() string { return proto.CompactTextString(m) } +func (*LabelMatcher) ProtoMessage() {} +func (*LabelMatcher) Descriptor() ([]byte, []int) { + return fileDescriptor_types_60e135d4a4f03620, []int{4} +} +func (m *LabelMatcher) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *LabelMatcher) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_LabelMatcher.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (dst *LabelMatcher) XXX_Merge(src proto.Message) { + xxx_messageInfo_LabelMatcher.Merge(dst, src) +} +func (m *LabelMatcher) XXX_Size() int { + return m.Size() +} +func (m *LabelMatcher) XXX_DiscardUnknown() { + xxx_messageInfo_LabelMatcher.DiscardUnknown(m) +} + +var xxx_messageInfo_LabelMatcher proto.InternalMessageInfo func init() { proto.RegisterType((*Label)(nil), "thanos.Label") @@ -154,6 +319,9 @@ func (m *Label) MarshalTo(dAtA []byte) (int, error) { i = encodeVarintTypes(dAtA, i, uint64(len(m.Value))) i += copy(dAtA[i:], m.Value) } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } return i, nil } @@ -183,6 +351,9 @@ func (m *Chunk) MarshalTo(dAtA []byte) (int, error) { i = encodeVarintTypes(dAtA, i, uint64(len(m.Data))) i += copy(dAtA[i:], m.Data) } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } return i, nil } @@ -225,6 +396,9 @@ func (m *Series) MarshalTo(dAtA []byte) (int, error) { i += n } } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } return i, nil } @@ -313,6 +487,9 @@ func (m *AggrChunk) MarshalTo(dAtA []byte) (int, error) { } i += n6 } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } return i, nil } @@ -348,6 +525,9 @@ func (m *LabelMatcher) MarshalTo(dAtA []byte) (int, error) { i = encodeVarintTypes(dAtA, i, uint64(len(m.Value))) i += copy(dAtA[i:], m.Value) } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } return i, nil } @@ -371,6 +551,9 @@ func (m *Label) Size() (n int) { if l > 0 { n += 1 + l + sovTypes(uint64(l)) } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } return n } @@ -384,6 +567,9 @@ func (m *Chunk) Size() (n int) { if l > 0 { n += 1 + l + sovTypes(uint64(l)) } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } return n } @@ -402,6 +588,9 @@ func (m *Series) Size() 
(n int) { n += 1 + l + sovTypes(uint64(l)) } } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } return n } @@ -438,6 +627,9 @@ func (m *AggrChunk) Size() (n int) { l = m.Counter.Size() n += 1 + l + sovTypes(uint64(l)) } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } return n } @@ -455,6 +647,9 @@ func (m *LabelMatcher) Size() (n int) { if l > 0 { n += 1 + l + sovTypes(uint64(l)) } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } return n } @@ -570,6 +765,7 @@ func (m *Label) Unmarshal(dAtA []byte) error { if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) iNdEx += skippy } } @@ -670,6 +866,7 @@ func (m *Chunk) Unmarshal(dAtA []byte) error { if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) iNdEx += skippy } } @@ -782,6 +979,7 @@ func (m *Series) Unmarshal(dAtA []byte) error { if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) iNdEx += skippy } } @@ -1068,6 +1266,7 @@ func (m *AggrChunk) Unmarshal(dAtA []byte) error { if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) iNdEx += skippy } } @@ -1195,6 +1394,7 @@ func (m *LabelMatcher) Unmarshal(dAtA []byte) error { if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) iNdEx += skippy } } @@ -1309,9 +1509,9 @@ var ( ErrIntOverflowTypes = fmt.Errorf("proto: integer overflow") ) -func init() { proto.RegisterFile("types.proto", fileDescriptorTypes) } +func init() { proto.RegisterFile("types.proto", fileDescriptor_types_60e135d4a4f03620) } -var fileDescriptorTypes = []byte{ +var fileDescriptor_types_60e135d4a4f03620 = []byte{ // 432 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x6c, 0x92, 0xdd, 0x6e, 0xd3, 0x30, 0x14, 0xc7, 0xeb, 0x7c, 0x76, 0x67, 0x03, 0x05, 0x33, 0x21, 0x97, 0x8b, 0xae, 0x0a, 0x17, 0x54, diff --git a/vendor/github.com/prometheus/prometheus/pkg/value/value.go b/vendor/github.com/prometheus/prometheus/pkg/value/value.go index a10f8f3..655ce85 100644 --- a/vendor/github.com/prometheus/prometheus/pkg/value/value.go +++ b/vendor/github.com/prometheus/prometheus/pkg/value/value.go @@ -21,7 +21,7 @@ const ( // NormalNaN is a quiet NaN. This is also math.NaN(). NormalNaN uint64 = 0x7ff8000000000001 - // StaleNaN is a signalling NaN, due to the MSB of the mantissa being 0. + // StaleNaN is a signaling NaN, due to the MSB of the mantissa being 0. // This value is chosen with many leading 0s, so we have scope to store more // complicated values in the future. It is 2 rather than 1 to make // it easier to distinguish from the NormalNaN by a human when debugging. diff --git a/vendor/github.com/prometheus/prometheus/prompb/README.md b/vendor/github.com/prometheus/prometheus/prompb/README.md index d2aa933..7f24391 100644 --- a/vendor/github.com/prometheus/prometheus/prompb/README.md +++ b/vendor/github.com/prometheus/prometheus/prompb/README.md @@ -2,13 +2,8 @@ The compiled protobufs are version controlled and you won't normally need to re-compile them when building Prometheus. If however you have modified the defs and do need to re-compile, run -`./scripts/genproto.sh` from the parent dir. +`make proto` from the parent dir. 
-In order for the script to run, you'll need `protoc` (version 3.5) in your -PATH, and the following Go packages installed: +In order for the script to run, you'll need `protoc` (version 3.5.1) in your +PATH. -- github.com/gogo/protobuf -- github.com/gogo/protobuf/protoc-gen-gogofast -- github.com/grpc-ecosystem/grpc-gateway/protoc-gen-grpc-gateway/ -- github.com/grpc-ecosystem/grpc-gateway/protoc-gen-swagger -- golang.org/x/tools/cmd/goimports diff --git a/vendor/github.com/prometheus/prometheus/prompb/remote.pb.go b/vendor/github.com/prometheus/prometheus/prompb/remote.pb.go index 08ceed0..804e4eb 100644 --- a/vendor/github.com/prometheus/prometheus/prompb/remote.pb.go +++ b/vendor/github.com/prometheus/prometheus/prompb/remote.pb.go @@ -1,33 +1,6 @@ // Code generated by protoc-gen-gogo. DO NOT EDIT. // source: remote.proto -/* - Package prompb is a generated protocol buffer package. - - It is generated from these files: - remote.proto - rpc.proto - types.proto - - It has these top-level messages: - WriteRequest - ReadRequest - ReadResponse - Query - QueryResult - TSDBSnapshotRequest - TSDBSnapshotResponse - TSDBCleanTombstonesRequest - TSDBCleanTombstonesResponse - SeriesDeleteRequest - SeriesDeleteResponse - Sample - TimeSeries - Label - Labels - LabelMatcher - ReadHints -*/ package prompb import proto "github.com/gogo/protobuf/proto" @@ -48,15 +21,46 @@ var _ = math.Inf const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package type WriteRequest struct { - Timeseries []*TimeSeries `protobuf:"bytes,1,rep,name=timeseries" json:"timeseries,omitempty"` + Timeseries []TimeSeries `protobuf:"bytes,1,rep,name=timeseries,proto3" json:"timeseries"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *WriteRequest) Reset() { *m = WriteRequest{} } +func (m *WriteRequest) String() string { return proto.CompactTextString(m) } +func (*WriteRequest) ProtoMessage() {} +func (*WriteRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_remote_007cb64b4d8cdf66, []int{0} +} +func (m *WriteRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *WriteRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_WriteRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (dst *WriteRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_WriteRequest.Merge(dst, src) +} +func (m *WriteRequest) XXX_Size() int { + return m.Size() +} +func (m *WriteRequest) XXX_DiscardUnknown() { + xxx_messageInfo_WriteRequest.DiscardUnknown(m) } -func (m *WriteRequest) Reset() { *m = WriteRequest{} } -func (m *WriteRequest) String() string { return proto.CompactTextString(m) } -func (*WriteRequest) ProtoMessage() {} -func (*WriteRequest) Descriptor() ([]byte, []int) { return fileDescriptorRemote, []int{0} } +var xxx_messageInfo_WriteRequest proto.InternalMessageInfo -func (m *WriteRequest) GetTimeseries() []*TimeSeries { +func (m *WriteRequest) GetTimeseries() []TimeSeries { if m != nil { return m.Timeseries } @@ -64,13 +68,44 @@ func (m *WriteRequest) GetTimeseries() []*TimeSeries { } type ReadRequest struct { - Queries []*Query `protobuf:"bytes,1,rep,name=queries" json:"queries,omitempty"` + Queries []*Query `protobuf:"bytes,1,rep,name=queries,proto3" json:"queries,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized 
[]byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ReadRequest) Reset() { *m = ReadRequest{} } +func (m *ReadRequest) String() string { return proto.CompactTextString(m) } +func (*ReadRequest) ProtoMessage() {} +func (*ReadRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_remote_007cb64b4d8cdf66, []int{1} +} +func (m *ReadRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ReadRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ReadRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (dst *ReadRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_ReadRequest.Merge(dst, src) +} +func (m *ReadRequest) XXX_Size() int { + return m.Size() +} +func (m *ReadRequest) XXX_DiscardUnknown() { + xxx_messageInfo_ReadRequest.DiscardUnknown(m) } -func (m *ReadRequest) Reset() { *m = ReadRequest{} } -func (m *ReadRequest) String() string { return proto.CompactTextString(m) } -func (*ReadRequest) ProtoMessage() {} -func (*ReadRequest) Descriptor() ([]byte, []int) { return fileDescriptorRemote, []int{1} } +var xxx_messageInfo_ReadRequest proto.InternalMessageInfo func (m *ReadRequest) GetQueries() []*Query { if m != nil { @@ -81,13 +116,44 @@ func (m *ReadRequest) GetQueries() []*Query { type ReadResponse struct { // In same order as the request's queries. - Results []*QueryResult `protobuf:"bytes,1,rep,name=results" json:"results,omitempty"` + Results []*QueryResult `protobuf:"bytes,1,rep,name=results,proto3" json:"results,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ReadResponse) Reset() { *m = ReadResponse{} } +func (m *ReadResponse) String() string { return proto.CompactTextString(m) } +func (*ReadResponse) ProtoMessage() {} +func (*ReadResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_remote_007cb64b4d8cdf66, []int{2} +} +func (m *ReadResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ReadResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ReadResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (dst *ReadResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_ReadResponse.Merge(dst, src) +} +func (m *ReadResponse) XXX_Size() int { + return m.Size() +} +func (m *ReadResponse) XXX_DiscardUnknown() { + xxx_messageInfo_ReadResponse.DiscardUnknown(m) } -func (m *ReadResponse) Reset() { *m = ReadResponse{} } -func (m *ReadResponse) String() string { return proto.CompactTextString(m) } -func (*ReadResponse) ProtoMessage() {} -func (*ReadResponse) Descriptor() ([]byte, []int) { return fileDescriptorRemote, []int{2} } +var xxx_messageInfo_ReadResponse proto.InternalMessageInfo func (m *ReadResponse) GetResults() []*QueryResult { if m != nil { @@ -97,16 +163,47 @@ func (m *ReadResponse) GetResults() []*QueryResult { } type Query struct { - StartTimestampMs int64 `protobuf:"varint,1,opt,name=start_timestamp_ms,json=startTimestampMs,proto3" json:"start_timestamp_ms,omitempty"` - EndTimestampMs int64 `protobuf:"varint,2,opt,name=end_timestamp_ms,json=endTimestampMs,proto3" json:"end_timestamp_ms,omitempty"` - Matchers []*LabelMatcher `protobuf:"bytes,3,rep,name=matchers" 
json:"matchers,omitempty"` - Hints *ReadHints `protobuf:"bytes,4,opt,name=hints" json:"hints,omitempty"` + StartTimestampMs int64 `protobuf:"varint,1,opt,name=start_timestamp_ms,json=startTimestampMs,proto3" json:"start_timestamp_ms,omitempty"` + EndTimestampMs int64 `protobuf:"varint,2,opt,name=end_timestamp_ms,json=endTimestampMs,proto3" json:"end_timestamp_ms,omitempty"` + Matchers []*LabelMatcher `protobuf:"bytes,3,rep,name=matchers,proto3" json:"matchers,omitempty"` + Hints *ReadHints `protobuf:"bytes,4,opt,name=hints,proto3" json:"hints,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } -func (m *Query) Reset() { *m = Query{} } -func (m *Query) String() string { return proto.CompactTextString(m) } -func (*Query) ProtoMessage() {} -func (*Query) Descriptor() ([]byte, []int) { return fileDescriptorRemote, []int{3} } +func (m *Query) Reset() { *m = Query{} } +func (m *Query) String() string { return proto.CompactTextString(m) } +func (*Query) ProtoMessage() {} +func (*Query) Descriptor() ([]byte, []int) { + return fileDescriptor_remote_007cb64b4d8cdf66, []int{3} +} +func (m *Query) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Query) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Query.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (dst *Query) XXX_Merge(src proto.Message) { + xxx_messageInfo_Query.Merge(dst, src) +} +func (m *Query) XXX_Size() int { + return m.Size() +} +func (m *Query) XXX_DiscardUnknown() { + xxx_messageInfo_Query.DiscardUnknown(m) +} + +var xxx_messageInfo_Query proto.InternalMessageInfo func (m *Query) GetStartTimestampMs() int64 { if m != nil { @@ -138,13 +235,44 @@ func (m *Query) GetHints() *ReadHints { type QueryResult struct { // Samples within a time series must be ordered by time. 
- Timeseries []*TimeSeries `protobuf:"bytes,1,rep,name=timeseries" json:"timeseries,omitempty"` + Timeseries []*TimeSeries `protobuf:"bytes,1,rep,name=timeseries,proto3" json:"timeseries,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *QueryResult) Reset() { *m = QueryResult{} } +func (m *QueryResult) String() string { return proto.CompactTextString(m) } +func (*QueryResult) ProtoMessage() {} +func (*QueryResult) Descriptor() ([]byte, []int) { + return fileDescriptor_remote_007cb64b4d8cdf66, []int{4} +} +func (m *QueryResult) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *QueryResult) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_QueryResult.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (dst *QueryResult) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueryResult.Merge(dst, src) +} +func (m *QueryResult) XXX_Size() int { + return m.Size() +} +func (m *QueryResult) XXX_DiscardUnknown() { + xxx_messageInfo_QueryResult.DiscardUnknown(m) } -func (m *QueryResult) Reset() { *m = QueryResult{} } -func (m *QueryResult) String() string { return proto.CompactTextString(m) } -func (*QueryResult) ProtoMessage() {} -func (*QueryResult) Descriptor() ([]byte, []int) { return fileDescriptorRemote, []int{4} } +var xxx_messageInfo_QueryResult proto.InternalMessageInfo func (m *QueryResult) GetTimeseries() []*TimeSeries { if m != nil { @@ -187,6 +315,9 @@ func (m *WriteRequest) MarshalTo(dAtA []byte) (int, error) { i += n } } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } return i, nil } @@ -217,6 +348,9 @@ func (m *ReadRequest) MarshalTo(dAtA []byte) (int, error) { i += n } } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } return i, nil } @@ -247,6 +381,9 @@ func (m *ReadResponse) MarshalTo(dAtA []byte) (int, error) { i += n } } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } return i, nil } @@ -297,6 +434,9 @@ func (m *Query) MarshalTo(dAtA []byte) (int, error) { } i += n1 } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } return i, nil } @@ -327,27 +467,12 @@ func (m *QueryResult) MarshalTo(dAtA []byte) (int, error) { i += n } } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } return i, nil } -func encodeFixed64Remote(dAtA []byte, offset int, v uint64) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - dAtA[offset+4] = uint8(v >> 32) - dAtA[offset+5] = uint8(v >> 40) - dAtA[offset+6] = uint8(v >> 48) - dAtA[offset+7] = uint8(v >> 56) - return offset + 8 -} -func encodeFixed32Remote(dAtA []byte, offset int, v uint32) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - return offset + 4 -} func encodeVarintRemote(dAtA []byte, offset int, v uint64) int { for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) @@ -358,6 +483,9 @@ func encodeVarintRemote(dAtA []byte, offset int, v uint64) int { return offset + 1 } func (m *WriteRequest) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if len(m.Timeseries) > 0 { @@ -366,10 +494,16 @@ func (m *WriteRequest) Size() (n int) { n += 1 + l + 
sovRemote(uint64(l)) } } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } return n } func (m *ReadRequest) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if len(m.Queries) > 0 { @@ -378,10 +512,16 @@ func (m *ReadRequest) Size() (n int) { n += 1 + l + sovRemote(uint64(l)) } } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } return n } func (m *ReadResponse) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if len(m.Results) > 0 { @@ -390,10 +530,16 @@ func (m *ReadResponse) Size() (n int) { n += 1 + l + sovRemote(uint64(l)) } } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } return n } func (m *Query) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if m.StartTimestampMs != 0 { @@ -412,10 +558,16 @@ func (m *Query) Size() (n int) { l = m.Hints.Size() n += 1 + l + sovRemote(uint64(l)) } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } return n } func (m *QueryResult) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if len(m.Timeseries) > 0 { @@ -424,6 +576,9 @@ func (m *QueryResult) Size() (n int) { n += 1 + l + sovRemote(uint64(l)) } } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } return n } @@ -495,7 +650,7 @@ func (m *WriteRequest) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Timeseries = append(m.Timeseries, &TimeSeries{}) + m.Timeseries = append(m.Timeseries, TimeSeries{}) if err := m.Timeseries[len(m.Timeseries)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } @@ -512,6 +667,7 @@ func (m *WriteRequest) Unmarshal(dAtA []byte) error { if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) iNdEx += skippy } } @@ -593,6 +749,7 @@ func (m *ReadRequest) Unmarshal(dAtA []byte) error { if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) iNdEx += skippy } } @@ -674,6 +831,7 @@ func (m *ReadResponse) Unmarshal(dAtA []byte) error { if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) iNdEx += skippy } } @@ -826,6 +984,7 @@ func (m *Query) Unmarshal(dAtA []byte) error { if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) iNdEx += skippy } } @@ -907,6 +1066,7 @@ func (m *QueryResult) Unmarshal(dAtA []byte) error { if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
iNdEx += skippy } } @@ -1021,28 +1181,29 @@ var ( ErrIntOverflowRemote = fmt.Errorf("proto: integer overflow") ) -func init() { proto.RegisterFile("remote.proto", fileDescriptorRemote) } +func init() { proto.RegisterFile("remote.proto", fileDescriptor_remote_007cb64b4d8cdf66) } -var fileDescriptorRemote = []byte{ - // 308 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x91, 0x4f, 0x4b, 0xf3, 0x40, - 0x10, 0xc6, 0xd9, 0xb7, 0x6f, 0x5b, 0x99, 0x14, 0xa9, 0x8b, 0x7f, 0x82, 0x87, 0x52, 0x72, 0x0a, - 0x54, 0x0a, 0x56, 0xf1, 0xe0, 0x4d, 0x41, 0xf1, 0x60, 0x0f, 0xae, 0x05, 0xc1, 0x4b, 0x49, 0xed, - 0x40, 0x03, 0xdd, 0x64, 0xbb, 0x33, 0x39, 0xf4, 0xeb, 0x79, 0xf2, 0xe8, 0x47, 0x90, 0x7c, 0x12, - 0xc9, 0x86, 0xe8, 0x8a, 0x37, 0xcf, 0xbf, 0xdf, 0xf3, 0xf0, 0x0c, 0x03, 0x3d, 0x8b, 0x3a, 0x67, - 0x1c, 0x1b, 0x9b, 0x73, 0x2e, 0xc1, 0xd8, 0x5c, 0x23, 0xaf, 0xb0, 0xa0, 0xe3, 0x80, 0xb7, 0x06, - 0xa9, 0x06, 0xd1, 0x2d, 0xf4, 0x9e, 0x6c, 0xca, 0xa8, 0x70, 0x53, 0x20, 0xb1, 0xbc, 0x00, 0xe0, - 0x54, 0x23, 0xa1, 0x4d, 0x91, 0x42, 0x31, 0x6c, 0xc5, 0xc1, 0xe4, 0x70, 0xfc, 0x9d, 0x1e, 0xcf, - 0x52, 0x8d, 0x8f, 0x8e, 0x2a, 0xcf, 0x8c, 0x2e, 0x21, 0x50, 0x98, 0x2c, 0x9b, 0x9a, 0x11, 0x74, - 0x37, 0x85, 0xdf, 0xb1, 0xe7, 0x77, 0x3c, 0x14, 0x68, 0xb7, 0xaa, 0x31, 0xa2, 0x2b, 0xe8, 0xd5, - 0x59, 0x32, 0x79, 0x46, 0x28, 0x4f, 0xa1, 0x6b, 0x91, 0x8a, 0x35, 0x37, 0xe1, 0xa3, 0xdf, 0x61, - 0xc7, 0x55, 0xe3, 0x45, 0xaf, 0x02, 0xda, 0x0e, 0xc8, 0x13, 0x90, 0xc4, 0x89, 0xe5, 0xb9, 0x1b, - 0xc7, 0x89, 0x36, 0x73, 0x5d, 0xf5, 0x88, 0xb8, 0xa5, 0xfa, 0x8e, 0xcc, 0x1a, 0x30, 0x25, 0x19, - 0x43, 0x1f, 0xb3, 0xe5, 0x4f, 0xf7, 0x9f, 0x73, 0x77, 0x31, 0x5b, 0xfa, 0xe6, 0x39, 0xec, 0xe8, - 0x84, 0x5f, 0x56, 0x68, 0x29, 0x6c, 0xb9, 0x55, 0xa1, 0xbf, 0xea, 0x3e, 0x59, 0xe0, 0x7a, 0x5a, - 0x0b, 0xea, 0xcb, 0x94, 0x23, 0x68, 0xaf, 0xd2, 0x8c, 0x29, 0xfc, 0x3f, 0x14, 0x71, 0x30, 0x39, - 0xf0, 0x23, 0xd5, 0xcd, 0x77, 0x15, 0x54, 0xb5, 0x13, 0xdd, 0x40, 0xe0, 0x1d, 0xf7, 0xd7, 0x57, - 0x5c, 0xef, 0xbf, 0x95, 0x03, 0xf1, 0x5e, 0x0e, 0xc4, 0x47, 0x39, 0x10, 0xcf, 0x9d, 0x2a, 0x60, - 0x16, 0x8b, 0x8e, 0xfb, 0xf7, 0xd9, 0x67, 0x00, 0x00, 0x00, 0xff, 0xff, 0x81, 0x1e, 0xc0, 0x24, - 0x18, 0x02, 0x00, 0x00, +var fileDescriptor_remote_007cb64b4d8cdf66 = []byte{ + // 333 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x91, 0xbf, 0x4a, 0x2b, 0x41, + 0x14, 0xc6, 0xef, 0xdc, 0xfc, 0xbb, 0x9c, 0x0d, 0x97, 0xdc, 0x21, 0x57, 0x97, 0x14, 0x31, 0x6c, + 0xb5, 0x10, 0x89, 0x18, 0xc5, 0x42, 0x6c, 0x0c, 0x08, 0x16, 0x49, 0xe1, 0x18, 0x10, 0x6c, 0xc2, + 0xc6, 0x1c, 0x92, 0x85, 0xcc, 0xce, 0x66, 0xe6, 0x6c, 0x91, 0xd7, 0xb3, 0x4a, 0xe9, 0x13, 0x88, + 0xe4, 0x49, 0x64, 0x67, 0x59, 0x1d, 0xb1, 0xb1, 0x1b, 0xe6, 0xf7, 0xfb, 0x3e, 0xce, 0xe1, 0x40, + 0x53, 0xa3, 0x54, 0x84, 0x83, 0x54, 0x2b, 0x52, 0x1c, 0x52, 0xad, 0x24, 0xd2, 0x0a, 0x33, 0xd3, + 0xf1, 0x68, 0x9b, 0xa2, 0x29, 0x40, 0xa7, 0xbd, 0x54, 0x4b, 0x65, 0x9f, 0x27, 0xf9, 0xab, 0xf8, + 0x0d, 0xc6, 0xd0, 0x7c, 0xd0, 0x31, 0xa1, 0xc0, 0x4d, 0x86, 0x86, 0xf8, 0x15, 0x00, 0xc5, 0x12, + 0x0d, 0xea, 0x18, 0x8d, 0xcf, 0x7a, 0x95, 0xd0, 0x1b, 0x1e, 0x0c, 0x3e, 0x3b, 0x07, 0xd3, 0x58, + 0xe2, 0xbd, 0xa5, 0xa3, 0xea, 0xee, 0xf5, 0xe8, 0x97, 0x70, 0xfc, 0xe0, 0x12, 0x3c, 0x81, 0xd1, + 0xa2, 0x2c, 0xeb, 0x43, 0x63, 0x93, 0xb9, 0x4d, 0xff, 0xdc, 0xa6, 0xbb, 0x0c, 0xf5, 0x56, 0x94, + 0x46, 0x70, 0x0d, 0xcd, 0x22, 0x6b, 0x52, 0x95, 0x18, 0xe4, 0xa7, 0xd0, 0xd0, 0x68, 0xb2, 0x35, + 0x95, 0xe1, 0xc3, 0xef, 
0x61, 0xcb, 0x45, 0xe9, 0x05, 0xcf, 0x0c, 0x6a, 0x16, 0xf0, 0x63, 0xe0, + 0x86, 0x22, 0x4d, 0x33, 0x3b, 0x1c, 0x45, 0x32, 0x9d, 0xc9, 0xbc, 0x87, 0x85, 0x15, 0xd1, 0xb2, + 0x64, 0x5a, 0x82, 0x89, 0xe1, 0x21, 0xb4, 0x30, 0x59, 0x7c, 0x75, 0x7f, 0x5b, 0xf7, 0x2f, 0x26, + 0x0b, 0xd7, 0x3c, 0x87, 0x3f, 0x32, 0xa2, 0xa7, 0x15, 0x6a, 0xe3, 0x57, 0xec, 0x54, 0xbe, 0x3b, + 0xd5, 0x38, 0x9a, 0xe3, 0x7a, 0x52, 0x08, 0xe2, 0xc3, 0xe4, 0x7d, 0xa8, 0xad, 0xe2, 0x84, 0x8c, + 0x5f, 0xed, 0xb1, 0xd0, 0x1b, 0xfe, 0x77, 0x23, 0xf9, 0xce, 0xb7, 0x39, 0x14, 0x85, 0x13, 0xdc, + 0x80, 0xe7, 0x2c, 0xc7, 0x2f, 0x7e, 0x7e, 0x10, 0xf7, 0x14, 0xa3, 0xf6, 0x6e, 0xdf, 0x65, 0x2f, + 0xfb, 0x2e, 0x7b, 0xdb, 0x77, 0xd9, 0x63, 0x3d, 0x0f, 0xa4, 0xf3, 0x79, 0xdd, 0x5e, 0xfd, 0xec, + 0x3d, 0x00, 0x00, 0xff, 0xff, 0x9e, 0xb6, 0x05, 0x1c, 0x34, 0x02, 0x00, 0x00, } diff --git a/vendor/github.com/prometheus/prometheus/prompb/remote.proto b/vendor/github.com/prometheus/prometheus/prompb/remote.proto index 90877cc..cf86f0d 100644 --- a/vendor/github.com/prometheus/prometheus/prompb/remote.proto +++ b/vendor/github.com/prometheus/prometheus/prompb/remote.proto @@ -17,9 +17,10 @@ package prometheus; option go_package = "prompb"; import "types.proto"; +import "gogoproto/gogo.proto"; message WriteRequest { - repeated prometheus.TimeSeries timeseries = 1; + repeated prometheus.TimeSeries timeseries = 1 [(gogoproto.nullable) = false]; } message ReadRequest { diff --git a/vendor/github.com/prometheus/prometheus/prompb/rpc.pb.go b/vendor/github.com/prometheus/prometheus/prompb/rpc.pb.go index 4da57ee..27097b3 100644 --- a/vendor/github.com/prometheus/prometheus/prompb/rpc.pb.go +++ b/vendor/github.com/prometheus/prometheus/prompb/rpc.pb.go @@ -26,58 +26,250 @@ var _ = fmt.Errorf var _ = math.Inf var _ = time.Kitchen +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package + type TSDBSnapshotRequest struct { - SkipHead bool `protobuf:"varint,1,opt,name=skip_head,json=skipHead,proto3" json:"skip_head,omitempty"` + SkipHead bool `protobuf:"varint,1,opt,name=skip_head,json=skipHead,proto3" json:"skip_head,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *TSDBSnapshotRequest) Reset() { *m = TSDBSnapshotRequest{} } +func (m *TSDBSnapshotRequest) String() string { return proto.CompactTextString(m) } +func (*TSDBSnapshotRequest) ProtoMessage() {} +func (*TSDBSnapshotRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_rpc_e0d54cfadc26b2e1, []int{0} +} +func (m *TSDBSnapshotRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *TSDBSnapshotRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_TSDBSnapshotRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (dst *TSDBSnapshotRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_TSDBSnapshotRequest.Merge(dst, src) +} +func (m *TSDBSnapshotRequest) XXX_Size() int { + return m.Size() +} +func (m *TSDBSnapshotRequest) XXX_DiscardUnknown() { + xxx_messageInfo_TSDBSnapshotRequest.DiscardUnknown(m) } -func (m *TSDBSnapshotRequest) Reset() { *m = TSDBSnapshotRequest{} } -func (m *TSDBSnapshotRequest) String() string { return proto.CompactTextString(m) } -func (*TSDBSnapshotRequest) ProtoMessage() {} -func (*TSDBSnapshotRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{0} } +var xxx_messageInfo_TSDBSnapshotRequest proto.InternalMessageInfo type TSDBSnapshotResponse struct { - Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *TSDBSnapshotResponse) Reset() { *m = TSDBSnapshotResponse{} } +func (m *TSDBSnapshotResponse) String() string { return proto.CompactTextString(m) } +func (*TSDBSnapshotResponse) ProtoMessage() {} +func (*TSDBSnapshotResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_rpc_e0d54cfadc26b2e1, []int{1} +} +func (m *TSDBSnapshotResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *TSDBSnapshotResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_TSDBSnapshotResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (dst *TSDBSnapshotResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_TSDBSnapshotResponse.Merge(dst, src) +} +func (m *TSDBSnapshotResponse) XXX_Size() int { + return m.Size() +} +func (m *TSDBSnapshotResponse) XXX_DiscardUnknown() { + xxx_messageInfo_TSDBSnapshotResponse.DiscardUnknown(m) } -func (m *TSDBSnapshotResponse) Reset() { *m = TSDBSnapshotResponse{} } -func (m *TSDBSnapshotResponse) String() string { return proto.CompactTextString(m) } -func (*TSDBSnapshotResponse) ProtoMessage() {} -func (*TSDBSnapshotResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{1} } +var xxx_messageInfo_TSDBSnapshotResponse proto.InternalMessageInfo type 
TSDBCleanTombstonesRequest struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } -func (m *TSDBCleanTombstonesRequest) Reset() { *m = TSDBCleanTombstonesRequest{} } -func (m *TSDBCleanTombstonesRequest) String() string { return proto.CompactTextString(m) } -func (*TSDBCleanTombstonesRequest) ProtoMessage() {} -func (*TSDBCleanTombstonesRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{2} } +func (m *TSDBCleanTombstonesRequest) Reset() { *m = TSDBCleanTombstonesRequest{} } +func (m *TSDBCleanTombstonesRequest) String() string { return proto.CompactTextString(m) } +func (*TSDBCleanTombstonesRequest) ProtoMessage() {} +func (*TSDBCleanTombstonesRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_rpc_e0d54cfadc26b2e1, []int{2} +} +func (m *TSDBCleanTombstonesRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *TSDBCleanTombstonesRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_TSDBCleanTombstonesRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (dst *TSDBCleanTombstonesRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_TSDBCleanTombstonesRequest.Merge(dst, src) +} +func (m *TSDBCleanTombstonesRequest) XXX_Size() int { + return m.Size() +} +func (m *TSDBCleanTombstonesRequest) XXX_DiscardUnknown() { + xxx_messageInfo_TSDBCleanTombstonesRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_TSDBCleanTombstonesRequest proto.InternalMessageInfo type TSDBCleanTombstonesResponse struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } -func (m *TSDBCleanTombstonesResponse) Reset() { *m = TSDBCleanTombstonesResponse{} } -func (m *TSDBCleanTombstonesResponse) String() string { return proto.CompactTextString(m) } -func (*TSDBCleanTombstonesResponse) ProtoMessage() {} -func (*TSDBCleanTombstonesResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{3} } +func (m *TSDBCleanTombstonesResponse) Reset() { *m = TSDBCleanTombstonesResponse{} } +func (m *TSDBCleanTombstonesResponse) String() string { return proto.CompactTextString(m) } +func (*TSDBCleanTombstonesResponse) ProtoMessage() {} +func (*TSDBCleanTombstonesResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_rpc_e0d54cfadc26b2e1, []int{3} +} +func (m *TSDBCleanTombstonesResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *TSDBCleanTombstonesResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_TSDBCleanTombstonesResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (dst *TSDBCleanTombstonesResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_TSDBCleanTombstonesResponse.Merge(dst, src) +} +func (m *TSDBCleanTombstonesResponse) XXX_Size() int { + return m.Size() +} +func (m *TSDBCleanTombstonesResponse) XXX_DiscardUnknown() { + xxx_messageInfo_TSDBCleanTombstonesResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_TSDBCleanTombstonesResponse proto.InternalMessageInfo type SeriesDeleteRequest struct { - MinTime *time.Time `protobuf:"bytes,1,opt,name=min_time,json=minTime,stdtime" json:"min_time,omitempty"` - MaxTime *time.Time 
`protobuf:"bytes,2,opt,name=max_time,json=maxTime,stdtime" json:"max_time,omitempty"` - Matchers []LabelMatcher `protobuf:"bytes,3,rep,name=matchers" json:"matchers"` + MinTime *time.Time `protobuf:"bytes,1,opt,name=min_time,json=minTime,proto3,stdtime" json:"min_time,omitempty"` + MaxTime *time.Time `protobuf:"bytes,2,opt,name=max_time,json=maxTime,proto3,stdtime" json:"max_time,omitempty"` + Matchers []LabelMatcher `protobuf:"bytes,3,rep,name=matchers,proto3" json:"matchers"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } -func (m *SeriesDeleteRequest) Reset() { *m = SeriesDeleteRequest{} } -func (m *SeriesDeleteRequest) String() string { return proto.CompactTextString(m) } -func (*SeriesDeleteRequest) ProtoMessage() {} -func (*SeriesDeleteRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{4} } +func (m *SeriesDeleteRequest) Reset() { *m = SeriesDeleteRequest{} } +func (m *SeriesDeleteRequest) String() string { return proto.CompactTextString(m) } +func (*SeriesDeleteRequest) ProtoMessage() {} +func (*SeriesDeleteRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_rpc_e0d54cfadc26b2e1, []int{4} +} +func (m *SeriesDeleteRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *SeriesDeleteRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_SeriesDeleteRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (dst *SeriesDeleteRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_SeriesDeleteRequest.Merge(dst, src) +} +func (m *SeriesDeleteRequest) XXX_Size() int { + return m.Size() +} +func (m *SeriesDeleteRequest) XXX_DiscardUnknown() { + xxx_messageInfo_SeriesDeleteRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_SeriesDeleteRequest proto.InternalMessageInfo type SeriesDeleteResponse struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } -func (m *SeriesDeleteResponse) Reset() { *m = SeriesDeleteResponse{} } -func (m *SeriesDeleteResponse) String() string { return proto.CompactTextString(m) } -func (*SeriesDeleteResponse) ProtoMessage() {} -func (*SeriesDeleteResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{5} } +func (m *SeriesDeleteResponse) Reset() { *m = SeriesDeleteResponse{} } +func (m *SeriesDeleteResponse) String() string { return proto.CompactTextString(m) } +func (*SeriesDeleteResponse) ProtoMessage() {} +func (*SeriesDeleteResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_rpc_e0d54cfadc26b2e1, []int{5} +} +func (m *SeriesDeleteResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *SeriesDeleteResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_SeriesDeleteResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (dst *SeriesDeleteResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_SeriesDeleteResponse.Merge(dst, src) +} +func (m *SeriesDeleteResponse) XXX_Size() int { + return m.Size() +} +func (m *SeriesDeleteResponse) XXX_DiscardUnknown() { + xxx_messageInfo_SeriesDeleteResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_SeriesDeleteResponse proto.InternalMessageInfo func 
init() { proto.RegisterType((*TSDBSnapshotRequest)(nil), "prometheus.TSDBSnapshotRequest") @@ -96,8 +288,9 @@ var _ grpc.ClientConn // is compatible with the grpc package it is being compiled against. const _ = grpc.SupportPackageIsVersion4 -// Client API for Admin service - +// AdminClient is the client API for Admin service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. type AdminClient interface { // Snapshot creates a snapshot of all current data into 'snapshots/-' under the TSDB's data directory. TSDBSnapshot(ctx context.Context, in *TSDBSnapshotRequest, opts ...grpc.CallOption) (*TSDBSnapshotResponse, error) @@ -117,7 +310,7 @@ func NewAdminClient(cc *grpc.ClientConn) AdminClient { func (c *adminClient) TSDBSnapshot(ctx context.Context, in *TSDBSnapshotRequest, opts ...grpc.CallOption) (*TSDBSnapshotResponse, error) { out := new(TSDBSnapshotResponse) - err := grpc.Invoke(ctx, "/prometheus.Admin/TSDBSnapshot", in, out, c.cc, opts...) + err := c.cc.Invoke(ctx, "/prometheus.Admin/TSDBSnapshot", in, out, opts...) if err != nil { return nil, err } @@ -126,7 +319,7 @@ func (c *adminClient) TSDBSnapshot(ctx context.Context, in *TSDBSnapshotRequest, func (c *adminClient) TSDBCleanTombstones(ctx context.Context, in *TSDBCleanTombstonesRequest, opts ...grpc.CallOption) (*TSDBCleanTombstonesResponse, error) { out := new(TSDBCleanTombstonesResponse) - err := grpc.Invoke(ctx, "/prometheus.Admin/TSDBCleanTombstones", in, out, c.cc, opts...) + err := c.cc.Invoke(ctx, "/prometheus.Admin/TSDBCleanTombstones", in, out, opts...) if err != nil { return nil, err } @@ -135,15 +328,14 @@ func (c *adminClient) TSDBCleanTombstones(ctx context.Context, in *TSDBCleanTomb func (c *adminClient) DeleteSeries(ctx context.Context, in *SeriesDeleteRequest, opts ...grpc.CallOption) (*SeriesDeleteResponse, error) { out := new(SeriesDeleteResponse) - err := grpc.Invoke(ctx, "/prometheus.Admin/DeleteSeries", in, out, c.cc, opts...) + err := c.cc.Invoke(ctx, "/prometheus.Admin/DeleteSeries", in, out, opts...) if err != nil { return nil, err } return out, nil } -// Server API for Admin service - +// AdminServer is the server API for Admin service. type AdminServer interface { // Snapshot creates a snapshot of all current data into 'snapshots/-' under the TSDB's data directory. 
TSDBSnapshot(context.Context, *TSDBSnapshotRequest) (*TSDBSnapshotResponse, error) @@ -257,6 +449,9 @@ func (m *TSDBSnapshotRequest) MarshalTo(dAtA []byte) (int, error) { } i++ } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } return i, nil } @@ -281,6 +476,9 @@ func (m *TSDBSnapshotResponse) MarshalTo(dAtA []byte) (int, error) { i = encodeVarintRpc(dAtA, i, uint64(len(m.Name))) i += copy(dAtA[i:], m.Name) } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } return i, nil } @@ -299,6 +497,9 @@ func (m *TSDBCleanTombstonesRequest) MarshalTo(dAtA []byte) (int, error) { _ = i var l int _ = l + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } return i, nil } @@ -317,6 +518,9 @@ func (m *TSDBCleanTombstonesResponse) MarshalTo(dAtA []byte) (int, error) { _ = i var l int _ = l + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } return i, nil } @@ -367,6 +571,9 @@ func (m *SeriesDeleteRequest) MarshalTo(dAtA []byte) (int, error) { i += n } } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } return i, nil } @@ -385,27 +592,12 @@ func (m *SeriesDeleteResponse) MarshalTo(dAtA []byte) (int, error) { _ = i var l int _ = l + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } return i, nil } -func encodeFixed64Rpc(dAtA []byte, offset int, v uint64) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - dAtA[offset+4] = uint8(v >> 32) - dAtA[offset+5] = uint8(v >> 40) - dAtA[offset+6] = uint8(v >> 48) - dAtA[offset+7] = uint8(v >> 56) - return offset + 8 -} -func encodeFixed32Rpc(dAtA []byte, offset int, v uint32) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - return offset + 4 -} func encodeVarintRpc(dAtA []byte, offset int, v uint64) int { for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) @@ -416,37 +608,64 @@ func encodeVarintRpc(dAtA []byte, offset int, v uint64) int { return offset + 1 } func (m *TSDBSnapshotRequest) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if m.SkipHead { n += 2 } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } return n } func (m *TSDBSnapshotResponse) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Name) if l > 0 { n += 1 + l + sovRpc(uint64(l)) } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } return n } func (m *TSDBCleanTombstonesRequest) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } return n } func (m *TSDBCleanTombstonesResponse) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } return n } func (m *SeriesDeleteRequest) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if m.MinTime != nil { @@ -463,12 +682,21 @@ func (m *SeriesDeleteRequest) Size() (n int) { n += 1 + l + sovRpc(uint64(l)) } } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } return n } func (m *SeriesDeleteResponse) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } return n } @@ -546,6 +774,7 @@ func (m *TSDBSnapshotRequest) Unmarshal(dAtA []byte) error { if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } + 
m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) iNdEx += skippy } } @@ -625,6 +854,7 @@ func (m *TSDBSnapshotResponse) Unmarshal(dAtA []byte) error { if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) iNdEx += skippy } } @@ -675,6 +905,7 @@ func (m *TSDBCleanTombstonesRequest) Unmarshal(dAtA []byte) error { if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) iNdEx += skippy } } @@ -725,6 +956,7 @@ func (m *TSDBCleanTombstonesResponse) Unmarshal(dAtA []byte) error { if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) iNdEx += skippy } } @@ -872,6 +1104,7 @@ func (m *SeriesDeleteRequest) Unmarshal(dAtA []byte) error { if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) iNdEx += skippy } } @@ -922,6 +1155,7 @@ func (m *SeriesDeleteResponse) Unmarshal(dAtA []byte) error { if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) iNdEx += skippy } } @@ -1036,9 +1270,9 @@ var ( ErrIntOverflowRpc = fmt.Errorf("proto: integer overflow") ) -func init() { proto.RegisterFile("rpc.proto", fileDescriptorRpc) } +func init() { proto.RegisterFile("rpc.proto", fileDescriptor_rpc_e0d54cfadc26b2e1) } -var fileDescriptorRpc = []byte{ +var fileDescriptor_rpc_e0d54cfadc26b2e1 = []byte{ // 471 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x52, 0x3d, 0x8f, 0xd3, 0x40, 0x10, 0xbd, 0xbd, 0x84, 0x23, 0xd9, 0x5c, 0xe5, 0x8b, 0x20, 0xf8, 0x42, 0x1c, 0x5c, 0x70, 0xa7, diff --git a/vendor/github.com/prometheus/prometheus/prompb/rpc.pb.gw.go b/vendor/github.com/prometheus/prometheus/prompb/rpc.pb.gw.go index fd1526d..e35ae4e 100644 --- a/vendor/github.com/prometheus/prometheus/prompb/rpc.pb.gw.go +++ b/vendor/github.com/prometheus/prometheus/prompb/rpc.pb.gw.go @@ -58,7 +58,11 @@ func request_Admin_DeleteSeries_0(ctx context.Context, marshaler runtime.Marshal var protoReq SeriesDeleteRequest var metadata runtime.ServerMetadata - if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && err != io.EOF { + newReader, berr := utilities.IOReaderFactory(req.Body) + if berr != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) + } + if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } @@ -77,14 +81,14 @@ func RegisterAdminHandlerFromEndpoint(ctx context.Context, mux *runtime.ServeMux defer func() { if err != nil { if cerr := conn.Close(); cerr != nil { - grpclog.Printf("Failed to close conn to %s: %v", endpoint, cerr) + grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr) } return } go func() { <-ctx.Done() if cerr := conn.Close(); cerr != nil { - grpclog.Printf("Failed to close conn to %s: %v", endpoint, cerr) + grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr) } }() }() @@ -98,8 +102,8 @@ func RegisterAdminHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc return RegisterAdminHandlerClient(ctx, mux, NewAdminClient(conn)) } -// RegisterAdminHandler registers the http handlers for service Admin to "mux". 
-// The handlers forward requests to the grpc endpoint over the given implementation of "AdminClient". +// RegisterAdminHandlerClient registers the http handlers for service Admin +// to "mux". The handlers forward requests to the grpc endpoint over the given implementation of "AdminClient". // Note: the gRPC framework executes interceptors within the gRPC handler. If the passed in "AdminClient" // doesn't go through the normal gRPC flow (creating a gRPC client etc.) then it will be up to the passed in // "AdminClient" to call the correct interceptors. @@ -108,15 +112,6 @@ func RegisterAdminHandlerClient(ctx context.Context, mux *runtime.ServeMux, clie mux.Handle("POST", pattern_Admin_TSDBSnapshot_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(req.Context()) defer cancel() - if cn, ok := w.(http.CloseNotifier); ok { - go func(done <-chan struct{}, closed <-chan bool) { - select { - case <-done: - case <-closed: - cancel() - } - }(ctx.Done(), cn.CloseNotify()) - } inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) rctx, err := runtime.AnnotateContext(ctx, mux, req) if err != nil { @@ -137,15 +132,6 @@ func RegisterAdminHandlerClient(ctx context.Context, mux *runtime.ServeMux, clie mux.Handle("POST", pattern_Admin_TSDBCleanTombstones_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(req.Context()) defer cancel() - if cn, ok := w.(http.CloseNotifier); ok { - go func(done <-chan struct{}, closed <-chan bool) { - select { - case <-done: - case <-closed: - cancel() - } - }(ctx.Done(), cn.CloseNotify()) - } inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) rctx, err := runtime.AnnotateContext(ctx, mux, req) if err != nil { @@ -166,15 +152,6 @@ func RegisterAdminHandlerClient(ctx context.Context, mux *runtime.ServeMux, clie mux.Handle("POST", pattern_Admin_DeleteSeries_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(req.Context()) defer cancel() - if cn, ok := w.(http.CloseNotifier); ok { - go func(done <-chan struct{}, closed <-chan bool) { - select { - case <-done: - case <-closed: - cancel() - } - }(ctx.Done(), cn.CloseNotify()) - } inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) rctx, err := runtime.AnnotateContext(ctx, mux, req) if err != nil { diff --git a/vendor/github.com/prometheus/prometheus/prompb/types.pb.go b/vendor/github.com/prometheus/prometheus/prompb/types.pb.go index 75b0178..7cecce1 100644 --- a/vendor/github.com/prometheus/prometheus/prompb/types.pb.go +++ b/vendor/github.com/prometheus/prometheus/prompb/types.pb.go @@ -7,6 +7,8 @@ import proto "github.com/gogo/protobuf/proto" import fmt "fmt" import math "math" +import encoding_binary "encoding/binary" + import io "io" // Reference imports to suppress errors if they are not otherwise used. @@ -14,6 +16,12 @@ var _ = proto.Marshal var _ = fmt.Errorf var _ = math.Inf +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package + type LabelMatcher_Type int32 const ( @@ -39,17 +47,50 @@ var LabelMatcher_Type_value = map[string]int32{ func (x LabelMatcher_Type) String() string { return proto.EnumName(LabelMatcher_Type_name, int32(x)) } -func (LabelMatcher_Type) EnumDescriptor() ([]byte, []int) { return fileDescriptorTypes, []int{4, 0} } +func (LabelMatcher_Type) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_types_38f70661e771add3, []int{4, 0} +} type Sample struct { - Value float64 `protobuf:"fixed64,1,opt,name=value,proto3" json:"value,omitempty"` - Timestamp int64 `protobuf:"varint,2,opt,name=timestamp,proto3" json:"timestamp,omitempty"` + Value float64 `protobuf:"fixed64,1,opt,name=value,proto3" json:"value,omitempty"` + Timestamp int64 `protobuf:"varint,2,opt,name=timestamp,proto3" json:"timestamp,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Sample) Reset() { *m = Sample{} } +func (m *Sample) String() string { return proto.CompactTextString(m) } +func (*Sample) ProtoMessage() {} +func (*Sample) Descriptor() ([]byte, []int) { + return fileDescriptor_types_38f70661e771add3, []int{0} +} +func (m *Sample) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Sample) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Sample.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (dst *Sample) XXX_Merge(src proto.Message) { + xxx_messageInfo_Sample.Merge(dst, src) +} +func (m *Sample) XXX_Size() int { + return m.Size() +} +func (m *Sample) XXX_DiscardUnknown() { + xxx_messageInfo_Sample.DiscardUnknown(m) } -func (m *Sample) Reset() { *m = Sample{} } -func (m *Sample) String() string { return proto.CompactTextString(m) } -func (*Sample) ProtoMessage() {} -func (*Sample) Descriptor() ([]byte, []int) { return fileDescriptorTypes, []int{0} } +var xxx_messageInfo_Sample proto.InternalMessageInfo func (m *Sample) GetValue() float64 { if m != nil { @@ -66,23 +107,54 @@ func (m *Sample) GetTimestamp() int64 { } type TimeSeries struct { - Labels []*Label `protobuf:"bytes,1,rep,name=labels" json:"labels,omitempty"` - Samples []*Sample `protobuf:"bytes,2,rep,name=samples" json:"samples,omitempty"` + Labels []Label `protobuf:"bytes,1,rep,name=labels,proto3" json:"labels"` + Samples []Sample `protobuf:"bytes,2,rep,name=samples,proto3" json:"samples"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *TimeSeries) Reset() { *m = TimeSeries{} } +func (m *TimeSeries) String() string { return proto.CompactTextString(m) } +func (*TimeSeries) ProtoMessage() {} +func (*TimeSeries) Descriptor() ([]byte, []int) { + return fileDescriptor_types_38f70661e771add3, []int{1} +} +func (m *TimeSeries) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *TimeSeries) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_TimeSeries.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (dst *TimeSeries) XXX_Merge(src proto.Message) { + xxx_messageInfo_TimeSeries.Merge(dst, src) +} +func (m *TimeSeries) XXX_Size() int { + return m.Size() +} +func (m 
*TimeSeries) XXX_DiscardUnknown() { + xxx_messageInfo_TimeSeries.DiscardUnknown(m) } -func (m *TimeSeries) Reset() { *m = TimeSeries{} } -func (m *TimeSeries) String() string { return proto.CompactTextString(m) } -func (*TimeSeries) ProtoMessage() {} -func (*TimeSeries) Descriptor() ([]byte, []int) { return fileDescriptorTypes, []int{1} } +var xxx_messageInfo_TimeSeries proto.InternalMessageInfo -func (m *TimeSeries) GetLabels() []*Label { +func (m *TimeSeries) GetLabels() []Label { if m != nil { return m.Labels } return nil } -func (m *TimeSeries) GetSamples() []*Sample { +func (m *TimeSeries) GetSamples() []Sample { if m != nil { return m.Samples } @@ -90,14 +162,45 @@ func (m *TimeSeries) GetSamples() []*Sample { } type Label struct { - Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` - Value string `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + Value string `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Label) Reset() { *m = Label{} } +func (m *Label) String() string { return proto.CompactTextString(m) } +func (*Label) ProtoMessage() {} +func (*Label) Descriptor() ([]byte, []int) { + return fileDescriptor_types_38f70661e771add3, []int{2} +} +func (m *Label) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Label) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Label.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (dst *Label) XXX_Merge(src proto.Message) { + xxx_messageInfo_Label.Merge(dst, src) +} +func (m *Label) XXX_Size() int { + return m.Size() +} +func (m *Label) XXX_DiscardUnknown() { + xxx_messageInfo_Label.DiscardUnknown(m) } -func (m *Label) Reset() { *m = Label{} } -func (m *Label) String() string { return proto.CompactTextString(m) } -func (*Label) ProtoMessage() {} -func (*Label) Descriptor() ([]byte, []int) { return fileDescriptorTypes, []int{2} } +var xxx_messageInfo_Label proto.InternalMessageInfo func (m *Label) GetName() string { if m != nil { @@ -114,13 +217,44 @@ func (m *Label) GetValue() string { } type Labels struct { - Labels []Label `protobuf:"bytes,1,rep,name=labels" json:"labels"` + Labels []Label `protobuf:"bytes,1,rep,name=labels,proto3" json:"labels"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } -func (m *Labels) Reset() { *m = Labels{} } -func (m *Labels) String() string { return proto.CompactTextString(m) } -func (*Labels) ProtoMessage() {} -func (*Labels) Descriptor() ([]byte, []int) { return fileDescriptorTypes, []int{3} } +func (m *Labels) Reset() { *m = Labels{} } +func (m *Labels) String() string { return proto.CompactTextString(m) } +func (*Labels) ProtoMessage() {} +func (*Labels) Descriptor() ([]byte, []int) { + return fileDescriptor_types_38f70661e771add3, []int{3} +} +func (m *Labels) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Labels) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Labels.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func 
(dst *Labels) XXX_Merge(src proto.Message) { + xxx_messageInfo_Labels.Merge(dst, src) +} +func (m *Labels) XXX_Size() int { + return m.Size() +} +func (m *Labels) XXX_DiscardUnknown() { + xxx_messageInfo_Labels.DiscardUnknown(m) +} + +var xxx_messageInfo_Labels proto.InternalMessageInfo func (m *Labels) GetLabels() []Label { if m != nil { @@ -131,15 +265,46 @@ func (m *Labels) GetLabels() []Label { // Matcher specifies a rule, which can match or set of labels or not. type LabelMatcher struct { - Type LabelMatcher_Type `protobuf:"varint,1,opt,name=type,proto3,enum=prometheus.LabelMatcher_Type" json:"type,omitempty"` - Name string `protobuf:"bytes,2,opt,name=name,proto3" json:"name,omitempty"` - Value string `protobuf:"bytes,3,opt,name=value,proto3" json:"value,omitempty"` + Type LabelMatcher_Type `protobuf:"varint,1,opt,name=type,proto3,enum=prometheus.LabelMatcher_Type" json:"type,omitempty"` + Name string `protobuf:"bytes,2,opt,name=name,proto3" json:"name,omitempty"` + Value string `protobuf:"bytes,3,opt,name=value,proto3" json:"value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *LabelMatcher) Reset() { *m = LabelMatcher{} } +func (m *LabelMatcher) String() string { return proto.CompactTextString(m) } +func (*LabelMatcher) ProtoMessage() {} +func (*LabelMatcher) Descriptor() ([]byte, []int) { + return fileDescriptor_types_38f70661e771add3, []int{4} +} +func (m *LabelMatcher) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *LabelMatcher) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_LabelMatcher.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (dst *LabelMatcher) XXX_Merge(src proto.Message) { + xxx_messageInfo_LabelMatcher.Merge(dst, src) +} +func (m *LabelMatcher) XXX_Size() int { + return m.Size() +} +func (m *LabelMatcher) XXX_DiscardUnknown() { + xxx_messageInfo_LabelMatcher.DiscardUnknown(m) } -func (m *LabelMatcher) Reset() { *m = LabelMatcher{} } -func (m *LabelMatcher) String() string { return proto.CompactTextString(m) } -func (*LabelMatcher) ProtoMessage() {} -func (*LabelMatcher) Descriptor() ([]byte, []int) { return fileDescriptorTypes, []int{4} } +var xxx_messageInfo_LabelMatcher proto.InternalMessageInfo func (m *LabelMatcher) GetType() LabelMatcher_Type { if m != nil { @@ -163,16 +328,47 @@ func (m *LabelMatcher) GetValue() string { } type ReadHints struct { - StepMs int64 `protobuf:"varint,1,opt,name=step_ms,json=stepMs,proto3" json:"step_ms,omitempty"` - Func string `protobuf:"bytes,2,opt,name=func,proto3" json:"func,omitempty"` - StartMs int64 `protobuf:"varint,3,opt,name=start_ms,json=startMs,proto3" json:"start_ms,omitempty"` - EndMs int64 `protobuf:"varint,4,opt,name=end_ms,json=endMs,proto3" json:"end_ms,omitempty"` + StepMs int64 `protobuf:"varint,1,opt,name=step_ms,json=stepMs,proto3" json:"step_ms,omitempty"` + Func string `protobuf:"bytes,2,opt,name=func,proto3" json:"func,omitempty"` + StartMs int64 `protobuf:"varint,3,opt,name=start_ms,json=startMs,proto3" json:"start_ms,omitempty"` + EndMs int64 `protobuf:"varint,4,opt,name=end_ms,json=endMs,proto3" json:"end_ms,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ReadHints) Reset() { *m = ReadHints{} } +func (m *ReadHints) String() string { 
return proto.CompactTextString(m) } +func (*ReadHints) ProtoMessage() {} +func (*ReadHints) Descriptor() ([]byte, []int) { + return fileDescriptor_types_38f70661e771add3, []int{5} +} +func (m *ReadHints) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ReadHints) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ReadHints.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (dst *ReadHints) XXX_Merge(src proto.Message) { + xxx_messageInfo_ReadHints.Merge(dst, src) +} +func (m *ReadHints) XXX_Size() int { + return m.Size() +} +func (m *ReadHints) XXX_DiscardUnknown() { + xxx_messageInfo_ReadHints.DiscardUnknown(m) } -func (m *ReadHints) Reset() { *m = ReadHints{} } -func (m *ReadHints) String() string { return proto.CompactTextString(m) } -func (*ReadHints) ProtoMessage() {} -func (*ReadHints) Descriptor() ([]byte, []int) { return fileDescriptorTypes, []int{5} } +var xxx_messageInfo_ReadHints proto.InternalMessageInfo func (m *ReadHints) GetStepMs() int64 { if m != nil { @@ -229,13 +425,17 @@ func (m *Sample) MarshalTo(dAtA []byte) (int, error) { if m.Value != 0 { dAtA[i] = 0x9 i++ - i = encodeFixed64Types(dAtA, i, uint64(math.Float64bits(float64(m.Value)))) + encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.Value)))) + i += 8 } if m.Timestamp != 0 { dAtA[i] = 0x10 i++ i = encodeVarintTypes(dAtA, i, uint64(m.Timestamp)) } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } return i, nil } @@ -278,6 +478,9 @@ func (m *TimeSeries) MarshalTo(dAtA []byte) (int, error) { i += n } } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } return i, nil } @@ -308,6 +511,9 @@ func (m *Label) MarshalTo(dAtA []byte) (int, error) { i = encodeVarintTypes(dAtA, i, uint64(len(m.Value))) i += copy(dAtA[i:], m.Value) } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } return i, nil } @@ -338,6 +544,9 @@ func (m *Labels) MarshalTo(dAtA []byte) (int, error) { i += n } } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } return i, nil } @@ -373,6 +582,9 @@ func (m *LabelMatcher) MarshalTo(dAtA []byte) (int, error) { i = encodeVarintTypes(dAtA, i, uint64(len(m.Value))) i += copy(dAtA[i:], m.Value) } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } return i, nil } @@ -412,27 +624,12 @@ func (m *ReadHints) MarshalTo(dAtA []byte) (int, error) { i++ i = encodeVarintTypes(dAtA, i, uint64(m.EndMs)) } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } return i, nil } -func encodeFixed64Types(dAtA []byte, offset int, v uint64) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - dAtA[offset+4] = uint8(v >> 32) - dAtA[offset+5] = uint8(v >> 40) - dAtA[offset+6] = uint8(v >> 48) - dAtA[offset+7] = uint8(v >> 56) - return offset + 8 -} -func encodeFixed32Types(dAtA []byte, offset int, v uint32) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - return offset + 4 -} func encodeVarintTypes(dAtA []byte, offset int, v uint64) int { for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) @@ -443,6 +640,9 @@ func encodeVarintTypes(dAtA []byte, offset int, v uint64) int { return offset + 1 } func 
(m *Sample) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if m.Value != 0 { @@ -451,10 +651,16 @@ func (m *Sample) Size() (n int) { if m.Timestamp != 0 { n += 1 + sovTypes(uint64(m.Timestamp)) } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } return n } func (m *TimeSeries) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if len(m.Labels) > 0 { @@ -469,10 +675,16 @@ func (m *TimeSeries) Size() (n int) { n += 1 + l + sovTypes(uint64(l)) } } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } return n } func (m *Label) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l l = len(m.Name) @@ -483,10 +695,16 @@ func (m *Label) Size() (n int) { if l > 0 { n += 1 + l + sovTypes(uint64(l)) } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } return n } func (m *Labels) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if len(m.Labels) > 0 { @@ -495,10 +713,16 @@ func (m *Labels) Size() (n int) { n += 1 + l + sovTypes(uint64(l)) } } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } return n } func (m *LabelMatcher) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if m.Type != 0 { @@ -512,10 +736,16 @@ func (m *LabelMatcher) Size() (n int) { if l > 0 { n += 1 + l + sovTypes(uint64(l)) } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } return n } func (m *ReadHints) Size() (n int) { + if m == nil { + return 0 + } var l int _ = l if m.StepMs != 0 { @@ -531,6 +761,9 @@ func (m *ReadHints) Size() (n int) { if m.EndMs != 0 { n += 1 + sovTypes(uint64(m.EndMs)) } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } return n } @@ -584,15 +817,8 @@ func (m *Sample) Unmarshal(dAtA []byte) error { if (iNdEx + 8) > l { return io.ErrUnexpectedEOF } + v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) iNdEx += 8 - v = uint64(dAtA[iNdEx-8]) - v |= uint64(dAtA[iNdEx-7]) << 8 - v |= uint64(dAtA[iNdEx-6]) << 16 - v |= uint64(dAtA[iNdEx-5]) << 24 - v |= uint64(dAtA[iNdEx-4]) << 32 - v |= uint64(dAtA[iNdEx-3]) << 40 - v |= uint64(dAtA[iNdEx-2]) << 48 - v |= uint64(dAtA[iNdEx-1]) << 56 m.Value = float64(math.Float64frombits(v)) case 2: if wireType != 0 { @@ -625,6 +851,7 @@ func (m *Sample) Unmarshal(dAtA []byte) error { if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) iNdEx += skippy } } @@ -689,7 +916,7 @@ func (m *TimeSeries) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Labels = append(m.Labels, &Label{}) + m.Labels = append(m.Labels, Label{}) if err := m.Labels[len(m.Labels)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } @@ -720,7 +947,7 @@ func (m *TimeSeries) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Samples = append(m.Samples, &Sample{}) + m.Samples = append(m.Samples, Sample{}) if err := m.Samples[len(m.Samples)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } @@ -737,6 +964,7 @@ func (m *TimeSeries) Unmarshal(dAtA []byte) error { if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) iNdEx += skippy } } @@ -845,6 +1073,7 @@ func (m *Label) Unmarshal(dAtA []byte) error { if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
iNdEx += skippy } } @@ -926,6 +1155,7 @@ func (m *Labels) Unmarshal(dAtA []byte) error { if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) iNdEx += skippy } } @@ -1053,6 +1283,7 @@ func (m *LabelMatcher) Unmarshal(dAtA []byte) error { if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) iNdEx += skippy } } @@ -1189,6 +1420,7 @@ func (m *ReadHints) Unmarshal(dAtA []byte) error { if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) iNdEx += skippy } } @@ -1303,32 +1535,32 @@ var ( ErrIntOverflowTypes = fmt.Errorf("proto: integer overflow") ) -func init() { proto.RegisterFile("types.proto", fileDescriptorTypes) } - -var fileDescriptorTypes = []byte{ - // 380 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x84, 0x92, 0xcd, 0x0a, 0xd3, 0x40, - 0x14, 0x85, 0x3b, 0x49, 0x3a, 0xb1, 0xb7, 0x22, 0x71, 0xa8, 0x18, 0x45, 0x6b, 0xc9, 0x2a, 0x82, - 0xa4, 0xb4, 0xae, 0x04, 0x57, 0x85, 0x80, 0x8b, 0x46, 0xe8, 0xb4, 0x2b, 0x37, 0x32, 0x6d, 0xaf, - 0x6d, 0x24, 0x3f, 0x43, 0x66, 0x2a, 0xf4, 0x41, 0x7c, 0xa7, 0x2e, 0x7d, 0x02, 0x91, 0x3e, 0x89, - 0xcc, 0xa4, 0xb5, 0x05, 0x05, 0x77, 0xf7, 0x9c, 0x7b, 0x2e, 0xe7, 0x4b, 0x18, 0xe8, 0xeb, 0xa3, - 0x44, 0x95, 0xc8, 0xa6, 0xd6, 0x35, 0x03, 0xd9, 0xd4, 0x25, 0xea, 0x3d, 0x1e, 0xd4, 0xf3, 0xc1, - 0xae, 0xde, 0xd5, 0xd6, 0x1e, 0x9b, 0xa9, 0x4d, 0x44, 0xef, 0x81, 0x2e, 0x45, 0x29, 0x0b, 0x64, - 0x03, 0xe8, 0x7e, 0x13, 0xc5, 0x01, 0x43, 0x32, 0x22, 0x31, 0xe1, 0xad, 0x60, 0x2f, 0xa0, 0xa7, - 0xf3, 0x12, 0x95, 0x16, 0xa5, 0x0c, 0x9d, 0x11, 0x89, 0x5d, 0x7e, 0x33, 0x22, 0x04, 0x58, 0xe5, - 0x25, 0x2e, 0xb1, 0xc9, 0x51, 0xb1, 0xd7, 0x40, 0x0b, 0xb1, 0xc6, 0x42, 0x85, 0x64, 0xe4, 0xc6, - 0xfd, 0xe9, 0xe3, 0xe4, 0x56, 0x9f, 0xcc, 0xcd, 0x86, 0x5f, 0x02, 0xec, 0x0d, 0xf8, 0xca, 0xd6, - 0xaa, 0xd0, 0xb1, 0x59, 0x76, 0x9f, 0x6d, 0x89, 0xf8, 0x35, 0x12, 0x4d, 0xa0, 0x6b, 0xcf, 0x19, - 0x03, 0xaf, 0x12, 0x65, 0x8b, 0xd8, 0xe3, 0x76, 0xbe, 0x71, 0x3b, 0xd6, 0x6c, 0x45, 0xf4, 0x0e, - 0xe8, 0xbc, 0xad, 0x1a, 0xff, 0x97, 0x6a, 0xe6, 0x9d, 0x7e, 0xbe, 0xea, 0x5c, 0xd9, 0xa2, 0xef, - 0x04, 0x1e, 0x5a, 0x3f, 0x13, 0x7a, 0xb3, 0xc7, 0x86, 0x4d, 0xc0, 0x33, 0x3f, 0xd5, 0xb6, 0x3e, - 0x9a, 0xbe, 0xfc, 0xeb, 0xfe, 0x92, 0x4b, 0x56, 0x47, 0x89, 0xdc, 0x46, 0xff, 0x80, 0x3a, 0xff, - 0x02, 0x75, 0xef, 0x41, 0x63, 0xf0, 0xcc, 0x1d, 0xa3, 0xe0, 0xa4, 0x8b, 0xa0, 0xc3, 0x7c, 0x70, - 0x3f, 0xa6, 0x8b, 0x80, 0x18, 0x83, 0xa7, 0x81, 0x63, 0x0d, 0x9e, 0x06, 0x6e, 0xf4, 0x15, 0x7a, - 0x1c, 0xc5, 0xf6, 0x43, 0x5e, 0x69, 0xc5, 0x9e, 0x82, 0xaf, 0x34, 0xca, 0xcf, 0xa5, 0xb2, 0x58, - 0x2e, 0xa7, 0x46, 0x66, 0xca, 0x34, 0x7f, 0x39, 0x54, 0x9b, 0x6b, 0xb3, 0x99, 0xd9, 0x33, 0x78, - 0xa0, 0xb4, 0x68, 0xb4, 0x49, 0xbb, 0x36, 0xed, 0x5b, 0x9d, 0x29, 0xf6, 0x04, 0x28, 0x56, 0x5b, - 0xb3, 0xf0, 0xec, 0xa2, 0x8b, 0xd5, 0x36, 0x53, 0xb3, 0xc1, 0xe9, 0x3c, 0x24, 0x3f, 0xce, 0x43, - 0xf2, 0xeb, 0x3c, 0x24, 0x9f, 0xa8, 0xf9, 0x62, 0xb9, 0x5e, 0x53, 0xfb, 0x66, 0xde, 0xfe, 0x0e, - 0x00, 0x00, 0xff, 0xff, 0x32, 0x7f, 0x5e, 0x49, 0x64, 0x02, 0x00, 0x00, +func init() { proto.RegisterFile("types.proto", fileDescriptor_types_38f70661e771add3) } + +var fileDescriptor_types_38f70661e771add3 = []byte{ + // 379 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x52, 0x4f, 
0xef, 0xd2, 0x40, + 0x14, 0x64, 0xdb, 0xb2, 0x95, 0x87, 0x31, 0x75, 0x83, 0xb1, 0x1a, 0x45, 0xd2, 0x53, 0x4f, 0x25, + 0xe0, 0xc9, 0xc4, 0x13, 0x49, 0x13, 0x0f, 0xd4, 0x84, 0x85, 0x93, 0x17, 0xb3, 0xc0, 0x13, 0x6a, + 0xfa, 0x67, 0xed, 0x2e, 0x26, 0x7c, 0x10, 0xbf, 0x13, 0x47, 0x3f, 0x81, 0x31, 0x7c, 0x12, 0xb3, + 0x5b, 0x10, 0x12, 0xbd, 0xfc, 0x6e, 0x6f, 0xe6, 0xcd, 0x74, 0xe6, 0x35, 0x0b, 0x7d, 0x7d, 0x94, + 0xa8, 0x12, 0xd9, 0xd4, 0xba, 0x66, 0x20, 0x9b, 0xba, 0x44, 0xbd, 0xc7, 0x83, 0x7a, 0x39, 0xd8, + 0xd5, 0xbb, 0xda, 0xd2, 0x63, 0x33, 0xb5, 0x8a, 0xe8, 0x3d, 0xd0, 0xa5, 0x28, 0x65, 0x81, 0x6c, + 0x00, 0xdd, 0xef, 0xa2, 0x38, 0x60, 0x48, 0x46, 0x24, 0x26, 0xbc, 0x05, 0xec, 0x15, 0xf4, 0x74, + 0x5e, 0xa2, 0xd2, 0xa2, 0x94, 0xa1, 0x33, 0x22, 0xb1, 0xcb, 0x6f, 0x44, 0xf4, 0x0d, 0x60, 0x95, + 0x97, 0xb8, 0xc4, 0x26, 0x47, 0xc5, 0xc6, 0x40, 0x0b, 0xb1, 0xc6, 0x42, 0x85, 0x64, 0xe4, 0xc6, + 0xfd, 0xe9, 0xd3, 0xe4, 0x16, 0x9f, 0xcc, 0xcd, 0x66, 0xe6, 0x9d, 0x7e, 0xbd, 0xe9, 0xf0, 0x8b, + 0x8c, 0x4d, 0xc1, 0x57, 0x36, 0x5c, 0x85, 0x8e, 0x75, 0xb0, 0x7b, 0x47, 0xdb, 0xeb, 0x62, 0xb9, + 0x0a, 0xa3, 0x09, 0x74, 0xed, 0xa7, 0x18, 0x03, 0xaf, 0x12, 0x65, 0x5b, 0xb7, 0xc7, 0xed, 0x7c, + 0xbb, 0xc1, 0xb1, 0x64, 0x0b, 0xa2, 0x77, 0x40, 0xe7, 0x6d, 0xe0, 0x43, 0x1b, 0x46, 0x3f, 0x08, + 0x3c, 0xb6, 0x7c, 0x26, 0xf4, 0x66, 0x8f, 0x0d, 0x9b, 0x80, 0x67, 0x7e, 0xb0, 0x4d, 0x7d, 0x32, + 0x7d, 0xfd, 0x8f, 0xff, 0xa2, 0x4b, 0x56, 0x47, 0x89, 0xdc, 0x4a, 0xff, 0x16, 0x75, 0xfe, 0x57, + 0xd4, 0xbd, 0x2f, 0x1a, 0x83, 0x67, 0x7c, 0x8c, 0x82, 0x93, 0x2e, 0x82, 0x0e, 0xf3, 0xc1, 0xfd, + 0x98, 0x2e, 0x02, 0x62, 0x08, 0x9e, 0x06, 0x8e, 0x25, 0x78, 0x1a, 0xb8, 0xd1, 0x57, 0xe8, 0x71, + 0x14, 0xdb, 0x0f, 0x79, 0xa5, 0x15, 0x7b, 0x0e, 0xbe, 0xd2, 0x28, 0x3f, 0x97, 0xca, 0xd6, 0x72, + 0x39, 0x35, 0x30, 0x53, 0x26, 0xf9, 0xcb, 0xa1, 0xda, 0x5c, 0x93, 0xcd, 0xcc, 0x5e, 0xc0, 0x23, + 0xa5, 0x45, 0xa3, 0x8d, 0xda, 0xb5, 0x6a, 0xdf, 0xe2, 0x4c, 0xb1, 0x67, 0x40, 0xb1, 0xda, 0x9a, + 0x85, 0x67, 0x17, 0x5d, 0xac, 0xb6, 0x99, 0x9a, 0x0d, 0x4e, 0xe7, 0x21, 0xf9, 0x79, 0x1e, 0x92, + 0xdf, 0xe7, 0x21, 0xf9, 0x44, 0xcd, 0xc5, 0x72, 0xbd, 0xa6, 0xf6, 0xfd, 0xbc, 0xfd, 0x13, 0x00, + 0x00, 0xff, 0xff, 0xe3, 0x8a, 0x88, 0x84, 0x70, 0x02, 0x00, 0x00, } diff --git a/vendor/github.com/prometheus/prometheus/prompb/types.proto b/vendor/github.com/prometheus/prometheus/prompb/types.proto index c453045..6fc84c4 100644 --- a/vendor/github.com/prometheus/prometheus/prompb/types.proto +++ b/vendor/github.com/prometheus/prometheus/prompb/types.proto @@ -24,8 +24,8 @@ message Sample { } message TimeSeries { - repeated Label labels = 1; - repeated Sample samples = 2; + repeated Label labels = 1 [(gogoproto.nullable) = false]; + repeated Sample samples = 2 [(gogoproto.nullable) = false]; } message Label { diff --git a/vendor/github.com/prometheus/prometheus/storage/buffer.go b/vendor/github.com/prometheus/prometheus/storage/buffer.go index 55a0c60..1b1d09b 100644 --- a/vendor/github.com/prometheus/prometheus/storage/buffer.go +++ b/vendor/github.com/prometheus/prometheus/storage/buffer.go @@ -59,11 +59,7 @@ func (b *BufferedSeriesIterator) Reset(it SeriesIterator) { // ReduceDelta lowers the buffered time delta, for the current SeriesIterator only. func (b *BufferedSeriesIterator) ReduceDelta(delta int64) bool { - if delta > b.buf.delta { - return false - } - b.buf.delta = delta - return true + return b.buf.reduceDelta(delta) } // PeekBack returns the nth previous element of the iterator. 
If there is none buffered, @@ -83,7 +79,7 @@ func (b *BufferedSeriesIterator) Seek(t int64) bool { t0 := t - b.buf.delta // If the delta would cause us to seek backwards, preserve the buffer - // and just continue regular advancment while filling the buffer on the way. + // and just continue regular advancement while filling the buffer on the way. if t0 > b.lastTime { b.buf.reset() @@ -162,7 +158,7 @@ func (r *sampleRing) reset() { r.f = 0 } -// Returns the current iterator. Invalidates previously retuned iterators. +// Returns the current iterator. Invalidates previously returned iterators. func (r *sampleRing) iterator() SeriesIterator { r.it.r = r r.it.i = -1 @@ -222,13 +218,39 @@ func (r *sampleRing) add(t int64, v float64) { r.l++ // Free head of the buffer of samples that just fell out of the range. - for r.buf[r.f].t < t-r.delta { + tmin := t - r.delta + for r.buf[r.f].t < tmin { + r.f++ + if r.f >= l { + r.f -= l + } + r.l-- + } +} + +// reduceDelta lowers the buffered time delta, dropping any samples that are +// out of the new delta range. +func (r *sampleRing) reduceDelta(delta int64) bool { + if delta > r.delta { + return false + } + r.delta = delta + + if r.l == 0 { + return true + } + + // Free head of the buffer of samples that just fell out of the range. + l := len(r.buf) + tmin := r.buf[r.i].t - delta + for r.buf[r.f].t < tmin { r.f++ if r.f >= l { r.f -= l } r.l-- } + return true } // nthLast returns the nth most recent element added to the ring. diff --git a/vendor/github.com/prometheus/prometheus/storage/fanout.go b/vendor/github.com/prometheus/prometheus/storage/fanout.go index 3ab9943..81ae99d 100644 --- a/vendor/github.com/prometheus/prometheus/storage/fanout.go +++ b/vendor/github.com/prometheus/prometheus/storage/fanout.go @@ -16,10 +16,12 @@ package storage import ( "container/heap" "context" + "sort" "strings" "github.com/go-kit/kit/log" "github.com/go-kit/kit/log/level" + "github.com/pkg/errors" "github.com/prometheus/common/model" "github.com/prometheus/prometheus/pkg/labels" ) @@ -66,23 +68,23 @@ func (f *fanout) Querier(ctx context.Context, mint, maxt int64) (Querier, error) queriers := make([]Querier, 0, 1+len(f.secondaries)) // Add primary querier - querier, err := f.primary.Querier(ctx, mint, maxt) + primaryQuerier, err := f.primary.Querier(ctx, mint, maxt) if err != nil { return nil, err } - queriers = append(queriers, querier) + queriers = append(queriers, primaryQuerier) // Add secondary queriers for _, storage := range f.secondaries { querier, err := storage.Querier(ctx, mint, maxt) if err != nil { - NewMergeQuerier(queriers).Close() + NewMergeQuerier(primaryQuerier, queriers).Close() return nil, err } queriers = append(queriers, querier) } - return NewMergeQuerier(queriers), nil + return NewMergeQuerier(primaryQuerier, queriers), nil } func (f *fanout) Appender() (Appender, error) { @@ -188,14 +190,18 @@ func (f *fanoutAppender) Rollback() (err error) { // mergeQuerier implements Querier. type mergeQuerier struct { - queriers []Querier + primaryQuerier Querier + queriers []Querier + + failedQueriers map[Querier]struct{} + setQuerierMap map[SeriesSet]Querier } // NewMergeQuerier returns a new Querier that merges results of input queriers. // NB NewMergeQuerier will return NoopQuerier if no queriers are passed to it, // and will filter NoopQueriers from its arguments, in order to reduce overhead // when only one querier is passed. 
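// NOTE (editor's illustration, not part of the vendored patch): with the signature changes below,
// NewMergeQuerier takes an explicit primary querier and Select additionally returns Warnings;
// per the new merge logic, errors from non-primary (remote) queriers are surfaced as warnings
// instead of failing the whole query. A minimal sketch under those assumptions; how the primary
// and secondary queriers are obtained is left out and hypothetical here.
//
// package example
//
// import (
// 	"fmt"
//
// 	"github.com/prometheus/prometheus/pkg/labels"
// 	"github.com/prometheus/prometheus/storage"
// )
//
// func queryUp(primary storage.Querier, secondaries []storage.Querier) error {
// 	// The queriers slice passed to NewMergeQuerier includes the primary itself.
// 	q := storage.NewMergeQuerier(primary, append([]storage.Querier{primary}, secondaries...))
// 	defer q.Close()
//
// 	matcher := &labels.Matcher{Type: labels.MatchEqual, Name: "__name__", Value: "up"}
// 	set, warnings, err := q.Select(nil, matcher)
// 	if err != nil {
// 		return err // only a primary-querier failure aborts the query
// 	}
// 	for _, w := range warnings {
// 		fmt.Println("warning from secondary querier:", w)
// 	}
// 	for set.Next() {
// 		fmt.Println(set.At().Labels())
// 	}
// 	return set.Err()
// }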
-func NewMergeQuerier(queriers []Querier) Querier { +func NewMergeQuerier(primaryQuerier Querier, queriers []Querier) Querier { filtered := make([]Querier, 0, len(queriers)) for _, querier := range queriers { if querier != NoopQuerier() { @@ -203,6 +209,9 @@ func NewMergeQuerier(queriers []Querier) Querier { } } + setQuerierMap := make(map[SeriesSet]Querier) + failedQueriers := make(map[Querier]struct{}) + switch len(filtered) { case 0: return NoopQuerier() @@ -210,22 +219,37 @@ func NewMergeQuerier(queriers []Querier) Querier { return filtered[0] default: return &mergeQuerier{ - queriers: filtered, + primaryQuerier: primaryQuerier, + queriers: filtered, + failedQueriers: failedQueriers, + setQuerierMap: setQuerierMap, } } } // Select returns a set of series that matches the given label matchers. -func (q *mergeQuerier) Select(params *SelectParams, matchers ...*labels.Matcher) (SeriesSet, error) { +func (q *mergeQuerier) Select(params *SelectParams, matchers ...*labels.Matcher) (SeriesSet, Warnings, error) { seriesSets := make([]SeriesSet, 0, len(q.queriers)) + var warnings Warnings for _, querier := range q.queriers { - set, err := querier.Select(params, matchers...) + set, wrn, err := querier.Select(params, matchers...) + q.setQuerierMap[set] = querier + if wrn != nil { + warnings = append(warnings, wrn...) + } if err != nil { - return nil, err + q.failedQueriers[querier] = struct{}{} + // If the error source isn't the primary querier, return the error as a warning and continue. + if querier != q.primaryQuerier { + warnings = append(warnings, err) + continue + } else { + return nil, nil, err + } } seriesSets = append(seriesSets, set) } - return NewMergeSeriesSet(seriesSets), nil + return NewMergeSeriesSet(seriesSets, q), warnings, nil } // LabelValues returns all potential values for a label name. @@ -241,6 +265,11 @@ func (q *mergeQuerier) LabelValues(name string) ([]string, error) { return mergeStringSlices(results), nil } +func (q *mergeQuerier) IsFailedSet(set SeriesSet) bool { + _, isFailedQuerier := q.failedQueriers[q.setQuerierMap[set]] + return isFailedQuerier +} + func mergeStringSlices(ss [][]string) []string { switch len(ss) { case 0: @@ -280,6 +309,28 @@ func mergeTwoStringSlices(a, b []string) []string { return result } +// LabelNames returns all the unique label names present in the block in sorted order. +func (q *mergeQuerier) LabelNames() ([]string, error) { + labelNamesMap := make(map[string]struct{}) + for _, b := range q.queriers { + names, err := b.LabelNames() + if err != nil { + return nil, errors.Wrap(err, "LabelNames() from Querier") + } + for _, name := range names { + labelNamesMap[name] = struct{}{} + } + } + + labelNames := make([]string, 0, len(labelNamesMap)) + for name := range labelNamesMap { + labelNames = append(labelNames, name) + } + sort.Strings(labelNames) + + return labelNames, nil +} + // Close releases the resources of the Querier. func (q *mergeQuerier) Close() error { // TODO return multiple errors? @@ -298,11 +349,13 @@ type mergeSeriesSet struct { currentSets []SeriesSet heap seriesSetHeap sets []SeriesSet + + querier *mergeQuerier } // NewMergeSeriesSet returns a new series set that merges (deduplicates) // series returned by the input series sets when iterating. -func NewMergeSeriesSet(sets []SeriesSet) SeriesSet { +func NewMergeSeriesSet(sets []SeriesSet, querier *mergeQuerier) SeriesSet { if len(sets) == 1 { return sets[0] } @@ -311,34 +364,53 @@ func NewMergeSeriesSet(sets []SeriesSet) SeriesSet { // series under the cursor. 
var h seriesSetHeap for _, set := range sets { + if set == nil { + continue + } if set.Next() { heap.Push(&h, set) } } return &mergeSeriesSet{ - heap: h, - sets: sets, + heap: h, + sets: sets, + querier: querier, } } func (c *mergeSeriesSet) Next() bool { - // Firstly advance all the current series sets. If any of them have run out - // we can drop them, otherwise they should be inserted back into the heap. - for _, set := range c.currentSets { - if set.Next() { - heap.Push(&c.heap, set) + // Run in a loop because the "next" series sets may not be valid anymore. + // If a remote querier fails, we discard all series sets from that querier. + // If, for the current label set, all the next series sets come from + // failed remote storage sources, we want to keep trying with the next label set. + for { + // Firstly advance all the current series sets. If any of them have run out + // we can drop them, otherwise they should be inserted back into the heap. + for _, set := range c.currentSets { + if set.Next() { + heap.Push(&c.heap, set) + } + } + if len(c.heap) == 0 { + return false } - } - if len(c.heap) == 0 { - return false - } - // Now, pop items of the heap that have equal label sets. - c.currentSets = nil - c.currentLabels = c.heap[0].At().Labels() - for len(c.heap) > 0 && labels.Equal(c.currentLabels, c.heap[0].At().Labels()) { - set := heap.Pop(&c.heap).(SeriesSet) - c.currentSets = append(c.currentSets, set) + // Now, pop items of the heap that have equal label sets. + c.currentSets = nil + c.currentLabels = c.heap[0].At().Labels() + for len(c.heap) > 0 && labels.Equal(c.currentLabels, c.heap[0].At().Labels()) { + set := heap.Pop(&c.heap).(SeriesSet) + if c.querier != nil && c.querier.IsFailedSet(set) { + continue + } + c.currentSets = append(c.currentSets, set) + } + + // As long as the current set contains at least 1 set, + // then it should return true. + if len(c.currentSets) != 0 { + break + } } return true } diff --git a/vendor/github.com/prometheus/prometheus/storage/interface.go b/vendor/github.com/prometheus/prometheus/storage/interface.go index 1d120cd..fea1c91 100644 --- a/vendor/github.com/prometheus/prometheus/storage/interface.go +++ b/vendor/github.com/prometheus/prometheus/storage/interface.go @@ -52,11 +52,14 @@ type Queryable interface { // Querier provides reading access to time series data. type Querier interface { // Select returns a set of series that matches the given label matchers. - Select(*SelectParams, ...*labels.Matcher) (SeriesSet, error) + Select(*SelectParams, ...*labels.Matcher) (SeriesSet, Warnings, error) // LabelValues returns all potential values for a label name. LabelValues(name string) ([]string, error) + // LabelNames returns all the unique label names present in the block in sorted order. + LabelNames() ([]string, error) + // Close releases the resources of the Querier. Close() error } @@ -119,3 +122,5 @@ type SeriesIterator interface { // Err returns the current error. 
Err() error } + +type Warnings []error diff --git a/vendor/github.com/prometheus/prometheus/storage/noop.go b/vendor/github.com/prometheus/prometheus/storage/noop.go index 7bf92db..1c1fed4 100644 --- a/vendor/github.com/prometheus/prometheus/storage/noop.go +++ b/vendor/github.com/prometheus/prometheus/storage/noop.go @@ -26,14 +26,18 @@ func NoopQuerier() Querier { return noopQuerier{} } -func (noopQuerier) Select(*SelectParams, ...*labels.Matcher) (SeriesSet, error) { - return NoopSeriesSet(), nil +func (noopQuerier) Select(*SelectParams, ...*labels.Matcher) (SeriesSet, Warnings, error) { + return NoopSeriesSet(), nil, nil } func (noopQuerier) LabelValues(name string) ([]string, error) { return nil, nil } +func (noopQuerier) LabelNames() ([]string, error) { + return nil, nil +} + func (noopQuerier) Close() error { return nil } diff --git a/vendor/github.com/prometheus/prometheus/storage/tsdb/tsdb.go b/vendor/github.com/prometheus/prometheus/storage/tsdb/tsdb.go index 92e8cad..4b45dd4 100644 --- a/vendor/github.com/prometheus/prometheus/storage/tsdb/tsdb.go +++ b/vendor/github.com/prometheus/prometheus/storage/tsdb/tsdb.go @@ -19,6 +19,7 @@ import ( "time" "unsafe" + "github.com/alecthomas/units" "github.com/go-kit/kit/log" "github.com/pkg/errors" "github.com/prometheus/client_golang/prometheus" @@ -107,9 +108,6 @@ type adapter struct { // Options of the DB storage. type Options struct { - // The interval at which the write ahead log is flushed to disc. - WALFlushInterval time.Duration - // The timestamp range of head blocks after which they get persisted. // It's the minimum duration of any persisted block. MinBlockDuration model.Duration @@ -117,13 +115,59 @@ type Options struct { // The maximum timestamp range of compacted blocks. MaxBlockDuration model.Duration + // The maximum size of each WAL segment file. + WALSegmentSize units.Base2Bytes + // Duration for how long to retain data. - Retention model.Duration + RetentionDuration model.Duration + + // Maximum number of bytes to be retained. + MaxBytes units.Base2Bytes // Disable creation and consideration of lockfile. NoLockfile bool } +var ( + startTime prometheus.GaugeFunc + headMaxTime prometheus.GaugeFunc + headMinTime prometheus.GaugeFunc +) + +func registerMetrics(db *tsdb.DB, r prometheus.Registerer) { + + startTime = prometheus.NewGaugeFunc(prometheus.GaugeOpts{ + Name: "prometheus_tsdb_lowest_timestamp_seconds", + Help: "Lowest timestamp value stored in the database.", + }, func() float64 { + bb := db.Blocks() + if len(bb) == 0 { + return float64(db.Head().MinTime()) / 1000 + } + return float64(db.Blocks()[0].Meta().MinTime) / 1000 + }) + headMinTime = prometheus.NewGaugeFunc(prometheus.GaugeOpts{ + Name: "prometheus_tsdb_head_min_time_seconds", + Help: "Minimum time bound of the head block.", + }, func() float64 { + return float64(db.Head().MinTime()) / 1000 + }) + headMaxTime = prometheus.NewGaugeFunc(prometheus.GaugeOpts{ + Name: "prometheus_tsdb_head_max_time_seconds", + Help: "Maximum timestamp of the head block.", + }, func() float64 { + return float64(db.Head().MaxTime()) / 1000 + }) + + if r != nil { + r.MustRegister( + startTime, + headMaxTime, + headMinTime, + ) + } +} + // Open returns a new storage backed by a TSDB database that is configured for Prometheus. 
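// NOTE (editor's illustration, not part of the vendored patch): the Options struct in
// storage/tsdb above drops WALFlushInterval, renames Retention to RetentionDuration, and adds
// WALSegmentSize and MaxBytes. A minimal sketch of constructing the updated Options; the
// concrete durations and sizes are assumptions chosen only for illustration.
//
// package example
//
// import (
// 	"time"
//
// 	"github.com/alecthomas/units"
// 	"github.com/prometheus/common/model"
// 	"github.com/prometheus/prometheus/storage/tsdb"
// )
//
// func exampleOptions() *tsdb.Options {
// 	return &tsdb.Options{
// 		RetentionDuration: model.Duration(15 * 24 * time.Hour), // was `Retention` before this bump
// 		MaxBytes:          512 * units.MiB,                     // new size-based retention limit
// 		WALSegmentSize:    128 * units.MiB,                     // new maximum WAL segment file size
// 		MinBlockDuration:  model.Duration(2 * time.Hour),
// 		MaxBlockDuration:  model.Duration(2 * time.Hour),
// 		NoLockfile:        true,
// 	}
// }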
func Open(path string, l log.Logger, r prometheus.Registerer, opts *Options) (*tsdb.DB, error) { if opts.MinBlockDuration > opts.MaxBlockDuration { @@ -141,14 +185,17 @@ func Open(path string, l log.Logger, r prometheus.Registerer, opts *Options) (*t } db, err := tsdb.Open(path, l, r, &tsdb.Options{ - WALFlushInterval: 10 * time.Second, - RetentionDuration: uint64(time.Duration(opts.Retention).Seconds() * 1000), + WALSegmentSize: int(opts.WALSegmentSize), + RetentionDuration: uint64(time.Duration(opts.RetentionDuration).Seconds() * 1000), + MaxBytes: int64(opts.MaxBytes), BlockRanges: rngs, NoLockfile: opts.NoLockfile, }) if err != nil { return nil, err } + registerMetrics(db, r) + return db, nil } @@ -188,7 +235,7 @@ type querier struct { q tsdb.Querier } -func (q querier) Select(_ *storage.SelectParams, oms ...*labels.Matcher) (storage.SeriesSet, error) { +func (q querier) Select(_ *storage.SelectParams, oms ...*labels.Matcher) (storage.SeriesSet, storage.Warnings, error) { ms := make([]tsdbLabels.Matcher, 0, len(oms)) for _, om := range oms { @@ -196,12 +243,13 @@ func (q querier) Select(_ *storage.SelectParams, oms ...*labels.Matcher) (storag } set, err := q.q.Select(ms...) if err != nil { - return nil, err + return nil, nil, err } - return seriesSet{set: set}, nil + return seriesSet{set: set}, nil, nil } func (q querier) LabelValues(name string) ([]string, error) { return q.q.LabelValues(name) } +func (q querier) LabelNames() ([]string, error) { return q.q.LabelNames() } func (q querier) Close() error { return q.q.Close() } type seriesSet struct { diff --git a/vendor/github.com/prometheus/tsdb/.travis.yml b/vendor/github.com/prometheus/tsdb/.travis.yml index d62e488..c5012a3 100644 --- a/vendor/github.com/prometheus/tsdb/.travis.yml +++ b/vendor/github.com/prometheus/tsdb/.travis.yml @@ -1,13 +1,24 @@ -sudo: false - +# sudo is enabled because it provides more memory which was needed to run go test -race +sudo: required +dist: trusty language: go +os: + - windows + - linux + - osx go: -- 1.9.x + - 1.10.x + - 1.11.x go_import_path: github.com/prometheus/tsdb -script: -- go test -timeout 5m ./... +before_install: + - if [[ "$TRAVIS_OS_NAME" == "windows" ]]; then choco install make; fi +install: + - make deps +script: + # `staticcheck` target is omitted due to linting errors + - if [[ "$TRAVIS_OS_NAME" == "windows" ]]; then make test; else make; fi diff --git a/vendor/github.com/prometheus/tsdb/README.md b/vendor/github.com/prometheus/tsdb/README.md index 7b187c0..4393d51 100644 --- a/vendor/github.com/prometheus/tsdb/README.md +++ b/vendor/github.com/prometheus/tsdb/README.md @@ -1,7 +1,14 @@ -# TSDB +# TSDB [![Build Status](https://travis-ci.org/prometheus/tsdb.svg?branch=master)](https://travis-ci.org/prometheus/tsdb) -This repository contains the new Prometheus storage layer that will be used in its 2.0 release. +[![GoDoc](https://godoc.org/github.com/prometheus/tsdb?status.svg)](https://godoc.org/github.com/prometheus/tsdb) +[![Go Report Card](https://goreportcard.com/badge/github.com/prometheus/tsdb)](https://goreportcard.com/report/github.com/prometheus/tsdb) + +This repository contains the Prometheus storage layer that is used in its 2.x releases. A writeup of its design can be found [here](https://fabxc.org/blog/2017-04-10-writing-a-tsdb/). +Based on the Gorilla TSDB [white papers](http://www.vldb.org/pvldb/vol8/p1816-teller.pdf). + +Video: [Storing 16 Bytes at Scale](https://youtu.be/b_pEevMAC3I) from [PromCon 2017](https://promcon.io/2017-munich/). 
+ See also the [format documentation](docs/format/README.md). diff --git a/vendor/github.com/prometheus/tsdb/block.go b/vendor/github.com/prometheus/tsdb/block.go index e3760df..42e11d9 100644 --- a/vendor/github.com/prometheus/tsdb/block.go +++ b/vendor/github.com/prometheus/tsdb/block.go @@ -21,6 +21,8 @@ import ( "path/filepath" "sync" + "github.com/go-kit/kit/log" + "github.com/go-kit/kit/log/level" "github.com/oklog/ulid" "github.com/pkg/errors" "github.com/prometheus/tsdb/chunkenc" @@ -82,8 +84,12 @@ type IndexReader interface { Series(ref uint64, lset *labels.Labels, chks *[]chunks.Meta) error // LabelIndices returns a list of string tuples for which a label value index exists. + // NOTE: This is deprecated. Use `LabelNames()` instead. LabelIndices() ([][]string, error) + // LabelNames returns all the unique label names present in the index in sorted order. + LabelNames() ([]string, error) + // Close releases the underlying resources of the reader. Close() error } @@ -136,6 +142,12 @@ type Appendable interface { Appender() Appender } +// SizeReader returns the size of the object in bytes. +type SizeReader interface { + // Size returns the size in bytes. + Size() int64 +} + // BlockMeta provides meta information about a block. type BlockMeta struct { // Unique identifier for the block and its contents. Changes on compaction. @@ -162,6 +174,14 @@ type BlockStats struct { NumSeries uint64 `json:"numSeries,omitempty"` NumChunks uint64 `json:"numChunks,omitempty"` NumTombstones uint64 `json:"numTombstones,omitempty"` + NumBytes int64 `json:"numBytes,omitempty"` +} + +// BlockDesc describes a block by ULID and time range. +type BlockDesc struct { + ULID ulid.ULID `json:"ulid"` + MinTime int64 `json:"minTime"` + MaxTime int64 `json:"maxTime"` } // BlockMetaCompaction holds information about compactions a block went through. @@ -171,19 +191,19 @@ type BlockMetaCompaction struct { Level int `json:"level"` // ULIDs of all source head blocks that went into the block. Sources []ulid.ULID `json:"sources,omitempty"` + // Indicates that during compaction it resulted in a block without any samples + // so it should be deleted on the next reload. + Deletable bool `json:"deletable,omitempty"` + // Short descriptions of the direct blocks that were used to create + // this block. + Parents []BlockDesc `json:"parents,omitempty"` Failed bool `json:"failed,omitempty"` } -const ( - flagNone = 0 - flagStd = 1 -) - const indexFilename = "index" const metaFilename = "meta.json" func chunkDir(dir string) string { return filepath.Join(dir, "chunks") } -func walDir(dir string) string { return filepath.Join(dir, "wal") } func readMetaFile(dir string) (*BlockMeta, error) { b, err := ioutil.ReadFile(filepath.Join(dir, metaFilename)) @@ -238,6 +258,10 @@ type Block struct { dir string meta BlockMeta + // Symbol Table Size in bytes. + // We maintain this variable to avoid recalculation everytime. + symbolTableSize uint64 + chunkr ChunkReader indexr IndexReader tombstones TombstoneReader @@ -245,7 +269,10 @@ type Block struct { // OpenBlock opens the block in the directory. It can be passed a chunk pool, which is used // to instantiate chunk structs. 
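// NOTE (editor's illustration, not part of the vendored patch): OpenBlock below now takes a
// logger as its first argument (a nil logger falls back to a nop logger) so the new block-size
// bookkeeping can report meta-file write failures, and Block gains Size() and LabelNames().
// A minimal sketch of opening a block directory under those assumptions; the directory path is
// hypothetical.
//
// package example
//
// import (
// 	"fmt"
//
// 	"github.com/go-kit/kit/log"
// 	"github.com/prometheus/tsdb"
// 	"github.com/prometheus/tsdb/chunkenc"
// )
//
// func openBlock() error {
// 	b, err := tsdb.OpenBlock(log.NewNopLogger(), "data/some-block-dir", chunkenc.NewPool())
// 	if err != nil {
// 		return err
// 	}
// 	defer b.Close()
//
// 	fmt.Println("block size in bytes:", b.Size())
// 	names, err := b.LabelNames()
// 	if err != nil {
// 		return err
// 	}
// 	fmt.Println("label names:", names)
// 	return nil
// }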
-func OpenBlock(dir string, pool chunkenc.Pool) (*Block, error) { +func OpenBlock(logger log.Logger, dir string, pool chunkenc.Pool) (*Block, error) { + if logger == nil { + logger = log.NewNopLogger() + } meta, err := readMetaFile(dir) if err != nil { return nil, err @@ -260,21 +287,41 @@ func OpenBlock(dir string, pool chunkenc.Pool) (*Block, error) { return nil, err } - tr, err := readTombstones(dir) + tr, tsr, err := readTombstones(dir) if err != nil { return nil, err } + // TODO refactor to set this at block creation time as + // that would be the logical place for a block size to be calculated. + bs := blockSize(cr, ir, tsr) + meta.Stats.NumBytes = bs + err = writeMetaFile(dir, meta) + if err != nil { + level.Warn(logger).Log("msg", "couldn't write the meta file for the block size", "block", dir, "err", err) + } + pb := &Block{ - dir: dir, - meta: *meta, - chunkr: cr, - indexr: ir, - tombstones: tr, + dir: dir, + meta: *meta, + chunkr: cr, + indexr: ir, + tombstones: tr, + symbolTableSize: ir.SymbolTableSize(), } return pb, nil } +func blockSize(rr ...SizeReader) int64 { + var total int64 + for _, r := range rr { + if r != nil { + total += r.Size() + } + } + return total +} + // Close closes the on-disk block. It blocks as long as there are readers reading from the block. func (pb *Block) Close() error { pb.mtx.Lock() @@ -302,6 +349,9 @@ func (pb *Block) Dir() string { return pb.dir } // Meta returns meta information about the block. func (pb *Block) Meta() BlockMeta { return pb.meta } +// Size returns the number of bytes that the block takes up. +func (pb *Block) Size() int64 { return pb.meta.Stats.NumBytes } + // ErrClosing is returned when a block is in the process of being closed. var ErrClosing = errors.New("block is closing") @@ -340,6 +390,11 @@ func (pb *Block) Tombstones() (TombstoneReader, error) { return blockTombstoneReader{TombstoneReader: pb.tombstones, b: pb}, nil } +// GetSymbolTableSize returns the Symbol Table Size in the index of this block. +func (pb *Block) GetSymbolTableSize() uint64 { + return pb.symbolTableSize +} + func (pb *Block) setCompactionFailed() error { pb.meta.Compaction.Failed = true return writeMetaFile(pb.dir, &pb.meta) @@ -382,6 +437,10 @@ func (r blockIndexReader) LabelIndices() ([][]string, error) { return ss, errors.Wrapf(err, "block: %s", r.b.Meta().ULID) } +func (r blockIndexReader) LabelNames() ([]string, error) { + return r.b.LabelNames() +} + func (r blockIndexReader) Close() error { r.b.pendingReaders.Done() return nil @@ -424,7 +483,7 @@ func (pb *Block) Delete(mint, maxt int64, ms ...labels.Matcher) error { ir := pb.indexr // Choose only valid postings which have chunks in the time-range. - stones := memTombstones{} + stones := newMemTombstones() var lset labels.Labels var chks []chunks.Meta @@ -437,10 +496,10 @@ Outer: } for _, chk := range chks { - if intervalOverlap(mint, maxt, chk.MinTime, chk.MaxTime) { + if chk.OverlapsClosedInterval(mint, maxt) { // Delete only until the current values and not beyond. 
tmin, tmax := clampInterval(mint, maxt, chks[0].MinTime, chks[len(chks)-1].MaxTime) - stones[p.At()] = Intervals{{tmin, tmax}} + stones.addInterval(p.At(), Interval{tmin, tmax}) continue Outer } } @@ -452,8 +511,7 @@ Outer: err = pb.tombstones.Iter(func(id uint64, ivs Intervals) error { for _, iv := range ivs { - stones.add(id, iv) - pb.meta.Stats.NumTombstones++ + stones.addInterval(id, iv) } return nil }) @@ -461,6 +519,7 @@ Outer: return err } pb.tombstones = stones + pb.meta.Stats.NumTombstones = pb.tombstones.Total() if err := writeTombstoneFile(pb.dir, pb.tombstones); err != nil { return err @@ -468,26 +527,28 @@ Outer: return writeMetaFile(pb.dir, &pb.meta) } -// CleanTombstones will rewrite the block if there any tombstones to remove them -// and returns if there was a re-write. -func (pb *Block) CleanTombstones(dest string, c Compactor) (bool, error) { +// CleanTombstones will remove the tombstones and rewrite the block (only if there are any tombstones). +// If there was a rewrite, then it returns the ULID of the new block written, else nil. +func (pb *Block) CleanTombstones(dest string, c Compactor) (*ulid.ULID, error) { numStones := 0 - pb.tombstones.Iter(func(id uint64, ivs Intervals) error { + if err := pb.tombstones.Iter(func(id uint64, ivs Intervals) error { numStones += len(ivs) - return nil - }) - + }); err != nil { + // This should never happen, as the iteration function only returns nil. + panic(err) + } if numStones == 0 { - return false, nil + return nil, nil } - if _, err := c.Write(dest, pb, pb.meta.MinTime, pb.meta.MaxTime); err != nil { - return false, err + meta := pb.Meta() + uid, err := c.Write(dest, pb, pb.meta.MinTime, pb.meta.MaxTime, &meta) + if err != nil { + return nil, err } - - return true, nil + return &uid, nil } // Snapshot creates snapshot of the block into dir. @@ -530,6 +591,18 @@ func (pb *Block) Snapshot(dir string) error { return nil } +// OverlapsClosedInterval returns true if the block overlaps [mint, maxt]. +func (pb *Block) OverlapsClosedInterval(mint, maxt int64) bool { + // The block itself is a half-open interval + // [pb.meta.MinTime, pb.meta.MaxTime). + return pb.meta.MinTime <= maxt && mint < pb.meta.MaxTime +} + +// LabelNames returns all the unique label names present in the Block in sorted order. +func (pb *Block) LabelNames() ([]string, error) { + return pb.indexr.LabelNames() +} + func clampInterval(a, b, mint, maxt int64) (int64, int64) { if a < mint { a = mint diff --git a/vendor/github.com/prometheus/tsdb/chunkenc/bstream.go b/vendor/github.com/prometheus/tsdb/chunkenc/bstream.go index a352f07..ef04d44 100644 --- a/vendor/github.com/prometheus/tsdb/chunkenc/bstream.go +++ b/vendor/github.com/prometheus/tsdb/chunkenc/bstream.go @@ -49,8 +49,8 @@ type bstream struct { count uint8 // how many bits are valid in current byte } -func newBReader(b []byte) *bstream { - return &bstream{stream: b, count: 8} +func newBReader(b []byte) bstream { + return bstream{stream: b, count: 8} } func newBWriter(size int) *bstream { diff --git a/vendor/github.com/prometheus/tsdb/chunkenc/xor.go b/vendor/github.com/prometheus/tsdb/chunkenc/xor.go index 0f14cf7..77cc320 100644 --- a/vendor/github.com/prometheus/tsdb/chunkenc/xor.go +++ b/vendor/github.com/prometheus/tsdb/chunkenc/xor.go @@ -13,7 +13,7 @@ // The code in this file was largely written by Damian Gryski as part of // https://github.com/dgryski/go-tsz and published under the license below. 
-// It was modified to accomodate reading from byte slices without modifying +// It was modified to accommodate reading from byte slices without modifying // the underlying bytes, which would panic when reading from mmaped // read-only byte slices. @@ -221,7 +221,7 @@ func (a *xorAppender) writeVDelta(v float64) { } type xorIterator struct { - br *bstream + br bstream numTotal uint16 numRead uint16 @@ -249,7 +249,7 @@ func (it *xorIterator) Next() bool { } if it.numRead == 0 { - t, err := binary.ReadVarint(it.br) + t, err := binary.ReadVarint(&it.br) if err != nil { it.err = err return false @@ -266,7 +266,7 @@ func (it *xorIterator) Next() bool { return true } if it.numRead == 1 { - tDelta, err := binary.ReadUvarint(it.br) + tDelta, err := binary.ReadUvarint(&it.br) if err != nil { it.err = err return false diff --git a/vendor/github.com/prometheus/tsdb/chunks/chunks.go b/vendor/github.com/prometheus/tsdb/chunks/chunks.go index 9c80767..f35ad2c 100644 --- a/vendor/github.com/prometheus/tsdb/chunks/chunks.go +++ b/vendor/github.com/prometheus/tsdb/chunks/chunks.go @@ -57,6 +57,12 @@ func (cm *Meta) writeHash(h hash.Hash) error { return nil } +// Returns true if the chunk overlaps [mint, maxt]. +func (cm *Meta) OverlapsClosedInterval(mint, maxt int64) bool { + // The chunk itself is a closed interval [cm.MinTime, cm.MaxTime]. + return cm.MinTime <= maxt && mint <= cm.MaxTime +} + var ( errInvalidSize = fmt.Errorf("invalid size") errInvalidFlag = fmt.Errorf("invalid flag") @@ -199,6 +205,7 @@ func (w *Writer) WriteChunks(chks ...Meta) error { for _, c := range chks { maxLen += binary.MaxVarintLen32 + 1 // The number of bytes in the chunk and its encoding. maxLen += int64(len(c.Chunk.Bytes())) + maxLen += 4 // The 4 bytes of crc32 } newsz := w.n + maxLen @@ -278,17 +285,15 @@ func (b realByteSlice) Sub(start, end int) ByteSlice { // Reader implements a SeriesReader for a serialized byte stream // of series data. type Reader struct { - // The underlying bytes holding the encoded series data. - bs []ByteSlice - - // Closers for resources behind the byte slices. - cs []io.Closer - + bs []ByteSlice // The underlying bytes holding the encoded series data. + cs []io.Closer // Closers for resources behind the byte slices. + size int64 // The total size of bytes in the reader. pool chunkenc.Pool } func newReader(bs []ByteSlice, cs []io.Closer, pool chunkenc.Pool) (*Reader, error) { cr := Reader{pool: pool, bs: bs, cs: cs} + var totalSize int64 for i, b := range cr.bs { if b.Len() < 4 { @@ -296,9 +301,11 @@ func newReader(bs []ByteSlice, cs []io.Closer, pool chunkenc.Pool) (*Reader, err } // Verify magic number. if m := binary.BigEndian.Uint32(b.Range(0, 4)); m != MagicChunks { - return nil, fmt.Errorf("invalid magic number %x", m) + return nil, errors.Errorf("invalid magic number %x", m) } + totalSize += int64(b.Len()) } + cr.size = totalSize return &cr, nil } @@ -321,9 +328,10 @@ func NewDirReader(dir string, pool chunkenc.Pool) (*Reader, error) { pool = chunkenc.NewPool() } - var bs []ByteSlice - var cs []io.Closer - + var ( + bs []ByteSlice + cs []io.Closer + ) for _, fn := range files { f, err := fileutil.OpenMmapFile(fn) if err != nil { @@ -339,6 +347,11 @@ func (s *Reader) Close() error { return closeAll(s.cs...) } +// Size returns the size of the chunks. 
+func (s *Reader) Size() int64 { + return s.size +} + func (s *Reader) Chunk(ref uint64) (chunkenc.Chunk, error) { var ( seq = int(ref >> 32) @@ -357,10 +370,10 @@ func (s *Reader) Chunk(ref uint64) (chunkenc.Chunk, error) { r := b.Range(off, off+binary.MaxVarintLen32) l, n := binary.Uvarint(r) - if n < 0 { - return nil, fmt.Errorf("reading chunk length failed") + if n <= 0 { + return nil, errors.Errorf("reading chunk length failed with %d", n) } - r = b.Range(off+n, off+n+int(l)) + r = b.Range(off+n, off+n+1+int(l)) return s.pool.Get(chunkenc.Encoding(r[0]), r[1:1+l]) } diff --git a/vendor/github.com/prometheus/tsdb/compact.go b/vendor/github.com/prometheus/tsdb/compact.go index 16a3bd7..0358a80 100644 --- a/vendor/github.com/prometheus/tsdb/compact.go +++ b/vendor/github.com/prometheus/tsdb/compact.go @@ -55,11 +55,18 @@ type Compactor interface { Plan(dir string) ([]string, error) // Write persists a Block into a directory. - Write(dest string, b BlockReader, mint, maxt int64) (ulid.ULID, error) + // No Block is written when resulting Block has 0 samples, and returns empty ulid.ULID{}. + Write(dest string, b BlockReader, mint, maxt int64, parent *BlockMeta) (ulid.ULID, error) // Compact runs compaction against the provided directories. Must // only be called concurrently with results of Plan(). - Compact(dest string, dirs ...string) (ulid.ULID, error) + // Can optionally pass a list of already open blocks, + // to avoid having to reopen them. + // When resulting Block has 0 samples + // * No block is written. + // * The source dirs are marked Deletable. + // * Returns empty ulid.ULID{}. + Compact(dest string, dirs []string, open []*Block) (ulid.ULID, error) } // LeveledCompactor implements the Compactor interface. @@ -97,7 +104,7 @@ func newCompactorMetrics(r prometheus.Registerer) *compactorMetrics { Buckets: prometheus.ExponentialBuckets(1, 2, 10), }) m.chunkSize = prometheus.NewHistogram(prometheus.HistogramOpts{ - Name: "prometheus_tsdb_compaction_chunk_size", + Name: "prometheus_tsdb_compaction_chunk_size_bytes", Help: "Final size of chunks on their first compaction", Buckets: prometheus.ExponentialBuckets(32, 1.5, 12), }) @@ -107,7 +114,7 @@ func newCompactorMetrics(r prometheus.Registerer) *compactorMetrics { Buckets: prometheus.ExponentialBuckets(4, 1.5, 12), }) m.chunkRange = prometheus.NewHistogram(prometheus.HistogramOpts{ - Name: "prometheus_tsdb_compaction_chunk_range", + Name: "prometheus_tsdb_compaction_chunk_range_seconds", Help: "Final time range of chunks on their first compaction", Buckets: prometheus.ExponentialBuckets(100, 4, 10), }) @@ -184,13 +191,12 @@ func (c *LeveledCompactor) plan(dms []dirMeta) ([]string, error) { return res, nil } - // Compact any blocks that have >5% tombstones. + // Compact any blocks with big enough time range that have >5% tombstones. 
for i := len(dms) - 1; i >= 0; i-- { meta := dms[i].meta if meta.MaxTime-meta.MinTime < c.ranges[len(c.ranges)/2] { break } - if float64(meta.Stats.NumTombstones)/float64(meta.Stats.NumSeries+1) > 0.05 { return []string{dms[i].dir}, nil } @@ -297,6 +303,11 @@ func compactBlockMetas(uid ulid.ULID, blocks ...*BlockMeta) *BlockMeta { for _, s := range b.Compaction.Sources { sources[s] = struct{}{} } + res.Compaction.Parents = append(res.Compaction.Parents, BlockDesc{ + ULID: b.ULID, + MinTime: b.MinTime, + MaxTime: b.MaxTime, + }) } res.Compaction.Level++ @@ -312,24 +323,39 @@ func compactBlockMetas(uid ulid.ULID, blocks ...*BlockMeta) *BlockMeta { // Compact creates a new block in the compactor's directory from the blocks in the // provided directories. -func (c *LeveledCompactor) Compact(dest string, dirs ...string) (uid ulid.ULID, err error) { +func (c *LeveledCompactor) Compact(dest string, dirs []string, open []*Block) (uid ulid.ULID, err error) { var ( blocks []BlockReader bs []*Block metas []*BlockMeta uids []string ) + start := time.Now() for _, d := range dirs { - b, err := OpenBlock(d, c.chunkPool) + meta, err := readMetaFile(d) if err != nil { return uid, err } - defer b.Close() - meta, err := readMetaFile(d) - if err != nil { - return uid, err + var b *Block + + // Use already open blocks if we can, to avoid + // having the index data in memory twice. + for _, o := range open { + if meta.ULID == o.Meta().ULID { + b = o + break + } + } + + if b == nil { + var err error + b, err = OpenBlock(c.logger, d, c.chunkPool) + if err != nil { + return uid, err + } + defer b.Close() } metas = append(metas, meta) @@ -344,14 +370,34 @@ func (c *LeveledCompactor) Compact(dest string, dirs ...string) (uid ulid.ULID, meta := compactBlockMetas(uid, metas...) err = c.write(dest, meta, blocks...) 
if err == nil { - level.Info(c.logger).Log( - "msg", "compact blocks", - "count", len(blocks), - "mint", meta.MinTime, - "maxt", meta.MaxTime, - "ulid", meta.ULID, - "sources", fmt.Sprintf("%v", uids), - ) + if meta.Stats.NumSamples == 0 { + for _, b := range bs { + b.meta.Compaction.Deletable = true + if err = writeMetaFile(b.dir, &b.meta); err != nil { + level.Error(c.logger).Log( + "msg", "Failed to write 'Deletable' to meta file after compaction", + "ulid", b.meta.ULID, + ) + } + } + uid = ulid.ULID{} + level.Info(c.logger).Log( + "msg", "compact blocks resulted in empty block", + "count", len(blocks), + "sources", fmt.Sprintf("%v", uids), + "duration", time.Since(start), + ) + } else { + level.Info(c.logger).Log( + "msg", "compact blocks", + "count", len(blocks), + "mint", meta.MinTime, + "maxt", meta.MaxTime, + "ulid", meta.ULID, + "sources", fmt.Sprintf("%v", uids), + "duration", time.Since(start), + ) + } return uid, nil } @@ -367,7 +413,9 @@ func (c *LeveledCompactor) Compact(dest string, dirs ...string) (uid ulid.ULID, return uid, merr } -func (c *LeveledCompactor) Write(dest string, b BlockReader, mint, maxt int64) (ulid.ULID, error) { +func (c *LeveledCompactor) Write(dest string, b BlockReader, mint, maxt int64, parent *BlockMeta) (ulid.ULID, error) { + start := time.Now() + entropy := rand.New(rand.NewSource(time.Now().UnixNano())) uid := ulid.MustNew(ulid.Now(), entropy) @@ -379,12 +427,28 @@ func (c *LeveledCompactor) Write(dest string, b BlockReader, mint, maxt int64) ( meta.Compaction.Level = 1 meta.Compaction.Sources = []ulid.ULID{uid} + if parent != nil { + meta.Compaction.Parents = []BlockDesc{ + {ULID: parent.ULID, MinTime: parent.MinTime, MaxTime: parent.MaxTime}, + } + } + err := c.write(dest, meta, b) if err != nil { return uid, err } - level.Info(c.logger).Log("msg", "write block", "mint", meta.MinTime, "maxt", meta.MaxTime, "ulid", meta.ULID) + if meta.Stats.NumSamples == 0 { + return ulid.ULID{}, nil + } + + level.Info(c.logger).Log( + "msg", "write block", + "mint", meta.MinTime, + "maxt", meta.MaxTime, + "ulid", meta.ULID, + "duration", time.Since(start), + ) return uid, nil } @@ -441,6 +505,7 @@ func (c *LeveledCompactor) write(dest string, meta *BlockMeta, blocks ...BlockRe if err != nil { return errors.Wrap(err, "open chunk writer") } + defer chunkw.Close() // Record written chunk sizes on level 1 compactions. if meta.Compaction.Level == 1 { chunkw = &instrumentedChunkWriter{ @@ -455,15 +520,15 @@ func (c *LeveledCompactor) write(dest string, meta *BlockMeta, blocks ...BlockRe if err != nil { return errors.Wrap(err, "open index writer") } + defer indexw.Close() if err := c.populateBlock(blocks, meta, indexw, chunkw); err != nil { return errors.Wrap(err, "write compaction") } - - if err = writeMetaFile(tmp, meta); err != nil { - return errors.Wrap(err, "write merged meta") - } - + // We are explicitly closing them here to check for error even + // though these are covered under defer. This is because in Windows, + // you cannot delete these unless they are closed and the defer is to + // make sure they are closed if the function exits due to an error above. if err = chunkw.Close(); err != nil { return errors.Wrap(err, "close chunk writer") } @@ -471,8 +536,20 @@ func (c *LeveledCompactor) write(dest string, meta *BlockMeta, blocks ...BlockRe return errors.Wrap(err, "close index writer") } + // Populated block is empty, so cleanup and exit. 
+ if meta.Stats.NumSamples == 0 { + if err := os.RemoveAll(tmp); err != nil { + return errors.Wrap(err, "remove tmp folder after empty block failed") + } + return nil + } + + if err = writeMetaFile(tmp, meta); err != nil { + return errors.Wrap(err, "write merged meta") + } + // Create an empty tombstones file. - if err := writeTombstoneFile(tmp, EmptyTombstoneReader()); err != nil { + if err := writeTombstoneFile(tmp, newMemTombstones()); err != nil { return errors.Wrap(err, "write new tombstones file") } @@ -507,6 +584,10 @@ func (c *LeveledCompactor) write(dest string, meta *BlockMeta, blocks ...BlockRe // populateBlock fills the index and chunk writers with new data gathered as the union // of the provided blocks. It returns meta information for the new block. func (c *LeveledCompactor) populateBlock(blocks []BlockReader, meta *BlockMeta, indexw IndexWriter, chunkw ChunkWriter) error { + if len(blocks) == 0 { + return errors.New("cannot populate block from no readers") + } + var ( set ChunkSeriesSet allSymbols = make(map[string]struct{}, 1<<16) @@ -578,13 +659,17 @@ func (c *LeveledCompactor) populateBlock(blocks []BlockReader, meta *BlockMeta, continue } - if len(dranges) > 0 { - // Re-encode the chunk to not have deleted values. - for i, chk := range chks { - if !intervalOverlap(dranges[0].Mint, dranges[len(dranges)-1].Maxt, chk.MinTime, chk.MaxTime) { + for i, chk := range chks { + if chk.MinTime < meta.MinTime || chk.MaxTime > meta.MaxTime { + return errors.Errorf("found chunk with minTime: %d maxTime: %d outside of compacted minTime: %d maxTime: %d", + chk.MinTime, chk.MaxTime, meta.MinTime, meta.MaxTime) + } + + if len(dranges) > 0 { + // Re-encode the chunk to not have deleted values. + if !chk.OverlapsClosedInterval(dranges[0].Mint, dranges[len(dranges)-1].Maxt) { continue } - newChunk := chunkenc.NewXORChunk() app, err := newChunk.Appender() if err != nil { @@ -600,6 +685,7 @@ func (c *LeveledCompactor) populateBlock(blocks []BlockReader, meta *BlockMeta, chks[i].Chunk = newChunk } } + if err := chunkw.WriteChunks(chks...); err != nil { return errors.Wrap(err, "write chunks") } @@ -615,7 +701,9 @@ func (c *LeveledCompactor) populateBlock(blocks []BlockReader, meta *BlockMeta, } for _, chk := range chks { - c.chunkPool.Put(chk.Chunk) + if err := c.chunkPool.Put(chk.Chunk); err != nil { + return errors.Wrap(err, "put chunk") + } } for _, l := range lset { @@ -772,7 +860,6 @@ func (c *compactionMerger) Next() bool { var chks []chunks.Meta d := c.compare() - // Both sets contain the current series. Chain them into a single one. if d > 0 { lset, chks, c.intervals = c.b.At() c.l = append(c.l[:0], lset...) @@ -786,8 +873,10 @@ func (c *compactionMerger) Next() bool { c.aok = c.a.Next() } else { + // Both sets contain the current series. Chain them into a single one. 
l, ca, ra := c.a.At() _, cb, rb := c.b.At() + for _, r := range rb { ra = ra.add(r) } diff --git a/vendor/github.com/prometheus/tsdb/db.go b/vendor/github.com/prometheus/tsdb/db.go index 5947961..bd3388b 100644 --- a/vendor/github.com/prometheus/tsdb/db.go +++ b/vendor/github.com/prometheus/tsdb/db.go @@ -31,20 +31,20 @@ import ( "github.com/go-kit/kit/log" "github.com/go-kit/kit/log/level" - "github.com/nightlyone/lockfile" "github.com/oklog/ulid" "github.com/pkg/errors" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/tsdb/chunkenc" "github.com/prometheus/tsdb/fileutil" "github.com/prometheus/tsdb/labels" + "github.com/prometheus/tsdb/wal" "golang.org/x/sync/errgroup" ) // DefaultOptions used for the DB. They are sane for setups using // millisecond precision timestamps. var DefaultOptions = &Options{ - WALFlushInterval: 5 * time.Second, + WALSegmentSize: wal.DefaultSegmentSize, RetentionDuration: 15 * 24 * 60 * 60 * 1000, // 15 days in milliseconds BlockRanges: ExponentialBlockRanges(int64(2*time.Hour)/1e6, 3, 5), NoLockfile: false, @@ -52,12 +52,19 @@ var DefaultOptions = &Options{ // Options of the DB storage. type Options struct { - // The interval at which the write ahead log is flushed to disk. - WALFlushInterval time.Duration + // Segments (wal files) max size + WALSegmentSize int // Duration of persisted data to keep. RetentionDuration uint64 + // Maximum number of bytes in blocks to be retained. + // 0 or less means disabled. + // NOTE: For proper storage calculations need to consider + // the size of the WAL folder which is not added when calculating + // the current size of the database. + MaxBytes int64 + // The sizes of the Blocks. BlockRanges []int64 @@ -76,7 +83,7 @@ type Appender interface { // Returned reference numbers are ephemeral and may be rejected in calls // to AddFast() at any point. Adding the sample via Add() returns a new // reference number. - // If the reference is the empty string it must not be used for caching. + // If the reference is 0 it must not be used for caching. Add(l labels.Labels, t int64, v float64) (uint64, error) // Add adds a sample pair for the referenced series. It is generally faster @@ -94,7 +101,7 @@ type Appender interface { // a hashed partition of a seriedb. type DB struct { dir string - lockf *lockfile.Lockfile + lockf fileutil.Releaser logger log.Logger metrics *dbMetrics @@ -112,19 +119,27 @@ type DB struct { donec chan struct{} stopc chan struct{} - // cmtx is used to control compactions and deletions. - cmtx sync.Mutex - compactionsEnabled bool + // cmtx ensures that compactions and deletions don't run simultaneously. + cmtx sync.Mutex + + // autoCompactMtx ensures that no compaction gets triggered while + // changing the autoCompact var. 
+ autoCompactMtx sync.Mutex + autoCompact bool } type dbMetrics struct { loadedBlocks prometheus.GaugeFunc + symbolTableSize prometheus.GaugeFunc reloads prometheus.Counter reloadsFailed prometheus.Counter compactionsTriggered prometheus.Counter - cutoffs prometheus.Counter - cutoffsFailed prometheus.Counter + timeRetentionCount prometheus.Counter + compactionsSkipped prometheus.Counter + startTime prometheus.GaugeFunc tombCleanTimer prometheus.Histogram + blocksBytes prometheus.Gauge + sizeRetentionCount prometheus.Counter } func newDBMetrics(db *DB, r prometheus.Registerer) *dbMetrics { @@ -138,6 +153,19 @@ func newDBMetrics(db *DB, r prometheus.Registerer) *dbMetrics { defer db.mtx.RUnlock() return float64(len(db.blocks)) }) + m.symbolTableSize = prometheus.NewGaugeFunc(prometheus.GaugeOpts{ + Name: "prometheus_tsdb_symbol_table_size_bytes", + Help: "Size of symbol table on disk (in bytes)", + }, func() float64 { + db.mtx.RLock() + blocks := db.blocks[:] + db.mtx.RUnlock() + symTblSize := uint64(0) + for _, b := range blocks { + symTblSize += b.GetSymbolTableSize() + } + return float64(symTblSize) + }) m.reloads = prometheus.NewCounter(prometheus.CounterOpts{ Name: "prometheus_tsdb_reloads_total", Help: "Number of times the database reloaded block data from disk.", @@ -150,28 +178,50 @@ func newDBMetrics(db *DB, r prometheus.Registerer) *dbMetrics { Name: "prometheus_tsdb_compactions_triggered_total", Help: "Total number of triggered compactions for the partition.", }) - m.cutoffs = prometheus.NewCounter(prometheus.CounterOpts{ - Name: "prometheus_tsdb_retention_cutoffs_total", - Help: "Number of times the database cut off block data from disk.", + m.timeRetentionCount = prometheus.NewCounter(prometheus.CounterOpts{ + Name: "prometheus_tsdb_time_retentions_total", + Help: "The number of times that blocks were deleted because the maximum time limit was exceeded.", }) - m.cutoffsFailed = prometheus.NewCounter(prometheus.CounterOpts{ - Name: "prometheus_tsdb_retention_cutoffs_failures_total", - Help: "Number of times the database failed to cut off block data from disk.", + m.compactionsSkipped = prometheus.NewCounter(prometheus.CounterOpts{ + Name: "prometheus_tsdb_compactions_skipped_total", + Help: "Total number of skipped compactions due to disabled auto compaction.", + }) + m.startTime = prometheus.NewGaugeFunc(prometheus.GaugeOpts{ + Name: "prometheus_tsdb_lowest_timestamp", + Help: "Lowest timestamp value stored in the database. 
The unit is decided by the library consumer.", + }, func() float64 { + db.mtx.RLock() + defer db.mtx.RUnlock() + if len(db.blocks) == 0 { + return float64(db.head.minTime) + } + return float64(db.blocks[0].meta.MinTime) }) m.tombCleanTimer = prometheus.NewHistogram(prometheus.HistogramOpts{ Name: "prometheus_tsdb_tombstone_cleanup_seconds", Help: "The time taken to recompact blocks to remove tombstones.", }) + m.blocksBytes = prometheus.NewGauge(prometheus.GaugeOpts{ + Name: "prometheus_tsdb_storage_blocks_bytes", + Help: "The number of bytes that are currently used for local storage by all blocks.", + }) + m.sizeRetentionCount = prometheus.NewCounter(prometheus.CounterOpts{ + Name: "prometheus_tsdb_size_retentions_total", + Help: "The number of times that blocks were deleted because the maximum number of bytes was exceeded.", + }) if r != nil { r.MustRegister( m.loadedBlocks, + m.symbolTableSize, m.reloads, m.reloadsFailed, - m.cutoffs, - m.cutoffsFailed, + m.timeRetentionCount, m.compactionsTriggered, + m.startTime, m.tombCleanTimer, + m.blocksBytes, + m.sizeRetentionCount, ) } return m @@ -192,16 +242,20 @@ func Open(dir string, l log.Logger, r prometheus.Registerer, opts *Options) (db if err := repairBadIndexVersion(l, dir); err != nil { return nil, err } + // Migrate old WAL if one exists. + if err := MigrateWAL(l, filepath.Join(dir, "wal")); err != nil { + return nil, errors.Wrap(err, "migrate WAL") + } db = &DB{ - dir: dir, - logger: l, - opts: opts, - compactc: make(chan struct{}, 1), - donec: make(chan struct{}), - stopc: make(chan struct{}), - compactionsEnabled: true, - chunkPool: chunkenc.NewPool(), + dir: dir, + logger: l, + opts: opts, + compactc: make(chan struct{}, 1), + donec: make(chan struct{}), + stopc: make(chan struct{}), + autoCompact: true, + chunkPool: chunkenc.NewPool(), } db.metrics = newDBMetrics(db, r) @@ -210,14 +264,11 @@ func Open(dir string, l log.Logger, r prometheus.Registerer, opts *Options) (db if err != nil { return nil, err } - lockf, err := lockfile.New(filepath.Join(absdir, "lock")) + lockf, _, err := fileutil.Flock(filepath.Join(absdir, "lock")) if err != nil { - return nil, err + return nil, errors.Wrap(err, "lock DB directory") } - if err := lockf.TryLock(); err != nil { - return nil, errors.Wrapf(err, "open DB in %s", dir) - } - db.lockf = &lockf + db.lockf = lockf } db.compactor, err = NewLeveledCompactor(r, l, opts.BlockRanges, db.chunkPool) @@ -225,18 +276,31 @@ func Open(dir string, l log.Logger, r prometheus.Registerer, opts *Options) (db return nil, errors.Wrap(err, "create leveled compactor") } - wal, err := OpenSegmentWAL(filepath.Join(dir, "wal"), l, opts.WALFlushInterval, r) + segmentSize := wal.DefaultSegmentSize + if opts.WALSegmentSize > 0 { + segmentSize = opts.WALSegmentSize + } + wlog, err := wal.NewSize(l, r, filepath.Join(dir, "wal"), segmentSize) if err != nil { return nil, err } - db.head, err = NewHead(r, l, wal, opts.BlockRanges[0]) + db.head, err = NewHead(r, l, wlog, opts.BlockRanges[0]) if err != nil { return nil, err } + if err := db.reload(); err != nil { return nil, err } - if err := db.head.ReadWAL(); err != nil { + // Set the min valid time for the ingested samples + // to be no lower than the maxt of the last block. 
+ blocks := db.Blocks() + minValidTime := int64(math.MinInt64) + if len(blocks) > 0 { + minValidTime = blocks[len(blocks)-1].Meta().MaxTime + } + + if err := db.head.Init(minValidTime); err != nil { return nil, errors.Wrap(err, "read WAL") } @@ -271,67 +335,24 @@ func (db *DB) run() { case <-db.compactc: db.metrics.compactionsTriggered.Inc() - _, err1 := db.retentionCutoff() - if err1 != nil { - level.Error(db.logger).Log("msg", "retention cutoff failed", "err", err1) - } - - _, err2 := db.compact() - if err2 != nil { - level.Error(db.logger).Log("msg", "compaction failed", "err", err2) - } - - if err1 != nil || err2 != nil { - backoff = exponential(backoff, 1*time.Second, 1*time.Minute) + db.autoCompactMtx.Lock() + if db.autoCompact { + if err := db.compact(); err != nil { + level.Error(db.logger).Log("msg", "compaction failed", "err", err) + backoff = exponential(backoff, 1*time.Second, 1*time.Minute) + } else { + backoff = 0 + } } else { - backoff = 0 + db.metrics.compactionsSkipped.Inc() } - + db.autoCompactMtx.Unlock() case <-db.stopc: return } } } -func (db *DB) retentionCutoff() (b bool, err error) { - defer func() { - if !b && err == nil { - // no data had to be cut off. - return - } - db.metrics.cutoffs.Inc() - if err != nil { - db.metrics.cutoffsFailed.Inc() - } - }() - if db.opts.RetentionDuration == 0 { - return false, nil - } - - db.mtx.RLock() - blocks := db.blocks[:] - db.mtx.RUnlock() - - if len(blocks) == 0 { - return false, nil - } - - last := blocks[len(db.blocks)-1] - - mint := last.Meta().MaxTime - int64(db.opts.RetentionDuration) - dirs, err := retentionCutoffDirs(db.dir, mint) - if err != nil { - return false, err - } - - // This will close the dirs and then delete the dirs. - if len(dirs) > 0 { - return true, db.reload(dirs...) - } - - return false, nil -} - // Appender opens a new appender against the database. func (db *DB) Appender() Appender { return dbAppender{db: db, Appender: db.head.Appender()} @@ -358,20 +379,22 @@ func (a dbAppender) Commit() error { return err } -func (db *DB) compact() (changes bool, err error) { +// Compact data if possible. After successful compaction blocks are reloaded +// which will also trigger blocks to be deleted that fall out of the retention +// window. +// If no blocks are compacted, the retention window state doesn't change. Thus, +// this is sufficient to reliably delete old data. +// Old blocks are only deleted on reload based on the new block's parent information. +// See DB.reload documentation for further information. +func (db *DB) compact() (err error) { db.cmtx.Lock() defer db.cmtx.Unlock() - - if !db.compactionsEnabled { - return false, nil - } - // Check whether we have pending head blocks that are ready to be persisted. // They have the highest priority. for { select { case <-db.stopc: - return changes, nil + return nil default: } // The head has a compactable range if 1.5 level 0 ranges are between the oldest @@ -379,23 +402,38 @@ func (db *DB) compact() (changes bool, err error) { if db.head.MaxTime()-db.head.MinTime() <= db.opts.BlockRanges[0]/2*3 { break } - mint, maxt := rangeForTimestamp(db.head.MinTime(), db.opts.BlockRanges[0]) + mint := db.head.MinTime() + maxt := rangeForTimestamp(mint, db.opts.BlockRanges[0]) // Wrap head into a range that bounds all reads to it. head := &rangeHead{ head: db.head, mint: mint, - maxt: maxt, + // We remove 1 millisecond from maxt because block + // intervals are half-open: [b.MinTime, b.MaxTime). 
But + // chunk intervals are closed: [c.MinTime, c.MaxTime]; + // so in order to make sure that overlaps are evaluated + // consistently, we explicitly remove the last value + // from the block interval here. + maxt: maxt - 1, } - if _, err = db.compactor.Write(db.dir, head, mint, maxt); err != nil { - return changes, errors.Wrap(err, "persist head block") + uid, err := db.compactor.Write(db.dir, head, mint, maxt, nil) + if err != nil { + return errors.Wrap(err, "persist head block") } - changes = true runtime.GC() if err := db.reload(); err != nil { - return changes, errors.Wrap(err, "reload blocks") + return errors.Wrap(err, "reload blocks") + } + if (uid == ulid.ULID{}) { + // Compaction resulted in an empty block. + // Head truncating during db.reload() depends on the persisted blocks and + // in this case no new block will be persisted so manually truncate the head. + if err = db.head.Truncate(maxt); err != nil { + return errors.Wrap(err, "head truncate failed (in compact)") + } } runtime.GC() } @@ -404,7 +442,7 @@ func (db *DB) compact() (changes bool, err error) { for { plan, err := db.compactor.Plan(db.dir) if err != nil { - return changes, errors.Wrap(err, "plan compaction") + return errors.Wrap(err, "plan compaction") } if len(plan) == 0 { break @@ -412,151 +450,231 @@ func (db *DB) compact() (changes bool, err error) { select { case <-db.stopc: - return changes, nil + return nil default: } - if _, err := db.compactor.Compact(db.dir, plan...); err != nil { - return changes, errors.Wrapf(err, "compact %s", plan) + if _, err := db.compactor.Compact(db.dir, plan, db.blocks); err != nil { + return errors.Wrapf(err, "compact %s", plan) } - changes = true runtime.GC() - if err := db.reload(plan...); err != nil { - return changes, errors.Wrap(err, "reload blocks") + if err := db.reload(); err != nil { + return errors.Wrap(err, "reload blocks") } runtime.GC() } - return changes, nil + return nil } -// retentionCutoffDirs returns all directories of blocks in dir that are strictly -// before mint. -func retentionCutoffDirs(dir string, mint int64) ([]string, error) { - df, err := fileutil.OpenDir(dir) - if err != nil { - return nil, errors.Wrapf(err, "open directory") +func (db *DB) getBlock(id ulid.ULID) (*Block, bool) { + for _, b := range db.blocks { + if b.Meta().ULID == id { + return b, true + } } - defer df.Close() + return nil, false +} - dirs, err := blockDirs(dir) +// reload blocks and trigger head truncation if new blocks appeared. +// Blocks that are obsolete due to replacement or retention will be deleted. +func (db *DB) reload() (err error) { + defer func() { + if err != nil { + db.metrics.reloadsFailed.Inc() + } + db.metrics.reloads.Inc() + }() + + loadable, corrupted, err := db.openBlocks() if err != nil { - return nil, errors.Wrapf(err, "list block dirs %s", dir) + return err } - delDirs := []string{} + deletable := db.deletableBlocks(loadable) - for _, dir := range dirs { - meta, err := readMetaFile(dir) - if err != nil { - return nil, errors.Wrapf(err, "read block meta %s", dir) + // Corrupted blocks that have been replaced by parents can be safely ignored and deleted. + // This makes it resilient against the process crashing towards the end of a compaction. + // Creation of a new block and deletion of its parents cannot happen atomically. + // By creating blocks with their parents, we can pick up the deletion where it left off during a crash. 
+ for _, block := range loadable { + for _, b := range block.Meta().Compaction.Parents { + delete(corrupted, b.ULID) + deletable[b.ULID] = nil } - // The first block we encounter marks that we crossed the boundary - // of deletable blocks. - if meta.MaxTime >= mint { - break + } + if len(corrupted) > 0 { + return errors.Wrap(err, "unexpected corrupted block") + } + + // All deletable blocks should not be loaded. + var ( + bb []*Block + blocksSize int64 + ) + for _, block := range loadable { + if _, ok := deletable[block.Meta().ULID]; ok { + deletable[block.Meta().ULID] = block + continue } + bb = append(bb, block) + blocksSize += block.Size() - delDirs = append(delDirs, dir) } + loadable = bb + db.metrics.blocksBytes.Set(float64(blocksSize)) - return delDirs, nil -} + sort.Slice(loadable, func(i, j int) bool { + return loadable[i].Meta().MaxTime < loadable[j].Meta().MaxTime + }) + if err := validateBlockSequence(loadable); err != nil { + return errors.Wrap(err, "invalid block sequence") + } -func (db *DB) getBlock(id ulid.ULID) (*Block, bool) { - for _, b := range db.blocks { - if b.Meta().ULID == id { - return b, true + // Swap new blocks first for subsequently created readers to be seen. + db.mtx.Lock() + oldBlocks := db.blocks + db.blocks = loadable + db.mtx.Unlock() + + for _, b := range oldBlocks { + if _, ok := deletable[b.Meta().ULID]; ok { + deletable[b.Meta().ULID] = b } } - return nil, false -} -func stringsContain(set []string, elem string) bool { - for _, e := range set { - if elem == e { - return true - } + if err := db.deleteBlocks(deletable); err != nil { + return err } - return false -} -// reload on-disk blocks and trigger head truncation if new blocks appeared. It takes -// a list of block directories which should be deleted during reload. -func (db *DB) reload(deleteable ...string) (err error) { - defer func() { - if err != nil { - db.metrics.reloadsFailed.Inc() - } - db.metrics.reloads.Inc() - }() + // Garbage collect data in the head if the most recent persisted block + // covers data of its current time range. + if len(loadable) == 0 { + return nil + } + maxt := loadable[len(loadable)-1].Meta().MaxTime + + return errors.Wrap(db.head.Truncate(maxt), "head truncate failed") +} + +func (db *DB) openBlocks() (blocks []*Block, corrupted map[ulid.ULID]error, err error) { dirs, err := blockDirs(db.dir) if err != nil { - return errors.Wrap(err, "find blocks") + return nil, nil, errors.Wrap(err, "find blocks") } - var ( - blocks []*Block - exist = map[ulid.ULID]struct{}{} - ) + corrupted = make(map[ulid.ULID]error) for _, dir := range dirs { meta, err := readMetaFile(dir) if err != nil { - return errors.Wrapf(err, "read meta information %s", dir) - } - // If the block is pending for deletion, don't add it to the new block set. - if stringsContain(deleteable, dir) { + level.Error(db.logger).Log("msg", "not a block dir", "dir", dir) continue } - b, ok := db.getBlock(meta.ULID) + // See if we already have the block in memory or open it otherwise. + block, ok := db.getBlock(meta.ULID) if !ok { - b, err = OpenBlock(dir, db.chunkPool) + block, err = OpenBlock(db.logger, dir, db.chunkPool) if err != nil { - return errors.Wrapf(err, "open block %s", dir) + corrupted[meta.ULID] = err + continue } } + blocks = append(blocks, block) + } + return blocks, corrupted, nil +} + +// deletableBlocks returns all blocks past retention policy. 
+func (db *DB) deletableBlocks(blocks []*Block) map[ulid.ULID]*Block { + deletable := make(map[ulid.ULID]*Block) - blocks = append(blocks, b) - exist[meta.ULID] = struct{}{} + // Sort the blocks by time - newest to oldest (largest to smallest timestamp). + // This ensures that the retentions will remove the oldest blocks. + sort.Slice(blocks, func(i, j int) bool { + return blocks[i].Meta().MaxTime > blocks[j].Meta().MaxTime + }) + + for _, block := range blocks { + if block.Meta().Compaction.Deletable { + deletable[block.Meta().ULID] = block + } } - if err := validateBlockSequence(blocks); err != nil { - return errors.Wrap(err, "invalid block sequence") + for ulid, block := range db.beyondTimeRetention(blocks) { + deletable[ulid] = block } - // Swap in new blocks first for subsequently created readers to be seen. - // Then close previous blocks, which may block for pending readers to complete. - db.mtx.Lock() - oldBlocks := db.blocks - db.blocks = blocks - db.mtx.Unlock() + for ulid, block := range db.beyondSizeRetention(blocks) { + deletable[ulid] = block + } - for _, b := range oldBlocks { - if _, ok := exist[b.Meta().ULID]; ok { - continue - } - if err := b.Close(); err != nil { - level.Warn(db.logger).Log("msg", "closing block failed", "err", err) - } - if err := os.RemoveAll(b.Dir()); err != nil { - level.Warn(db.logger).Log("msg", "deleting block failed", "err", err) + return deletable +} + +func (db *DB) beyondTimeRetention(blocks []*Block) (deleteable map[ulid.ULID]*Block) { + // Time retention is disabled or no blocks to work with. + if len(db.blocks) == 0 || db.opts.RetentionDuration == 0 { + return + } + + deleteable = make(map[ulid.ULID]*Block) + for i, block := range blocks { + // The difference between the first block and this block is larger than + // the retention period so any blocks after that are added as deleteable. + if i > 0 && blocks[0].Meta().MaxTime-block.Meta().MaxTime > int64(db.opts.RetentionDuration) { + for _, b := range blocks[i:] { + deleteable[b.meta.ULID] = b + } + db.metrics.timeRetentionCount.Inc() + break } } + return deleteable +} - // Garbage collect data in the head if the most recent persisted block - // covers data of its current time range. - if len(blocks) == 0 { - return nil +func (db *DB) beyondSizeRetention(blocks []*Block) (deleteable map[ulid.ULID]*Block) { + // Size retention is disabled or no blocks to work with. + if len(db.blocks) == 0 || db.opts.MaxBytes <= 0 { + return } - maxt := blocks[len(blocks)-1].Meta().MaxTime - return errors.Wrap(db.head.Truncate(maxt), "head truncate failed") + deleteable = make(map[ulid.ULID]*Block) + blocksSize := int64(0) + for i, block := range blocks { + blocksSize += block.Size() + if blocksSize > db.opts.MaxBytes { + // Add this and all following blocks for deletion. + for _, b := range blocks[i:] { + deleteable[b.meta.ULID] = b + } + db.metrics.sizeRetentionCount.Inc() + break + } + } + return deleteable } -// ValidateBlockSequence returns error if given block meta files indicate that some blocks overlaps within sequence. +// deleteBlocks closes and deletes blocks from the disk. +// When the map contains a non nil block object it means it is loaded in memory +// so needs to be closed first as it might need to wait for pending readers to complete. 
+func (db *DB) deleteBlocks(blocks map[ulid.ULID]*Block) error { + for ulid, block := range blocks { + if block != nil { + if err := block.Close(); err != nil { + level.Warn(db.logger).Log("msg", "closing block failed", "err", err) + } + } + if err := os.RemoveAll(filepath.Join(db.dir, ulid.String())); err != nil { + return errors.Wrapf(err, "delete obsolete block %s", ulid) + } + } + return nil +} + +// validateBlockSequence returns error if given block meta files indicate that some blocks overlaps within sequence. func validateBlockSequence(bs []*Block) error { if len(bs) <= 1 { return nil @@ -613,10 +731,6 @@ func OverlappingBlocks(bm []BlockMeta) Overlaps { if len(bm) <= 1 { return nil } - sort.Slice(bm, func(i, j int) bool { - return bm[i].MinTime < bm[j].MinTime - }) - var ( overlaps [][]BlockMeta @@ -716,27 +830,27 @@ func (db *DB) Close() error { merr.Add(g.Wait()) if db.lockf != nil { - merr.Add(db.lockf.Unlock()) + merr.Add(db.lockf.Release()) } merr.Add(db.head.Close()) return merr.Err() } -// DisableCompactions disables compactions. +// DisableCompactions disables auto compactions. func (db *DB) DisableCompactions() { - db.cmtx.Lock() - defer db.cmtx.Unlock() + db.autoCompactMtx.Lock() + defer db.autoCompactMtx.Unlock() - db.compactionsEnabled = false + db.autoCompact = false level.Info(db.logger).Log("msg", "compactions disabled") } -// EnableCompactions enables compactions. +// EnableCompactions enables auto compactions. func (db *DB) EnableCompactions() { - db.cmtx.Lock() - defer db.cmtx.Unlock() + db.autoCompactMtx.Lock() + defer db.autoCompactMtx.Unlock() - db.compactionsEnabled = true + db.autoCompact = true level.Info(db.logger).Log("msg", "compactions enabled") } @@ -766,7 +880,7 @@ func (db *DB) Snapshot(dir string, withHead bool) error { if !withHead { return nil } - _, err := db.compactor.Write(dir, db.head, db.head.MinTime(), db.head.MaxTime()) + _, err := db.compactor.Write(dir, db.head, db.head.MinTime(), db.head.MaxTime(), nil) return errors.Wrap(err, "snapshot head block") } @@ -779,13 +893,16 @@ func (db *DB) Querier(mint, maxt int64) (Querier, error) { defer db.mtx.RUnlock() for _, b := range db.blocks { - m := b.Meta() - if intervalOverlap(mint, maxt, m.MinTime, m.MaxTime) { + if b.OverlapsClosedInterval(mint, maxt) { blocks = append(blocks, b) } } if maxt >= db.head.MinTime() { - blocks = append(blocks, db.head) + blocks = append(blocks, &rangeHead{ + head: db.head, + mint: mint, + maxt: maxt, + }) } sq := &querier{ @@ -806,9 +923,8 @@ func (db *DB) Querier(mint, maxt int64) (Querier, error) { return sq, nil } -func rangeForTimestamp(t int64, width int64) (mint, maxt int64) { - mint = (t / width) * width - return mint, mint + width +func rangeForTimestamp(t int64, width int64) (maxt int64) { + return (t/width)*width + width } // Delete implements deletion of metrics. It only has atomicity guarantees on a per-block basis. @@ -822,8 +938,7 @@ func (db *DB) Delete(mint, maxt int64, ms ...labels.Matcher) error { defer db.mtx.RUnlock() for _, b := range db.blocks { - m := b.Meta() - if intervalOverlap(mint, maxt, m.MinTime, m.MaxTime) { + if b.OverlapsClosedInterval(mint, maxt) { g.Go(func(b *Block) func() error { return func() error { return b.Delete(mint, maxt, ms...) } }(b)) @@ -836,39 +951,39 @@ func (db *DB) Delete(mint, maxt int64, ms ...labels.Matcher) error { } // CleanTombstones re-writes any blocks with tombstones. 
-func (db *DB) CleanTombstones() error { +func (db *DB) CleanTombstones() (err error) { db.cmtx.Lock() defer db.cmtx.Unlock() start := time.Now() defer db.metrics.tombCleanTimer.Observe(time.Since(start).Seconds()) + newUIDs := []ulid.ULID{} + defer func() { + // If any error is caused, we need to delete all the new directory created. + if err != nil { + for _, uid := range newUIDs { + dir := filepath.Join(db.Dir(), uid.String()) + if err := os.RemoveAll(dir); err != nil { + level.Error(db.logger).Log("msg", "failed to delete block after failed `CleanTombstones`", "dir", dir, "err", err) + } + } + } + }() + db.mtx.RLock() blocks := db.blocks[:] db.mtx.RUnlock() - deleted := []string{} for _, b := range blocks { - ok, err := b.CleanTombstones(db.Dir(), db.compactor) - if err != nil { - return errors.Wrapf(err, "clean tombstones: %s", b.Dir()) - } - - if ok { - deleted = append(deleted, b.Dir()) + if uid, er := b.CleanTombstones(db.Dir(), db.compactor); er != nil { + err = errors.Wrapf(er, "clean tombstones: %s", b.Dir()) + return err + } else if uid != nil { // New block was created. + newUIDs = append(newUIDs, *uid) } } - - if len(deleted) == 0 { - return nil - } - - return errors.Wrap(db.reload(deleted...), "reload blocks") -} - -func intervalOverlap(amin, amax, bmin, bmax int64) bool { - // Checks Overlap: http://stackoverflow.com/questions/3269434/ - return amin <= bmax && bmin <= amax + return errors.Wrap(db.reload(), "reload blocks") } func isBlockDir(fi os.FileInfo) bool { diff --git a/vendor/github.com/prometheus/tsdb/encoding_helpers.go b/vendor/github.com/prometheus/tsdb/encoding_helpers.go index ffb58b5..6dd6e7c 100644 --- a/vendor/github.com/prometheus/tsdb/encoding_helpers.go +++ b/vendor/github.com/prometheus/tsdb/encoding_helpers.go @@ -1,3 +1,16 @@ +// Copyright 2018 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + package tsdb import ( diff --git a/vendor/github.com/prometheus/tsdb/fileutil/fileutil.go b/vendor/github.com/prometheus/tsdb/fileutil/fileutil.go index c2c2584..677df8c 100644 --- a/vendor/github.com/prometheus/tsdb/fileutil/fileutil.go +++ b/vendor/github.com/prometheus/tsdb/fileutil/fileutil.go @@ -1,3 +1,16 @@ +// Copyright 2018 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + // Package fileutil provides utility methods used when dealing with the filesystem in tsdb. // It is largely copied from github.com/coreos/etcd/pkg/fileutil to avoid the // dependency chain it brings with it. 
@@ -5,10 +18,80 @@ package fileutil import ( + "io/ioutil" "os" + "path/filepath" "sort" + "strings" ) +// CopyDirs copies all directories, subdirectories and files recursively including the empty folders. +// Source and destination must be full paths. +func CopyDirs(src, dest string) error { + if err := os.MkdirAll(dest, 0777); err != nil { + return err + } + files, err := readDirs(src) + if err != nil { + return err + } + + for _, f := range files { + dp := filepath.Join(dest, f) + sp := filepath.Join(src, f) + + stat, err := os.Stat(sp) + if err != nil { + return err + } + + // Empty directories are also created. + if stat.IsDir() { + if err := os.MkdirAll(dp, 0777); err != nil { + return err + } + continue + } + + if err := copyFile(sp, dp); err != nil { + return err + } + } + return nil +} + +func copyFile(src, dest string) error { + data, err := ioutil.ReadFile(src) + if err != nil { + return err + } + + err = ioutil.WriteFile(dest, data, 0644) + if err != nil { + return err + } + return nil +} + +// readDirs reads the source directory recursively and +// returns relative paths to all files and empty directories. +func readDirs(src string) ([]string, error) { + var files []string + var err error + + err = filepath.Walk(src, func(path string, f os.FileInfo, err error) error { + relativePath := strings.TrimPrefix(path, src) + if len(relativePath) > 0 { + files = append(files, relativePath) + } + return nil + }) + if err != nil { + return nil, err + } + return files, nil +} + // ReadDir returns the filenames in the given directory in sorted order. func ReadDir(dirpath string) ([]string, error) { dir, err := os.Open(dirpath) @@ -23,3 +106,45 @@ func ReadDir(dirpath string) ([]string, error) { sort.Strings(names) return names, nil } + +// Rename safely renames a file. +func Rename(from, to string) error { + if err := os.Rename(from, to); err != nil { + return err + } + + // Directory was renamed; sync parent dir to persist rename. + pdir, err := OpenDir(filepath.Dir(to)) + if err != nil { + return err + } + + if err = Fsync(pdir); err != nil { + pdir.Close() + return err + } + return pdir.Close() +} + +// Replace moves a file or directory to a new location and deletes any previous data. +// It is not atomic. +func Replace(from, to string) error { + if err := os.RemoveAll(to); err != nil { + return err + } + if err := os.Rename(from, to); err != nil { + return err + } + + // Directory was renamed; sync parent dir to persist rename. + pdir, err := OpenDir(filepath.Dir(to)) + if err != nil { + return err + } + + if err = Fsync(pdir); err != nil { + pdir.Close() + return err + } + return pdir.Close() +} diff --git a/vendor/github.com/prometheus/tsdb/fileutil/mmap.go b/vendor/github.com/prometheus/tsdb/fileutil/mmap.go index a0c5982..26fc80c 100644 --- a/vendor/github.com/prometheus/tsdb/fileutil/mmap.go +++ b/vendor/github.com/prometheus/tsdb/fileutil/mmap.go @@ -1,3 +1,16 @@ +// Copyright 2018 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ package fileutil import ( diff --git a/vendor/github.com/prometheus/tsdb/fileutil/mmap_386.go b/vendor/github.com/prometheus/tsdb/fileutil/mmap_386.go index 156f81b..66b9d36 100644 --- a/vendor/github.com/prometheus/tsdb/fileutil/mmap_386.go +++ b/vendor/github.com/prometheus/tsdb/fileutil/mmap_386.go @@ -1,3 +1,16 @@ +// Copyright 2018 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + // +build windows package fileutil diff --git a/vendor/github.com/prometheus/tsdb/fileutil/mmap_amd64.go b/vendor/github.com/prometheus/tsdb/fileutil/mmap_amd64.go index 4025dbf..4b523bc 100644 --- a/vendor/github.com/prometheus/tsdb/fileutil/mmap_amd64.go +++ b/vendor/github.com/prometheus/tsdb/fileutil/mmap_amd64.go @@ -1,3 +1,16 @@ +// Copyright 2018 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + // +build windows package fileutil diff --git a/vendor/github.com/prometheus/tsdb/fileutil/sync_linux.go b/vendor/github.com/prometheus/tsdb/fileutil/sync_linux.go index 1145904..1bbced9 100644 --- a/vendor/github.com/prometheus/tsdb/fileutil/sync_linux.go +++ b/vendor/github.com/prometheus/tsdb/fileutil/sync_linux.go @@ -31,4 +31,4 @@ func Fsync(f *os.File) error { // to be correctly handled. func Fdatasync(f *os.File) error { return syscall.Fdatasync(int(f.Fd())) -} \ No newline at end of file +} diff --git a/vendor/github.com/prometheus/tsdb/head.go b/vendor/github.com/prometheus/tsdb/head.go index 2c7c7ec..cbc8661 100644 --- a/vendor/github.com/prometheus/tsdb/head.go +++ b/vendor/github.com/prometheus/tsdb/head.go @@ -30,6 +30,7 @@ import ( "github.com/prometheus/tsdb/chunks" "github.com/prometheus/tsdb/index" "github.com/prometheus/tsdb/labels" + "github.com/prometheus/tsdb/wal" ) var ( @@ -53,11 +54,13 @@ var ( type Head struct { chunkRange int64 metrics *headMetrics - wal WAL + wal *wal.WAL logger log.Logger appendPool sync.Pool + bytesPool sync.Pool - minTime, maxTime int64 + minTime, maxTime int64 // Current min and max of the samples included in the head. + minValidTime int64 // Mint allowed to be added to the head. It shouldn't be lower than the maxt of the last persisted block. lastSeriesID uint64 // All series addressable by their ID or hash. 
@@ -69,23 +72,30 @@ type Head struct { postings *index.MemPostings // postings lists for terms - tombstones memTombstones + tombstones *memTombstones } type headMetrics struct { - activeAppenders prometheus.Gauge - series prometheus.Gauge - seriesCreated prometheus.Counter - seriesRemoved prometheus.Counter - seriesNotFound prometheus.Counter - chunks prometheus.Gauge - chunksCreated prometheus.Gauge - chunksRemoved prometheus.Gauge - gcDuration prometheus.Summary - minTime prometheus.GaugeFunc - maxTime prometheus.GaugeFunc - samplesAppended prometheus.Counter - walTruncateDuration prometheus.Summary + activeAppenders prometheus.Gauge + series prometheus.Gauge + seriesCreated prometheus.Counter + seriesRemoved prometheus.Counter + seriesNotFound prometheus.Counter + chunks prometheus.Gauge + chunksCreated prometheus.Counter + chunksRemoved prometheus.Counter + gcDuration prometheus.Summary + minTime prometheus.GaugeFunc + maxTime prometheus.GaugeFunc + samplesAppended prometheus.Counter + walTruncateDuration prometheus.Summary + walCorruptionsTotal prometheus.Counter + headTruncateFail prometheus.Counter + headTruncateTotal prometheus.Counter + checkpointDeleteFail prometheus.Counter + checkpointDeleteTotal prometheus.Counter + checkpointCreationFail prometheus.Counter + checkpointCreationTotal prometheus.Counter } func newHeadMetrics(h *Head, r prometheus.Registerer) *headMetrics { @@ -99,27 +109,27 @@ func newHeadMetrics(h *Head, r prometheus.Registerer) *headMetrics { Name: "prometheus_tsdb_head_series", Help: "Total number of series in the head block.", }) - m.seriesCreated = prometheus.NewGauge(prometheus.GaugeOpts{ + m.seriesCreated = prometheus.NewCounter(prometheus.CounterOpts{ Name: "prometheus_tsdb_head_series_created_total", Help: "Total number of series created in the head", }) - m.seriesRemoved = prometheus.NewGauge(prometheus.GaugeOpts{ + m.seriesRemoved = prometheus.NewCounter(prometheus.CounterOpts{ Name: "prometheus_tsdb_head_series_removed_total", Help: "Total number of series removed in the head", }) m.seriesNotFound = prometheus.NewCounter(prometheus.CounterOpts{ - Name: "prometheus_tsdb_head_series_not_found", + Name: "prometheus_tsdb_head_series_not_found_total", Help: "Total number of requests for series that were not found.", }) m.chunks = prometheus.NewGauge(prometheus.GaugeOpts{ Name: "prometheus_tsdb_head_chunks", Help: "Total number of chunks in the head block.", }) - m.chunksCreated = prometheus.NewGauge(prometheus.GaugeOpts{ + m.chunksCreated = prometheus.NewCounter(prometheus.CounterOpts{ Name: "prometheus_tsdb_head_chunks_created_total", Help: "Total number of chunks created in the head", }) - m.chunksRemoved = prometheus.NewGauge(prometheus.GaugeOpts{ + m.chunksRemoved = prometheus.NewCounter(prometheus.CounterOpts{ Name: "prometheus_tsdb_head_chunks_removed_total", Help: "Total number of chunks removed in the head", }) @@ -129,13 +139,13 @@ func newHeadMetrics(h *Head, r prometheus.Registerer) *headMetrics { }) m.maxTime = prometheus.NewGaugeFunc(prometheus.GaugeOpts{ Name: "prometheus_tsdb_head_max_time", - Help: "Maximum timestamp of the head block.", + Help: "Maximum timestamp of the head block. The unit is decided by the library consumer.", }, func() float64 { return float64(h.MaxTime()) }) m.minTime = prometheus.NewGaugeFunc(prometheus.GaugeOpts{ Name: "prometheus_tsdb_head_min_time", - Help: "Minimum time bound of the head block.", + Help: "Minimum time bound of the head block. 
The unit is decided by the library consumer.", }, func() float64 { return float64(h.MinTime()) }) @@ -143,10 +153,38 @@ func newHeadMetrics(h *Head, r prometheus.Registerer) *headMetrics { Name: "prometheus_tsdb_wal_truncate_duration_seconds", Help: "Duration of WAL truncation.", }) + m.walCorruptionsTotal = prometheus.NewCounter(prometheus.CounterOpts{ + Name: "prometheus_tsdb_wal_corruptions_total", + Help: "Total number of WAL corruptions.", + }) m.samplesAppended = prometheus.NewCounter(prometheus.CounterOpts{ Name: "prometheus_tsdb_head_samples_appended_total", Help: "Total number of appended samples.", }) + m.headTruncateFail = prometheus.NewCounter(prometheus.CounterOpts{ + Name: "prometheus_tsdb_head_truncations_failed_total", + Help: "Total number of head truncations that failed.", + }) + m.headTruncateTotal = prometheus.NewCounter(prometheus.CounterOpts{ + Name: "prometheus_tsdb_head_truncations_total", + Help: "Total number of head truncations attempted.", + }) + m.checkpointDeleteFail = prometheus.NewCounter(prometheus.CounterOpts{ + Name: "prometheus_tsdb_checkpoint_deletions_failed_total", + Help: "Total number of checkpoint deletions that failed.", + }) + m.checkpointDeleteTotal = prometheus.NewCounter(prometheus.CounterOpts{ + Name: "prometheus_tsdb_checkpoint_deletions_total", + Help: "Total number of checkpoint deletions attempted.", + }) + m.checkpointCreationFail = prometheus.NewCounter(prometheus.CounterOpts{ + Name: "prometheus_tsdb_checkpoint_creations_failed_total", + Help: "Total number of checkpoint creations that failed.", + }) + m.checkpointCreationTotal = prometheus.NewCounter(prometheus.CounterOpts{ + Name: "prometheus_tsdb_checkpoint_creations_total", + Help: "Total number of checkpoint creations attempted.", + }) if r != nil { r.MustRegister( @@ -162,20 +200,24 @@ func newHeadMetrics(h *Head, r prometheus.Registerer) *headMetrics { m.maxTime, m.gcDuration, m.walTruncateDuration, + m.walCorruptionsTotal, m.samplesAppended, + m.headTruncateFail, + m.headTruncateTotal, + m.checkpointDeleteFail, + m.checkpointDeleteTotal, + m.checkpointCreationFail, + m.checkpointCreationTotal, ) } return m } // NewHead opens the head block in dir. -func NewHead(r prometheus.Registerer, l log.Logger, wal WAL, chunkRange int64) (*Head, error) { +func NewHead(r prometheus.Registerer, l log.Logger, wal *wal.WAL, chunkRange int64) (*Head, error) { if l == nil { l = log.NewNopLogger() } - if wal == nil { - wal = NopWAL() - } if chunkRange < 1 { return nil, errors.Errorf("invalid chunk range %d", chunkRange) } @@ -183,13 +225,13 @@ func NewHead(r prometheus.Registerer, l log.Logger, wal WAL, chunkRange int64) ( wal: wal, logger: l, chunkRange: chunkRange, - minTime: math.MinInt64, + minTime: math.MaxInt64, maxTime: math.MinInt64, series: newStripeSeries(), values: map[string]stringset{}, symbols: map[string]struct{}{}, postings: index.NewUnorderedMemPostings(), - tombstones: memTombstones{}, + tombstones: newMemTombstones(), } h.metrics = newHeadMetrics(h, r) @@ -200,40 +242,71 @@ func NewHead(r prometheus.Registerer, l log.Logger, wal WAL, chunkRange int64) ( // them on to other workers. // Samples before the mint timestamp are discarded. func (h *Head) processWALSamples( - mint int64, - partition, total uint64, + minValidTime int64, input <-chan []RefSample, output chan<- []RefSample, ) (unknownRefs uint64) { defer close(output) + // Mitigate lock contention in getByID. 
+ refSeries := map[uint64]*memSeries{} + + mint, maxt := int64(math.MaxInt64), int64(math.MinInt64) + for samples := range input { for _, s := range samples { - if s.T < mint || s.Ref%total != partition { + if s.T < minValidTime { continue } - ms := h.series.getByID(s.Ref) + ms := refSeries[s.Ref] if ms == nil { - unknownRefs++ - continue + ms = h.series.getByID(s.Ref) + if ms == nil { + unknownRefs++ + continue + } + refSeries[s.Ref] = ms } _, chunkCreated := ms.append(s.T, s.V) if chunkCreated { h.metrics.chunksCreated.Inc() h.metrics.chunks.Inc() } + if s.T > maxt { + maxt = s.T + } + if s.T < mint { + mint = s.T + } } output <- samples } + h.updateMinMaxTime(mint, maxt) + return unknownRefs } -// ReadWAL initializes the head by consuming the write ahead log. -func (h *Head) ReadWAL() error { - defer h.postings.EnsureOrder() - - r := h.wal.Reader() - mint := h.MinTime() +func (h *Head) updateMinMaxTime(mint, maxt int64) { + for { + lt := h.MinTime() + if mint >= lt { + break + } + if atomic.CompareAndSwapInt64(&h.minTime, lt, mint) { + break + } + } + for { + ht := h.MaxTime() + if maxt <= ht { + break + } + if atomic.CompareAndSwapInt64(&h.maxTime, ht, maxt) { + break + } + } +} +func (h *Head) loadWAL(r *wal.Reader) error { // Track number of samples that referenced a series we don't know about // for error reporting. var unknownRefs uint64 @@ -242,94 +315,198 @@ func (h *Head) ReadWAL() error { // They are connected through a ring of channels which ensures that all sample batches // read from the WAL are processed in order. var ( - wg sync.WaitGroup - n = runtime.GOMAXPROCS(0) - firstInput = make(chan []RefSample, 300) - input = firstInput + wg sync.WaitGroup + n = runtime.GOMAXPROCS(0) + inputs = make([]chan []RefSample, n) + outputs = make([]chan []RefSample, n) ) wg.Add(n) for i := 0; i < n; i++ { - output := make(chan []RefSample, 300) + outputs[i] = make(chan []RefSample, 300) + inputs[i] = make(chan []RefSample, 300) - go func(i int, input <-chan []RefSample, output chan<- []RefSample) { - unknown := h.processWALSamples(mint, uint64(i), uint64(n), input, output) + go func(input <-chan []RefSample, output chan<- []RefSample) { + unknown := h.processWALSamples(h.minValidTime, input, output) atomic.AddUint64(&unknownRefs, unknown) wg.Done() - }(i, input, output) - - // The output feeds the next worker goroutine. For the last worker, - // it feeds the initial input again to reuse the RefSample slices. - input = output + }(inputs[i], outputs[i]) } - // TODO(fabxc): series entries spread between samples can starve the sample workers. - // Even with bufferd channels, this can impact startup time with lots of series churn. - // We must not paralellize series creation itself but could make the indexing asynchronous. 
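// Illustrative sketch (not from the vendored code): minTime starts at math.MaxInt64 and
// maxTime at math.MinInt64, and both are only ever widened through compare-and-swap
// loops, so appenders and concurrent WAL-replay workers can extend the head's time range
// without taking a mutex. Names here are hypothetical stand-ins.
package sketch

import (
	"math"
	"sync/atomic"
)

type headTimes struct {
	minTime, maxTime int64
}

func newHeadTimes() *headTimes {
	return &headTimes{minTime: math.MaxInt64, maxTime: math.MinInt64}
}

func (h *headTimes) update(mint, maxt int64) {
	for {
		lt := atomic.LoadInt64(&h.minTime)
		if mint >= lt || atomic.CompareAndSwapInt64(&h.minTime, lt, mint) {
			break
		}
	}
	for {
		ht := atomic.LoadInt64(&h.maxTime)
		if maxt <= ht || atomic.CompareAndSwapInt64(&h.maxTime, ht, maxt) {
			break
		}
	}
}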
- seriesFunc := func(series []RefSeries) { - for _, s := range series { - h.getOrCreateWithID(s.Ref, s.Labels.Hash(), s.Labels) + var ( + dec RecordDecoder + series []RefSeries + samples []RefSample + tstones []Stone + err error + ) + for r.Next() { + series, samples, tstones = series[:0], samples[:0], tstones[:0] + rec := r.Record() + + switch dec.Type(rec) { + case RecordSeries: + series, err = dec.Series(rec, series) + if err != nil { + return &wal.CorruptionErr{ + Err: errors.Wrap(err, "decode series"), + Segment: r.Segment(), + Offset: r.Offset(), + } + } + for _, s := range series { + h.getOrCreateWithID(s.Ref, s.Labels.Hash(), s.Labels) - if h.lastSeriesID < s.Ref { - h.lastSeriesID = s.Ref + if h.lastSeriesID < s.Ref { + h.lastSeriesID = s.Ref + } } - } - } - samplesFunc := func(samples []RefSample) { - // We split up the samples into chunks of 5000 samples or less. - // With O(300 * #cores) in-flight sample batches, large scrapes could otherwise - // cause thousands of very large in flight buffers occupying large amounts - // of unused memory. - for len(samples) > 0 { - n := 5000 - if len(samples) < n { - n = len(samples) + case RecordSamples: + samples, err = dec.Samples(rec, samples) + s := samples + if err != nil { + return &wal.CorruptionErr{ + Err: errors.Wrap(err, "decode samples"), + Segment: r.Segment(), + Offset: r.Offset(), + } } - var buf []RefSample - select { - case buf = <-input: - default: + // We split up the samples into chunks of 5000 samples or less. + // With O(300 * #cores) in-flight sample batches, large scrapes could otherwise + // cause thousands of very large in flight buffers occupying large amounts + // of unused memory. + for len(samples) > 0 { + m := 5000 + if len(samples) < m { + m = len(samples) + } + shards := make([][]RefSample, n) + for i := 0; i < n; i++ { + var buf []RefSample + select { + case buf = <-outputs[i]: + default: + } + shards[i] = buf[:0] + } + for _, sam := range samples[:m] { + mod := sam.Ref % uint64(n) + shards[mod] = append(shards[mod], sam) + } + for i := 0; i < n; i++ { + inputs[i] <- shards[i] + } + samples = samples[m:] } - firstInput <- append(buf[:0], samples[:n]...) - samples = samples[n:] - } - } - deletesFunc := func(stones []Stone) { - for _, s := range stones { - for _, itv := range s.intervals { - if itv.Maxt < mint { - continue + samples = s // Keep whole slice for reuse. + case RecordTombstones: + tstones, err = dec.Tombstones(rec, tstones) + if err != nil { + return &wal.CorruptionErr{ + Err: errors.Wrap(err, "decode tombstones"), + Segment: r.Segment(), + Offset: r.Offset(), } - h.tombstones.add(s.ref, itv) + } + for _, s := range tstones { + for _, itv := range s.intervals { + if itv.Maxt < h.minValidTime { + continue + } + h.tombstones.addInterval(s.ref, itv) + } + } + default: + return &wal.CorruptionErr{ + Err: errors.Errorf("invalid record type %v", dec.Type(rec)), + Segment: r.Segment(), + Offset: r.Offset(), } } } + if r.Err() != nil { + return errors.Wrap(r.Err(), "read records") + } - err := r.Read(seriesFunc, samplesFunc, deletesFunc) - - // Signal termination to first worker and wait for last one to close its output channel. - close(firstInput) - for range input { + // Signal termination to each worker and wait for it to close its output channel. 
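// Illustrative sketch (not from the vendored code): decoded WAL samples are cut into
// batches of at most 5000, sharded across the n workers by series ref modulo n, and the
// per-worker output channels double as a free-list so batch slices are reused rather than
// reallocated. refSample mirrors the tsdb record type; the channel wiring is assumed to
// match the loadWAL setup above.
type refSample struct {
	Ref uint64
	T   int64
	V   float64
}

func shardSamples(samples []refSample, n int, inputs, outputs []chan []refSample) {
	for len(samples) > 0 {
		m := 5000
		if len(samples) < m {
			m = len(samples)
		}
		shards := make([][]refSample, n)
		for i := 0; i < n; i++ {
			var buf []refSample
			select {
			case buf = <-outputs[i]: // reuse a slice a worker has handed back
			default: // none available; append below will allocate
			}
			shards[i] = buf[:0]
		}
		for _, s := range samples[:m] {
			i := s.Ref % uint64(n)
			shards[i] = append(shards[i], s)
		}
		for i := 0; i < n; i++ {
			inputs[i] <- shards[i]
		}
		samples = samples[m:]
	}
}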
+ for i := 0; i < n; i++ { + close(inputs[i]) + for range outputs[i] { + } } wg.Wait() + if unknownRefs > 0 { + level.Warn(h.logger).Log("msg", "unknown series references", "count", unknownRefs) + } + return nil +} + +// Init loads data from the write ahead log and prepares the head for writes. +// It should be called before using an appender so that +// limits the ingested samples to the head min valid time. +func (h *Head) Init(minValidTime int64) error { + h.minValidTime = minValidTime + defer h.postings.EnsureOrder() + defer h.gc() // After loading the wal remove the obsolete data from the head. + + if h.wal == nil { + return nil + } + + // Backfill the checkpoint first if it exists. + dir, startFrom, err := LastCheckpoint(h.wal.Dir()) + if err != nil && err != ErrNotFound { + return errors.Wrap(err, "find last checkpoint") + } + if err == nil { + sr, err := wal.NewSegmentsReader(dir) + if err != nil { + return errors.Wrap(err, "open checkpoint") + } + defer sr.Close() + + // A corrupted checkpoint is a hard error for now and requires user + // intervention. There's likely little data that can be recovered anyway. + if err := h.loadWAL(wal.NewReader(sr)); err != nil { + return errors.Wrap(err, "backfill checkpoint") + } + startFrom++ + } + + // Backfill segments from the last checkpoint onwards + sr, err := wal.NewSegmentsRangeReader(wal.SegmentRange{Dir: h.wal.Dir(), First: startFrom, Last: -1}) if err != nil { - return errors.Wrap(err, "consume WAL") + return errors.Wrap(err, "open WAL segments") } - if unknownRefs > 0 { - level.Warn(h.logger).Log("msg", "unknown series references in WAL samples", "count", unknownRefs) + + err = h.loadWAL(wal.NewReader(sr)) + sr.Close() // Close the reader so that if there was an error the repair can remove the corrupted file under Windows. + if err == nil { + return nil + } + level.Warn(h.logger).Log("msg", "encountered WAL error, attempting repair", "err", err) + h.metrics.walCorruptionsTotal.Inc() + if err := h.wal.Repair(err); err != nil { + return errors.Wrap(err, "repair corrupted WAL") } return nil } -// Truncate removes all data before mint from the head block and truncates its WAL. -func (h *Head) Truncate(mint int64) error { - initialize := h.MinTime() == math.MinInt64 +// Truncate removes old data before mint from the head. +func (h *Head) Truncate(mint int64) (err error) { + defer func() { + if err != nil { + h.metrics.headTruncateFail.Inc() + } + }() + initialize := h.MinTime() == math.MaxInt64 - if h.MinTime() >= mint { + if h.MinTime() >= mint && !initialize { return nil } atomic.StoreInt64(&h.minTime, mint) + atomic.StoreInt64(&h.minValidTime, mint) // Ensure that max time is at least as high as min time. for h.MaxTime() < mint { @@ -342,35 +519,68 @@ func (h *Head) Truncate(mint int64) error { return nil } + h.metrics.headTruncateTotal.Inc() start := time.Now() h.gc() level.Info(h.logger).Log("msg", "head GC completed", "duration", time.Since(start)) h.metrics.gcDuration.Observe(time.Since(start).Seconds()) + if h.wal == nil { + return nil + } start = time.Now() + first, last, err := h.wal.Segments() + if err != nil { + return errors.Wrap(err, "get segment range") + } + last-- // Never consider last segment for checkpoint. + if last < 0 { + return nil // no segments yet. + } + // The lower third of segments should contain mostly obsolete samples. + // If we have less than three segments, it's not worth checkpointing yet. 
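// Illustrative sketch (not from the vendored code): only the lower third of the live WAL
// segments is considered for a checkpoint, and never the newest segment. For example,
// with segments 10..22, the newest (22) is dropped first and the boundary becomes
// 10 + (21-10)/3 = 13, so segments 10..13 are folded into the checkpoint and truncated.
func checkpointRange(first, last int) (lo, hi int, ok bool) {
	last-- // never consider the most recent segment
	if last < 0 {
		return 0, 0, false // no complete segments yet
	}
	last = first + (last-first)/3 // keep roughly the lower third
	if last <= first {
		return 0, 0, false // too few segments to be worth checkpointing
	}
	return first, last, true
}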
+ last = first + (last-first)/3 + if last <= first { + return nil + } + keep := func(id uint64) bool { return h.series.getByID(id) != nil } - if err := h.wal.Truncate(mint, keep); err == nil { - level.Info(h.logger).Log("msg", "WAL truncation completed", "duration", time.Since(start)) - } else { - level.Error(h.logger).Log("msg", "WAL truncation failed", "err", err, "duration", time.Since(start)) + h.metrics.checkpointCreationTotal.Inc() + if _, err = Checkpoint(h.wal, first, last, keep, mint); err != nil { + h.metrics.checkpointCreationFail.Inc() + return errors.Wrap(err, "create checkpoint") + } + if err := h.wal.Truncate(last + 1); err != nil { + // If truncating fails, we'll just try again at the next checkpoint. + // Leftover segments will just be ignored in the future if there's a checkpoint + // that supersedes them. + level.Error(h.logger).Log("msg", "truncating segments failed", "err", err) + } + h.metrics.checkpointDeleteTotal.Inc() + if err := DeleteCheckpoints(h.wal.Dir(), last); err != nil { + // Leftover old checkpoints do not cause problems down the line beyond + // occupying disk space. + // They will just be ignored since a higher checkpoint exists. + level.Error(h.logger).Log("msg", "delete old checkpoints", "err", err) + h.metrics.checkpointDeleteFail.Inc() } h.metrics.walTruncateDuration.Observe(time.Since(start).Seconds()) + level.Info(h.logger).Log("msg", "WAL checkpoint complete", + "first", first, "last", last, "duration", time.Since(start)) + return nil } // initTime initializes a head with the first timestamp. This only needs to be called -// for a compltely fresh head with an empty WAL. +// for a completely fresh head with an empty WAL. // Returns true if the initialization took an effect. func (h *Head) initTime(t int64) (initialized bool) { - // In the init state, the head has a high timestamp of math.MinInt64. - mint, _ := rangeForTimestamp(t, h.chunkRange) - - if !atomic.CompareAndSwapInt64(&h.minTime, math.MinInt64, mint) { + if !atomic.CompareAndSwapInt64(&h.minTime, math.MaxInt64, t) { return false } // Ensure that max time is initialized to at least the min time we just set. @@ -441,7 +651,7 @@ func (h *Head) Appender() Appender { // The head cache might not have a starting point yet. The init appender // picks up the first appended timestamp as the base. - if h.MinTime() == math.MinInt64 { + if h.MinTime() == math.MaxInt64 { return &initAppender{head: h} } return h.appender() @@ -449,11 +659,21 @@ func (h *Head) Appender() Appender { func (h *Head) appender() *headAppender { return &headAppender{ - head: h, - mint: h.MaxTime() - h.chunkRange/2, - maxt: math.MinInt64, - samples: h.getAppendBuffer(), + head: h, + // Set the minimum valid time to whichever is greater the head min valid time or the compaciton window. + // This ensures that no samples will be added within the compaction window to avoid races. + minValidTime: max(atomic.LoadInt64(&h.minValidTime), h.MaxTime()-h.chunkRange/2), + mint: math.MaxInt64, + maxt: math.MinInt64, + samples: h.getAppendBuffer(), + } +} + +func max(a, b int64) int64 { + if a > b { + return a } + return b } func (h *Head) getAppendBuffer() []RefSample { @@ -465,19 +685,34 @@ func (h *Head) getAppendBuffer() []RefSample { } func (h *Head) putAppendBuffer(b []RefSample) { + //lint:ignore SA6002 safe to ignore and actually fixing it has some performance penalty. 
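// Illustrative sketch (not from the vendored code, and assuming the pools behind
// getAppendBuffer/putAppendBuffer are sync.Pool): buffers are recycled with their length
// reset to zero but their capacity kept, and the staticcheck SA6002 warning about putting
// a slice (rather than a pointer) into the pool is deliberately ignored, as the lint
// directives above note.
package sketch

import "sync"

type bufferPool struct {
	p sync.Pool
}

func (b *bufferPool) get() []byte {
	if v := b.p.Get(); v != nil {
		return v.([]byte)
	}
	return make([]byte, 0, 1024)
}

func (b *bufferPool) put(buf []byte) {
	//lint:ignore SA6002 storing the slice header by value is an accepted trade-off here.
	b.p.Put(buf[:0]) // reset length, keep capacity for the next caller
}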
h.appendPool.Put(b[:0]) } +func (h *Head) getBytesBuffer() []byte { + b := h.bytesPool.Get() + if b == nil { + return make([]byte, 0, 1024) + } + return b.([]byte) +} + +func (h *Head) putBytesBuffer(b []byte) { + //lint:ignore SA6002 safe to ignore and actually fixing it has some performance penalty. + h.bytesPool.Put(b[:0]) +} + type headAppender struct { - head *Head - mint, maxt int64 + head *Head + minValidTime int64 // No samples below this timestamp are allowed. + mint, maxt int64 series []RefSeries samples []RefSample } func (a *headAppender) Add(lset labels.Labels, t int64, v float64) (uint64, error) { - if t < a.mint { + if t < a.minValidTime { return 0, ErrOutOfBounds } @@ -492,20 +727,24 @@ func (a *headAppender) Add(lset labels.Labels, t int64, v float64) (uint64, erro } func (a *headAppender) AddFast(ref uint64, t int64, v float64) error { - s := a.head.series.getByID(ref) + if t < a.minValidTime { + return ErrOutOfBounds + } + s := a.head.series.getByID(ref) if s == nil { return errors.Wrap(ErrNotFound, "unknown series") } s.Lock() - err := s.appendable(t, v) - s.Unlock() - - if err != nil { + if err := s.appendable(t, v); err != nil { + s.Unlock() return err } + s.pendingCommit = true + s.Unlock() + if t < a.mint { - return ErrOutOfBounds + a.mint = t } if t > a.maxt { a.maxt = t @@ -520,14 +759,42 @@ func (a *headAppender) AddFast(ref uint64, t int64, v float64) error { return nil } -func (a *headAppender) Commit() error { - defer a.Rollback() +func (a *headAppender) log() error { + if a.head.wal == nil { + return nil + } - if err := a.head.wal.LogSeries(a.series); err != nil { - return err + buf := a.head.getBytesBuffer() + defer func() { a.head.putBytesBuffer(buf) }() + + var rec []byte + var enc RecordEncoder + + if len(a.series) > 0 { + rec = enc.Series(a.series, buf) + buf = rec[:0] + + if err := a.head.wal.Log(rec); err != nil { + return errors.Wrap(err, "log series") + } } - if err := a.head.wal.LogSamples(a.samples); err != nil { - return errors.Wrap(err, "WAL log samples") + if len(a.samples) > 0 { + rec = enc.Samples(a.samples, buf) + buf = rec[:0] + + if err := a.head.wal.Log(rec); err != nil { + return errors.Wrap(err, "log samples") + } + } + return nil +} + +func (a *headAppender) Commit() error { + defer a.head.metrics.activeAppenders.Dec() + defer a.head.putAppendBuffer(a.samples) + + if err := a.log(); err != nil { + return errors.Wrap(err, "write to WAL") } total := len(a.samples) @@ -535,6 +802,7 @@ func (a *headAppender) Commit() error { for _, s := range a.samples { s.series.Lock() ok, chunkCreated := s.series.append(s.T, s.V) + s.series.pendingCommit = false s.series.Unlock() if !ok { @@ -547,25 +815,24 @@ func (a *headAppender) Commit() error { } a.head.metrics.samplesAppended.Add(float64(total)) - - for { - ht := a.head.MaxTime() - if a.maxt <= ht { - break - } - if atomic.CompareAndSwapInt64(&a.head.maxTime, ht, a.maxt) { - break - } - } + a.head.updateMinMaxTime(a.mint, a.maxt) return nil } func (a *headAppender) Rollback() error { a.head.metrics.activeAppenders.Dec() + for _, s := range a.samples { + s.series.Lock() + s.series.pendingCommit = false + s.series.Unlock() + } a.head.putAppendBuffer(a.samples) - return nil + // Series are created in the head memory regardless of rollback. Thus we have + // to log them to the WAL in any case. 
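// Illustrative sketch (not from the vendored code): a commit is write-ahead — the series
// and sample records are appended to the WAL first, and only when that succeeds are the
// samples applied to the in-memory series. Rollback still logs the series records,
// because the series objects already exist in head memory and must survive a restart.
// walLog is a hypothetical stand-in shaped like the Log call used above.
type walLog interface {
	Log(recs ...[]byte) error
}

func commit(w walLog, seriesRec, samplesRec []byte, apply func() error) error {
	if w != nil { // the head may run without a WAL
		if err := w.Log(seriesRec, samplesRec); err != nil {
			return err // nothing applied yet; the appender can still roll back
		}
	}
	return apply() // a crash after this point is recovered by replaying the WAL
}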
+ a.samples = nil + return a.log() } // Delete all samples in the range of [mint, maxt] for series that satisfy the given @@ -598,11 +865,15 @@ func (h *Head) Delete(mint, maxt int64, ms ...labels.Matcher) error { if p.Err() != nil { return p.Err() } - if err := h.wal.LogDeletes(stones); err != nil { - return err + var enc RecordEncoder + + if h.wal != nil { + if err := h.wal.Log(enc.Tombstones(stones, nil)); err != nil { + return err + } } for _, s := range stones { - h.tombstones.add(s.ref, s.intervals[0]) + h.tombstones.addInterval(s.ref, s.intervals[0]) } return nil } @@ -629,7 +900,7 @@ func (h *Head) gc() { symbols := make(map[string]struct{}) values := make(map[string]stringset, len(h.values)) - h.postings.Iter(func(t labels.Label, _ index.Postings) error { + if err := h.postings.Iter(func(t labels.Label, _ index.Postings) error { symbols[t.Name] = struct{}{} symbols[t.Value] = struct{}{} @@ -640,7 +911,10 @@ func (h *Head) gc() { } ss.set(t.Value) return nil - }) + }); err != nil { + // This should never happen, as the iteration function only returns nil. + panic(err) + } h.symMtx.Lock() @@ -691,6 +965,9 @@ func (h *Head) MaxTime() int64 { // Close flushes the WAL and closes the head. func (h *Head) Close() error { + if h.wal == nil { + return nil + } return h.wal.Close() } @@ -732,19 +1009,14 @@ func (h *headChunkReader) Chunk(ref uint64) (chunkenc.Chunk, error) { s.Lock() c := s.chunk(int(cid)) - // This means that the chunk has been garbage collected. - if c == nil { + // This means that the chunk has been garbage collected or is outside + // the specified range. + if c == nil || !c.OverlapsClosedInterval(h.mint, h.maxt) { s.Unlock() return nil, ErrNotFound } - - mint, maxt := c.minTime, c.maxTime s.Unlock() - // Do not expose chunks that are outside of the specified range. - if c == nil || !intervalOverlap(mint, maxt, h.mint, h.maxt) { - return nil, ErrNotFound - } return &safeChunk{ Chunk: c.chunk, s: s, @@ -791,44 +1063,63 @@ func (h *headIndexReader) LabelValues(names ...string) (index.StringTuples, erro if len(names) != 1 { return nil, errInvalidSize } - var sl []string h.head.symMtx.RLock() - defer h.head.symMtx.RUnlock() - + sl := make([]string, 0, len(h.head.values[names[0]])) for s := range h.head.values[names[0]] { sl = append(sl, s) } + h.head.symMtx.RUnlock() sort.Strings(sl) return index.NewStringTuples(sl, len(names)) } +// LabelNames returns all the unique label names present in the head. +func (h *headIndexReader) LabelNames() ([]string, error) { + h.head.symMtx.RLock() + defer h.head.symMtx.RUnlock() + labelNames := make([]string, 0, len(h.head.values)) + for name := range h.head.values { + if name == "" { + continue + } + labelNames = append(labelNames, name) + } + sort.Strings(labelNames) + return labelNames, nil +} + // Postings returns the postings list iterator for the label pair. func (h *headIndexReader) Postings(name, value string) (index.Postings, error) { return h.head.postings.Get(name, value), nil } func (h *headIndexReader) SortedPostings(p index.Postings) index.Postings { - ep := make([]uint64, 0, 128) + series := make([]*memSeries, 0, 128) + // Fetch all the series only once. 
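// Illustrative sketch (not from the vendored code): rather than sorting series refs with
// a comparator that re-resolves both refs on every comparison, the series are fetched
// once, sorted by their label sets, and the refs are emitted in that order. The string
// lset stands in for labels.Labels; lookup stands in for series.getByID.
package sketch

import "sort"

type seriesEntry struct {
	ref  uint64
	lset string
}

func sortedRefs(refs []uint64, lookup func(uint64) *seriesEntry) []uint64 {
	entries := make([]*seriesEntry, 0, len(refs))
	for _, r := range refs {
		if s := lookup(r); s != nil { // unknown refs are skipped, as in the diff
			entries = append(entries, s)
		}
	}
	sort.Slice(entries, func(i, j int) bool { return entries[i].lset < entries[j].lset })
	out := make([]uint64, 0, len(entries))
	for _, s := range entries {
		out = append(out, s.ref)
	}
	return out
}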
for p.Next() { - ep = append(ep, p.At()) + s := h.head.series.getByID(p.At()) + if s == nil { + level.Debug(h.head.logger).Log("msg", "looked up series not found") + } else { + series = append(series, s) + } } if err := p.Err(); err != nil { return index.ErrPostings(errors.Wrap(err, "expand postings")) } - sort.Slice(ep, func(i, j int) bool { - a := h.head.series.getByID(ep[i]) - b := h.head.series.getByID(ep[j]) - - if a == nil || b == nil { - level.Debug(h.head.logger).Log("msg", "looked up series not found") - return false - } - return labels.Compare(a.lset, b.lset) < 0 + sort.Slice(series, func(i, j int) bool { + return labels.Compare(series[i].lset, series[j].lset) < 0 }) + + // Convert back to list. + ep := make([]uint64, 0, len(series)) + for _, p := range series { + ep = append(ep, p.ref) + } return index.NewListPostings(ep) } @@ -849,7 +1140,7 @@ func (h *headIndexReader) Series(ref uint64, lbls *labels.Labels, chks *[]chunks for i, c := range s.chunks { // Do not expose chunks that are outside of the specified range. - if !intervalOverlap(c.minTime, c.maxTime, h.mint, h.maxt) { + if !c.OverlapsClosedInterval(h.mint, h.maxt) { continue } *chks = append(*chks, chunks.Meta{ @@ -865,9 +1156,7 @@ func (h *headIndexReader) Series(ref uint64, lbls *labels.Labels, chks *[]chunks func (h *headIndexReader) LabelIndices() ([][]string, error) { h.head.symMtx.RLock() defer h.head.symMtx.RUnlock() - res := [][]string{} - for s := range h.head.values { res = append(res, []string{s}) } @@ -1010,7 +1299,7 @@ func (s *stripeSeries) gc(mint int64) (map[uint64]struct{}, int) { series.Lock() rmChunks += series.truncateChunksBefore(mint) - if len(series.chunks) > 0 { + if len(series.chunks) > 0 || series.pendingCommit { series.Unlock() continue } @@ -1090,6 +1379,14 @@ type sample struct { v float64 } +func (s sample) T() int64 { + return s.t +} + +func (s sample) V() float64 { + return s.v +} + // memSeries is the in-memory representation of a series. None of its methods // are goroutine safe and it is the caller's responsibility to lock it. type memSeries struct { @@ -1098,12 +1395,13 @@ type memSeries struct { ref uint64 lset labels.Labels chunks []*memChunk + headChunk *memChunk chunkRange int64 firstChunkID int - nextAt int64 // timestamp at which to cut the next chunk. - lastValue float64 - sampleBuf [4]sample + nextAt int64 // Timestamp at which to cut the next chunk. + sampleBuf [4]sample + pendingCommit bool // Whether there are samples waiting to be committed to this series. app chunkenc.Appender // Current appender for the chunk. } @@ -1130,10 +1428,11 @@ func (s *memSeries) cut(mint int64) *memChunk { maxTime: math.MinInt64, } s.chunks = append(s.chunks, c) + s.headChunk = c // Set upper bound on when the next chunk must be started. An earlier timestamp // may be chosen dynamically at a later point. - _, s.nextAt = rangeForTimestamp(mint, s.chunkRange) + s.nextAt = rangeForTimestamp(mint, s.chunkRange) app, err := c.chunk.Appender() if err != nil { @@ -1168,7 +1467,7 @@ func (s *memSeries) appendable(t int64, v float64) error { } // We are allowing exact duplicates as we can encounter them in valid cases // like federation and erroring out at that time would be extremely noisy. - if math.Float64bits(s.lastValue) != math.Float64bits(v) { + if math.Float64bits(s.sampleBuf[3].v) != math.Float64bits(v) { return ErrAmendSample } return nil @@ -1198,12 +1497,20 @@ func (s *memSeries) truncateChunksBefore(mint int64) (removed int) { } s.chunks = append(s.chunks[:0], s.chunks[k:]...) 
s.firstChunkID += k + if len(s.chunks) == 0 { + s.headChunk = nil + } else { + s.headChunk = s.chunks[len(s.chunks)-1] + } return k } // append adds the sample (t, v) to the series. func (s *memSeries) append(t int64, v float64) (success, chunkCreated bool) { + // Based on Gorilla white papers this offers near-optimal compression ratio + // so anything bigger that this has diminishing returns and increases + // the time range within which we have to decompress all samples. const samplesPerChunk = 120 c := s.head() @@ -1232,8 +1539,6 @@ func (s *memSeries) append(t int64, v float64) (success, chunkCreated bool) { c.maxTime = t - s.lastValue = v - s.sampleBuf[0] = s.sampleBuf[1] s.sampleBuf[1] = s.sampleBuf[2] s.sampleBuf[2] = s.sampleBuf[3] @@ -1277,10 +1582,7 @@ func (s *memSeries) iterator(id int) chunkenc.Iterator { } func (s *memSeries) head() *memChunk { - if len(s.chunks) == 0 { - return nil - } - return s.chunks[len(s.chunks)-1] + return s.headChunk } type memChunk struct { @@ -1288,6 +1590,11 @@ type memChunk struct { minTime, maxTime int64 } +// Returns true if the chunk overlaps [mint, maxt]. +func (mc *memChunk) OverlapsClosedInterval(mint, maxt int64) bool { + return mc.minTime <= maxt && mint <= mc.maxTime +} + type memSafeIterator struct { chunkenc.Iterator diff --git a/vendor/github.com/prometheus/tsdb/index/encoding_helpers.go b/vendor/github.com/prometheus/tsdb/index/encoding_helpers.go index 69e7297..9104f1c 100644 --- a/vendor/github.com/prometheus/tsdb/index/encoding_helpers.go +++ b/vendor/github.com/prometheus/tsdb/index/encoding_helpers.go @@ -1,3 +1,16 @@ +// Copyright 2018 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + package index import ( @@ -5,6 +18,8 @@ import ( "hash" "hash/crc32" "unsafe" + + "github.com/pkg/errors" ) // enbuf is a helper type to populate a byte slice with various types. @@ -73,6 +88,60 @@ type decbuf struct { e error } +// newDecbufAt returns a new decoding buffer. It expects the first 4 bytes +// after offset to hold the big endian encoded content length, followed by the contents and the expected +// checksum. +func newDecbufAt(bs ByteSlice, off int) decbuf { + if bs.Len() < off+4 { + return decbuf{e: errInvalidSize} + } + b := bs.Range(off, off+4) + l := int(binary.BigEndian.Uint32(b)) + + if bs.Len() < off+4+l+4 { + return decbuf{e: errInvalidSize} + } + + // Load bytes holding the contents plus a CRC32 checksum. + b = bs.Range(off+4, off+4+l+4) + dec := decbuf{b: b[:len(b)-4]} + + if exp := binary.BigEndian.Uint32(b[len(b)-4:]); dec.crc32() != exp { + return decbuf{e: errInvalidChecksum} + } + return dec +} + +// decbufUvarintAt returns a new decoding buffer. It expects the first bytes +// after offset to hold the uvarint-encoded buffers length, followed by the contents and the expected +// checksum. +func newDecbufUvarintAt(bs ByteSlice, off int) decbuf { + // We never have to access this method at the far end of the byte slice. Thus just checking + // against the MaxVarintLen32 is sufficient. 
+ if bs.Len() < off+binary.MaxVarintLen32 { + return decbuf{e: errInvalidSize} + } + b := bs.Range(off, off+binary.MaxVarintLen32) + + l, n := binary.Uvarint(b) + if n <= 0 || n > binary.MaxVarintLen32 { + return decbuf{e: errors.Errorf("invalid uvarint %d", n)} + } + + if bs.Len() < off+n+int(l)+4 { + return decbuf{e: errInvalidSize} + } + + // Load bytes holding the contents plus a CRC32 checksum. + b = bs.Range(off+n, off+n+int(l)+4) + dec := decbuf{b: b[:len(b)-4]} + + if dec.crc32() != binary.BigEndian.Uint32(b[len(b)-4:]) { + return decbuf{e: errInvalidChecksum} + } + return dec +} + func (d *decbuf) uvarint() int { return int(d.uvarint64()) } func (d *decbuf) uvarint32() uint32 { return uint32(d.uvarint64()) } func (d *decbuf) be32int() int { return int(d.be32()) } diff --git a/vendor/github.com/prometheus/tsdb/index/index.go b/vendor/github.com/prometheus/tsdb/index/index.go index 72ca383..74e08d4 100644 --- a/vendor/github.com/prometheus/tsdb/index/index.go +++ b/vendor/github.com/prometheus/tsdb/index/index.go @@ -20,6 +20,7 @@ import ( "hash" "hash/crc32" "io" + "io/ioutil" "math" "os" "path/filepath" @@ -35,9 +36,15 @@ import ( const ( // MagicIndex 4 bytes at the head of an index file. MagicIndex = 0xBAAAD700 + // HeaderLen represents number of bytes reserved of index for header. + HeaderLen = 5 - indexFormatV1 = 1 - indexFormatV2 = 2 + // FormatV1 represents 1 version of index. + FormatV1 = 1 + // FormatV2 represents 2 version of index. + FormatV2 = 2 + + labelNameSeperator = "\xff" ) type indexWriterSeries struct { @@ -106,7 +113,7 @@ type Writer struct { fbuf *bufio.Writer pos uint64 - toc indexTOC + toc TOC stage indexWriterStage // Reusable memory. @@ -127,13 +134,42 @@ type Writer struct { Version int } -type indexTOC struct { - symbols uint64 - series uint64 - labelIndices uint64 - labelIndicesTable uint64 - postings uint64 - postingsTable uint64 +// TOC represents index Table Of Content that states where each section of index starts. +type TOC struct { + Symbols uint64 + Series uint64 + LabelIndices uint64 + LabelIndicesTable uint64 + Postings uint64 + PostingsTable uint64 +} + +// NewTOCFromByteSlice return parsed TOC from given index byte slice. +func NewTOCFromByteSlice(bs ByteSlice) (*TOC, error) { + if bs.Len() < indexTOCLen { + return nil, errInvalidSize + } + b := bs.Range(bs.Len()-indexTOCLen, bs.Len()) + + expCRC := binary.BigEndian.Uint32(b[len(b)-4:]) + d := decbuf{b: b[:len(b)-4]} + + if d.crc32() != expCRC { + return nil, errors.Wrap(errInvalidChecksum, "read TOC") + } + + if err := d.err(); err != nil { + return nil, err + } + + return &TOC{ + Symbols: d.be64(), + Series: d.be64(), + LabelIndices: d.be64(), + LabelIndicesTable: d.be64(), + Postings: d.be64(), + PostingsTable: d.be64(), + }, nil } // NewWriter returns a new Writer to the given filename. It serializes data in format version 2. @@ -221,22 +257,22 @@ func (w *Writer) ensureStage(s indexWriterStage) error { // Mark start of sections in table of contents. 
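// Illustrative sketch (not from the vendored code): the table of contents is a fixed-size
// trailer at the very end of the index file — six big-endian uint64 section offsets
// (Symbols, Series, LabelIndices, LabelIndicesTable, Postings, PostingsTable) followed by
// a 4-byte CRC32 — so indexTOCLen is 6*8+4 = 52 bytes and the TOC can always be located
// from the file length alone.
package sketch

import "encoding/binary"

const tocLen = 6*8 + 4

func readTOCOffsets(index []byte) (offsets [6]uint64, crc uint32, ok bool) {
	if len(index) < tocLen {
		return offsets, 0, false
	}
	b := index[len(index)-tocLen:]
	for i := range offsets {
		offsets[i] = binary.BigEndian.Uint64(b[i*8:])
	}
	return offsets, binary.BigEndian.Uint32(b[6*8:]), true
}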
switch s { case idxStageSymbols: - w.toc.symbols = w.pos + w.toc.Symbols = w.pos case idxStageSeries: - w.toc.series = w.pos + w.toc.Series = w.pos case idxStageLabelIndex: - w.toc.labelIndices = w.pos + w.toc.LabelIndices = w.pos case idxStagePostings: - w.toc.postings = w.pos + w.toc.Postings = w.pos case idxStageDone: - w.toc.labelIndicesTable = w.pos + w.toc.LabelIndicesTable = w.pos if err := w.writeOffsetTable(w.labelIndexes); err != nil { return err } - w.toc.postingsTable = w.pos + w.toc.PostingsTable = w.pos if err := w.writeOffsetTable(w.postings); err != nil { return err } @@ -252,7 +288,7 @@ func (w *Writer) ensureStage(s indexWriterStage) error { func (w *Writer) writeMeta() error { w.buf1.reset() w.buf1.putBE32(MagicIndex) - w.buf1.putByte(indexFormatV2) + w.buf1.putByte(FormatV2) return w.write(w.buf1.get()) } @@ -271,7 +307,9 @@ func (w *Writer) AddSeries(ref uint64, lset labels.Labels, chunks ...chunks.Meta } // We add padding to 16 bytes to increase the addressable space we get through 4 byte // series references. - w.addPadding(16) + if err := w.addPadding(16); err != nil { + return errors.Errorf("failed to write padding bytes: %v", err) + } if w.pos%16 != 0 { return errors.Errorf("series write not 16-byte aligned at %d", w.pos) @@ -342,8 +380,6 @@ func (w *Writer) AddSymbols(sym map[string]struct{}) error { } sort.Strings(symbols) - const headerSize = 4 - w.buf1.reset() w.buf2.reset() @@ -392,7 +428,7 @@ func (w *Writer) WriteLabelIndex(names []string, values []string) error { w.buf2.putBE32int(valt.Len()) // here we have an index for the symbol file if v2, otherwise it's an offset - for _, v := range valt.s { + for _, v := range valt.entries { index, ok := w.symbols[v] if !ok { return errors.Errorf("symbol entry for %q does not exist", v) @@ -434,12 +470,12 @@ const indexTOCLen = 6*8 + 4 func (w *Writer) writeTOC() error { w.buf1.reset() - w.buf1.putBE64(w.toc.symbols) - w.buf1.putBE64(w.toc.series) - w.buf1.putBE64(w.toc.labelIndices) - w.buf1.putBE64(w.toc.labelIndicesTable) - w.buf1.putBE64(w.toc.postings) - w.buf1.putBE64(w.toc.postingsTable) + w.buf1.putBE64(w.toc.Symbols) + w.buf1.putBE64(w.toc.Series) + w.buf1.putBE64(w.toc.LabelIndices) + w.buf1.putBE64(w.toc.LabelIndicesTable) + w.buf1.putBE64(w.toc.Postings) + w.buf1.putBE64(w.toc.PostingsTable) w.buf1.putHash(w.crc32) @@ -531,33 +567,32 @@ type StringTuples interface { } type Reader struct { - // The underlying byte slice holding the encoded series data. - b ByteSlice - toc indexTOC + b ByteSlice // Close that releases the underlying resources of the byte slice. c io.Closer // Cached hashmaps of section offsets. - labels map[string]uint64 - postings map[labels.Label]uint64 + labels map[string]uint64 + // LabelName to LabelValue to offset map. + postings map[string]map[string]uint64 // Cache of read symbols. Strings that are returned when reading from the // block are always backed by true strings held in here rather than // strings that are backed by byte slices from the mmap'd index file. This // prevents memory faults when applications work with read symbols after - // the block has been unmapped. - symbols map[uint32]string + // the block has been unmapped. The older format has sparse indexes so a map + // must be used, but the new format is not so we can use a slice. 
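// Illustrative sketch (not from the vendored code): format v2 symbol references are dense
// ordinals that index straight into a slice, while v1 references are sparse byte offsets
// that still need a map, so the reader tries the slice first and falls back.
func resolveSymbol(symbolsV2 []string, symbolsV1 map[uint32]string, o uint32) (string, bool) {
	if int(o) < len(symbolsV2) {
		return symbolsV2[o], true // v2: reference is an index
	}
	s, ok := symbolsV1[o] // v1: reference is a file offset
	return s, ok
}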
+ symbolsV1 map[uint32]string + symbolsV2 []string + symbolsTableSize uint64 dec *Decoder - crc32 hash.Hash32 - version int } var ( errInvalidSize = fmt.Errorf("invalid size") - errInvalidFlag = fmt.Errorf("invalid flag") errInvalidChecksum = fmt.Errorf("invalid checksum") ) @@ -581,10 +616,10 @@ func (b realByteSlice) Sub(start, end int) ByteSlice { return b[start:end] } -// NewReader returns a new IndexReader on the given byte slice. It automatically +// NewReader returns a new index reader on the given byte slice. It automatically // handles different format versions. func NewReader(b ByteSlice) (*Reader, error) { - return newReader(b, nil) + return newReader(b, ioutil.NopCloser(nil)) } // NewFileReader returns a new index reader against the given index file. @@ -600,14 +635,12 @@ func newReader(b ByteSlice, c io.Closer) (*Reader, error) { r := &Reader{ b: b, c: c, - symbols: map[uint32]string{}, labels: map[string]uint64{}, - postings: map[labels.Label]uint64{}, - crc32: newCRC32(), + postings: map[string]map[string]uint64{}, } // Verify header. - if b.Len() < 5 { + if r.b.Len() < HeaderLen { return nil, errors.Wrap(errInvalidSize, "index header") } if m := binary.BigEndian.Uint32(r.b.Range(0, 4)); m != MagicIndex { @@ -615,40 +648,59 @@ func newReader(b ByteSlice, c io.Closer) (*Reader, error) { } r.version = int(r.b.Range(4, 5)[0]) - if r.version != 1 && r.version != 2 { + if r.version != FormatV1 && r.version != FormatV2 { return nil, errors.Errorf("unknown index file version %d", r.version) } - if err := r.readTOC(); err != nil { + toc, err := NewTOCFromByteSlice(b) + if err != nil { return nil, errors.Wrap(err, "read TOC") } - if err := r.readSymbols(int(r.toc.symbols)); err != nil { + + r.symbolsV2, r.symbolsV1, err = ReadSymbols(r.b, r.version, int(toc.Symbols)) + if err != nil { return nil, errors.Wrap(err, "read symbols") } - var err error - err = r.readOffsetTable(r.toc.labelIndicesTable, func(key []string, off uint64) error { + // Use the strings already allocated by symbols, rather than + // re-allocating them again below. + // Additionally, calculate symbolsTableSize. 
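// Illustrative sketch (not from the vendored code): the strings decoded for the symbol
// table double as an intern pool for the offset-table keys, so each label name and value
// is held in memory once; symbolsTableSize is tallied as len(s)+8 per entry, the 8
// presumably approximating per-entry bookkeeping overhead.
func internSymbols(symbols []string) (pool map[string]string, size uint64) {
	pool = make(map[string]string, len(symbols))
	for _, s := range symbols {
		size += uint64(len(s) + 8)
		pool[s] = s // later lookups reuse this allocation instead of copying
	}
	return pool, size
}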
+ allocatedSymbols := make(map[string]string, len(r.symbolsV1)+len(r.symbolsV2)) + for _, s := range r.symbolsV1 { + r.symbolsTableSize += uint64(len(s) + 8) + allocatedSymbols[s] = s + } + for _, s := range r.symbolsV2 { + r.symbolsTableSize += uint64(len(s) + 8) + allocatedSymbols[s] = s + } + + if err := ReadOffsetTable(r.b, toc.LabelIndicesTable, func(key []string, off uint64) error { if len(key) != 1 { - return errors.Errorf("unexpected key length %d", len(key)) + return errors.Errorf("unexpected key length for label indices table %d", len(key)) } - r.labels[key[0]] = off + + r.labels[allocatedSymbols[key[0]]] = off return nil - }) - if err != nil { + }); err != nil { return nil, errors.Wrap(err, "read label index table") } - err = r.readOffsetTable(r.toc.postingsTable, func(key []string, off uint64) error { + + r.postings[""] = map[string]uint64{} + if err := ReadOffsetTable(r.b, toc.PostingsTable, func(key []string, off uint64) error { if len(key) != 2 { - return errors.Errorf("unexpected key length %d", len(key)) + return errors.Errorf("unexpected key length for posting table %d", len(key)) } - r.postings[labels.Label{Name: key[0], Value: key[1]}] = off + if _, ok := r.postings[key[0]]; !ok { + r.postings[allocatedSymbols[key[0]]] = map[string]uint64{} + } + r.postings[key[0]][allocatedSymbols[key[1]]] = off return nil - }) - if err != nil { + }); err != nil { return nil, errors.Wrap(err, "read postings table") } - r.dec = &Decoder{symbols: r.symbols} + r.dec = &Decoder{LookupSymbol: r.lookupSymbol} return r, nil } @@ -668,135 +720,60 @@ type Range struct { func (r *Reader) PostingsRanges() (map[labels.Label]Range, error) { m := map[labels.Label]Range{} - for l, start := range r.postings { - d := r.decbufAt(int(start)) - if d.err() != nil { - return nil, d.err() - } - m[l] = Range{ - Start: int64(start) + 4, - End: int64(start) + 4 + int64(d.len()), + for k, e := range r.postings { + for v, start := range e { + d := newDecbufAt(r.b, int(start)) + if d.err() != nil { + return nil, d.err() + } + m[labels.Label{Name: k, Value: v}] = Range{ + Start: int64(start) + 4, + End: int64(start) + 4 + int64(d.len()), + } } } return m, nil } -func (r *Reader) readTOC() error { - if r.b.Len() < indexTOCLen { - return errInvalidSize - } - b := r.b.Range(r.b.Len()-indexTOCLen, r.b.Len()) - - expCRC := binary.BigEndian.Uint32(b[len(b)-4:]) - d := decbuf{b: b[:len(b)-4]} - - if d.crc32() != expCRC { - return errors.Wrap(errInvalidChecksum, "read TOC") - } - - r.toc.symbols = d.be64() - r.toc.series = d.be64() - r.toc.labelIndices = d.be64() - r.toc.labelIndicesTable = d.be64() - r.toc.postings = d.be64() - r.toc.postingsTable = d.be64() - - return d.err() -} - -// decbufAt returns a new decoding buffer. It expects the first 4 bytes -// after offset to hold the big endian encoded content length, followed by the contents and the expected -// checksum. -func (r *Reader) decbufAt(off int) decbuf { - if r.b.Len() < off+4 { - return decbuf{e: errInvalidSize} - } - b := r.b.Range(off, off+4) - l := int(binary.BigEndian.Uint32(b)) - - if r.b.Len() < off+4+l+4 { - return decbuf{e: errInvalidSize} - } - - // Load bytes holding the contents plus a CRC32 checksum. - b = r.b.Range(off+4, off+4+l+4) - dec := decbuf{b: b[:len(b)-4]} - - if exp := binary.BigEndian.Uint32(b[len(b)-4:]); dec.crc32() != exp { - return decbuf{e: errInvalidChecksum} - } - return dec -} - -// decbufUvarintAt returns a new decoding buffer. 
It expects the first bytes -// after offset to hold the uvarint-encoded buffers length, followed by the contents and the expected -// checksum. -func (r *Reader) decbufUvarintAt(off int) decbuf { - // We never have to access this method at the far end of the byte slice. Thus just checking - // against the MaxVarintLen32 is sufficient. - if r.b.Len() < off+binary.MaxVarintLen32 { - return decbuf{e: errInvalidSize} - } - b := r.b.Range(off, off+binary.MaxVarintLen32) - - l, n := binary.Uvarint(b) - if n > binary.MaxVarintLen32 { - return decbuf{e: errors.New("invalid uvarint")} - } - - if r.b.Len() < off+n+int(l)+4 { - return decbuf{e: errInvalidSize} - } - - // Load bytes holding the contents plus a CRC32 checksum. - b = r.b.Range(off+n, off+n+int(l)+4) - dec := decbuf{b: b[:len(b)-4]} - - if dec.crc32() != binary.BigEndian.Uint32(b[len(b)-4:]) { - return decbuf{e: errInvalidChecksum} - } - return dec -} - -// readSymbols reads the symbol table fully into memory and allocates proper strings for them. +// ReadSymbols reads the symbol table fully into memory and allocates proper strings for them. // Strings backed by the mmap'd memory would cause memory faults if applications keep using them // after the reader is closed. -func (r *Reader) readSymbols(off int) error { +func ReadSymbols(bs ByteSlice, version int, off int) ([]string, map[uint32]string, error) { if off == 0 { - return nil + return nil, nil, nil } - d := r.decbufAt(off) + d := newDecbufAt(bs, off) var ( - origLen = d.len() - cnt = d.be32int() - basePos = uint32(off) + 4 - nextPos = basePos + uint32(origLen-d.len()) + origLen = d.len() + cnt = d.be32int() + basePos = uint32(off) + 4 + nextPos = basePos + uint32(origLen-d.len()) + symbolSlice []string + symbols = map[uint32]string{} ) - - if r.version == 2 { - nextPos = 0 + if version == 2 { + symbolSlice = make([]string, 0, cnt) } for d.err() == nil && d.len() > 0 && cnt > 0 { s := d.uvarintStr() - r.symbols[nextPos] = s - if r.version == 2 { - nextPos++ + if version == FormatV2 { + symbolSlice = append(symbolSlice, s) } else { + symbols[nextPos] = s nextPos = basePos + uint32(origLen-d.len()) } cnt-- } - return errors.Wrap(d.err(), "read symbols") + return symbolSlice, symbols, errors.Wrap(d.err(), "read symbols") } -// readOffsetTable reads an offset table at the given position calls f for each -// found entry.f -// If f returns an error it stops decoding and returns the received error, -func (r *Reader) readOffsetTable(off uint64, f func([]string, uint64) error) error { - d := r.decbufAt(int(off)) +// ReadOffsetTable reads an offset table and at the given position calls f for each +// found entry. If f returns an error it stops decoding and returns the received error. +func ReadOffsetTable(bs ByteSlice, off uint64, f func([]string, uint64) error) error { + d := newDecbufAt(bs, int(off)) cnt := d.be32() for d.err() == nil && d.len() > 0 && cnt > 0 { @@ -824,7 +801,10 @@ func (r *Reader) Close() error { } func (r *Reader) lookupSymbol(o uint32) (string, error) { - s, ok := r.symbols[o] + if int(o) < len(r.symbolsV2) { + return r.symbolsV2[o], nil + } + s, ok := r.symbolsV1[o] if !ok { return "", errors.Errorf("unknown symbol offset %d", o) } @@ -833,24 +813,26 @@ func (r *Reader) lookupSymbol(o uint32) (string, error) { // Symbols returns a set of symbols that exist within the index. 
func (r *Reader) Symbols() (map[string]struct{}, error) { - res := make(map[string]struct{}, len(r.symbols)) + res := make(map[string]struct{}, len(r.symbolsV1)+len(r.symbolsV2)) - for _, s := range r.symbols { + for _, s := range r.symbolsV1 { + res[s] = struct{}{} + } + for _, s := range r.symbolsV2 { res[s] = struct{}{} } return res, nil } -// SymbolTable returns the symbol table that is used to resolve symbol references. -func (r *Reader) SymbolTable() map[uint32]string { - return r.symbols +// SymbolTableSize returns the symbol table size in bytes. +func (r *Reader) SymbolTableSize() uint64 { + return r.symbolsTableSize } // LabelValues returns value tuples that exist for the given label name tuples. func (r *Reader) LabelValues(names ...string) (StringTuples, error) { - const sep = "\xff" - key := strings.Join(names, sep) + key := strings.Join(names, labelNameSeperator) off, ok := r.labels[key] if !ok { // XXX(fabxc): hot fix. Should return a partial data error and handle cases @@ -859,7 +841,7 @@ func (r *Reader) LabelValues(names ...string) (StringTuples, error) { //return nil, fmt.Errorf("label index doesn't exist") } - d := r.decbufAt(int(off)) + d := newDecbufAt(r.b, int(off)) nc := d.be32int() d.be32() // consume unused value entry count. @@ -868,9 +850,9 @@ func (r *Reader) LabelValues(names ...string) (StringTuples, error) { return nil, errors.Wrap(d.err(), "read label value index") } st := &serializedStringTuples{ - l: nc, - b: d.get(), - lookup: r.lookupSymbol, + idsCount: nc, + idsBytes: d.get(), + lookup: r.lookupSymbol, } return st, nil } @@ -880,14 +862,12 @@ type emptyStringTuples struct{} func (emptyStringTuples) At(i int) ([]string, error) { return nil, nil } func (emptyStringTuples) Len() int { return 0 } -// LabelIndices returns a for which labels or label tuples value indices exist. +// LabelIndices returns a slice of label names for which labels or label tuples value indices exist. +// NOTE: This is deprecated. Use `LabelNames()` instead. func (r *Reader) LabelIndices() ([][]string, error) { - const sep = "\xff" - - res := [][]string{} - + var res [][]string for s := range r.labels { - res = append(res, strings.Split(s, sep)) + res = append(res, strings.Split(s, labelNameSeperator)) } return res, nil } @@ -897,10 +877,10 @@ func (r *Reader) Series(id uint64, lbls *labels.Labels, chks *[]chunks.Meta) err offset := id // In version 2 series IDs are no longer exact references but series are 16-byte padded // and the ID is the multiple of 16 of the actual position. - if r.version == 2 { + if r.version == FormatV2 { offset = id * 16 } - d := r.decbufUvarintAt(int(offset)) + d := newDecbufUvarintAt(r.b, int(offset)) if d.err() != nil { return d.err() } @@ -909,14 +889,15 @@ func (r *Reader) Series(id uint64, lbls *labels.Labels, chks *[]chunks.Meta) err // Postings returns a postings list for the given label pair. func (r *Reader) Postings(name, value string) (Postings, error) { - off, ok := r.postings[labels.Label{ - Name: name, - Value: value, - }] + e, ok := r.postings[name] if !ok { return EmptyPostings(), nil } - d := r.decbufAt(int(off)) + off, ok := e[value] + if !ok { + return EmptyPostings(), nil + } + d := newDecbufAt(r.b, int(off)) if d.err() != nil { return nil, errors.Wrap(d.err(), "get postings entry") } @@ -933,34 +914,63 @@ func (r *Reader) SortedPostings(p Postings) Postings { return p } +// Size returns the size of an index file. 
+func (r *Reader) Size() int64 { + return int64(r.b.Len()) +} + +// LabelNames returns all the unique label names present in the index. +func (r *Reader) LabelNames() ([]string, error) { + labelNamesMap := make(map[string]struct{}, len(r.labels)) + for key := range r.labels { + // 'key' contains the label names concatenated with the + // delimiter 'labelNameSeperator'. + names := strings.Split(key, labelNameSeperator) + for _, name := range names { + if name == allPostingsKey.Name { + // This is not from any metric. + // It is basically an empty label name. + continue + } + labelNamesMap[name] = struct{}{} + } + } + labelNames := make([]string, 0, len(labelNamesMap)) + for name := range labelNamesMap { + labelNames = append(labelNames, name) + } + sort.Strings(labelNames) + return labelNames, nil +} + type stringTuples struct { - l int // tuple length - s []string // flattened tuple entries + length int // tuple length + entries []string // flattened tuple entries } -func NewStringTuples(s []string, l int) (*stringTuples, error) { - if len(s)%l != 0 { +func NewStringTuples(entries []string, length int) (*stringTuples, error) { + if len(entries)%length != 0 { return nil, errors.Wrap(errInvalidSize, "string tuple list") } - return &stringTuples{s: s, l: l}, nil + return &stringTuples{entries: entries, length: length}, nil } -func (t *stringTuples) Len() int { return len(t.s) / t.l } -func (t *stringTuples) At(i int) ([]string, error) { return t.s[i : i+t.l], nil } +func (t *stringTuples) Len() int { return len(t.entries) / t.length } +func (t *stringTuples) At(i int) ([]string, error) { return t.entries[i : i+t.length], nil } func (t *stringTuples) Swap(i, j int) { - c := make([]string, t.l) - copy(c, t.s[i:i+t.l]) + c := make([]string, t.length) + copy(c, t.entries[i:i+t.length]) - for k := 0; k < t.l; k++ { - t.s[i+k] = t.s[j+k] - t.s[j+k] = c[k] + for k := 0; k < t.length; k++ { + t.entries[i+k] = t.entries[j+k] + t.entries[j+k] = c[k] } } func (t *stringTuples) Less(i, j int) bool { - for k := 0; k < t.l; k++ { - d := strings.Compare(t.s[i+k], t.s[j+k]) + for k := 0; k < t.length; k++ { + d := strings.Compare(t.entries[i+k], t.entries[j+k]) if d < 0 { return true @@ -973,23 +983,23 @@ func (t *stringTuples) Less(i, j int) bool { } type serializedStringTuples struct { - l int - b []byte - lookup func(uint32) (string, error) + idsCount int + idsBytes []byte // bytes containing the ids pointing to the string in the lookup table. + lookup func(uint32) (string, error) } func (t *serializedStringTuples) Len() int { - return len(t.b) / (4 * t.l) + return len(t.idsBytes) / (4 * t.idsCount) } func (t *serializedStringTuples) At(i int) ([]string, error) { - if len(t.b) < (i+t.l)*4 { + if len(t.idsBytes) < (i+t.idsCount)*4 { return nil, errInvalidSize } - res := make([]string, 0, t.l) + res := make([]string, 0, t.idsCount) - for k := 0; k < t.l; k++ { - offset := binary.BigEndian.Uint32(t.b[(i+k)*4:]) + for k := 0; k < t.idsCount; k++ { + offset := binary.BigEndian.Uint32(t.idsBytes[(i+k)*4:]) s, err := t.lookup(offset) if err != nil { @@ -1006,21 +1016,7 @@ func (t *serializedStringTuples) At(i int) ([]string, error) { // It currently does not contain decoding methods for all entry types but can be extended // by them if there's demand. 
type Decoder struct { - symbols map[uint32]string -} - -func (dec *Decoder) lookupSymbol(o uint32) (string, error) { - s, ok := dec.symbols[o] - if !ok { - return "", errors.Errorf("unknown symbol offset %d", o) - } - return s, nil -} - -// SetSymbolTable set the symbol table to be used for lookups when decoding series -// and label indices -func (dec *Decoder) SetSymbolTable(t map[uint32]string) { - dec.symbols = t + LookupSymbol func(uint32) (string, error) } // Postings returns a postings list for b and its number of elements. @@ -1048,11 +1044,11 @@ func (dec *Decoder) Series(b []byte, lbls *labels.Labels, chks *[]chunks.Meta) e return errors.Wrap(d.err(), "read series label offsets") } - ln, err := dec.lookupSymbol(lno) + ln, err := dec.LookupSymbol(lno) if err != nil { return errors.Wrap(err, "lookup label name") } - lv, err := dec.lookupSymbol(lvo) + lv, err := dec.LookupSymbol(lvo) if err != nil { return errors.Wrap(err, "lookup label value") } diff --git a/vendor/github.com/prometheus/tsdb/index/postings.go b/vendor/github.com/prometheus/tsdb/index/postings.go index 53449f3..13df1c6 100644 --- a/vendor/github.com/prometheus/tsdb/index/postings.go +++ b/vendor/github.com/prometheus/tsdb/index/postings.go @@ -36,14 +36,14 @@ func AllPostingsKey() (name, value string) { // unordered batch fills on startup. type MemPostings struct { mtx sync.RWMutex - m map[labels.Label][]uint64 + m map[string]map[string][]uint64 ordered bool } // NewMemPostings returns a memPostings that's ready for reads and writes. func NewMemPostings() *MemPostings { return &MemPostings{ - m: make(map[labels.Label][]uint64, 512), + m: make(map[string]map[string][]uint64, 512), ordered: true, } } @@ -52,7 +52,7 @@ func NewMemPostings() *MemPostings { // until ensureOrder was called once. func NewUnorderedMemPostings() *MemPostings { return &MemPostings{ - m: make(map[labels.Label][]uint64, 512), + m: make(map[string]map[string][]uint64, 512), ordered: false, } } @@ -62,8 +62,10 @@ func (p *MemPostings) SortedKeys() []labels.Label { p.mtx.RLock() keys := make([]labels.Label, 0, len(p.m)) - for l := range p.m { - keys = append(keys, l) + for n, e := range p.m { + for v := range e { + keys = append(keys, labels.Label{Name: n, Value: v}) + } } p.mtx.RUnlock() @@ -78,14 +80,18 @@ func (p *MemPostings) SortedKeys() []labels.Label { // Get returns a postings list for the given label pair. func (p *MemPostings) Get(name, value string) Postings { + var lp []uint64 p.mtx.RLock() - l := p.m[labels.Label{Name: name, Value: value}] + l := p.m[name] + if l != nil { + lp = l[value] + } p.mtx.RUnlock() - if l == nil { + if lp == nil { return EmptyPostings() } - return newListPostings(l) + return newListPostings(lp) } // All returns a postings list over all documents ever added. @@ -118,8 +124,10 @@ func (p *MemPostings) EnsureOrder() { }() } - for _, l := range p.m { - workc <- l + for _, e := range p.m { + for _, l := range e { + workc <- l + } } close(workc) wg.Wait() @@ -129,44 +137,58 @@ func (p *MemPostings) EnsureOrder() { // Delete removes all ids in the given map from the postings lists. func (p *MemPostings) Delete(deleted map[uint64]struct{}) { - var keys []labels.Label + var keys, vals []string // Collect all keys relevant for deletion once. New keys added afterwards // can by definition not be affected by any of the given deletes. 
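// Illustrative sketch (not from the vendored code): postings are keyed by a two-level map
// (label name -> label value -> series refs) instead of a flat labels.Label key, so
// per-name iteration and deletes can walk one name's values without touching the rest.
package sketch

import "sync"

type memPostings struct {
	mtx sync.RWMutex
	m   map[string]map[string][]uint64
}

func (p *memPostings) get(name, value string) []uint64 {
	p.mtx.RLock()
	defer p.mtx.RUnlock()
	vals := p.m[name]
	if vals == nil {
		return nil
	}
	return vals[value]
}

func (p *memPostings) add(id uint64, name, value string) {
	p.mtx.Lock()
	defer p.mtx.Unlock()
	vals, ok := p.m[name]
	if !ok {
		vals = map[string][]uint64{}
		p.m[name] = vals
	}
	vals[value] = append(vals[value], id)
}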
p.mtx.RLock() - for l := range p.m { - keys = append(keys, l) + for n := range p.m { + keys = append(keys, n) } p.mtx.RUnlock() - // For each key we first analyse whether the postings list is affected by the deletes. - // If yes, we actually reallocate a new postings list. - for _, l := range keys { - // Only lock for processing one postings list so we don't block reads for too long. - p.mtx.Lock() - - found := false - for _, id := range p.m[l] { - if _, ok := deleted[id]; ok { - found = true - break - } + for _, n := range keys { + p.mtx.RLock() + vals = vals[:0] + for v := range p.m[n] { + vals = append(vals, v) } - if !found { - p.mtx.Unlock() - continue - } - repl := make([]uint64, 0, len(p.m[l])) + p.mtx.RUnlock() + + // For each posting we first analyse whether the postings list is affected by the deletes. + // If yes, we actually reallocate a new postings list. + for _, l := range vals { + // Only lock for processing one postings list so we don't block reads for too long. + p.mtx.Lock() + + found := false + for _, id := range p.m[n][l] { + if _, ok := deleted[id]; ok { + found = true + break + } + } + if !found { + p.mtx.Unlock() + continue + } + repl := make([]uint64, 0, len(p.m[n][l])) - for _, id := range p.m[l] { - if _, ok := deleted[id]; !ok { - repl = append(repl, id) + for _, id := range p.m[n][l] { + if _, ok := deleted[id]; !ok { + repl = append(repl, id) + } + } + if len(repl) > 0 { + p.m[n][l] = repl + } else { + delete(p.m[n], l) } + p.mtx.Unlock() } - if len(repl) > 0 { - p.m[l] = repl - } else { - delete(p.m, l) + p.mtx.Lock() + if len(p.m[n]) == 0 { + delete(p.m, n) } p.mtx.Unlock() } @@ -177,9 +199,11 @@ func (p *MemPostings) Iter(f func(labels.Label, Postings) error) error { p.mtx.RLock() defer p.mtx.RUnlock() - for l, p := range p.m { - if err := f(l, newListPostings(p)); err != nil { - return err + for n, e := range p.m { + for v, p := range e { + if err := f(labels.Label{Name: n, Value: v}, newListPostings(p)); err != nil { + return err + } } } return nil @@ -198,8 +222,13 @@ func (p *MemPostings) Add(id uint64, lset labels.Labels) { } func (p *MemPostings) addFor(id uint64, l labels.Label) { - list := append(p.m[l], id) - p.m[l] = list + nm, ok := p.m[l.Name] + if !ok { + nm = map[string][]uint64{} + p.m[l.Name] = nm + } + list := append(nm[l.Value], id) + nm[l.Value] = list if !p.ordered { return @@ -337,80 +366,25 @@ func Merge(its ...Postings) Postings { if len(its) == 1 { return its[0] } - l := len(its) / 2 - return newMergedPostings(Merge(its[:l]...), Merge(its[l:]...)) -} - -type mergedPostings struct { - a, b Postings - initialized bool - aok, bok bool - cur uint64 -} - -func newMergedPostings(a, b Postings) *mergedPostings { - return &mergedPostings{a: a, b: b} -} - -func (it *mergedPostings) At() uint64 { - return it.cur -} - -func (it *mergedPostings) Next() bool { - if !it.initialized { - it.aok = it.a.Next() - it.bok = it.b.Next() - it.initialized = true - } - - if !it.aok && !it.bok { - return false - } - - if !it.aok { - it.cur = it.b.At() - it.bok = it.b.Next() - return true - } - if !it.bok { - it.cur = it.a.At() - it.aok = it.a.Next() - return true - } - - acur, bcur := it.a.At(), it.b.At() - - if acur < bcur { - it.cur = acur - it.aok = it.a.Next() - } else if acur > bcur { - it.cur = bcur - it.bok = it.b.Next() - } else { - it.cur = acur - it.aok = it.a.Next() - it.bok = it.b.Next() - } - return true -} - -func (it *mergedPostings) Seek(id uint64) bool { - if it.cur >= id { - return true + // All the uses of this function immediately expand 
it, so + // collect everything in a map. This is more efficient + // when there's 100ks of postings, compared to + // having a tree of merge objects. + pm := make(map[uint64]struct{}, len(its)) + for _, it := range its { + for it.Next() { + pm[it.At()] = struct{}{} + } + if it.Err() != nil { + return ErrPostings(it.Err()) + } } - - it.aok = it.a.Seek(id) - it.bok = it.b.Seek(id) - it.initialized = true - - return it.Next() -} - -func (it *mergedPostings) Err() error { - if it.a.Err() != nil { - return it.a.Err() + pl := make([]uint64, 0, len(pm)) + for p := range pm { + pl = append(pl, p) } - return it.b.Err() + sort.Slice(pl, func(i, j int) bool { return pl[i] < pl[j] }) + return newListPostings(pl) } // Without returns a new postings list that contains all elements from the full list that diff --git a/vendor/github.com/prometheus/tsdb/labels/labels.go b/vendor/github.com/prometheus/tsdb/labels/labels.go index d76ba0d..d1ba70b 100644 --- a/vendor/github.com/prometheus/tsdb/labels/labels.go +++ b/vendor/github.com/prometheus/tsdb/labels/labels.go @@ -117,11 +117,13 @@ func New(ls ...Label) Labels { // FromMap returns new sorted Labels from the given map. func FromMap(m map[string]string) Labels { - l := make([]Label, 0, len(m)) + l := make(Labels, 0, len(m)) for k, v := range m { l = append(l, Label{Name: k, Value: v}) } - return New(l...) + sort.Sort(l) + + return l } // FromStrings creates new labels from pairs of strings. diff --git a/vendor/github.com/prometheus/tsdb/labels/selector.go b/vendor/github.com/prometheus/tsdb/labels/selector.go index 7bc452f..c0c74ed 100644 --- a/vendor/github.com/prometheus/tsdb/labels/selector.go +++ b/vendor/github.com/prometheus/tsdb/labels/selector.go @@ -15,7 +15,6 @@ package labels import ( "regexp" - "strings" ) // Selector holds constraints for matching against a label set. @@ -99,22 +98,3 @@ func (m *notMatcher) Matches(v string) bool { return !m.Matcher.Matches(v) } func Not(m Matcher) Matcher { return ¬Matcher{m} } - -// PrefixMatcher implements Matcher for labels which values matches prefix. -type PrefixMatcher struct { - name, prefix string -} - -// NewPrefixMatcher returns new Matcher for label name matching prefix. -func NewPrefixMatcher(name, prefix string) Matcher { - return &PrefixMatcher{name: name, prefix: prefix} -} - -// Name implements Matcher interface. -func (m *PrefixMatcher) Name() string { return m.name } - -// Prefix returns matching prefix. -func (m *PrefixMatcher) Prefix() string { return m.prefix } - -// Matches implements Matcher interface. -func (m *PrefixMatcher) Matches(v string) bool { return strings.HasPrefix(v, m.prefix) } diff --git a/vendor/github.com/prometheus/tsdb/querier.go b/vendor/github.com/prometheus/tsdb/querier.go index b5b9ae0..4a5a406 100644 --- a/vendor/github.com/prometheus/tsdb/querier.go +++ b/vendor/github.com/prometheus/tsdb/querier.go @@ -33,10 +33,14 @@ type Querier interface { // LabelValues returns all potential values for a label name. LabelValues(string) ([]string, error) + // LabelValuesFor returns all potential values for a label name. // under the constraint of another label. LabelValuesFor(string, labels.Label) ([]string, error) + // LabelNames returns all the unique label names present in the block in sorted order. + LabelNames() ([]string, error) + // Close releases the resources of the Querier. 
Close() error } @@ -60,6 +64,28 @@ func (q *querier) LabelValues(n string) ([]string, error) { return q.lvals(q.blocks, n) } +// LabelNames returns all the unique label names present querier blocks. +func (q *querier) LabelNames() ([]string, error) { + labelNamesMap := make(map[string]struct{}) + for _, b := range q.blocks { + names, err := b.LabelNames() + if err != nil { + return nil, errors.Wrap(err, "LabelNames() from Querier") + } + for _, name := range names { + labelNamesMap[name] = struct{}{} + } + } + + labelNames := make([]string, 0, len(labelNamesMap)) + for name := range labelNamesMap { + labelNames = append(labelNames, name) + } + sort.Strings(labelNames) + + return labelNames, nil +} + func (q *querier) lvals(qs []Querier, n string) ([]string, error) { if len(qs) == 0 { return nil, nil @@ -187,6 +213,10 @@ func (q *blockQuerier) LabelValues(name string) ([]string, error) { return res, nil } +func (q *blockQuerier) LabelNames() ([]string, error) { + return q.index.LabelNames() +} + func (q *blockQuerier) LabelValuesFor(string, labels.Label) ([]string, error) { return nil, fmt.Errorf("not implemented") } @@ -217,39 +247,8 @@ func PostingsForMatchers(ix IndexReader, ms ...labels.Matcher) (index.Postings, return ix.SortedPostings(index.Intersect(its...)), nil } -// tuplesByPrefix uses binary search to find prefix matches within ts. -func tuplesByPrefix(m *labels.PrefixMatcher, ts StringTuples) ([]string, error) { - var outErr error - tslen := ts.Len() - i := sort.Search(tslen, func(i int) bool { - vs, err := ts.At(i) - if err != nil { - outErr = fmt.Errorf("Failed to read tuple %d/%d: %v", i, tslen, err) - return true - } - val := vs[0] - l := len(m.Prefix()) - if l > len(vs) { - l = len(val) - } - return val[:l] >= m.Prefix() - }) - if outErr != nil { - return nil, outErr - } - var matches []string - for ; i < tslen; i++ { - vs, err := ts.At(i) - if err != nil || !m.Matches(vs[0]) { - return matches, err - } - matches = append(matches, vs[0]) - } - return matches, nil -} - func postingsForMatcher(ix IndexReader, m labels.Matcher) (index.Postings, error) { - // If the matcher selects an empty value, it selects all the series which dont + // If the matcher selects an empty value, it selects all the series which don't // have the label name set too. See: https://github.com/prometheus/prometheus/issues/3575 // and https://github.com/prometheus/prometheus/pull/3578#issuecomment-351653555 if m.Matches("") { @@ -271,21 +270,13 @@ func postingsForMatcher(ix IndexReader, m labels.Matcher) (index.Postings, error } var res []string - if pm, ok := m.(*labels.PrefixMatcher); ok { - res, err = tuplesByPrefix(pm, tpls) + for i := 0; i < tpls.Len(); i++ { + vals, err := tpls.At(i) if err != nil { return nil, err } - - } else { - for i := 0; i < tpls.Len(); i++ { - vals, err := tpls.At(i) - if err != nil { - return nil, err - } - if m.Matches(vals[0]) { - res = append(res, vals[0]) - } + if m.Matches(vals[0]) { + res = append(res, vals[0]) } } @@ -478,7 +469,7 @@ type baseChunkSeries struct { // over them. It drops chunks based on tombstones in the given reader. func LookupChunkSeries(ir IndexReader, tr TombstoneReader, ms ...labels.Matcher) (ChunkSeriesSet, error) { if tr == nil { - tr = EmptyTombstoneReader() + tr = newMemTombstones() } p, err := PostingsForMatchers(ir, ms...) 
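// Illustrative, standalone sketch (not part of the vendored diff): the rewritten
// index.Merge above drops the tree of mergedPostings iterators and instead drains
// every input into a map, then sorts once -- per the upstream comment, cheaper when
// hundreds of thousands of postings get expanded anyway. The helper name below is
// hypothetical and only assumes the standard library "sort" package is imported:
func mergeSortedIDs(lists ...[]uint64) []uint64 {
	seen := make(map[uint64]struct{})
	for _, l := range lists {
		for _, id := range l {
			seen[id] = struct{}{} // duplicates across lists collapse here
		}
	}
	out := make([]uint64, 0, len(seen))
	for id := range seen {
		out = append(out, id)
	}
	sort.Slice(out, func(i, j int) bool { return out[i] < out[j] }) // restore postings order
	return out
}
// e.g. mergeSortedIDs([]uint64{1, 3, 5}, []uint64{2, 3}) yields [1 2 3 5], which is
// what the new Merge materialises via newListPostings.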
if err != nil { @@ -499,8 +490,8 @@ func (s *baseChunkSeries) Err() error { return s.err } func (s *baseChunkSeries) Next() bool { var ( - lset labels.Labels - chkMetas []chunks.Meta + lset = make(labels.Labels, len(s.lset)) + chkMetas = make([]chunks.Meta, len(s.chks)) err error ) @@ -590,11 +581,9 @@ func (s *populatedChunkSeries) Next() bool { // This means that the chunk has be garbage collected. Remove it from the list. if s.err == ErrNotFound { s.err = nil - // Delete in-place. - chks = append(chks[:j], chks[j+1:]...) + s.chks = append(chks[:j], chks[j+1:]...) } - return false } } @@ -892,30 +881,6 @@ func (it *deletedIterator) Err() error { return it.it.Err() } -type mockSeriesSet struct { - next func() bool - series func() Series - err func() error -} - -func (m *mockSeriesSet) Next() bool { return m.next() } -func (m *mockSeriesSet) At() Series { return m.series() } -func (m *mockSeriesSet) Err() error { return m.err() } - -func newListSeriesSet(list []Series) *mockSeriesSet { - i := -1 - return &mockSeriesSet{ - next: func() bool { - i++ - return i < len(list) - }, - series: func() Series { - return list[i] - }, - err: func() error { return nil }, - } -} - type errSeriesSet struct { err error } diff --git a/vendor/github.com/prometheus/tsdb/repair.go b/vendor/github.com/prometheus/tsdb/repair.go index 2722609..15f79d5 100644 --- a/vendor/github.com/prometheus/tsdb/repair.go +++ b/vendor/github.com/prometheus/tsdb/repair.go @@ -1,3 +1,16 @@ +// Copyright 2018 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + package tsdb import ( @@ -5,12 +18,10 @@ import ( "io" "io/ioutil" "os" - "path" "path/filepath" "github.com/go-kit/kit/log" "github.com/go-kit/kit/log/level" - "github.com/oklog/ulid" "github.com/pkg/errors" "github.com/prometheus/tsdb/fileutil" ) @@ -20,20 +31,18 @@ import ( func repairBadIndexVersion(logger log.Logger, dir string) error { // All blocks written by Prometheus 2.1 with a meta.json version of 2 are affected. // We must actually set the index file version to 2 and revert the meta.json version back to 1. - subdirs, err := fileutil.ReadDir(dir) + dirs, err := blockDirs(dir) if err != nil { - return err + return errors.Wrapf(err, "list block dirs in %q", dir) } - for _, d := range subdirs { - // Skip non-block dirs. 
- if _, err := ulid.Parse(d); err != nil { - continue - } - d = path.Join(dir, d) + wrapErr := func(err error, d string) error { + return errors.Wrapf(err, "block dir: %q", d) + } + for _, d := range dirs { meta, err := readBogusMetaFile(d) if err != nil { - return err + return wrapErr(err, d) } if meta.Version == 1 { level.Info(logger).Log( @@ -53,35 +62,35 @@ func repairBadIndexVersion(logger log.Logger, dir string) error { repl, err := os.Create(filepath.Join(d, "index.repaired")) if err != nil { - return err + return wrapErr(err, d) } broken, err := os.Open(filepath.Join(d, "index")) if err != nil { - return err + return wrapErr(err, d) } if _, err := io.Copy(repl, broken); err != nil { - return err + return wrapErr(err, d) } - // Set the 5th byte to 2 to indiciate the correct file format version. + // Set the 5th byte to 2 to indicate the correct file format version. if _, err := repl.WriteAt([]byte{2}, 4); err != nil { - return err + return wrapErr(err, d) } if err := fileutil.Fsync(repl); err != nil { - return err + return wrapErr(err, d) } if err := repl.Close(); err != nil { - return err + return wrapErr(err, d) } if err := broken.Close(); err != nil { - return err + return wrapErr(err, d) } if err := renameFile(repl.Name(), broken.Name()); err != nil { - return err + return wrapErr(err, d) } // Reset version of meta.json to 1. meta.Version = 1 if err := writeMetaFile(d, meta); err != nil { - return err + return wrapErr(err, d) } } return nil diff --git a/vendor/github.com/prometheus/tsdb/tombstones.go b/vendor/github.com/prometheus/tsdb/tombstones.go index 8c760cd..0781404 100644 --- a/vendor/github.com/prometheus/tsdb/tombstones.go +++ b/vendor/github.com/prometheus/tsdb/tombstones.go @@ -20,6 +20,7 @@ import ( "io/ioutil" "os" "path/filepath" + "sync" "github.com/pkg/errors" ) @@ -28,7 +29,7 @@ const tombstoneFilename = "tombstones" const ( // MagicTombstone is 4 bytes at the head of a tombstone file. - MagicTombstone = 0x130BA30 + MagicTombstone = 0x0130BA30 tombstoneFormatV1 = 1 ) @@ -41,6 +42,9 @@ type TombstoneReader interface { // Iter calls the given function for each encountered interval. Iter(func(uint64, Intervals) error) error + // Total returns the total count of tombstones. + Total() uint64 + // Close any underlying resources Close() error } @@ -72,7 +76,7 @@ func writeTombstoneFile(dir string, tr TombstoneReader) error { mw := io.MultiWriter(f, hash) - tr.Iter(func(ref uint64, ivs Intervals) error { + if err := tr.Iter(func(ref uint64, ivs Intervals) error { for _, iv := range ivs { buf.reset() @@ -86,7 +90,9 @@ func writeTombstoneFile(dir string, tr TombstoneReader) error { } } return nil - }) + }); err != nil { + return fmt.Errorf("error writing tombstones: %v", err) + } _, err = f.Write(hash.Sum(nil)) if err != nil { @@ -107,70 +113,80 @@ type Stone struct { intervals Intervals } -func readTombstones(dir string) (memTombstones, error) { +func readTombstones(dir string) (TombstoneReader, SizeReader, error) { b, err := ioutil.ReadFile(filepath.Join(dir, tombstoneFilename)) if os.IsNotExist(err) { - return memTombstones{}, nil + return newMemTombstones(), nil, nil } else if err != nil { - return nil, err + return nil, nil, err + } + + sr := &TombstoneFile{ + size: int64(len(b)), } if len(b) < 5 { - return nil, errors.Wrap(errInvalidSize, "tombstones header") + return nil, sr, errors.Wrap(errInvalidSize, "tombstones header") } d := &decbuf{b: b[:len(b)-4]} // 4 for the checksum. 
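// Layout note (inferred from the reader code around this point, not from upstream
// docs): a tombstones file begins with the 4-byte big-endian MagicTombstone
// (0x0130BA30 -- the constant change above only pads the literal to four full bytes,
// the value is unchanged), followed by a 1-byte format version (1), then repeated
// entries of (series ref as uvarint64, mint as varint64, maxt as varint64), and ends
// with a 4-byte CRC32 over the entry payload -- which is why the decbuf above is
// capped at len(b)-4 and why readTombstones now also reports the file size through
// the new TombstoneFile/SizeReader return value.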
if mg := d.be32(); mg != MagicTombstone { - return nil, fmt.Errorf("invalid magic number %x", mg) + return nil, sr, fmt.Errorf("invalid magic number %x", mg) } if flag := d.byte(); flag != tombstoneFormatV1 { - return nil, fmt.Errorf("invalid tombstone format %x", flag) + return nil, sr, fmt.Errorf("invalid tombstone format %x", flag) } if d.err() != nil { - return nil, d.err() + return nil, sr, d.err() } // Verify checksum. hash := newCRC32() if _, err := hash.Write(d.get()); err != nil { - return nil, errors.Wrap(err, "write to hash") + return nil, sr, errors.Wrap(err, "write to hash") } if binary.BigEndian.Uint32(b[len(b)-4:]) != hash.Sum32() { - return nil, errors.New("checksum did not match") + return nil, sr, errors.New("checksum did not match") } - stonesMap := memTombstones{} + stonesMap := newMemTombstones() for d.len() > 0 { k := d.uvarint64() mint := d.varint64() maxt := d.varint64() if d.err() != nil { - return nil, d.err() + return nil, sr, d.err() } - stonesMap.add(k, Interval{mint, maxt}) + stonesMap.addInterval(k, Interval{mint, maxt}) } - return stonesMap, nil + return stonesMap, sr, nil } -type memTombstones map[uint64]Intervals - -var emptyTombstoneReader = memTombstones{} +type memTombstones struct { + intvlGroups map[uint64]Intervals + mtx sync.RWMutex +} -// EmptyTombstoneReader returns a TombstoneReader that is always empty. -func EmptyTombstoneReader() TombstoneReader { - return emptyTombstoneReader +// newMemTombstones creates new in memory TombstoneReader +// that allows adding new intervals. +func newMemTombstones() *memTombstones { + return &memTombstones{intvlGroups: make(map[uint64]Intervals)} } -func (t memTombstones) Get(ref uint64) (Intervals, error) { - return t[ref], nil +func (t *memTombstones) Get(ref uint64) (Intervals, error) { + t.mtx.RLock() + defer t.mtx.RUnlock() + return t.intvlGroups[ref], nil } -func (t memTombstones) Iter(f func(uint64, Intervals) error) error { - for ref, ivs := range t { +func (t *memTombstones) Iter(f func(uint64, Intervals) error) error { + t.mtx.RLock() + defer t.mtx.RUnlock() + for ref, ivs := range t.intvlGroups { if err := f(ref, ivs); err != nil { return err } @@ -178,11 +194,37 @@ func (t memTombstones) Iter(f func(uint64, Intervals) error) error { return nil } -func (t memTombstones) add(ref uint64, itv Interval) { - t[ref] = t[ref].add(itv) +func (t *memTombstones) Total() uint64 { + t.mtx.RLock() + defer t.mtx.RUnlock() + + total := uint64(0) + for _, ivs := range t.intvlGroups { + total += uint64(len(ivs)) + } + return total +} + +// addInterval to an existing memTombstones +func (t *memTombstones) addInterval(ref uint64, itvs ...Interval) { + t.mtx.Lock() + defer t.mtx.Unlock() + for _, itv := range itvs { + t.intvlGroups[ref] = t.intvlGroups[ref].add(itv) + } +} + +// TombstoneFile holds information about the tombstone file. +type TombstoneFile struct { + size int64 +} + +// Size returns the tombstone file size. +func (t *TombstoneFile) Size() int64 { + return t.size } -func (memTombstones) Close() error { +func (*memTombstones) Close() error { return nil } @@ -208,7 +250,7 @@ func (tr Interval) isSubrange(dranges Intervals) bool { // Intervals represents a set of increasing and non-overlapping time-intervals. type Intervals []Interval -// This adds the new time-range to the existing ones. +// add the new time-range to the existing ones. // The existing ones must be sorted. 
func (itvs Intervals) add(n Interval) Intervals { for i, r := range itvs { diff --git a/vendor/github.com/prometheus/tsdb/wal.go b/vendor/github.com/prometheus/tsdb/wal.go index 3f1cf7d..60e1c58 100644 --- a/vendor/github.com/prometheus/tsdb/wal.go +++ b/vendor/github.com/prometheus/tsdb/wal.go @@ -33,6 +33,7 @@ import ( "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/tsdb/fileutil" "github.com/prometheus/tsdb/labels" + "github.com/prometheus/tsdb/wal" ) // WALEntryType indicates what data a WAL entry contains. @@ -82,6 +83,8 @@ func newWalMetrics(wal *SegmentWAL, r prometheus.Registerer) *walMetrics { // WAL is a write ahead log that can log new series labels and samples. // It must be completely read before new entries are logged. +// +// DEPRECATED: use wal pkg combined with the record codex instead. type WAL interface { Reader() WALReader LogSeries([]RefSeries) error @@ -91,27 +94,6 @@ type WAL interface { Close() error } -// NopWAL is a WAL that does nothing. -func NopWAL() WAL { - return nopWAL{} -} - -type nopWAL struct{} - -func (nopWAL) Read( - seriesf func([]RefSeries), - samplesf func([]RefSample), - deletesf func([]Stone), -) error { - return nil -} -func (w nopWAL) Reader() WALReader { return w } -func (nopWAL) LogSeries([]RefSeries) error { return nil } -func (nopWAL) LogSamples([]RefSample) error { return nil } -func (nopWAL) LogDeletes([]Stone) error { return nil } -func (nopWAL) Truncate(int64, func(uint64) bool) error { return nil } -func (nopWAL) Close() error { return nil } - // WALReader reads entries from a WAL. type WALReader interface { Read( @@ -173,6 +155,8 @@ func newCRC32() hash.Hash32 { } // SegmentWAL is a write ahead log for series data. +// +// DEPRECATED: use wal pkg combined with the record coders instead. type SegmentWAL struct { mtx sync.Mutex metrics *walMetrics @@ -317,7 +301,7 @@ func (w *SegmentWAL) putBuffer(b *encbuf) { } // Truncate deletes the values prior to mint and the series which the keep function -// does not indiciate to preserve. +// does not indicate to preserve. func (w *SegmentWAL) Truncate(mint int64, keep func(uint64) bool) error { // The last segment is always active. if len(w.files) < 2 { @@ -418,7 +402,7 @@ func (w *SegmentWAL) Truncate(mint int64, keep func(uint64) bool) error { } // The file object of csf still holds the name before rename. Recreate it so - // subsequent truncations do not look at a non-existant file name. + // subsequent truncations do not look at a non-existent file name. csf.File, err = w.openSegmentFile(candidates[0].Name()) if err != nil { return err @@ -718,6 +702,13 @@ func (w *SegmentWAL) run(interval time.Duration) { // Close syncs all data and closes the underlying resources. func (w *SegmentWAL) Close() error { + // Make sure you can call Close() multiple times. + select { + case <-w.stopc: + return nil // Already closed. + default: + } + close(w.stopc) <-w.donec @@ -730,21 +721,14 @@ func (w *SegmentWAL) Close() error { // On opening, a WAL must be fully consumed once. Afterwards // only the current segment will still be open. if hf := w.head(); hf != nil { - return errors.Wrapf(hf.Close(), "closing WAL head %s", hf.Name()) + if err := hf.Close(); err != nil { + return errors.Wrapf(err, "closing WAL head %s", hf.Name()) + } } - return w.dirFile.Close() + return errors.Wrapf(w.dirFile.Close(), "closing WAL dir %s", w.dirFile.Name()) } -const ( - minSectorSize = 512 - - // walPageBytes is the alignment for flushing records to the backing Writer. 
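// Illustrative, standalone sketch (not vendored code): SegmentWAL.Close above is now
// safe to call more than once -- it probes stopc with a non-blocking select before
// calling close(). The minimal pattern, with hypothetical type and field names:
type stoppable struct {
	stopc chan struct{} // closed exactly once to signal shutdown
	donec chan struct{} // closed by the background goroutine when it exits
}

func (s *stoppable) Close() error {
	select {
	case <-s.stopc:
		return nil // already closed; avoid panicking on a second close()
	default:
	}
	close(s.stopc)
	<-s.donec // wait for the run loop to finish
	return nil
}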
- // It should be a multiple of the minimum sector size so that WAL can safely - // distinguish between torn writes and ordinary data corruption. - walPageBytes = 16 * minSectorSize -) - func (w *SegmentWAL) write(t WALEntryType, flag uint8, buf []byte) error { // Cut to the next segment if the entry exceeds the file size unless it would also // exceed the size of a new segment. @@ -904,16 +888,19 @@ func (r *walReader) Read( if seriesf != nil { seriesf(v) } + //lint:ignore SA6002 safe to ignore and actually fixing it has some performance penalty. seriesPool.Put(v[:0]) case []RefSample: if samplesf != nil { samplesf(v) } + //lint:ignore SA6002 safe to ignore and actually fixing it has some performance penalty. samplePool.Put(v[:0]) case []Stone: if deletesf != nil { deletesf(v) } + //lint:ignore SA6002 safe to ignore and actually fixing it has some performance penalty. deletePool.Put(v[:0]) default: level.Error(r.logger).Log("msg", "unexpected data type") @@ -937,7 +924,7 @@ func (r *walReader) Read( series = v.([]RefSeries) } - err := r.decodeSeries(flag, b, &series) + err = r.decodeSeries(flag, b, &series) if err != nil { err = errors.Wrap(err, "decode series entry") break @@ -958,7 +945,7 @@ func (r *walReader) Read( samples = v.([]RefSample) } - err := r.decodeSamples(flag, b, &samples) + err = r.decodeSamples(flag, b, &samples) if err != nil { err = errors.Wrap(err, "decode samples entry") break @@ -980,7 +967,7 @@ func (r *walReader) Read( deletes = v.([]Stone) } - err := r.decodeDeletes(flag, b, &deletes) + err = r.decodeDeletes(flag, b, &deletes) if err != nil { err = errors.Wrap(err, "decode delete entry") break @@ -1015,7 +1002,7 @@ func (r *walReader) at() (WALEntryType, byte, []byte) { } // next returns decodes the next entry pair and returns true -// if it was succesful. +// if it was successful. func (r *walReader) next() bool { if r.cur >= len(r.files) { return false @@ -1206,3 +1193,113 @@ func (r *walReader) decodeDeletes(flag byte, b []byte, res *[]Stone) error { } return nil } + +func deprecatedWALExists(logger log.Logger, dir string) (bool, error) { + // Detect whether we still have the old WAL. + fns, err := sequenceFiles(dir) + if err != nil && !os.IsNotExist(err) { + return false, errors.Wrap(err, "list sequence files") + } + if len(fns) == 0 { + return false, nil // No WAL at all yet. + } + // Check header of first segment to see whether we are still dealing with an + // old WAL. + f, err := os.Open(fns[0]) + if err != nil { + return false, errors.Wrap(err, "check first existing segment") + } + defer f.Close() + + var hdr [4]byte + if _, err := f.Read(hdr[:]); err != nil && err != io.EOF { + return false, errors.Wrap(err, "read header from first segment") + } + // If we cannot read the magic header for segments of the old WAL, abort. + // Either it's migrated already or there's a corruption issue with which + // we cannot deal here anyway. Subsequent attempts to open the WAL will error in that case. + if binary.BigEndian.Uint32(hdr[:]) != WALMagic { + return false, nil + } + return true, nil +} + +// MigrateWAL rewrites the deprecated write ahead log into the new format. 
+func MigrateWAL(logger log.Logger, dir string) (err error) { + if logger == nil { + logger = log.NewNopLogger() + } + if exists, err := deprecatedWALExists(logger, dir); err != nil || !exists { + return err + } + level.Info(logger).Log("msg", "migrating WAL format") + + tmpdir := dir + ".tmp" + if err := os.RemoveAll(tmpdir); err != nil { + return errors.Wrap(err, "cleanup replacement dir") + } + repl, err := wal.New(logger, nil, tmpdir) + if err != nil { + return errors.Wrap(err, "open new WAL") + } + + // It should've already been closed as part of the previous finalization. + // Do it once again in case of prior errors. + defer func() { + if err != nil { + repl.Close() + } + }() + + w, err := OpenSegmentWAL(dir, logger, time.Minute, nil) + if err != nil { + return errors.Wrap(err, "open old WAL") + } + defer w.Close() + + rdr := w.Reader() + + var ( + enc RecordEncoder + b []byte + ) + decErr := rdr.Read( + func(s []RefSeries) { + if err != nil { + return + } + err = repl.Log(enc.Series(s, b[:0])) + }, + func(s []RefSample) { + if err != nil { + return + } + err = repl.Log(enc.Samples(s, b[:0])) + }, + func(s []Stone) { + if err != nil { + return + } + err = repl.Log(enc.Tombstones(s, b[:0])) + }, + ) + if decErr != nil { + return errors.Wrap(err, "decode old entries") + } + if err != nil { + return errors.Wrap(err, "write new entries") + } + // We explicitly close even when there is a defer for Windows to be + // able to delete it. The defer is in place to close it in-case there + // are errors above. + if err := w.Close(); err != nil { + return errors.Wrap(err, "close old WAL") + } + if err := repl.Close(); err != nil { + return errors.Wrap(err, "close new WAL") + } + if err := fileutil.Replace(tmpdir, dir); err != nil { + return errors.Wrap(err, "replace old WAL") + } + return nil +} From b64ed6822c2b4d495afe94e8fb4cc8f06edeeb63 Mon Sep 17 00:00:00 2001 From: Radoslaw Lesniewski Date: Thu, 31 Jan 2019 10:22:53 +0100 Subject: [PATCH 2/2] receiver.go - reverted tsdb.Options default values, "Retention" changed to "RetentionDuration" --- runnable/receiver.go | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/runnable/receiver.go b/runnable/receiver.go index 1d53609..233e6ac 100644 --- a/runnable/receiver.go +++ b/runnable/receiver.go @@ -86,10 +86,10 @@ func RunReceiver( } tsdbCfg := &tsdb.Options{ - Retention: model.Duration(time.Minute * 6), - NoLockfile: true, - MinBlockDuration: model.Duration(time.Minute * 3), - MaxBlockDuration: model.Duration(time.Minute * 3), + RetentionDuration: model.Duration(time.Hour * 24 * 15), + NoLockfile: true, + MinBlockDuration: model.Duration(time.Hour * 2), + MaxBlockDuration: model.Duration(time.Hour * 2), } ctxWeb, cancelWeb := context.WithCancel(context.Background())
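// Illustrative usage sketch (not part of either patch), referring back to the
// MigrateWAL function vendored in above from prometheus/tsdb: a caller that manages
// a TSDB data directory could run the one-off migration of the deprecated segmented
// WAL before opening the database. The "wal" sub-directory and the surrounding error
// handling are assumptions for the example, not code from this change set:
func migrateOldWAL(logger log.Logger, dataDir string) error {
	walDir := filepath.Join(dataDir, "wal") // assumed layout: the WAL lives under <dataDir>/wal
	if err := tsdb.MigrateWAL(logger, walDir); err != nil {
		return errors.Wrap(err, "migrate deprecated WAL")
	}
	return nil
}
// Imports assumed by the sketch: "path/filepath", "github.com/go-kit/kit/log",
// "github.com/pkg/errors", "github.com/prometheus/tsdb".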