diff --git a/CHANGELOG.md b/CHANGELOG.md
index 40eb11232..d85337fee 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -48,6 +48,7 @@ For example, let's say we want to store all data that triggered an alert in Infl
 - [#451](https://github.com/influxdata/kapacitor/issues/451): StreamNode supports `|groupBy` and `|where` methods.
 - [#93](https://github.com/influxdata/kapacitor/issues/93): AlertNode now outputs data to child nodes. The output data can have either a tag or field indicating the alert level.
 - [#281](https://github.com/influxdata/kapacitor/issues/281): AlertNode now has an `.all()` property that specifies that all points in a batch must match the criteria in order to trigger an alert.
+- [#384](https://github.com/influxdata/kapacitor/issues/384): Add `elapsed` function to compute the time difference between subsequent points.
 
 ### Bugfixes
 
diff --git a/cmd/kapacitord/run/server_helper_test.go b/cmd/kapacitord/run/server_helper_test.go
index 3b69f6f20..e95902684 100644
--- a/cmd/kapacitord/run/server_helper_test.go
+++ b/cmd/kapacitord/run/server_helper_test.go
@@ -105,7 +105,9 @@ func (s *Server) Write(db, rp, body string, params url.Values) (results string,
 	resp, err := http.Post(s.URL()+"/write?"+params.Encode(), "", strings.NewReader(body))
 	if err != nil {
 		return "", err
-	} else if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusNoContent {
+	}
+	defer resp.Body.Close()
+	if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusNoContent {
 		return "", fmt.Errorf("invalid status code: code=%d, body=%s", resp.StatusCode, MustReadAll(resp.Body))
 	}
 	return string(MustReadAll(resp.Body)), nil
@@ -118,6 +120,7 @@ func (s *Server) HTTPGetRetry(url, exp string, retries int, sleep time.Duration)
 		if err != nil {
 			return err
 		}
+		defer resp.Body.Close()
 		r = string(MustReadAll(resp.Body))
 		if r == exp {
 			break
diff --git a/cmd/kapacitord/run/server_test.go b/cmd/kapacitord/run/server_test.go
index 7fdac462a..37ae41c63 100644
--- a/cmd/kapacitord/run/server_test.go
+++ b/cmd/kapacitord/run/server_test.go
@@ -352,7 +352,7 @@ func TestServer_StreamTask(t *testing.T) {
 	endpoint := fmt.Sprintf("%s/task/%s/count", s.URL(), name)
 
 	// Request data before any writes and expect null responses
-	nullResponse := `{"Series":null,"Err":null}`
+	nullResponse := `{"Series":null,"Messages":null,"Err":null}`
 	err = s.HTTPGetRetry(endpoint, nullResponse, 100, time.Millisecond*5)
 	if err != nil {
 		t.Error(err)
@@ -380,7 +380,7 @@ test value=1 0000000011
 	v.Add("precision", "s")
 	s.MustWrite("mydb", "myrp", points, v)
 
-	exp := `{"Series":[{"name":"test","columns":["time","count"],"values":[["1970-01-01T00:00:10Z",15]]}],"Err":null}`
+	exp := `{"Series":[{"name":"test","columns":["time","count"],"values":[["1970-01-01T00:00:10Z",15]]}],"Messages":null,"Err":null}`
 	err = s.HTTPGetRetry(endpoint, exp, 100, time.Millisecond*5)
 	if err != nil {
 		t.Error(err)
@@ -419,7 +419,7 @@ func TestServer_StreamTask_AllMeasurements(t *testing.T) {
 	endpoint := fmt.Sprintf("%s/task/%s/count", s.URL(), name)
 
 	// Request data before any writes and expect null responses
-	nullResponse := `{"Series":null,"Err":null}`
+	nullResponse := `{"Series":null,"Messages":null,"Err":null}`
 	err = s.HTTPGetRetry(endpoint, nullResponse, 100, time.Millisecond*5)
 	if err != nil {
 		t.Error(err)
@@ -447,7 +447,7 @@ test0 value=1 0000000011
 	v.Add("precision", "s")
 	s.MustWrite("mydb", "myrp", points, v)
 
-	exp := 
`{"Series":[{"name":"test0","columns":["time","count"],"values":[["1970-01-01T00:00:10Z",15]]}],"Messages":null,"Err":null}` err = s.HTTPGetRetry(endpoint, exp, 100, time.Millisecond*5) if err != nil { t.Error(err) @@ -513,7 +513,7 @@ func TestServer_BatchTask(t *testing.T) { endpoint := fmt.Sprintf("%s/task/%s/count", s.URL(), name) - exp := `{"Series":[{"name":"cpu","columns":["time","count"],"values":[["1971-01-01T00:00:01.002Z",2]]}],"Err":null}` + exp := `{"Series":[{"name":"cpu","columns":["time","count"],"values":[["1971-01-01T00:00:01.002Z",2]]}],"Messages":null,"Err":null}` err = s.HTTPGetRetry(endpoint, exp, 100, time.Millisecond*5) if err != nil { t.Error(err) @@ -955,7 +955,7 @@ func testStreamAgent(t *testing.T, c *run.Config) { endpoint := fmt.Sprintf("%s/task/%s/moving_avg", s.URL(), name) // Request data before any writes and expect null responses - nullResponse := `{"Series":null,"Err":null}` + nullResponse := `{"Series":null,"Messages":null,"Err":null}` err = s.HTTPGetRetry(endpoint, nullResponse, 100, time.Millisecond*5) if err != nil { t.Error(err) @@ -990,7 +990,7 @@ test,group=b value=0 0000000011 v.Add("precision", "s") s.MustWrite("mydb", "myrp", points, v) - exp := `{"Series":[{"name":"test","tags":{"group":"a"},"columns":["time","mean"],"values":[["1970-01-01T00:00:11Z",0.9]]},{"name":"test","tags":{"group":"b"},"columns":["time","mean"],"values":[["1970-01-01T00:00:11Z",1.9]]}],"Err":null}` + exp := `{"Series":[{"name":"test","tags":{"group":"a"},"columns":["time","mean"],"values":[["1970-01-01T00:00:11Z",0.9]]},{"name":"test","tags":{"group":"b"},"columns":["time","mean"],"values":[["1970-01-01T00:00:11Z",1.9]]}],"Messages":null,"Err":null}` err = s.HTTPGetRetry(endpoint, exp, 100, time.Millisecond*5) if err != nil { t.Error(err) @@ -1126,7 +1126,7 @@ func testStreamAgentSocket(t *testing.T, c *run.Config) { endpoint := fmt.Sprintf("%s/task/%s/count", s.URL(), name) // Request data before any writes and expect null responses - nullResponse := `{"Series":null,"Err":null}` + nullResponse := `{"Series":null,"Messages":null,"Err":null}` err = s.HTTPGetRetry(endpoint, nullResponse, 100, time.Millisecond*5) if err != nil { t.Error(err) @@ -1149,7 +1149,7 @@ test,group=a value=0 0000000011 v.Add("precision", "s") s.MustWrite("mydb", "myrp", points, v) - exp := `{"Series":[{"name":"test","tags":{"group":"a"},"columns":["time","count"],"values":[["1970-01-01T00:00:10Z",10]]}],"Err":null}` + exp := `{"Series":[{"name":"test","tags":{"group":"a"},"columns":["time","count"],"values":[["1970-01-01T00:00:10Z",10]]}],"Messages":null,"Err":null}` err = s.HTTPGetRetry(endpoint, exp, 100, time.Millisecond*5) if err != nil { t.Error(err) @@ -1328,7 +1328,7 @@ func testBatchAgent(t *testing.T, c *run.Config) { } endpoint := fmt.Sprintf("%s/task/%s/count", s.URL(), name) - exp := `{"Series":[{"name":"cpu","tags":{"count":"1"},"columns":["time","count"],"values":[["1971-01-01T00:00:00.02Z",5]]},{"name":"cpu","tags":{"count":"0"},"columns":["time","count"],"values":[["1971-01-01T00:00:00.02Z",5]]}],"Err":null}` + exp := `{"Series":[{"name":"cpu","tags":{"count":"1"},"columns":["time","count"],"values":[["1971-01-01T00:00:00.02Z",5]]},{"name":"cpu","tags":{"count":"0"},"columns":["time","count"],"values":[["1971-01-01T00:00:00.02Z",5]]}],"Messages":null,"Err":null}` err = s.HTTPGetRetry(endpoint, exp, 100, time.Millisecond*50) if err != nil { t.Error(err) diff --git a/influxql.gen.go b/influxql.gen.go index 80cb0a4a1..52c3e1cf6 100644 --- a/influxql.gen.go +++ b/influxql.gen.go @@ 
-129,7 +129,7 @@ type floatPointEmitter struct { func (e *floatPointEmitter) EmitPoint() (models.Point, error) { slice := e.emitter.Emit() if len(slice) != 1 { - return models.Point{}, fmt.Errorf("unexpected result from InfluxQL function, got %d points expected 1", len(slice)) + return models.Point{}, ErrEmptyEmit } ap := slice[0] var t time.Time @@ -301,7 +301,7 @@ type integerPointEmitter struct { func (e *integerPointEmitter) EmitPoint() (models.Point, error) { slice := e.emitter.Emit() if len(slice) != 1 { - return models.Point{}, fmt.Errorf("unexpected result from InfluxQL function, got %d points expected 1", len(slice)) + return models.Point{}, ErrEmptyEmit } ap := slice[0] var t time.Time diff --git a/influxql.gen.go.tmpl b/influxql.gen.go.tmpl index 07d3b93a8..a058da4d7 100644 --- a/influxql.gen.go.tmpl +++ b/influxql.gen.go.tmpl @@ -129,7 +129,7 @@ type {{.name}}PointEmitter struct { func (e *{{.name}}PointEmitter) EmitPoint() (models.Point, error) { slice := e.emitter.Emit() if len(slice) != 1 { - return models.Point{}, fmt.Errorf("unexpected result from InfluxQL function, got %d points expected 1", len(slice)) + return models.Point{}, ErrEmptyEmit } ap := slice[0] var t time.Time diff --git a/influxql.go b/influxql.go index b1268f921..b9efd0cd3 100644 --- a/influxql.go +++ b/influxql.go @@ -1,6 +1,7 @@ package kapacitor import ( + "errors" "fmt" "log" "time" @@ -14,16 +15,20 @@ import ( type createReduceContextFunc func(c baseReduceContext) reduceContext +var ErrEmptyEmit = errors.New("error call to emit produced no results") + type InfluxQLNode struct { node - n *pipeline.InfluxQLNode - createFn createReduceContextFunc + n *pipeline.InfluxQLNode + createFn createReduceContextFunc + isStreamTransformation bool } func newInfluxQLNode(et *ExecutingTask, n *pipeline.InfluxQLNode, l *log.Logger) (*InfluxQLNode, error) { m := &InfluxQLNode{ node: node{Node: n, et: et, logger: l}, n: n, + isStreamTransformation: n.ReduceCreater.IsStreamTransformation, } m.node.runF = m.runInfluxQLs return m, nil @@ -79,7 +84,7 @@ func (n *InfluxQLNode) runStreamInfluxQL() error { dimensions: p.Dimensions, tags: p.PointTags(), time: p.Time, - pointTimes: n.n.PointTimes, + pointTimes: n.n.PointTimes || n.isStreamTransformation, } createFn, err := n.getCreateFn(p.Fields[c.field]) @@ -89,22 +94,32 @@ func (n *InfluxQLNode) runStreamInfluxQL() error { context = createFn(c) contexts[p.Group] = context - context.AggregatePoint(&p) - } else if p.Time.Equal(context.Time()) { + } + if n.isStreamTransformation { context.AggregatePoint(&p) - // advance to next point p, ok = n.ins[0].NextPoint() - } else { + err := n.emit(context) - if err != nil { + if err != nil && err != ErrEmptyEmit { return err } - - // Nil out reduced point - contexts[p.Group] = nil - // do not advance, - // go through loop again to initialize new iterator. + } else { + if p.Time.Equal(context.Time()) { + context.AggregatePoint(&p) + // advance to next point + p, ok = n.ins[0].NextPoint() + } else { + err := n.emit(context) + if err != nil { + return err + } + + // Nil out reduced point + contexts[p.Group] = nil + // do not advance, + // go through loop again to initialize new iterator. 
+ } } } return nil diff --git a/integrations/batcher_test.go b/integrations/batcher_test.go index f4301b400..02bd12b71 100644 --- a/integrations/batcher_test.go +++ b/integrations/batcher_test.go @@ -197,6 +197,39 @@ batch testBatcherWithOutput(t, "TestBatch_DerivativeNN", script, 21*time.Second, er) } +func TestBatch_Elapsed(t *testing.T) { + + var script = ` +batch + |query(''' + SELECT "value" + FROM "telegraf"."default".packets +''') + .period(10s) + .every(10s) + |elapsed('value', 1ms) + |httpOut('TestBatch_Elapsed') +` + + er := kapacitor.Result{ + Series: imodels.Rows{ + { + Name: "packets", + Tags: nil, + Columns: []string{"time", "elapsed"}, + Values: [][]interface{}{ + { + time.Date(1971, 1, 1, 0, 0, 8, 0, time.UTC), + 2000.0, + }, + }, + }, + }, + } + + testBatcherWithOutput(t, "TestBatch_Elapsed", script, 21*time.Second, er) +} + func TestBatch_SimpleMR(t *testing.T) { var script = ` diff --git a/integrations/data/TestBatch_Elapsed.0.brpl b/integrations/data/TestBatch_Elapsed.0.brpl new file mode 100644 index 000000000..b7bdf4432 --- /dev/null +++ b/integrations/data/TestBatch_Elapsed.0.brpl @@ -0,0 +1 @@ +{"name":"packets","points":[{"fields":{"value":1000},"time":"2015-10-18T00:00:00Z"},{"fields":{"value":1001},"time":"2015-10-18T00:00:02Z"},{"fields":{"value":1002},"time":"2015-10-18T00:00:04Z"},{"fields":{"value":1003},"time":"2015-10-18T00:00:06Z"},{"fields":{"value":1004},"time":"2015-10-18T00:00:08Z"}]} diff --git a/integrations/data/TestStream_Elapsed.srpl b/integrations/data/TestStream_Elapsed.srpl new file mode 100644 index 000000000..1a989e2b4 --- /dev/null +++ b/integrations/data/TestStream_Elapsed.srpl @@ -0,0 +1,27 @@ +dbname +rpname +packets value=1000 0000000001 +dbname +rpname +packets value=1001 0000000002 +dbname +rpname +packets value=1002 0000000003 +dbname +rpname +packets value=1003 0000000004 +dbname +rpname +packets value=1004 0000000005 +dbname +rpname +packets value=1006 0000000006 +dbname +rpname +packets value=1009 0000000010 +dbname +rpname +packets value=1010 0000000011 +dbname +rpname +packets value=1011 0000000012 diff --git a/integrations/streamer_test.go b/integrations/streamer_test.go index 6bebd2768..628072fe1 100644 --- a/integrations/streamer_test.go +++ b/integrations/streamer_test.go @@ -205,6 +205,36 @@ stream testStreamerWithOutput(t, "TestStream_DerivativeNN", script, 15*time.Second, er, nil, false) } +func TestStream_Elapsed(t *testing.T) { + + var script = ` +stream + |from() + .measurement('packets') + |elapsed('value', 1s) + |window() + .period(10s) + .every(10s) + |max('elapsed') + |httpOut('TestStream_Elapsed') +` + er := kapacitor.Result{ + Series: imodels.Rows{ + { + Name: "packets", + Tags: nil, + Columns: []string{"time", "max"}, + Values: [][]interface{}{[]interface{}{ + time.Date(1971, 1, 1, 0, 0, 11, 0, time.UTC), + 4.0, + }}, + }, + }, + } + + testStreamerWithOutput(t, "TestStream_Elapsed", script, 15*time.Second, er, nil, false) +} + func TestStream_WindowMissing(t *testing.T) { var script = ` diff --git a/pipeline/influxql.gen.go b/pipeline/influxql.gen.go index d41e8dd2e..579885f70 100644 --- a/pipeline/influxql.gen.go +++ b/pipeline/influxql.gen.go @@ -22,8 +22,9 @@ type ReduceCreater struct { CreateIntegerReducer func() (influxql.IntegerPointAggregator, influxql.IntegerPointEmitter) CreateIntegerBulkReducer func() (IntegerBulkPointAggregator, influxql.IntegerPointEmitter) - TopBottomCallInfo *TopBottomCallInfo - IsSimpleSelector bool + TopBottomCallInfo *TopBottomCallInfo + IsSimpleSelector bool + 
IsStreamTransformation bool } type FloatBulkPointAggregator interface { diff --git a/pipeline/influxql.gen.go.tmpl b/pipeline/influxql.gen.go.tmpl index 4d07cf4b9..be2f9f1ff 100644 --- a/pipeline/influxql.gen.go.tmpl +++ b/pipeline/influxql.gen.go.tmpl @@ -13,6 +13,7 @@ type ReduceCreater struct { TopBottomCallInfo *TopBottomCallInfo IsSimpleSelector bool + IsStreamTransformation bool } {{range .}} diff --git a/pipeline/influxql.go b/pipeline/influxql.go index 99c9ac302..259aa0c6a 100644 --- a/pipeline/influxql.go +++ b/pipeline/influxql.go @@ -1,6 +1,10 @@ package pipeline -import "github.com/influxdata/influxdb/influxql" +import ( + "time" + + "github.com/influxdata/influxdb/influxql" +) // tmpl -- go get github.com/benbjohnson/tmpl //go:generate tmpl -data=@../tmpldata influxql.gen.go.tmpl @@ -85,12 +89,12 @@ func (n *chainnode) Count(field string) *InfluxQLNode { // Produce batch of only the distinct points. func (n *chainnode) Distinct(field string) *InfluxQLNode { i := newInfluxQLNode("distinct", field, n.Provides(), BatchEdge, ReduceCreater{ - CreateFloatBulkReducer: func() (FloatBulkPointAggregator, influxql.FloatPointEmitter) { - fn := influxql.NewFloatSliceFuncReducer(influxql.FloatDistinctReduceSlice) + CreateFloatReducer: func() (influxql.FloatPointAggregator, influxql.FloatPointEmitter) { + fn := influxql.NewFloatDistinctReducer() return fn, fn }, - CreateIntegerBulkReducer: func() (IntegerBulkPointAggregator, influxql.IntegerPointEmitter) { - fn := influxql.NewIntegerSliceFuncReducer(influxql.IntegerDistinctReduceSlice) + CreateIntegerReducer: func() (influxql.IntegerPointAggregator, influxql.IntegerPointEmitter) { + fn := influxql.NewIntegerDistinctReducer() return fn, fn }, }) @@ -338,3 +342,20 @@ func (n *chainnode) Stddev(field string) *InfluxQLNode { n.linkChild(i) return i } + +// Compute the elapsed time between points +func (n *chainnode) Elapsed(field string, unit time.Duration) *InfluxQLNode { + i := newInfluxQLNode("elapsed", field, n.Provides(), n.Provides(), ReduceCreater{ + CreateFloatIntegerReducer: func() (influxql.FloatPointAggregator, influxql.IntegerPointEmitter) { + fn := influxql.NewFloatElapsedReducer(influxql.Interval{Duration: unit}) + return fn, fn + }, + CreateIntegerReducer: func() (influxql.IntegerPointAggregator, influxql.IntegerPointEmitter) { + fn := influxql.NewIntegerElapsedReducer(influxql.Interval{Duration: unit}) + return fn, fn + }, + IsStreamTransformation: true, + }) + n.linkChild(i) + return i +} diff --git a/vendor.yml b/vendor.yml index e6ae9c36b..1367630ed 100644 --- a/vendor.yml +++ b/vendor.yml @@ -12,11 +12,11 @@ vendors: - path: github.com/gogo/protobuf rev: 4f262e4b0f3a6cea646e15798109335551e21756 - path: github.com/golang/protobuf - rev: f0a097ddac24fb00e07d2ac17f8671423f3ea47c + rev: 2ebff28ac76fb19e2d25e5ddd4885708dfdd5611 - path: github.com/gorhill/cronexpr rev: f0984319b44273e83de132089ae42b1810f4933b - path: github.com/influxdata/influxdb - rev: 93a76eecb6d7575225a68235667de68ed906222b + rev: 7def8bc0c98c04080ed649d3eb31fb00baf68482 - path: github.com/influxdata/wlog rev: 7c63b0a71ef8300adc255344d275e10e5c3a71ec - path: github.com/influxdb/usage-client @@ -38,13 +38,13 @@ vendors: - path: github.com/shurcooL/sanitized_anchor_name rev: 10ef21a441db47d8b13ebcc5fd2310f636973c77 - path: github.com/stretchr/testify - rev: bcd9e3389dd03b0b668d11f4d462a6af6c2dfd60 + rev: c5d7a69bf8a2c9c374798160849c071093e41dd1 - path: github.com/twinj/uuid rev: 89173bcdda19db0eb88aef1e1cb1cb2505561d31 - path: golang.org/x/crypto - rev: 
1777f3ba8c1fed80fcaec3317e3aaa4f627764d2 + rev: 2f6fccd33b9b1fc23ebb73ad4890698820f7174d - path: golang.org/x/sys - rev: 9eef40adf05b951699605195b829612bd7b69952 + rev: f64b50fbea64174967a8882830d621a18ee1548e - path: gopkg.in/alexcesaro/quotedprintable.v3 rev: 2caba252f4dc53eaf6b553000885530023f54623 - path: gopkg.in/gomail.v2 diff --git a/vendor/github.com/golang/protobuf/proto/properties.go b/vendor/github.com/golang/protobuf/proto/properties.go index 4fe2ec22e..880eb22d8 100644 --- a/vendor/github.com/golang/protobuf/proto/properties.go +++ b/vendor/github.com/golang/protobuf/proto/properties.go @@ -701,7 +701,11 @@ func getPropertiesLocked(t reflect.Type) *StructProperties { if f.Name == "XXX_unrecognized" { // special case prop.unrecField = toField(&f) } - oneof := f.Tag.Get("protobuf_oneof") != "" // special case + oneof := f.Tag.Get("protobuf_oneof") // special case + if oneof != "" { + // Oneof fields don't use the traditional protobuf tag. + p.OrigName = oneof + } prop.Prop[i] = p prop.order[i] = i if debug { @@ -711,7 +715,7 @@ func getPropertiesLocked(t reflect.Type) *StructProperties { } print("\n") } - if p.enc == nil && !strings.HasPrefix(f.Name, "XXX_") && !oneof { + if p.enc == nil && !strings.HasPrefix(f.Name, "XXX_") && oneof == "" { fmt.Fprintln(os.Stderr, "proto: no encoder for", f.Name, f.Type.String(), "[GetProperties]") } } diff --git a/vendor/github.com/influxdata/influxdb/CHANGELOG.md b/vendor/github.com/influxdata/influxdb/CHANGELOG.md index e953a6fe9..87f818f2d 100644 --- a/vendor/github.com/influxdata/influxdb/CHANGELOG.md +++ b/vendor/github.com/influxdata/influxdb/CHANGELOG.md @@ -4,19 +4,46 @@ - [#6237](https://github.com/influxdata/influxdb/issues/6237): Enable continuous integration testing on Windows platform via AppVeyor. Thanks @mvadu - [#6263](https://github.com/influxdata/influxdb/pull/6263): Reduce UDP Service allocation size. +- [#6228](https://github.com/influxdata/influxdb/pull/6228): Support for multiple listeners for collectd and OpenTSDB inputs. +- [#6292](https://github.com/influxdata/influxdb/issues/6292): Allow percentile to be used as a selector. +- [#5707](https://github.com/influxdata/influxdb/issues/5707): Return a deprecated message when IF NOT EXISTS is used. +- [#6334](https://github.com/influxdata/influxdb/pull/6334): Allow environment variables to be set per input type. +- [#6394](https://github.com/influxdata/influxdb/pull/6394): Allow time math with integer timestamps. +- [#3247](https://github.com/influxdata/influxdb/issues/3247): Implement derivatives across intervals for aggregate queries. +- [#3166](https://github.com/influxdata/influxdb/issues/3166): Sort the series keys inside of a tag set so output is deterministic. +- [#1856](https://github.com/influxdata/influxdb/issues/1856): Add `elapsed` function that returns the time delta between subsequent points. ### Bugfixes -- [#6206](https://github.com/influxdata/influxdb/issues/6206): Handle nil values from the tsm1 cursor correctly. -- [#6248](https://github.com/influxdata/influxdb/issues/6248): Panic using incorrectly quoted "queries" field key. -- [#6257](https://github.com/influxdata/influxdb/issues/6257): CreateShardGroup was incrementing meta data index even when it was idempotent. -- [#6223](https://github.com/influxdata/influxdb/issues/6223): Failure to start/run on Windows. Thanks @mvadu -- [#6229](https://github.com/influxdata/influxdb/issues/6229): Fixed aggregate queries with no GROUP BY to include the end time. 
- [#6283](https://github.com/influxdata/influxdb/pull/6283): Fix GROUP BY tag to produce consistent results when a series has no tags. - [#3773](https://github.com/influxdata/influxdb/issues/3773): Support empty tags for all WHERE equality operations. - [#6270](https://github.com/influxdata/influxdb/issues/6270): tsm1 query engine alloc reduction - [#6271](https://github.com/influxdata/influxdb/issues/6271): Fixed deadlock in tsm1 file store. - [#6287](https://github.com/influxdata/influxdb/issues/6287): Fix data race in Influx Client. +- [#6252](https://github.com/influxdata/influxdb/pull/6252): Remove TSDB listener accept message @simnv +- [#6202](https://github.com/influxdata/influxdb/pull/6202): Check default SHARD DURATION when recreating the same database. +- [#6296](https://github.com/influxdata/influxdb/issues/6296): Allow the implicit time field to be renamed again. +- [#6379](https://github.com/influxdata/influxdb/issues/6379): Validate the first argument to percentile() is a variable. +- [#6294](https://github.com/influxdata/influxdb/issues/6294): Fix panic running influx_inspect info. +- [#6382](https://github.com/influxdata/influxdb/pull/6382): Removed dead code from the old query engine. +- [#6383](https://github.com/influxdata/influxdb/pull/6383): Recover from a panic during query execution. +- [#3369](https://github.com/influxdata/influxdb/issues/3369): Detect when a timer literal will overflow or underflow the query engine. +- [#6398](https://github.com/influxdata/influxdb/issues/6398): Fix CREATE RETENTION POLICY parsing so it doesn't consume tokens it shouldn't. +- [#6413](https://github.com/influxdata/influxdb/pull/6413): Prevent goroutine leak from persistent http connections. Thanks @aaronknister. +- [#6414](https://github.com/influxdata/influxdb/pull/6414): Send "Connection: close" header for queries. +- [#6425](https://github.com/influxdata/influxdb/pull/6425): Close idle tcp connections in HTTP client to prevent tcp conn leak. + +## v0.12.1 [2016-04-08] + +### Bugfixes + +- [#6225](https://github.com/influxdata/influxdb/pull/6225): Refresh admin assets. +- [#6206](https://github.com/influxdata/influxdb/issues/6206): Handle nil values from the tsm1 cursor correctly. +- [#6190](https://github.com/influxdata/influxdb/pull/6190): Fix race on measurementFields. +- [#6248](https://github.com/influxdata/influxdb/issues/6248): Panic using incorrectly quoted "queries" field key. +- [#6257](https://github.com/influxdata/influxdb/issues/6257): CreateShardGroup was incrementing meta data index even when it was idempotent. +- [#6223](https://github.com/influxdata/influxdb/issues/6223): Failure to start/run on Windows. Thanks @mvadu +- [#6229](https://github.com/influxdata/influxdb/issues/6229): Fixed aggregate queries with no GROUP BY to include the end time. ## v0.12.0 [2016-04-05] ### Release Notes diff --git a/vendor/github.com/influxdata/influxdb/client/influxdb.go b/vendor/github.com/influxdata/influxdb/client/influxdb.go index 343a4e09d..020c2cd3b 100644 --- a/vendor/github.com/influxdata/influxdb/client/influxdb.go +++ b/vendor/github.com/influxdata/influxdb/client/influxdb.go @@ -387,22 +387,31 @@ func (c *Client) Ping() (time.Duration, string, error) { // Structs +// Message represents a user message. +type Message struct { + Level string `json:"level,omitempty"` + Text string `json:"text,omitempty"` +} + // Result represents a resultset returned from a single statement. 
type Result struct { - Series []models.Row - Err error + Series []models.Row + Messages []*Message + Err error } // MarshalJSON encodes the result into JSON. func (r *Result) MarshalJSON() ([]byte, error) { // Define a struct that outputs "error" as a string. var o struct { - Series []models.Row `json:"series,omitempty"` - Err string `json:"error,omitempty"` + Series []models.Row `json:"series,omitempty"` + Messages []*Message `json:"messages,omitempty"` + Err string `json:"error,omitempty"` } // Copy fields to output struct. o.Series = r.Series + o.Messages = r.Messages if r.Err != nil { o.Err = r.Err.Error() } @@ -413,8 +422,9 @@ func (r *Result) MarshalJSON() ([]byte, error) { // UnmarshalJSON decodes the data into the Result struct func (r *Result) UnmarshalJSON(b []byte) error { var o struct { - Series []models.Row `json:"series,omitempty"` - Err string `json:"error,omitempty"` + Series []models.Row `json:"series,omitempty"` + Messages []*Message `json:"messages,omitempty"` + Err string `json:"error,omitempty"` } dec := json.NewDecoder(bytes.NewBuffer(b)) @@ -424,6 +434,7 @@ func (r *Result) UnmarshalJSON(b []byte) error { return err } r.Series = o.Series + r.Messages = o.Messages if o.Err != "" { r.Err = errors.New(o.Err) } diff --git a/vendor/github.com/influxdata/influxdb/client/v2/client.go b/vendor/github.com/influxdata/influxdb/client/v2/client.go index 26037691d..3776eab30 100644 --- a/vendor/github.com/influxdata/influxdb/client/v2/client.go +++ b/vendor/github.com/influxdata/influxdb/client/v2/client.go @@ -123,6 +123,7 @@ func NewHTTPClient(conf HTTPConfig) (Client, error) { Timeout: conf.Timeout, Transport: tr, }, + transport: tr, }, nil } @@ -172,6 +173,7 @@ func (c *client) Ping(timeout time.Duration) (time.Duration, string, error) { // Close releases the client's resources. func (c *client) Close() error { + c.transport.CloseIdleConnections() return nil } @@ -221,6 +223,7 @@ type client struct { password string useragent string httpClient *http.Client + transport *http.Transport } type udpclient struct { @@ -516,10 +519,17 @@ func (r *Response) Error() error { return nil } +// Message represents a user message. +type Message struct { + Level string + Text string +} + // Result represents a resultset returned from a single statement. type Result struct { - Series []models.Row - Err string `json:"error,omitempty"` + Series []models.Row + Messages []*Message + Err string `json:"error,omitempty"` } func (uc *udpclient) Query(q Query) (*Response, error) { diff --git a/vendor/github.com/influxdata/influxdb/influxql/ast.go b/vendor/github.com/influxdata/influxdb/influxql/ast.go index 368328fff..1c19d6b06 100644 --- a/vendor/github.com/influxdata/influxdb/influxql/ast.go +++ b/vendor/github.com/influxdata/influxdb/influxql/ast.go @@ -903,6 +903,9 @@ type SelectStatement struct { // The value to fill empty aggregate buckets with, if any FillValue interface{} + // Renames the implicit time field name. + TimeAlias string + // Removes the "time" column from the output. OmitTime bool @@ -968,6 +971,14 @@ func (s *SelectStatement) TimeAscending() bool { return len(s.SortFields) == 0 || s.SortFields[0].Ascending } +// TimeFieldName returns the name of the time field. +func (s *SelectStatement) TimeFieldName() string { + if s.TimeAlias != "" { + return s.TimeAlias + } + return "time" +} + // Clone returns a deep copy of the statement. 
func (s *SelectStatement) Clone() *SelectStatement { clone := &SelectStatement{ @@ -1133,6 +1144,7 @@ func (s *SelectStatement) RewriteTimeFields() { switch expr := s.Fields[i].Expr.(type) { case *VarRef: if expr.Val == "time" { + s.TimeAlias = s.Fields[i].Alias s.Fields = append(s.Fields[:i], s.Fields[i+1:]...) } } @@ -1169,7 +1181,7 @@ func (s *SelectStatement) ColumnNames() []string { columnNames := make([]string, len(columnFields)+offset) if !s.OmitTime { // Add the implicit time if requested. - columnNames[0] = "time" + columnNames[0] = s.TimeFieldName() } // Keep track of the encountered column names. @@ -1403,12 +1415,12 @@ func (s *SelectStatement) validSelectWithAggregate() error { numAggregates++ } } - // For TOP, BOTTOM, MAX, MIN, FIRST, LAST (selector functions) it is ok to ask for fields and tags + // For TOP, BOTTOM, MAX, MIN, FIRST, LAST, PERCENTILE (selector functions) it is ok to ask for fields and tags // but only if one function is specified. Combining multiple functions and fields and tags is not currently supported onlySelectors := true for k := range calls { switch k { - case "top", "bottom", "max", "min", "first", "last": + case "top", "bottom", "max", "min", "first", "last", "percentile": default: onlySelectors = false break @@ -1464,6 +1476,13 @@ func (s *SelectStatement) validPercentileAggr(expr *Call) error { return fmt.Errorf("invalid number of arguments for %s, expected %d, got %d", expr.Name, exp, got) } + switch expr.Args[0].(type) { + case *VarRef: + // do nothing + default: + return fmt.Errorf("expected field argument in percentile()") + } + switch expr.Args[1].(type) { case *IntegerLiteral, *NumberLiteral: return nil @@ -1476,7 +1495,7 @@ func (s *SelectStatement) validateAggregates(tr targetRequirement) error { for _, f := range s.Fields { for _, expr := range walkFunctionCalls(f.Expr) { switch expr.Name { - case "derivative", "non_negative_derivative", "difference", "moving_average": + case "derivative", "non_negative_derivative", "difference", "moving_average", "elapsed": if err := s.validSelectWithAggregate(); err != nil { return err } @@ -1485,6 +1504,17 @@ func (s *SelectStatement) validateAggregates(tr targetRequirement) error { if min, max, got := 1, 2, len(expr.Args); got > max || got < min { return fmt.Errorf("invalid number of arguments for %s, expected at least %d but no more than %d, got %d", expr.Name, min, max, got) } + case "elapsed": + if min, max, got := 1, 2, len(expr.Args); got > max || got < min { + return fmt.Errorf("invalid number of arguments for %s, expected at least %d but no more than %d, got %d", expr.Name, min, max, got) + } + // If a duration arg is passed, make sure it's a duration + if len(expr.Args) == 2 { + // Second must be a duration .e.g (1h) + if _, ok := expr.Args[1].(*DurationLiteral); !ok { + return errors.New("elapsed requires a duration argument") + } + } case "difference": if got := len(expr.Args); got != 1 { return fmt.Errorf("invalid number of arguments for difference, expected 1, got %d", got) @@ -1719,7 +1749,7 @@ func (s *SelectStatement) validateDerivative() error { return fmt.Errorf("derivative requires a field argument") } - // If a duration arg is pased, make sure it's a duration + // If a duration arg is passed, make sure it's a duration if len(derivativeCall.Args) == 2 { // Second must be a duration .e.g (1h) if _, ok := derivativeCall.Args[1].(*DurationLiteral); !ok { @@ -3469,6 +3499,11 @@ func timeExprValue(ref Expr, lit Expr) (t time.Time, err error) { if ref, ok := ref.(*VarRef); ok && 
strings.ToLower(ref.Val) == "time" { switch lit := lit.(type) { case *TimeLiteral: + if lit.Val.After(time.Unix(0, MaxTime)) { + return time.Time{}, fmt.Errorf("time %s overflows time literal", lit.Val.Format(time.RFC3339)) + } else if lit.Val.Before(time.Unix(0, MinTime)) { + return time.Time{}, fmt.Errorf("time %s underflows time literal", lit.Val.Format(time.RFC3339)) + } return lit.Val, nil case *DurationLiteral: return time.Unix(0, int64(lit.Val)).UTC(), nil @@ -4063,6 +4098,14 @@ func reduceBinaryExprIntegerLHS(op Token, lhs *IntegerLiteral, rhs Expr) Expr { case LTE: return &BooleanLiteral{Val: lhs.Val <= rhs.Val} } + case *DurationLiteral: + // Treat the integer as a timestamp. + switch op { + case ADD: + return &TimeLiteral{Val: time.Unix(0, lhs.Val).Add(rhs.Val)} + case SUB: + return &TimeLiteral{Val: time.Unix(0, lhs.Val).Add(-rhs.Val)} + } case *nilLiteral: return &BooleanLiteral{Val: false} } diff --git a/vendor/github.com/influxdata/influxdb/influxql/call_iterator.go b/vendor/github.com/influxdata/influxdb/influxql/call_iterator.go index 5b1c2b71c..2cb3ae507 100644 --- a/vendor/github.com/influxdata/influxdb/influxql/call_iterator.go +++ b/vendor/github.com/influxdata/influxdb/influxql/call_iterator.go @@ -363,25 +363,25 @@ func NewDistinctIterator(input Iterator, opt IteratorOptions) (Iterator, error) switch input := input.(type) { case FloatIterator: createFn := func() (FloatPointAggregator, FloatPointEmitter) { - fn := NewFloatSliceFuncReducer(FloatDistinctReduceSlice) + fn := NewFloatDistinctReducer() return fn, fn } return &floatReduceFloatIterator{input: newBufFloatIterator(input), opt: opt, create: createFn}, nil case IntegerIterator: createFn := func() (IntegerPointAggregator, IntegerPointEmitter) { - fn := NewIntegerSliceFuncReducer(IntegerDistinctReduceSlice) + fn := NewIntegerDistinctReducer() return fn, fn } return &integerReduceIntegerIterator{input: newBufIntegerIterator(input), opt: opt, create: createFn}, nil case StringIterator: createFn := func() (StringPointAggregator, StringPointEmitter) { - fn := NewStringSliceFuncReducer(StringDistinctReduceSlice) + fn := NewStringDistinctReducer() return fn, fn } return &stringReduceStringIterator{input: newBufStringIterator(input), opt: opt, create: createFn}, nil case BooleanIterator: createFn := func() (BooleanPointAggregator, BooleanPointEmitter) { - fn := NewBooleanSliceFuncReducer(BooleanDistinctReduceSlice) + fn := NewBooleanDistinctReducer() return fn, fn } return &booleanReduceBooleanIterator{input: newBufBooleanIterator(input), opt: opt, create: createFn}, nil @@ -390,74 +390,6 @@ func NewDistinctIterator(input Iterator, opt IteratorOptions) (Iterator, error) } } -// FloatDistinctReduceSlice returns the distinct value within a window. -func FloatDistinctReduceSlice(a []FloatPoint) []FloatPoint { - m := make(map[float64]FloatPoint) - for _, p := range a { - if _, ok := m[p.Value]; !ok { - m[p.Value] = p - } - } - - points := make([]FloatPoint, 0, len(m)) - for _, p := range m { - points = append(points, FloatPoint{Time: p.Time, Value: p.Value}) - } - sort.Sort(floatPoints(points)) - return points -} - -// IntegerDistinctReduceSlice returns the distinct value within a window. 
-func IntegerDistinctReduceSlice(a []IntegerPoint) []IntegerPoint { - m := make(map[int64]IntegerPoint) - for _, p := range a { - if _, ok := m[p.Value]; !ok { - m[p.Value] = p - } - } - - points := make([]IntegerPoint, 0, len(m)) - for _, p := range m { - points = append(points, IntegerPoint{Time: p.Time, Value: p.Value}) - } - sort.Sort(integerPoints(points)) - return points -} - -// StringDistinctReduceSlice returns the distinct value within a window. -func StringDistinctReduceSlice(a []StringPoint) []StringPoint { - m := make(map[string]StringPoint) - for _, p := range a { - if _, ok := m[p.Value]; !ok { - m[p.Value] = p - } - } - - points := make([]StringPoint, 0, len(m)) - for _, p := range m { - points = append(points, StringPoint{Time: p.Time, Value: p.Value}) - } - sort.Sort(stringPoints(points)) - return points -} - -// BooleanDistinctReduceSlice returns the distinct value within a window. -func BooleanDistinctReduceSlice(a []BooleanPoint) []BooleanPoint { - m := make(map[bool]BooleanPoint) - for _, p := range a { - if _, ok := m[p.Value]; !ok { - m[p.Value] = p - } - } - - points := make([]BooleanPoint, 0, len(m)) - for _, p := range m { - points = append(points, BooleanPoint{Time: p.Time, Value: p.Value}) - } - sort.Sort(booleanPoints(points)) - return points -} - // newMeanIterator returns an iterator for operating on a mean() call. func newMeanIterator(input Iterator, opt IteratorOptions) (Iterator, error) { switch input := input.(type) { @@ -988,7 +920,7 @@ func NewFloatPercentileReduceSliceFunc(percentile float64) FloatReduceSliceFunc } sort.Sort(floatPointsByValue(a)) - return []FloatPoint{{Time: ZeroTime, Value: a[i].Value, Aux: a[i].Aux}} + return []FloatPoint{{Time: a[i].Time, Value: a[i].Value, Aux: a[i].Aux}} } } @@ -1009,176 +941,74 @@ func NewIntegerPercentileReduceSliceFunc(percentile float64) IntegerReduceSliceF // newDerivativeIterator returns an iterator for operating on a derivative() call. func newDerivativeIterator(input Iterator, opt IteratorOptions, interval Interval, isNonNegative bool) (Iterator, error) { - // Derivatives do not use GROUP BY intervals or time constraints, so clear these options. - opt.Interval = Interval{} - opt.StartTime, opt.EndTime = MinTime, MaxTime - switch input := input.(type) { case FloatIterator: - floatDerivativeReduceSlice := NewFloatDerivativeReduceSliceFunc(interval, isNonNegative) createFn := func() (FloatPointAggregator, FloatPointEmitter) { - fn := NewFloatSliceFuncReducer(floatDerivativeReduceSlice) + fn := NewFloatDerivativeReducer(interval, isNonNegative, opt.Ascending) return fn, fn } - return &floatReduceFloatIterator{input: newBufFloatIterator(input), opt: opt, create: createFn}, nil + return newFloatStreamFloatIterator(input, createFn, opt), nil case IntegerIterator: - integerDerivativeReduceSlice := NewIntegerDerivativeReduceSliceFunc(interval, isNonNegative) createFn := func() (IntegerPointAggregator, FloatPointEmitter) { - fn := NewIntegerSliceFuncFloatReducer(integerDerivativeReduceSlice) + fn := NewIntegerDerivativeReducer(interval, isNonNegative, opt.Ascending) return fn, fn } - return &integerReduceFloatIterator{input: newBufIntegerIterator(input), opt: opt, create: createFn}, nil + return newIntegerStreamFloatIterator(input, createFn, opt), nil default: return nil, fmt.Errorf("unsupported derivative iterator type: %T", input) } } -// NewFloatDerivativeReduceSliceFunc returns the derivative value within a window. 
-func NewFloatDerivativeReduceSliceFunc(interval Interval, isNonNegative bool) FloatReduceSliceFunc { - prev := FloatPoint{Nil: true} - - return func(a []FloatPoint) []FloatPoint { - if len(a) == 0 { - return a - } else if len(a) == 1 { - return []FloatPoint{{Time: a[0].Time, Nil: true}} - } - - if prev.Nil { - prev = a[0] - } - - output := make([]FloatPoint, 0, len(a)-1) - for i := 1; i < len(a); i++ { - p := &a[i] - - // Calculate the derivative of successive points by dividing the - // difference of each value by the elapsed time normalized to the interval. - diff := p.Value - prev.Value - elapsed := p.Time - prev.Time - - value := 0.0 - if elapsed > 0 { - value = diff / (float64(elapsed) / float64(interval.Duration)) - } - - prev = *p - - // Drop negative values for non-negative derivatives. - if isNonNegative && diff < 0 { - continue - } - - output = append(output, FloatPoint{Time: p.Time, Value: value}) - } - return output - } -} - -// NewIntegerDerivativeReduceSliceFunc returns the derivative value within a window. -func NewIntegerDerivativeReduceSliceFunc(interval Interval, isNonNegative bool) IntegerReduceFloatSliceFunc { - prev := IntegerPoint{Nil: true} - - return func(a []IntegerPoint) []FloatPoint { - if len(a) == 0 { - return []FloatPoint{} - } else if len(a) == 1 { - return []FloatPoint{{Time: a[0].Time, Nil: true}} - } - - if prev.Nil { - prev = a[0] - } - - output := make([]FloatPoint, 0, len(a)-1) - for i := 1; i < len(a); i++ { - p := &a[i] - - // Calculate the derivative of successive points by dividing the - // difference of each value by the elapsed time normalized to the interval. - diff := float64(p.Value - prev.Value) - elapsed := p.Time - prev.Time - - value := 0.0 - if elapsed > 0 { - value = diff / (float64(elapsed) / float64(interval.Duration)) - } - - prev = *p - - // Drop negative values for non-negative derivatives. - if isNonNegative && diff < 0 { - continue - } - - output = append(output, FloatPoint{Time: p.Time, Value: value}) - } - return output - } -} - // newDifferenceIterator returns an iterator for operating on a difference() call. func newDifferenceIterator(input Iterator, opt IteratorOptions) (Iterator, error) { - // Differences do not use GROUP BY intervals or time constraints, so clear these options. - opt.Interval = Interval{} - opt.StartTime, opt.EndTime = MinTime, MaxTime - switch input := input.(type) { case FloatIterator: createFn := func() (FloatPointAggregator, FloatPointEmitter) { - fn := NewFloatSliceFuncReducer(FloatDifferenceReduceSlice) + fn := NewFloatDifferenceReducer() return fn, fn } - return &floatReduceFloatIterator{input: newBufFloatIterator(input), opt: opt, create: createFn}, nil + return newFloatStreamFloatIterator(input, createFn, opt), nil case IntegerIterator: createFn := func() (IntegerPointAggregator, IntegerPointEmitter) { - fn := NewIntegerSliceFuncReducer(IntegerDifferenceReduceSlice) + fn := NewIntegerDifferenceReducer() return fn, fn } - return &integerReduceIntegerIterator{input: newBufIntegerIterator(input), opt: opt, create: createFn}, nil + return newIntegerStreamIntegerIterator(input, createFn, opt), nil default: return nil, fmt.Errorf("unsupported difference iterator type: %T", input) } } -// FloatDifferenceReduceSlice returns the difference values within a window. 
-func FloatDifferenceReduceSlice(a []FloatPoint) []FloatPoint { - if len(a) < 2 { - return []FloatPoint{} - } - prev := a[0] - - output := make([]FloatPoint, 0, len(a)-1) - for i := 1; i < len(a); i++ { - p := &a[i] - - // Calculate the difference of successive points. - value := p.Value - prev.Value - prev = *p - - output = append(output, FloatPoint{Time: p.Time, Value: value}) - } - return output -} - -// IntegerDifferenceReduceSlice returns the difference values within a window. -func IntegerDifferenceReduceSlice(a []IntegerPoint) []IntegerPoint { - if len(a) < 2 { - return []IntegerPoint{} - } - prev := a[0] - - output := make([]IntegerPoint, 0, len(a)-1) - for i := 1; i < len(a); i++ { - p := &a[i] - - // Calculate the difference of successive points. - value := p.Value - prev.Value - prev = *p - - output = append(output, IntegerPoint{Time: p.Time, Value: value}) +// newElapsedIterator returns an iterator for operating on a elapsed() call. +func newElapsedIterator(input Iterator, opt IteratorOptions, interval Interval) (Iterator, error) { + switch input := input.(type) { + case FloatIterator: + createFn := func() (FloatPointAggregator, IntegerPointEmitter) { + fn := NewFloatElapsedReducer(interval) + return fn, fn + } + return newFloatStreamIntegerIterator(input, createFn, opt), nil + case IntegerIterator: + createFn := func() (IntegerPointAggregator, IntegerPointEmitter) { + fn := NewIntegerElapsedReducer(interval) + return fn, fn + } + return newIntegerStreamIntegerIterator(input, createFn, opt), nil + case BooleanIterator: + createFn := func() (BooleanPointAggregator, IntegerPointEmitter) { + fn := NewBooleanElapsedReducer(interval) + return fn, fn + } + return newBooleanStreamIntegerIterator(input, createFn, opt), nil + case StringIterator: + createFn := func() (StringPointAggregator, IntegerPointEmitter) { + fn := NewStringElapsedReducer(interval) + return fn, fn + } + return newStringStreamIntegerIterator(input, createFn, opt), nil + default: + return nil, fmt.Errorf("unsupported elapsed iterator type: %T", input) } - return output } // newMovingAverageIterator returns an iterator for operating on a moving_average() call. diff --git a/vendor/github.com/influxdata/influxdb/influxql/functions.gen.go b/vendor/github.com/influxdata/influxdb/influxql/functions.gen.go index 717efc84f..281fe6efd 100644 --- a/vendor/github.com/influxdata/influxdb/influxql/functions.gen.go +++ b/vendor/github.com/influxdata/influxdb/influxql/functions.gen.go @@ -6,6 +6,8 @@ package influxql +import "sort" + // FloatPointAggregator aggregates points to produce a single point. type FloatPointAggregator interface { AggregateFloat(p *FloatPoint) @@ -255,6 +257,66 @@ func (r *FloatSliceFuncBooleanReducer) Emit() []BooleanPoint { return r.fn(r.points) } +// FloatDistinctReducer returns the distinct points in a series. +type FloatDistinctReducer struct { + m map[float64]FloatPoint +} + +// NewFloatDistinctReducer creates a new FloatDistinctReducer. +func NewFloatDistinctReducer() *FloatDistinctReducer { + return &FloatDistinctReducer{m: make(map[float64]FloatPoint)} +} + +// AggregateFloat aggregates a point into the reducer. +func (r *FloatDistinctReducer) AggregateFloat(p *FloatPoint) { + if _, ok := r.m[p.Value]; !ok { + r.m[p.Value] = *p + } +} + +// Emit emits the distinct points that have been aggregated into the reducer. 
+func (r *FloatDistinctReducer) Emit() []FloatPoint { + points := make([]FloatPoint, 0, len(r.m)) + for _, p := range r.m { + points = append(points, FloatPoint{Time: p.Time, Value: p.Value}) + } + sort.Sort(floatPoints(points)) + return points +} + +// FloatElapsedReducer calculates the elapsed of the aggregated points. +type FloatElapsedReducer struct { + unitConversion int64 + prev FloatPoint + curr FloatPoint +} + +// NewFloatElapsedReducer creates a new FloatElapsedReducer. +func NewFloatElapsedReducer(interval Interval) *FloatElapsedReducer { + return &FloatElapsedReducer{ + unitConversion: int64(interval.Duration), + prev: FloatPoint{Nil: true}, + curr: FloatPoint{Nil: true}, + } +} + +// AggregateFloat aggregates a point into the reducer and updates the current window. +func (r *FloatElapsedReducer) AggregateFloat(p *FloatPoint) { + r.prev = r.curr + r.curr = *p +} + +// Emit emits the elapsed of the reducer at the current point. +func (r *FloatElapsedReducer) Emit() []IntegerPoint { + if !r.prev.Nil { + elapsed := (r.curr.Time - r.prev.Time) / r.unitConversion + return []IntegerPoint{ + {Time: r.curr.Time, Value: elapsed}, + } + } + return nil +} + // IntegerPointAggregator aggregates points to produce a single point. type IntegerPointAggregator interface { AggregateInteger(p *IntegerPoint) @@ -504,6 +566,66 @@ func (r *IntegerSliceFuncBooleanReducer) Emit() []BooleanPoint { return r.fn(r.points) } +// IntegerDistinctReducer returns the distinct points in a series. +type IntegerDistinctReducer struct { + m map[int64]IntegerPoint +} + +// NewIntegerDistinctReducer creates a new IntegerDistinctReducer. +func NewIntegerDistinctReducer() *IntegerDistinctReducer { + return &IntegerDistinctReducer{m: make(map[int64]IntegerPoint)} +} + +// AggregateInteger aggregates a point into the reducer. +func (r *IntegerDistinctReducer) AggregateInteger(p *IntegerPoint) { + if _, ok := r.m[p.Value]; !ok { + r.m[p.Value] = *p + } +} + +// Emit emits the distinct points that have been aggregated into the reducer. +func (r *IntegerDistinctReducer) Emit() []IntegerPoint { + points := make([]IntegerPoint, 0, len(r.m)) + for _, p := range r.m { + points = append(points, IntegerPoint{Time: p.Time, Value: p.Value}) + } + sort.Sort(integerPoints(points)) + return points +} + +// IntegerElapsedReducer calculates the elapsed of the aggregated points. +type IntegerElapsedReducer struct { + unitConversion int64 + prev IntegerPoint + curr IntegerPoint +} + +// NewIntegerElapsedReducer creates a new IntegerElapsedReducer. +func NewIntegerElapsedReducer(interval Interval) *IntegerElapsedReducer { + return &IntegerElapsedReducer{ + unitConversion: int64(interval.Duration), + prev: IntegerPoint{Nil: true}, + curr: IntegerPoint{Nil: true}, + } +} + +// AggregateInteger aggregates a point into the reducer and updates the current window. +func (r *IntegerElapsedReducer) AggregateInteger(p *IntegerPoint) { + r.prev = r.curr + r.curr = *p +} + +// Emit emits the elapsed of the reducer at the current point. +func (r *IntegerElapsedReducer) Emit() []IntegerPoint { + if !r.prev.Nil { + elapsed := (r.curr.Time - r.prev.Time) / r.unitConversion + return []IntegerPoint{ + {Time: r.curr.Time, Value: elapsed}, + } + } + return nil +} + // StringPointAggregator aggregates points to produce a single point. 
type StringPointAggregator interface { AggregateString(p *StringPoint) @@ -753,6 +875,66 @@ func (r *StringSliceFuncBooleanReducer) Emit() []BooleanPoint { return r.fn(r.points) } +// StringDistinctReducer returns the distinct points in a series. +type StringDistinctReducer struct { + m map[string]StringPoint +} + +// NewStringDistinctReducer creates a new StringDistinctReducer. +func NewStringDistinctReducer() *StringDistinctReducer { + return &StringDistinctReducer{m: make(map[string]StringPoint)} +} + +// AggregateString aggregates a point into the reducer. +func (r *StringDistinctReducer) AggregateString(p *StringPoint) { + if _, ok := r.m[p.Value]; !ok { + r.m[p.Value] = *p + } +} + +// Emit emits the distinct points that have been aggregated into the reducer. +func (r *StringDistinctReducer) Emit() []StringPoint { + points := make([]StringPoint, 0, len(r.m)) + for _, p := range r.m { + points = append(points, StringPoint{Time: p.Time, Value: p.Value}) + } + sort.Sort(stringPoints(points)) + return points +} + +// StringElapsedReducer calculates the elapsed of the aggregated points. +type StringElapsedReducer struct { + unitConversion int64 + prev StringPoint + curr StringPoint +} + +// NewStringElapsedReducer creates a new StringElapsedReducer. +func NewStringElapsedReducer(interval Interval) *StringElapsedReducer { + return &StringElapsedReducer{ + unitConversion: int64(interval.Duration), + prev: StringPoint{Nil: true}, + curr: StringPoint{Nil: true}, + } +} + +// AggregateString aggregates a point into the reducer and updates the current window. +func (r *StringElapsedReducer) AggregateString(p *StringPoint) { + r.prev = r.curr + r.curr = *p +} + +// Emit emits the elapsed of the reducer at the current point. +func (r *StringElapsedReducer) Emit() []IntegerPoint { + if !r.prev.Nil { + elapsed := (r.curr.Time - r.prev.Time) / r.unitConversion + return []IntegerPoint{ + {Time: r.curr.Time, Value: elapsed}, + } + } + return nil +} + // BooleanPointAggregator aggregates points to produce a single point. type BooleanPointAggregator interface { AggregateBoolean(p *BooleanPoint) @@ -1001,3 +1183,63 @@ func (r *BooleanSliceFuncReducer) AggregateBooleanBulk(points []BooleanPoint) { func (r *BooleanSliceFuncReducer) Emit() []BooleanPoint { return r.fn(r.points) } + +// BooleanDistinctReducer returns the distinct points in a series. +type BooleanDistinctReducer struct { + m map[bool]BooleanPoint +} + +// NewBooleanDistinctReducer creates a new BooleanDistinctReducer. +func NewBooleanDistinctReducer() *BooleanDistinctReducer { + return &BooleanDistinctReducer{m: make(map[bool]BooleanPoint)} +} + +// AggregateBoolean aggregates a point into the reducer. +func (r *BooleanDistinctReducer) AggregateBoolean(p *BooleanPoint) { + if _, ok := r.m[p.Value]; !ok { + r.m[p.Value] = *p + } +} + +// Emit emits the distinct points that have been aggregated into the reducer. +func (r *BooleanDistinctReducer) Emit() []BooleanPoint { + points := make([]BooleanPoint, 0, len(r.m)) + for _, p := range r.m { + points = append(points, BooleanPoint{Time: p.Time, Value: p.Value}) + } + sort.Sort(booleanPoints(points)) + return points +} + +// BooleanElapsedReducer calculates the elapsed of the aggregated points. +type BooleanElapsedReducer struct { + unitConversion int64 + prev BooleanPoint + curr BooleanPoint +} + +// NewBooleanElapsedReducer creates a new BooleanElapsedReducer. 
+func NewBooleanElapsedReducer(interval Interval) *BooleanElapsedReducer { + return &BooleanElapsedReducer{ + unitConversion: int64(interval.Duration), + prev: BooleanPoint{Nil: true}, + curr: BooleanPoint{Nil: true}, + } +} + +// AggregateBoolean aggregates a point into the reducer and updates the current window. +func (r *BooleanElapsedReducer) AggregateBoolean(p *BooleanPoint) { + r.prev = r.curr + r.curr = *p +} + +// Emit emits the elapsed of the reducer at the current point. +func (r *BooleanElapsedReducer) Emit() []IntegerPoint { + if !r.prev.Nil { + elapsed := (r.curr.Time - r.prev.Time) / r.unitConversion + return []IntegerPoint{ + {Time: r.curr.Time, Value: elapsed}, + } + } + return nil +} diff --git a/vendor/github.com/influxdata/influxdb/influxql/functions.gen.go.tmpl b/vendor/github.com/influxdata/influxdb/influxql/functions.gen.go.tmpl index d8f0f13ed..9d876ab39 100644 --- a/vendor/github.com/influxdata/influxdb/influxql/functions.gen.go.tmpl +++ b/vendor/github.com/influxdata/influxdb/influxql/functions.gen.go.tmpl @@ -1,5 +1,7 @@ package influxql +import "sort" + {{with $types := .}}{{range $k := $types}} // {{$k.Name}}PointAggregator aggregates points to produce a single point. @@ -87,4 +89,67 @@ func (r *{{$k.Name}}SliceFunc{{if ne $k.Name $v.Name}}{{$v.Name}}{{end}}Reducer) func (r *{{$k.Name}}SliceFunc{{if ne $k.Name $v.Name}}{{$v.Name}}{{end}}Reducer) Emit() []{{$v.Name}}Point { return r.fn(r.points) } -{{end}}{{end}}{{end}} +{{end}} + +// {{$k.Name}}DistinctReducer returns the distinct points in a series. +type {{$k.Name}}DistinctReducer struct { + m map[{{$k.Type}}]{{$k.Name}}Point +} + +// New{{$k.Name}}DistinctReducer creates a new {{$k.Name}}DistinctReducer. +func New{{$k.Name}}DistinctReducer() *{{$k.Name}}DistinctReducer { + return &{{$k.Name}}DistinctReducer{m: make(map[{{$k.Type}}]{{$k.Name}}Point)} +} + +// Aggregate{{$k.Name}} aggregates a point into the reducer. +func (r *{{$k.Name}}DistinctReducer) Aggregate{{$k.Name}}(p *{{$k.Name}}Point) { + if _, ok := r.m[p.Value]; !ok { + r.m[p.Value] = *p + } +} + +// Emit emits the distinct points that have been aggregated into the reducer. +func (r *{{$k.Name}}DistinctReducer) Emit() []{{$k.Name}}Point { + points := make([]{{$k.Name}}Point, 0, len(r.m)) + for _, p := range r.m { + points = append(points, {{$k.Name}}Point{Time: p.Time, Value: p.Value}) + } + sort.Sort({{$k.name}}Points(points)) + return points +} + +// {{$k.Name}}ElapsedReducer calculates the elapsed of the aggregated points. +type {{$k.Name}}ElapsedReducer struct { + unitConversion int64 + prev {{$k.Name}}Point + curr {{$k.Name}}Point +} + +// New{{$k.Name}}ElapsedReducer creates a new {{$k.Name}}ElapsedReducer. +func New{{$k.Name}}ElapsedReducer(interval Interval) *{{$k.Name}}ElapsedReducer { + return &{{$k.Name}}ElapsedReducer{ + unitConversion: int64(interval.Duration), + prev: {{$k.Name}}Point{Nil: true}, + curr: {{$k.Name}}Point{Nil: true}, + } +} + +// Aggregate{{$k.Name}} aggregates a point into the reducer and updates the current window. +func (r *{{$k.Name}}ElapsedReducer) Aggregate{{$k.Name}}(p *{{$k.Name}}Point) { + r.prev = r.curr + r.curr = *p +} + +// Emit emits the elapsed of the reducer at the current point. 
+func (r *{{$k.Name}}ElapsedReducer) Emit() []IntegerPoint { + if !r.prev.Nil { + elapsed := (r.curr.Time - r.prev.Time) / r.unitConversion + return []IntegerPoint{ + {Time: r.curr.Time, Value: elapsed}, + } + } + return nil +} + + +{{end}}{{end}} diff --git a/vendor/github.com/influxdata/influxdb/influxql/functions.go b/vendor/github.com/influxdata/influxdb/influxql/functions.go index 87d9a0ff8..ffd8402bc 100644 --- a/vendor/github.com/influxdata/influxdb/influxql/functions.go +++ b/vendor/github.com/influxdata/influxdb/influxql/functions.go @@ -62,6 +62,168 @@ func (r *IntegerMeanReducer) Emit() []FloatPoint { }} } +// FloatDerivativeReducer calculates the derivative of the aggregated points. +type FloatDerivativeReducer struct { + interval Interval + prev FloatPoint + curr FloatPoint + isNonNegative bool + ascending bool +} + +// NewFloatDerivativeReducer creates a new FloatDerivativeReducer. +func NewFloatDerivativeReducer(interval Interval, isNonNegative, ascending bool) *FloatDerivativeReducer { + return &FloatDerivativeReducer{ + interval: interval, + isNonNegative: isNonNegative, + ascending: ascending, + prev: FloatPoint{Nil: true}, + curr: FloatPoint{Nil: true}, + } +} + +// AggregateFloat aggregates a point into the reducer and updates the current window. +func (r *FloatDerivativeReducer) AggregateFloat(p *FloatPoint) { + r.prev = r.curr + r.curr = *p +} + +// Emit emits the derivative of the reducer at the current point. +func (r *FloatDerivativeReducer) Emit() []FloatPoint { + if !r.prev.Nil { + // Calculate the derivative of successive points by dividing the + // difference of each value by the elapsed time normalized to the interval. + diff := r.curr.Value - r.prev.Value + elapsed := r.curr.Time - r.prev.Time + if !r.ascending { + elapsed = -elapsed + } + + value := 0.0 + if elapsed > 0 { + value = diff / (float64(elapsed) / float64(r.interval.Duration)) + } + + // Drop negative values for non-negative derivatives. + if r.isNonNegative && diff < 0 { + return nil + } + return []FloatPoint{{Time: r.curr.Time, Value: value}} + } + return nil +} + +// IntegerDerivativeReducer calculates the derivative of the aggregated points. +type IntegerDerivativeReducer struct { + interval Interval + prev IntegerPoint + curr IntegerPoint + isNonNegative bool + ascending bool +} + +// NewIntegerDerivativeReducer creates a new IntegerDerivativeReducer. +func NewIntegerDerivativeReducer(interval Interval, isNonNegative, ascending bool) *IntegerDerivativeReducer { + return &IntegerDerivativeReducer{ + interval: interval, + isNonNegative: isNonNegative, + ascending: ascending, + prev: IntegerPoint{Nil: true}, + curr: IntegerPoint{Nil: true}, + } +} + +// AggregateInteger aggregates a point into the reducer and updates the current window. +func (r *IntegerDerivativeReducer) AggregateInteger(p *IntegerPoint) { + r.prev = r.curr + r.curr = *p +} + +// Emit emits the derivative of the reducer at the current point. +func (r *IntegerDerivativeReducer) Emit() []FloatPoint { + if !r.prev.Nil { + // Calculate the derivative of successive points by dividing the + // difference of each value by the elapsed time normalized to the interval. + diff := float64(r.curr.Value - r.prev.Value) + elapsed := r.curr.Time - r.prev.Time + if !r.ascending { + elapsed = -elapsed + } + + value := 0.0 + if elapsed > 0 { + value = diff / (float64(elapsed) / float64(r.interval.Duration)) + } + + // Drop negative values for non-negative derivatives. 
+ if r.isNonNegative && diff < 0 { + return nil + } + return []FloatPoint{{Time: r.curr.Time, Value: value}} + } + return nil +} + +// FloatDifferenceReducer calculates the derivative of the aggregated points. +type FloatDifferenceReducer struct { + prev FloatPoint + curr FloatPoint +} + +// NewFloatDifferenceReducer creates a new FloatDifferenceReducer. +func NewFloatDifferenceReducer() *FloatDifferenceReducer { + return &FloatDifferenceReducer{ + prev: FloatPoint{Nil: true}, + curr: FloatPoint{Nil: true}, + } +} + +// AggregateFloat aggregates a point into the reducer and updates the current window. +func (r *FloatDifferenceReducer) AggregateFloat(p *FloatPoint) { + r.prev = r.curr + r.curr = *p +} + +// Emit emits the difference of the reducer at the current point. +func (r *FloatDifferenceReducer) Emit() []FloatPoint { + if !r.prev.Nil { + // Calculate the difference of successive points. + value := r.curr.Value - r.prev.Value + return []FloatPoint{{Time: r.curr.Time, Value: value}} + } + return nil +} + +// IntegerDifferenceReducer calculates the derivative of the aggregated points. +type IntegerDifferenceReducer struct { + prev IntegerPoint + curr IntegerPoint +} + +// NewIntegerDifferenceReducer creates a new IntegerDifferenceReducer. +func NewIntegerDifferenceReducer() *IntegerDifferenceReducer { + return &IntegerDifferenceReducer{ + prev: IntegerPoint{Nil: true}, + curr: IntegerPoint{Nil: true}, + } +} + +// AggregateInteger aggregates a point into the reducer and updates the current window. +func (r *IntegerDifferenceReducer) AggregateInteger(p *IntegerPoint) { + r.prev = r.curr + r.curr = *p +} + +// Emit emits the difference of the reducer at the current point. +func (r *IntegerDifferenceReducer) Emit() []IntegerPoint { + if !r.prev.Nil { + // Calculate the difference of successive points. + value := r.curr.Value - r.prev.Value + return []IntegerPoint{{Time: r.curr.Time, Value: value}} + } + return nil +} + // FloatMovingAverageReducer calculates the moving average of the aggregated points. type FloatMovingAverageReducer struct { pos int diff --git a/vendor/github.com/influxdata/influxdb/influxql/iterator.gen.go b/vendor/github.com/influxdata/influxdb/influxql/iterator.gen.go index 2e45d17ab..fe6a70a73 100644 --- a/vendor/github.com/influxdata/influxdb/influxql/iterator.gen.go +++ b/vendor/github.com/influxdata/influxdb/influxql/iterator.gen.go @@ -467,7 +467,6 @@ type floatFillIterator struct { startTime int64 endTime int64 auxFields []interface{} - done bool opt IteratorOptions window struct { @@ -488,9 +487,9 @@ func newFloatFillIterator(input FloatIterator, expr Expr, opt IteratorOptions) * var startTime, endTime int64 if opt.Ascending { startTime, _ = opt.Window(opt.StartTime) - _, endTime = opt.Window(opt.EndTime) + endTime, _ = opt.Window(opt.EndTime) } else { - _, startTime = opt.Window(opt.EndTime) + startTime, _ = opt.Window(opt.EndTime) endTime, _ = opt.Window(opt.StartTime) } @@ -512,7 +511,11 @@ func newFloatFillIterator(input FloatIterator, expr Expr, opt IteratorOptions) * itr.window.name, itr.window.tags = p.Name, p.Tags itr.window.time = itr.startTime } else { - itr.window.time = itr.endTime + if opt.Ascending { + itr.window.time = itr.endTime + 1 + } else { + itr.window.time = itr.endTime - 1 + } } return itr } @@ -528,7 +531,7 @@ func (itr *floatFillIterator) Next() *FloatPoint { // If we are inside of an interval, unread the point and continue below to // constructing a new point. 
if itr.opt.Ascending { - if itr.window.time < itr.endTime { + if itr.window.time <= itr.endTime { itr.input.unread(p) p = nil break @@ -555,7 +558,7 @@ func (itr *floatFillIterator) Next() *FloatPoint { } // Check if the point is our next expected point. - if p == nil || p.Time > itr.window.time { + if p == nil || (itr.opt.Ascending && p.Time > itr.window.time) || (!itr.opt.Ascending && p.Time < itr.window.time) { if p != nil { itr.input.unread(p) } @@ -726,7 +729,11 @@ func (itr *floatAuxIterator) stream() { // floatChanIterator represents a new instance of floatChanIterator. type floatChanIterator struct { - buf *FloatPoint + buf struct { + i int + filled bool + points [2]FloatPoint + } cond *sync.Cond done bool } @@ -749,7 +756,7 @@ func (itr *floatChanIterator) setBuf(name string, tags Tags, time int64, value i // Wait for either the iterator to be done (so we don't have to set the value) // or for the buffer to have been read and ready for another write. - for !itr.done && itr.buf != nil { + for !itr.done && itr.buf.filled { itr.cond.Wait() } @@ -762,14 +769,16 @@ func (itr *floatChanIterator) setBuf(name string, tags Tags, time int64, value i switch v := value.(type) { case float64: - itr.buf = &FloatPoint{Name: name, Tags: tags, Time: time, Value: v} + itr.buf.points[itr.buf.i] = FloatPoint{Name: name, Tags: tags, Time: time, Value: v} case int64: - itr.buf = &FloatPoint{Name: name, Tags: tags, Time: time, Value: float64(v)} + itr.buf.points[itr.buf.i] = FloatPoint{Name: name, Tags: tags, Time: time, Value: float64(v)} default: - itr.buf = &FloatPoint{Name: name, Tags: tags, Time: time, Nil: true} + itr.buf.points[itr.buf.i] = FloatPoint{Name: name, Tags: tags, Time: time, Nil: true} } + itr.buf.filled = true + // Signal to all waiting goroutines that a new value is ready to read. itr.cond.Signal() return true @@ -780,15 +789,22 @@ func (itr *floatChanIterator) Next() *FloatPoint { // Wait until either a value is available in the buffer or // the iterator is closed. - for !itr.done && itr.buf == nil { + for !itr.done && !itr.buf.filled { itr.cond.Wait() } + // Return nil once the channel is done and the buffer is empty. + if itr.done && !itr.buf.filled { + itr.cond.L.Unlock() + return nil + } + // Always read from the buffer if it exists, even if the iterator // is closed. This prevents the last value from being truncated by // the parent iterator. - p := itr.buf - itr.buf = nil + p := &itr.buf.points[itr.buf.i] + itr.buf.i = (itr.buf.i + 1) % len(itr.buf.points) + itr.buf.filled = false itr.cond.Signal() // Do not defer the unlock so we don't create an unnecessary allocation. @@ -899,7 +915,7 @@ func (itr *floatReduceFloatIterator) reduce() []FloatPoint { return a } -// floatStreamFloatIterator +// floatStreamFloatIterator streams inputs into the iterator and emits points gradually. type floatStreamFloatIterator struct { input *bufFloatIterator create func() (FloatPointAggregator, FloatPointEmitter) @@ -908,6 +924,7 @@ type floatStreamFloatIterator struct { points []FloatPoint } +// newFloatStreamFloatIterator returns a new instance of floatStreamFloatIterator. 
func newFloatStreamFloatIterator(input FloatIterator, createFn func() (FloatPointAggregator, FloatPointEmitter), opt IteratorOptions) *floatStreamFloatIterator { return &floatStreamFloatIterator{ input: newBufFloatIterator(input), @@ -1123,7 +1140,7 @@ func (itr *floatReduceIntegerIterator) reduce() []IntegerPoint { return a } -// floatStreamIntegerIterator +// floatStreamIntegerIterator streams inputs into the iterator and emits points gradually. type floatStreamIntegerIterator struct { input *bufFloatIterator create func() (FloatPointAggregator, IntegerPointEmitter) @@ -1132,6 +1149,7 @@ type floatStreamIntegerIterator struct { points []IntegerPoint } +// newFloatStreamIntegerIterator returns a new instance of floatStreamIntegerIterator. func newFloatStreamIntegerIterator(input FloatIterator, createFn func() (FloatPointAggregator, IntegerPointEmitter), opt IteratorOptions) *floatStreamIntegerIterator { return &floatStreamIntegerIterator{ input: newBufFloatIterator(input), @@ -1347,7 +1365,7 @@ func (itr *floatReduceStringIterator) reduce() []StringPoint { return a } -// floatStreamStringIterator +// floatStreamStringIterator streams inputs into the iterator and emits points gradually. type floatStreamStringIterator struct { input *bufFloatIterator create func() (FloatPointAggregator, StringPointEmitter) @@ -1356,6 +1374,7 @@ type floatStreamStringIterator struct { points []StringPoint } +// newFloatStreamStringIterator returns a new instance of floatStreamStringIterator. func newFloatStreamStringIterator(input FloatIterator, createFn func() (FloatPointAggregator, StringPointEmitter), opt IteratorOptions) *floatStreamStringIterator { return &floatStreamStringIterator{ input: newBufFloatIterator(input), @@ -1571,7 +1590,7 @@ func (itr *floatReduceBooleanIterator) reduce() []BooleanPoint { return a } -// floatStreamBooleanIterator +// floatStreamBooleanIterator streams inputs into the iterator and emits points gradually. type floatStreamBooleanIterator struct { input *bufFloatIterator create func() (FloatPointAggregator, BooleanPointEmitter) @@ -1580,6 +1599,7 @@ type floatStreamBooleanIterator struct { points []BooleanPoint } +// newFloatStreamBooleanIterator returns a new instance of floatStreamBooleanIterator. 
func newFloatStreamBooleanIterator(input FloatIterator, createFn func() (FloatPointAggregator, BooleanPointEmitter), opt IteratorOptions) *floatStreamBooleanIterator { return &floatStreamBooleanIterator{ input: newBufFloatIterator(input), @@ -2279,7 +2299,6 @@ type integerFillIterator struct { startTime int64 endTime int64 auxFields []interface{} - done bool opt IteratorOptions window struct { @@ -2300,9 +2319,9 @@ func newIntegerFillIterator(input IntegerIterator, expr Expr, opt IteratorOption var startTime, endTime int64 if opt.Ascending { startTime, _ = opt.Window(opt.StartTime) - _, endTime = opt.Window(opt.EndTime) + endTime, _ = opt.Window(opt.EndTime) } else { - _, startTime = opt.Window(opt.EndTime) + startTime, _ = opt.Window(opt.EndTime) endTime, _ = opt.Window(opt.StartTime) } @@ -2324,7 +2343,11 @@ func newIntegerFillIterator(input IntegerIterator, expr Expr, opt IteratorOption itr.window.name, itr.window.tags = p.Name, p.Tags itr.window.time = itr.startTime } else { - itr.window.time = itr.endTime + if opt.Ascending { + itr.window.time = itr.endTime + 1 + } else { + itr.window.time = itr.endTime - 1 + } } return itr } @@ -2340,7 +2363,7 @@ func (itr *integerFillIterator) Next() *IntegerPoint { // If we are inside of an interval, unread the point and continue below to // constructing a new point. if itr.opt.Ascending { - if itr.window.time < itr.endTime { + if itr.window.time <= itr.endTime { itr.input.unread(p) p = nil break @@ -2367,7 +2390,7 @@ func (itr *integerFillIterator) Next() *IntegerPoint { } // Check if the point is our next expected point. - if p == nil || p.Time > itr.window.time { + if p == nil || (itr.opt.Ascending && p.Time > itr.window.time) || (!itr.opt.Ascending && p.Time < itr.window.time) { if p != nil { itr.input.unread(p) } @@ -2538,7 +2561,11 @@ func (itr *integerAuxIterator) stream() { // integerChanIterator represents a new instance of integerChanIterator. type integerChanIterator struct { - buf *IntegerPoint + buf struct { + i int + filled bool + points [2]IntegerPoint + } cond *sync.Cond done bool } @@ -2561,7 +2588,7 @@ func (itr *integerChanIterator) setBuf(name string, tags Tags, time int64, value // Wait for either the iterator to be done (so we don't have to set the value) // or for the buffer to have been read and ready for another write. - for !itr.done && itr.buf != nil { + for !itr.done && itr.buf.filled { itr.cond.Wait() } @@ -2574,11 +2601,13 @@ func (itr *integerChanIterator) setBuf(name string, tags Tags, time int64, value switch v := value.(type) { case int64: - itr.buf = &IntegerPoint{Name: name, Tags: tags, Time: time, Value: v} + itr.buf.points[itr.buf.i] = IntegerPoint{Name: name, Tags: tags, Time: time, Value: v} default: - itr.buf = &IntegerPoint{Name: name, Tags: tags, Time: time, Nil: true} + itr.buf.points[itr.buf.i] = IntegerPoint{Name: name, Tags: tags, Time: time, Nil: true} } + itr.buf.filled = true + // Signal to all waiting goroutines that a new value is ready to read. itr.cond.Signal() return true @@ -2589,15 +2618,22 @@ func (itr *integerChanIterator) Next() *IntegerPoint { // Wait until either a value is available in the buffer or // the iterator is closed. - for !itr.done && itr.buf == nil { + for !itr.done && !itr.buf.filled { itr.cond.Wait() } + // Return nil once the channel is done and the buffer is empty. + if itr.done && !itr.buf.filled { + itr.cond.L.Unlock() + return nil + } + // Always read from the buffer if it exists, even if the iterator // is closed. 
This prevents the last value from being truncated by // the parent iterator. - p := itr.buf - itr.buf = nil + p := &itr.buf.points[itr.buf.i] + itr.buf.i = (itr.buf.i + 1) % len(itr.buf.points) + itr.buf.filled = false itr.cond.Signal() // Do not defer the unlock so we don't create an unnecessary allocation. @@ -2708,7 +2744,7 @@ func (itr *integerReduceFloatIterator) reduce() []FloatPoint { return a } -// integerStreamFloatIterator +// integerStreamFloatIterator streams inputs into the iterator and emits points gradually. type integerStreamFloatIterator struct { input *bufIntegerIterator create func() (IntegerPointAggregator, FloatPointEmitter) @@ -2717,6 +2753,7 @@ type integerStreamFloatIterator struct { points []FloatPoint } +// newIntegerStreamFloatIterator returns a new instance of integerStreamFloatIterator. func newIntegerStreamFloatIterator(input IntegerIterator, createFn func() (IntegerPointAggregator, FloatPointEmitter), opt IteratorOptions) *integerStreamFloatIterator { return &integerStreamFloatIterator{ input: newBufIntegerIterator(input), @@ -2932,7 +2969,7 @@ func (itr *integerReduceIntegerIterator) reduce() []IntegerPoint { return a } -// integerStreamIntegerIterator +// integerStreamIntegerIterator streams inputs into the iterator and emits points gradually. type integerStreamIntegerIterator struct { input *bufIntegerIterator create func() (IntegerPointAggregator, IntegerPointEmitter) @@ -2941,6 +2978,7 @@ type integerStreamIntegerIterator struct { points []IntegerPoint } +// newIntegerStreamIntegerIterator returns a new instance of integerStreamIntegerIterator. func newIntegerStreamIntegerIterator(input IntegerIterator, createFn func() (IntegerPointAggregator, IntegerPointEmitter), opt IteratorOptions) *integerStreamIntegerIterator { return &integerStreamIntegerIterator{ input: newBufIntegerIterator(input), @@ -3156,7 +3194,7 @@ func (itr *integerReduceStringIterator) reduce() []StringPoint { return a } -// integerStreamStringIterator +// integerStreamStringIterator streams inputs into the iterator and emits points gradually. type integerStreamStringIterator struct { input *bufIntegerIterator create func() (IntegerPointAggregator, StringPointEmitter) @@ -3165,6 +3203,7 @@ type integerStreamStringIterator struct { points []StringPoint } +// newIntegerStreamStringIterator returns a new instance of integerStreamStringIterator. func newIntegerStreamStringIterator(input IntegerIterator, createFn func() (IntegerPointAggregator, StringPointEmitter), opt IteratorOptions) *integerStreamStringIterator { return &integerStreamStringIterator{ input: newBufIntegerIterator(input), @@ -3380,7 +3419,7 @@ func (itr *integerReduceBooleanIterator) reduce() []BooleanPoint { return a } -// integerStreamBooleanIterator +// integerStreamBooleanIterator streams inputs into the iterator and emits points gradually. type integerStreamBooleanIterator struct { input *bufIntegerIterator create func() (IntegerPointAggregator, BooleanPointEmitter) @@ -3389,6 +3428,7 @@ type integerStreamBooleanIterator struct { points []BooleanPoint } +// newIntegerStreamBooleanIterator returns a new instance of integerStreamBooleanIterator. 
func newIntegerStreamBooleanIterator(input IntegerIterator, createFn func() (IntegerPointAggregator, BooleanPointEmitter), opt IteratorOptions) *integerStreamBooleanIterator { return &integerStreamBooleanIterator{ input: newBufIntegerIterator(input), @@ -4088,7 +4128,6 @@ type stringFillIterator struct { startTime int64 endTime int64 auxFields []interface{} - done bool opt IteratorOptions window struct { @@ -4109,9 +4148,9 @@ func newStringFillIterator(input StringIterator, expr Expr, opt IteratorOptions) var startTime, endTime int64 if opt.Ascending { startTime, _ = opt.Window(opt.StartTime) - _, endTime = opt.Window(opt.EndTime) + endTime, _ = opt.Window(opt.EndTime) } else { - _, startTime = opt.Window(opt.EndTime) + startTime, _ = opt.Window(opt.EndTime) endTime, _ = opt.Window(opt.StartTime) } @@ -4133,7 +4172,11 @@ func newStringFillIterator(input StringIterator, expr Expr, opt IteratorOptions) itr.window.name, itr.window.tags = p.Name, p.Tags itr.window.time = itr.startTime } else { - itr.window.time = itr.endTime + if opt.Ascending { + itr.window.time = itr.endTime + 1 + } else { + itr.window.time = itr.endTime - 1 + } } return itr } @@ -4149,7 +4192,7 @@ func (itr *stringFillIterator) Next() *StringPoint { // If we are inside of an interval, unread the point and continue below to // constructing a new point. if itr.opt.Ascending { - if itr.window.time < itr.endTime { + if itr.window.time <= itr.endTime { itr.input.unread(p) p = nil break @@ -4176,7 +4219,7 @@ func (itr *stringFillIterator) Next() *StringPoint { } // Check if the point is our next expected point. - if p == nil || p.Time > itr.window.time { + if p == nil || (itr.opt.Ascending && p.Time > itr.window.time) || (!itr.opt.Ascending && p.Time < itr.window.time) { if p != nil { itr.input.unread(p) } @@ -4347,7 +4390,11 @@ func (itr *stringAuxIterator) stream() { // stringChanIterator represents a new instance of stringChanIterator. type stringChanIterator struct { - buf *StringPoint + buf struct { + i int + filled bool + points [2]StringPoint + } cond *sync.Cond done bool } @@ -4370,7 +4417,7 @@ func (itr *stringChanIterator) setBuf(name string, tags Tags, time int64, value // Wait for either the iterator to be done (so we don't have to set the value) // or for the buffer to have been read and ready for another write. - for !itr.done && itr.buf != nil { + for !itr.done && itr.buf.filled { itr.cond.Wait() } @@ -4383,11 +4430,13 @@ func (itr *stringChanIterator) setBuf(name string, tags Tags, time int64, value switch v := value.(type) { case string: - itr.buf = &StringPoint{Name: name, Tags: tags, Time: time, Value: v} + itr.buf.points[itr.buf.i] = StringPoint{Name: name, Tags: tags, Time: time, Value: v} default: - itr.buf = &StringPoint{Name: name, Tags: tags, Time: time, Nil: true} + itr.buf.points[itr.buf.i] = StringPoint{Name: name, Tags: tags, Time: time, Nil: true} } + itr.buf.filled = true + // Signal to all waiting goroutines that a new value is ready to read. itr.cond.Signal() return true @@ -4398,15 +4447,22 @@ func (itr *stringChanIterator) Next() *StringPoint { // Wait until either a value is available in the buffer or // the iterator is closed. - for !itr.done && itr.buf == nil { + for !itr.done && !itr.buf.filled { itr.cond.Wait() } + // Return nil once the channel is done and the buffer is empty. + if itr.done && !itr.buf.filled { + itr.cond.L.Unlock() + return nil + } + // Always read from the buffer if it exists, even if the iterator // is closed. 
This prevents the last value from being truncated by // the parent iterator. - p := itr.buf - itr.buf = nil + p := &itr.buf.points[itr.buf.i] + itr.buf.i = (itr.buf.i + 1) % len(itr.buf.points) + itr.buf.filled = false itr.cond.Signal() // Do not defer the unlock so we don't create an unnecessary allocation. @@ -4517,7 +4573,7 @@ func (itr *stringReduceFloatIterator) reduce() []FloatPoint { return a } -// stringStreamFloatIterator +// stringStreamFloatIterator streams inputs into the iterator and emits points gradually. type stringStreamFloatIterator struct { input *bufStringIterator create func() (StringPointAggregator, FloatPointEmitter) @@ -4526,6 +4582,7 @@ type stringStreamFloatIterator struct { points []FloatPoint } +// newStringStreamFloatIterator returns a new instance of stringStreamFloatIterator. func newStringStreamFloatIterator(input StringIterator, createFn func() (StringPointAggregator, FloatPointEmitter), opt IteratorOptions) *stringStreamFloatIterator { return &stringStreamFloatIterator{ input: newBufStringIterator(input), @@ -4741,7 +4798,7 @@ func (itr *stringReduceIntegerIterator) reduce() []IntegerPoint { return a } -// stringStreamIntegerIterator +// stringStreamIntegerIterator streams inputs into the iterator and emits points gradually. type stringStreamIntegerIterator struct { input *bufStringIterator create func() (StringPointAggregator, IntegerPointEmitter) @@ -4750,6 +4807,7 @@ type stringStreamIntegerIterator struct { points []IntegerPoint } +// newStringStreamIntegerIterator returns a new instance of stringStreamIntegerIterator. func newStringStreamIntegerIterator(input StringIterator, createFn func() (StringPointAggregator, IntegerPointEmitter), opt IteratorOptions) *stringStreamIntegerIterator { return &stringStreamIntegerIterator{ input: newBufStringIterator(input), @@ -4965,7 +5023,7 @@ func (itr *stringReduceStringIterator) reduce() []StringPoint { return a } -// stringStreamStringIterator +// stringStreamStringIterator streams inputs into the iterator and emits points gradually. type stringStreamStringIterator struct { input *bufStringIterator create func() (StringPointAggregator, StringPointEmitter) @@ -4974,6 +5032,7 @@ type stringStreamStringIterator struct { points []StringPoint } +// newStringStreamStringIterator returns a new instance of stringStreamStringIterator. func newStringStreamStringIterator(input StringIterator, createFn func() (StringPointAggregator, StringPointEmitter), opt IteratorOptions) *stringStreamStringIterator { return &stringStreamStringIterator{ input: newBufStringIterator(input), @@ -5189,7 +5248,7 @@ func (itr *stringReduceBooleanIterator) reduce() []BooleanPoint { return a } -// stringStreamBooleanIterator +// stringStreamBooleanIterator streams inputs into the iterator and emits points gradually. type stringStreamBooleanIterator struct { input *bufStringIterator create func() (StringPointAggregator, BooleanPointEmitter) @@ -5198,6 +5257,7 @@ type stringStreamBooleanIterator struct { points []BooleanPoint } +// newStringStreamBooleanIterator returns a new instance of stringStreamBooleanIterator. 
func newStringStreamBooleanIterator(input StringIterator, createFn func() (StringPointAggregator, BooleanPointEmitter), opt IteratorOptions) *stringStreamBooleanIterator { return &stringStreamBooleanIterator{ input: newBufStringIterator(input), @@ -5897,7 +5957,6 @@ type booleanFillIterator struct { startTime int64 endTime int64 auxFields []interface{} - done bool opt IteratorOptions window struct { @@ -5918,9 +5977,9 @@ func newBooleanFillIterator(input BooleanIterator, expr Expr, opt IteratorOption var startTime, endTime int64 if opt.Ascending { startTime, _ = opt.Window(opt.StartTime) - _, endTime = opt.Window(opt.EndTime) + endTime, _ = opt.Window(opt.EndTime) } else { - _, startTime = opt.Window(opt.EndTime) + startTime, _ = opt.Window(opt.EndTime) endTime, _ = opt.Window(opt.StartTime) } @@ -5942,7 +6001,11 @@ func newBooleanFillIterator(input BooleanIterator, expr Expr, opt IteratorOption itr.window.name, itr.window.tags = p.Name, p.Tags itr.window.time = itr.startTime } else { - itr.window.time = itr.endTime + if opt.Ascending { + itr.window.time = itr.endTime + 1 + } else { + itr.window.time = itr.endTime - 1 + } } return itr } @@ -5958,7 +6021,7 @@ func (itr *booleanFillIterator) Next() *BooleanPoint { // If we are inside of an interval, unread the point and continue below to // constructing a new point. if itr.opt.Ascending { - if itr.window.time < itr.endTime { + if itr.window.time <= itr.endTime { itr.input.unread(p) p = nil break @@ -5985,7 +6048,7 @@ func (itr *booleanFillIterator) Next() *BooleanPoint { } // Check if the point is our next expected point. - if p == nil || p.Time > itr.window.time { + if p == nil || (itr.opt.Ascending && p.Time > itr.window.time) || (!itr.opt.Ascending && p.Time < itr.window.time) { if p != nil { itr.input.unread(p) } @@ -6156,7 +6219,11 @@ func (itr *booleanAuxIterator) stream() { // booleanChanIterator represents a new instance of booleanChanIterator. type booleanChanIterator struct { - buf *BooleanPoint + buf struct { + i int + filled bool + points [2]BooleanPoint + } cond *sync.Cond done bool } @@ -6179,7 +6246,7 @@ func (itr *booleanChanIterator) setBuf(name string, tags Tags, time int64, value // Wait for either the iterator to be done (so we don't have to set the value) // or for the buffer to have been read and ready for another write. - for !itr.done && itr.buf != nil { + for !itr.done && itr.buf.filled { itr.cond.Wait() } @@ -6192,11 +6259,13 @@ func (itr *booleanChanIterator) setBuf(name string, tags Tags, time int64, value switch v := value.(type) { case bool: - itr.buf = &BooleanPoint{Name: name, Tags: tags, Time: time, Value: v} + itr.buf.points[itr.buf.i] = BooleanPoint{Name: name, Tags: tags, Time: time, Value: v} default: - itr.buf = &BooleanPoint{Name: name, Tags: tags, Time: time, Nil: true} + itr.buf.points[itr.buf.i] = BooleanPoint{Name: name, Tags: tags, Time: time, Nil: true} } + itr.buf.filled = true + // Signal to all waiting goroutines that a new value is ready to read. itr.cond.Signal() return true @@ -6207,15 +6276,22 @@ func (itr *booleanChanIterator) Next() *BooleanPoint { // Wait until either a value is available in the buffer or // the iterator is closed. - for !itr.done && itr.buf == nil { + for !itr.done && !itr.buf.filled { itr.cond.Wait() } + // Return nil once the channel is done and the buffer is empty. + if itr.done && !itr.buf.filled { + itr.cond.L.Unlock() + return nil + } + // Always read from the buffer if it exists, even if the iterator // is closed. 
This prevents the last value from being truncated by // the parent iterator. - p := itr.buf - itr.buf = nil + p := &itr.buf.points[itr.buf.i] + itr.buf.i = (itr.buf.i + 1) % len(itr.buf.points) + itr.buf.filled = false itr.cond.Signal() // Do not defer the unlock so we don't create an unnecessary allocation. @@ -6326,7 +6402,7 @@ func (itr *booleanReduceFloatIterator) reduce() []FloatPoint { return a } -// booleanStreamFloatIterator +// booleanStreamFloatIterator streams inputs into the iterator and emits points gradually. type booleanStreamFloatIterator struct { input *bufBooleanIterator create func() (BooleanPointAggregator, FloatPointEmitter) @@ -6335,6 +6411,7 @@ type booleanStreamFloatIterator struct { points []FloatPoint } +// newBooleanStreamFloatIterator returns a new instance of booleanStreamFloatIterator. func newBooleanStreamFloatIterator(input BooleanIterator, createFn func() (BooleanPointAggregator, FloatPointEmitter), opt IteratorOptions) *booleanStreamFloatIterator { return &booleanStreamFloatIterator{ input: newBufBooleanIterator(input), @@ -6550,7 +6627,7 @@ func (itr *booleanReduceIntegerIterator) reduce() []IntegerPoint { return a } -// booleanStreamIntegerIterator +// booleanStreamIntegerIterator streams inputs into the iterator and emits points gradually. type booleanStreamIntegerIterator struct { input *bufBooleanIterator create func() (BooleanPointAggregator, IntegerPointEmitter) @@ -6559,6 +6636,7 @@ type booleanStreamIntegerIterator struct { points []IntegerPoint } +// newBooleanStreamIntegerIterator returns a new instance of booleanStreamIntegerIterator. func newBooleanStreamIntegerIterator(input BooleanIterator, createFn func() (BooleanPointAggregator, IntegerPointEmitter), opt IteratorOptions) *booleanStreamIntegerIterator { return &booleanStreamIntegerIterator{ input: newBufBooleanIterator(input), @@ -6774,7 +6852,7 @@ func (itr *booleanReduceStringIterator) reduce() []StringPoint { return a } -// booleanStreamStringIterator +// booleanStreamStringIterator streams inputs into the iterator and emits points gradually. type booleanStreamStringIterator struct { input *bufBooleanIterator create func() (BooleanPointAggregator, StringPointEmitter) @@ -6783,6 +6861,7 @@ type booleanStreamStringIterator struct { points []StringPoint } +// newBooleanStreamStringIterator returns a new instance of booleanStreamStringIterator. func newBooleanStreamStringIterator(input BooleanIterator, createFn func() (BooleanPointAggregator, StringPointEmitter), opt IteratorOptions) *booleanStreamStringIterator { return &booleanStreamStringIterator{ input: newBufBooleanIterator(input), @@ -6998,7 +7077,7 @@ func (itr *booleanReduceBooleanIterator) reduce() []BooleanPoint { return a } -// booleanStreamBooleanIterator +// booleanStreamBooleanIterator streams inputs into the iterator and emits points gradually. type booleanStreamBooleanIterator struct { input *bufBooleanIterator create func() (BooleanPointAggregator, BooleanPointEmitter) @@ -7007,6 +7086,7 @@ type booleanStreamBooleanIterator struct { points []BooleanPoint } +// newBooleanStreamBooleanIterator returns a new instance of booleanStreamBooleanIterator. 
func newBooleanStreamBooleanIterator(input BooleanIterator, createFn func() (BooleanPointAggregator, BooleanPointEmitter), opt IteratorOptions) *booleanStreamBooleanIterator { return &booleanStreamBooleanIterator{ input: newBufBooleanIterator(input), diff --git a/vendor/github.com/influxdata/influxdb/influxql/iterator.gen.go.tmpl b/vendor/github.com/influxdata/influxdb/influxql/iterator.gen.go.tmpl index 397ed9e60..28878ad00 100644 --- a/vendor/github.com/influxdata/influxdb/influxql/iterator.gen.go.tmpl +++ b/vendor/github.com/influxdata/influxdb/influxql/iterator.gen.go.tmpl @@ -466,7 +466,6 @@ type {{$k.name}}FillIterator struct { startTime int64 endTime int64 auxFields []interface{} - done bool opt IteratorOptions window struct { @@ -487,9 +486,9 @@ func new{{$k.Name}}FillIterator(input {{$k.Name}}Iterator, expr Expr, opt Iterat var startTime, endTime int64 if opt.Ascending { startTime, _ = opt.Window(opt.StartTime) - _, endTime = opt.Window(opt.EndTime) + endTime, _ = opt.Window(opt.EndTime) } else { - _, startTime = opt.Window(opt.EndTime) + startTime, _ = opt.Window(opt.EndTime) endTime, _ = opt.Window(opt.StartTime) } @@ -511,7 +510,11 @@ func new{{$k.Name}}FillIterator(input {{$k.Name}}Iterator, expr Expr, opt Iterat itr.window.name, itr.window.tags = p.Name, p.Tags itr.window.time = itr.startTime } else { - itr.window.time = itr.endTime + if opt.Ascending { + itr.window.time = itr.endTime + 1 + } else { + itr.window.time = itr.endTime - 1 + } } return itr } @@ -527,7 +530,7 @@ func (itr *{{$k.name}}FillIterator) Next() *{{$k.Name}}Point { // If we are inside of an interval, unread the point and continue below to // constructing a new point. if itr.opt.Ascending { - if itr.window.time < itr.endTime { + if itr.window.time <= itr.endTime { itr.input.unread(p) p = nil break @@ -554,7 +557,7 @@ func (itr *{{$k.name}}FillIterator) Next() *{{$k.Name}}Point { } // Check if the point is our next expected point. - if p == nil || p.Time > itr.window.time { + if p == nil || (itr.opt.Ascending && p.Time > itr.window.time) || (!itr.opt.Ascending && p.Time < itr.window.time) { if p != nil { itr.input.unread(p) } @@ -725,7 +728,11 @@ func (itr *{{.name}}AuxIterator) stream() { // {{$k.name}}ChanIterator represents a new instance of {{$k.name}}ChanIterator. type {{$k.name}}ChanIterator struct { - buf *{{$k.Name}}Point + buf struct { + i int + filled bool + points [2]{{$k.Name}}Point + } cond *sync.Cond done bool } @@ -748,7 +755,7 @@ func (itr *{{$k.name}}ChanIterator) setBuf(name string, tags Tags, time int64, v // Wait for either the iterator to be done (so we don't have to set the value) // or for the buffer to have been read and ready for another write. 
- for !itr.done && itr.buf != nil { + for !itr.done && itr.buf.filled { itr.cond.Wait() } @@ -761,14 +768,16 @@ func (itr *{{$k.name}}ChanIterator) setBuf(name string, tags Tags, time int64, v switch v := value.(type) { case {{$k.Type}}: - itr.buf = &{{$k.Name}}Point{Name: name, Tags: tags, Time: time, Value: v} + itr.buf.points[itr.buf.i] = {{$k.Name}}Point{Name: name, Tags: tags, Time: time, Value: v} {{if eq $k.Name "Float"}} case int64: - itr.buf = &{{$k.Name}}Point{Name: name, Tags: tags, Time: time, Value: float64(v)} + itr.buf.points[itr.buf.i] = {{$k.Name}}Point{Name: name, Tags: tags, Time: time, Value: float64(v)} {{end}} default: - itr.buf = &{{$k.Name}}Point{Name: name, Tags: tags, Time: time, Nil: true} + itr.buf.points[itr.buf.i] = {{$k.Name}}Point{Name: name, Tags: tags, Time: time, Nil: true} } + itr.buf.filled = true + // Signal to all waiting goroutines that a new value is ready to read. itr.cond.Signal() return true @@ -779,15 +788,22 @@ func (itr *{{$k.name}}ChanIterator) Next() *{{$k.Name}}Point { // Wait until either a value is available in the buffer or // the iterator is closed. - for !itr.done && itr.buf == nil { + for !itr.done && !itr.buf.filled { itr.cond.Wait() } + // Return nil once the channel is done and the buffer is empty. + if itr.done && !itr.buf.filled { + itr.cond.L.Unlock() + return nil + } + // Always read from the buffer if it exists, even if the iterator // is closed. This prevents the last value from being truncated by // the parent iterator. - p := itr.buf - itr.buf = nil + p := &itr.buf.points[itr.buf.i] + itr.buf.i = (itr.buf.i + 1) % len(itr.buf.points) + itr.buf.filled = false itr.cond.Signal() // Do not defer the unlock so we don't create an unnecessary allocation. @@ -799,10 +815,10 @@ func (itr *{{$k.name}}ChanIterator) Next() *{{$k.Name}}Point { // {{$k.name}}Reduce{{$v.Name}}Iterator executes a reducer for every interval and buffers the result. type {{$k.name}}Reduce{{$v.Name}}Iterator struct { - input *buf{{$k.Name}}Iterator - create func() ({{$k.Name}}PointAggregator, {{$v.Name}}PointEmitter) - opt IteratorOptions - points []{{$v.Name}}Point + input *buf{{$k.Name}}Iterator + create func() ({{$k.Name}}PointAggregator, {{$v.Name}}PointEmitter) + opt IteratorOptions + points []{{$v.Name}}Point } // Stats returns stats from the input iterator. @@ -900,7 +916,7 @@ func (itr *{{$k.name}}Reduce{{$v.Name}}Iterator) reduce() []{{$v.Name}}Point { return a } -// {{$k.name}}Stream{{$v.Name}}Iterator +// {{$k.name}}Stream{{$v.Name}}Iterator streams inputs into the iterator and emits points gradually. type {{$k.name}}Stream{{$v.Name}}Iterator struct { input *buf{{$k.Name}}Iterator create func() ({{$k.Name}}PointAggregator, {{$v.Name}}PointEmitter) @@ -909,6 +925,7 @@ type {{$k.name}}Stream{{$v.Name}}Iterator struct { points []{{$v.Name}}Point } +// new{{$k.Name}}Stream{{$v.Name}}Iterator returns a new instance of {{$k.name}}Stream{{$v.Name}}Iterator. func new{{$k.Name}}Stream{{$v.Name}}Iterator(input {{$k.Name}}Iterator, createFn func() ({{$k.Name}}PointAggregator, {{$v.Name}}PointEmitter), opt IteratorOptions) *{{$k.name}}Stream{{$v.Name}}Iterator { return &{{$k.name}}Stream{{$v.Name}}Iterator{ input: newBuf{{$k.Name}}Iterator(input), @@ -1227,7 +1244,6 @@ func (enc *IteratorEncoder) encode{{.Name}}Iterator(itr {{.Name}}Iterator) error default: } - // Retrieve the next point from the iterator. 
p := itr.Next() if p == nil { diff --git a/vendor/github.com/influxdata/influxdb/influxql/iterator.go b/vendor/github.com/influxdata/influxdb/influxql/iterator.go index 3df58421d..a6ff2549f 100644 --- a/vendor/github.com/influxdata/influxdb/influxql/iterator.go +++ b/vendor/github.com/influxdata/influxdb/influxql/iterator.go @@ -415,6 +415,41 @@ func DrainIterator(itr Iterator) { } } +// DrainIterators reads all points from all iterators. +func DrainIterators(itrs []Iterator) { + for { + var hasData bool + + for _, itr := range itrs { + switch itr := itr.(type) { + case FloatIterator: + if p := itr.Next(); p != nil { + hasData = true + } + case IntegerIterator: + if p := itr.Next(); p != nil { + hasData = true + } + case StringIterator: + if p := itr.Next(); p != nil { + hasData = true + } + case BooleanIterator: + if p := itr.Next(); p != nil { + hasData = true + } + default: + panic(fmt.Sprintf("unsupported iterator type for draining: %T", itr)) + } + } + + // Exit once all iterators return a nil point. + if !hasData { + break + } + } +} + // NewReaderIterator returns an iterator that streams from a reader. func NewReaderIterator(r io.Reader, typ DataType, stats IteratorStats) (Iterator, error) { switch typ { @@ -750,6 +785,16 @@ func (opt IteratorOptions) DerivativeInterval() Interval { return Interval{Duration: time.Second} } +// ElapsedInterval returns the time interval for the elapsed function. +func (opt IteratorOptions) ElapsedInterval() Interval { + // Use the interval on the elapsed() call, if specified. + if expr, ok := opt.Expr.(*Call); ok && len(expr.Args) == 2 { + return Interval{Duration: expr.Args[1].(*DurationLiteral).Val} + } + + return Interval{Duration: time.Nanosecond} +} + // MarshalBinary encodes opt into a binary format. func (opt *IteratorOptions) MarshalBinary() ([]byte, error) { return proto.Marshal(encodeIteratorOptions(opt)) diff --git a/vendor/github.com/influxdata/influxdb/influxql/parser.go b/vendor/github.com/influxdata/influxdb/influxql/parser.go index f5e42b02b..72d20458b 100644 --- a/vendor/github.com/influxdata/influxdb/influxql/parser.go +++ b/vendor/github.com/influxdata/influxdb/influxql/parser.go @@ -413,7 +413,7 @@ func (p *Parser) parseCreateRetentionPolicyStatement() (*CreateRetentionPolicySt stmt.Replication = n // Parse optional SHARD token. - if tok, pos, lit := p.scanIgnoreWhitespace(); tok == SHARD { + if tok, _, _ := p.scanIgnoreWhitespace(); tok == SHARD { if tok, pos, lit := p.scanIgnoreWhitespace(); tok != DURATION { return nil, newParseError(tokstr(tok, lit), []string{"DURATION"}, pos) } @@ -422,17 +422,15 @@ func (p *Parser) parseCreateRetentionPolicyStatement() (*CreateRetentionPolicySt return nil, err } stmt.ShardGroupDuration = d - } else if tok != EOF && tok != SEMICOLON && tok != DEFAULT { - return nil, newParseError(tokstr(tok, lit), []string{"SHARD"}, pos) } else { p.unscan() } // Parse optional DEFAULT token. 
- if tok, pos, lit := p.scanIgnoreWhitespace(); tok == DEFAULT { + if tok, _, _ := p.scanIgnoreWhitespace(); tok == DEFAULT { stmt.Default = true - } else if tok != EOF && tok != SEMICOLON { - return nil, newParseError(tokstr(tok, lit), []string{"DEFAULT"}, pos) + } else { + p.unscan() } return stmt, nil diff --git a/vendor/github.com/influxdata/influxdb/influxql/point.go b/vendor/github.com/influxdata/influxdb/influxql/point.go index e1c88a973..613d2d6d7 100644 --- a/vendor/github.com/influxdata/influxdb/influxql/point.go +++ b/vendor/github.com/influxdata/influxdb/influxql/point.go @@ -3,6 +3,7 @@ package influxql import ( "bytes" "encoding/binary" + "fmt" "io" "sort" @@ -32,6 +33,31 @@ type Point interface { // Points represents a list of points. type Points []Point +// Clone returns a deep copy of a. +func (a Points) Clone() []Point { + other := make([]Point, len(a)) + for i, p := range a { + if p == nil { + other[i] = nil + continue + } + + switch p := p.(type) { + case *FloatPoint: + other[i] = p.Clone() + case *IntegerPoint: + other[i] = p.Clone() + case *StringPoint: + other[i] = p.Clone() + case *BooleanPoint: + other[i] = p.Clone() + default: + panic(fmt.Sprintf("unable to clone point: %T", p)) + } + } + return other +} + // Tags represent a map of keys and values. // It memoizes its key so it can be used efficiently during query execution. type Tags struct { diff --git a/vendor/github.com/influxdata/influxdb/influxql/query_executor.go b/vendor/github.com/influxdata/influxdb/influxql/query_executor.go index 875c64764..5336785d7 100644 --- a/vendor/github.com/influxdata/influxdb/influxql/query_executor.go +++ b/vendor/github.com/influxdata/influxdb/influxql/query_executor.go @@ -79,6 +79,9 @@ type ExecutionContext struct { // The requested maximum number of points to return in each result. ChunkSize int + // Hold the query executor's logger. + Log *log.Logger + // A channel that is closed when the query is interrupted. InterruptCh <-chan struct{} } @@ -153,6 +156,7 @@ func (e *QueryExecutor) ExecuteQuery(query *Query, database string, chunkSize in func (e *QueryExecutor) executeQuery(query *Query, database string, chunkSize int, closing <-chan struct{}, results chan *Result) { defer close(results) + defer e.recover(query, results) e.statMap.Add(statQueriesActive, 1) defer func(start time.Time) { @@ -176,6 +180,7 @@ func (e *QueryExecutor) executeQuery(query *Query, database string, chunkSize in Results: results, Database: database, ChunkSize: chunkSize, + Log: logger, InterruptCh: task.closing, } @@ -269,6 +274,15 @@ loop: } } +func (e *QueryExecutor) recover(query *Query, results chan *Result) { + if err := recover(); err != nil { + results <- &Result{ + StatementID: -1, + Err: fmt.Errorf("%s [panic:%s]", query.String(), err), + } + } +} + func (e *QueryExecutor) executeKillQueryStatement(stmt *KillQueryStatement) error { return e.killQuery(stmt.QueryID) } diff --git a/vendor/github.com/influxdata/influxdb/influxql/result.go b/vendor/github.com/influxdata/influxdb/influxql/result.go index ccf151ee9..d1f2085bb 100644 --- a/vendor/github.com/influxdata/influxdb/influxql/result.go +++ b/vendor/github.com/influxdata/influxdb/influxql/result.go @@ -7,6 +7,11 @@ import ( "github.com/influxdata/influxdb/models" ) +const ( + // WarningLevel is the message level for a warning. + WarningLevel = "warning" +) + // TagSet is a fundamental concept within the query system. It represents a composite series, // composed of multiple individual series that share a set of tag attributes. 
type TagSet struct { @@ -22,6 +27,19 @@ func (t *TagSet) AddFilter(key string, filter Expr) { t.Filters = append(t.Filters, filter) } +func (t *TagSet) Len() int { return len(t.SeriesKeys) } +func (t *TagSet) Less(i, j int) bool { return t.SeriesKeys[i] < t.SeriesKeys[j] } +func (t *TagSet) Swap(i, j int) { + t.SeriesKeys[i], t.SeriesKeys[j] = t.SeriesKeys[j], t.SeriesKeys[i] + t.Filters[i], t.Filters[j] = t.Filters[j], t.Filters[i] +} + +// Message represents a user-facing message to be included with the result. +type Message struct { + Level string `json:"level"` + Text string `json:"text"` +} + // Result represents a resultset returned from a single statement. // Rows represents a list of rows that can be sorted consistently by name/tag. type Result struct { @@ -29,6 +47,7 @@ type Result struct { // to combine statement results if they're being buffered in memory. StatementID int `json:"-"` Series models.Rows + Messages []*Message Err error } @@ -36,12 +55,14 @@ type Result struct { func (r *Result) MarshalJSON() ([]byte, error) { // Define a struct that outputs "error" as a string. var o struct { - Series []*models.Row `json:"series,omitempty"` - Err string `json:"error,omitempty"` + Series []*models.Row `json:"series,omitempty"` + Messages []*Message `json:"messages,omitempty"` + Err string `json:"error,omitempty"` } // Copy fields to output struct. o.Series = r.Series + o.Messages = r.Messages if r.Err != nil { o.Err = r.Err.Error() } @@ -52,8 +73,9 @@ func (r *Result) MarshalJSON() ([]byte, error) { // UnmarshalJSON decodes the data into the Result struct func (r *Result) UnmarshalJSON(b []byte) error { var o struct { - Series []*models.Row `json:"series,omitempty"` - Err string `json:"error,omitempty"` + Series []*models.Row `json:"series,omitempty"` + Messages []*Message `json:"messages,omitempty"` + Err string `json:"error,omitempty"` } err := json.Unmarshal(b, &o) @@ -61,129 +83,9 @@ func (r *Result) UnmarshalJSON(b []byte) error { return err } r.Series = o.Series + r.Messages = o.Messages if o.Err != "" { r.Err = errors.New(o.Err) } return nil } - -// GetProcessor is a Method that returns processor type and index -// based on the type of expression. 
-func GetProcessor(expr Expr, startIndex int) (Processor, int) { - switch expr := expr.(type) { - case *VarRef: - return newEchoProcessor(startIndex), startIndex + 1 - case *Call: - return newEchoProcessor(startIndex), startIndex + 1 - case *BinaryExpr: - return getBinaryProcessor(expr, startIndex) - case *ParenExpr: - return GetProcessor(expr.Expr, startIndex) - case *NumberLiteral: - return newLiteralProcessor(expr.Val), startIndex - case *StringLiteral: - return newLiteralProcessor(expr.Val), startIndex - case *BooleanLiteral: - return newLiteralProcessor(expr.Val), startIndex - case *TimeLiteral: - return newLiteralProcessor(expr.Val), startIndex - case *DurationLiteral: - return newLiteralProcessor(expr.Val), startIndex - } - panic("unreachable") -} - -// Processor is a prcessor type returned by GetProcessor -type Processor func(values []interface{}) interface{} - -func newEchoProcessor(index int) Processor { - return func(values []interface{}) interface{} { - if index > len(values)-1 { - return nil - } - return values[index] - } -} - -func newLiteralProcessor(val interface{}) Processor { - return func(values []interface{}) interface{} { - return val - } -} - -func getBinaryProcessor(expr *BinaryExpr, startIndex int) (Processor, int) { - lhs, index := GetProcessor(expr.LHS, startIndex) - rhs, index := GetProcessor(expr.RHS, index) - - return newBinaryExprEvaluator(expr.Op, lhs, rhs), index -} - -func newBinaryExprEvaluator(op Token, lhs, rhs Processor) Processor { - switch op { - case ADD: - return func(values []interface{}) interface{} { - l := lhs(values) - r := rhs(values) - if lf, rf, ok := processorValuesAsFloat64(l, r); ok { - return lf + rf - } - return nil - } - case SUB: - return func(values []interface{}) interface{} { - l := lhs(values) - r := rhs(values) - if lf, rf, ok := processorValuesAsFloat64(l, r); ok { - return lf - rf - } - return nil - } - case MUL: - return func(values []interface{}) interface{} { - l := lhs(values) - r := rhs(values) - if lf, rf, ok := processorValuesAsFloat64(l, r); ok { - return lf * rf - } - return nil - } - case DIV: - return func(values []interface{}) interface{} { - l := lhs(values) - r := rhs(values) - if lf, rf, ok := processorValuesAsFloat64(l, r); ok { - return lf / rf - } - return nil - } - default: - // we shouldn't get here, but give them back nils if it goes this way - return func(values []interface{}) interface{} { - return nil - } - } -} - -func processorValuesAsFloat64(lhs interface{}, rhs interface{}) (float64, float64, bool) { - var lf float64 - var rf float64 - var ok bool - - lf, ok = lhs.(float64) - if !ok { - var li int64 - if li, ok = lhs.(int64); !ok { - return 0, 0, false - } - lf = float64(li) - } - rf, ok = rhs.(float64) - if !ok { - var ri int64 - if ri, ok = rhs.(int64); !ok { - return 0, 0, false - } - rf = float64(ri) - } - return lf, rf, true -} diff --git a/vendor/github.com/influxdata/influxdb/influxql/select.go b/vendor/github.com/influxdata/influxdb/influxql/select.go index 9a050833d..980b1f92d 100644 --- a/vendor/github.com/influxdata/influxdb/influxql/select.go +++ b/vendor/github.com/influxdata/influxdb/influxql/select.go @@ -230,7 +230,15 @@ func buildExprIterator(expr Expr, ic IteratorCreator, opt IteratorOptions) (Iter return nil, err } return NewIntervalIterator(input, opt), nil - case "derivative", "non_negative_derivative", "difference", "moving_average": + case "derivative", "non_negative_derivative", "difference", "moving_average", "elapsed": + if !opt.Interval.IsZero() { + if opt.Ascending { + 
opt.StartTime -= int64(opt.Interval.Duration) + } else { + opt.EndTime += int64(opt.Interval.Duration) + } + } + input, err := buildExprIterator(expr.Args[0], ic, opt) if err != nil { return nil, err @@ -241,10 +249,20 @@ func buildExprIterator(expr Expr, ic IteratorCreator, opt IteratorOptions) (Iter interval := opt.DerivativeInterval() isNonNegative := (expr.Name == "non_negative_derivative") return newDerivativeIterator(input, opt, interval, isNonNegative) + case "elapsed": + interval := opt.ElapsedInterval() + return newElapsedIterator(input, opt, interval) case "difference": return newDifferenceIterator(input, opt) case "moving_average": n := expr.Args[1].(*IntegerLiteral) + if n.Val > 1 && !opt.Interval.IsZero() { + if opt.Ascending { + opt.StartTime -= int64(opt.Interval.Duration) * (n.Val - 1) + } else { + opt.EndTime += int64(opt.Interval.Duration) * (n.Val - 1) + } + } return newMovingAverageIterator(input, int(n.Val), opt) } panic(fmt.Sprintf("invalid series aggregate function: %s", expr.Name)) diff --git a/vendor/github.com/influxdata/influxdb/services/collectd/README.md b/vendor/github.com/influxdata/influxdb/services/collectd/README.md index 34bf5ff88..35cb01c72 100644 --- a/vendor/github.com/influxdata/influxdb/services/collectd/README.md +++ b/vendor/github.com/influxdata/influxdb/services/collectd/README.md @@ -22,7 +22,7 @@ Please note that UDP packets larger than the standard size of 1452 are dropped a ## Config Example ``` -[collectd] +[[collectd]] enabled = true bind-address = ":25826" # the bind address database = "collectd" # Name of the database that will be written to diff --git a/vendor/github.com/influxdata/influxdb/services/collectd/config.go b/vendor/github.com/influxdata/influxdb/services/collectd/config.go index a794b352f..ca95b049b 100644 --- a/vendor/github.com/influxdata/influxdb/services/collectd/config.go +++ b/vendor/github.com/influxdata/influxdb/services/collectd/config.go @@ -68,3 +68,35 @@ func NewConfig() Config { TypesDB: DefaultTypesDB, } } + +// WithDefaults takes the given config and returns a new config with any required +// default values set. +func (c *Config) WithDefaults() *Config { + d := *c + if d.BindAddress == "" { + d.BindAddress = DefaultBindAddress + } + if d.Database == "" { + d.Database = DefaultDatabase + } + if d.RetentionPolicy == "" { + d.RetentionPolicy = DefaultRetentionPolicy + } + if d.BatchSize == 0 { + d.BatchSize = DefaultBatchSize + } + if d.BatchPending == 0 { + d.BatchPending = DefaultBatchPending + } + if d.BatchDuration == 0 { + d.BatchDuration = DefaultBatchDuration + } + if d.ReadBuffer == 0 { + d.ReadBuffer = DefaultReadBuffer + } + if d.TypesDB == "" { + d.TypesDB = DefaultTypesDB + } + + return &d +} diff --git a/vendor/github.com/influxdata/influxdb/services/collectd/service.go b/vendor/github.com/influxdata/influxdb/services/collectd/service.go index 2bb36120f..5cfd28d74 100644 --- a/vendor/github.com/influxdata/influxdb/services/collectd/service.go +++ b/vendor/github.com/influxdata/influxdb/services/collectd/service.go @@ -63,13 +63,15 @@ type Service struct { // NewService returns a new instance of the collectd service. func NewService(c Config) *Service { - s := &Service{ - Config: &c, + s := Service{ + // Use defaults where necessary. + Config: c.WithDefaults(), + Logger: log.New(os.Stderr, "[collectd] ", log.LstdFlags), err: make(chan error), } - return s + return &s } // Open starts the service. 
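The `WithDefaults` helper introduced above for the collectd config is a small but reusable pattern: it copies the caller's `Config` by value, fills any zero-value fields with the package defaults, and returns a pointer to the copy, so `NewService` can rely on a fully populated config without mutating the caller's struct. Below is a minimal standalone sketch of that pattern; the `Config` fields and default constants are simplified stand-ins for illustration, not the actual collectd or OpenTSDB types.

```go
package main

import "fmt"

// Illustrative defaults; the real services define their own constants.
const (
	defaultBindAddress = ":25826"
	defaultDatabase    = "collectd"
	defaultBatchSize   = 5000
)

// Config is a simplified stand-in for a service configuration struct.
type Config struct {
	BindAddress string
	Database    string
	BatchSize   int
}

// WithDefaults returns a copy of c with any zero-value fields replaced by
// package defaults. The receiver is copied by value, so the original Config
// is never modified.
func (c *Config) WithDefaults() *Config {
	d := *c
	if d.BindAddress == "" {
		d.BindAddress = defaultBindAddress
	}
	if d.Database == "" {
		d.Database = defaultDatabase
	}
	if d.BatchSize == 0 {
		d.BatchSize = defaultBatchSize
	}
	return &d
}

func main() {
	// Only Database is set explicitly; everything else falls back to a default.
	c := Config{Database: "metrics"}
	d := c.WithDefaults()
	fmt.Println(d.BindAddress, d.Database, d.BatchSize) // :25826 metrics 5000
	fmt.Println(c.BindAddress)                          // "" - the caller's struct is untouched
}
```

The same shape is reused for the OpenTSDB config further down, which keeps a zero-value `Config` usable and collects the defaulting logic in one testable place.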
diff --git a/vendor/github.com/influxdata/influxdb/services/meta/client.go b/vendor/github.com/influxdata/influxdb/services/meta/client.go index 41c46972f..52c84db4c 100644 --- a/vendor/github.com/influxdata/influxdb/services/meta/client.go +++ b/vendor/github.com/influxdata/influxdb/services/meta/client.go @@ -230,6 +230,8 @@ func (c *Client) CreateDatabaseWithRetentionPolicy(name string, rpi *RetentionPo // Check if the retention policy already exists. If it does and matches // the desired retention policy, exit with no error. if rp := db.RetentionPolicy(rpi.Name); rp != nil { + // Normalise ShardDuration before comparing to any existing retention policies. + rpi.ShardGroupDuration = normalisedShardDuration(rpi.ShardGroupDuration, rpi.Duration) if rp.ReplicaN != rpi.ReplicaN || rp.Duration != rpi.Duration || rp.ShardGroupDuration != rpi.ShardGroupDuration { return nil, ErrRetentionPolicyConflict } diff --git a/vendor/github.com/influxdata/influxdb/services/meta/data.go b/vendor/github.com/influxdata/influxdb/services/meta/data.go index 6f517a1a7..8a9154785 100644 --- a/vendor/github.com/influxdata/influxdb/services/meta/data.go +++ b/vendor/github.com/influxdata/influxdb/services/meta/data.go @@ -141,9 +141,7 @@ func (data *Data) CreateRetentionPolicy(database string, rpi *RetentionPolicyInf // Normalise ShardDuration before comparing to any existing // retention policies - if rpi.ShardGroupDuration == 0 { - rpi.ShardGroupDuration = shardGroupDuration(rpi.Duration) - } + rpi.ShardGroupDuration = normalisedShardDuration(rpi.ShardGroupDuration, rpi.Duration) // Find database. di := data.Database(database) @@ -959,6 +957,14 @@ func shardGroupDuration(d time.Duration) time.Duration { return 1 * time.Hour } +// normalisedShardDuration returns normalised shard duration based on a policy duration. +func normalisedShardDuration(sgd, d time.Duration) time.Duration { + if sgd == 0 { + return shardGroupDuration(d) + } + return sgd +} + // ShardGroupInfo represents metadata about a shard group. The DeletedAt field is important // because it makes it clear that a ShardGroup has been marked as deleted, and allow the system // to be sure that a ShardGroup is not simply missing. If the DeletedAt is set, the system can diff --git a/vendor/github.com/influxdata/influxdb/services/meta/write_authorizer.go b/vendor/github.com/influxdata/influxdb/services/meta/write_authorizer.go new file mode 100644 index 000000000..ffa2320ce --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/services/meta/write_authorizer.go @@ -0,0 +1,27 @@ +package meta + +import ( + "fmt" + + "github.com/influxdata/influxdb/influxql" +) + +type WriteAuthorizer struct { + Client *Client +} + +func NewWriteAuthorizer(c *Client) *WriteAuthorizer { + return &WriteAuthorizer{Client: c} +} + +// AuthorizeWrite returns nil if the user has permission to write to the database. 
+func (a WriteAuthorizer) AuthorizeWrite(username, database string) error { + u, err := a.Client.User(username) + if err != nil || u == nil || !u.Authorize(influxql.WritePrivilege, database) { + return &ErrAuthorize{ + Database: database, + Message: fmt.Sprintf("%s not authorized to write to %s", username, database), + } + } + return nil +} diff --git a/vendor/github.com/influxdata/influxdb/services/opentsdb/README.md b/vendor/github.com/influxdata/influxdb/services/opentsdb/README.md index fe84a3065..46856b7bc 100644 --- a/vendor/github.com/influxdata/influxdb/services/opentsdb/README.md +++ b/vendor/github.com/influxdata/influxdb/services/opentsdb/README.md @@ -1,10 +1,10 @@ -openTSDB Input +OpenTSDB Input ============ -InfluxDB supports both the telnet and HTTP openTSDB protocol. This means that InfluxDB can act as a drop-in replacement for your openTSDB system. +InfluxDB supports both the telnet and HTTP OpenTSDB protocol. This means that InfluxDB can act as a drop-in replacement for your OpenTSDB system. ## Configuration -The openTSDB input allows the binding address, target database, and target retention policy within that database, to be set. If the database does not exist, it will be created automatically when the input is initialized. If you also decide to configure retention policy (without configuration the input will use the auto-created default retention policy), both the database and retention policy must already exist. +The OpenTSDB inputs allow the binding address, target database, and target retention policy within that database, to be set. If the database does not exist, it will be created automatically when the input is initialized. If you also decide to configure retention policy (without configuration the input will use the auto-created default retention policy), both the database and retention policy must already exist. The write-consistency-level can also be set. If any write operations do not meet the configured consistency guarantees, an error will occur and the data will not be indexed. The default consistency-level is `ONE`. -The openTSDB input also performs internal batching of the points it receives, as batched writes to the database are more efficient. The default _batch size_ is 1000, _pending batch_ factor is 5, with a _batch timeout_ of 1 second. This means the input will write batches of maximum size 1000, but if a batch has not reached 1000 points within 1 second of the first point being added to a batch, it will emit that batch regardless of size. The pending batch factor controls how many batches can be in memory at once, allowing the input to transmit a batch, while still building other batches. +The OpenTSDB input also performs internal batching of the points it receives, as batched writes to the database are more efficient. The default _batch size_ is 1000, _pending batch_ factor is 5, with a _batch timeout_ of 1 second. This means the input will write batches of maximum size 1000, but if a batch has not reached 1000 points within 1 second of the first point being added to a batch, it will emit that batch regardless of size. The pending batch factor controls how many batches can be in memory at once, allowing the input to transmit a batch, while still building other batches. 
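The batching behaviour the README paragraph above describes (emit a batch once it reaches the maximum size, or once the batch timeout has elapsed since the first point was added, with the pending factor bounding how many completed batches may sit in memory) can be modelled in a few lines of Go. This is only an illustrative sketch, not the batcher the OpenTSDB input actually uses; the function and channel names are made up, and the pending factor is approximated by the capacity of the output channel.

```go
package main

import (
	"fmt"
	"time"
)

// batch reads points from in and emits slices on out, flushing whenever the
// current batch reaches size points or timeout has passed since the first
// point of that batch was added.
func batch(in <-chan string, out chan<- []string, size int, timeout time.Duration) {
	var buf []string
	timer := time.NewTimer(timeout)
	if !timer.Stop() {
		<-timer.C // start with the timer stopped and drained
	}

	flush := func() {
		if len(buf) > 0 {
			out <- buf
			buf = nil
		}
	}

	for {
		select {
		case p, ok := <-in:
			if !ok { // input closed: emit the partial batch and stop
				flush()
				close(out)
				return
			}
			if len(buf) == 0 {
				timer.Reset(timeout) // timeout is measured from the first point
			}
			buf = append(buf, p)
			if len(buf) >= size {
				if !timer.Stop() {
					<-timer.C // drain a timer that fired while appending
				}
				flush()
			}
		case <-timer.C:
			flush() // timeout reached before the batch filled up
		}
	}
}

func main() {
	in := make(chan string)
	out := make(chan []string, 5) // "pending batch" factor of 5

	go batch(in, out, 3, time.Second)

	for i := 0; i < 4; i++ {
		in <- fmt.Sprintf("point-%d", i)
	}
	close(in)

	for b := range out {
		fmt.Println(len(b), b) // a full batch of 3, then the leftover point
	}
}
```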
diff --git a/vendor/github.com/influxdata/influxdb/services/opentsdb/config.go b/vendor/github.com/influxdata/influxdb/services/opentsdb/config.go index 821d8bdb7..71eba9f35 100644 --- a/vendor/github.com/influxdata/influxdb/services/opentsdb/config.go +++ b/vendor/github.com/influxdata/influxdb/services/opentsdb/config.go @@ -27,6 +27,9 @@ const ( // DefaultBatchPending is the default number of batches that can be in the queue. DefaultBatchPending = 5 + + // DefaultCertificate is the default location of the certificate used when TLS is enabled. + DefaultCertificate = "/etc/ssl/influxdb.pem" ) // Config represents the configuration of the OpenTSDB service. @@ -52,10 +55,42 @@ func NewConfig() Config { RetentionPolicy: DefaultRetentionPolicy, ConsistencyLevel: DefaultConsistencyLevel, TLSEnabled: false, - Certificate: "/etc/ssl/influxdb.pem", + Certificate: DefaultCertificate, BatchSize: DefaultBatchSize, BatchPending: DefaultBatchPending, BatchTimeout: toml.Duration(DefaultBatchTimeout), LogPointErrors: true, } } + +// WithDefaults takes the given config and returns a new config with any required +// default values set. +func (c *Config) WithDefaults() *Config { + d := *c + if d.BindAddress == "" { + d.BindAddress = DefaultBindAddress + } + if d.Database == "" { + d.Database = DefaultDatabase + } + if d.RetentionPolicy == "" { + d.RetentionPolicy = DefaultRetentionPolicy + } + if d.ConsistencyLevel == "" { + d.ConsistencyLevel = DefaultConsistencyLevel + } + if d.Certificate == "" { + d.Certificate = DefaultCertificate + } + if d.BatchSize == 0 { + d.BatchSize = DefaultBatchSize + } + if d.BatchPending == 0 { + d.BatchPending = DefaultBatchPending + } + if d.BatchTimeout == 0 { + d.BatchTimeout = toml.Duration(DefaultBatchTimeout) + } + + return &d +} diff --git a/vendor/github.com/influxdata/influxdb/services/opentsdb/handler.go b/vendor/github.com/influxdata/influxdb/services/opentsdb/handler.go index c804e1a0b..0ed192730 100644 --- a/vendor/github.com/influxdata/influxdb/services/opentsdb/handler.go +++ b/vendor/github.com/influxdata/influxdb/services/opentsdb/handler.go @@ -153,7 +153,6 @@ func (ln *chanListener) Accept() (net.Conn, error) { if !ok { return nil, errors.New("network connection closed") } - log.Println("TSDB listener accept ", conn) return conn, nil } diff --git a/vendor/github.com/influxdata/influxdb/services/opentsdb/service.go b/vendor/github.com/influxdata/influxdb/services/opentsdb/service.go index 1db82b47d..32b205f1e 100644 --- a/vendor/github.com/influxdata/influxdb/services/opentsdb/service.go +++ b/vendor/github.com/influxdata/influxdb/services/opentsdb/service.go @@ -80,19 +80,22 @@ type Service struct { // NewService returns a new instance of Service. func NewService(c Config) (*Service, error) { + // Use defaults where necessary. 
+ d := c.WithDefaults() + s := &Service{ done: make(chan struct{}), - tls: c.TLSEnabled, - cert: c.Certificate, + tls: d.TLSEnabled, + cert: d.Certificate, err: make(chan error), - BindAddress: c.BindAddress, - Database: c.Database, - RetentionPolicy: c.RetentionPolicy, - batchSize: c.BatchSize, - batchPending: c.BatchPending, - batchTimeout: time.Duration(c.BatchTimeout), + BindAddress: d.BindAddress, + Database: d.Database, + RetentionPolicy: d.RetentionPolicy, + batchSize: d.BatchSize, + batchPending: d.BatchPending, + batchTimeout: time.Duration(d.BatchTimeout), Logger: log.New(os.Stderr, "[opentsdb] ", log.LstdFlags), - LogPointErrors: c.LogPointErrors, + LogPointErrors: d.LogPointErrors, } return s, nil } diff --git a/vendor/github.com/influxdata/influxdb/tsdb/meta.go b/vendor/github.com/influxdata/influxdb/tsdb/meta.go index efc61c7fa..904822bef 100644 --- a/vendor/github.com/influxdata/influxdb/tsdb/meta.go +++ b/vendor/github.com/influxdata/influxdb/tsdb/meta.go @@ -658,6 +658,11 @@ func (m *Measurement) TagSets(dimensions []string, condition influxql.Expr) ([]* tagSets[tagsAsKey] = tagSet } + // Sort the series in each tag set. + for _, t := range tagSets { + sort.Sort(t) + } + // The TagSets have been created, as a map of TagSets. Just send // the values back as a slice, sorting for consistency. sortedTagSetKeys := make([]string, 0, len(tagSets)) diff --git a/vendor/golang.org/x/sys/unix/syscall_linux.go b/vendor/golang.org/x/sys/unix/syscall_linux.go index 5182d0532..9ca104c95 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux.go @@ -872,7 +872,6 @@ func Mount(source string, target string, fstype string, flags uintptr, data stri //sysnb EpollCreate(size int) (fd int, err error) //sysnb EpollCreate1(flag int) (fd int, err error) //sysnb EpollCtl(epfd int, op int, fd int, event *EpollEvent) (err error) -//sys EpollWait(epfd int, events []EpollEvent, msec int) (n int, err error) //sys Exit(code int) = SYS_EXIT_GROUP //sys Faccessat(dirfd int, path string, mode uint32, flags int) (err error) //sys Fallocate(fd int, mode uint32, off int64, len int64) (err error) @@ -907,7 +906,6 @@ func Getpgrp() (pid int) { //sys Mkdirat(dirfd int, path string, mode uint32) (err error) //sys Mknodat(dirfd int, path string, mode uint32, dev int) (err error) //sys Nanosleep(time *Timespec, leftover *Timespec) (err error) -//sys Pause() (err error) //sys PivotRoot(newroot string, putold string) (err error) = SYS_PIVOT_ROOT //sysnb prlimit(pid int, resource int, old *Rlimit, newlimit *Rlimit) (err error) = SYS_PRLIMIT64 //sys Prctl(option int, arg2 uintptr, arg3 uintptr, arg4 uintptr, arg5 uintptr) (err error) diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_386.go b/vendor/golang.org/x/sys/unix/syscall_linux_386.go index d5dde1b94..bea01cb50 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_386.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_386.go @@ -91,6 +91,8 @@ func Pipe2(p []int, flags int) (err error) { //sys Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err error) = SYS__NEWSELECT //sys mmap2(addr uintptr, length uintptr, prot int, flags int, fd int, pageOffset uintptr) (xaddr uintptr, err error) +//sys EpollWait(epfd int, events []EpollEvent, msec int) (n int, err error) +//sys Pause() (err error) func mmap(addr uintptr, length uintptr, prot int, flags int, fd int, offset int64) (xaddr uintptr, err error) { page := uintptr(offset / 4096) diff --git 
a/vendor/golang.org/x/sys/unix/syscall_linux_amd64.go b/vendor/golang.org/x/sys/unix/syscall_linux_amd64.go index b7fa9e4f5..721f24b68 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_amd64.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_amd64.go @@ -9,6 +9,7 @@ package unix import "syscall" //sys Dup2(oldfd int, newfd int) (err error) +//sys EpollWait(epfd int, events []EpollEvent, msec int) (n int, err error) //sys Fadvise(fd int, offset int64, length int64, advice int) (err error) = SYS_FADVISE64 //sys Fchown(fd int, uid int, gid int) (err error) //sys Fstat(fd int, stat *Stat_t) (err error) @@ -25,6 +26,7 @@ import "syscall" //sys Lchown(path string, uid int, gid int) (err error) //sys Listen(s int, n int) (err error) //sys Lstat(path string, stat *Stat_t) (err error) +//sys Pause() (err error) //sys Pread(fd int, p []byte, offset int64) (n int, err error) = SYS_PREAD64 //sys Pwrite(fd int, p []byte, offset int64) (n int, err error) = SYS_PWRITE64 //sys Seek(fd int, offset int64, whence int) (off int64, err error) = SYS_LSEEK diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_arm.go b/vendor/golang.org/x/sys/unix/syscall_linux_arm.go index 3b4da2061..122df649a 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_arm.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_arm.go @@ -108,6 +108,8 @@ func Seek(fd int, offset int64, whence int) (newoffset int64, err error) { // Vsyscalls on amd64. //sysnb Gettimeofday(tv *Timeval) (err error) +//sys EpollWait(epfd int, events []EpollEvent, msec int) (n int, err error) +//sys Pause() (err error) func Time(t *Time_t) (Time_t, error) { var tv Timeval diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_arm64.go b/vendor/golang.org/x/sys/unix/syscall_linux_arm64.go index 9e2e8b72f..d10518680 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_arm64.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_arm64.go @@ -8,6 +8,7 @@ package unix const _SYS_dup = SYS_DUP3 +//sys EpollWait(epfd int, events []EpollEvent, msec int) (n int, err error) = SYS_EPOLL_PWAIT //sys Fchown(fd int, uid int, gid int) (err error) //sys Fstat(fd int, stat *Stat_t) (err error) //sys Fstatat(fd int, path string, stat *Stat_t, flags int) (err error) @@ -154,6 +155,14 @@ func Dup2(oldfd int, newfd int) (err error) { return Dup3(oldfd, newfd, 0) } +func Pause() (err error) { + _, _, e1 := Syscall6(SYS_PPOLL, 0, 0, 0, 0, 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + // TODO(dfc): constants that should be in zsysnum_linux_arm64.go, remove // these when the deprecated syscalls that the syscall package relies on // are removed. diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_mips64x.go b/vendor/golang.org/x/sys/unix/syscall_linux_mips64x.go index ce7b420c9..bb15ba3e6 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_mips64x.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_mips64x.go @@ -14,6 +14,7 @@ package unix // Lookup linux_dirent{,64} in kernel source code for details. 
const _SYS_getdents = SYS_GETDENTS +//sys EpollWait(epfd int, events []EpollEvent, msec int) (n int, err error) //sys Fchown(fd int, uid int, gid int) (err error) //sys Fstatfs(fd int, buf *Statfs_t) (err error) //sys Ftruncate(fd int, length int64) (err error) @@ -24,6 +25,7 @@ const _SYS_getdents = SYS_GETDENTS //sysnb Getuid() (uid int) //sys Lchown(path string, uid int, gid int) (err error) //sys Listen(s int, n int) (err error) +//sys Pause() (err error) //sys Pread(fd int, p []byte, offset int64) (n int, err error) = SYS_PREAD64 //sys Pwrite(fd int, p []byte, offset int64) (n int, err error) = SYS_PWRITE64 //sys Seek(fd int, offset int64, whence int) (off int64, err error) = SYS_LSEEK diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_ppc64x.go b/vendor/golang.org/x/sys/unix/syscall_linux_ppc64x.go index 9560ffa66..b156d5242 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_ppc64x.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_ppc64x.go @@ -7,6 +7,7 @@ package unix +//sys EpollWait(epfd int, events []EpollEvent, msec int) (n int, err error) //sys Dup2(oldfd int, newfd int) (err error) //sys Fchown(fd int, uid int, gid int) (err error) //sys Fstat(fd int, stat *Stat_t) (err error) @@ -22,6 +23,7 @@ package unix //sys Lchown(path string, uid int, gid int) (err error) //sys Listen(s int, n int) (err error) //sys Lstat(path string, stat *Stat_t) (err error) +//sys Pause() (err error) //sys Pread(fd int, p []byte, offset int64) (n int, err error) = SYS_PREAD64 //sys Pwrite(fd int, p []byte, offset int64) (n int, err error) = SYS_PWRITE64 //sys Seek(fd int, offset int64, whence int) (off int64, err error) = SYS_LSEEK diff --git a/vendor/golang.org/x/sys/unix/types_linux.go b/vendor/golang.org/x/sys/unix/types_linux.go index abc5e5d78..974d28c38 100644 --- a/vendor/golang.org/x/sys/unix/types_linux.go +++ b/vendor/golang.org/x/sys/unix/types_linux.go @@ -109,7 +109,7 @@ typedef struct user_regs_struct PtraceRegs; // The real epoll_event is a union, and godefs doesn't handle it well. 
struct my_epoll_event { uint32_t events; -#ifdef __ARM_EABI__ +#if defined(__ARM_EABI__) || defined(__aarch64__) // padding is not specified in linux/eventpoll.h but added to conform to the // alignment requirements of EABI int32_t padFd; diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_386.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_386.go index fe1f1dd24..749f3e46e 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_386.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_386.go @@ -370,23 +370,6 @@ func EpollCtl(epfd int, op int, fd int, event *EpollEvent) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func EpollWait(epfd int, events []EpollEvent, msec int) (n int, err error) { - var _p0 unsafe.Pointer - if len(events) > 0 { - _p0 = unsafe.Pointer(&events[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall6(SYS_EPOLL_WAIT, uintptr(epfd), uintptr(_p0), uintptr(len(events)), uintptr(msec), 0, 0) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Exit(code int) { Syscall(SYS_EXIT_GROUP, uintptr(code), 0, 0) return @@ -746,16 +729,6 @@ func Nanosleep(time *Timespec, leftover *Timespec) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Pause() (err error) { - _, _, e1 := Syscall(SYS_PAUSE, 0, 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func PivotRoot(newroot string, putold string) (err error) { var _p0 *byte _p0, err = BytePtrFromString(newroot) @@ -1582,6 +1555,33 @@ func mmap2(addr uintptr, length uintptr, prot int, flags int, fd int, pageOffset // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func EpollWait(epfd int, events []EpollEvent, msec int) (n int, err error) { + var _p0 unsafe.Pointer + if len(events) > 0 { + _p0 = unsafe.Pointer(&events[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_EPOLL_WAIT, uintptr(epfd), uintptr(_p0), uintptr(len(events)), uintptr(msec), 0, 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Pause() (err error) { + _, _, e1 := Syscall(SYS_PAUSE, 0, 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func getrlimit(resource int, rlim *rlimit32) (err error) { _, _, e1 := RawSyscall(SYS_GETRLIMIT, uintptr(resource), uintptr(unsafe.Pointer(rlim)), 0) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_amd64.go index 510cb1bb1..1096aa544 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_amd64.go @@ -370,23 +370,6 @@ func EpollCtl(epfd int, op int, fd int, event *EpollEvent) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func EpollWait(epfd int, events []EpollEvent, msec int) (n int, err error) { - var _p0 unsafe.Pointer - if len(events) > 0 { - _p0 = unsafe.Pointer(&events[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall6(SYS_EPOLL_WAIT, uintptr(epfd), uintptr(_p0), uintptr(len(events)), uintptr(msec), 0, 0) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Exit(code int) { 
Syscall(SYS_EXIT_GROUP, uintptr(code), 0, 0) return @@ -746,16 +729,6 @@ func Nanosleep(time *Timespec, leftover *Timespec) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Pause() (err error) { - _, _, e1 := Syscall(SYS_PAUSE, 0, 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func PivotRoot(newroot string, putold string) (err error) { var _p0 *byte _p0, err = BytePtrFromString(newroot) @@ -1226,6 +1199,23 @@ func Dup2(oldfd int, newfd int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func EpollWait(epfd int, events []EpollEvent, msec int) (n int, err error) { + var _p0 unsafe.Pointer + if len(events) > 0 { + _p0 = unsafe.Pointer(&events[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_EPOLL_WAIT, uintptr(epfd), uintptr(_p0), uintptr(len(events)), uintptr(msec), 0, 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Fadvise(fd int, offset int64, length int64, advice int) (err error) { _, _, e1 := Syscall6(SYS_FADVISE64, uintptr(fd), uintptr(offset), uintptr(length), uintptr(advice), 0, 0) if e1 != 0 { @@ -1391,6 +1381,16 @@ func Lstat(path string, stat *Stat_t) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Pause() (err error) { + _, _, e1 := Syscall(SYS_PAUSE, 0, 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Pread(fd int, p []byte, offset int64) (n int, err error) { var _p0 unsafe.Pointer if len(p) > 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_arm.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_arm.go index 28c720fbc..9066e1cb7 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_arm.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_arm.go @@ -370,23 +370,6 @@ func EpollCtl(epfd int, op int, fd int, event *EpollEvent) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func EpollWait(epfd int, events []EpollEvent, msec int) (n int, err error) { - var _p0 unsafe.Pointer - if len(events) > 0 { - _p0 = unsafe.Pointer(&events[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall6(SYS_EPOLL_WAIT, uintptr(epfd), uintptr(_p0), uintptr(len(events)), uintptr(msec), 0, 0) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Exit(code int) { Syscall(SYS_EXIT_GROUP, uintptr(code), 0, 0) return @@ -746,16 +729,6 @@ func Nanosleep(time *Timespec, leftover *Timespec) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Pause() (err error) { - _, _, e1 := Syscall(SYS_PAUSE, 0, 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func PivotRoot(newroot string, putold string) (err error) { var _p0 *byte _p0, err = BytePtrFromString(newroot) @@ -1649,6 +1622,33 @@ func Gettimeofday(tv *Timeval) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func EpollWait(epfd int, events []EpollEvent, msec int) (n int, err error) { + var _p0 unsafe.Pointer + if len(events) > 0 { + _p0 = unsafe.Pointer(&events[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_EPOLL_WAIT, uintptr(epfd), uintptr(_p0), 
uintptr(len(events)), uintptr(msec), 0, 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Pause() (err error) { + _, _, e1 := Syscall(SYS_PAUSE, 0, 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Pread(fd int, p []byte, offset int64) (n int, err error) { var _p0 unsafe.Pointer if len(p) > 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_arm64.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_arm64.go index 1ac54217a..5b9161226 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_arm64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_arm64.go @@ -370,23 +370,6 @@ func EpollCtl(epfd int, op int, fd int, event *EpollEvent) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func EpollWait(epfd int, events []EpollEvent, msec int) (n int, err error) { - var _p0 unsafe.Pointer - if len(events) > 0 { - _p0 = unsafe.Pointer(&events[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall6(SYS_EPOLL_WAIT, uintptr(epfd), uintptr(_p0), uintptr(len(events)), uintptr(msec), 0, 0) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Exit(code int) { Syscall(SYS_EXIT_GROUP, uintptr(code), 0, 0) return @@ -746,16 +729,6 @@ func Nanosleep(time *Timespec, leftover *Timespec) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Pause() (err error) { - _, _, e1 := Syscall(SYS_PAUSE, 0, 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func PivotRoot(newroot string, putold string) (err error) { var _p0 *byte _p0, err = BytePtrFromString(newroot) @@ -1216,6 +1189,23 @@ func Munlockall() (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func EpollWait(epfd int, events []EpollEvent, msec int) (n int, err error) { + var _p0 unsafe.Pointer + if len(events) > 0 { + _p0 = unsafe.Pointer(&events[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_EPOLL_PWAIT, uintptr(epfd), uintptr(_p0), uintptr(len(events)), uintptr(msec), 0, 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Fchown(fd int, uid int, gid int) (err error) { _, _, e1 := Syscall(SYS_FCHOWN, uintptr(fd), uintptr(uid), uintptr(gid)) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64.go index 724700583..738c83091 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64.go @@ -370,23 +370,6 @@ func EpollCtl(epfd int, op int, fd int, event *EpollEvent) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func EpollWait(epfd int, events []EpollEvent, msec int) (n int, err error) { - var _p0 unsafe.Pointer - if len(events) > 0 { - _p0 = unsafe.Pointer(&events[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall6(SYS_EPOLL_WAIT, uintptr(epfd), uintptr(_p0), uintptr(len(events)), uintptr(msec), 0, 0) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Exit(code int) { Syscall(SYS_EXIT_GROUP, 
uintptr(code), 0, 0) return @@ -746,16 +729,6 @@ func Nanosleep(time *Timespec, leftover *Timespec) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Pause() (err error) { - _, _, e1 := Syscall(SYS_PAUSE, 0, 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func PivotRoot(newroot string, putold string) (err error) { var _p0 *byte _p0, err = BytePtrFromString(newroot) @@ -1216,6 +1189,23 @@ func Munlockall() (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func EpollWait(epfd int, events []EpollEvent, msec int) (n int, err error) { + var _p0 unsafe.Pointer + if len(events) > 0 { + _p0 = unsafe.Pointer(&events[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_EPOLL_WAIT, uintptr(epfd), uintptr(_p0), uintptr(len(events)), uintptr(msec), 0, 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Fchown(fd int, uid int, gid int) (err error) { _, _, e1 := Syscall(SYS_FCHOWN, uintptr(fd), uintptr(uid), uintptr(gid)) if e1 != 0 { @@ -1314,6 +1304,16 @@ func Listen(s int, n int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Pause() (err error) { + _, _, e1 := Syscall(SYS_PAUSE, 0, 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Pread(fd int, p []byte, offset int64) (n int, err error) { var _p0 unsafe.Pointer if len(p) > 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64le.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64le.go index 1b7fb64ea..2a0357832 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64le.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64le.go @@ -370,23 +370,6 @@ func EpollCtl(epfd int, op int, fd int, event *EpollEvent) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func EpollWait(epfd int, events []EpollEvent, msec int) (n int, err error) { - var _p0 unsafe.Pointer - if len(events) > 0 { - _p0 = unsafe.Pointer(&events[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall6(SYS_EPOLL_WAIT, uintptr(epfd), uintptr(_p0), uintptr(len(events)), uintptr(msec), 0, 0) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Exit(code int) { Syscall(SYS_EXIT_GROUP, uintptr(code), 0, 0) return @@ -746,16 +729,6 @@ func Nanosleep(time *Timespec, leftover *Timespec) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Pause() (err error) { - _, _, e1 := Syscall(SYS_PAUSE, 0, 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func PivotRoot(newroot string, putold string) (err error) { var _p0 *byte _p0, err = BytePtrFromString(newroot) @@ -1216,6 +1189,23 @@ func Munlockall() (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func EpollWait(epfd int, events []EpollEvent, msec int) (n int, err error) { + var _p0 unsafe.Pointer + if len(events) > 0 { + _p0 = unsafe.Pointer(&events[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_EPOLL_WAIT, uintptr(epfd), uintptr(_p0), uintptr(len(events)), uintptr(msec), 0, 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + 
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Fchown(fd int, uid int, gid int) (err error) { _, _, e1 := Syscall(SYS_FCHOWN, uintptr(fd), uintptr(uid), uintptr(gid)) if e1 != 0 { @@ -1314,6 +1304,16 @@ func Listen(s int, n int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Pause() (err error) { + _, _, e1 := Syscall(SYS_PAUSE, 0, 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Pread(fd int, p []byte, offset int64) (n int, err error) { var _p0 unsafe.Pointer if len(p) > 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64.go index 2b4cd7dfb..844ae592f 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64.go @@ -370,23 +370,6 @@ func EpollCtl(epfd int, op int, fd int, event *EpollEvent) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func EpollWait(epfd int, events []EpollEvent, msec int) (n int, err error) { - var _p0 unsafe.Pointer - if len(events) > 0 { - _p0 = unsafe.Pointer(&events[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall6(SYS_EPOLL_WAIT, uintptr(epfd), uintptr(_p0), uintptr(len(events)), uintptr(msec), 0, 0) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Exit(code int) { Syscall(SYS_EXIT_GROUP, uintptr(code), 0, 0) return @@ -746,16 +729,6 @@ func Nanosleep(time *Timespec, leftover *Timespec) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Pause() (err error) { - _, _, e1 := Syscall(SYS_PAUSE, 0, 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func PivotRoot(newroot string, putold string) (err error) { var _p0 *byte _p0, err = BytePtrFromString(newroot) @@ -1216,6 +1189,23 @@ func Munlockall() (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func EpollWait(epfd int, events []EpollEvent, msec int) (n int, err error) { + var _p0 unsafe.Pointer + if len(events) > 0 { + _p0 = unsafe.Pointer(&events[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_EPOLL_WAIT, uintptr(epfd), uintptr(_p0), uintptr(len(events)), uintptr(msec), 0, 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Dup2(oldfd int, newfd int) (err error) { _, _, e1 := Syscall(SYS_DUP2, uintptr(oldfd), uintptr(newfd), 0) if e1 != 0 { @@ -1370,6 +1360,16 @@ func Lstat(path string, stat *Stat_t) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Pause() (err error) { + _, _, e1 := Syscall(SYS_PAUSE, 0, 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Pread(fd int, p []byte, offset int64) (n int, err error) { var _p0 unsafe.Pointer if len(p) > 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64le.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64le.go index 7e1708ded..0e86c9d9e 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64le.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64le.go @@ -370,23 +370,6 @@ func EpollCtl(epfd int, op int, fd int, event *EpollEvent) (err error) { 
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func EpollWait(epfd int, events []EpollEvent, msec int) (n int, err error) { - var _p0 unsafe.Pointer - if len(events) > 0 { - _p0 = unsafe.Pointer(&events[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall6(SYS_EPOLL_WAIT, uintptr(epfd), uintptr(_p0), uintptr(len(events)), uintptr(msec), 0, 0) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Exit(code int) { Syscall(SYS_EXIT_GROUP, uintptr(code), 0, 0) return @@ -746,16 +729,6 @@ func Nanosleep(time *Timespec, leftover *Timespec) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Pause() (err error) { - _, _, e1 := Syscall(SYS_PAUSE, 0, 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func PivotRoot(newroot string, putold string) (err error) { var _p0 *byte _p0, err = BytePtrFromString(newroot) @@ -1216,6 +1189,23 @@ func Munlockall() (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func EpollWait(epfd int, events []EpollEvent, msec int) (n int, err error) { + var _p0 unsafe.Pointer + if len(events) > 0 { + _p0 = unsafe.Pointer(&events[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_EPOLL_WAIT, uintptr(epfd), uintptr(_p0), uintptr(len(events)), uintptr(msec), 0, 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Dup2(oldfd int, newfd int) (err error) { _, _, e1 := Syscall(SYS_DUP2, uintptr(oldfd), uintptr(newfd), 0) if e1 != 0 { @@ -1370,6 +1360,16 @@ func Lstat(path string, stat *Stat_t) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Pause() (err error) { + _, _, e1 := Syscall(SYS_PAUSE, 0, 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Pread(fd int, p []byte, offset int64) (n int, err error) { var _p0 unsafe.Pointer if len(p) > 0 { diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go index 7939c4cec..28b7cd43c 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go @@ -580,6 +580,7 @@ type Ustat_t struct { type EpollEvent struct { Events uint32 + PadFd int32 Fd int32 Pad int32 }
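The trailing ztypes change adds a `PadFd` field so the Go `EpollEvent` matches the 8-byte alignment the aarch64 kernel uses for `epoll_event`, mirroring the `#if defined(__ARM_EABI__) || defined(__aarch64__)` padding added in types_linux.go (arm64 also routes `EpollWait` through `SYS_EPOLL_PWAIT` and emulates `Pause` via `ppoll`, since the legacy syscalls do not exist there). A rough, Linux-only sketch of how the resulting layout can be checked, assuming the arm64 definitions shown in the patch:

```go
// Sketch only: checks the EpollEvent layout implied by the patch.
// With the added PadFd field the struct is four 4-byte fields, i.e. 16 bytes,
// matching the kernel's aligned epoll_event on linux/arm64.
package main

import (
	"fmt"
	"unsafe"

	"golang.org/x/sys/unix"
)

func main() {
	var ev unix.EpollEvent
	fmt.Println(unsafe.Sizeof(ev)) // expected: 16 on linux/arm64
}
```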