diff --git a/ALERT_DESIGN.md b/ALERT_DESIGN.md deleted file mode 100644 index f54bd750c..000000000 --- a/ALERT_DESIGN.md +++ /dev/null @@ -1,93 +0,0 @@ -# Kapacitor Alerting - -Kapacitor enables a user to define and trigger alerts. -Alerts can be sent to various backend handlers. - -## Alert State - -Kapacitor exposes the state of all the alerts via its HTTP API. -See the API docs for more details. - - -## Two ways to work with alerts - -### Direct Alerts - -If you already have a system that manages your alerts then you can define your alerts directly in your TICKscripts. -This allows you to send alerts as they are triggered to any of the various alert handlers. - - -### Alert Events Subsystem - -If you want to have more fine grained control over your alerts then an alert subsystem is available. -The alert subsystem allows you to various different actions with your alerts: - -* Aggregate Alerts into a single alert containing summary information. -* Rate limit alerts -* Easily manage which handlers handle which alerts without modifying your Kapacitor tasks. - -This subsystem is based on an event model. -When Kapacitor triggers an alert instead of directly sending it to the handlers, it is first sent to this subsystem as an event. -Then different handlers can listen for different events and take appropriate actions. - -#### Using the Alert Event Subsystem - -Add an alert handler called `alertEvent`, either globally in the config or on a task by task basis. - -Example TICKscript: - -```go -stream - |from() - .measurement('cpu') - |window() - .period(1m) - .every(1m) - |mean('usage') - |alert() - .warn(lambda: "mean" > 70) - .crit(lambda: "mean" > 80) - // Send this alert to the alert event subsystem (not currently implemented) - .alertEvent() - // Send this alert directly to slack. (works today) - .slack() -``` - -Then alert handlers can be configured to subscribe to the events. -These alert handlers will be configured via the API. 
-Use yaml/json to define the alert handlers. - -```yaml -alert: - - events: - - cpu - - mem - - aggregate: - groupBy: id - interval: 1m - - throttle: - count: 10 - every: 5m - - publish: throttled_agged - - pagerDuty: - serviceKey: XXX -``` - -```json -{ - "alert" : [ - {"events": ["cpu", "mem"]}, - {"aggregate": {"groupBy":"id","internal":"1m"}}, - {"throttle": {"count":10,"every":"5m"}}, - {"publish": ["throttled_aggreated"]}, - {"pagerDuty": {"serviceKey":"XXX"}} - ] -} -``` - - -#### Implementation - -The underlying implementation will be a basic publish/subscribe system. -Various subscriber can be defined via the above definitions which can in turn publish back to the internal system or send alerts to third parties. - diff --git a/CHANGELOG.md b/CHANGELOG.md index 7cd427e32..396be67af 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -4,6 +4,33 @@ ### Release Notes +A new system for working with alerts has been introduced. +This alerting system allows you to configure topics for alert events and then configure handlers for various topics. +This way alert generation is decoupled from alert handling. + +Existing TICKscripts will continue to work without modification. + +To use this new alerting system remove any explicit alert handlers from your TICKscript and specify a topic. +Then configure the handlers for the topic. + +``` +stream + |from() + .measurement('cpu') + .groupBy('host') + |alert() + // Specify the topic for the alert + .topic('cpu') + .info(lambda: "value" > 60) + .warn(lambda: "value" > 70) + .crit(lambda: "value" > 80) + // No handlers are configured in the script, they are instead defined on the topic via the API. +``` + +The API exposes endpoints to query the state of each alert and endpoints for configuring alert handlers. +See the API docs for more details. +The kapacitor CLI has been updated with commands for defining alert handlers. 
+ This release introduces a new feature where you can window based off the number of points instead of their time. For example: @@ -29,6 +56,8 @@ stream - [#898](https://github.com/influxdata/kapacitor/issues/898): Now when the Window node every value is zero, the window will be emitted immediately for each new point. - [#1052](https://github.com/influxdata/kapacitor/issues/1052): Move alerta api token to header and add option to skip TLS verification. - [#251](https://github.com/influxdata/kapacitor/issues/251): Enable markdown in slack attachments. +- [#1095](https://github.com/influxdata/kapacitor/pull/1095): Add new alert API, with support for configuring handlers and topics. + ### Bugfixes diff --git a/alert.go b/alert.go index 93637db93..4828439a9 100644 --- a/alert.go +++ b/alert.go @@ -3,25 +3,30 @@ package kapacitor import ( "bytes" "encoding/json" - "errors" "fmt" html "html/template" "log" - "net" - "net/http" "os" - "os/exec" - "path/filepath" "sync" text "text/template" "time" "github.com/influxdata/influxdb/influxql" imodels "github.com/influxdata/influxdb/models" + "github.com/influxdata/kapacitor/alert" "github.com/influxdata/kapacitor/expvar" "github.com/influxdata/kapacitor/models" "github.com/influxdata/kapacitor/pipeline" + alertservice "github.com/influxdata/kapacitor/services/alert" + "github.com/influxdata/kapacitor/services/hipchat" + "github.com/influxdata/kapacitor/services/opsgenie" + "github.com/influxdata/kapacitor/services/pagerduty" + "github.com/influxdata/kapacitor/services/slack" + "github.com/influxdata/kapacitor/services/smtp" + "github.com/influxdata/kapacitor/services/telegram" + "github.com/influxdata/kapacitor/services/victorops" "github.com/influxdata/kapacitor/tick/stateful" + "github.com/pkg/errors" ) const ( @@ -30,6 +35,7 @@ const ( statsInfosTriggered = "infos_triggered" statsWarnsTriggered = "warns_triggered" statsCritsTriggered = "crits_triggered" + statsEventsDropped = "events_dropped" ) // The newest state change is 
weighted 'weightDiff' times more than oldest state change. @@ -38,71 +44,12 @@ const weightDiff = 1.5 // Maximum weight applied to newest state change. const maxWeight = 1.2 -type AlertHandler func(ad *AlertData) - -type AlertLevel int - -const ( - OKAlert AlertLevel = iota - InfoAlert - WarnAlert - CritAlert -) - -func (l AlertLevel) String() string { - switch l { - case OKAlert: - return "OK" - case InfoAlert: - return "INFO" - case WarnAlert: - return "WARNING" - case CritAlert: - return "CRITICAL" - default: - panic("unknown AlertLevel") - } -} - -func (l AlertLevel) MarshalText() ([]byte, error) { - return []byte(l.String()), nil -} - -func (l *AlertLevel) UnmarshalText(text []byte) error { - s := string(text) - switch s { - case "OK": - *l = OKAlert - case "INFO": - *l = InfoAlert - case "WARNING": - *l = WarnAlert - case "CRITICAL": - *l = CritAlert - default: - return fmt.Errorf("unknown AlertLevel %s", s) - } - return nil -} - -type AlertData struct { - ID string `json:"id"` - Message string `json:"message"` - Details string `json:"details"` - Time time.Time `json:"time"` - Duration time.Duration `json:"duration"` - Level AlertLevel `json:"level"` - Data influxql.Result `json:"data"` - - // Info for custom templates - info detailsInfo -} - type AlertNode struct { node a *pipeline.AlertNode - endpoint string - handlers []AlertHandler + topic string + anonTopic string + handlers []alert.Handler levels []stateful.Expression scopePools []stateful.ScopePool states map[models.GroupID]*alertState @@ -115,6 +62,7 @@ type AlertNode struct { infosTriggered *expvar.Int warnsTriggered *expvar.Int critsTriggered *expvar.Int + eventsDropped *expvar.Int bufPool sync.Pool @@ -130,6 +78,10 @@ func newAlertNode(et *ExecutingTask, n *pipeline.AlertNode, l *log.Logger) (an * } an.node.runF = an.runAlert + an.topic = n.Topic + // Create anonymous topic name + an.anonTopic = fmt.Sprintf("%s:%s:%s", et.tm.ID(), et.Task.ID, an.Name()) + // Create buffer pool for the templates 
an.bufPool = sync.Pool{ New: func() interface{} { @@ -167,24 +119,33 @@ func newAlertNode(et *ExecutingTask, n *pipeline.AlertNode, l *log.Logger) (an * } // Construct alert handlers - an.handlers = make([]AlertHandler, 0) - for _, post := range n.PostHandlers { - post := post - an.handlers = append(an.handlers, func(ad *AlertData) { an.handlePost(post, ad) }) + c := alertservice.PostHandlerConfig{ + URL: post.URL, + } + h := alertservice.NewPostHandler(c, l) + an.handlers = append(an.handlers, h) } for _, tcp := range n.TcpHandlers { - tcp := tcp - an.handlers = append(an.handlers, func(ad *AlertData) { an.handleTcp(tcp, ad) }) + c := alertservice.TCPHandlerConfig{ + Address: tcp.Address, + } + h := alertservice.NewTCPHandler(c, l) + an.handlers = append(an.handlers, h) } for _, email := range n.EmailHandlers { - email := email - an.handlers = append(an.handlers, func(ad *AlertData) { an.handleEmail(email, ad) }) + c := smtp.HandlerConfig{ + To: email.ToList, + } + h := et.tm.SMTPService.Handler(c, l) + an.handlers = append(an.handlers, h) } if len(n.EmailHandlers) == 0 && (et.tm.SMTPService != nil && et.tm.SMTPService.Global()) { - an.handlers = append(an.handlers, func(ad *AlertData) { an.handleEmail(&pipeline.EmailHandler{}, ad) }) + c := smtp.HandlerConfig{} + h := et.tm.SMTPService.Handler(c, l) + an.handlers = append(an.handlers, h) } // If email has been configured with state changes only set it. 
if et.tm.SMTPService != nil && @@ -193,46 +154,72 @@ func newAlertNode(et *ExecutingTask, n *pipeline.AlertNode, l *log.Logger) (an * n.IsStateChangesOnly = true } - for _, exec := range n.ExecHandlers { - exec := exec - an.handlers = append(an.handlers, func(ad *AlertData) { an.handleExec(exec, ad) }) + for _, e := range n.ExecHandlers { + c := alertservice.ExecHandlerConfig{ + Prog: e.Command[0], + Args: e.Command[1:], + Commander: et.tm.Commander, + } + h := alertservice.NewExecHandler(c, l) + an.handlers = append(an.handlers, h) } for _, log := range n.LogHandlers { - log := log - if !filepath.IsAbs(log.FilePath) { - return nil, fmt.Errorf("alert log path must be absolute: %s is not absolute", log.FilePath) + c := alertservice.DefaultLogHandlerConfig() + c.Path = log.FilePath + if log.Mode != 0 { + c.Mode = os.FileMode(log.Mode) } - an.handlers = append(an.handlers, func(ad *AlertData) { an.handleLog(log, ad) }) + h, err := alertservice.NewLogHandler(c, l) + if err != nil { + return nil, errors.Wrap(err, "failed to create log alert handler") + } + an.handlers = append(an.handlers, h) } for _, vo := range n.VictorOpsHandlers { - vo := vo - an.handlers = append(an.handlers, func(ad *AlertData) { an.handleVictorOps(vo, ad) }) + c := victorops.HandlerConfig{ + RoutingKey: vo.RoutingKey, + } + h := et.tm.VictorOpsService.Handler(c, l) + an.handlers = append(an.handlers, h) } if len(n.VictorOpsHandlers) == 0 && (et.tm.VictorOpsService != nil && et.tm.VictorOpsService.Global()) { - an.handlers = append(an.handlers, func(ad *AlertData) { an.handleVictorOps(&pipeline.VictorOpsHandler{}, ad) }) + c := victorops.HandlerConfig{} + h := et.tm.VictorOpsService.Handler(c, l) + an.handlers = append(an.handlers, h) } for _, pd := range n.PagerDutyHandlers { - pd := pd - an.handlers = append(an.handlers, func(ad *AlertData) { an.handlePagerDuty(pd, ad) }) + c := pagerduty.HandlerConfig{ + ServiceKey: pd.ServiceKey, + } + h := et.tm.PagerDutyService.Handler(c, l) + an.handlers = 
append(an.handlers, h) } if len(n.PagerDutyHandlers) == 0 && (et.tm.PagerDutyService != nil && et.tm.PagerDutyService.Global()) { - an.handlers = append(an.handlers, func(ad *AlertData) { an.handlePagerDuty(&pipeline.PagerDutyHandler{}, ad) }) + c := pagerduty.HandlerConfig{} + h := et.tm.PagerDutyService.Handler(c, l) + an.handlers = append(an.handlers, h) } - for _, sensu := range n.SensuHandlers { - sensu := sensu - an.handlers = append(an.handlers, func(ad *AlertData) { an.handleSensu(sensu, ad) }) + for range n.SensuHandlers { + h := et.tm.SensuService.Handler(l) + an.handlers = append(an.handlers, h) } - for _, slack := range n.SlackHandlers { - slack := slack - an.handlers = append(an.handlers, func(ad *AlertData) { an.handleSlack(slack, ad) }) + for _, s := range n.SlackHandlers { + c := slack.HandlerConfig{ + Channel: s.Channel, + Username: s.Username, + IconEmoji: s.IconEmoji, + } + h := et.tm.SlackService.Handler(c, l) + an.handlers = append(an.handlers, h) } if len(n.SlackHandlers) == 0 && (et.tm.SlackService != nil && et.tm.SlackService.Global()) { - an.handlers = append(an.handlers, func(ad *AlertData) { an.handleSlack(&pipeline.SlackHandler{}, ad) }) + h := et.tm.SlackService.Handler(slack.HandlerConfig{}, l) + an.handlers = append(an.handlers, h) } // If slack has been configured with state changes only set it. 
if et.tm.SlackService != nil && @@ -241,12 +228,20 @@ func newAlertNode(et *ExecutingTask, n *pipeline.AlertNode, l *log.Logger) (an * n.IsStateChangesOnly = true } - for _, telegram := range n.TelegramHandlers { - telegram := telegram - an.handlers = append(an.handlers, func(ad *AlertData) { an.handleTelegram(telegram, ad) }) + for _, t := range n.TelegramHandlers { + c := telegram.HandlerConfig{ + ChatId: t.ChatId, + ParseMode: t.ParseMode, + DisableWebPagePreview: t.IsDisableWebPagePreview, + DisableNotification: t.IsDisableNotification, + } + h := et.tm.TelegramService.Handler(c, l) + an.handlers = append(an.handlers, h) } if len(n.TelegramHandlers) == 0 && (et.tm.TelegramService != nil && et.tm.TelegramService.Global()) { - an.handlers = append(an.handlers, func(ad *AlertData) { an.handleTelegram(&pipeline.TelegramHandler{}, ad) }) + c := telegram.HandlerConfig{} + h := et.tm.TelegramService.Handler(c, l) + an.handlers = append(an.handlers, h) } // If telegram has been configured with state changes only set it. if et.tm.TelegramService != nil && @@ -255,12 +250,18 @@ func newAlertNode(et *ExecutingTask, n *pipeline.AlertNode, l *log.Logger) (an * n.IsStateChangesOnly = true } - for _, hipchat := range n.HipChatHandlers { - hipchat := hipchat - an.handlers = append(an.handlers, func(ad *AlertData) { an.handleHipChat(hipchat, ad) }) + for _, hc := range n.HipChatHandlers { + c := hipchat.HandlerConfig{ + Room: hc.Room, + Token: hc.Token, + } + h := et.tm.HipChatService.Handler(c, l) + an.handlers = append(an.handlers, h) } if len(n.HipChatHandlers) == 0 && (et.tm.HipChatService != nil && et.tm.HipChatService.Global()) { - an.handlers = append(an.handlers, func(ad *AlertData) { an.handleHipChat(&pipeline.HipChatHandler{}, ad) }) + c := hipchat.HandlerConfig{} + h := et.tm.HipChatService.Handler(c, l) + an.handlers = append(an.handlers, h) } // If HipChat has been configured with state changes only set it. 
if et.tm.HipChatService != nil && @@ -269,58 +270,69 @@ func newAlertNode(et *ExecutingTask, n *pipeline.AlertNode, l *log.Logger) (an * n.IsStateChangesOnly = true } - for _, alerta := range n.AlertaHandlers { - // Validate alerta templates - rtmpl, err := text.New("resource").Parse(alerta.Resource) - if err != nil { - return nil, err + for _, a := range n.AlertaHandlers { + c := et.tm.AlertaService.DefaultHandlerConfig() + if a.Token != "" { + c.Token = a.Token } - evtmpl, err := text.New("event").Parse(alerta.Event) - if err != nil { - return nil, err + if a.Resource != "" { + c.Resource = a.Resource } - etmpl, err := text.New("environment").Parse(alerta.Environment) - if err != nil { - return nil, err + if a.Event != "" { + c.Event = a.Event } - gtmpl, err := text.New("group").Parse(alerta.Group) - if err != nil { - return nil, err + if a.Environment != "" { + c.Environment = a.Environment } - vtmpl, err := text.New("value").Parse(alerta.Value) - if err != nil { - return nil, err + if a.Group != "" { + c.Group = a.Group } - ai := alertaHandler{ - AlertaHandler: alerta, - resourceTmpl: rtmpl, - eventTmpl: evtmpl, - environmentTmpl: etmpl, - groupTmpl: gtmpl, - valueTmpl: vtmpl, + if a.Value != "" { + c.Value = a.Value } - an.handlers = append(an.handlers, func(ad *AlertData) { an.handleAlerta(ai, ad) }) + if a.Origin != "" { + c.Origin = a.Origin + } + if len(a.Service) != 0 { + c.Service = a.Service + } + h, err := et.tm.AlertaService.Handler(c, l) + if err != nil { + return nil, errors.Wrap(err, "failed to create Alerta handler") + } + an.handlers = append(an.handlers, h) } for _, og := range n.OpsGenieHandlers { - og := og - an.handlers = append(an.handlers, func(ad *AlertData) { an.handleOpsGenie(og, ad) }) + c := opsgenie.HandlerConfig{ + TeamsList: og.TeamsList, + RecipientsList: og.RecipientsList, + } + h := et.tm.OpsGenieService.Handler(c, l) + an.handlers = append(an.handlers, h) } if len(n.OpsGenieHandlers) == 0 && (et.tm.OpsGenieService != nil && 
et.tm.OpsGenieService.Global()) { - an.handlers = append(an.handlers, func(ad *AlertData) { an.handleOpsGenie(&pipeline.OpsGenieHandler{}, ad) }) + c := opsgenie.HandlerConfig{} + h := et.tm.OpsGenieService.Handler(c, l) + an.handlers = append(an.handlers, h) } - for _, talk := range n.TalkHandlers { - talk := talk - an.handlers = append(an.handlers, func(ad *AlertData) { an.handleTalk(talk, ad) }) + for range n.TalkHandlers { + h := et.tm.TalkService.Handler(l) + an.handlers = append(an.handlers, h) + } + + // Register Handlers on topic + for _, h := range an.handlers { + et.tm.AlertService.RegisterHandler([]string{an.anonTopic}, h) } // Parse level expressions - an.levels = make([]stateful.Expression, CritAlert+1) - an.scopePools = make([]stateful.ScopePool, CritAlert+1) + an.levels = make([]stateful.Expression, alert.Critical+1) + an.scopePools = make([]stateful.ScopePool, alert.Critical+1) - an.levelResets = make([]stateful.Expression, CritAlert+1) - an.lrScopePools = make([]stateful.ScopePool, CritAlert+1) + an.levelResets = make([]stateful.Expression, alert.Critical+1) + an.lrScopePools = make([]stateful.ScopePool, alert.Critical+1) if n.Info != nil { statefulExpression, expressionCompileError := stateful.NewExpression(n.Info.Expression) @@ -328,15 +340,15 @@ func newAlertNode(et *ExecutingTask, n *pipeline.AlertNode, l *log.Logger) (an * return nil, fmt.Errorf("Failed to compile stateful expression for info: %s", expressionCompileError) } - an.levels[InfoAlert] = statefulExpression - an.scopePools[InfoAlert] = stateful.NewScopePool(stateful.FindReferenceVariables(n.Info.Expression)) + an.levels[alert.Info] = statefulExpression + an.scopePools[alert.Info] = stateful.NewScopePool(stateful.FindReferenceVariables(n.Info.Expression)) if n.InfoReset != nil { lstatefulExpression, lexpressionCompileError := stateful.NewExpression(n.InfoReset.Expression) if lexpressionCompileError != nil { return nil, fmt.Errorf("Failed to compile stateful expression for infoReset: 
%s", lexpressionCompileError) } - an.levelResets[InfoAlert] = lstatefulExpression - an.lrScopePools[InfoAlert] = stateful.NewScopePool(stateful.FindReferenceVariables(n.InfoReset.Expression)) + an.levelResets[alert.Info] = lstatefulExpression + an.lrScopePools[alert.Info] = stateful.NewScopePool(stateful.FindReferenceVariables(n.InfoReset.Expression)) } } @@ -345,15 +357,15 @@ func newAlertNode(et *ExecutingTask, n *pipeline.AlertNode, l *log.Logger) (an * if expressionCompileError != nil { return nil, fmt.Errorf("Failed to compile stateful expression for warn: %s", expressionCompileError) } - an.levels[WarnAlert] = statefulExpression - an.scopePools[WarnAlert] = stateful.NewScopePool(stateful.FindReferenceVariables(n.Warn.Expression)) + an.levels[alert.Warning] = statefulExpression + an.scopePools[alert.Warning] = stateful.NewScopePool(stateful.FindReferenceVariables(n.Warn.Expression)) if n.WarnReset != nil { lstatefulExpression, lexpressionCompileError := stateful.NewExpression(n.WarnReset.Expression) if lexpressionCompileError != nil { return nil, fmt.Errorf("Failed to compile stateful expression for warnReset: %s", lexpressionCompileError) } - an.levelResets[WarnAlert] = lstatefulExpression - an.lrScopePools[WarnAlert] = stateful.NewScopePool(stateful.FindReferenceVariables(n.WarnReset.Expression)) + an.levelResets[alert.Warning] = lstatefulExpression + an.lrScopePools[alert.Warning] = stateful.NewScopePool(stateful.FindReferenceVariables(n.WarnReset.Expression)) } } @@ -362,15 +374,15 @@ func newAlertNode(et *ExecutingTask, n *pipeline.AlertNode, l *log.Logger) (an * if expressionCompileError != nil { return nil, fmt.Errorf("Failed to compile stateful expression for crit: %s", expressionCompileError) } - an.levels[CritAlert] = statefulExpression - an.scopePools[CritAlert] = stateful.NewScopePool(stateful.FindReferenceVariables(n.Crit.Expression)) + an.levels[alert.Critical] = statefulExpression + an.scopePools[alert.Critical] = 
stateful.NewScopePool(stateful.FindReferenceVariables(n.Crit.Expression)) if n.CritReset != nil { lstatefulExpression, lexpressionCompileError := stateful.NewExpression(n.CritReset.Expression) if lexpressionCompileError != nil { return nil, fmt.Errorf("Failed to compile stateful expression for critReset: %s", lexpressionCompileError) } - an.levelResets[CritAlert] = lstatefulExpression - an.lrScopePools[CritAlert] = stateful.NewScopePool(stateful.FindReferenceVariables(n.CritReset.Expression)) + an.levelResets[alert.Critical] = lstatefulExpression + an.lrScopePools[alert.Critical] = stateful.NewScopePool(stateful.FindReferenceVariables(n.CritReset.Expression)) } } @@ -406,13 +418,33 @@ func (a *AlertNode) runAlert([]byte) error { a.critsTriggered = &expvar.Int{} a.statMap.Set(statsCritsTriggered, a.critsTriggered) + a.eventsDropped = &expvar.Int{} + a.statMap.Set(statsCritsTriggered, a.critsTriggered) + switch a.Wants() { case pipeline.StreamEdge: for p, ok := a.ins[0].NextPoint(); ok; p, ok = a.ins[0].NextPoint() { a.timer.Start() - var currentLevel AlertLevel + id, err := a.renderID(p.Name, p.Group, p.Tags) + if err != nil { + return err + } + var currentLevel alert.Level if state, ok := a.states[p.Group]; ok { currentLevel = state.currentLevel() + } else { + // Check for pre-existing level on topic. + // Anon Topics do not preserve state as they are deleted when a task stops, + // so we only check the explict topic. 
+ if a.topic != "" { + if state, ok := a.et.tm.AlertService.EventState(a.topic, id); ok { + currentLevel = state.Level + } + } + if currentLevel != alert.OK { + // Update the state with the restored state + a.updateState(p.Time, currentLevel, p.Group) + } } l := a.determineLevel(p.Time, p.Fields, p.Tags, currentLevel) state := a.updateState(p.Time, l, p.Group) @@ -421,7 +453,7 @@ func (a *AlertNode) runAlert([]byte) error { continue } // send alert if we are not OK or we are OK and state changed (i.e recovery) - if l != OKAlert || state.changed { + if l != alert.OK || state.changed { batch := models.Batch{ Name: p.Name, Group: p.Group, @@ -431,23 +463,23 @@ func (a *AlertNode) runAlert([]byte) error { } state.triggered(p.Time) // Suppress the recovery event. - if a.a.NoRecoveriesFlag && l == OKAlert { + if a.a.NoRecoveriesFlag && l == alert.OK { a.timer.Stop() continue } duration := state.duration() - ad, err := a.alertData(p.Name, p.Group, p.Tags, p.Fields, l, p.Time, duration, batch) + event, err := a.event(id, p.Name, p.Group, p.Tags, p.Fields, l, p.Time, duration, batch) if err != nil { return err } - a.handleAlert(ad) + a.handleEvent(event) if a.a.LevelTag != "" || a.a.IdTag != "" { p.Tags = p.Tags.Copy() if a.a.LevelTag != "" { p.Tags[a.a.LevelTag] = l.String() } if a.a.IdTag != "" { - p.Tags[a.a.IdTag] = ad.ID + p.Tags[a.a.IdTag] = event.State.ID } } if a.a.LevelField != "" || a.a.IdField != "" || a.a.DurationField != "" || a.a.MessageField != "" { @@ -456,10 +488,10 @@ func (a *AlertNode) runAlert([]byte) error { p.Fields[a.a.LevelField] = l.String() } if a.a.MessageField != "" { - p.Fields[a.a.MessageField] = ad.Message + p.Fields[a.a.MessageField] = event.State.Message } if a.a.IdField != "" { - p.Fields[a.a.IdField] = ad.ID + p.Fields[a.a.IdField] = event.State.ID } if a.a.DurationField != "" { p.Fields[a.a.DurationField] = int64(duration) @@ -479,21 +511,41 @@ func (a *AlertNode) runAlert([]byte) error { case pipeline.BatchEdge: for b, ok := 
a.ins[0].NextBatch(); ok; b, ok = a.ins[0].NextBatch() { a.timer.Start() + id, err := a.renderID(b.Name, b.Group, b.Tags) + if err != nil { + return err + } if len(b.Points) == 0 { a.timer.Stop() continue } // Keep track of lowest level for any point - lowestLevel := CritAlert + lowestLevel := alert.Critical // Keep track of highest level and point - highestLevel := OKAlert + highestLevel := alert.OK var highestPoint *models.BatchPoint - for i, p := range b.Points { - var currentLevel AlertLevel - if state, ok := a.states[b.Group]; ok { - currentLevel = state.currentLevel() + var currentLevel alert.Level + if state, ok := a.states[b.Group]; ok { + currentLevel = state.currentLevel() + } else { + // Check for pre-existing level on topics + if len(a.handlers) > 0 { + if state, ok := a.et.tm.AlertService.EventState(a.anonTopic, id); ok { + currentLevel = state.Level + } } + if a.topic != "" { + if state, ok := a.et.tm.AlertService.EventState(a.topic, id); ok { + currentLevel = state.Level + } + } + if currentLevel != alert.OK { + // Update the state with the restored state + a.updateState(b.TMax, currentLevel, b.Group) + } + } + for i, p := range b.Points { l := a.determineLevel(p.Time, p.Fields, p.Tags, currentLevel) if l < lowestLevel { lowestLevel = l @@ -512,7 +564,7 @@ func (a *AlertNode) runAlert([]byte) error { } // Create alert Data t := highestPoint.Time - if a.a.AllFlag || l == OKAlert { + if a.a.AllFlag || l == alert.OK { t = b.TMax } @@ -522,23 +574,23 @@ func (a *AlertNode) runAlert([]byte) error { // l == OK and state.changed (aka recovery) // OR // l != OK and flapping/statechanges checkout - if state.changed && l == OKAlert || - (l != OKAlert && + if state.changed && l == alert.OK || + (l != alert.OK && !((a.a.UseFlapping && state.flapping) || (a.a.IsStateChangesOnly && !state.changed && !state.expired))) { state.triggered(t) // Suppress the recovery event. 
- if a.a.NoRecoveriesFlag && l == OKAlert { + if a.a.NoRecoveriesFlag && l == alert.OK { a.timer.Stop() continue } duration := state.duration() - ad, err := a.alertData(b.Name, b.Group, b.Tags, highestPoint.Fields, l, t, duration, b) + event, err := a.event(id, b.Name, b.Group, b.Tags, highestPoint.Fields, l, t, duration, b) if err != nil { return err } - a.handleAlert(ad) + a.handleEvent(event) // Update tags or fields for Level property if a.a.LevelTag != "" || a.a.LevelField != "" || @@ -553,7 +605,7 @@ func (a *AlertNode) runAlert([]byte) error { b.Points[i].Tags[a.a.LevelTag] = l.String() } if a.a.IdTag != "" { - b.Points[i].Tags[a.a.IdTag] = ad.ID + b.Points[i].Tags[a.a.IdTag] = event.State.ID } } if a.a.LevelField != "" || a.a.IdField != "" || a.a.DurationField != "" || a.a.MessageField != "" { @@ -562,10 +614,10 @@ func (a *AlertNode) runAlert([]byte) error { b.Points[i].Fields[a.a.LevelField] = l.String() } if a.a.MessageField != "" { - b.Points[i].Fields[a.a.MessageField] = ad.Message + b.Points[i].Fields[a.a.MessageField] = event.State.Message } if a.a.IdField != "" { - b.Points[i].Fields[a.a.IdField] = ad.ID + b.Points[i].Fields[a.a.IdField] = event.State.ID } if a.a.DurationField != "" { b.Points[i].Fields[a.a.DurationField] = int64(duration) @@ -578,7 +630,7 @@ func (a *AlertNode) runAlert([]byte) error { b.Tags[a.a.LevelTag] = l.String() } if a.a.IdTag != "" { - b.Tags[a.a.IdTag] = ad.ID + b.Tags[a.a.IdTag] = event.State.ID } } } @@ -594,29 +646,48 @@ func (a *AlertNode) runAlert([]byte) error { a.timer.Stop() } } + // Delete the anonymous topic, which will also deregister its handlers + a.et.tm.AlertService.DeleteTopic(a.anonTopic) return nil } -func (a *AlertNode) handleAlert(ad *AlertData) { +func (a *AlertNode) handleEvent(event alert.Event) { a.alertsTriggered.Add(1) - switch ad.Level { - case OKAlert: + switch event.State.Level { + case alert.OK: a.oksTriggered.Add(1) - case InfoAlert: + case alert.Info: a.infosTriggered.Add(1) - case 
WarnAlert: + case alert.Warning: a.warnsTriggered.Add(1) - case CritAlert: + case alert.Critical: a.critsTriggered.Add(1) } - a.logger.Printf("D! %v alert triggered id:%s msg:%s data:%v", ad.Level, ad.ID, ad.Message, ad.Data.Series[0]) - for _, h := range a.handlers { - h(ad) + a.logger.Printf("D! %v alert triggered id:%s msg:%s data:%v", event.State.Level, event.State.ID, event.State.Message, event.Data.Result.Series[0]) + + // If we have anon handlers, emit event to the anonTopic + if len(a.handlers) > 0 { + event.Topic = a.anonTopic + err := a.et.tm.AlertService.Collect(event) + if err != nil { + a.eventsDropped.Add(1) + a.logger.Println("E!", err) + } + } + + // If we have a user define topic, emit event to the topic. + if a.topic != "" { + event.Topic = a.topic + err := a.et.tm.AlertService.Collect(event) + if err != nil { + a.eventsDropped.Add(1) + a.logger.Println("E!", err) + } } } -func (a *AlertNode) determineLevel(now time.Time, fields models.Fields, tags map[string]string, currentLevel AlertLevel) AlertLevel { - if higherLevel, found := a.findFirstMatchLevel(CritAlert, currentLevel-1, now, fields, tags); found { +func (a *AlertNode) determineLevel(now time.Time, fields models.Fields, tags map[string]string, currentLevel alert.Level) alert.Level { + if higherLevel, found := a.findFirstMatchLevel(alert.Critical, currentLevel-1, now, fields, tags); found { return higherLevel } if rse := a.levelResets[currentLevel]; rse != nil { @@ -626,15 +697,15 @@ func (a *AlertNode) determineLevel(now time.Time, fields models.Fields, tags map return currentLevel } } - if newLevel, found := a.findFirstMatchLevel(currentLevel, OKAlert, now, fields, tags); found { + if newLevel, found := a.findFirstMatchLevel(currentLevel, alert.OK, now, fields, tags); found { return newLevel } - return OKAlert + return alert.OK } -func (a *AlertNode) findFirstMatchLevel(start AlertLevel, stop AlertLevel, now time.Time, fields models.Fields, tags map[string]string) (AlertLevel, bool) { - 
if stop < OKAlert { - stop = OKAlert +func (a *AlertNode) findFirstMatchLevel(start alert.Level, stop alert.Level, now time.Time, fields models.Fields, tags map[string]string) (alert.Level, bool) { + if stop < alert.OK { + stop = alert.OK } for l := start; l > stop; l-- { se := a.levels[l] @@ -642,13 +713,13 @@ func (a *AlertNode) findFirstMatchLevel(start AlertLevel, stop AlertLevel, now t continue } if pass, err := EvalPredicate(se, a.scopePools[l], now, fields, tags); err != nil { - a.logger.Printf("E! error evaluating expression for level %v: %s", AlertLevel(l), err) + a.logger.Printf("E! error evaluating expression for level %v: %s", alert.Level(l), err) continue } else if pass { - return AlertLevel(l), true + return alert.Level(l), true } } - return OKAlert, false + return alert.OK, false } func (a *AlertNode) batchToResult(b models.Batch) influxql.Result { @@ -659,39 +730,44 @@ func (a *AlertNode) batchToResult(b models.Batch) influxql.Result { return r } -func (a *AlertNode) alertData( - name string, +func (a *AlertNode) event( + id, name string, group models.GroupID, tags models.Tags, fields models.Fields, - level AlertLevel, + level alert.Level, t time.Time, d time.Duration, b models.Batch, -) (*AlertData, error) { - id, err := a.renderID(name, group, tags) +) (alert.Event, error) { + msg, details, err := a.renderMessageAndDetails(id, name, t, group, tags, fields, level) if err != nil { - return nil, err - } - msg, details, info, err := a.renderMessageAndDetails(id, name, t, group, tags, fields, level) - if err != nil { - return nil, err - } - ad := &AlertData{ - ID: id, - Message: msg, - Details: details, - Time: t, - Duration: d, - Level: level, - Data: a.batchToResult(b), - info: info, + return alert.Event{}, err + } + event := alert.Event{ + Topic: a.anonTopic, + State: alert.EventState{ + ID: id, + Message: msg, + Details: details, + Time: t, + Duration: d, + Level: level, + }, + Data: alert.EventData{ + Name: name, + TaskName: a.et.Task.ID, + Group: 
string(group), + Tags: tags, + Fields: fields, + Result: a.batchToResult(b), + }, } - return ad, nil + return event, nil } type alertState struct { - history []AlertLevel + history []alert.Level idx int flapping bool changed bool @@ -711,26 +787,26 @@ func (a *alertState) duration() time.Duration { // Record that the alert was triggered at time t. func (a *alertState) triggered(t time.Time) { a.lastTriggered = t - // Check if we are being triggered for first time since an OKAlert + // Check if we are being triggered for first time since an alert.OKAlert // If so reset firstTriggered time p := a.idx - 1 if p == -1 { p = len(a.history) - 1 } - if a.history[p] == OKAlert { + if a.history[p] == alert.OK { a.firstTriggered = t } } // Record an event in the alert history. -func (a *alertState) addEvent(level AlertLevel) { +func (a *alertState) addEvent(level alert.Level) { a.changed = a.history[a.idx] != level a.idx = (a.idx + 1) % len(a.history) a.history[a.idx] = level } // Return current level of this state -func (a *alertState) currentLevel() AlertLevel { +func (a *alertState) currentLevel() alert.Level { return a.history[a.idx] } @@ -759,11 +835,11 @@ func (a *alertState) percentChange() float64 { return p } -func (a *AlertNode) updateState(t time.Time, level AlertLevel, group models.GroupID) *alertState { +func (a *AlertNode) updateState(t time.Time, level alert.Level, group models.GroupID) *alertState { state, ok := a.states[group] if !ok { state = &alertState{ - history: make([]AlertLevel, a.a.History), + history: make([]alert.Level, a.a.History), } a.states[group] = state } @@ -843,7 +919,7 @@ func (a *AlertNode) renderID(name string, group models.GroupID, tags models.Tags return id.String(), nil } -func (a *AlertNode) renderMessageAndDetails(id, name string, t time.Time, group models.GroupID, tags models.Tags, fields models.Fields, level AlertLevel) (string, string, detailsInfo, error) { +func (a *AlertNode) renderMessageAndDetails(id, name string, t time.Time, 
group models.GroupID, tags models.Tags, fields models.Fields, level alert.Level) (string, string, error) { g := string(group) if group == models.NilGroup { g = "nil" @@ -871,7 +947,7 @@ func (a *AlertNode) renderMessageAndDetails(id, name string, t time.Time, group err := a.messageTmpl.Execute(tmpBuffer, minfo) if err != nil { - return "", "", detailsInfo{}, err + return "", "", err } msg := tmpBuffer.String() @@ -884,334 +960,9 @@ func (a *AlertNode) renderMessageAndDetails(id, name string, t time.Time, group tmpBuffer.Reset() err = a.detailsTmpl.Execute(tmpBuffer, dinfo) if err != nil { - return "", "", dinfo, err + return "", "", err } details := tmpBuffer.String() - return msg, details, dinfo, nil -} - -//-------------------------------- -// Alert handlers - -func (a *AlertNode) handlePost(post *pipeline.PostHandler, ad *AlertData) { - bodyBuffer := a.bufPool.Get().(*bytes.Buffer) - defer func() { - bodyBuffer.Reset() - a.bufPool.Put(bodyBuffer) - }() - - err := json.NewEncoder(bodyBuffer).Encode(ad) - if err != nil { - a.logger.Println("E! failed to marshal alert data json", err) - return - } - - resp, err := http.Post(post.URL, "application/json", bodyBuffer) - if err != nil { - a.logger.Println("E! failed to POST batch", err) - return - } - - if resp == nil { - a.logger.Println("E! failed to POST batch response is nil") - return - } - - // close http response otherwise tcp socket will be 'ESTABLISHED' in a long time - defer resp.Body.Close() - return -} - -func (a *AlertNode) handleTcp(tcp *pipeline.TcpHandler, ad *AlertData) { - buf := a.bufPool.Get().(*bytes.Buffer) - defer func() { - buf.Reset() - a.bufPool.Put(buf) - }() - - err := json.NewEncoder(buf).Encode(ad) - if err != nil { - a.logger.Println("E! failed to marshal alert data json", err) - return - } - - conn, err := net.Dial("tcp", tcp.Address) - if err != nil { - a.logger.Println("E! 
failed to connect", err) - return - } - defer conn.Close() - - buf.WriteByte('\n') - conn.Write(buf.Bytes()) - - return -} - -func (a *AlertNode) handleEmail(email *pipeline.EmailHandler, ad *AlertData) { - if err := a.et.tm.SMTPService.SendMail(email.ToList, ad.Message, ad.Details); err != nil { - a.logger.Println("E! failed to send email:", err) - } -} - -func (a *AlertNode) handleExec(ex *pipeline.ExecHandler, ad *AlertData) { - b, err := json.Marshal(ad) - if err != nil { - a.logger.Println("E! failed to marshal alert data json", err) - return - } - cmd := exec.Command(ex.Command[0], ex.Command[1:]...) - cmd.Stdin = bytes.NewBuffer(b) - var out bytes.Buffer - cmd.Stdout = &out - cmd.Stderr = &out - err = cmd.Run() - if err != nil { - a.logger.Println("E! error running alert command:", err, out.String()) - return - } -} - -func (a *AlertNode) handleLog(l *pipeline.LogHandler, ad *AlertData) { - b, err := json.Marshal(ad) - if err != nil { - a.logger.Println("E! failed to marshal alert data json", err) - return - } - f, err := os.OpenFile(l.FilePath, os.O_WRONLY|os.O_APPEND|os.O_CREATE, os.FileMode(l.Mode)) - if err != nil { - a.logger.Println("E! failed to open file for alert logging", err) - return - } - defer f.Close() - n, err := f.Write(b) - if n != len(b) || err != nil { - a.logger.Println("E! failed to write to file", err) - } - n, err = f.Write([]byte("\n")) - if n != 1 || err != nil { - a.logger.Println("E! failed to write to file", err) - } -} - -func (a *AlertNode) handleVictorOps(vo *pipeline.VictorOpsHandler, ad *AlertData) { - var messageType string - switch ad.Level { - case OKAlert: - messageType = "RECOVERY" - default: - messageType = ad.Level.String() - } - err := a.et.tm.VictorOpsService.Alert( - vo.RoutingKey, - messageType, - ad.Message, - ad.ID, - ad.Time, - ad.Data, - ) - if err != nil { - a.logger.Println("E! 
failed to send alert data to VictorOps:", err) - return - } -} - -func (a *AlertNode) handlePagerDuty(pd *pipeline.PagerDutyHandler, ad *AlertData) { - err := a.et.tm.PagerDutyService.Alert( - pd.ServiceKey, - ad.ID, - ad.Message, - ad.Level, - ad.Data, - ) - if err != nil { - a.logger.Println("E! failed to send alert data to PagerDuty:", err) - return - } -} - -func (a *AlertNode) handleSensu(sensu *pipeline.SensuHandler, ad *AlertData) { - err := a.et.tm.SensuService.Alert( - ad.ID, - ad.Message, - ad.Level, - ) - if err != nil { - a.logger.Println("E! failed to send alert data to Sensu:", err) - return - } -} - -func (a *AlertNode) handleSlack(slack *pipeline.SlackHandler, ad *AlertData) { - err := a.et.tm.SlackService.Alert( - slack.Channel, - ad.Message, - slack.Username, - slack.IconEmoji, - ad.Level, - ) - if err != nil { - a.logger.Println("E! failed to send alert data to Slack:", err) - return - } -} - -func (a *AlertNode) handleTelegram(telegram *pipeline.TelegramHandler, ad *AlertData) { - err := a.et.tm.TelegramService.Alert( - telegram.ChatId, - telegram.ParseMode, - ad.Message, - telegram.IsDisableWebPagePreview, - telegram.IsDisableNotification, - ) - if err != nil { - a.logger.Println("E! failed to send alert data to Telegram:", err) - return - } -} - -func (a *AlertNode) handleHipChat(hipchat *pipeline.HipChatHandler, ad *AlertData) { - err := a.et.tm.HipChatService.Alert( - hipchat.Room, - hipchat.Token, - ad.Message, - ad.Level, - ) - if err != nil { - a.logger.Println("E! 
failed to send alert data to HipChat:", err) - return - } -} - -type alertaHandler struct { - *pipeline.AlertaHandler - - resourceTmpl *text.Template - eventTmpl *text.Template - environmentTmpl *text.Template - valueTmpl *text.Template - groupTmpl *text.Template -} - -func (a *AlertNode) handleAlerta(alerta alertaHandler, ad *AlertData) { - var severity string - - switch ad.Level { - case OKAlert: - severity = "ok" - case InfoAlert: - severity = "informational" - case WarnAlert: - severity = "warning" - case CritAlert: - severity = "critical" - default: - severity = "indeterminate" - } - var buf bytes.Buffer - err := alerta.resourceTmpl.Execute(&buf, ad.info) - if err != nil { - a.logger.Printf("E! failed to evaluate Alerta Resource template %s", alerta.Resource) - return - } - resource := buf.String() - buf.Reset() - - type eventData struct { - idInfo - ID string - } - data := eventData{ - idInfo: ad.info.messageInfo.idInfo, - ID: ad.ID, - } - err = alerta.eventTmpl.Execute(&buf, data) - if err != nil { - a.logger.Printf("E! failed to evaluate Alerta Event template %s", alerta.Event) - return - } - event := buf.String() - buf.Reset() - - err = alerta.environmentTmpl.Execute(&buf, ad.info) - if err != nil { - a.logger.Printf("E! failed to evaluate Alerta Environment template %s", alerta.Environment) - return - } - environment := buf.String() - buf.Reset() - - err = alerta.groupTmpl.Execute(&buf, ad.info) - if err != nil { - a.logger.Printf("E! failed to evaluate Alerta Group template %s", alerta.Group) - return - } - group := buf.String() - buf.Reset() - - err = alerta.valueTmpl.Execute(&buf, ad.info) - if err != nil { - a.logger.Printf("E! 
failed to evaluate Alerta Value template %s", alerta.Value) - return - } - value := buf.String() - - service := alerta.Service - if len(alerta.Service) == 0 { - service = []string{ad.info.Name} - } - - err = a.et.tm.AlertaService.Alert( - alerta.Token, - resource, - event, - environment, - severity, - group, - value, - ad.Message, - alerta.Origin, - service, - ad.Data, - ) - if err != nil { - a.logger.Println("E! failed to send alert data to Alerta:", err) - return - } -} - -func (a *AlertNode) handleOpsGenie(og *pipeline.OpsGenieHandler, ad *AlertData) { - var messageType string - switch ad.Level { - case OKAlert: - messageType = "RECOVERY" - default: - messageType = ad.Level.String() - } - - err := a.et.tm.OpsGenieService.Alert( - og.TeamsList, - og.RecipientsList, - messageType, - ad.Message, - ad.ID, - ad.Time, - ad.Data, - ) - if err != nil { - a.logger.Println("E! failed to send alert data to OpsGenie:", err) - return - } -} - -func (a *AlertNode) handleTalk(talk *pipeline.TalkHandler, ad *AlertData) { - err := a.et.tm.TalkService.Alert( - ad.ID, - ad.Message, - ) - if err != nil { - a.logger.Println("E! failed to send alert data to Talk:", err) - return - } + return msg, details, nil } diff --git a/alert/DESIGN.md b/alert/DESIGN.md new file mode 100644 index 000000000..b381a31c6 --- /dev/null +++ b/alert/DESIGN.md @@ -0,0 +1,114 @@ +# Kapacitor Alerting + +Kapacitor enables a user to define and trigger alerts. +Alerts can be sent to various backend handlers. + +## Alerts vs Events + +An alert is defined via an [AlertNode](https://docs.influxdata.com/kapacitor/latest/nodes/alert_node/) in a TICKscript. +Each alert generates multiple events. + +## Topics + +Each alert belongs to a `topic`, if no topic is specified an unique topic is generated for the alert. +A topic may contain multiple alerts, enabling you to group your alerts into various topics. + + +## Alert State + +Kapacitor exposes the state of the alerts via topics in the HTTP API. 
+The maximum level of all events within a topic as well as the state of each event within the topic can be queried.
+See the API docs for more details.
+
+## Two ways to set up alert handlers
+
+There are two ways to set up handlers for your alerts in Kapacitor.
+The first method is designed to be quick and easy to configure.
+The second method takes a bit more setup but provides more control over the handlers.
+
+### Direct Handlers
+
+You can directly define handlers in TICKscript.
+Doing so dynamically creates a topic and configures the defined handlers on the topic.
+
+This method is useful if you already have a system that manages your alert events for you.
+
+
+### Alert Events Subsystem
+
+The alert event subsystem follows a publish/subscribe model giving you fine-grained control over how alert events are handled.
+This is where alert topics show their strength.
+Alerts publish events to their topics and handlers subscribe to the various topics.
+
+The alert subsystem allows you to do various different actions with your alerts:
+
+* Aggregate Alerts into a single alert containing summary information.
+* Rate limit alerts
+* Easily manage which handlers handle which alerts without modifying your Kapacitor tasks.
+
+
+#### Using the Alert Event Subsystem
+
+By specifying a `topic` for an alert, all events from the alert will be sent to that topic.
+
+Example TICKscript:
+
+```go
+stream
+    |from()
+        .measurement('cpu')
+    |window()
+        .period(1m)
+        .every(1m)
+    |mean('usage')
+    |alert()
+        .topic('cpu')
+        .warn(lambda: "mean" > 70)
+        .crit(lambda: "mean" > 80)
+        // Send this alert directly to slack.
+        .slack()
+```
+
+Then alert handlers can be configured to subscribe to the events.
+These alert handlers will be configured via the API.
+Use yaml/json to define the alert handlers.
+
+```yaml
+id: my_handler
+
+topics:
+  - cpu
+  - mem
+
+actions:
+  - kind: aggregate
+    options:
+      groupBy: id
+      interval: 1m
+  - kind: throttle
+    options:
+      count: 10
+      every: 5m
+  - kind: publish
+    options:
+      topics: [ throttled_aggregated ]
+  - kind: pagerDuty
+    options:
+      serviceKey: XXX
+```
+
+```json
+{
+    "id": "my_handler",
+    "topics": ["cpu", "mem"],
+    "actions": [
+        {"kind":"aggregate", "options": {"groupBy":"id","interval":"1m"}},
+        {"kind":"throttle", "options": {"count":10,"every":"5m"}},
+        {"kind":"publish", "options": {"topics":["throttled_aggregated"]}},
+        {"kind":"pagerDuty", "options": {"serviceKey":"XXX"}}
+    ]
+}
+```
+
+
+
diff --git a/alert/HANDLERS.md b/alert/HANDLERS.md
new file mode 100644
index 000000000..d662c27ce
--- /dev/null
+++ b/alert/HANDLERS.md
@@ -0,0 +1,439 @@
+# Alert Handlers
+
+This document lays out how to implement an alert handler within Kapacitor.
+
+## Components of a Handler
+
+The Handler interface in this package is simple:
+
+```go
+type Handler interface {
+	// Handle is responsible for taking action on the event.
+	Handle(event Event)
+}
+```
+
+In order to implement a handler you must implement the above interface.
+But there is much more to a handler beyond its implementation.
+A complete handler implementation needs to provide several components, listed below:
+
+* An implementation of the Handler interface
+* A service for creating instances of the handler implementation.
+* A configuration struct for configuring the service via configuration files.
+* A definition struct for how a handler is defined via a TICKscript
+* A configuration struct for how a handler is defined via the HTTP API.
+* A test options struct for testing the handler.
+
+Most of these components are defined in a single package named after the handler under the `services` parent package.
+
+## Example
+
+Let's walk through writing a simple example handler for the `Foo` alerting service.
+The Foo service is a simple chat room application.
+Messages can be sent to a specific room via an HTTP API. + +### The Foo Service + +First steps are to create a package where most of the implementation will live. +Create a directory relative to the root of the Kapacitor repo named `services/foo`. + +Next create a file for the configuration of the service named `services/foo/config.go`. +In the file create a struct for now named `Config`. + +```go +package foo + +import "errors" + +// Config declares the needed configuration options for the service Foo. +type Config struct { + // Enabled indicates whether the service should be enabled. + Enabled bool `toml:"enabled" override:"enabled"` + // URL of the Foo server. + URL string `toml:"url" override:"url"` + // Room specifies the default room to use for all handlers. + Room string `toml:"room" override:"room"` +} + +func NewConfig() Config { + return Config{} +} + +func (c Config) Validate() error { + if c.Enabled && c.URL == "" { + return errors.New("must specify the Foo server URL") + } + return nil +} +``` + +The two field tags `toml` and `override` are used for fields of the Config structure to allow the structure to be decoded from a toml file and overriden using the Kapacitor config HTTP API. +You will see that many structs make use of field tags, do not worry to much about how they are implemented at the moment. + +Create a file for the service implementation named `services/foo/service.go`. +A service is a type in go that can be opened, closed and updated; while providing whatever other API is needed for the service. + +Place the skeleton service type and method below in the service.go file. 
+ +```go +package foo + +import ( + "bytes" + "encoding/json" + "fmt" + "log" + "net/http" + "sync/atomic" + + "github.com/influxdata/kapacitor/alert" +) + +type Service struct { + configValue atomic.Value + logger *log.Logger +} + +func NewService(c Config, l *log.Logger) *Service { + s := &Service{ + logger: l, + } + s.configValue.Store(c) + return s +} + +func (s *Service) Open() error { + // Perform any initialization needed here + return nil +} + +func (s *Service) Close() error { + // Perform any actions needed to properly close the service here. + // For example signal and wait for all go routines to finish. + return nil +} + +func (s *Service) Update(newConfig []interface{}) error { + if l := len(newConfig); l != 1 { + return fmt.Errorf("expected only one new config object, got %d", l) + } + if c, ok := newConfig[0].(Config); !ok { + return fmt.Errorf("expected config object to be of type %T, got %T", c, newConfig[0]) + } else { + s.configValue.Store(c) + } + return nil +} + +// config loads the config struct stored in the configValue field. +func (s *Service) config() Config { + return s.configValue.Load().(Config) +} + +// Alert sends a message to the specified room. +func (s *Service) Alert(room, message string) error { + c := s.config() + if !c.Enabled { + return errors.New("service is not enabled") + } + type postMessage struct { + Room string `json:"room"` + Message string `json:"message"` + } + data, err := json.Marshal(postMessage{ + Room: room, + Message: message, + }) + if err != nil { + return err + } + r, err := http.Post(c.URL, "application/json", bytes.NewReader(data)) + if err != nil { + return err + } + r.Body.Close() + if r.StatusCode != http.StatusOK { + return fmt.Errorf("unexpected response code %d from Foo service", r.StatusCode) + } + return nil +} +``` + + +At this point we have a minimal Foo service that can be used to send message to a Foo server. 
+Since the intent of our service is to provide an alert Handler we need to define a method for creating new handlers. +A good practice is to define a struct that contains all needed information to create a new handler. +Then provide a method on the service that accepts that configuration struct and returns a handler. + +Add the following snippet to the `service.go` file. + +```go +type HandlerConfig struct { + //Room specifies the destination room for the chat messages. + Room string `mapstructure:"room"` +} + +// handler provides the implementation of the alert.Handler interface for the Foo service. +type handler struct { + s *Service + c HandlerConfig + logger *log.Logger +} + +// DefaultHandlerConfig returns a HandlerConfig struct with defaults applied. +func (s *Service) DefaultHandlerConfig() HandlerConfig { + // return a handler config populated with the default room from the service config. + c := s.config() + return HandlerConfig{ + Room: c.Room, + } +} + +// Handler creates a handler from the config. +func (s *Service) Handler(c HandlerConfig, l *log.Logger) alert.Handler { + // handlers can operate in differing contexts, as a result a logger is passed + // in so that logs from this handler can be correctly associatied with a given context. + return &handler{ + s: s, + c: c, + logger: l, + } +} +``` + +Finally we need to implement the Handler interface on the handler type. +Add the following snippet to the `service.go` file. + +```go +// Handle takes an event and posts its message to the Foo service chat room. +func (h *handler) Handle(event alert.Event) { + if err := h.s.Alert(h.c.Room, event.State.Message); err != nil { + h.logger.Println("E! failed to handle event", err) + } +} +``` + +At this point the Foo service can post message to a Foo server. +The last bit required to complete the service is to enable the service to be tested dynamically by a user. 
+Kapacitor contains a service-tests API endpoint that enables a user to perform basic "hello world" tests against a service.
+To leverage this system we need to implement a few methods on the service.
+
+Add the following snippet to the `service.go` file.
+
+```go
+type testOptions struct {
+	Room    string `json:"room"`
+	Message string `json:"message"`
+}
+
+func (s *Service) TestOptions() interface{} {
+	c := s.config()
+	return &testOptions{
+		Room:    c.Room,
+		Message: "test foo message",
+	}
+}
+
+func (s *Service) Test(o interface{}) error {
+	options, ok := o.(*testOptions)
+	if !ok {
+		return fmt.Errorf("unexpected options type %T", options)
+	}
+	return s.Alert(options.Room, options.Message)
+}
+```
+
+With that we have a functioning Foo service that can be configured, tested and consumed as an alert handler.
+Now we need to let the rest of the Kapacitor code know that our service exists.
+
+### Integrating the service
+
+There are a few integration points that need to be addressed:
+
+* The Kapacitor server needs to know about the service.
+* The TICKscript syntax needs to know how to define a Foo handler.
+* The alert node needs to know how to create a handler from the TICKscript definition.
+* The alert service needs to know how to create a handler from the HTTP API.
+
+We will address these integration points one at a time.
+
+#### Server
+
+To tell the Kapacitor server about our service we need to first add its config to the main server configuration struct.
+In the alert handlers section of the Config struct in `server/config.go` add the following line.
+
+```
+Foo foo.Config `toml:"foo" override:"foo"`
+```
+
+In the `NewConfig` function in `server/config.go` add the following line.
+
+```
+c.Foo = foo.NewConfig()
+```
+
+In the `Validate()` method add the following lines.
+
+```
+if err := c.Foo.Validate(); err != nil {
+	return err
+}
+```
+
+
+That should do it for the configuration integration.
+Next we need to add the Foo service to the list of services. +In the file `server/server.go` in the `NewServer` method add the following line after the existing alert handlers. + +``` +s.appendFooService() +``` + +Then later in the file define the `appendFooService` method. + +```go +func (s *Server) appendFooService() { + c := s.config.Foo + l := s.LogService.NewLogger("[foo] ", log.LstdFlags) + srv := foo.NewService(c, l) + + s.TaskMaster.FooService = srv + s.AlertService.FooService = srv + + s.SetDynamicService("foo", srv) + s.AppendService("foo", srv) +} +``` + +You may have noticed that we set the `FooService` field value in the above method. +Let's define those fields in the `TaskMaster` and `AlertService` types. +In `task_master.go` add the following line after the other services. + +``` +FooService interface { + DefaultHandlerConfig() foo.HandlerConfig + Handler(foo.HandlerConfig, *log.Logger) alert.Handler +} +``` + +And update the `New` method to copy over your service. + +``` +n.FooService = tm.FooService +``` + +In `services/alert/service.go` add the following lines after the other serivces. + +``` +FooService interface { + DefaultHandlerConfig() foo.HandlerConfig + Handler(foo.HandlerConfig, *log.Logger) alert.Handler +} +``` + + +With those additions the server now knows about the Foo service and will start it up during Kapacitor's startup procedure. + +#### TICKscript + +In order for your handler to be defined in TICKscripts we need to define a new `foo` property on the alert node. +In `pipeline/alert.go` a description of your service to the comment and add these line to the AlertNode struct after the other handlers. + + +``` +// Send alert to Foo. +// tick:ignore +FooHandlers []*FooHandler `tick:"Foo"` +``` + +Add these lines later on in the file `pipeline/alert.go`: + +``` +// Send alert to a Foo server. 
+// tick:property +func (a *AlertNode) Foo() *FooHandler { + f := &FooHandler{ + AlertNode: a, + } + a.FooHandlers = append(a.FooHandlers, f) + return f +} + +// tick:embedded:AlertNode.Foo +type FooHandler struct { + *AlertNode + + // The room for the messages. + // Defaults to the room in the configuration if empty. + Room string +} +``` + +With those types in TICKscript you can now do the following: + +```go +|alert() + .foo() + .room('alerts') +``` + +#### Alert Node + +Now we need to instruct the alert node on how to create a Foo handler from the TICKscript definition. + +In file `alert.go` in the `newAlertNode` function after the other handlers add these lines: + +```go +for _, f := range n.FooHandlers { + c := et.tm.FooService.DefaultHandlerConfig() + if f.Room != "" { + c.Room = f.Room + } + h := et.tm.FooService.Handler(c, l) + an.handlers = append(an.handlers, h) +} +``` + +#### Alert Service + +In addition to TICKscript a user can use the Kapacitor HTTP API to define alert handlers. +We need to define a mapping in the alert service for the new Foo handler. + +In the file `services/alert/service.go` add the following case to the switch statement in the `createHandlerActionFromSpec` method. + +``` +case "foo": + c := s.FooService.DefaultHandlerConfig() + err = decodeOptions(spec.Options, &c) + if err != nil { + return + } + h := s.FooService.Handler(c, s.logger) + ha = newPassThroughHandler(h) +``` + + +With that we are done! + +### Example Branch + +That is a lot to get right and there are quite a few touch points. +To make getting all the boiler plate code in place easier we have an example branch and PR up that contains all of the changes laid out in the document. + +You can find the PR [here](https://github.com/influxdata/kapacitor/pull/1107) and the branch [here](https://github.com/influxdata/kapacitor/tree/example-alert-handler). +Feel free to checkout the branch and rebase to jump start your contribution. 
+ +### Tests + +In all of this we haven't mentioned tests. +A new service will need to be tested before it is merged into master. +There are relevant tests for all aspects we have touched on. +There are two locations where the behavior of an alert handler service is verified. + +* Integration tests can be found in the `integrations` package. Tests the specifics of using the service. +* End to end tests are found in the `server/server_test.go` file. Tests that the service has been properly integrated with the server. + + +For completeness the example branch does contain tests. + + diff --git a/alert/doc.go b/alert/doc.go new file mode 100644 index 000000000..5b52e043e --- /dev/null +++ b/alert/doc.go @@ -0,0 +1,3 @@ +// Alert provides a framework for tracking events in a publish subscribe system. +// Events are published to topics and handlers consume the events from the topics. +package alert diff --git a/alert/topics.go b/alert/topics.go new file mode 100644 index 000000000..13203674a --- /dev/null +++ b/alert/topics.go @@ -0,0 +1,432 @@ +package alert + +import ( + "fmt" + "log" + "path" + "sort" + "sync" +) + +const ( + // eventBufferSize is the number of events to buffer to each handler per topic. 
+ eventBufferSize = 100 +) + +type Topics struct { + mu sync.RWMutex + + topics map[string]*Topic + + logger *log.Logger +} + +func NewTopics(l *log.Logger) *Topics { + s := &Topics{ + topics: make(map[string]*Topic), + logger: l, + } + return s +} + +func (s *Topics) Open() error { + return nil +} + +func (s *Topics) Close() error { + s.mu.Lock() + defer s.mu.Unlock() + for topic, t := range s.topics { + t.close() + delete(s.topics, topic) + } + return nil +} + +func (s *Topics) Topic(id string) (*Topic, bool) { + s.mu.RLock() + t, ok := s.topics[id] + s.mu.RUnlock() + return t, ok +} + +func (s *Topics) RestoreTopic(id string, eventStates map[string]EventState) { + s.mu.Lock() + defer s.mu.Unlock() + t, ok := s.topics[id] + if !ok { + t = newTopic(id) + s.topics[id] = t + } + t.restoreEventStates(eventStates) +} + +func (s *Topics) EventState(topic, event string) (EventState, bool) { + s.mu.RLock() + t, ok := s.topics[topic] + s.mu.RUnlock() + if !ok { + return EventState{}, false + } + return t.EventState(event) +} + +func (s *Topics) Collect(event Event) error { + s.mu.RLock() + topic := s.topics[event.Topic] + s.mu.RUnlock() + + if topic == nil { + // Create the empty topic + s.mu.Lock() + // Check again if the topic was created, now that we have the write lock + topic = s.topics[event.Topic] + if topic == nil { + topic = newTopic(event.Topic) + s.topics[event.Topic] = topic + } + s.mu.Unlock() + } + + return topic.handleEvent(event) +} + +func (s *Topics) DeleteTopic(topic string) { + s.mu.Lock() + t := s.topics[topic] + delete(s.topics, topic) + s.mu.Unlock() + if t != nil { + t.close() + } +} + +func (s *Topics) RegisterHandler(topics []string, h Handler) { + if len(topics) == 0 || h == nil { + return + } + + s.mu.Lock() + defer s.mu.Unlock() + + for _, topic := range topics { + if _, ok := s.topics[topic]; !ok { + s.topics[topic] = newTopic(topic) + } + s.topics[topic].addHandler(h) + } +} + +func (s *Topics) DeregisterHandler(topics []string, h Handler) { 
+ if len(topics) == 0 || h == nil { + return + } + + s.mu.Lock() + defer s.mu.Unlock() + + for _, topic := range topics { + s.topics[topic].removeHandler(h) + } +} + +func (s *Topics) ReplaceHandler(oldTopics, newTopics []string, oldH, newH Handler) { + s.mu.Lock() + defer s.mu.Unlock() + + for _, topic := range oldTopics { + s.topics[topic].removeHandler(oldH) + } + + for _, topic := range newTopics { + if _, ok := s.topics[topic]; !ok { + s.topics[topic] = newTopic(topic) + } + s.topics[topic].addHandler(newH) + } +} + +// TopicStatus returns the max alert level for each topic matching 'pattern', not returning +// any topics with max alert levels less severe than 'minLevel' +func (s *Topics) TopicStatus(pattern string, minLevel Level) map[string]Level { + s.mu.RLock() + res := make(map[string]Level, len(s.topics)) + for _, topic := range s.topics { + if !match(pattern, topic.ID()) { + continue + } + level := topic.MaxLevel() + if level >= minLevel { + res[topic.ID()] = level + } + } + s.mu.RUnlock() + return res +} + +// TopicStatusDetails is similar to TopicStatus, but will additionally return +// at least 'minLevel' severity +func (s *Topics) TopicStatusEvents(pattern string, minLevel Level) map[string]map[string]EventState { + s.mu.RLock() + topics := make([]*Topic, 0, len(s.topics)) + for _, topic := range s.topics { + if topic.MaxLevel() >= minLevel && match(pattern, topic.id) { + topics = append(topics, topic) + } + } + s.mu.RUnlock() + + res := make(map[string]map[string]EventState, len(topics)) + + for _, topic := range topics { + res[topic.ID()] = topic.EventStates(minLevel) + } + + return res +} + +func match(pattern, id string) bool { + if pattern == "" { + return true + } + matched, _ := path.Match(pattern, id) + return matched +} + +type Topic struct { + id string + + mu sync.RWMutex + + events map[string]*EventState + sorted []*EventState + + handlers []*bufHandler +} + +func newTopic(id string) *Topic { + return &Topic{ + id: id, + events: 
make(map[string]*EventState), + } +} +func (t *Topic) ID() string { + return t.id +} + +func (t *Topic) MaxLevel() Level { + level := OK + t.mu.RLock() + if len(t.sorted) > 0 { + level = t.sorted[0].Level + } + t.mu.RUnlock() + return level +} + +func (t *Topic) addHandler(h Handler) { + t.mu.Lock() + defer t.mu.Unlock() + for _, cur := range t.handlers { + if cur.Equal(h) { + return + } + } + hdlr := newHandler(h) + t.handlers = append(t.handlers, hdlr) +} + +func (t *Topic) removeHandler(h Handler) { + t.mu.Lock() + defer t.mu.Unlock() + for i := 0; i < len(t.handlers); i++ { + if t.handlers[i].Equal(h) { + // Close handler + t.handlers[i].Close() + if i < len(t.handlers)-1 { + t.handlers[i] = t.handlers[len(t.handlers)-1] + } + t.handlers = t.handlers[:len(t.handlers)-1] + break + } + } +} + +func (t *Topic) restoreEventStates(eventStates map[string]EventState) { + t.mu.Lock() + defer t.mu.Unlock() + t.events = make(map[string]*EventState, len(eventStates)) + t.sorted = make([]*EventState, 0, len(eventStates)) + for id, state := range eventStates { + e := new(EventState) + *e = state + t.events[id] = e + t.sorted = append(t.sorted, e) + } + sort.Sort(sortedStates(t.sorted)) +} + +func (t *Topic) EventStates(minLevel Level) map[string]EventState { + t.mu.RLock() + events := make(map[string]EventState, len(t.sorted)) + for _, e := range t.sorted { + if e.Level < minLevel { + break + } + events[e.ID] = *e + } + t.mu.RUnlock() + return events +} + +func (t *Topic) EventState(event string) (EventState, bool) { + t.mu.RLock() + state, ok := t.events[event] + t.mu.RUnlock() + if ok { + return *state, true + } + return EventState{}, false +} + +func (t *Topic) close() { + t.mu.Lock() + defer t.mu.Unlock() + // Close all handlers + for _, h := range t.handlers { + h.Close() + } + t.handlers = nil +} + +func (t *Topic) handleEvent(event Event) error { + t.updateEvent(event.State) + t.mu.RLock() + defer t.mu.RUnlock() + + // Handle event + var errs multiError + for _, h := 
range t.handlers { + err := h.Handle(event) + if err != nil { + errs = append(errs, err) + } + } + if len(errs) != 0 { + return errs + } + return nil +} + +// updateEvent will store the latest state for the given ID. +func (t *Topic) updateEvent(state EventState) { + var needSort bool + t.mu.Lock() + defer t.mu.Unlock() + cur := t.events[state.ID] + if cur == nil { + needSort = true + cur = new(EventState) + t.events[state.ID] = cur + t.sorted = append(t.sorted, cur) + } + needSort = needSort || cur.Level != state.Level + + *cur = state + + if needSort { + sort.Sort(sortedStates(t.sorted)) + } +} + +type sortedStates []*EventState + +func (e sortedStates) Len() int { return len(e) } +func (e sortedStates) Swap(i int, j int) { e[i], e[j] = e[j], e[i] } +func (e sortedStates) Less(i int, j int) bool { + if e[i].Level > e[j].Level { + return true + } + return e[i].ID < e[j].ID +} + +// bufHandler wraps a Handler implementation in order to provide buffering and non-blocking event handling. +type bufHandler struct { + h Handler + events chan Event + aborting chan struct{} + wg sync.WaitGroup +} + +func newHandler(h Handler) *bufHandler { + hdlr := &bufHandler{ + h: h, + events: make(chan Event, eventBufferSize), + aborting: make(chan struct{}), + } + hdlr.wg.Add(1) + go func() { + defer hdlr.wg.Done() + hdlr.run() + }() + return hdlr +} + +func (h *bufHandler) Equal(o Handler) (b bool) { + defer func() { + // Recover in case the interface concrete type is not a comparable type. 
+ r := recover() + if r != nil { + b = false + } + }() + b = h.h == o + return +} + +func (h *bufHandler) Close() { + close(h.events) + h.wg.Wait() +} + +func (h *bufHandler) Abort() { + close(h.aborting) + h.wg.Wait() +} + +func (h *bufHandler) Handle(event Event) error { + select { + case h.events <- event: + return nil + default: + return fmt.Errorf("failed to deliver event %q to handler", event.State.ID) + } +} + +func (h *bufHandler) run() { + for { + select { + case event, ok := <-h.events: + if !ok { + return + } + h.h.Handle(event) + case <-h.aborting: + return + } + } +} + +// multiError is a list of errors. +type multiError []error + +func (e multiError) Error() string { + if len(e) == 1 { + return e[0].Error() + } + msg := "multiple errors:" + for _, err := range e { + msg += "\n" + err.Error() + } + return msg +} diff --git a/alert/types.go b/alert/types.go new file mode 100644 index 000000000..028ea54fd --- /dev/null +++ b/alert/types.go @@ -0,0 +1,145 @@ +package alert + +import ( + "bytes" + "fmt" + "strings" + "time" + + "github.com/influxdata/influxdb/influxql" +) + +type Event struct { + Topic string + State EventState + Data EventData +} + +func (e Event) TemplateData() TemplateData { + return TemplateData{ + ID: e.State.ID, + Message: e.State.Message, + Level: e.State.Level.String(), + Time: e.State.Time, + Duration: e.State.Duration, + Name: e.Data.Name, + TaskName: e.Data.TaskName, + Group: e.Data.Group, + Tags: e.Data.Tags, + Fields: e.Data.Fields, + } +} + +type Handler interface { + // Handle is responsible for taking action on the event. + Handle(event Event) +} + +type EventState struct { + ID string + Message string + Details string + Time time.Time + Duration time.Duration + Level Level +} + +type EventData struct { + // Measurement name + Name string + + // TaskName is the name of the task that generated this event. + TaskName string + + // Concatenation of all group-by tags of the form [key=value,]+. 
+ // If not groupBy is performed equal to literal 'nil' + Group string + + // Map of tags + Tags map[string]string + + // Fields of alerting data point. + Fields map[string]interface{} + + Result influxql.Result +} + +// TemplateData is a structure containing all information available to use in templates for an Event. +type TemplateData struct { + // The ID of the alert. + ID string + + // The Message of the Alert + Message string + + // Alert Level, one of: INFO, WARNING, CRITICAL. + Level string + + // Time the event occurred. + Time time.Time + + // Duration of the event + Duration time.Duration + + // Measurement name + Name string + + // Task name + TaskName string + + // Concatenation of all group-by tags of the form [key=value,]+. + // If not groupBy is performed equal to literal 'nil' + Group string + + // Map of tags + Tags map[string]string + + // Fields of alerting data point. + Fields map[string]interface{} +} + +type Level int + +const ( + OK Level = iota + Info + Warning + Critical + maxLevel +) + +const levelStrings = "OKINFOWARNINGCRITICAL" + +var levelBytes = []byte(levelStrings) + +var levelOffsets = []int{0, 2, 6, 13, 21} + +func (l Level) String() string { + if l < maxLevel { + return levelStrings[levelOffsets[l]:levelOffsets[l+1]] + } + return "unknown" +} + +func (l Level) MarshalText() ([]byte, error) { + return []byte(l.String()), nil +} + +func (l *Level) UnmarshalText(text []byte) error { + idx := bytes.Index(levelBytes, text) + if idx >= 0 { + for i := 0; i < int(maxLevel); i++ { + if idx == levelOffsets[i] { + *l = Level(i) + return nil + } + } + } + + return fmt.Errorf("unknown alert level '%s'", text) +} + +func ParseLevel(s string) (l Level, err error) { + err = l.UnmarshalText([]byte(strings.ToUpper(s))) + return +} diff --git a/bufpool/bufpool.go b/bufpool/bufpool.go new file mode 100644 index 000000000..261af9003 --- /dev/null +++ b/bufpool/bufpool.go @@ -0,0 +1,29 @@ +package bufpool + +import ( + "bytes" + "sync" +) + +type Pool 
struct { + p sync.Pool +} + +func New() *Pool { + return &Pool{ + p: sync.Pool{ + New: func() interface{} { + return new(bytes.Buffer) + }, + }, + } +} + +func (p *Pool) Get() *bytes.Buffer { + return p.p.Get().(*bytes.Buffer) +} + +func (p *Pool) Put(b *bytes.Buffer) { + b.Reset() + p.p.Put(b) +} diff --git a/client/API.md b/client/API.md index d03ef3335..9643b29d2 100644 --- a/client/API.md +++ b/client/API.md @@ -6,6 +6,7 @@ * [Templates](#templates) * [Recordings](#recordings) * [Replays](#replays) +* [Alerts](#alerts) * [Configuration](#configuration) * [Testing Services](#testing-services) * [Miscellaneous](#miscellaneous) @@ -62,6 +63,26 @@ If you do not specify an ID a random UUID will be generated for the resource. All IDs must match this regex `^[-\._\p{L}0-9]+$`, which is essentially numbers, unicode letters, '-', '.' and '_'. + +### Backwards Compatibility + +Currently Kapacitor is in 1.x release with a guarantee that all new releases will be backwards compatible with previous releases. +This applies directly to the API. New additions may be made to the API but existing endpoints will not be changed in backwards incompatible ways during the 1.x releases. + +### Technical Preview + +On occasion when a new feature is added to Kapacitor it may be added in a technical preview for a few minor releases and then later promoted to fully fledged v1 feature. +Preview means that the newly added features may be changed in backwards incompatible ways until they are promoted to v1 features. +Using technical preview allows for new features to fully mature while maintaining regularly scheduled releases. + +To make it clear which features of the API are in technical preview the base path `/kapacitor/v1preview` is used. +If you wish to preview some of these new features, simply use the path `/kapacitor/v1preview` instead of `/kapacitor/v1` for your requests. 
+All v1 endpoints are available under the v1preview path so that your client need not be configured with multiple paths. +The technical preview endpoints are only available under the v1preview path. + + +>NOTE: Using a technical preview means that you may have to update your client for breaking changes to the previewed endpoints. + ## Writing Data Kapacitor can accept writes over HTTP using the line protocol. @@ -404,7 +425,7 @@ GET /kapacitor/v1/tasks Optionally specify a glob `pattern` to list only matching tasks. ``` -GET /kapacitor/v1/task?pattern=TASK* +GET /kapacitor/v1/tasks?pattern=TASK* ``` ```json @@ -1341,6 +1362,419 @@ GET /kapacitor/v1/replays } ``` +## Alerts + +Kapacitor can generate and handle alerts. +The API allows you to see the current state of any alert and to configure various handlers for the alerts. + +>NOTE: All API endpoints related to alerts are in a technical preview. +Meaning that they are subject to change in the future until the technical preview is completed. +As such the URL for the endpoints uses the base path `/kapacitor/v1preview`. +Once the technical preview is deemed complete the endpoint paths will be promoted to use the v1 `/kapacitor/v1` base path. + +### Topics + +Alerts are grouped into topics. +An alert handler "listens" on a topic for any new events. +You can either specify the alert topic in the TICKscript or one will be generated for you. + +To query the list of available topics make a GET requests to `/kapacitor/v1preview/alerts/topics`. + +| Query Parameter | Default | Purpose | +| --------------- | ------- | ------- | +| min-level | OK | Only return topics that are greater or equal to the min-level. Valid values include OK, INFO, WARNING, CRITICAL. | +| pattern | * | Filter results based on the pattern. Uses standard shell glob matching on the topic ID, see [this](https://golang.org/pkg/path/filepath/#Match) for more details. | + + +#### Example + +Get all topics. 
+ +``` +GET /kapacitor/v1preview/alerts/topics +``` + +``` +{ + "link": {"rel":"self","href":"/kapacitor/v1preview/alerts/topics"}, + "topics": [ + { + "link": {"rel":"self","href":"/kapacitor/v1preview/alerts/topics/system"}, + "events-link" : {"rel":"events","href":"/kapacitor/v1preview/alerts/topics/system/events"}, + "handlers-link": {"rel":"handlers","href":"/kapacitor/v1preview/alerts/topics/system/handlers"}, + "id": "system", + "level":"CRITICAL" + }, + { + "link": {"rel":"self","href":"/kapacitor/v1preview/alerts/topics/app"}, + "events-link" : {"rel":"events","href":"/kapacitor/v1preview/alerts/topics/app/events"}, + "handlers-link": {"rel":"handlers","href":"/kapacitor/v1preview/alerts/topics/app/handlers"}, + "id": "app", + "level":"OK" + } + ] +} +``` + +Get all topics in a WARNING or CRITICAL state. + + +``` +GET /kapacitor/v1preview/alerts/topics?min-level=WARNING +``` + +``` +{ + "link": {"rel":"self","href":"/kapacitor/v1preview/alerts/topics"}, + "topics": [ + { + "link": {"rel":"self","href":"/kapacitor/v1preview/alerts/topics/system"}, + "events-link" : {"rel":"events","href":"/kapacitor/v1preview/alerts/topics/system/events"}, + "handlers-link": {"rel":"handlers","href":"/kapacitor/v1preview/alerts/topics/system/handlers"}, + "id": "system", + "level":"CRITICAL" + } + ] +} +``` + +### Topic Status + +To query the status of a topic make a GET request to `/kapacitor/v1preview/alerts/topics/`. + +#### Example + +``` +GET /kapacitor/v1preview/alerts/topics/system +``` + +``` +{ + "link": {"rel":"self","href":"/kapacitor/v1preview/alerts/topics/system"}, + "id": "system", + "level":"CRITICAL" + "events-link" : {"rel":"events","href":"/kapacitor/v1preview/alerts/topics/system/events"}, + "handlers-link": {"rel":"handlers","href":"/kapacitor/v1preview/alerts/topics/system/handlers"}, +} +``` + +### All Topic Events + +To query all the events within a topic make a GET request to `/kapacitor/v1preview/alerts/topics//events`. 
+ +| Query Parameter | Default | Purpose | +| --------------- | ------- | ------- | +| min-level | OK | Only return events that are greater or equal to the min-level. Valid values include OK, INFO, WARNING, CRITICAL. | + +#### Example + +``` +GET /kapacitor/v1preview/alerts/topics/system/events +``` + +``` +{ + "link": {"rel":"self","href":"/kapacitor/v1preview/alerts/topics/system/events"}, + "topic": "system", + "events": [ + { + "link":{"rel":"self","href":"/kapacitor/v1preview/alerts/topics/system/events/cpu"}, + "id": "cpu", + "state": { + "level": "WARNING", + "message": "cpu is WARNING", + "time": "2016-12-01T00:00:00Z", + "duration": "5m" + } + }, + { + "link":{"rel":"self","href":"/kapacitor/v1preview/alerts/topics/system/events/mem"}, + "id": "mem", + "state": { + "level": "CRITICAL", + "message": "mem is CRITICAL", + "time": "2016-12-01T00:10:00Z", + "duration": "1m" + } + } + ] +} +``` + +### Specific Topic Event + +You can query a specific event within a topic by making a GET request to `/kapacitor/v1preview/alerts/topics//events/`. + +#### Example + +``` +GET /kapacitor/v1preview/alerts/topics/system/events/cpu +``` + +``` +{ + "link":{"rel":"self","href":"/kapacitor/v1preview/alerts/topics/system/events/cpu"}, + "id": "cpu", + "state": { + "level": "WARNING", + "message": "cpu is WARNING", + "time": "2016-12-01T00:00:00Z", + "duration": "5m" + } +} +``` + +### Topic Handlers + +Handlers are created independent of a topic but are associated with a topic. +You can get a list of handlers configured for a topic by making a GET request to `/kapacitor/v1preview/alerts/topics//handlers`. + +>NOTE: Anonymous handlers (created automatically from TICKscripts) will not be listed under their associated anonymous topic as they are not configured via the API. + +#### Example + + +Get the handlers for the `system` topic. 
+ +``` +GET /kapacitor/v1preview/alerts/topics/system/handlers +``` + +``` +{ + "link":{"rel":"self","href":"/kapacitor/v1preview/alerts/topics/system/handlers"}, + "topic": "system", + "handlers": [ + { + "link":{"rel":"self","href":"/kapacitor/v1preview/alerts/handlers/slack"}, + "id":"slack", + "topics": ["system", "app"], + "actions": [{ + "kind":"slack", + "options":{ + "channel":"#alerts" + } + }] + }, + { + "link":{"rel":"self","href":"/kapacitor/v1preview/alerts/handlers/smtp"}, + "id":"smtp", + "topics": ["system", "app"], + "actions": [{ + "kind":"smtp" + }] + } + ] +} +``` + +This `main:alert_cpu:alert5` topic represents an auto-generated topic from a task that has defined handlers explicitly in the TICKscript. +Anonymous handlers cannot be listed or modified via the API. + +``` +GET /kapacitor/v1preview/alerts/topics/main:alert_cpu:alert5/handlers +``` + +``` +{ + "link":{"rel":"self","href":"/kapacitor/v1preview/alerts/topics/system/handlers"}, + "topic": "main:alert_cpu:alert5", + "handlers": null +} +``` + +### Creating and Removing Topics + +Topics are created dynamically for you when they referenced in TICKscripts or in handlers. +To delete a topic make a `DELETE` request to `/kapacitor/v1preview/alerts/topics/`. +This will delete all known events and state for the topic. + +>NOTE: Since topics are dynamically created, a topic may return after having deleted it, if a new event is created for the topic. + + +#### Example + +``` +DELETE /kapacitor/v1preview/alerts/topics/system +``` + + +### List Handlers + +To query information about all handlers independent of a given topic make a GET request to `/kapacitor/v1preview/alerts/handlers`. + +| Query Parameter | Default | Purpose | +| --------------- | ------- | ------- | +| pattern | * | Filter results based on the pattern. Uses standard shell glob matching on the service name, see [this](https://golang.org/pkg/path/filepath/#Match) for more details. 
| + +#### Example + +``` +GET /kapacitor/v1preview/alerts/handlers +``` + +``` +{ + "link":{"rel":"self","href":"/kapacitor/v1preview/alerts/handlers"}, + "handlers": [ + { + "link":{"rel":"self","href":"/kapacitor/v1preview/alerts/handlers/slack"}, + "id":"slack", + "topics": ["system", "app"], + "actions": [{ + "kind":"slack", + "options": { + "channel":"#alerts" + } + }] + }, + { + "link":{"rel":"self","href":"/kapacitor/v1preview/alerts/handlers/smtp"}, + "id":"smtp", + "topics": ["system", "app"], + "actions": [{ + "kind":"smtp" + }] + } + ] +} +``` + +### Get a Handler + +To query information about a specific handler make a GET request to `/kapacitor/v1preview/alerts/handlers/`. + +#### Example + +``` +GET /kapacitor/v1preview/alerts/handlers/slack +``` + +``` +{ + "link":{"rel":"self","href":"/kapacitor/v1preview/alerts/handlers/slack"}, + "id":"slack", + "topics": ["system", "app"], + "actions": [{ + "kind":"slack", + "options": { + "channel":"#alerts" + } + }] +} +``` + +### Create a Handler + +To create a new handler make a POST request to `/kapacitor/v1preview/alerts/handlers`. + +``` +POST /kapacitor/v1preview/alerts/handlers +{ + "id":"slack", + "topics": ["system", "app"], + "actions": [{ + "kind":"slack", + "options": { + "channel":"#alerts" + } + }] + +} +``` + +``` +{ + "link":{"rel":"self","href":"/kapacitor/v1preview/alerts/handlers/slack"}, + "id": "slack", + "topics": ["system", "app"], + "actions": [{ + "kind":"slack", + "options": { + "channel":"#alerts" + } + }] +} +``` + +### Update a Handler + +To update an existing handler you can either make a PUT or PATCH request to `/kapacitor/v1preview/alerts/handlers/`. + +Using PUT will replace the entire handler, by using PATCH specific parts of the handler can be modified. + +PATCH will apply JSON patch object to the existing handler, see [rfc6902](https://tools.ietf.org/html/rfc6902) for more details. + +#### Example + +Update the topics and actions for a handler using the PATCH method. 
+ +``` +PATCH /kapacitor/v1preview/alerts/handlers/slack +[ + {"op":"replace", "path":"/topics", "value":["system", "test"]}, + {"op":"replace", "path":"/actions/0/options/channel", "value":"#testing_alerts"} +] +``` + +``` +{ + "link":{"rel":"self","href":"/kapacitor/v1preview/alerts/handlers/slack"}, + "id": "slack", + "topics": ["system", "test"], + "actions": [ + { + "kind":"slack", + "options": { + "channel":"#testing_alerts" + } + } + ] +} +``` + +Replace an entire handler using the PUT method. + +``` +PUT /kapacitor/v1preview/alerts/handlers/slack +{ + "id": "slack", + "topics": ["system", "test"], + "actions": [ + { + "kind":"slack", + "options": { + "channel":"#testing_alerts" + } + } + ] +} +``` + +``` +{ + "link":{"rel":"self","href":"/kapacitor/v1preview/alerts/handlers/slack"}, + "id": "slack", + "topics": ["system", "test"], + "actions": [ + { + "kind":"slack", + "options": { + "channel":"#testing_alerts" + } + } + ] +} +``` + +### Remove a Handler + +To remove an existing handler make a DELETE request to `/kapacitor/v1preview/alerts/handlers/`. + +``` +DELETE /kapacitor/v1preview/alerts/handlers/ +``` + ## Configuration You can set configuration overrides via the API for certain sections of the config. diff --git a/client/v1/client.go b/client/v1/client.go index 67986f4af..33ecb3d03 100644 --- a/client/v1/client.go +++ b/client/v1/client.go @@ -26,21 +26,27 @@ const DefaultUserAgent = "KapacitorClient" // then use the appropriate *Link methods. 
const ( - basePath = "/kapacitor/v1" - pingPath = basePath + "/ping" - logLevelPath = basePath + "/loglevel" - debugVarsPath = basePath + "/debug/vars" - tasksPath = basePath + "/tasks" - templatesPath = basePath + "/templates" - recordingsPath = basePath + "/recordings" - recordStreamPath = basePath + "/recordings/stream" - recordBatchPath = basePath + "/recordings/batch" - recordQueryPath = basePath + "/recordings/query" - replaysPath = basePath + "/replays" - replayBatchPath = basePath + "/replays/batch" - replayQueryPath = basePath + "/replays/query" - configPath = basePath + "/config" - serviceTestsPath = basePath + "/service-tests" + basePath = "/kapacitor/v1" + basePreviewPath = "/kapacitor/v1preview" + pingPath = basePath + "/ping" + logLevelPath = basePath + "/loglevel" + debugVarsPath = basePath + "/debug/vars" + tasksPath = basePath + "/tasks" + templatesPath = basePath + "/templates" + recordingsPath = basePath + "/recordings" + recordStreamPath = basePath + "/recordings/stream" + recordBatchPath = basePath + "/recordings/batch" + recordQueryPath = basePath + "/recordings/query" + replaysPath = basePath + "/replays" + replayBatchPath = basePath + "/replays/batch" + replayQueryPath = basePath + "/replays/query" + configPath = basePath + "/config" + serviceTestsPath = basePath + "/service-tests" + alertsPath = basePreviewPath + "/alerts" + handlersPath = alertsPath + "/handlers" + topicsPath = alertsPath + "/topics" + topicEventsPath = "events" + topicHandlersPath = "handlers" ) // HTTP configuration for connecting to Kapacitor @@ -158,47 +164,14 @@ func New(conf Config) (*Client, error) { }, nil } -type Relation int +type Relation string const ( - Self Relation = iota - Next - Previous + Self Relation = "self" ) -func (r Relation) MarshalText() ([]byte, error) { - switch r { - case Self: - return []byte("self"), nil - case Next: - return []byte("next"), nil - case Previous: - return []byte("prev"), nil - default: - return nil, fmt.Errorf("unknown 
Relation %d", r) - } -} - -func (r *Relation) UnmarshalText(text []byte) error { - switch s := string(text); s { - case "self": - *r = Self - case "next": - *r = Next - case "prev": - *r = Previous - default: - return fmt.Errorf("unknown Relation %s", s) - } - return nil -} - func (r Relation) String() string { - s, err := r.MarshalText() - if err != nil { - return err.Error() - } - return string(s) + return string(r) } type Link struct { @@ -599,6 +572,15 @@ type Replay struct { Progress float64 `json:"progress"` } +type JSONOperation struct { + Path string `json:"path"` + Operation string `json:"op"` + Value interface{} `json:"value"` + From string `json:"from,omitempty"` +} + +type JSONPatch []JSONOperation + func (c *Client) URL() string { return c.url.String() } @@ -705,6 +687,24 @@ func (c *Client) ServiceTestLink(service string) Link { return Link{Relation: Self, Href: path.Join(serviceTestsPath, service)} } +func (c *Client) TopicEventsLink(topic string) Link { + return Link{Relation: Self, Href: path.Join(topicsPath, topic, topicEventsPath)} +} +func (c *Client) TopicEventLink(topic, event string) Link { + return Link{Relation: Self, Href: path.Join(topicsPath, topic, topicEventsPath, event)} +} + +func (c *Client) TopicHandlersLink(topic string) Link { + return Link{Relation: Self, Href: path.Join(topicsPath, topic, topicHandlersPath)} +} + +func (c *Client) HandlerLink(id string) Link { + return Link{Relation: Self, Href: path.Join(handlersPath, id)} +} +func (c *Client) TopicLink(id string) Link { + return Link{Relation: Self, Href: path.Join(topicsPath, id)} +} + type CreateTaskOptions struct { ID string `json:"id,omitempty"` TemplateID string `json:"template-id,omitempty"` @@ -1740,6 +1740,353 @@ func (c *Client) DoServiceTest(link Link, sto ServiceTestOptions) (ServiceTestRe return r, nil } +type ListTopicsOptions struct { + Pattern string + MinLevel string +} + +func (o *ListTopicsOptions) Default() { + if o.MinLevel == "" { + o.MinLevel = "OK" + } 
+} + +func (o *ListTopicsOptions) Values() *url.Values { + v := &url.Values{} + v.Set("pattern", o.Pattern) + v.Set("min-level", o.MinLevel) + return v +} + +type Topics struct { + Link Link `json:"link"` + Topics []Topic `json:"topics"` +} + +type Topic struct { + Link Link `json:"link"` + ID string `json:"id"` + Level string `json:"level"` + EventsLink Link `json:"events-link"` + HandlersLink Link `json:"handlers-link"` +} + +func (c *Client) ListTopics(opt *ListTopicsOptions) (Topics, error) { + topics := Topics{} + if opt == nil { + opt = new(ListTopicsOptions) + } + opt.Default() + + u := *c.url + u.Path = topicsPath + u.RawQuery = opt.Values().Encode() + + req, err := http.NewRequest("GET", u.String(), nil) + if err != nil { + return topics, err + } + + _, err = c.Do(req, &topics, http.StatusOK) + if err != nil { + return topics, err + } + return topics, nil +} + +func (c *Client) DeleteTopic(link Link) error { + if link.Href == "" { + return fmt.Errorf("invalid link %v", link) + } + u := *c.url + u.Path = link.Href + + req, err := http.NewRequest("DELETE", u.String(), nil) + if err != nil { + return err + } + + _, err = c.Do(req, nil, http.StatusNoContent) + return err +} + +type TopicEvents struct { + Link Link `json:"link"` + Topic string `json:"topic"` + Events []TopicEvent `json:"events"` +} + +type TopicEvent struct { + Link Link `json:"link"` + ID string `json:"id"` + State EventState `json:"state"` +} + +type EventState struct { + Message string `json:"message"` + Details string `json:"details"` + Time time.Time `json:"time"` + Duration Duration `json:"duration"` + Level string `json:"level"` +} + +// TopicEvent retrieves details for a single event of a topic +// Errors if no event exists. 
+func (c *Client) TopicEvent(link Link) (TopicEvent, error) { + e := TopicEvent{} + if link.Href == "" { + return e, fmt.Errorf("invalid link %v", link) + } + + u := *c.url + u.Path = link.Href + + req, err := http.NewRequest("GET", u.String(), nil) + if err != nil { + return e, err + } + + _, err = c.Do(req, &e, http.StatusOK) + return e, err +} + +type ListTopicEventsOptions struct { + MinLevel string +} + +func (o *ListTopicEventsOptions) Default() { + if o.MinLevel == "" { + o.MinLevel = "OK" + } +} + +func (o *ListTopicEventsOptions) Values() *url.Values { + v := &url.Values{} + v.Set("min-level", o.MinLevel) + return v +} + +// ListTopicEvents returns the current state for events within a topic. +func (c *Client) ListTopicEvents(link Link, opt *ListTopicEventsOptions) (TopicEvents, error) { + t := TopicEvents{} + if link.Href == "" { + return t, fmt.Errorf("invalid link %v", link) + } + + if opt == nil { + opt = new(ListTopicEventsOptions) + } + opt.Default() + + u := *c.url + u.Path = link.Href + u.RawQuery = opt.Values().Encode() + + req, err := http.NewRequest("GET", u.String(), nil) + if err != nil { + return t, err + } + + _, err = c.Do(req, &t, http.StatusOK) + return t, err +} + +type TopicHandlers struct { + Link Link `json:"link"` + Topic string `json:"topic"` + Handlers []Handler `json:"handlers"` +} + +// TopicHandlers returns the current state for events within a topic. 
+func (c *Client) ListTopicHandlers(link Link) (TopicHandlers, error) { + t := TopicHandlers{} + if link.Href == "" { + return t, fmt.Errorf("invalid link %v", link) + } + + u := *c.url + u.Path = link.Href + + req, err := http.NewRequest("GET", u.String(), nil) + if err != nil { + return t, err + } + + _, err = c.Do(req, &t, http.StatusOK) + return t, err +} + +type Handlers struct { + Link Link `json:"link"` + Handlers []Handler `json:"handlers"` +} + +type Handler struct { + Link Link `json:"link"` + ID string `json:"id"` + Topics []string `json:"topics"` + Actions []HandlerAction `json:"actions"` +} + +type HandlerAction struct { + Kind string `json:"kind" yaml:"kind"` + Options map[string]interface{} `json:"options" yaml:"options"` +} + +// Handler retrieves an alert handler. +// Errors if no handler exists. +func (c *Client) Handler(link Link) (Handler, error) { + h := Handler{} + if link.Href == "" { + return h, fmt.Errorf("invalid link %v", link) + } + + u := *c.url + u.Path = link.Href + + req, err := http.NewRequest("GET", u.String(), nil) + if err != nil { + return h, err + } + + _, err = c.Do(req, &h, http.StatusOK) + return h, err +} + +type HandlerOptions struct { + ID string `json:"id" yaml:"id"` + Topics []string `json:"topics" yaml:"topics"` + Actions []HandlerAction `json:"actions" yaml:"actions"` +} + +// CreateHandler creates a new alert handler. +// Errors if the handler already exists. +func (c *Client) CreateHandler(opt HandlerOptions) (Handler, error) { + var buf bytes.Buffer + enc := json.NewEncoder(&buf) + err := enc.Encode(opt) + if err != nil { + return Handler{}, err + } + + u := *c.url + u.Path = handlersPath + + req, err := http.NewRequest("POST", u.String(), &buf) + if err != nil { + return Handler{}, err + } + req.Header.Set("Content-Type", "application/json") + + h := Handler{} + _, err = c.Do(req, &h, http.StatusOK) + return h, err +} + +// PatchHandler applies a patch operation to an existing handler. 
+func (c *Client) PatchHandler(link Link, patch JSONPatch) (Handler, error) { + h := Handler{} + if link.Href == "" { + return h, fmt.Errorf("invalid link %v", link) + } + var buf bytes.Buffer + enc := json.NewEncoder(&buf) + err := enc.Encode(patch) + if err != nil { + return h, err + } + + u := *c.url + u.Path = link.Href + + req, err := http.NewRequest("PATCH", u.String(), &buf) + if err != nil { + return h, err + } + req.Header.Set("Content-Type", "application/json+patch") + + _, err = c.Do(req, &h, http.StatusOK) + return h, err +} + +// ReplaceHandler replaces an existing handler, with the new definition. +func (c *Client) ReplaceHandler(link Link, opt HandlerOptions) (Handler, error) { + h := Handler{} + if link.Href == "" { + return h, fmt.Errorf("invalid link %v", link) + } + var buf bytes.Buffer + enc := json.NewEncoder(&buf) + err := enc.Encode(opt) + if err != nil { + return h, err + } + + u := *c.url + u.Path = link.Href + + req, err := http.NewRequest("PUT", u.String(), &buf) + if err != nil { + return h, err + } + req.Header.Set("Content-Type", "application/json") + + _, err = c.Do(req, &h, http.StatusOK) + return h, err +} + +// DeleteHandler deletes a handler. 
+func (c *Client) DeleteHandler(link Link) error { + if link.Href == "" { + return fmt.Errorf("invalid link %v", link) + } + u := *c.url + u.Path = link.Href + + req, err := http.NewRequest("DELETE", u.String(), nil) + if err != nil { + return err + } + + _, err = c.Do(req, nil, http.StatusNoContent) + return err +} + +type ListHandlersOptions struct { + Pattern string +} + +func (o *ListHandlersOptions) Default() {} + +func (o *ListHandlersOptions) Values() *url.Values { + v := &url.Values{} + v.Set("pattern", o.Pattern) + return v +} + +func (c *Client) ListHandlers(opt *ListHandlersOptions) (Handlers, error) { + handlers := Handlers{} + if opt == nil { + opt = new(ListHandlersOptions) + } + opt.Default() + + u := *c.url + u.Path = handlersPath + u.RawQuery = opt.Values().Encode() + + req, err := http.NewRequest("GET", u.String(), nil) + if err != nil { + return handlers, err + } + + _, err = c.Do(req, &handlers, http.StatusOK) + if err != nil { + return handlers, err + } + return handlers, nil +} + type LogLevelOptions struct { Level string `json:"level"` } @@ -1801,3 +2148,17 @@ func (c *Client) DebugVars() (DebugVars, error) { _, err = c.Do(req, &vars, http.StatusOK) return vars, err } + +type Duration time.Duration + +func (d Duration) MarshalText() ([]byte, error) { + return []byte(time.Duration(d).String()), nil +} +func (d *Duration) UnmarshalText(data []byte) error { + dur, err := time.ParseDuration(string(data)) + if err != nil { + return err + } + *d = Duration(dur) + return nil +} diff --git a/client/v1/client_test.go b/client/v1/client_test.go index caad0320b..891374351 100644 --- a/client/v1/client_test.go +++ b/client/v1/client_test.go @@ -250,6 +250,83 @@ func Test_ReportsErrors(t *testing.T) { return err }, }, + { + name: "ListTopics", + fnc: func(c *client.Client) error { + _, err := c.ListTopics(nil) + return err + }, + }, + { + name: "DeleteTopic", + fnc: func(c *client.Client) error { + err := c.DeleteTopic(c.TopicLink("")) + return err + }, + 
}, + { + name: "TopicEvent", + fnc: func(c *client.Client) error { + _, err := c.TopicEvent(c.TopicEventLink("topic", "event")) + return err + }, + }, + { + name: "ListTopicEvents", + fnc: func(c *client.Client) error { + _, err := c.ListTopicEvents(c.TopicEventsLink(""), nil) + return err + }, + }, + { + name: "ListTopicHandlers", + fnc: func(c *client.Client) error { + _, err := c.ListTopicHandlers(c.TopicHandlersLink("")) + return err + }, + }, + { + name: "Handler", + fnc: func(c *client.Client) error { + _, err := c.Handler(c.HandlerLink("")) + return err + }, + }, + { + name: "CreateHandler", + fnc: func(c *client.Client) error { + _, err := c.CreateHandler(client.HandlerOptions{}) + return err + }, + }, + { + name: "PatchHandler", + fnc: func(c *client.Client) error { + _, err := c.PatchHandler(c.HandlerLink(""), nil) + return err + }, + }, + { + name: "ReplaceHandler", + fnc: func(c *client.Client) error { + _, err := c.ReplaceHandler(c.HandlerLink(""), client.HandlerOptions{}) + return err + }, + }, + { + name: "DeleteHandler", + fnc: func(c *client.Client) error { + err := c.DeleteHandler(c.HandlerLink("")) + return err + }, + }, + { + name: "ListHandlers", + fnc: func(c *client.Client) error { + _, err := c.ListHandlers(nil) + return err + }, + }, { name: "LogLevel", fnc: func(c *client.Client) error { @@ -2394,6 +2471,629 @@ func Test_DoServiceTest(t *testing.T) { } } +func Test_ListTopics(t *testing.T) { + s, c, err := newClient(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if r.URL.String() == "/kapacitor/v1preview/alerts/topics?min-level=WARNING&pattern=%2A" && + r.Method == "GET" { + w.WriteHeader(http.StatusOK) + fmt.Fprintf(w, `{ + "link": {"rel":"self","href":"/kapacitor/v1preview/alerts/topics"}, + "topics": [ + { + "link": {"rel":"self","href":"/kapacitor/v1preview/alerts/topics/system"}, + "events-link" : {"rel":"events","href":"/kapacitor/v1preview/alerts/topics/system/events"}, + "handlers-link": 
{"rel":"handlers","href":"/kapacitor/v1preview/alerts/topics/system/handlers"}, + "id": "system", + "level":"CRITICAL" + }, + { + "link": {"rel":"self","href":"/kapacitor/v1preview/alerts/topics/app"}, + "events-link" : {"rel":"events","href":"/kapacitor/v1preview/alerts/topics/app/events"}, + "handlers-link": {"rel":"handlers","href":"/kapacitor/v1preview/alerts/topics/app/handlers"}, + "id": "app", + "level":"WARNING" + } + ] +}`) + } else { + w.WriteHeader(http.StatusBadRequest) + fmt.Fprintf(w, "request: %v", r) + } + })) + if err != nil { + t.Fatal(err) + } + defer s.Close() + + topics, err := c.ListTopics(&client.ListTopicsOptions{ + Pattern: "*", + MinLevel: "WARNING", + }) + if err != nil { + t.Fatal(err) + } + exp := client.Topics{ + Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1preview/alerts/topics"}, + Topics: []client.Topic{ + { + ID: "system", + Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1preview/alerts/topics/system"}, + EventsLink: client.Link{Relation: "events", Href: "/kapacitor/v1preview/alerts/topics/system/events"}, + HandlersLink: client.Link{Relation: "handlers", Href: "/kapacitor/v1preview/alerts/topics/system/handlers"}, + Level: "CRITICAL", + }, + { + ID: "app", + Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1preview/alerts/topics/app"}, + EventsLink: client.Link{Relation: "events", Href: "/kapacitor/v1preview/alerts/topics/app/events"}, + HandlersLink: client.Link{Relation: "handlers", Href: "/kapacitor/v1preview/alerts/topics/app/handlers"}, + Level: "WARNING", + }, + }, + } + if !reflect.DeepEqual(exp, topics) { + t.Errorf("unexpected topics result:\ngot:\n%v\nexp:\n%v", topics, exp) + } +} + +func Test_DeleteTopic(t *testing.T) { + s, c, err := newClient(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if r.URL.String() == "/kapacitor/v1preview/alerts/topics/system" && + r.Method == "DELETE" { + w.WriteHeader(http.StatusNoContent) + } else { + 
w.WriteHeader(http.StatusBadRequest) + fmt.Fprintf(w, "request: %v", r) + } + })) + if err != nil { + t.Fatal(err) + } + defer s.Close() + + err = c.DeleteTopic(c.TopicLink("system")) + if err != nil { + t.Fatal(err) + } +} + +func Test_TopicEvent(t *testing.T) { + s, c, err := newClient(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if r.URL.String() == "/kapacitor/v1preview/alerts/topics/system/events/cpu" && + r.Method == "GET" { + w.WriteHeader(http.StatusOK) + fmt.Fprintf(w, `{ + "link":{"rel":"self","href":"/kapacitor/v1preview/alerts/topics/system/events/cpu"}, + "id": "cpu", + "state": { + "level": "WARNING", + "message": "cpu is WARNING", + "time": "2016-12-01T00:00:00Z", + "duration": "5m" + } +}`) + } else { + w.WriteHeader(http.StatusBadRequest) + fmt.Fprintf(w, "request: %v", r) + } + })) + if err != nil { + t.Fatal(err) + } + defer s.Close() + + topicEvent, err := c.TopicEvent(c.TopicEventLink("system", "cpu")) + if err != nil { + t.Fatal(err) + } + exp := client.TopicEvent{ + ID: "cpu", + Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1preview/alerts/topics/system/events/cpu"}, + State: client.EventState{ + Message: "cpu is WARNING", + Time: time.Date(2016, 12, 1, 0, 0, 0, 0, time.UTC), + Duration: client.Duration(5 * time.Minute), + Level: "WARNING", + }, + } + if !reflect.DeepEqual(exp, topicEvent) { + t.Errorf("unexpected topic event result:\ngot:\n%v\nexp:\n%v", topicEvent, exp) + } +} + +func Test_ListTopicEvents(t *testing.T) { + s, c, err := newClient(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if r.URL.String() == "/kapacitor/v1preview/alerts/topics/system/events?min-level=OK" && + r.Method == "GET" { + w.WriteHeader(http.StatusOK) + fmt.Fprintf(w, `{ + "link": {"rel":"self","href":"/kapacitor/v1preview/alerts/topics/system/events?min-level=OK"}, + "topic": "system", + "events": [ + { + "link":{"rel":"self","href":"/kapacitor/v1preview/alerts/topics/system/events/cpu"}, + "id": "cpu", + 
"state": { + "level": "WARNING", + "message": "cpu is WARNING", + "time": "2016-12-01T00:00:00Z", + "duration": "5m" + } + }, + { + "link":{"rel":"self","href":"/kapacitor/v1preview/alerts/topics/system/events/mem"}, + "id": "mem", + "state": { + "level": "CRITICAL", + "message": "mem is CRITICAL", + "time": "2016-12-01T00:10:00Z", + "duration": "1m" + } + } + ] +}`) + } else { + w.WriteHeader(http.StatusBadRequest) + fmt.Fprintf(w, "request: %v", r) + } + })) + if err != nil { + t.Fatal(err) + } + defer s.Close() + + topicEvents, err := c.ListTopicEvents(c.TopicEventsLink("system"), nil) + if err != nil { + t.Fatal(err) + } + exp := client.TopicEvents{ + Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1preview/alerts/topics/system/events?min-level=OK"}, + Topic: "system", + Events: []client.TopicEvent{ + { + ID: "cpu", + Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1preview/alerts/topics/system/events/cpu"}, + State: client.EventState{ + Message: "cpu is WARNING", + Time: time.Date(2016, 12, 1, 0, 0, 0, 0, time.UTC), + Duration: client.Duration(5 * time.Minute), + Level: "WARNING", + }, + }, + { + ID: "mem", + Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1preview/alerts/topics/system/events/mem"}, + State: client.EventState{ + Message: "mem is CRITICAL", + Time: time.Date(2016, 12, 1, 0, 10, 0, 0, time.UTC), + Duration: client.Duration(1 * time.Minute), + Level: "CRITICAL", + }, + }, + }, + } + if !reflect.DeepEqual(exp, topicEvents) { + t.Errorf("unexpected topic events result:\ngot:\n%v\nexp:\n%v", topicEvents, exp) + } +} +func Test_ListTopicHandlers(t *testing.T) { + s, c, err := newClient(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if r.URL.String() == "/kapacitor/v1preview/alerts/topics/system/handlers" && + r.Method == "GET" { + w.WriteHeader(http.StatusOK) + fmt.Fprintf(w, `{ + "link":{"rel":"self","href":"/kapacitor/v1preview/alerts/topics/system/handlers"}, + "topic": "system", + "handlers": 
[ + { + "link":{"rel":"self","href":"/kapacitor/v1preview/alerts/handlers/slack"}, + "id":"slack", + "topics": ["system", "app"], + "actions": [{ + "kind":"slack", + "options":{ + "channel":"#alerts" + } + }] + }, + { + "link":{"rel":"self","href":"/kapacitor/v1preview/alerts/handlers/smtp"}, + "id":"smtp", + "topics": ["system", "app"], + "actions": [{ + "kind":"smtp" + }] + } + ] +}`) + } else { + w.WriteHeader(http.StatusBadRequest) + fmt.Fprintf(w, "request: %v", r) + } + })) + if err != nil { + t.Fatal(err) + } + defer s.Close() + + topicHandlers, err := c.ListTopicHandlers(c.TopicHandlersLink("system")) + if err != nil { + t.Fatal(err) + } + exp := client.TopicHandlers{ + Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1preview/alerts/topics/system/handlers"}, + Topic: "system", + Handlers: []client.Handler{ + { + ID: "slack", + Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1preview/alerts/handlers/slack"}, + Topics: []string{"system", "app"}, + Actions: []client.HandlerAction{{ + Kind: "slack", + Options: map[string]interface{}{ + "channel": "#alerts", + }, + }}, + }, + { + ID: "smtp", + Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1preview/alerts/handlers/smtp"}, + Topics: []string{"system", "app"}, + Actions: []client.HandlerAction{{ + Kind: "smtp", + }}, + }, + }, + } + if !reflect.DeepEqual(exp, topicHandlers) { + t.Errorf("unexpected topic handlers result:\ngot:\n%v\nexp:\n%v", topicHandlers, exp) + } +} +func Test_Handler(t *testing.T) { + s, c, err := newClient(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if r.URL.String() == "/kapacitor/v1preview/alerts/handlers/slack" && + r.Method == "GET" { + w.WriteHeader(http.StatusOK) + fmt.Fprintf(w, `{ + "link":{"rel":"self","href":"/kapacitor/v1preview/alerts/handlers/slack"}, + "id":"slack", + "topics": ["system", "app"], + "actions": [{ + "kind":"slack", + "options": { + "channel":"#alerts" + } + }] +}`) + } else { + 
w.WriteHeader(http.StatusBadRequest) + fmt.Fprintf(w, "request: %v", r) + } + })) + if err != nil { + t.Fatal(err) + } + defer s.Close() + + h, err := c.Handler(c.HandlerLink("slack")) + if err != nil { + t.Fatal(err) + } + exp := client.Handler{ + ID: "slack", + Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1preview/alerts/handlers/slack"}, + Topics: []string{"system", "app"}, + Actions: []client.HandlerAction{{ + Kind: "slack", + Options: map[string]interface{}{ + "channel": "#alerts", + }, + }}, + } + if !reflect.DeepEqual(exp, h) { + t.Errorf("unexpected handler result:\ngot:\n%v\nexp:\n%v", h, exp) + } +} +func Test_CreateHandler(t *testing.T) { + s, c, err := newClient(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + options := client.HandlerOptions{} + json.NewDecoder(r.Body).Decode(&options) + expOptions := client.HandlerOptions{ + ID: "slack", + Topics: []string{"system", "app"}, + Actions: []client.HandlerAction{{ + Kind: "slack", + Options: map[string]interface{}{ + "channel": "#alerts", + }, + }}, + } + if r.URL.String() == "/kapacitor/v1preview/alerts/handlers" && + r.Method == "POST" && + reflect.DeepEqual(expOptions, options) { + w.WriteHeader(http.StatusOK) + fmt.Fprintf(w, `{ + "link":{"rel":"self","href":"/kapacitor/v1preview/alerts/handlers/slack"}, + "id": "slack", + "topics": ["system", "app"], + "actions": [{ + "kind":"slack", + "options": { + "channel":"#alerts" + } + }] +}`) + } else { + w.WriteHeader(http.StatusBadRequest) + fmt.Fprintf(w, "request: %v", r) + } + })) + if err != nil { + t.Fatal(err) + } + defer s.Close() + + h, err := c.CreateHandler(client.HandlerOptions{ + ID: "slack", + Topics: []string{"system", "app"}, + Actions: []client.HandlerAction{{ + Kind: "slack", + Options: map[string]interface{}{ + "channel": "#alerts", + }, + }}, + }) + if err != nil { + t.Fatal(err) + } + exp := client.Handler{ + ID: "slack", + Link: client.Link{Relation: client.Self, Href: 
"/kapacitor/v1preview/alerts/handlers/slack"}, + Topics: []string{"system", "app"}, + Actions: []client.HandlerAction{{ + Kind: "slack", + Options: map[string]interface{}{ + "channel": "#alerts", + }, + }}, + } + if !reflect.DeepEqual(exp, h) { + t.Errorf("unexpected create handler result:\ngot:\n%v\nexp:\n%v", h, exp) + } +} +func Test_PatchHandler(t *testing.T) { + s, c, err := newClient(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + var patch client.JSONPatch + json.NewDecoder(r.Body).Decode(&patch) + expPatch := client.JSONPatch{ + client.JSONOperation{ + Operation: "replace", + Path: "/topics", + Value: []interface{}{"system", "test"}, + }, + client.JSONOperation{ + Operation: "replace", + Path: "/actions/0/options/channel", + Value: "#testing_alerts", + }, + } + if r.URL.String() == "/kapacitor/v1preview/alerts/handlers/slack" && + r.Method == "PATCH" && + reflect.DeepEqual(expPatch, patch) { + w.WriteHeader(http.StatusOK) + fmt.Fprintf(w, `{ + "link":{"rel":"self","href":"/kapacitor/v1preview/alerts/handlers/slack"}, + "id": "slack", + "topics": ["system", "test"], + "actions": [{ + "kind":"slack", + "options": { + "channel":"#testing_alerts" + } + }] +}`) + } else { + w.WriteHeader(http.StatusBadRequest) + fmt.Fprintf(w, "request: %v", r) + } + })) + if err != nil { + t.Fatal(err) + } + defer s.Close() + + h, err := c.PatchHandler(c.HandlerLink("slack"), client.JSONPatch{ + client.JSONOperation{ + Operation: "replace", + Path: "/topics", + Value: []string{"system", "test"}, + }, + client.JSONOperation{ + Operation: "replace", + Path: "/actions/0/options/channel", + Value: "#testing_alerts", + }, + }) + if err != nil { + t.Fatal(err) + } + exp := client.Handler{ + ID: "slack", + Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1preview/alerts/handlers/slack"}, + Topics: []string{"system", "test"}, + Actions: []client.HandlerAction{{ + Kind: "slack", + Options: map[string]interface{}{ + "channel": "#testing_alerts", + }, + }}, + 
} + if !reflect.DeepEqual(exp, h) { + t.Errorf("unexpected replace handler result:\ngot:\n%v\nexp:\n%v", h, exp) + } +} +func Test_ReplaceHandler(t *testing.T) { + s, c, err := newClient(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + options := client.HandlerOptions{} + json.NewDecoder(r.Body).Decode(&options) + expOptions := client.HandlerOptions{ + ID: "slack", + Topics: []string{"system", "test"}, + Actions: []client.HandlerAction{{ + Kind: "slack", + Options: map[string]interface{}{ + "channel": "#testing_alerts", + }, + }}, + } + if r.URL.String() == "/kapacitor/v1preview/alerts/handlers/slack" && + r.Method == "PUT" && + reflect.DeepEqual(expOptions, options) { + w.WriteHeader(http.StatusOK) + fmt.Fprintf(w, `{ + "link":{"rel":"self","href":"/kapacitor/v1preview/alerts/handlers/slack"}, + "id": "slack", + "topics": ["system", "test"], + "actions": [{ + "kind":"slack", + "options": { + "channel":"#testing_alerts" + } + }] +}`) + } else { + w.WriteHeader(http.StatusBadRequest) + fmt.Fprintf(w, "request: %v", r) + } + })) + if err != nil { + t.Fatal(err) + } + defer s.Close() + + h, err := c.ReplaceHandler(c.HandlerLink("slack"), client.HandlerOptions{ + ID: "slack", + Topics: []string{"system", "test"}, + Actions: []client.HandlerAction{{ + Kind: "slack", + Options: map[string]interface{}{ + "channel": "#testing_alerts", + }, + }}, + }) + if err != nil { + t.Fatal(err) + } + exp := client.Handler{ + ID: "slack", + Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1preview/alerts/handlers/slack"}, + Topics: []string{"system", "test"}, + Actions: []client.HandlerAction{{ + Kind: "slack", + Options: map[string]interface{}{ + "channel": "#testing_alerts", + }, + }}, + } + if !reflect.DeepEqual(exp, h) { + t.Errorf("unexpected replace handler result:\ngot:\n%v\nexp:\n%v", h, exp) + } +} +func Test_DeleteHandler(t *testing.T) { + s, c, err := newClient(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if r.URL.String() == 
"/kapacitor/v1preview/alerts/handlers/slack" && + r.Method == "DELETE" { + w.WriteHeader(http.StatusNoContent) + } else { + w.WriteHeader(http.StatusBadRequest) + fmt.Fprintf(w, "request: %v", r) + } + })) + if err != nil { + t.Fatal(err) + } + defer s.Close() + + err = c.DeleteHandler(c.HandlerLink("slack")) + if err != nil { + t.Fatal(err) + } +} + +func Test_ListHandlers(t *testing.T) { + s, c, err := newClient(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if r.URL.String() == "/kapacitor/v1preview/alerts/handlers?pattern=%2A" && + r.Method == "GET" { + w.WriteHeader(http.StatusOK) + fmt.Fprintf(w, `{ + "link":{"rel":"self","href":"/kapacitor/v1preview/alerts/handlers"}, + "handlers": [ + { + "link":{"rel":"self","href":"/kapacitor/v1preview/alerts/handlers/slack"}, + "id":"slack", + "topics": ["system", "app"], + "actions": [{ + "kind":"slack", + "options": { + "channel":"#alerts" + } + }] + }, + { + "link":{"rel":"self","href":"/kapacitor/v1preview/alerts/handlers/smtp"}, + "id":"smtp", + "topics": ["system", "app"], + "actions": [{ + "kind":"smtp" + }] + } + ] +}`) + } else { + w.WriteHeader(http.StatusBadRequest) + fmt.Fprintf(w, "request: %v", r) + } + })) + if err != nil { + t.Fatal(err) + } + defer s.Close() + + handlers, err := c.ListHandlers(&client.ListHandlersOptions{ + Pattern: "*", + }) + if err != nil { + t.Fatal(err) + } + exp := client.Handlers{ + Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1preview/alerts/handlers"}, + Handlers: []client.Handler{ + { + ID: "slack", + Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1preview/alerts/handlers/slack"}, + Topics: []string{"system", "app"}, + Actions: []client.HandlerAction{{ + Kind: "slack", + Options: map[string]interface{}{ + "channel": "#alerts", + }, + }}, + }, + { + ID: "smtp", + Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1preview/alerts/handlers/smtp"}, + Topics: []string{"system", "app"}, + Actions: []client.HandlerAction{{ + 
Kind: "smtp", + }}, + }, + }, + } + if !reflect.DeepEqual(exp, handlers) { + t.Errorf("unexpected list handlers result:\ngot:\n%v\nexp:\n%v", handlers, exp) + } +} + func Test_LogLevel(t *testing.T) { s, c, err := newClient(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { var opts client.LogLevelOptions diff --git a/cmd/kapacitor/main.go b/cmd/kapacitor/main.go index bfdec309c..e192e9b24 100644 --- a/cmd/kapacitor/main.go +++ b/cmd/kapacitor/main.go @@ -10,11 +10,14 @@ import ( "log" "net/http" "os" + "path" "sort" "strconv" "strings" "time" + yaml "gopkg.in/yaml.v2" + humanize "github.com/dustin/go-humanize" "github.com/influxdata/influxdb/influxql" "github.com/influxdata/kapacitor/client/v1" @@ -47,16 +50,18 @@ Commands: record Record the result of a query or a snapshot of the current stream data. define Create/update a task. define-template Create/update a template. + define-handler Create/update an alert handler. replay Replay a recording to a task. replay-live Replay data against a task without recording it. enable Enable and start running a task with live data. disable Stop running a task. reload Reload a running task with an updated task definition. push Publish a task definition to another Kapacitor instance. Not implemented yet. - delete Delete tasks, templates, recordings or replays. - list List information about tasks, templates, recordings or replays. + delete Delete tasks, templates, recordings, replays, topics or handlers. + list List information about tasks, templates, recordings, replays, topics, handlers or service-tests. show Display detailed information about a task. show-template Display detailed information about a template. + show-handler Display detailed information about an alert handler. level Sets the logging level on the kapacitord server. stats Display various stats about Kapacitor. version Displays the Kapacitor version info. 
@@ -133,6 +138,9 @@ func main() { case "define-template": commandArgs = args commandF = doDefineTemplate + case "define-handler": + commandArgs = args + commandF = doDefineHandler case "replay": replayFlags.Parse(args) commandArgs = replayFlags.Args() @@ -166,6 +174,9 @@ func main() { case "show-template": commandArgs = args commandF = doShowTemplate + case "show-handler": + commandArgs = args + commandF = doShowHandler case "level": commandArgs = args commandF = doLevel @@ -242,6 +253,8 @@ func doHelp(args []string) error { defineFlags.Usage() case "define-template": defineTemplateFlags.Usage() + case "define-handler": + defineHandlerUsage() case "replay": replayFlags.Usage() case "enable": @@ -258,6 +271,8 @@ func doHelp(args []string) error { showUsage() case "show-template": showTemplateUsage() + case "show-handler": + showHandlerUsage() case "level": levelUsage() case "help": @@ -797,6 +812,63 @@ func doDefineTemplate(args []string) error { return err } +func defineHandlerUsage() { + var u = `Usage: kapacitor define-handler + + Create or update a handler. + + A handler is defined via a JSON or YAML file. 
+ +For example: + + Define a handler using the slack.yaml file: + + $ kapacitor define-handler slack.yaml + +` + fmt.Fprintln(os.Stderr, u) +} + +func doDefineHandler(args []string) error { + if len(args) < 1 { + fmt.Fprintln(os.Stderr, "Must provide a path to a handler file.") + defineHandlerUsage() + os.Exit(2) + } + p := args[0] + f, err := os.Open(p) + if err != nil { + return errors.Wrapf(err, "failed to open handler file %q", p) + } + + // Decode file into HandlerOptions + var ho client.HandlerOptions + ext := path.Ext(p) + switch ext { + case ".yaml": + data, err := ioutil.ReadAll(f) + if err != nil { + return errors.Wrapf(err, "failed to read handler file %q", p) + } + if err := yaml.Unmarshal(data, &ho); err != nil { + return errors.Wrapf(err, "failed to unmarshal yaml handler file %q", p) + } + case ".json": + if err := json.NewDecoder(f).Decode(&ho); err != nil { + return errors.Wrapf(err, "failed to unmarshal json handler file %q", p) + } + } + + l := cli.HandlerLink(ho.ID) + handler, _ := cli.Handler(l) + if handler.ID == "" { + _, err = cli.CreateHandler(ho) + } else { + _, err = cli.ReplaceHandler(l, ho) + } + return err +} + // Replay var ( replayFlags = flag.NewFlagSet("replay", flag.ExitOnError) @@ -1308,7 +1380,7 @@ func showTemplateUsage() { func doShowTemplate(args []string) error { if len(args) != 1 { fmt.Fprintln(os.Stderr, "Must specify one template ID") - showUsage() + showTemplateUsage() os.Exit(2) } @@ -1350,14 +1422,52 @@ func doShowTemplate(args []string) error { return nil } +// Show Handler + +func showHandlerUsage() { + var u = `Usage: kapacitor show-handler [handler ID] + + Show details about a specific handler. 
+` + fmt.Fprintln(os.Stderr, u) +} + +func doShowHandler(args []string) error { + if len(args) != 1 { + fmt.Fprintln(os.Stderr, "Must specify one handler ID") + showHandlerUsage() + os.Exit(2) + } + + h, err := cli.Handler(cli.HandlerLink(args[0])) + if err != nil { + return err + } + + fmt.Println("ID:", h.ID) + fmt.Println("Topics:", fmt.Sprintf("[%s]", strings.Join(h.Topics, ", "))) + fmt.Println("Actions:") + actionOutFmt := "%-30s%s\n" + fmt.Printf(actionOutFmt, "Kind", "Options") + for _, a := range h.Actions { + options, err := json.Marshal(a.Options) + if err != nil { + return errors.Wrap(err, "failed to format action options") + } + fmt.Printf(actionOutFmt, a.Kind, string(options)) + } + return nil +} + // List func listUsage() { - var u = `Usage: kapacitor list (tasks|templates|recordings|replays) [ID or pattern]... + var u = `Usage: kapacitor list (tasks|templates|recordings|replays|topics|handlers|service-tests) [ID or pattern]... + + List tasks, templates, recordings, replays, topics or handlers and their current state. -List tasks, templates, recordings, or replays and their current state. + If no ID or pattern is given then all items will be listed. -If no ID or pattern is given then all items will be listed. ` fmt.Fprintln(os.Stderr, u) } @@ -1376,7 +1486,7 @@ func (t TemplateList) Swap(i, j int) { t[i], t[j] = t[j], t[i] } func doList(args []string) error { if len(args) == 0 { - fmt.Fprintln(os.Stderr, "Must specify 'tasks', 'recordings', or 'replays'") + fmt.Fprintln(os.Stderr, "Must specify 'tasks', 'recordings', 'replays', 'topics', or 'handlers'") listUsage() os.Exit(2) } @@ -1554,8 +1664,80 @@ func doList(args []string) error { fmt.Fprintf(os.Stdout, outFmt, s.Name) } } + case "handlers": + maxID := 2 // len("ID") + maxTopics := 6 // len("Topics") + maxActions := 7 // len("Actions") + // The handlers are returned in sorted order already, no need to sort them here. 
+ type info struct { + ID string + Topics string + Actions string + } + var allHandlers []info + for _, pattern := range patterns { + handlers, err := cli.ListHandlers(&client.ListHandlersOptions{ + Pattern: pattern, + }) + if err != nil { + return err + } + for _, h := range handlers.Handlers { + kinds := make([]string, len(h.Actions)) + for i, a := range h.Actions { + kinds[i] = a.Kind + } + i := info{ + ID: h.ID, + Topics: fmt.Sprintf("[%s]", strings.Join(h.Topics, ", ")), + Actions: fmt.Sprintf("[%s]", strings.Join(kinds, ", ")), + } + if l := len(i.ID); l > maxID { + maxID = l + } + if l := len(i.Topics); l > maxTopics { + maxTopics = l + } + if l := len(i.Actions); l > maxActions { + maxActions = l + } + allHandlers = append(allHandlers, i) + } + } + outFmt := fmt.Sprintf("%%-%dv%%-%dv%%-%dv\n", maxID+1, maxTopics+1, maxActions+1) + fmt.Fprintf(os.Stdout, outFmt, "ID", "Topics", "Actions") + for _, h := range allHandlers { + fmt.Fprintf(os.Stdout, outFmt, h.ID, h.Topics, h.Actions) + } + case "topics": + maxID := 2 // len("ID") + maxLevel := 8 // len("Level") + // The topics are returned in sorted order already, no need to sort them here. + var allTopics []client.Topic + for _, pattern := range patterns { + topics, err := cli.ListTopics(&client.ListTopicsOptions{ + Pattern: pattern, + }) + if err != nil { + return err + } + allTopics = append(allTopics, topics.Topics...) 
+ for _, t := range topics.Topics { + if l := len(t.ID); l > maxID { + maxID = l + } + if l := len(t.Level); l > maxLevel { + maxLevel = l + } + } + } + outFmt := fmt.Sprintf("%%-%dv%%-%dv\n", maxID+1, maxLevel+1) + fmt.Fprintf(os.Stdout, outFmt, "ID", "Level") + for _, t := range allTopics { + fmt.Fprintf(os.Stdout, outFmt, t.ID, t.Level) + } default: - return fmt.Errorf("cannot list '%s' did you mean 'tasks', 'recordings', 'replays' or 'service-tests'?", kind) + return fmt.Errorf("cannot list '%s' did you mean 'tasks', 'recordings', 'replays', 'topics', 'handlers' or 'service-tests'?", kind) } return nil @@ -1563,12 +1745,13 @@ func doList(args []string) error { // Delete func deleteUsage() { - var u = `Usage: kapacitor delete (tasks|templates|recordings|replays) [task|templates|recording|replay ID]... + var u = `Usage: kapacitor delete (tasks|templates|recordings|replays|topics|handlers) [ID or pattern]... - Delete a tasks, templates, recordings or replays. + Delete a tasks, templates, recordings, replays, topics or handlers. If a task is enabled it will be disabled and then deleted. 
+ For example: You can delete task: @@ -1683,6 +1866,36 @@ func doDelete(args []string) error { } } } + case "topics": + for _, pattern := range args[1:] { + topics, err := cli.ListTopics(&client.ListTopicsOptions{ + Pattern: pattern, + }) + if err != nil { + return err + } + for _, t := range topics.Topics { + err := cli.DeleteTopic(t.Link) + if err != nil { + return err + } + } + } + case "handlers": + for _, pattern := range args[1:] { + handlers, err := cli.ListHandlers(&client.ListHandlersOptions{ + Pattern: pattern, + }) + if err != nil { + return err + } + for _, h := range handlers.Handlers { + err := cli.DeleteHandler(h.Link) + if err != nil { + return err + } + } + } default: return fmt.Errorf("cannot delete '%s' did you mean 'tasks', 'recordings' or 'replays'?", kind) } diff --git a/command/command.go b/command/command.go new file mode 100644 index 000000000..da50cc386 --- /dev/null +++ b/command/command.go @@ -0,0 +1,63 @@ +package command + +import ( + "io" + "os/exec" +) + +type Command interface { + Start() error + Wait() error + + Stdin(io.Reader) + Stdout(io.Writer) + Stderr(io.Writer) + StdinPipe() (io.WriteCloser, error) + StdoutPipe() (io.Reader, error) + StderrPipe() (io.Reader, error) + + Kill() +} + +// Spec contains the necessary information to create a new command. +type Spec struct { + Prog string + Args []string + Env []string +} + +// Commander creates new commands. +type Commander interface { + NewCommand(Spec) Command +} + +// ExecCommander implements Commander using the stdlib os/exec package. +type execCommander struct{} + +// ExecCommander creates commands using the os/exec package. +var ExecCommander = execCommander{} + +// Create a new Command using golang exec package and the information. +func (execCommander) NewCommand(s Spec) Command { + c := exec.Command(s.Prog, s.Args...) + c.Env = s.Env + return execCmd{c} +} + +// ExecCmd implements Command using the stdlib os/exec package. 
+type execCmd struct { + *exec.Cmd +} + +func (c execCmd) Stdin(in io.Reader) { c.Cmd.Stdin = in } +func (c execCmd) Stdout(out io.Writer) { c.Cmd.Stdout = out } +func (c execCmd) Stderr(err io.Writer) { c.Cmd.Stderr = err } + +func (c execCmd) StdoutPipe() (io.Reader, error) { return c.Cmd.StdoutPipe() } +func (c execCmd) StderrPipe() (io.Reader, error) { return c.Cmd.StderrPipe() } + +func (c execCmd) Kill() { + if c.Cmd.Process != nil { + c.Cmd.Process.Kill() + } +} diff --git a/command/commandtest/commandtest.go b/command/commandtest/commandtest.go new file mode 100644 index 000000000..85df109a3 --- /dev/null +++ b/command/commandtest/commandtest.go @@ -0,0 +1,139 @@ +package commandtest + +import ( + "bytes" + "fmt" + "io" + "io/ioutil" + "reflect" + "sync" + + "github.com/influxdata/kapacitor/command" +) + +type Commander struct { + sync.Mutex + cmds []*Command +} + +func (c *Commander) NewCommand(s command.Spec) command.Command { + c.Lock() + defer c.Unlock() + cmd := &Command{ + Spec: s, + } + c.cmds = append(c.cmds, cmd) + return cmd +} + +func (c *Commander) Commands() []*Command { + c.Lock() + defer c.Unlock() + return c.cmds +} + +type Command struct { + sync.Mutex + Spec command.Spec + + Started bool + Waited bool + Killed bool + StdinData []byte + + StdinPipeCalled bool + StdoutPipeCalled bool + StderrPipeCalled bool + + stdin io.Reader + stdinR *io.PipeReader + stdinW *io.PipeWriter +} + +func (c *Command) Compare(o *Command) error { + c.Lock() + o.Lock() + defer c.Unlock() + defer o.Unlock() + + if got, exp := o.Spec, c.Spec; !reflect.DeepEqual(got, exp) { + return fmt.Errorf("unexpected command infos value: got %v exp %v", got, exp) + } + if got, exp := o.Started, c.Started; got != exp { + return fmt.Errorf("unexpected started value: got %v exp %v", got, exp) + } + if got, exp := o.Waited, c.Waited; got != exp { + return fmt.Errorf("unexpected waited value: got %v exp %v", got, exp) + } + if got, exp := o.Killed, c.Killed; got != exp { + return 
fmt.Errorf("unexpected killed value: got %v exp %v", got, exp) + } + if got, exp := o.StdinData, c.StdinData; !bytes.Equal(got, exp) { + return fmt.Errorf("unexpected stdin data value:\ngot\n%q\nexp\n%q\n", string(got), string(exp)) + } + + if got, exp := o.StdinPipeCalled, c.StdinPipeCalled; got != exp { + return fmt.Errorf("unexpected StdinPipeCalled value: got %v exp %v", got, exp) + } + if got, exp := o.StdoutPipeCalled, c.StdoutPipeCalled; got != exp { + return fmt.Errorf("unexpected StdoutPipeCalled value: got %v exp %v", got, exp) + } + if got, exp := o.StderrPipeCalled, c.StderrPipeCalled; got != exp { + return fmt.Errorf("unexpected StderrPipeCalled value: got %v exp %v", got, exp) + } + return nil +} + +func (c *Command) Start() error { + c.Lock() + defer c.Unlock() + c.Started = true + data, err := ioutil.ReadAll(c.stdin) + if err != nil { + return err + } + c.StdinData = data + return nil +} +func (c *Command) Wait() error { + c.Lock() + c.Waited = true + c.Unlock() + return nil +} +func (c *Command) Stdin(in io.Reader) { + c.Lock() + c.stdin = in + c.Unlock() +} +func (c *Command) Stdout(out io.Writer) { + // Not useful to keep value so just ignore it +} +func (c *Command) Stderr(err io.Writer) { + // Not useful to keep value so just ignore it +} +func (c *Command) Kill() { + c.Lock() + c.Killed = true + c.Unlock() +} +func (c *Command) StdinPipe() (io.WriteCloser, error) { + c.Lock() + defer c.Unlock() + c.StdinPipeCalled = true + c.stdinR, c.stdinW = io.Pipe() + c.stdin = c.stdinR + return c.stdinW, nil +} +func (c *Command) StdoutPipe() (io.Reader, error) { + c.Lock() + defer c.Unlock() + c.StdoutPipeCalled = true + return new(bytes.Buffer), nil +} +func (c *Command) StderrPipe() (io.Reader, error) { + c.Lock() + defer c.Unlock() + c.StderrPipeCalled = true + return new(bytes.Buffer), nil +} diff --git a/http_out.go b/http_out.go index 793947992..9e9781210 100644 --- a/http_out.go +++ b/http_out.go @@ -63,7 +63,6 @@ func (h *HTTPOutNode) 
runOut([]byte) error { p := path.Join("/tasks/", h.et.Task.ID, h.c.Endpoint) r := []httpd.Route{{ - Name: h.Name(), Method: "GET", Pattern: p, HandlerFunc: hndl, diff --git a/integrations/batcher_test.go b/integrations/batcher_test.go index 97cddc305..8391e8870 100644 --- a/integrations/batcher_test.go +++ b/integrations/batcher_test.go @@ -4,6 +4,7 @@ import ( "encoding/json" "fmt" "io" + "log" "net/http" "net/http/httptest" "os" @@ -15,7 +16,10 @@ import ( "github.com/influxdata/influxdb/influxql" imodels "github.com/influxdata/influxdb/models" "github.com/influxdata/kapacitor" + "github.com/influxdata/kapacitor/alert" "github.com/influxdata/kapacitor/clock" + alertservice "github.com/influxdata/kapacitor/services/alert" + "github.com/influxdata/kapacitor/services/storage/storagetest" "github.com/influxdata/wlog" ) @@ -23,7 +27,7 @@ func TestBatch_InvalidQuery(t *testing.T) { // Create a new execution env tm := kapacitor.NewTaskMaster("invalidQuery", logService) - tm.HTTPDService = httpService + tm.HTTPDService = newHTTPDService() tm.TaskStore = taskStore{} tm.DeadmanService = deadman{} tm.Open() @@ -1352,7 +1356,7 @@ batch func TestBatch_AlertStateChangesOnly(t *testing.T) { requestCount := int32(0) ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - ad := kapacitor.AlertData{} + ad := alertservice.AlertData{} dec := json.NewDecoder(r.Body) err := dec.Decode(&ad) if err != nil { @@ -1360,23 +1364,23 @@ func TestBatch_AlertStateChangesOnly(t *testing.T) { } atomic.AddInt32(&requestCount, 1) if rc := atomic.LoadInt32(&requestCount); rc == 1 { - expAd := kapacitor.AlertData{ + expAd := alertservice.AlertData{ ID: "cpu_usage_idle:cpu=cpu-total", Message: "cpu_usage_idle:cpu=cpu-total is CRITICAL", Time: time.Date(1971, 1, 1, 0, 0, 0, 0, time.UTC), - Level: kapacitor.CritAlert, + Level: alert.Critical, } ad.Data = influxql.Result{} if eq, msg := compareAlertData(expAd, ad); !eq { t.Error(msg) } } else { - expAd := 
kapacitor.AlertData{ + expAd := alertservice.AlertData{ ID: "cpu_usage_idle:cpu=cpu-total", Message: "cpu_usage_idle:cpu=cpu-total is OK", Time: time.Date(1971, 1, 1, 0, 0, 38, 0, time.UTC), Duration: 38 * time.Second, - Level: kapacitor.OKAlert, + Level: alert.OK, } ad.Data = influxql.Result{} if eq, msg := compareAlertData(expAd, ad); !eq { @@ -1417,7 +1421,7 @@ batch func TestBatch_AlertStateChangesOnlyExpired(t *testing.T) { requestCount := int32(0) ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - ad := kapacitor.AlertData{} + ad := alertservice.AlertData{} dec := json.NewDecoder(r.Body) err := dec.Decode(&ad) if err != nil { @@ -1425,24 +1429,24 @@ func TestBatch_AlertStateChangesOnlyExpired(t *testing.T) { } // We don't care about the data for this test ad.Data = influxql.Result{} - var expAd kapacitor.AlertData + var expAd alertservice.AlertData atomic.AddInt32(&requestCount, 1) rc := atomic.LoadInt32(&requestCount) if rc < 3 { - expAd = kapacitor.AlertData{ + expAd = alertservice.AlertData{ ID: "cpu_usage_idle:cpu=cpu-total", Message: "cpu_usage_idle:cpu=cpu-total is CRITICAL", Time: time.Date(1971, 1, 1, 0, 0, int(rc-1)*20, 0, time.UTC), Duration: time.Duration(rc-1) * 20 * time.Second, - Level: kapacitor.CritAlert, + Level: alert.Critical, } } else { - expAd = kapacitor.AlertData{ + expAd = alertservice.AlertData{ ID: "cpu_usage_idle:cpu=cpu-total", Message: "cpu_usage_idle:cpu=cpu-total is OK", Time: time.Date(1971, 1, 1, 0, 0, 38, 0, time.UTC), Duration: 38 * time.Second, - Level: kapacitor.OKAlert, + Level: alert.OK, } } if eq, msg := compareAlertData(expAd, ad); !eq { @@ -2333,9 +2337,17 @@ func testBatcher(t *testing.T, name, script string) (clock.Setter, *kapacitor.Ex // Create a new execution env tm := kapacitor.NewTaskMaster("testBatcher", logService) - tm.HTTPDService = httpService + httpdService := newHTTPDService() + tm.HTTPDService = httpdService tm.TaskStore = taskStore{} tm.DeadmanService = deadman{} 
+ as := alertservice.NewService(alertservice.NewConfig(), logService.NewLogger("[alert] ", log.LstdFlags)) + as.StorageService = storagetest.New() + as.HTTPDService = httpdService + if err := as.Open(); err != nil { + t.Fatal(err) + } + tm.AlertService = as tm.Open() // Create task diff --git a/integrations/helpers_test.go b/integrations/helpers_test.go index 5ec86c2ab..63842be88 100644 --- a/integrations/helpers_test.go +++ b/integrations/helpers_test.go @@ -12,10 +12,24 @@ import ( "github.com/influxdata/influxdb/influxql" "github.com/influxdata/kapacitor" "github.com/influxdata/kapacitor/influxdb" + alertservice "github.com/influxdata/kapacitor/services/alert" + "github.com/influxdata/kapacitor/services/httpd" k8s "github.com/influxdata/kapacitor/services/k8s/client" "github.com/influxdata/kapacitor/udf" ) +func newHTTPDService() *httpd.Service { + // create API server + config := httpd.NewConfig() + config.BindAddress = ":0" // Choose port dynamically + httpService := httpd.NewService(config, "localhost", logService.NewLogger("[http] ", log.LstdFlags), logService) + err := httpService.Open() + if err != nil { + panic(err) + } + return httpService +} + type MockInfluxDBService struct { ts *httptest.Server } @@ -103,7 +117,7 @@ func compareResultsIgnoreSeriesOrder(exp, got kapacitor.Result) (bool, string) { return true, "" } -func compareAlertData(exp, got kapacitor.AlertData) (bool, string) { +func compareAlertData(exp, got alertservice.AlertData) (bool, string) { // Pull out Result for comparison expData := kapacitor.Result(exp.Data) exp.Data = influxql.Result{} diff --git a/integrations/streamer_test.go b/integrations/streamer_test.go index cd5fa028b..7b96441eb 100644 --- a/integrations/streamer_test.go +++ b/integrations/streamer_test.go @@ -6,9 +6,9 @@ import ( "fmt" "io/ioutil" "log" - "net" "net/http" "net/http/httptest" + "net/mail" "os" "path" "path/filepath" @@ -22,26 +22,41 @@ import ( "github.com/influxdata/influxdb/influxql" imodels 
"github.com/influxdata/influxdb/models" "github.com/influxdata/kapacitor" + "github.com/influxdata/kapacitor/alert" "github.com/influxdata/kapacitor/clock" + "github.com/influxdata/kapacitor/command" + "github.com/influxdata/kapacitor/command/commandtest" "github.com/influxdata/kapacitor/models" + alertservice "github.com/influxdata/kapacitor/services/alert" + "github.com/influxdata/kapacitor/services/alert/alerttest" "github.com/influxdata/kapacitor/services/alerta" + "github.com/influxdata/kapacitor/services/alerta/alertatest" "github.com/influxdata/kapacitor/services/hipchat" - "github.com/influxdata/kapacitor/services/httpd" + "github.com/influxdata/kapacitor/services/hipchat/hipchattest" k8s "github.com/influxdata/kapacitor/services/k8s/client" "github.com/influxdata/kapacitor/services/logging/loggingtest" "github.com/influxdata/kapacitor/services/opsgenie" + "github.com/influxdata/kapacitor/services/opsgenie/opsgenietest" "github.com/influxdata/kapacitor/services/pagerduty" + "github.com/influxdata/kapacitor/services/pagerduty/pagerdutytest" "github.com/influxdata/kapacitor/services/sensu" + "github.com/influxdata/kapacitor/services/sensu/sensutest" "github.com/influxdata/kapacitor/services/slack" + "github.com/influxdata/kapacitor/services/slack/slacktest" + "github.com/influxdata/kapacitor/services/smtp" + "github.com/influxdata/kapacitor/services/smtp/smtptest" + "github.com/influxdata/kapacitor/services/storage/storagetest" "github.com/influxdata/kapacitor/services/talk" + "github.com/influxdata/kapacitor/services/talk/talktest" "github.com/influxdata/kapacitor/services/telegram" + "github.com/influxdata/kapacitor/services/telegram/telegramtest" "github.com/influxdata/kapacitor/services/victorops" + "github.com/influxdata/kapacitor/services/victorops/victoropstest" "github.com/influxdata/kapacitor/udf" "github.com/influxdata/kapacitor/udf/test" "github.com/influxdata/wlog" ) -var httpService *httpd.Service var logService = loggingtest.New() var dbrps = 
[]kapacitor.DBRP{ @@ -53,14 +68,6 @@ var dbrps = []kapacitor.DBRP{ func init() { wlog.SetLevel(wlog.OFF) - // create API server - config := httpd.NewConfig() - config.BindAddress = ":0" // Choose port dynamically - httpService = httpd.NewService(config, "localhost", logService.NewLogger("[http] ", log.LstdFlags), logService) - err := httpService.Open() - if err != nil { - panic(err) - } } func TestStream_Derivative(t *testing.T) { @@ -5220,7 +5227,7 @@ stream func TestStream_Alert(t *testing.T) { requestCount := int32(0) ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - ad := kapacitor.AlertData{} + ad := alertservice.AlertData{} dec := json.NewDecoder(r.Body) err := dec.Decode(&ad) if err != nil { @@ -5228,12 +5235,12 @@ func TestStream_Alert(t *testing.T) { } atomic.AddInt32(&requestCount, 1) rc := atomic.LoadInt32(&requestCount) - expAd := kapacitor.AlertData{ + expAd := alertservice.AlertData{ ID: "kapacitor/cpu/serverA", Message: "kapacitor/cpu/serverA is CRITICAL", Details: "details", Time: time.Date(1971, 1, 1, 0, 0, 10, 0, time.UTC), - Level: kapacitor.CritAlert, + Level: alert.Critical, Data: influxql.Result{ Series: imodels.Rows{ { @@ -5310,7 +5317,7 @@ stream func TestStream_Alert_NoRecoveries(t *testing.T) { requestCount := int32(0) ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - ad := kapacitor.AlertData{} + ad := alertservice.AlertData{} dec := json.NewDecoder(r.Body) err := dec.Decode(&ad) if err != nil { @@ -5318,15 +5325,15 @@ func TestStream_Alert_NoRecoveries(t *testing.T) { } atomic.AddInt32(&requestCount, 1) rc := atomic.LoadInt32(&requestCount) - var expAd kapacitor.AlertData + var expAd alertservice.AlertData switch rc { case 1: - expAd = kapacitor.AlertData{ + expAd = alertservice.AlertData{ ID: "kapacitor/cpu/serverA", Message: "kapacitor/cpu/serverA is WARNING", Time: time.Date(1971, 1, 1, 0, 0, 0, 0, time.UTC), Duration: 0, - Level: kapacitor.WarnAlert, + 
Level: alert.Warning, Data: influxql.Result{ Series: imodels.Rows{ { @@ -5342,12 +5349,12 @@ func TestStream_Alert_NoRecoveries(t *testing.T) { }, } case 2: - expAd = kapacitor.AlertData{ + expAd = alertservice.AlertData{ ID: "kapacitor/cpu/serverA", Message: "kapacitor/cpu/serverA is INFO", Time: time.Date(1971, 1, 1, 0, 0, 2, 0, time.UTC), Duration: 0, - Level: kapacitor.InfoAlert, + Level: alert.Info, Data: influxql.Result{ Series: imodels.Rows{ { @@ -5363,12 +5370,12 @@ func TestStream_Alert_NoRecoveries(t *testing.T) { }, } case 3: - expAd = kapacitor.AlertData{ + expAd = alertservice.AlertData{ ID: "kapacitor/cpu/serverA", Message: "kapacitor/cpu/serverA is WARNING", Time: time.Date(1971, 1, 1, 0, 0, 3, 0, time.UTC), Duration: time.Second, - Level: kapacitor.WarnAlert, + Level: alert.Warning, Data: influxql.Result{ Series: imodels.Rows{ { @@ -5384,12 +5391,12 @@ func TestStream_Alert_NoRecoveries(t *testing.T) { }, } case 4: - expAd = kapacitor.AlertData{ + expAd = alertservice.AlertData{ ID: "kapacitor/cpu/serverA", Message: "kapacitor/cpu/serverA is WARNING", Time: time.Date(1971, 1, 1, 0, 0, 4, 0, time.UTC), Duration: 2 * time.Second, - Level: kapacitor.WarnAlert, + Level: alert.Warning, Data: influxql.Result{ Series: imodels.Rows{ { @@ -5405,12 +5412,12 @@ func TestStream_Alert_NoRecoveries(t *testing.T) { }, } case 5: - expAd = kapacitor.AlertData{ + expAd = alertservice.AlertData{ ID: "kapacitor/cpu/serverA", Message: "kapacitor/cpu/serverA is CRITICAL", Time: time.Date(1971, 1, 1, 0, 0, 5, 0, time.UTC), Duration: 3 * time.Second, - Level: kapacitor.CritAlert, + Level: alert.Critical, Data: influxql.Result{ Series: imodels.Rows{ { @@ -5426,12 +5433,12 @@ func TestStream_Alert_NoRecoveries(t *testing.T) { }, } case 6: - expAd = kapacitor.AlertData{ + expAd = alertservice.AlertData{ ID: "kapacitor/cpu/serverA", Message: "kapacitor/cpu/serverA is INFO", Time: time.Date(1971, 1, 1, 0, 0, 7, 0, time.UTC), Duration: 0, - Level: kapacitor.InfoAlert, + Level: 
alert.Info, Data: influxql.Result{ Series: imodels.Rows{ { @@ -5497,7 +5504,7 @@ stream func TestStream_Alert_WithReset_0(t *testing.T) { requestCount := int32(0) ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - ad := kapacitor.AlertData{} + ad := alertservice.AlertData{} dec := json.NewDecoder(r.Body) err := dec.Decode(&ad) if err != nil { @@ -5505,15 +5512,15 @@ func TestStream_Alert_WithReset_0(t *testing.T) { } atomic.AddInt32(&requestCount, 1) rc := atomic.LoadInt32(&requestCount) - var expAd kapacitor.AlertData + var expAd alertservice.AlertData switch rc { case 1: - expAd = kapacitor.AlertData{ + expAd = alertservice.AlertData{ ID: "kapacitor/cpu/serverA", Message: "kapacitor/cpu/serverA is INFO", Details: "details", Time: time.Date(1971, 1, 1, 0, 0, 0, 0, time.UTC), - Level: kapacitor.InfoAlert, + Level: alert.Info, Data: influxql.Result{ Series: imodels.Rows{ { @@ -5529,13 +5536,13 @@ func TestStream_Alert_WithReset_0(t *testing.T) { }, } case 2: - expAd = kapacitor.AlertData{ + expAd = alertservice.AlertData{ ID: "kapacitor/cpu/serverA", Message: "kapacitor/cpu/serverA is INFO", Details: "details", Time: time.Date(1971, 1, 1, 0, 0, 1, 0, time.UTC), Duration: time.Second, - Level: kapacitor.InfoAlert, + Level: alert.Info, Data: influxql.Result{ Series: imodels.Rows{ { @@ -5551,13 +5558,13 @@ func TestStream_Alert_WithReset_0(t *testing.T) { }, } case 3: - expAd = kapacitor.AlertData{ + expAd = alertservice.AlertData{ ID: "kapacitor/cpu/serverA", Message: "kapacitor/cpu/serverA is INFO", Details: "details", Time: time.Date(1971, 1, 1, 0, 0, 2, 0, time.UTC), Duration: 2 * time.Second, - Level: kapacitor.InfoAlert, + Level: alert.Info, Data: influxql.Result{ Series: imodels.Rows{ { @@ -5573,13 +5580,13 @@ func TestStream_Alert_WithReset_0(t *testing.T) { }, } case 4: - expAd = kapacitor.AlertData{ + expAd = alertservice.AlertData{ ID: "kapacitor/cpu/serverA", Message: "kapacitor/cpu/serverA is OK", Details: "details", 
Time: time.Date(1971, 1, 1, 0, 0, 3, 0, time.UTC), Duration: 3 * time.Second, - Level: kapacitor.OKAlert, + Level: alert.OK, Data: influxql.Result{ Series: imodels.Rows{ { @@ -5595,13 +5602,13 @@ func TestStream_Alert_WithReset_0(t *testing.T) { }, } case 5: - expAd = kapacitor.AlertData{ + expAd = alertservice.AlertData{ ID: "kapacitor/cpu/serverA", Message: "kapacitor/cpu/serverA is INFO", Details: "details", Time: time.Date(1971, 1, 1, 0, 0, 4, 0, time.UTC), Duration: 0 * time.Second, - Level: kapacitor.InfoAlert, + Level: alert.Info, Data: influxql.Result{ Series: imodels.Rows{ { @@ -5617,13 +5624,13 @@ func TestStream_Alert_WithReset_0(t *testing.T) { }, } case 6: - expAd = kapacitor.AlertData{ + expAd = alertservice.AlertData{ ID: "kapacitor/cpu/serverA", Message: "kapacitor/cpu/serverA is WARNING", Details: "details", Time: time.Date(1971, 1, 1, 0, 0, 5, 0, time.UTC), Duration: 1 * time.Second, - Level: kapacitor.WarnAlert, + Level: alert.Warning, Data: influxql.Result{ Series: imodels.Rows{ { @@ -5639,13 +5646,13 @@ func TestStream_Alert_WithReset_0(t *testing.T) { }, } case 7: - expAd = kapacitor.AlertData{ + expAd = alertservice.AlertData{ ID: "kapacitor/cpu/serverA", Message: "kapacitor/cpu/serverA is WARNING", Details: "details", Time: time.Date(1971, 1, 1, 0, 0, 6, 0, time.UTC), Duration: 2 * time.Second, - Level: kapacitor.WarnAlert, + Level: alert.Warning, Data: influxql.Result{ Series: imodels.Rows{ { @@ -5661,13 +5668,13 @@ func TestStream_Alert_WithReset_0(t *testing.T) { }, } case 8: - expAd = kapacitor.AlertData{ + expAd = alertservice.AlertData{ ID: "kapacitor/cpu/serverA", Message: "kapacitor/cpu/serverA is OK", Details: "details", Time: time.Date(1971, 1, 1, 0, 0, 7, 0, time.UTC), Duration: 3 * time.Second, - Level: kapacitor.OKAlert, + Level: alert.OK, Data: influxql.Result{ Series: imodels.Rows{ { @@ -5683,13 +5690,13 @@ func TestStream_Alert_WithReset_0(t *testing.T) { }, } case 9: - expAd = kapacitor.AlertData{ + expAd = 
alertservice.AlertData{ ID: "kapacitor/cpu/serverA", Message: "kapacitor/cpu/serverA is INFO", Details: "details", Time: time.Date(1971, 1, 1, 0, 0, 8, 0, time.UTC), Duration: 0 * time.Second, - Level: kapacitor.InfoAlert, + Level: alert.Info, Data: influxql.Result{ Series: imodels.Rows{ { @@ -5705,13 +5712,13 @@ func TestStream_Alert_WithReset_0(t *testing.T) { }, } case 10: - expAd = kapacitor.AlertData{ + expAd = alertservice.AlertData{ ID: "kapacitor/cpu/serverA", Message: "kapacitor/cpu/serverA is WARNING", Details: "details", Time: time.Date(1971, 1, 1, 0, 0, 9, 0, time.UTC), Duration: 1 * time.Second, - Level: kapacitor.WarnAlert, + Level: alert.Warning, Data: influxql.Result{ Series: imodels.Rows{ { @@ -5727,13 +5734,13 @@ func TestStream_Alert_WithReset_0(t *testing.T) { }, } case 11: - expAd = kapacitor.AlertData{ + expAd = alertservice.AlertData{ ID: "kapacitor/cpu/serverA", Message: "kapacitor/cpu/serverA is CRITICAL", Details: "details", Time: time.Date(1971, 1, 1, 0, 0, 10, 0, time.UTC), Duration: 2 * time.Second, - Level: kapacitor.CritAlert, + Level: alert.Critical, Data: influxql.Result{ Series: imodels.Rows{ { @@ -5749,13 +5756,13 @@ func TestStream_Alert_WithReset_0(t *testing.T) { }, } case 12: - expAd = kapacitor.AlertData{ + expAd = alertservice.AlertData{ ID: "kapacitor/cpu/serverA", Message: "kapacitor/cpu/serverA is OK", Details: "details", Time: time.Date(1971, 1, 1, 0, 0, 11, 0, time.UTC), Duration: 3 * time.Second, - Level: kapacitor.OKAlert, + Level: alert.OK, Data: influxql.Result{ Series: imodels.Rows{ { @@ -5835,7 +5842,7 @@ stream func TestStream_Alert_WithReset_1(t *testing.T) { requestCount := int32(0) ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - ad := kapacitor.AlertData{} + ad := alertservice.AlertData{} dec := json.NewDecoder(r.Body) err := dec.Decode(&ad) if err != nil { @@ -5843,15 +5850,15 @@ func TestStream_Alert_WithReset_1(t *testing.T) { } atomic.AddInt32(&requestCount, 1) rc 
:= atomic.LoadInt32(&requestCount) - var expAd kapacitor.AlertData + var expAd alertservice.AlertData switch rc { case 1: - expAd = kapacitor.AlertData{ + expAd = alertservice.AlertData{ ID: "kapacitor/cpu/serverA", Message: "kapacitor/cpu/serverA is INFO", Details: "details", Time: time.Date(1971, 1, 1, 0, 0, 0, 0, time.UTC), - Level: kapacitor.InfoAlert, + Level: alert.Info, Data: influxql.Result{ Series: imodels.Rows{ { @@ -5867,13 +5874,13 @@ func TestStream_Alert_WithReset_1(t *testing.T) { }, } case 2: - expAd = kapacitor.AlertData{ + expAd = alertservice.AlertData{ ID: "kapacitor/cpu/serverA", Message: "kapacitor/cpu/serverA is INFO", Details: "details", Time: time.Date(1971, 1, 1, 0, 0, 1, 0, time.UTC), Duration: time.Second, - Level: kapacitor.InfoAlert, + Level: alert.Info, Data: influxql.Result{ Series: imodels.Rows{ { @@ -5889,13 +5896,13 @@ func TestStream_Alert_WithReset_1(t *testing.T) { }, } case 3: - expAd = kapacitor.AlertData{ + expAd = alertservice.AlertData{ ID: "kapacitor/cpu/serverA", Message: "kapacitor/cpu/serverA is INFO", Details: "details", Time: time.Date(1971, 1, 1, 0, 0, 2, 0, time.UTC), Duration: 2 * time.Second, - Level: kapacitor.InfoAlert, + Level: alert.Info, Data: influxql.Result{ Series: imodels.Rows{ { @@ -5911,13 +5918,13 @@ func TestStream_Alert_WithReset_1(t *testing.T) { }, } case 4: - expAd = kapacitor.AlertData{ + expAd = alertservice.AlertData{ ID: "kapacitor/cpu/serverA", Message: "kapacitor/cpu/serverA is OK", Details: "details", Time: time.Date(1971, 1, 1, 0, 0, 3, 0, time.UTC), Duration: 3 * time.Second, - Level: kapacitor.OKAlert, + Level: alert.OK, Data: influxql.Result{ Series: imodels.Rows{ { @@ -5933,13 +5940,13 @@ func TestStream_Alert_WithReset_1(t *testing.T) { }, } case 5: - expAd = kapacitor.AlertData{ + expAd = alertservice.AlertData{ ID: "kapacitor/cpu/serverA", Message: "kapacitor/cpu/serverA is INFO", Details: "details", Time: time.Date(1971, 1, 1, 0, 0, 4, 0, time.UTC), Duration: 0 * time.Second, - 
Level: kapacitor.InfoAlert, + Level: alert.Info, Data: influxql.Result{ Series: imodels.Rows{ { @@ -5955,13 +5962,13 @@ func TestStream_Alert_WithReset_1(t *testing.T) { }, } case 6: - expAd = kapacitor.AlertData{ + expAd = alertservice.AlertData{ ID: "kapacitor/cpu/serverA", Message: "kapacitor/cpu/serverA is WARNING", Details: "details", Time: time.Date(1971, 1, 1, 0, 0, 5, 0, time.UTC), Duration: 1 * time.Second, - Level: kapacitor.WarnAlert, + Level: alert.Warning, Data: influxql.Result{ Series: imodels.Rows{ { @@ -5977,13 +5984,13 @@ func TestStream_Alert_WithReset_1(t *testing.T) { }, } case 7: - expAd = kapacitor.AlertData{ + expAd = alertservice.AlertData{ ID: "kapacitor/cpu/serverA", Message: "kapacitor/cpu/serverA is INFO", Details: "details", Time: time.Date(1971, 1, 1, 0, 0, 6, 0, time.UTC), Duration: 2 * time.Second, - Level: kapacitor.InfoAlert, + Level: alert.Info, Data: influxql.Result{ Series: imodels.Rows{ { @@ -5999,13 +6006,13 @@ func TestStream_Alert_WithReset_1(t *testing.T) { }, } case 8: - expAd = kapacitor.AlertData{ + expAd = alertservice.AlertData{ ID: "kapacitor/cpu/serverA", Message: "kapacitor/cpu/serverA is OK", Details: "details", Time: time.Date(1971, 1, 1, 0, 0, 7, 0, time.UTC), Duration: 3 * time.Second, - Level: kapacitor.OKAlert, + Level: alert.OK, Data: influxql.Result{ Series: imodels.Rows{ { @@ -6021,13 +6028,13 @@ func TestStream_Alert_WithReset_1(t *testing.T) { }, } case 9: - expAd = kapacitor.AlertData{ + expAd = alertservice.AlertData{ ID: "kapacitor/cpu/serverA", Message: "kapacitor/cpu/serverA is INFO", Details: "details", Time: time.Date(1971, 1, 1, 0, 0, 8, 0, time.UTC), Duration: 0 * time.Second, - Level: kapacitor.InfoAlert, + Level: alert.Info, Data: influxql.Result{ Series: imodels.Rows{ { @@ -6043,13 +6050,13 @@ func TestStream_Alert_WithReset_1(t *testing.T) { }, } case 10: - expAd = kapacitor.AlertData{ + expAd = alertservice.AlertData{ ID: "kapacitor/cpu/serverA", Message: "kapacitor/cpu/serverA is WARNING", 
Details: "details", Time: time.Date(1971, 1, 1, 0, 0, 9, 0, time.UTC), Duration: 1 * time.Second, - Level: kapacitor.WarnAlert, + Level: alert.Warning, Data: influxql.Result{ Series: imodels.Rows{ { @@ -6065,13 +6072,13 @@ func TestStream_Alert_WithReset_1(t *testing.T) { }, } case 11: - expAd = kapacitor.AlertData{ + expAd = alertservice.AlertData{ ID: "kapacitor/cpu/serverA", Message: "kapacitor/cpu/serverA is CRITICAL", Details: "details", Time: time.Date(1971, 1, 1, 0, 0, 10, 0, time.UTC), Duration: 2 * time.Second, - Level: kapacitor.CritAlert, + Level: alert.Critical, Data: influxql.Result{ Series: imodels.Rows{ { @@ -6087,13 +6094,13 @@ func TestStream_Alert_WithReset_1(t *testing.T) { }, } case 12: - expAd = kapacitor.AlertData{ + expAd = alertservice.AlertData{ ID: "kapacitor/cpu/serverA", Message: "kapacitor/cpu/serverA is WARNING", Details: "details", Time: time.Date(1971, 1, 1, 0, 0, 11, 0, time.UTC), Duration: 3 * time.Second, - Level: kapacitor.WarnAlert, + Level: alert.Warning, Data: influxql.Result{ Series: imodels.Rows{ { @@ -6109,13 +6116,13 @@ func TestStream_Alert_WithReset_1(t *testing.T) { }, } case 13: - expAd = kapacitor.AlertData{ + expAd = alertservice.AlertData{ ID: "kapacitor/cpu/serverA", Message: "kapacitor/cpu/serverA is WARNING", Details: "details", Time: time.Date(1971, 1, 1, 0, 0, 12, 0, time.UTC), Duration: 4 * time.Second, - Level: kapacitor.WarnAlert, + Level: alert.Warning, Data: influxql.Result{ Series: imodels.Rows{ { @@ -6131,13 +6138,13 @@ func TestStream_Alert_WithReset_1(t *testing.T) { }, } case 14: - expAd = kapacitor.AlertData{ + expAd = alertservice.AlertData{ ID: "kapacitor/cpu/serverA", Message: "kapacitor/cpu/serverA is INFO", Details: "details", Time: time.Date(1971, 1, 1, 0, 0, 13, 0, time.UTC), Duration: 5 * time.Second, - Level: kapacitor.InfoAlert, + Level: alert.Info, Data: influxql.Result{ Series: imodels.Rows{ { @@ -6153,13 +6160,13 @@ func TestStream_Alert_WithReset_1(t *testing.T) { }, } case 15: - expAd 
= kapacitor.AlertData{ + expAd = alertservice.AlertData{ ID: "kapacitor/cpu/serverA", Message: "kapacitor/cpu/serverA is OK", Details: "details", Time: time.Date(1971, 1, 1, 0, 0, 14, 0, time.UTC), Duration: 6 * time.Second, - Level: kapacitor.OKAlert, + Level: alert.OK, Data: influxql.Result{ Series: imodels.Rows{ { @@ -6239,24 +6246,24 @@ stream func TestStream_AlertDuration(t *testing.T) { requestCount := int32(0) ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - ad := kapacitor.AlertData{} + ad := alertservice.AlertData{} dec := json.NewDecoder(r.Body) err := dec.Decode(&ad) if err != nil { t.Fatal(err) } atomic.AddInt32(&requestCount, 1) - var expAd kapacitor.AlertData + var expAd alertservice.AlertData rc := atomic.LoadInt32(&requestCount) switch rc { case 1: - expAd = kapacitor.AlertData{ + expAd = alertservice.AlertData{ ID: "kapacitor/cpu/serverA", Message: "kapacitor/cpu/serverA is CRITICAL", Details: "details", Time: time.Date(1971, 1, 1, 0, 0, 0, 0, time.UTC), Duration: 0, - Level: kapacitor.CritAlert, + Level: alert.Critical, Data: influxql.Result{ Series: imodels.Rows{ { @@ -6272,13 +6279,13 @@ func TestStream_AlertDuration(t *testing.T) { }, } case 2: - expAd = kapacitor.AlertData{ + expAd = alertservice.AlertData{ ID: "kapacitor/cpu/serverA", Message: "kapacitor/cpu/serverA is WARNING", Details: "details", Time: time.Date(1971, 1, 1, 0, 0, 2, 0, time.UTC), Duration: 2 * time.Second, - Level: kapacitor.WarnAlert, + Level: alert.Warning, Data: influxql.Result{ Series: imodels.Rows{ { @@ -6294,13 +6301,13 @@ func TestStream_AlertDuration(t *testing.T) { }, } case 3: - expAd = kapacitor.AlertData{ + expAd = alertservice.AlertData{ ID: "kapacitor/cpu/serverA", Message: "kapacitor/cpu/serverA is OK", Details: "details", Time: time.Date(1971, 1, 1, 0, 0, 4, 0, time.UTC), Duration: 4 * time.Second, - Level: kapacitor.OKAlert, + Level: alert.OK, Data: influxql.Result{ Series: imodels.Rows{ { @@ -6316,13 +6323,13 @@ 
func TestStream_AlertDuration(t *testing.T) { }, } case 4: - expAd = kapacitor.AlertData{ + expAd = alertservice.AlertData{ ID: "kapacitor/cpu/serverA", Message: "kapacitor/cpu/serverA is WARNING", Details: "details", Time: time.Date(1971, 1, 1, 0, 0, 5, 0, time.UTC), Duration: 0, - Level: kapacitor.WarnAlert, + Level: alert.Warning, Data: influxql.Result{ Series: imodels.Rows{ { @@ -6338,13 +6345,13 @@ func TestStream_AlertDuration(t *testing.T) { }, } case 5: - expAd = kapacitor.AlertData{ + expAd = alertservice.AlertData{ ID: "kapacitor/cpu/serverA", Message: "kapacitor/cpu/serverA is OK", Details: "details", Time: time.Date(1971, 1, 1, 0, 0, 8, 0, time.UTC), Duration: 3 * time.Second, - Level: kapacitor.OKAlert, + Level: alert.OK, Data: influxql.Result{ Series: imodels.Rows{ { @@ -6409,54 +6416,11 @@ stream } func TestStream_AlertSensu(t *testing.T) { - requestCount := int32(0) - addr, err := net.ResolveTCPAddr("tcp", "localhost:0") - if err != nil { - t.Fatal(err) - } - listen, err := net.ListenTCP("tcp", addr) + ts, err := sensutest.NewServer() if err != nil { t.Fatal(err) } - defer listen.Close() - go func() { - for { - conn, err := listen.Accept() - if err != nil { - return - } - func() { - defer conn.Close() - - atomic.AddInt32(&requestCount, 1) - type postData struct { - Name string `json:"name"` - Source string `json:"source"` - Output string `json:"output"` - Status int `json:"status"` - } - pd := postData{} - dec := json.NewDecoder(conn) - dec.Decode(&pd) - - if exp := "Kapacitor"; pd.Source != exp { - t.Errorf("unexpected source got %s exp %s", pd.Source, exp) - } - - if exp := "kapacitor.cpu.serverA is CRITICAL"; pd.Output != exp { - t.Errorf("unexpected text got %s exp %s", pd.Output, exp) - } - - if exp := "kapacitor.cpu.serverA"; pd.Name != exp { - t.Errorf("unexpected text got %s exp %s", pd.Name, exp) - } - - if exp := 2; pd.Status != exp { - t.Errorf("unexpected status got %v exp %v", pd.Status, exp) - } - }() - } - }() + defer ts.Close() var 
script = ` stream @@ -6475,81 +6439,39 @@ stream .crit(lambda: "count" > 8.0) .sensu() ` + tmInit := func(tm *kapacitor.TaskMaster) { + c := sensu.NewConfig() + c.Enabled = true + c.Addr = ts.Addr + c.Source = "Kapacitor" + sl := sensu.NewService(c, logService.NewLogger("[test_sensu] ", log.LstdFlags)) + tm.SensuService = sl + } + testStreamerNoOutput(t, "TestStream_Alert", script, 13*time.Second, tmInit) - clock, et, replayErr, tm := testStreamer(t, "TestStream_Alert", script, nil) - defer tm.Close() + exp := []interface{}{ + sensutest.Request{ + Source: "Kapacitor", + Output: "kapacitor.cpu.serverA is CRITICAL", + Name: "kapacitor.cpu.serverA", + Status: 2, + }, + } - c := sensu.NewConfig() - c.Enabled = true - c.Addr = listen.Addr().String() - c.Source = "Kapacitor" - sl := sensu.NewService(c, logService.NewLogger("[test_sensu] ", log.LstdFlags)) - tm.SensuService = sl + ts.Close() + var got []interface{} + for _, g := range ts.Requests() { + got = append(got, g) + } - err = fastForwardTask(clock, et, replayErr, tm, 13*time.Second) - if err != nil { + if err := compareListIgnoreOrder(got, exp, nil); err != nil { t.Error(err) } - if rc := atomic.LoadInt32(&requestCount); rc != 1 { - t.Errorf("unexpected requestCount got %d exp 1", rc) - } } func TestStream_AlertSlack(t *testing.T) { - requestCount := int32(0) - ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - atomic.AddInt32(&requestCount, 1) - type postData struct { - Channel string `json:"channel"` - Username string `json:"username"` - Text string `json:"text"` - Attachments []struct { - Fallback string `json:"fallback"` - Color string `json:"color"` - Text string `json:"text"` - Mrkdwn_in []string `json:"mrkdwn_in"` - } `json:"attachments"` - } - pd := postData{} - dec := json.NewDecoder(r.Body) - dec.Decode(&pd) - if exp := "/test/slack/url"; r.URL.String() != exp { - t.Errorf("unexpected url got %s exp %s", r.URL.String(), exp) - } - if rc := 
atomic.LoadInt32(&requestCount); rc == 1 { - if exp := "#alerts"; pd.Channel != exp { - t.Errorf("unexpected channel got %s exp %s", pd.Channel, exp) - } - } else if rc := atomic.LoadInt32(&requestCount); rc == 2 { - if exp := "@jim"; pd.Channel != exp { - t.Errorf("unexpected channel got %s exp %s", pd.Channel, exp) - } - } - if exp := "kapacitor"; pd.Username != exp { - t.Errorf("unexpected username got %s exp %s", pd.Username, exp) - } - if exp := ""; pd.Text != exp { - t.Errorf("unexpected text got %s exp %s", pd.Text, exp) - } - if len(pd.Attachments) != 1 { - t.Errorf("unexpected attachments got %v", pd.Attachments) - } else { - exp := "kapacitor/cpu/serverA is CRITICAL" - if pd.Attachments[0].Fallback != exp { - t.Errorf("unexpected fallback got %s exp %s", pd.Attachments[0].Fallback, exp) - } - if pd.Attachments[0].Text != exp { - t.Errorf("unexpected text got %s exp %s", pd.Attachments[0].Text, exp) - } - if exp := "danger"; pd.Attachments[0].Color != exp { - t.Errorf("unexpected color got %s exp %s", pd.Attachments[0].Color, exp) - } - if exp := []string{"text"}; !reflect.DeepEqual(pd.Attachments[0].Mrkdwn_in, exp) { - t.Errorf("unexpected mrkdwn_in got %v exp %v", pd.Attachments[0].Mrkdwn_in, exp) - } - } - })) + ts := slacktest.NewServer() defer ts.Close() var script = ` @@ -6573,78 +6495,64 @@ stream .channel('@jim') ` - clock, et, replayErr, tm := testStreamer(t, "TestStream_Alert", script, nil) - defer tm.Close() - - c := slack.NewConfig() - c.Enabled = true - c.URL = ts.URL + "/test/slack/url" - c.Channel = "#channel" - sl := slack.NewService(c, logService.NewLogger("[test_slack] ", log.LstdFlags)) - tm.SlackService = sl + tmInit := func(tm *kapacitor.TaskMaster) { + c := slack.NewConfig() + c.Enabled = true + c.URL = ts.URL + "/test/slack/url" + c.Channel = "#channel" + sl := slack.NewService(c, logService.NewLogger("[test_slack] ", log.LstdFlags)) + tm.SlackService = sl + } + testStreamerNoOutput(t, "TestStream_Alert", script, 13*time.Second, 
tmInit) + + exp := []interface{}{ + slacktest.Request{ + URL: "/test/slack/url", + PostData: slacktest.PostData{ + Channel: "@jim", + Username: "kapacitor", + Text: "", + Attachments: []slacktest.Attachment{ + { + Fallback: "kapacitor/cpu/serverA is CRITICAL", + Color: "danger", + Text: "kapacitor/cpu/serverA is CRITICAL", + Mrkdwn_in: []string{"text"}, + }, + }, + }, + }, + slacktest.Request{ + URL: "/test/slack/url", + PostData: slacktest.PostData{ + Channel: "#alerts", + Username: "kapacitor", + Text: "", + Attachments: []slacktest.Attachment{ + { + Fallback: "kapacitor/cpu/serverA is CRITICAL", + Color: "danger", + Text: "kapacitor/cpu/serverA is CRITICAL", + Mrkdwn_in: []string{"text"}, + }, + }, + }, + }, + } - err := fastForwardTask(clock, et, replayErr, tm, 13*time.Second) - if err != nil { - t.Error(err) + ts.Close() + var got []interface{} + for _, g := range ts.Requests() { + got = append(got, g) } - if rc := atomic.LoadInt32(&requestCount); rc != 2 { - t.Errorf("unexpected requestCount got %d exp 2", rc) + if err := compareListIgnoreOrder(got, exp, nil); err != nil { + t.Error(err) } } func TestStream_AlertTelegram(t *testing.T) { - requestCount := int32(0) - ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - atomic.AddInt32(&requestCount, 1) - type postData struct { - ChatId string `json:"chat_id"` - Text string `json:"text"` - ParseMode string `json:"parse_mode"` - DisableWebPagePreview bool `json:"disable_web_page_preview"` - DisableNotification bool `json:"disable_notification"` - } - pd := postData{} - dec := json.NewDecoder(r.Body) - dec.Decode(&pd) - - if exp := "/botTOKEN:AUTH/sendMessage"; r.URL.String() != exp { - t.Errorf("unexpected url got %s exp %s", r.URL.String(), exp) - } - - if rc := atomic.LoadInt32(&requestCount); rc == 1 { - if exp := "12345678"; pd.ChatId != exp { - t.Errorf("unexpected recipient got %s exp %s", pd.ChatId, exp) - } - if exp := "HTML"; pd.ParseMode != exp { - 
t.Errorf("unexpected recipient got %s exp %s", pd.ParseMode, exp) - } - if exp := true; pd.DisableWebPagePreview != exp { - t.Errorf("unexpected DisableWebPagePreview got %t exp %t", pd.DisableWebPagePreview, exp) - } - if exp := true; pd.DisableNotification != exp { - t.Errorf("unexpected DisableNotification got %t exp %t", pd.DisableNotification, exp) - } - - } else if rc := atomic.LoadInt32(&requestCount); rc == 2 { - if exp := "87654321"; pd.ChatId != exp { - t.Errorf("unexpected recipient got %s exp %s", pd.ChatId, exp) - } - if exp := ""; pd.ParseMode != exp { - t.Errorf("unexpected recipient got '%s' exp '%s'", pd.ParseMode, exp) - } - if exp := true; pd.DisableWebPagePreview != exp { - t.Errorf("unexpected DisableWebPagePreview got %t exp %t", pd.DisableWebPagePreview, exp) - } - if exp := false; pd.DisableNotification != exp { - t.Errorf("unexpected DisableNotification got %t exp %t", pd.DisableNotification, exp) - } - } - - if exp := "kapacitor/cpu/serverA is CRITICAL"; pd.Text != exp { - t.Errorf("unexpected text got %s exp %s", pd.Text, exp) - } - })) + ts := telegramtest.NewServer() defer ts.Close() var script = ` @@ -6664,71 +6572,120 @@ stream .crit(lambda: "count" > 8.0) .telegram() .chatId('12345678') - .disableNotification() - .parseMode('HTML') - .telegram() - .chatId('87654321') + .disableNotification() + .parseMode('HTML') + .telegram() + .chatId('87654321') ` + tmInit := func(tm *kapacitor.TaskMaster) { + c := telegram.NewConfig() + c.Enabled = true + c.URL = ts.URL + "/bot" + c.Token = "TOKEN:AUTH" + c.ChatId = "123456789" + c.DisableWebPagePreview = true + c.DisableNotification = false + tl := telegram.NewService(c, logService.NewLogger("[test_telegram] ", log.LstdFlags)) + tm.TelegramService = tl + } + testStreamerNoOutput(t, "TestStream_Alert", script, 13*time.Second, tmInit) - clock, et, replayErr, tm := testStreamer(t, "TestStream_Alert", script, nil) - defer tm.Close() + exp := []interface{}{ + telegramtest.Request{ + URL: 
"/botTOKEN:AUTH/sendMessage", + PostData: telegramtest.PostData{ + ChatId: "12345678", + Text: "kapacitor/cpu/serverA is CRITICAL", + ParseMode: "HTML", + DisableWebPagePreview: true, + DisableNotification: true, + }, + }, + telegramtest.Request{ + URL: "/botTOKEN:AUTH/sendMessage", + PostData: telegramtest.PostData{ + ChatId: "87654321", + Text: "kapacitor/cpu/serverA is CRITICAL", + ParseMode: "", + DisableWebPagePreview: true, + DisableNotification: false, + }, + }, + } - c := telegram.NewConfig() - c.Enabled = true - c.URL = ts.URL + "/bot" - c.Token = "TOKEN:AUTH" - c.ChatId = "123456789" - c.DisableWebPagePreview = true - c.DisableNotification = false - tl := telegram.NewService(c, logService.NewLogger("[test_telegram] ", log.LstdFlags)) - tm.TelegramService = tl - - err := fastForwardTask(clock, et, replayErr, tm, 13*time.Second) - if err != nil { + ts.Close() + var got []interface{} + for _, g := range ts.Requests() { + got = append(got, g) + } + + if err := compareListIgnoreOrder(got, exp, nil); err != nil { t.Error(err) } +} - if rc := atomic.LoadInt32(&requestCount); rc != 2 { - t.Errorf("unexpected requestCount got %d exp 2", rc) +func TestStream_AlertTCP(t *testing.T) { + ts, err := alerttest.NewTCPServer() + if err != nil { + t.Fatal(err) + } + defer ts.Close() + + var script = ` +stream + |from() + .measurement('cpu') + .where(lambda: "host" == 'serverA') + .groupBy('host') + |window() + .period(10s) + .every(10s) + |count('value') + |alert() + .id('kapacitor.{{ .Name }}.{{ index .Tags "host" }}') + .info(lambda: "count" > 6.0) + .warn(lambda: "count" > 7.0) + .crit(lambda: "count" > 8.0) + .details('') + .tcp('` + ts.Addr + `') +` + testStreamerNoOutput(t, "TestStream_Alert", script, 13*time.Second, nil) + + exp := []interface{}{ + alertservice.AlertData{ + ID: "kapacitor.cpu.serverA", + Message: "kapacitor.cpu.serverA is CRITICAL", + Time: time.Date(1971, 1, 1, 0, 0, 10, 0, time.UTC), + Level: alert.Critical, + Data: influxql.Result{ + Series: 
imodels.Rows{ + { + Name: "cpu", + Tags: map[string]string{"host": "serverA"}, + Columns: []string{"time", "count"}, + Values: [][]interface{}{[]interface{}{ + time.Date(1971, 1, 1, 0, 0, 10, 0, time.UTC).Format(time.RFC3339Nano), + 10.0, + }}, + }, + }, + }, + }, + } + + ts.Close() + var got []interface{} + for _, g := range ts.Data() { + got = append(got, g) + } + + if err := compareListIgnoreOrder(got, exp, nil); err != nil { + t.Error(err) } } func TestStream_AlertHipChat(t *testing.T) { - requestCount := int32(0) - ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - atomic.AddInt32(&requestCount, 1) - type postData struct { - From string `json:"from"` - Message string `json:"message"` - Color string `json:"color"` - Notify bool `json:"notify"` - } - pd := postData{} - dec := json.NewDecoder(r.Body) - dec.Decode(&pd) - - if rc := atomic.LoadInt32(&requestCount); rc == 1 { - if exp := "/1234567/notification?auth_token=testtoken1234567"; r.URL.String() != exp { - t.Errorf("unexpected url got %s exp %s", r.URL.String(), exp) - } - } else if rc := atomic.LoadInt32(&requestCount); rc == 2 { - if exp := "/Test%20Room/notification?auth_token=testtokenTestRoom"; r.URL.String() != exp { - t.Errorf("unexpected url got %s exp %s", r.URL.String(), exp) - } - } - if exp := "kapacitor"; pd.From != exp { - t.Errorf("unexpected username got %s exp %s", pd.From, exp) - } - if exp := "kapacitor/cpu/serverA is CRITICAL"; pd.Message != exp { - t.Errorf("unexpected text got %s exp %s", pd.Message, exp) - } - if exp := "red"; pd.Color != exp { - t.Errorf("unexpected color got %s exp %s", pd.Color, exp) - } - if exp := true; pd.Notify != exp { - t.Errorf("unexpected notify got %t exp %t", pd.Notify, exp) - } - })) + ts := hipchattest.NewServer() defer ts.Close() var script = ` @@ -6753,107 +6710,52 @@ stream .room('Test Room') .token('testtokenTestRoom') ` + tmInit := func(tm *kapacitor.TaskMaster) { - clock, et, replayErr, tm := testStreamer(t, 
"TestStream_Alert", script, nil) - defer tm.Close() + c := hipchat.NewConfig() + c.Enabled = true + c.URL = ts.URL + c.Room = "1231234" + c.Token = "testtoken1231234" + sl := hipchat.NewService(c, logService.NewLogger("[test_hipchat] ", log.LstdFlags)) + tm.HipChatService = sl + } + testStreamerNoOutput(t, "TestStream_Alert", script, 13*time.Second, tmInit) - c := hipchat.NewConfig() - c.Enabled = true - c.URL = ts.URL - c.Room = "1231234" - c.Token = "testtoken1231234" - sl := hipchat.NewService(c, logService.NewLogger("[test_hipchat] ", log.LstdFlags)) - tm.HipChatService = sl + exp := []interface{}{ + hipchattest.Request{ + URL: "/1234567/notification?auth_token=testtoken1234567", + PostData: hipchattest.PostData{ + From: "kapacitor", + Message: "kapacitor/cpu/serverA is CRITICAL", + Color: "red", + Notify: true, + }, + }, + hipchattest.Request{ + URL: "/Test%20Room/notification?auth_token=testtokenTestRoom", + PostData: hipchattest.PostData{ + From: "kapacitor", + Message: "kapacitor/cpu/serverA is CRITICAL", + Color: "red", + Notify: true, + }, + }, + } - err := fastForwardTask(clock, et, replayErr, tm, 13*time.Second) - if err != nil { - t.Error(err) + ts.Close() + var got []interface{} + for _, g := range ts.Requests() { + got = append(got, g) } - if rc := atomic.LoadInt32(&requestCount); rc != 2 { - t.Errorf("unexpected requestCount got %d exp 2", rc) + if err := compareListIgnoreOrder(got, exp, nil); err != nil { + t.Error(err) } } func TestStream_AlertAlerta(t *testing.T) { - requestCount := int32(0) - ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - atomic.AddInt32(&requestCount, 1) - type postData struct { - Resource string `json:"resource"` - Event string `json:"event"` - Group string `json:"group"` - Environment string `json:"environment"` - Text string `json:"text"` - Origin string `json:"origin"` - Service []string `json:"service"` - Value string `json:"value"` - } - pd := postData{} - dec := 
json.NewDecoder(r.Body) - dec.Decode(&pd) - - if rc := atomic.LoadInt32(&requestCount); rc == 1 { - if exp := "/alert"; r.URL.String() != exp { - t.Errorf("unexpected url got %s exp %s", r.URL.String(), exp) - } - if exp := "Key testtoken1234567"; r.Header.Get("Authorization") != exp { - t.Errorf("unexpected token in header got %s exp %s", r.Header.Get("Authorization"), exp) - } - if exp := "cpu"; pd.Resource != exp { - t.Errorf("unexpected resource got %s exp %s", pd.Resource, exp) - } - if exp := "serverA"; pd.Event != exp { - t.Errorf("unexpected event got %s exp %s", pd.Event, exp) - } - if exp := "production"; pd.Environment != exp { - t.Errorf("unexpected environment got %s exp %s", pd.Environment, exp) - } - if exp := "host=serverA"; pd.Group != exp { - t.Errorf("unexpected group got %s exp %s", pd.Group, exp) - } - if exp := ""; pd.Value != exp { - t.Errorf("unexpected value got %s exp %s", pd.Value, exp) - } - if exp := []string{"cpu"}; !reflect.DeepEqual(pd.Service, exp) { - t.Errorf("unexpected service got %s exp %s", pd.Service, exp) - } - if exp := "Kapacitor"; pd.Origin != exp { - t.Errorf("unexpected origin got %s exp %s", pd.Origin, exp) - } - } else { - if exp := "/alert"; r.URL.String() != exp { - t.Errorf("unexpected url got %s exp %s", r.URL.String(), exp) - } - if exp := "Key anothertesttoken"; r.Header.Get("Authorization") != exp { - t.Errorf("unexpected token in header got %s exp %s", r.Header.Get("Authorization"), exp) - } - if exp := "resource: serverA"; pd.Resource != exp { - t.Errorf("unexpected resource got %s exp %s", pd.Resource, exp) - } - if exp := "event: TestStream_Alert"; pd.Event != exp { - t.Errorf("unexpected event got %s exp %s", pd.Event, exp) - } - if exp := "serverA"; pd.Environment != exp { - t.Errorf("unexpected environment got %s exp %s", pd.Environment, exp) - } - if exp := "serverA"; pd.Group != exp { - t.Errorf("unexpected group got %s exp %s", pd.Group, exp) - } - if exp := "10"; pd.Value != exp { - 
t.Errorf("unexpected value got %s exp %s", pd.Value, exp) - } - if exp := []string{"serviceA", "serviceB"}; !reflect.DeepEqual(pd.Service, exp) { - t.Errorf("unexpected service got %s exp %s", pd.Service, exp) - } - if exp := "override"; pd.Origin != exp { - t.Errorf("unexpected origin got %s exp %s", pd.Origin, exp) - } - } - if exp := "kapacitor/cpu/serverA is CRITICAL @1971-01-01 00:00:10 +0000 UTC"; pd.Text != exp { - t.Errorf("unexpected text got %s exp %s", pd.Text, exp) - } - })) + ts := alertatest.NewServer() defer ts.Close() var script = ` @@ -6885,103 +6787,59 @@ stream .value('{{ index .Fields "count" }}') .services('serviceA', 'serviceB') ` - - clock, et, replayErr, tm := testStreamer(t, "TestStream_Alert", script, nil) - defer tm.Close() - - c := alerta.NewConfig() - c.Enabled = true - c.URL = ts.URL - c.Origin = "Kapacitor" - sl := alerta.NewService(c, logService.NewLogger("[test_alerta] ", log.LstdFlags)) - tm.AlertaService = sl - - err := fastForwardTask(clock, et, replayErr, tm, 13*time.Second) - if err != nil { + tmInit := func(tm *kapacitor.TaskMaster) { + c := alerta.NewConfig() + c.Enabled = true + c.URL = ts.URL + c.Origin = "Kapacitor" + sl := alerta.NewService(c, logService.NewLogger("[test_alerta] ", log.LstdFlags)) + tm.AlertaService = sl + } + testStreamerNoOutput(t, "TestStream_Alert", script, 13*time.Second, tmInit) + + exp := []interface{}{ + alertatest.Request{ + URL: "/alert", + Authorization: "Key testtoken1234567", + PostData: alertatest.PostData{ + Resource: "cpu", + Event: "serverA", + Group: "host=serverA", + Environment: "production", + Text: "kapacitor/cpu/serverA is CRITICAL @1971-01-01 00:00:10 +0000 UTC", + Origin: "Kapacitor", + Service: []string{"cpu"}, + }, + }, + alertatest.Request{ + URL: "/alert", + Authorization: "Key anothertesttoken", + PostData: alertatest.PostData{ + Resource: "resource: serverA", + Event: "event: TestStream_Alert", + Group: "serverA", + Environment: "serverA", + Text: "kapacitor/cpu/serverA is 
CRITICAL @1971-01-01 00:00:10 +0000 UTC", + Origin: "override", + Service: []string{"serviceA", "serviceB"}, + Value: "10", + }, + }, + } + + ts.Close() + var got []interface{} + for _, g := range ts.Requests() { + got = append(got, g) + } + + if err := compareListIgnoreOrder(got, exp, nil); err != nil { t.Error(err) } - - if rc := atomic.LoadInt32(&requestCount); rc != 2 { - t.Errorf("unexpected requestCount got %d exp 2", rc) - } } func TestStream_AlertOpsGenie(t *testing.T) { - requestCount := int32(0) - ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - atomic.AddInt32(&requestCount, 1) - - type postData struct { - ApiKey string `json:"apiKey"` - Message string `json:"message"` - Entity string `json:"entity"` - Alias string `json:"alias"` - Note int `json:"note"` - Details map[string]interface{} `json:"details"` - Description interface{} `json:"description"` - Teams []string `json:"teams"` - Recipients []string `json:"recipients"` - } - - pd := postData{} - dec := json.NewDecoder(r.Body) - dec.Decode(&pd) - - if exp := "CRITICAL"; pd.Details["Level"] != exp { - t.Errorf("unexpected level got %s exp %s", pd.Details["level"], exp) - } - if exp := "kapacitor/cpu/serverA"; pd.Entity != exp { - t.Errorf("unexpected entity got %s exp %s", pd.Entity, exp) - } - if exp := "kapacitor/cpu/serverA"; pd.Alias != exp { - t.Errorf("unexpected alias got %s exp %s", pd.Alias, exp) - } - if exp := "kapacitor/cpu/serverA is CRITICAL"; pd.Message != exp { - t.Errorf("unexpected entity id got %s exp %s", pd.Message, exp) - } - if exp := "Kapacitor"; pd.Details["Monitoring Tool"] != exp { - t.Errorf("unexpected monitoring tool got %s exp %s", pd.Details["Monitoring Tool"], exp) - } - if pd.Description == nil { - t.Error("unexpected description got nil") - } - if rc := atomic.LoadInt32(&requestCount); rc == 1 { - if exp, l := 2, len(pd.Teams); l != exp { - t.Errorf("unexpected teams count got %d exp %d", l, exp) - } - if exp := "test_team"; 
pd.Teams[0] != exp { - t.Errorf("unexpected teams[0] got %s exp %s", pd.Teams[0], exp) - } - if exp := "another_team"; pd.Teams[1] != exp { - t.Errorf("unexpected teams[1] got %s exp %s", pd.Teams[1], exp) - } - if exp, l := 2, len(pd.Recipients); l != exp { - t.Errorf("unexpected recipients count got %d exp %d", l, exp) - } - if exp := "test_recipient"; pd.Recipients[0] != exp { - t.Errorf("unexpected recipients[0] got %s exp %s", pd.Recipients[0], exp) - } - if exp := "another_recipient"; pd.Recipients[1] != exp { - t.Errorf("unexpected recipients[1] got %s exp %s", pd.Recipients[1], exp) - } - } else if rc := atomic.LoadInt32(&requestCount); rc == 2 { - if exp, l := 1, len(pd.Teams); l != exp { - t.Errorf("unexpected teams count got %d exp %d", l, exp) - } - if exp := "test_team2"; pd.Teams[0] != exp { - t.Errorf("unexpected teams[0] got %s exp %s", pd.Teams[0], exp) - } - if exp, l := 2, len(pd.Recipients); l != exp { - t.Errorf("unexpected recipients count got %d exp %d", l, exp) - } - if exp := "test_recipient2"; pd.Recipients[0] != exp { - t.Errorf("unexpected recipients[0] got %s exp %s", pd.Recipients[0], exp) - } - if exp := "another_recipient"; pd.Recipients[1] != exp { - t.Errorf("unexpected recipients[1] got %s exp %s", pd.Recipients[1], exp) - } - } - })) + ts := opsgenietest.NewServer() defer ts.Close() var script = ` @@ -7006,66 +6864,67 @@ stream .teams('test_team2' ) .recipients('test_recipient2', 'another_recipient') ` + tmInit := func(tm *kapacitor.TaskMaster) { + c := opsgenie.NewConfig() + c.Enabled = true + c.URL = ts.URL + c.APIKey = "api_key" + og := opsgenie.NewService(c, logService.NewLogger("[test_og] ", log.LstdFlags)) + tm.OpsGenieService = og + } + testStreamerNoOutput(t, "TestStream_Alert", script, 13*time.Second, tmInit) + + exp := []interface{}{ + opsgenietest.Request{ + URL: "/", + PostData: opsgenietest.PostData{ + ApiKey: "api_key", + Message: "kapacitor/cpu/serverA is CRITICAL", + Entity: "kapacitor/cpu/serverA", + Alias: 
"kapacitor/cpu/serverA", + Note: "", + Details: map[string]interface{}{ + "Level": "CRITICAL", + "Monitoring Tool": "Kapacitor", + }, + Description: `{"Series":[{"name":"cpu","tags":{"host":"serverA"},"columns":["time","count"],"values":[["1971-01-01T00:00:10Z",10]]}],"Messages":null,"Err":null}`, + Teams: []string{"test_team", "another_team"}, + Recipients: []string{"test_recipient", "another_recipient"}, + }, + }, + opsgenietest.Request{ + URL: "/", + PostData: opsgenietest.PostData{ + ApiKey: "api_key", + Message: "kapacitor/cpu/serverA is CRITICAL", + Entity: "kapacitor/cpu/serverA", + Alias: "kapacitor/cpu/serverA", + Note: "", + Details: map[string]interface{}{ + "Level": "CRITICAL", + "Monitoring Tool": "Kapacitor", + }, + Description: `{"Series":[{"name":"cpu","tags":{"host":"serverA"},"columns":["time","count"],"values":[["1971-01-01T00:00:10Z",10]]}],"Messages":null,"Err":null}`, + Teams: []string{"test_team2"}, + Recipients: []string{"test_recipient2", "another_recipient"}, + }, + }, + } - clock, et, replayErr, tm := testStreamer(t, "TestStream_Alert", script, nil) - defer tm.Close() - c := opsgenie.NewConfig() - c.Enabled = true - c.URL = ts.URL - c.APIKey = "api_key" - og := opsgenie.NewService(c, logService.NewLogger("[test_og] ", log.LstdFlags)) - tm.OpsGenieService = og - - err := fastForwardTask(clock, et, replayErr, tm, 13*time.Second) - if err != nil { - t.Error(err) + ts.Close() + var got []interface{} + for _, g := range ts.Requests() { + got = append(got, g) } - if rc := atomic.LoadInt32(&requestCount); rc != 2 { - t.Errorf("unexpected requestCount got %d exp 1", rc) + if err := compareListIgnoreOrder(got, exp, nil); err != nil { + t.Error(err) } + } func TestStream_AlertPagerDuty(t *testing.T) { - requestCount := int32(0) - ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - atomic.AddInt32(&requestCount, 1) - type postData struct { - ServiceKey string `json:"service_key"` - EventType string 
`json:"event_type"` - Description string `json:"description"` - Client string `json:"client"` - ClientURL string `json:"client_url"` - Details interface{} `json:"details"` - } - pd := postData{} - dec := json.NewDecoder(r.Body) - dec.Decode(&pd) - if rc := atomic.LoadInt32(&requestCount); rc == 1 { - if exp := "service_key"; pd.ServiceKey != exp { - t.Errorf("unexpected service key got %s exp %s", pd.ServiceKey, exp) - } - } else if rc := atomic.LoadInt32(&requestCount); rc == 2 { - if exp := "test_override_key"; pd.ServiceKey != exp { - t.Errorf("unexpected service key got %s exp %s", pd.ServiceKey, exp) - } - } - if exp := "trigger"; pd.EventType != exp { - t.Errorf("unexpected event type got %s exp %s", pd.EventType, exp) - } - if exp := "CRITICAL alert for kapacitor/cpu/serverA"; pd.Description != exp { - t.Errorf("unexpected description got %s exp %s", pd.Description, exp) - } - if exp := "kapacitor"; pd.Client != exp { - t.Errorf("unexpected client got %s exp %s", pd.Client, exp) - } - if len(pd.ClientURL) == 0 { - t.Errorf("unexpected client url got empty string") - } - if pd.Details == nil { - t.Error("unexpected data got nil") - } - })) + ts := pagerdutytest.NewServer() defer ts.Close() var script = ` @@ -7089,69 +6948,116 @@ stream .serviceKey('test_override_key') ` - clock, et, replayErr, tm := testStreamer(t, "TestStream_Alert", script, nil) - defer tm.Close() - c := pagerduty.NewConfig() - c.Enabled = true - c.URL = ts.URL - c.ServiceKey = "service_key" - pd := pagerduty.NewService(c, logService.NewLogger("[test_pd] ", log.LstdFlags)) - pd.HTTPDService = tm.HTTPDService - tm.PagerDutyService = pd - - err := fastForwardTask(clock, et, replayErr, tm, 13*time.Second) - if err != nil { + var kapacitorURL string + tmInit := func(tm *kapacitor.TaskMaster) { + c := pagerduty.NewConfig() + c.Enabled = true + c.URL = ts.URL + c.ServiceKey = "service_key" + pd := pagerduty.NewService(c, logService.NewLogger("[test_pd] ", log.LstdFlags)) + pd.HTTPDService = 
tm.HTTPDService + tm.PagerDutyService = pd + + kapacitorURL = tm.HTTPDService.URL() + } + testStreamerNoOutput(t, "TestStream_Alert", script, 13*time.Second, tmInit) + + exp := []interface{}{ + pagerdutytest.Request{ + URL: "/", + PostData: pagerdutytest.PostData{ + ServiceKey: "service_key", + EventType: "trigger", + Description: "CRITICAL alert for kapacitor/cpu/serverA", + Client: "kapacitor", + ClientURL: kapacitorURL, + Details: `{"Series":[{"name":"cpu","tags":{"host":"serverA"},"columns":["time","count"],"values":[["1971-01-01T00:00:10Z",10]]}],"Messages":null,"Err":null}`, + }, + }, + pagerdutytest.Request{ + URL: "/", + PostData: pagerdutytest.PostData{ + ServiceKey: "test_override_key", + EventType: "trigger", + Description: "CRITICAL alert for kapacitor/cpu/serverA", + Client: "kapacitor", + ClientURL: kapacitorURL, + Details: `{"Series":[{"name":"cpu","tags":{"host":"serverA"},"columns":["time","count"],"values":[["1971-01-01T00:00:10Z",10]]}],"Messages":null,"Err":null}`, + }, + }, + } + + ts.Close() + var got []interface{} + for _, g := range ts.Requests() { + got = append(got, g) + } + + if err := compareListIgnoreOrder(got, exp, nil); err != nil { t.Error(err) } +} - if rc := atomic.LoadInt32(&requestCount); rc != 2 { - t.Errorf("unexpected requestCount got %d exp 1", rc) +func TestStream_AlertPost(t *testing.T) { + ts := alerttest.NewPostServer() + defer ts.Close() + + var script = ` +stream + |from() + .measurement('cpu') + .where(lambda: "host" == 'serverA') + .groupBy('host') + |window() + .period(10s) + .every(10s) + |count('value') + |alert() + .id('kapacitor.{{ .Name }}.{{ index .Tags "host" }}') + .info(lambda: "count" > 6.0) + .warn(lambda: "count" > 7.0) + .crit(lambda: "count" > 8.0) + .details('') + .post('` + ts.URL + `') +` + + testStreamerNoOutput(t, "TestStream_Alert", script, 13*time.Second, nil) + + exp := []interface{}{ + alertservice.AlertData{ + ID: "kapacitor.cpu.serverA", + Message: "kapacitor.cpu.serverA is CRITICAL", + Time: 
time.Date(1971, 1, 1, 0, 0, 10, 0, time.UTC), + Level: alert.Critical, + Data: influxql.Result{ + Series: imodels.Rows{ + { + Name: "cpu", + Tags: map[string]string{"host": "serverA"}, + Columns: []string{"time", "count"}, + Values: [][]interface{}{[]interface{}{ + time.Date(1971, 1, 1, 0, 0, 10, 0, time.UTC).Format(time.RFC3339Nano), + 10.0, + }}, + }, + }, + }, + }, + } + + ts.Close() + var got []interface{} + for _, g := range ts.Data() { + got = append(got, g) + } + + if err := compareListIgnoreOrder(got, exp, nil); err != nil { + t.Error(err) } } func TestStream_AlertVictorOps(t *testing.T) { - requestCount := int32(0) - ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - atomic.AddInt32(&requestCount, 1) - if rc := atomic.LoadInt32(&requestCount); rc == 1 { - if exp, got := "/api_key/test_key", r.URL.String(); got != exp { - t.Errorf("unexpected VO url got %s exp %s", got, exp) - } - } else if rc := atomic.LoadInt32(&requestCount); rc == 2 { - if exp, got := "/api_key/test_key2", r.URL.String(); got != exp { - t.Errorf("unexpected VO url got %s exp %s", got, exp) - } - } - type postData struct { - MessageType string `json:"message_type"` - EntityID string `json:"entity_id"` - StateMessage string `json:"state_message"` - Timestamp int `json:"timestamp"` - MonitoringTool string `json:"monitoring_tool"` - Data interface{} `json:"data"` - } - pd := postData{} - dec := json.NewDecoder(r.Body) - dec.Decode(&pd) - if exp := "CRITICAL"; pd.MessageType != exp { - t.Errorf("unexpected message type got %s exp %s", pd.MessageType, exp) - } - if exp := "kapacitor/cpu/serverA"; pd.EntityID != exp { - t.Errorf("unexpected entity id got %s exp %s", pd.EntityID, exp) - } - if exp := "kapacitor/cpu/serverA is CRITICAL"; pd.StateMessage != exp { - t.Errorf("unexpected state message got %s exp %s", pd.StateMessage, exp) - } - if exp := "kapacitor"; pd.MonitoringTool != exp { - t.Errorf("unexpected monitoring tool got %s exp %s", 
pd.MonitoringTool, exp) - } - if exp := 31536010; pd.Timestamp != exp { - t.Errorf("unexpected timestamp got %d exp %d", pd.Timestamp, exp) - } - if pd.Data == nil { - t.Error("unexpected data got nil") - } - })) + ts := victoropstest.NewServer() defer ts.Close() var script = ` @@ -7175,52 +7081,55 @@ stream .routingKey('test_key2') ` - clock, et, replayErr, tm := testStreamer(t, "TestStream_Alert", script, nil) - defer tm.Close() - c := victorops.NewConfig() - c.Enabled = true - c.URL = ts.URL - c.APIKey = "api_key" - c.RoutingKey = "routing_key" - vo := victorops.NewService(c, logService.NewLogger("[test_vo] ", log.LstdFlags)) - tm.VictorOpsService = vo - - err := fastForwardTask(clock, et, replayErr, tm, 13*time.Second) - if err != nil { - t.Error(err) + tmInit := func(tm *kapacitor.TaskMaster) { + c := victorops.NewConfig() + c.Enabled = true + c.URL = ts.URL + c.APIKey = "api_key" + c.RoutingKey = "routing_key" + vo := victorops.NewService(c, logService.NewLogger("[test_vo] ", log.LstdFlags)) + tm.VictorOpsService = vo } + testStreamerNoOutput(t, "TestStream_Alert", script, 13*time.Second, tmInit) - if got, exp := atomic.LoadInt32(&requestCount), int32(2); got != exp { - t.Errorf("unexpected requestCount got %d exp %d", got, exp) + exp := []interface{}{ + victoropstest.Request{ + URL: "/api_key/test_key", + PostData: victoropstest.PostData{ + MessageType: "CRITICAL", + EntityID: "kapacitor/cpu/serverA", + StateMessage: "kapacitor/cpu/serverA is CRITICAL", + Timestamp: 31536010, + MonitoringTool: "kapacitor", + Data: `{"Series":[{"name":"cpu","tags":{"host":"serverA"},"columns":["time","count"],"values":[["1971-01-01T00:00:10Z",10]]}],"Messages":null,"Err":null}`, + }, + }, + victoropstest.Request{ + URL: "/api_key/test_key2", + PostData: victoropstest.PostData{ + MessageType: "CRITICAL", + EntityID: "kapacitor/cpu/serverA", + StateMessage: "kapacitor/cpu/serverA is CRITICAL", + Timestamp: 31536010, + MonitoringTool: "kapacitor", + Data: 
`{"Series":[{"name":"cpu","tags":{"host":"serverA"},"columns":["time","count"],"values":[["1971-01-01T00:00:10Z",10]]}],"Messages":null,"Err":null}`, + }, + }, } -} - -func TestStream_AlertTalk(t *testing.T) { - requestCount := int32(0) - ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - atomic.AddInt32(&requestCount, 1) - type postData struct { - Title string `json:"title"` - Text string `json:"text"` - AuthorName string `json:"authorName"` - } - pd := postData{} - dec := json.NewDecoder(r.Body) - dec.Decode(&pd) - - if exp := "Kapacitor"; pd.AuthorName != exp { - t.Errorf("unexpected source got %s exp %s", pd.AuthorName, exp) - } - if exp := "kapacitor/cpu/serverA is CRITICAL"; pd.Text != exp { - t.Errorf("unexpected text got %s exp %s", pd.Text, exp) - } + ts.Close() + var got []interface{} + for _, g := range ts.Requests() { + got = append(got, g) + } - if exp := "kapacitor/cpu/serverA"; pd.Title != exp { - t.Errorf("unexpected text got %s exp %s", pd.Title, exp) - } + if err := compareListIgnoreOrder(got, exp, nil); err != nil { + t.Error(err) + } +} - })) +func TestStream_AlertTalk(t *testing.T) { + ts := talktest.NewServer() defer ts.Close() var script = ` @@ -7241,25 +7150,38 @@ stream .talk() ` - clock, et, replayErr, tm := testStreamer(t, "TestStream_Alert", script, nil) - defer tm.Close() + tmInit := func(tm *kapacitor.TaskMaster) { + c := talk.NewConfig() + c.Enabled = true + c.URL = ts.URL + c.AuthorName = "Kapacitor" + sl := talk.NewService(c, logService.NewLogger("[test_talk] ", log.LstdFlags)) + tm.TalkService = sl + } + testStreamerNoOutput(t, "TestStream_Alert", script, 13*time.Second, tmInit) - c := talk.NewConfig() - c.Enabled = true - c.URL = ts.URL - c.AuthorName = "Kapacitor" - sl := talk.NewService(c, logService.NewLogger("[test_talk] ", log.LstdFlags)) - tm.TalkService = sl + exp := []interface{}{ + talktest.Request{ + URL: "/", + PostData: talktest.PostData{ + AuthorName: "Kapacitor", + Text: 
"kapacitor/cpu/serverA is CRITICAL", + Title: "kapacitor/cpu/serverA", + }, + }, + } - err := fastForwardTask(clock, et, replayErr, tm, 13*time.Second) - if err != nil { - t.Error(err) + ts.Close() + var got []interface{} + for _, g := range ts.Requests() { + got = append(got, g) } - if rc := atomic.LoadInt32(&requestCount); rc != 1 { - t.Errorf("unexpected requestCount got %d exp 1", rc) + if err := compareListIgnoreOrder(got, exp, nil); err != nil { + t.Error(err) } } + func TestStream_AlertLog(t *testing.T) { tmpDir, err := ioutil.TempDir("", "TestStream_AlertLog") if err != nil { @@ -7268,6 +7190,10 @@ func TestStream_AlertLog(t *testing.T) { defer os.RemoveAll(tmpDir) normalPath := filepath.Join(tmpDir, "normal.log") modePath := filepath.Join(tmpDir, "mode.log") + + normal := alerttest.NewLog(normalPath) + mode := alerttest.NewLog(modePath) + var script = fmt.Sprintf(` stream |from() @@ -7280,6 +7206,7 @@ stream |count('value') |alert() .id('kapacitor.{{ .Name }}.{{ index .Tags "host" }}') + .details('') .info(lambda: "count" > 6.0) .warn(lambda: "count" > 7.0) .crit(lambda: "count" > 8.0) @@ -7288,56 +7215,316 @@ stream .mode(0644) `, normalPath, modePath) - clock, et, replayErr, tm := testStreamer(t, "TestStream_Alert", script, nil) - defer tm.Close() + expAD := []alertservice.AlertData{{ + ID: "kapacitor.cpu.serverA", + Message: "kapacitor.cpu.serverA is CRITICAL", + Time: time.Date(1971, 01, 01, 0, 0, 10, 0, time.UTC), + Level: alert.Critical, + Data: influxql.Result{ + Series: imodels.Rows{ + { + Name: "cpu", + Tags: map[string]string{"host": "serverA"}, + Columns: []string{"time", "count"}, + Values: [][]interface{}{[]interface{}{ + time.Date(1971, 1, 1, 0, 0, 10, 0, time.UTC).Format(time.RFC3339Nano), + 10.0, + }}, + }, + }, + }, + }} - err = fastForwardTask(clock, et, replayErr, tm, 13*time.Second) - if err != nil { + testStreamerNoOutput(t, "TestStream_Alert", script, 13*time.Second, nil) + + testLog := func(name string, expData 
[]alertservice.AlertData, expMode os.FileMode, l *alerttest.Log) error { + m, err := l.Mode() + if err != nil { + return err + } + if got, exp := m, expMode; exp != got { + return fmt.Errorf("%s unexpected file mode: got %v exp %v", name, got, exp) + } + data, err := l.Data() + if err != nil { + return err + } + if got, exp := data, expData; !reflect.DeepEqual(got, exp) { + return fmt.Errorf("%s unexpected alert data written to log:\ngot\n%+v\nexp\n%+v\n", name, got, exp) + } + return nil + } + + if err := testLog("normal", expAD, 0600, normal); err != nil { + t.Error(err) + } + if err := testLog("mode", expAD, 0644, mode); err != nil { t.Error(err) } - normal, err := os.Open(normalPath) - if err != nil { - t.Fatalf("missing log file for alert %v", err) +} + +func TestStream_AlertExec(t *testing.T) { + var script = ` +stream + |from() + .measurement('cpu') + .where(lambda: "host" == 'serverA') + .groupBy('host') + |window() + .period(10s) + .every(10s) + |count('value') + |alert() + .id('kapacitor.{{ .Name }}.{{ index .Tags "host" }}') + .details('') + .info(lambda: "count" > 6.0) + .warn(lambda: "count" > 7.0) + .crit(lambda: "count" > 8.0) + .exec('/bin/my-script', 'arg1', 'arg2') + .exec('/bin/my-other-script') +` + + expAD := alertservice.AlertData{ + ID: "kapacitor.cpu.serverA", + Message: "kapacitor.cpu.serverA is CRITICAL", + Time: time.Date(1971, 01, 01, 0, 0, 10, 0, time.UTC), + Level: alert.Critical, + Data: influxql.Result{ + Series: imodels.Rows{ + { + Name: "cpu", + Tags: map[string]string{"host": "serverA"}, + Columns: []string{"time", "count"}, + Values: [][]interface{}{[]interface{}{ + time.Date(1971, 1, 1, 0, 0, 10, 0, time.UTC).Format(time.RFC3339Nano), + 10.0, + }}, + }, + }, + }, } - defer normal.Close() - if stat, err := normal.Stat(); err != nil { + expStdin, err := json.Marshal(expAD) + if err != nil { t.Fatal(err) - } else if exp, got := os.FileMode(0600), stat.Mode(); exp != got { - t.Errorf("unexpected normal file mode: got %v exp %v", 
got, exp) } + // Append trailing new line + expStdin = append(expStdin, '\n') + + te := alerttest.NewExec() + tmInit := func(tm *kapacitor.TaskMaster) { + tm.Commander = te.Commander + } + + testStreamerNoOutput(t, "TestStream_Alert", script, 13*time.Second, tmInit) + + expCmds := []interface{}{ + &commandtest.Command{ + Spec: command.Spec{ + Prog: "/bin/my-script", + Args: []string{"arg1", "arg2"}, + }, + Started: true, + Waited: true, + Killed: false, + StdinData: expStdin, + }, + &commandtest.Command{ + Spec: command.Spec{ + Prog: "/bin/my-other-script", + Args: []string{}, + }, + Started: true, + Waited: true, + Killed: false, + StdinData: expStdin, + }, + } + + cmds := te.Commands() + cmdsI := make([]interface{}, len(cmds)) + for i := range cmds { + cmdsI[i] = cmds[i] + } + if err := compareListIgnoreOrder(cmdsI, expCmds, func(got, exp interface{}) error { + g := got.(*commandtest.Command) + e := exp.(*commandtest.Command) + return e.Compare(g) + }); err != nil { + t.Error(err) + } + + //for i := 0; i < 2; i++ { + // select { + // case cmd := <-cmdC: + // cmd.Lock() + // defer cmd.Unlock() + + // var err error + // for j, info := range expInfo { + // if got, exp := cmd.Info, info; reflect.DeepEqual(got, exp) { + // // Found match remove it + // if j == 0 { + // expInfo = expInfo[1:] + // } else { + // expInfo = expInfo[:1] + // } + // err = nil + // break + // } else { + // err = fmt.Errorf("%d unexpected command info:\ngot\n%+v\nexp\n%+v\n", i, got, exp) + // } + // } + // if err != nil { + // t.Error(err) + // } + + // if !cmd.Started { + // t.Errorf("%d expected command to have been started", i) + // } + // if !cmd.Waited { + // t.Errorf("%d expected command to have waited", i) + // } + // if cmd.Killed { + // t.Errorf("%d expected command not to have been killed", i) + // } + + // ad := alertservice.AlertData{} + // if err := json.Unmarshal(cmd.StdinData, &ad); err != nil { + // t.Fatal(err) + // } + // if got, exp := ad, expAD; !reflect.DeepEqual(got, 
exp) { + // t.Errorf("%d unexpected alert data sent to command:\ngot\n%+v\nexp\n%+v\n%s", i, got, exp, string(cmd.StdinData)) + // } + // default: + // t.Error("expected command to be created") + // } + //} +} - mode, err := os.Open(modePath) +func TestStream_AlertEmail(t *testing.T) { + var script = ` +stream + |from() + .measurement('cpu') + .where(lambda: "host" == 'serverA') + .groupBy('host') + |window() + .period(10s) + .every(10s) + |count('value') + |alert() + .id('kapacitor.{{ .Name }}.{{ index .Tags "host" }}') + .details(''' +{{.Message}} + +Value: {{ index .Fields "count" }} +Details +''') + .info(lambda: "count" > 6.0) + .warn(lambda: "count" > 7.0) + .crit(lambda: "count" > 8.0) + .email('user1@example.com', 'user2@example.com') + .email() + .to('user1@example.com', 'user2@example.com') +` + + expMail := []*smtptest.Message{ + { + Header: mail.Header{ + "Mime-Version": []string{"1.0"}, + "Content-Type": []string{"text/html; charset=UTF-8"}, + "Content-Transfer-Encoding": []string{"quoted-printable"}, + "To": []string{"user1@example.com, user2@example.com"}, + "From": []string{"test@example.com"}, + "Subject": []string{"kapacitor.cpu.serverA is CRITICAL"}, + }, + Body: ` +kapacitor.cpu.serverA is CRITICAL + +Value: 10 +Details +`, + }, + { + Header: mail.Header{ + "Mime-Version": []string{"1.0"}, + "Content-Type": []string{"text/html; charset=UTF-8"}, + "Content-Transfer-Encoding": []string{"quoted-printable"}, + "To": []string{"user1@example.com, user2@example.com"}, + "From": []string{"test@example.com"}, + "Subject": []string{"kapacitor.cpu.serverA is CRITICAL"}, + }, + Body: ` +kapacitor.cpu.serverA is CRITICAL + +Value: 10 +Details +`, + }, + } + + smtpServer, err := smtptest.NewServer() if err != nil { - t.Fatalf("missing log file for alert %v", err) + t.Fatal(err) + } + defer smtpServer.Close() + sc := smtp.Config{ + Enabled: true, + Host: smtpServer.Host, + Port: smtpServer.Port, + From: "test@example.com", } - defer mode.Close() - if stat, err 
:= mode.Stat(); err != nil { + smtpService := smtp.NewService(sc, logService.NewLogger("[test-smtp] ", log.LstdFlags)) + if err := smtpService.Open(); err != nil { t.Fatal(err) - } else if exp, got := os.FileMode(0644), stat.Mode(); exp != got { - t.Errorf("unexpected normal file mode: got %v exp %v", got, exp) + } + defer smtpService.Close() + + tmInit := func(tm *kapacitor.TaskMaster) { + tm.SMTPService = smtpService + } + + testStreamerNoOutput(t, "TestStream_Alert", script, 13*time.Second, tmInit) + + smtpServer.Close() + + errors := smtpServer.Errors() + if got, exp := len(errors), 0; got != exp { + t.Errorf("unexpected smtp server errors: %v", errors) + } + + msgs := smtpServer.SentMessages() + if got, exp := len(msgs), len(expMail); got != exp { + t.Errorf("unexpected number of messages sent: got %d exp %d", got, exp) + } + for i, exp := range expMail { + got := msgs[i] + if err := exp.Compare(got); err != nil { + t.Errorf("%d %s", i, err) + } } } func TestStream_AlertSigma(t *testing.T) { requestCount := int32(0) ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - ad := kapacitor.AlertData{} + ad := alertservice.AlertData{} dec := json.NewDecoder(r.Body) err := dec.Decode(&ad) if err != nil { t.Fatal(err) } - var expAd kapacitor.AlertData + var expAd alertservice.AlertData atomic.AddInt32(&requestCount, 1) rc := atomic.LoadInt32(&requestCount) if rc := atomic.LoadInt32(&requestCount); rc == 1 { - expAd = kapacitor.AlertData{ + expAd = alertservice.AlertData{ ID: "cpu:nil", Message: "cpu:nil is INFO", Details: "cpu:nil is INFO", Time: time.Date(1971, 1, 1, 0, 0, 7, 0, time.UTC), - Level: kapacitor.InfoAlert, + Level: alert.Info, Data: influxql.Result{ Series: imodels.Rows{ { @@ -7354,13 +7541,13 @@ func TestStream_AlertSigma(t *testing.T) { }, } } else { - expAd = kapacitor.AlertData{ + expAd = alertservice.AlertData{ ID: "cpu:nil", Message: "cpu:nil is OK", Details: "cpu:nil is OK", Time: time.Date(1971, 1, 1, 0, 0, 8, 
0, time.UTC), Duration: time.Second, - Level: kapacitor.OKAlert, + Level: alert.OK, Data: influxql.Result{ Series: imodels.Rows{ { @@ -7410,19 +7597,19 @@ func TestStream_AlertComplexWhere(t *testing.T) { requestCount := int32(0) ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - ad := kapacitor.AlertData{} + ad := alertservice.AlertData{} dec := json.NewDecoder(r.Body) err := dec.Decode(&ad) if err != nil { t.Fatal(err) } atomic.AddInt32(&requestCount, 1) - expAd := kapacitor.AlertData{ + expAd := alertservice.AlertData{ ID: "cpu:nil", Message: "cpu:nil is CRITICAL", Details: "", Time: time.Date(1971, 1, 1, 0, 0, 7, 0, time.UTC), - Level: kapacitor.CritAlert, + Level: alert.Critical, Data: influxql.Result{ Series: imodels.Rows{ { @@ -7489,7 +7676,7 @@ func TestStream_AlertStateChangesOnlyExpired(t *testing.T) { requestCount := int32(0) ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - ad := kapacitor.AlertData{} + ad := alertservice.AlertData{} dec := json.NewDecoder(r.Body) err := dec.Decode(&ad) if err != nil { @@ -7497,24 +7684,24 @@ func TestStream_AlertStateChangesOnlyExpired(t *testing.T) { } //We don't care about the data for this test ad.Data = influxql.Result{} - var expAd kapacitor.AlertData + var expAd alertservice.AlertData atomic.AddInt32(&requestCount, 1) rc := atomic.LoadInt32(&requestCount) if rc < 6 { - expAd = kapacitor.AlertData{ + expAd = alertservice.AlertData{ ID: "cpu:nil", Message: "cpu:nil is CRITICAL", Time: time.Date(1971, 1, 1, 0, 0, int(rc)*2-1, 0, time.UTC), Duration: time.Duration(rc-1) * 2 * time.Second, - Level: kapacitor.CritAlert, + Level: alert.Critical, } } else { - expAd = kapacitor.AlertData{ + expAd = alertservice.AlertData{ ID: "cpu:nil", Message: "cpu:nil is OK", Time: time.Date(1971, 1, 1, 0, 0, 10, 0, time.UTC), Duration: 9 * time.Second, - Level: kapacitor.OKAlert, + Level: alert.OK, } } if eq, msg := compareAlertData(expAd, ad); !eq { @@ 
-7863,7 +8050,7 @@ stream // Create a new execution env tm := kapacitor.NewTaskMaster("testStreamer", logService) - tm.HTTPDService = httpService + tm.HTTPDService = newHTTPDService() tm.TaskStore = taskStore{} tm.DeadmanService = deadman{} tm.InfluxDBService = influxdb @@ -7923,7 +8110,7 @@ stream // Create a new execution env tm := kapacitor.NewTaskMaster("testStreamer", logService) - tm.HTTPDService = httpService + tm.HTTPDService = newHTTPDService() tm.TaskStore = taskStore{} tm.DeadmanService = deadman{} tm.InfluxDBService = influxdb @@ -8126,9 +8313,17 @@ func testStreamer( // Create a new execution env tm := kapacitor.NewTaskMaster("testStreamer", logService) - tm.HTTPDService = httpService + httpdService := newHTTPDService() + tm.HTTPDService = httpdService tm.TaskStore = taskStore{} tm.DeadmanService = deadman{} + as := alertservice.NewService(alertservice.NewConfig(), logService.NewLogger("[alert] ", log.LstdFlags)) + as.StorageService = storagetest.New() + as.HTTPDService = httpdService + if err := as.Open(); err != nil { + t.Fatal(err) + } + tm.AlertService = as if tmInit != nil { tmInit(tm) } @@ -8245,3 +8440,33 @@ func testStreamerWithOutput( } } } + +func compareListIgnoreOrder(got, exp []interface{}, cmpF func(got, exp interface{}) error) error { + if len(got) != len(exp) { + return fmt.Errorf("unexpected count got %d exp %d", len(got), len(exp)) + } + + if cmpF == nil { + cmpF = func(got, exp interface{}) error { + if !reflect.DeepEqual(got, exp) { + return fmt.Errorf("\ngot\n%+v\nexp\n%+v\n", got, exp) + } + return nil + } + } + + for _, e := range exp { + found := false + var err error + for _, g := range got { + if err = cmpF(g, e); err == nil { + found = true + break + } + } + if !found { + return err + } + } + return nil +} diff --git a/pipeline/alert.go b/pipeline/alert.go index e0b1890bc..a638bc4a1 100644 --- a/pipeline/alert.go +++ b/pipeline/alert.go @@ -1,8 +1,6 @@ package pipeline import ( - "fmt" - "os" "reflect" "time" @@ -21,9 +19,6 
@@ const defaultMessageTmpl = "{{ .ID }} is {{ .Level }}" // Default template for constructing a details message. const defaultDetailsTmpl = "{{ json . }}" -// Default log mode for file -const defaultLogFileMode = 0600 - // An AlertNode can trigger an event of varying severity levels, // and pass the event to alert handlers. The criteria for triggering // an alert is specified via a [lambda expression](/kapacitor/latest/tick/expr/). @@ -122,6 +117,8 @@ const defaultLogFileMode = 0600 type AlertNode struct { chainnode + Topic string + // Template for constructing a unique ID for a given alert. // // Available template data: @@ -352,15 +349,6 @@ func newAlertNode(wants EdgeType) *AlertNode { return a } -func (n *AlertNode) validate() error { - for _, lh := range n.LogHandlers { - if err := lh.validate(); err != nil { - return err - } - } - return nil -} - //tick:ignore func (n *AlertNode) ChainMethods() map[string]reflect.Value { return map[string]reflect.Value{ @@ -623,7 +611,6 @@ func (a *AlertNode) Log(filepath string) *LogHandler { log := &LogHandler{ AlertNode: a, FilePath: filepath, - Mode: defaultLogFileMode, } a.LogHandlers = append(a.LogHandlers, log) return log @@ -643,13 +630,6 @@ type LogHandler struct { Mode int64 } -func (h *LogHandler) validate() error { - if os.FileMode(h.Mode).Perm()&0200 == 0 { - return fmt.Errorf("invalid file mode %o, must be user writable", h.Mode) - } - return nil -} - // Send alert to VictorOps. // To use VictorOps alerting you must first enable the 'Alert Ingestion API' // in the 'Integrations' section of VictorOps. 
@@ -880,18 +860,11 @@ type HipChatHandler struct { func (a *AlertNode) Alerta() *AlertaHandler { alerta := &AlertaHandler{ AlertNode: a, - Resource: defaultAlertaResource, - Event: defaultAlertaEvent, - Group: defaultAlertaGroup, } a.AlertaHandlers = append(a.AlertaHandlers, alerta) return alerta } -const defaultAlertaResource = "{{ .Name }}" -const defaultAlertaEvent = "{{ .ID }}" -const defaultAlertaGroup = "{{ .Group }}" - // tick:embedded:AlertNode.Alerta type AlertaHandler struct { *AlertNode diff --git a/server/config.go b/server/config.go index 770aa7fb8..ed15d9d59 100644 --- a/server/config.go +++ b/server/config.go @@ -11,6 +11,8 @@ import ( "strings" "time" + "github.com/influxdata/kapacitor/command" + "github.com/influxdata/kapacitor/services/alert" "github.com/influxdata/kapacitor/services/alerta" "github.com/influxdata/kapacitor/services/config" "github.com/influxdata/kapacitor/services/deadman" @@ -49,6 +51,7 @@ type Config struct { InfluxDB []influxdb.Config `toml:"influxdb" override:"influxdb,element-key=name"` Logging logging.Config `toml:"logging"` ConfigOverride config.Config `toml:"config-override"` + Alert alert.Config `toml:"alert"` // Input services Graphites []graphite.Config `toml:"graphite"` @@ -80,12 +83,15 @@ type Config struct { DataDir string `toml:"data_dir"` SkipConfigOverrides bool `toml:"skip-config-overrides"` DefaultRetentionPolicy string `toml:"default-retention-policy"` + + Commander command.Commander `toml:"-"` } // NewConfig returns an instance of Config with reasonable defaults. 
func NewConfig() *Config { c := &Config{ - Hostname: "localhost", + Hostname: "localhost", + Commander: command.ExecCommander, } c.HTTP = httpd.NewConfig() @@ -96,6 +102,7 @@ func NewConfig() *Config { c.Logging = logging.NewConfig() c.Kubernetes = k8s.NewConfig() c.ConfigOverride = config.NewConfig() + c.Alert = alert.NewConfig() c.Collectd = collectd.NewConfig() c.OpenTSDB = opentsdb.NewConfig() diff --git a/server/server.go b/server/server.go index c7ddf5a37..d914580e4 100644 --- a/server/server.go +++ b/server/server.go @@ -18,7 +18,9 @@ import ( "github.com/influxdata/influxdb/services/opentsdb" "github.com/influxdata/kapacitor" "github.com/influxdata/kapacitor/auth" + "github.com/influxdata/kapacitor/command" iclient "github.com/influxdata/kapacitor/influxdb" + "github.com/influxdata/kapacitor/services/alert" "github.com/influxdata/kapacitor/services/alerta" "github.com/influxdata/kapacitor/services/config" "github.com/influxdata/kapacitor/services/deadman" @@ -69,17 +71,21 @@ type Server struct { err chan error + Commander command.Commander + TaskMaster *kapacitor.TaskMaster TaskMasterLookup *kapacitor.TaskMasterLookup AuthService auth.Interface HTTPDService *httpd.Service StorageService *storage.Service + AlertService *alert.Service TaskStore *task_store.Service ReplayService *replay.Service InfluxDBService *influxdb.Service ConfigOverrideService *config.Service TesterService *servicetest.Service + StatsService *stats.Service MetaClient *kapacitor.NoopMetaClient QueryExecutor *Queryexecutor @@ -126,6 +132,7 @@ func New(c *Config, buildInfo BuildInfo, logService logging.Interface) (*Server, Logger: l, ServicesByName: make(map[string]int), DynamicServices: make(map[string]Updater), + Commander: c.Commander, } s.Logger.Println("I! 
Kapacitor hostname:", s.hostname) @@ -147,6 +154,7 @@ func New(c *Config, buildInfo BuildInfo, logService logging.Interface) (*Server, s.TaskMasterLookup = kapacitor.NewTaskMasterLookup() s.TaskMaster = kapacitor.NewTaskMaster(kapacitor.MainTaskMaster, logService) s.TaskMaster.DefaultRetentionPolicy = c.DefaultRetentionPolicy + s.TaskMaster.Commander = s.Commander s.TaskMasterLookup.Set(s.TaskMaster) if err := s.TaskMaster.Open(); err != nil { return nil, err @@ -158,6 +166,7 @@ func New(c *Config, buildInfo BuildInfo, logService logging.Interface) (*Server, s.appendAuthService() s.appendConfigOverrideService() s.appendTesterService() + s.appendAlertService() // Append all dynamic services after the config override and tester services. s.appendUDFService() @@ -202,9 +211,6 @@ func New(c *Config, buildInfo BuildInfo, logService logging.Interface) (*Server, s.appendStatsService() s.appendReportingService() - // Append HTTPD Service last so that the API is not listening till everything else succeeded. 
- s.appendHTTPDService() - return s, nil } @@ -243,6 +249,19 @@ func (s *Server) appendConfigOverrideService() { s.AppendService("config", srv) } +func (s *Server) appendAlertService() { + l := s.LogService.NewLogger("[alert] ", log.LstdFlags) + srv := alert.NewService(s.config.Alert, l) + + srv.Commander = s.Commander + srv.HTTPDService = s.HTTPDService + srv.StorageService = s.StorageService + + s.AlertService = srv + s.TaskMaster.AlertService = srv + s.AppendService("alert", srv) +} + func (s *Server) appendTesterService() { l := s.LogService.NewLogger("[service-tests] ", log.LstdFlags) srv := servicetest.NewService(servicetest.NewConfig(), l) @@ -258,6 +277,8 @@ func (s *Server) appendSMTPService() { srv := smtp.NewService(c, l) s.TaskMaster.SMTPService = srv + s.AlertService.SMTPService = srv + s.SetDynamicService("smtp", srv) s.AppendService("smtp", srv) } @@ -297,10 +318,6 @@ func (s *Server) initHTTPDService() { s.TaskMaster.HTTPDService = srv } -func (s *Server) appendHTTPDService() { - s.AppendService("httpd", s.HTTPDService) -} - func (s *Server) appendTaskStoreService() { l := s.LogService.NewLogger("[task_store] ", log.LstdFlags) srv := task_store.NewService(s.config.Task, l) @@ -370,7 +387,9 @@ func (s *Server) appendOpsGenieService() { c := s.config.OpsGenie l := s.LogService.NewLogger("[opsgenie] ", log.LstdFlags) srv := opsgenie.NewService(c, l) + s.TaskMaster.OpsGenieService = srv + s.AlertService.OpsGenieService = srv s.SetDynamicService("opsgenie", srv) s.AppendService("opsgenie", srv) @@ -380,7 +399,9 @@ func (s *Server) appendVictorOpsService() { c := s.config.VictorOps l := s.LogService.NewLogger("[victorops] ", log.LstdFlags) srv := victorops.NewService(c, l) + s.TaskMaster.VictorOpsService = srv + s.AlertService.VictorOpsService = srv s.SetDynamicService("victorops", srv) s.AppendService("victorops", srv) @@ -391,7 +412,9 @@ func (s *Server) appendPagerDutyService() { l := s.LogService.NewLogger("[pagerduty] ", log.LstdFlags) srv := 
pagerduty.NewService(c, l) srv.HTTPDService = s.HTTPDService + s.TaskMaster.PagerDutyService = srv + s.AlertService.PagerDutyService = srv s.SetDynamicService("pagerduty", srv) s.AppendService("pagerduty", srv) @@ -401,7 +424,9 @@ func (s *Server) appendSensuService() { c := s.config.Sensu l := s.LogService.NewLogger("[sensu] ", log.LstdFlags) srv := sensu.NewService(c, l) + s.TaskMaster.SensuService = srv + s.AlertService.SensuService = srv s.SetDynamicService("sensu", srv) s.AppendService("sensu", srv) @@ -411,7 +436,9 @@ func (s *Server) appendSlackService() { c := s.config.Slack l := s.LogService.NewLogger("[slack] ", log.LstdFlags) srv := slack.NewService(c, l) + s.TaskMaster.SlackService = srv + s.AlertService.SlackService = srv s.SetDynamicService("slack", srv) s.AppendService("slack", srv) @@ -421,7 +448,9 @@ func (s *Server) appendTelegramService() { c := s.config.Telegram l := s.LogService.NewLogger("[telegram] ", log.LstdFlags) srv := telegram.NewService(c, l) + s.TaskMaster.TelegramService = srv + s.AlertService.TelegramService = srv s.SetDynamicService("telegram", srv) s.AppendService("telegram", srv) @@ -431,7 +460,9 @@ func (s *Server) appendHipChatService() { c := s.config.HipChat l := s.LogService.NewLogger("[hipchat] ", log.LstdFlags) srv := hipchat.NewService(c, l) + s.TaskMaster.HipChatService = srv + s.AlertService.HipChatService = srv s.SetDynamicService("hipchat", srv) s.AppendService("hipchat", srv) @@ -441,7 +472,9 @@ func (s *Server) appendAlertaService() { c := s.config.Alerta l := s.LogService.NewLogger("[alerta] ", log.LstdFlags) srv := alerta.NewService(c, l) + s.TaskMaster.AlertaService = srv + s.AlertService.AlertaService = srv s.SetDynamicService("alerta", srv) s.AppendService("alerta", srv) @@ -451,7 +484,9 @@ func (s *Server) appendTalkService() { c := s.config.Talk l := s.LogService.NewLogger("[talk] ", log.LstdFlags) srv := talk.NewService(c, l) + s.TaskMaster.TalkService = srv + s.AlertService.TalkService = srv 
s.SetDynamicService("talk", srv) s.AppendService("talk", srv) @@ -527,6 +562,7 @@ func (s *Server) appendStatsService() { srv := stats.NewService(c, l) srv.TaskMaster = s.TaskMaster + s.StatsService = srv s.TaskMaster.TimingService = srv s.AppendService("stats", srv) } @@ -555,6 +591,10 @@ func (s *Server) Open() error { s.Close() return err } + // Open HTTPD Service last so that the API is not listening till everything else succeeded. + if err := s.HTTPDService.Open(); err != nil { + return err + } go s.watchServices() go s.watchConfigUpdates() @@ -617,7 +657,18 @@ func (s *Server) watchConfigUpdates() { func (s *Server) Close() error { s.stopProfile() - // First stop all tasks. + // Close all services that write points first. + if err := s.HTTPDService.Close(); err != nil { + s.Logger.Printf("E! error closing httpd service: %v", err) + } + if s.StatsService != nil { + if err := s.StatsService.Close(); err != nil { + s.Logger.Printf("E! error closing stats service: %v", err) + } + } + + // Drain the in-flight writes and stop all tasks. + s.TaskMaster.Drain() s.TaskMaster.StopTasks() // Close services now that all tasks are stopped. diff --git a/server/server_helper_test.go b/server/server_helper_test.go index 74b32553a..6382c7618 100644 --- a/server/server_helper_test.go +++ b/server/server_helper_test.go @@ -18,6 +18,7 @@ import ( iclient "github.com/influxdata/influxdb/client/v2" "github.com/influxdata/kapacitor/client/v1" "github.com/influxdata/kapacitor/server" + "github.com/influxdata/kapacitor/services/logging" "github.com/influxdata/kapacitor/services/logging/loggingtest" "github.com/influxdata/wlog" ) @@ -25,7 +26,9 @@ import ( // Server represents a test wrapper for server.Server. type Server struct { *server.Server - Config *server.Config + Config *server.Config + buildInfo server.BuildInfo + ls logging.Interface } // NewServer returns a new instance of Server. 
@@ -43,12 +46,26 @@ func NewServer(c *server.Config) *Server { panic(err) } s := Server{ - Server: srv, - Config: c, + Server: srv, + Config: c, + buildInfo: buildInfo, + ls: ls, } return &s } +func (s *Server) Restart() { + s.Server.Close() + srv, err := server.New(s.Config, s.buildInfo, s.ls) + if err != nil { + panic(err.Error()) + } + if err := srv.Open(); err != nil { + panic(err.Error()) + } + s.Server = srv +} + // OpenServer opens a test server. func OpenDefaultServer() (*Server, *client.Client) { c := NewConfig() @@ -77,6 +94,16 @@ func Client(s *Server) *client.Client { return client } +func (s *Server) Open() error { + err := s.Server.Open() + if err != nil { + return err + } + u, _ := url.Parse(s.URL()) + s.Config.HTTP.BindAddress = u.Host + return nil +} + // Close shuts down the server and removes all temporary paths. func (s *Server) Close() { s.Server.Close() diff --git a/server/server_test.go b/server/server_test.go index 3d4b9ea77..120683425 100644 --- a/server/server_test.go +++ b/server/server_test.go @@ -1,12 +1,13 @@ package server_test import ( - "bufio" + "context" "encoding/json" "fmt" "io/ioutil" "math/rand" "net/http" + "net/mail" "net/url" "os" "os/exec" @@ -24,13 +25,28 @@ import ( "github.com/influxdata/influxdb/influxql" "github.com/influxdata/influxdb/models" "github.com/influxdata/influxdb/toml" + "github.com/influxdata/kapacitor/alert" "github.com/influxdata/kapacitor/client/v1" + "github.com/influxdata/kapacitor/command" + "github.com/influxdata/kapacitor/command/commandtest" "github.com/influxdata/kapacitor/server" + alertservice "github.com/influxdata/kapacitor/services/alert" + "github.com/influxdata/kapacitor/services/alert/alerttest" + "github.com/influxdata/kapacitor/services/alerta/alertatest" + "github.com/influxdata/kapacitor/services/hipchat/hipchattest" "github.com/influxdata/kapacitor/services/opsgenie" + "github.com/influxdata/kapacitor/services/opsgenie/opsgenietest" 
"github.com/influxdata/kapacitor/services/pagerduty" + "github.com/influxdata/kapacitor/services/pagerduty/pagerdutytest" + "github.com/influxdata/kapacitor/services/sensu/sensutest" + "github.com/influxdata/kapacitor/services/slack/slacktest" + "github.com/influxdata/kapacitor/services/smtp/smtptest" + "github.com/influxdata/kapacitor/services/talk/talktest" "github.com/influxdata/kapacitor/services/telegram" + "github.com/influxdata/kapacitor/services/telegram/telegramtest" "github.com/influxdata/kapacitor/services/udf" "github.com/influxdata/kapacitor/services/victorops" + "github.com/influxdata/kapacitor/services/victorops/victoropstest" "github.com/pkg/errors" ) @@ -2937,7 +2953,7 @@ test value=1 0000000012 recordings, err := cli.ListRecordings(nil) if exp, got := 1, len(recordings); exp != got { - t.Fatalf("unexpected recordings list:\ngot %v\nexp %v", got, exp) + t.Fatalf("unexpected recordings list:\ngot %v\nexp %v\nrecordings %v", got, exp, recordings) } err = cli.DeleteRecording(recordings[0].Link) @@ -2947,12 +2963,12 @@ test value=1 0000000012 recordings, err = cli.ListRecordings(nil) if exp, got := 0, len(recordings); exp != got { - t.Errorf("unexpected recordings list:\ngot %v\nexp %v", got, exp) + t.Errorf("unexpected recordings list after delete:\ngot %v\nexp %v\nrecordings %v", got, exp, recordings) } replays, err := cli.ListReplays(nil) if exp, got := 1, len(replays); exp != got { - t.Fatalf("unexpected replays list:\ngot %v\nexp %v", got, exp) + t.Fatalf("unexpected replays list:\ngot %v\nexp %v\nreplays %v", got, exp, replays) } err = cli.DeleteReplay(replays[0].Link) @@ -2962,7 +2978,7 @@ test value=1 0000000012 replays, err = cli.ListReplays(nil) if exp, got := 0, len(replays); exp != got { - t.Errorf("unexpected replays list:\ngot %v\nexp %v", got, exp) + t.Errorf("unexpected replays list after delete:\ngot %v\nexp %v\nreplays %v", got, exp, replays) } } @@ -3083,7 +3099,7 @@ func TestServer_RecordReplayBatch(t *testing.T) { } retry++ if 
retry > 10 { - t.Fatal("failed to perfom replay") + t.Fatal("failed to perform replay") } } @@ -3149,11 +3165,11 @@ func TestServer_RecordReplayBatch(t *testing.T) { }, }, } - scanner := bufio.NewScanner(f) + dec := json.NewDecoder(f) got := make([]response, 0) - g := response{} - for scanner.Scan() { - json.Unmarshal(scanner.Bytes(), &g) + for dec.More() { + g := response{} + dec.Decode(&g) got = append(got, g) } if !reflect.DeepEqual(exp, got) { @@ -3350,11 +3366,11 @@ func TestServer_ReplayBatch(t *testing.T) { }, }, } - scanner := bufio.NewScanner(f) + dec := json.NewDecoder(f) got := make([]response, 0) - g := response{} - for scanner.Scan() { - json.Unmarshal(scanner.Bytes(), &g) + for dec.More() { + g := response{} + dec.Decode(&g) got = append(got, g) } if !reflect.DeepEqual(exp, got) { @@ -3594,11 +3610,11 @@ func TestServer_RecordReplayQuery(t *testing.T) { }, }, } - scanner := bufio.NewScanner(f) + dec := json.NewDecoder(f) got := make([]response, 0) - g := response{} - for scanner.Scan() { - json.Unmarshal(scanner.Bytes(), &g) + for dec.More() { + g := response{} + dec.Decode(&g) got = append(got, g) } if !reflect.DeepEqual(exp, got) { @@ -3628,7 +3644,7 @@ func TestServer_RecordReplayQuery(t *testing.T) { type recResponse struct { Recordings []client.Recording `json:"recordings"` } - dec := json.NewDecoder(resp.Body) + dec = json.NewDecoder(resp.Body) recR := recResponse{} dec.Decode(&recR) if exp, got := 1, len(recR.Recordings); exp != got { @@ -3866,11 +3882,11 @@ func TestServer_ReplayQuery(t *testing.T) { }, }, } - scanner := bufio.NewScanner(f) + dec := json.NewDecoder(f) got := make([]response, 0) - g := response{} - for scanner.Scan() { - json.Unmarshal(scanner.Bytes(), &g) + for dec.More() { + g := response{} + dec.Decode(&g) got = append(got, g) } if !reflect.DeepEqual(exp, got) { @@ -6599,3 +6615,1385 @@ func TestServer_DoServiceTest(t *testing.T) { } } } + +func TestServer_AlertHandlers_CRUD(t *testing.T) { + testCases := []struct { + create 
client.HandlerOptions + expCreate client.Handler + patch client.JSONPatch + expPatch client.Handler + put client.HandlerOptions + expPut client.Handler + }{ + { + create: client.HandlerOptions{ + ID: "myhandler", + Topics: []string{"system", "test"}, + Actions: []client.HandlerAction{{ + Kind: "slack", + Options: map[string]interface{}{ + "channel": "#test", + }, + }}, + }, + expCreate: client.Handler{ + Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1preview/alerts/handlers/myhandler"}, + ID: "myhandler", + Topics: []string{"system", "test"}, + Actions: []client.HandlerAction{{ + Kind: "slack", + Options: map[string]interface{}{ + "channel": "#test", + }, + }}, + }, + patch: client.JSONPatch{ + { + Path: "/topics/0", + Operation: "remove", + }, + { + Path: "/actions/0/options/channel", + Operation: "replace", + Value: "#kapacitor_test", + }, + }, + expPatch: client.Handler{ + Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1preview/alerts/handlers/myhandler"}, + ID: "myhandler", + Topics: []string{"test"}, + Actions: []client.HandlerAction{{ + Kind: "slack", + Options: map[string]interface{}{ + "channel": "#kapacitor_test", + }, + }}, + }, + put: client.HandlerOptions{ + ID: "newid", + Topics: []string{"test"}, + Actions: []client.HandlerAction{{ + Kind: "smtp", + Options: map[string]interface{}{ + "to": []string{"oncall@example.com"}, + }, + }}, + }, + expPut: client.Handler{ + Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1preview/alerts/handlers/newid"}, + ID: "newid", + Topics: []string{"test"}, + Actions: []client.HandlerAction{{ + Kind: "smtp", + Options: map[string]interface{}{ + "to": []interface{}{"oncall@example.com"}, + }, + }}, + }, + }, + { + create: client.HandlerOptions{ + ID: "anotherhandler", + Topics: []string{"test"}, + Actions: []client.HandlerAction{ + { + Kind: "slack", + Options: map[string]interface{}{ + "channel": "#test", + }, + }, + { + Kind: "log", + Options: map[string]interface{}{ + "path": 
"/tmp/alert.log", + }, + }, + }, + }, + expCreate: client.Handler{ + Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1preview/alerts/handlers/anotherhandler"}, + ID: "anotherhandler", + Topics: []string{"test"}, + Actions: []client.HandlerAction{ + { + Kind: "slack", + Options: map[string]interface{}{ + "channel": "#test", + }, + }, + { + Kind: "log", + Options: map[string]interface{}{ + "path": "/tmp/alert.log", + }, + }, + }, + }, + patch: client.JSONPatch{ + { + Path: "/topics/-", + Operation: "add", + Value: "system", + }, + { + Path: "/actions/0/options/channel", + Operation: "replace", + Value: "#kapacitor_test", + }, + { + Path: "/actions/-", + Operation: "add", + Value: map[string]interface{}{ + "kind": "smtp", + "options": map[string]interface{}{ + "to": []string{"oncall@example.com"}, + }, + }, + }, + }, + expPatch: client.Handler{ + Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1preview/alerts/handlers/anotherhandler"}, + ID: "anotherhandler", + Topics: []string{"test", "system"}, + Actions: []client.HandlerAction{ + { + Kind: "slack", + Options: map[string]interface{}{ + "channel": "#kapacitor_test", + }, + }, + { + Kind: "log", + Options: map[string]interface{}{ + "path": "/tmp/alert.log", + }, + }, + { + Kind: "smtp", + Options: map[string]interface{}{ + "to": []interface{}{"oncall@example.com"}, + }, + }, + }, + }, + put: client.HandlerOptions{ + ID: "anotherhandler", + Topics: []string{"test"}, + Actions: []client.HandlerAction{{ + Kind: "smtp", + Options: map[string]interface{}{ + "to": []string{"oncall@example.com"}, + }, + }}, + }, + expPut: client.Handler{ + Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1preview/alerts/handlers/anotherhandler"}, + ID: "anotherhandler", + Topics: []string{"test"}, + Actions: []client.HandlerAction{{ + Kind: "smtp", + Options: map[string]interface{}{ + "to": []interface{}{"oncall@example.com"}, + }, + }}, + }, + }, + } + for _, tc := range testCases { + // Create default 
config + c := NewConfig() + s := OpenServer(c) + cli := Client(s) + defer s.Close() + + h, err := cli.CreateHandler(tc.create) + if err != nil { + t.Fatal(err) + } + + if !reflect.DeepEqual(h, tc.expCreate) { + t.Errorf("unexpected handler created:\ngot\n%#v\nexp\n%#v\n", h, tc.expCreate) + } + + h, err = cli.PatchHandler(h.Link, tc.patch) + if err != nil { + t.Fatal(err) + } + + if !reflect.DeepEqual(h, tc.expPatch) { + t.Errorf("unexpected handler patched:\ngot\n%#v\nexp\n%#v\n", h, tc.expPatch) + } + + h, err = cli.ReplaceHandler(h.Link, tc.put) + if err != nil { + t.Fatal(err) + } + + if !reflect.DeepEqual(h, tc.expPut) { + t.Errorf("unexpected handler put:\ngot\n%#v\nexp\n%#v\n", h, tc.expPut) + } + + // Restart server + s.Restart() + + rh, err := cli.Handler(h.Link) + if err != nil { + t.Fatalf("could not find handler after restart: %v", err) + } + if got, exp := rh, h; !reflect.DeepEqual(got, exp) { + t.Errorf("unexpected handler after restart:\ngot\n%#v\nexp\n%#v\n", got, exp) + } + + err = cli.DeleteHandler(h.Link) + if err != nil { + t.Fatal(err) + } + + _, err = cli.Handler(h.Link) + if err == nil { + t.Errorf("expected handler to be deleted") + } + } +} + +func TestServer_AlertHandlers(t *testing.T) { + + resultJSON := `{"Series":[{"name":"alert","columns":["time","value"],"values":[["1970-01-01T00:00:00Z",1]]}],"Messages":null,"Err":null}` + + alertData := alertservice.AlertData{ + ID: "id", + Message: "message", + Details: "details", + Time: time.Date(1970, 1, 1, 0, 0, 0, 0, time.UTC), + Level: alert.Critical, + Data: influxql.Result{ + Series: models.Rows{ + { + Name: "alert", + Columns: []string{"time", "value"}, + Values: [][]interface{}{[]interface{}{ + time.Date(1970, 1, 1, 0, 0, 0, 0, time.UTC).Format(time.RFC3339Nano), + 1.0, + }}, + }, + }, + }, + } + adJSON, err := json.Marshal(alertData) + if err != nil { + t.Fatal(err) + } + testCases := []struct { + handlerAction client.HandlerAction + setup func(*server.Config, *client.HandlerAction) 
(context.Context, error) + result func(context.Context) error + }{ + { + handlerAction: client.HandlerAction{ + Kind: "alerta", + Options: map[string]interface{}{ + "token": "testtoken1234567", + "origin": "kapacitor", + "group": "test", + "environment": "env", + }, + }, + setup: func(c *server.Config, ha *client.HandlerAction) (context.Context, error) { + ts := alertatest.NewServer() + ctxt := context.WithValue(nil, "server", ts) + + c.Alerta.Enabled = true + c.Alerta.URL = ts.URL + return ctxt, nil + }, + result: func(ctxt context.Context) error { + ts := ctxt.Value("server").(*alertatest.Server) + ts.Close() + got := ts.Requests() + exp := []alertatest.Request{{ + URL: "/alert", + Authorization: "Key testtoken1234567", + PostData: alertatest.PostData{ + Resource: "alert", + Event: "id", + Group: "test", + Environment: "env", + Text: "message", + Origin: "kapacitor", + Service: []string{"alert"}, + }, + }} + if !reflect.DeepEqual(exp, got) { + return fmt.Errorf("unexpected alerta request:\nexp\n%+v\ngot\n%+v\n", exp, got) + } + return nil + }, + }, + { + handlerAction: client.HandlerAction{ + Kind: "exec", + Options: map[string]interface{}{ + "prog": "/bin/alert-handler.sh", + "args": []string{"arg1", "arg2", "arg3"}, + }, + }, + setup: func(c *server.Config, ha *client.HandlerAction) (context.Context, error) { + te := alerttest.NewExec() + ctxt := context.WithValue(nil, "exec", te) + c.Commander = te.Commander + return ctxt, nil + }, + result: func(ctxt context.Context) error { + te := ctxt.Value("exec").(*alerttest.Exec) + expData := []*commandtest.Command{{ + Spec: command.Spec{ + Prog: "/bin/alert-handler.sh", + Args: []string{"arg1", "arg2", "arg3"}, + }, + Started: true, + Waited: true, + Killed: false, + StdinData: append(adJSON, '\n'), + }} + cmds := te.Commands() + if got, exp := len(cmds), len(expData); got != exp { + return fmt.Errorf("unexpected commands length: got %d exp %d", got, exp) + } + for i := range expData { + if err := 
expData[i].Compare(cmds[i]); err != nil { + return fmt.Errorf("unexpected command %d: %v", i, err) + } + } + return nil + }, + }, + { + handlerAction: client.HandlerAction{ + Kind: "hipchat", + Options: map[string]interface{}{ + "token": "testtoken1234567", + "room": "1234567", + }, + }, + setup: func(c *server.Config, ha *client.HandlerAction) (context.Context, error) { + ts := hipchattest.NewServer() + ctxt := context.WithValue(nil, "server", ts) + + c.HipChat.Enabled = true + c.HipChat.URL = ts.URL + return ctxt, nil + }, + result: func(ctxt context.Context) error { + ts := ctxt.Value("server").(*hipchattest.Server) + ts.Close() + got := ts.Requests() + exp := []hipchattest.Request{{ + URL: "/1234567/notification?auth_token=testtoken1234567", + PostData: hipchattest.PostData{ + From: "kapacitor", + Message: "message", + Color: "red", + Notify: true, + }, + }} + if !reflect.DeepEqual(exp, got) { + return fmt.Errorf("unexpected hipchat request:\nexp\n%+v\ngot\n%+v\n", exp, got) + } + return nil + }, + }, + { + handlerAction: client.HandlerAction{ + Kind: "log", + Options: map[string]interface{}{ + "mode": 0604, + }, + }, + setup: func(c *server.Config, ha *client.HandlerAction) (context.Context, error) { + tdir := MustTempDir() + p := path.Join(tdir, "alert.log") + + ha.Options["path"] = p + + l := alerttest.NewLog(p) + + ctxt := context.WithValue(nil, "tdir", tdir) + ctxt = context.WithValue(ctxt, "log", l) + return ctxt, nil + }, + result: func(ctxt context.Context) error { + tdir := ctxt.Value("tdir").(string) + defer os.RemoveAll(tdir) + l := ctxt.Value("log").(*alerttest.Log) + expData := []alertservice.AlertData{alertData} + expMode := os.FileMode(0604) + + m, err := l.Mode() + if err != nil { + return err + } + if got, exp := m, expMode; exp != got { + return fmt.Errorf("unexpected file mode: got %v exp %v", got, exp) + } + data, err := l.Data() + if err != nil { + return err + } + if got, exp := data, expData; !reflect.DeepEqual(got, exp) { + return 
fmt.Errorf("unexpected alert data written to log:\ngot\n%+v\nexp\n%+v\n", got, exp) + } + return nil + }, + }, + { + handlerAction: client.HandlerAction{ + Kind: "opsgenie", + Options: map[string]interface{}{ + "teams-list": []string{"A team", "B team"}, + "recipients-list": []string{"test_recipient1", "test_recipient2"}, + }, + }, + setup: func(c *server.Config, ha *client.HandlerAction) (context.Context, error) { + ts := opsgenietest.NewServer() + ctxt := context.WithValue(nil, "server", ts) + + c.OpsGenie.Enabled = true + c.OpsGenie.URL = ts.URL + c.OpsGenie.APIKey = "api_key" + return ctxt, nil + }, + result: func(ctxt context.Context) error { + ts := ctxt.Value("server").(*opsgenietest.Server) + ts.Close() + got := ts.Requests() + exp := []opsgenietest.Request{{ + URL: "/", + PostData: opsgenietest.PostData{ + ApiKey: "api_key", + Message: "message", + Entity: "id", + Alias: "id", + Note: "", + Details: map[string]interface{}{ + "Level": "CRITICAL", + "Monitoring Tool": "Kapacitor", + }, + Description: resultJSON, + Teams: []string{"A team", "B team"}, + Recipients: []string{"test_recipient1", "test_recipient2"}, + }, + }} + if !reflect.DeepEqual(exp, got) { + return fmt.Errorf("unexpected opsgenie request:\nexp\n%+v\ngot\n%+v\n", exp, got) + } + return nil + }, + }, + { + handlerAction: client.HandlerAction{ + Kind: "pagerduty", + Options: map[string]interface{}{ + "service-key": "service_key", + }, + }, + setup: func(c *server.Config, ha *client.HandlerAction) (context.Context, error) { + ts := pagerdutytest.NewServer() + ctxt := context.WithValue(nil, "server", ts) + + c.PagerDuty.Enabled = true + c.PagerDuty.URL = ts.URL + return ctxt, nil + }, + result: func(ctxt context.Context) error { + ts := ctxt.Value("server").(*pagerdutytest.Server) + kapacitorURL := ctxt.Value("kapacitorURL").(string) + ts.Close() + got := ts.Requests() + exp := []pagerdutytest.Request{{ + URL: "/", + PostData: pagerdutytest.PostData{ + ServiceKey: "service_key", + EventType: 
"trigger", + Description: "message", + Client: "kapacitor", + ClientURL: kapacitorURL, + Details: resultJSON, + }, + }} + if !reflect.DeepEqual(exp, got) { + return fmt.Errorf("unexpected pagerduty request:\nexp\n%+v\ngot\n%+v\n", exp, got) + } + return nil + }, + }, + { + handlerAction: client.HandlerAction{ + Kind: "post", + }, + setup: func(c *server.Config, ha *client.HandlerAction) (context.Context, error) { + ts := alerttest.NewPostServer() + + ha.Options = map[string]interface{}{"url": ts.URL} + + ctxt := context.WithValue(nil, "server", ts) + return ctxt, nil + }, + result: func(ctxt context.Context) error { + ts := ctxt.Value("server").(*alerttest.PostServer) + ts.Close() + exp := []alertservice.AlertData{alertData} + got := ts.Data() + if !reflect.DeepEqual(exp, got) { + return fmt.Errorf("unexpected post request:\nexp\n%+v\ngot\n%+v\n", exp, got) + } + return nil + }, + }, + { + handlerAction: client.HandlerAction{ + Kind: "sensu", + }, + setup: func(c *server.Config, ha *client.HandlerAction) (context.Context, error) { + ts, err := sensutest.NewServer() + if err != nil { + return nil, err + } + ctxt := context.WithValue(nil, "server", ts) + + c.Sensu.Enabled = true + c.Sensu.Addr = ts.Addr + c.Sensu.Source = "Kapacitor" + return ctxt, nil + }, + result: func(ctxt context.Context) error { + ts := ctxt.Value("server").(*sensutest.Server) + ts.Close() + exp := []sensutest.Request{{ + Source: "Kapacitor", + Output: "message", + Name: "id", + Status: 2, + }} + got := ts.Requests() + if !reflect.DeepEqual(exp, got) { + return fmt.Errorf("unexpected sensu request:\nexp\n%+v\ngot\n%+v\n", exp, got) + } + return nil + }, + }, + { + handlerAction: client.HandlerAction{ + Kind: "slack", + Options: map[string]interface{}{ + "channel": "#test", + }, + }, + setup: func(c *server.Config, ha *client.HandlerAction) (context.Context, error) { + ts := slacktest.NewServer() + ctxt := context.WithValue(nil, "server", ts) + + c.Slack.Enabled = true + c.Slack.URL = ts.URL + 
"/test/slack/url" + return ctxt, nil + }, + result: func(ctxt context.Context) error { + ts := ctxt.Value("server").(*slacktest.Server) + ts.Close() + got := ts.Requests() + exp := []slacktest.Request{{ + URL: "/test/slack/url", + PostData: slacktest.PostData{ + Channel: "#test", + Username: "kapacitor", + Text: "", + Attachments: []slacktest.Attachment{ + { + Fallback: "message", + Color: "danger", + Text: "message", + Mrkdwn_in: []string{"text"}, + }, + }, + }, + }} + if !reflect.DeepEqual(exp, got) { + return fmt.Errorf("unexpected slack request:\nexp\n%+v\ngot\n%+v\n", exp, got) + } + return nil + }, + }, + { + handlerAction: client.HandlerAction{ + Kind: "smtp", + Options: map[string]interface{}{ + "to": []string{"oncall@example.com", "backup@example.com"}, + }, + }, + setup: func(c *server.Config, ha *client.HandlerAction) (context.Context, error) { + ts, err := smtptest.NewServer() + if err != nil { + return nil, err + } + ctxt := context.WithValue(nil, "server", ts) + + c.SMTP.Enabled = true + c.SMTP.Host = ts.Host + c.SMTP.Port = ts.Port + c.SMTP.From = "test@example.com" + return ctxt, nil + }, + result: func(ctxt context.Context) error { + ts := ctxt.Value("server").(*smtptest.Server) + ts.Close() + + errors := ts.Errors() + if len(errors) != 0 { + return fmt.Errorf("multiple errors %d: %v", len(errors), errors) + } + + expMail := []*smtptest.Message{{ + Header: mail.Header{ + "Mime-Version": []string{"1.0"}, + "Content-Type": []string{"text/html; charset=UTF-8"}, + "Content-Transfer-Encoding": []string{"quoted-printable"}, + "To": []string{"oncall@example.com, backup@example.com"}, + "From": []string{"test@example.com"}, + "Subject": []string{"message"}, + }, + Body: "details\n", + }} + msgs := ts.SentMessages() + if got, exp := len(msgs), len(expMail); got != exp { + return fmt.Errorf("unexpected number of messages sent: got %d exp %d", got, exp) + } + for i, exp := range expMail { + got := msgs[i] + if err := exp.Compare(got); err != nil { + return 
fmt.Errorf("unexpected message %d: %v", i, err) + } + } + return nil + }, + }, + { + handlerAction: client.HandlerAction{ + Kind: "talk", + }, + setup: func(c *server.Config, ha *client.HandlerAction) (context.Context, error) { + ts := talktest.NewServer() + ctxt := context.WithValue(nil, "server", ts) + + c.Talk.Enabled = true + c.Talk.URL = ts.URL + c.Talk.AuthorName = "Kapacitor" + return ctxt, nil + }, + result: func(ctxt context.Context) error { + ts := ctxt.Value("server").(*talktest.Server) + ts.Close() + got := ts.Requests() + exp := []talktest.Request{{ + URL: "/", + PostData: talktest.PostData{ + AuthorName: "Kapacitor", + Text: "message", + Title: "id", + }, + }} + if !reflect.DeepEqual(exp, got) { + return fmt.Errorf("unexpected talk request:\nexp\n%+v\ngot\n%+v\n", exp, got) + } + return nil + }, + }, + { + handlerAction: client.HandlerAction{ + Kind: "tcp", + }, + setup: func(c *server.Config, ha *client.HandlerAction) (context.Context, error) { + ts, err := alerttest.NewTCPServer() + if err != nil { + return nil, err + } + + ha.Options = map[string]interface{}{"address": ts.Addr} + + ctxt := context.WithValue(nil, "server", ts) + return ctxt, nil + }, + result: func(ctxt context.Context) error { + ts := ctxt.Value("server").(*alerttest.TCPServer) + ts.Close() + exp := []alertservice.AlertData{alertData} + got := ts.Data() + if !reflect.DeepEqual(exp, got) { + return fmt.Errorf("unexpected tcp request:\nexp\n%+v\ngot\n%+v\n", exp, got) + } + return nil + }, + }, + { + handlerAction: client.HandlerAction{ + Kind: "telegram", + Options: map[string]interface{}{ + "chat-id": "chat id", + "disable-web-page-preview": true, + }, + }, + setup: func(c *server.Config, ha *client.HandlerAction) (context.Context, error) { + ts := telegramtest.NewServer() + ctxt := context.WithValue(nil, "server", ts) + + c.Telegram.Enabled = true + c.Telegram.URL = ts.URL + "/bot" + c.Telegram.Token = "TOKEN:AUTH" + return ctxt, nil + }, + result: func(ctxt context.Context) error 
{ + ts := ctxt.Value("server").(*telegramtest.Server) + ts.Close() + got := ts.Requests() + exp := []telegramtest.Request{{ + URL: "/botTOKEN:AUTH/sendMessage", + PostData: telegramtest.PostData{ + ChatId: "chat id", + Text: "message", + ParseMode: "", + DisableWebPagePreview: true, + DisableNotification: false, + }, + }} + if !reflect.DeepEqual(exp, got) { + return fmt.Errorf("unexpected telegram request:\nexp\n%+v\ngot\n%+v\n", exp, got) + } + return nil + }, + }, + { + handlerAction: client.HandlerAction{ + Kind: "victorops", + Options: map[string]interface{}{ + "routing-key": "key", + }, + }, + setup: func(c *server.Config, ha *client.HandlerAction) (context.Context, error) { + ts := victoropstest.NewServer() + ctxt := context.WithValue(nil, "server", ts) + + c.VictorOps.Enabled = true + c.VictorOps.URL = ts.URL + c.VictorOps.APIKey = "api_key" + return ctxt, nil + }, + result: func(ctxt context.Context) error { + ts := ctxt.Value("server").(*victoropstest.Server) + ts.Close() + got := ts.Requests() + exp := []victoropstest.Request{{ + URL: "/api_key/key", + PostData: victoropstest.PostData{ + MessageType: "CRITICAL", + EntityID: "id", + StateMessage: "message", + Timestamp: 0, + MonitoringTool: "kapacitor", + Data: resultJSON, + }, + }} + if !reflect.DeepEqual(exp, got) { + return fmt.Errorf("unexpected victorops request:\nexp\n%+v\ngot\n%+v\n", exp, got) + } + return nil + }, + }, + } + for _, tc := range testCases { + t.Run(tc.handlerAction.Kind, func(t *testing.T) { + kind := tc.handlerAction.Kind + // Create default config + c := NewConfig() + var ctxt context.Context + if tc.setup != nil { + var err error + ctxt, err = tc.setup(c, &tc.handlerAction) + if err != nil { + t.Fatal(err) + } + } + s := OpenServer(c) + cli := Client(s) + closed := false + defer func() { + if !closed { + s.Close() + } + }() + ctxt = context.WithValue(ctxt, "kapacitorURL", s.URL()) + + if _, err := cli.CreateHandler(client.HandlerOptions{ + ID: "testAlertHandlers", + Topics: 
[]string{"test"}, + Actions: []client.HandlerAction{ + tc.handlerAction, + }, + }); err != nil { + t.Fatalf("%s: %v", kind, err) + } + + tick := ` +stream + |from() + .measurement('alert') + |alert() + .topic('test') + .id('id') + .message('message') + .details('details') + .crit(lambda: TRUE) +` + + if _, err := cli.CreateTask(client.CreateTaskOptions{ + ID: "testAlertHandlers", + Type: client.StreamTask, + DBRPs: []client.DBRP{{ + Database: "mydb", + RetentionPolicy: "myrp", + }}, + TICKscript: tick, + Status: client.Enabled, + }); err != nil { + t.Fatalf("%s: %v", kind, err) + } + + point := "alert value=1 0000000000" + v := url.Values{} + v.Add("precision", "s") + s.MustWrite("mydb", "myrp", point, v) + + // Close the entire server to ensure all data is processed + s.Close() + closed = true + + if err := tc.result(ctxt); err != nil { + t.Errorf("%s: %v", kind, err) + } + }) + } +} + +func TestServer_AlertTopic_PersistedState(t *testing.T) { + // Setup test TCP server + ts, err := alerttest.NewTCPServer() + if err != nil { + t.Fatal(err) + } + defer ts.Close() + + // Create default config + c := NewConfig() + s := OpenServer(c) + cli := Client(s) + defer s.Close() + + if _, err := cli.CreateHandler(client.HandlerOptions{ + ID: "testAlertHandler", + Topics: []string{"test"}, + Actions: []client.HandlerAction{{ + Kind: "tcp", + Options: map[string]interface{}{"address": ts.Addr}, + }}, + }); err != nil { + t.Fatal(err) + } + + tick := ` +stream + |from() + .measurement('alert') + |alert() + .topic('test') + .id('id') + .message('message') + .details('details') + .warn(lambda: TRUE) +` + + if _, err := cli.CreateTask(client.CreateTaskOptions{ + ID: "testAlertHandlers", + Type: client.StreamTask, + DBRPs: []client.DBRP{{ + Database: "mydb", + RetentionPolicy: "myrp", + }}, + TICKscript: tick, + Status: client.Enabled, + }); err != nil { + t.Fatal(err) + } + + point := "alert value=1 0000000000" + v := url.Values{} + v.Add("precision", "s") + s.MustWrite("mydb", 
"myrp", point, v) + + // Restart the server + s.Restart() + + l := cli.TopicEventsLink("test") + expTopicEvents := client.TopicEvents{ + Link: l, + Topic: "test", + Events: []client.TopicEvent{{ + Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1preview/alerts/topics/test/events/id"}, + ID: "id", + State: client.EventState{ + Message: "message", + Details: "details", + Time: time.Date(1970, 1, 1, 0, 0, 0, 0, time.UTC), + Duration: 0, + Level: "WARNING", + }, + }}, + } + + te, err := cli.ListTopicEvents(l, nil) + if err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(te, expTopicEvents) { + t.Errorf("unexpected topic events:\ngot\n%+v\nexp\n%+v\n", te, expTopicEvents) + } + event, err := cli.TopicEvent(expTopicEvents.Events[0].Link) + if err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(event, expTopicEvents.Events[0]) { + t.Errorf("unexpected topic event:\ngot\n%+v\nexp\n%+v\n", event, expTopicEvents.Events[0]) + } + + te, err = cli.ListTopicEvents(l, &client.ListTopicEventsOptions{ + MinLevel: "CRITICAL", + }) + if err != nil { + t.Fatal(err) + } + expTopicEvents.Events = expTopicEvents.Events[0:0] + if !reflect.DeepEqual(te, expTopicEvents) { + t.Errorf("unexpected topic events with minLevel:\ngot\n%+v\nexp\n%+v\n", te, expTopicEvents) + } + + l = cli.TopicLink("test") + if err := cli.DeleteTopic(l); err != nil { + t.Fatal(err) + } + te, err = cli.ListTopicEvents(l, nil) + if err == nil { + t.Fatal("expected error for deleted topic") + } +} + +func TestServer_AlertListHandlers(t *testing.T) { + // Setup test TCP server + ts, err := alerttest.NewTCPServer() + if err != nil { + t.Fatal(err) + } + defer ts.Close() + + // Create default config + c := NewConfig() + s := OpenServer(c) + cli := Client(s) + defer s.Close() + + topics := []string{"test"} + actions := []client.HandlerAction{{ + Kind: "tcp", + Options: map[string]interface{}{"address": ts.Addr}, + }} + + // Number of handlers to create + n := 3 + for i := 0; i < n; i++ { + id := 
fmt.Sprintf("handler%d", i) + if _, err := cli.CreateHandler(client.HandlerOptions{ + ID: id, + Topics: topics, + Actions: actions, + }); err != nil { + t.Fatal(err) + } + } + + expHandlers := client.Handlers{ + Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1preview/alerts/handlers?pattern="}, + Handlers: []client.Handler{ + { + Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1preview/alerts/handlers/handler0"}, + ID: "handler0", + Topics: topics, + Actions: actions, + }, + { + Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1preview/alerts/handlers/handler1"}, + ID: "handler1", + Topics: topics, + Actions: actions, + }, + { + Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1preview/alerts/handlers/handler2"}, + ID: "handler2", + Topics: topics, + Actions: actions, + }, + }, + } + + handlers, err := cli.ListHandlers(nil) + if err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(handlers, expHandlers) { + t.Errorf("unexpected handlers:\ngot\n%+v\nexp\n%+v\n", handlers, expHandlers) + } + + // Restart the server + s.Restart() + + // Check again + handlers, err = cli.ListHandlers(nil) + if err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(handlers, expHandlers) { + t.Errorf("unexpected handlers after restart:\ngot\n%+v\nexp\n%+v\n", handlers, expHandlers) + } + + var exp client.Handlers + + // Pattern = * + handlers, err = cli.ListHandlers(&client.ListHandlersOptions{ + Pattern: "*", + }) + if err != nil { + t.Fatal(err) + } + exp = expHandlers + exp.Link.Href = "/kapacitor/v1preview/alerts/handlers?pattern=%2A" + if !reflect.DeepEqual(handlers, exp) { + t.Errorf("unexpected handlers with pattern \"*\":\ngot\n%+v\nexp\n%+v\n", handlers, exp) + } + + // Pattern = handler* + handlers, err = cli.ListHandlers(&client.ListHandlersOptions{ + Pattern: "handler*", + }) + if err != nil { + t.Fatal(err) + } + exp = expHandlers + exp.Link.Href = "/kapacitor/v1preview/alerts/handlers?pattern=handler%2A" + if 
!reflect.DeepEqual(handlers, exp) { + t.Errorf("unexpected handlers with pattern \"test\":\ngot\n%+v\nexp\n%+v\n", handlers, exp) + } + + // Pattern = handler0 + handlers, err = cli.ListHandlers(&client.ListHandlersOptions{ + Pattern: "handler0", + }) + if err != nil { + t.Fatal(err) + } + exp = expHandlers + exp.Link.Href = "/kapacitor/v1preview/alerts/handlers?pattern=handler0" + exp.Handlers = expHandlers.Handlers[0:1] + if !reflect.DeepEqual(handlers, exp) { + t.Errorf("unexpected handlers with pattern \"test\":\ngot\n%+v\nexp\n%+v\n", handlers, exp) + } + + // List handlers of test topic + l := cli.TopicHandlersLink("test") + topicHandlers, err := cli.ListTopicHandlers(l) + expTopicHandlers := client.TopicHandlers{ + Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1preview/alerts/topics/test/handlers"}, + Topic: "test", + Handlers: expHandlers.Handlers, + } + if !reflect.DeepEqual(topicHandlers, expTopicHandlers) { + t.Errorf("unexpected topic handlers:\ngot\n%+v\nexp\n%+v\n", topicHandlers, expTopicHandlers) + } +} + +func TestServer_AlertListTopics(t *testing.T) { + // Setup test TCP server + ts, err := alerttest.NewTCPServer() + if err != nil { + t.Fatal(err) + } + defer ts.Close() + + // Create default config + c := NewConfig() + s := OpenServer(c) + cli := Client(s) + defer s.Close() + + if _, err := cli.CreateHandler(client.HandlerOptions{ + ID: "testAlertHandler", + Topics: []string{"test", "system", "misc"}, + Actions: []client.HandlerAction{{ + Kind: "tcp", + Options: map[string]interface{}{"address": ts.Addr}, + }}, + }); err != nil { + t.Fatal(err) + } + + expTopics := client.Topics{ + Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1preview/alerts/topics?min-level=OK&pattern="}, + Topics: []client.Topic{ + { + Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1preview/alerts/topics/misc"}, + ID: "misc", + Level: "OK", + EventsLink: client.Link{Relation: "events", Href: 
"/kapacitor/v1preview/alerts/topics/misc/events"}, + HandlersLink: client.Link{Relation: "handlers", Href: "/kapacitor/v1preview/alerts/topics/misc/handlers"}, + }, + { + Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1preview/alerts/topics/system"}, + ID: "system", + Level: "OK", + EventsLink: client.Link{Relation: "events", Href: "/kapacitor/v1preview/alerts/topics/system/events"}, + HandlersLink: client.Link{Relation: "handlers", Href: "/kapacitor/v1preview/alerts/topics/system/handlers"}, + }, + { + Link: client.Link{Relation: client.Self, Href: "/kapacitor/v1preview/alerts/topics/test"}, + ID: "test", + Level: "OK", + EventsLink: client.Link{Relation: "events", Href: "/kapacitor/v1preview/alerts/topics/test/events"}, + HandlersLink: client.Link{Relation: "handlers", Href: "/kapacitor/v1preview/alerts/topics/test/handlers"}, + }, + }, + } + topics, err := cli.ListTopics(nil) + if err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(topics, expTopics) { + t.Errorf("unexpected topics:\ngot\n%+v\nexp\n%+v\n", topics, expTopics) + } + + tick := ` +stream + |from() + .measurement('alert') + |alert() + .topic('test') + .id('id') + .message('message') + .details('details') + .crit(lambda: TRUE) +` + + if _, err := cli.CreateTask(client.CreateTaskOptions{ + ID: "testAlertHandlers", + Type: client.StreamTask, + DBRPs: []client.DBRP{{ + Database: "mydb", + RetentionPolicy: "myrp", + }}, + TICKscript: tick, + Status: client.Enabled, + }); err != nil { + t.Fatal(err) + } + + point := "alert value=1 0000000000" + v := url.Values{} + v.Add("precision", "s") + s.MustWrite("mydb", "myrp", point, v) + + // Restart the server + s.Restart() + + // Update expected topics since we triggered an event. 
+ expTopics.Topics[2].Level = "CRITICAL" + + // Check again + topics, err = cli.ListTopics(nil) + if err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(topics, expTopics) { + t.Errorf("unexpected topics after restart:\ngot\n%+v\nexp\n%+v\n", topics, expTopics) + } + + var exp client.Topics + + // Pattern = * + topics, err = cli.ListTopics(&client.ListTopicsOptions{ + Pattern: "*", + }) + if err != nil { + t.Fatal(err) + } + exp = expTopics + exp.Link.Href = "/kapacitor/v1preview/alerts/topics?min-level=OK&pattern=%2A" + if !reflect.DeepEqual(topics, exp) { + t.Errorf("unexpected topics with pattern \"*\":\ngot\n%+v\nexp\n%+v\n", topics, exp) + } + + // Pattern = test + topics, err = cli.ListTopics(&client.ListTopicsOptions{ + Pattern: "test", + }) + if err != nil { + t.Fatal(err) + } + exp = expTopics + exp.Link.Href = "/kapacitor/v1preview/alerts/topics?min-level=OK&pattern=test" + exp.Topics = expTopics.Topics[2:] + if !reflect.DeepEqual(topics, exp) { + t.Errorf("unexpected topics with pattern \"test\":\ngot\n%+v\nexp\n%+v\n", topics, exp) + } + + // MinLevel = INFO + topics, err = cli.ListTopics(&client.ListTopicsOptions{ + MinLevel: "INFO", + }) + if err != nil { + t.Fatal(err) + } + exp = expTopics + exp.Link.Href = "/kapacitor/v1preview/alerts/topics?min-level=INFO&pattern=" + exp.Topics = expTopics.Topics[2:] + if !reflect.DeepEqual(topics, exp) { + t.Errorf("unexpected topics min level \"info\":\ngot\n%+v\nexp\n%+v\n", topics, exp) + } +} + +func TestServer_AlertHandler_MultipleActions(t *testing.T) { + resultJSON := `{"Series":[{"name":"alert","columns":["time","value"],"values":[["1970-01-01T00:00:00Z",1]]}],"Messages":null,"Err":null}` + + // Create default config + c := NewConfig() + + // Configure slack + slack := slacktest.NewServer() + c.Slack.Enabled = true + c.Slack.URL = slack.URL + "/test/slack/url" + + // Configure victorops + vo := victoropstest.NewServer() + c.VictorOps.Enabled = true + c.VictorOps.URL = vo.URL + c.VictorOps.APIKey = 
"api_key" + + s := OpenServer(c) + cli := Client(s) + closed := false + defer func() { + if !closed { + s.Close() + } + }() + + if _, err := cli.CreateHandler(client.HandlerOptions{ + ID: "testAlertHandlers", + Topics: []string{"test"}, + Actions: []client.HandlerAction{ + { + Kind: "victorops", + Options: map[string]interface{}{ + "routing-key": "key", + }, + }, + { + Kind: "slack", + Options: map[string]interface{}{ + "channel": "#test", + }, + }, + }, + }); err != nil { + t.Fatal(err) + } + + tick := ` +stream + |from() + .measurement('alert') + |alert() + .topic('test') + .id('id') + .message('message') + .details('details') + .crit(lambda: TRUE) +` + + if _, err := cli.CreateTask(client.CreateTaskOptions{ + ID: "testAlertHandlers", + Type: client.StreamTask, + DBRPs: []client.DBRP{{ + Database: "mydb", + RetentionPolicy: "myrp", + }}, + TICKscript: tick, + Status: client.Enabled, + }); err != nil { + t.Fatal(err) + } + + point := "alert value=1 0000000000" + v := url.Values{} + v.Add("precision", "s") + s.MustWrite("mydb", "myrp", point, v) + + // Close the entire server to ensure all data is processed + s.Close() + closed = true + + // Validate slack + { + slack.Close() + got := slack.Requests() + exp := []slacktest.Request{{ + URL: "/test/slack/url", + PostData: slacktest.PostData{ + Channel: "#test", + Username: "kapacitor", + Text: "", + Attachments: []slacktest.Attachment{ + { + Fallback: "message", + Color: "danger", + Text: "message", + Mrkdwn_in: []string{"text"}, + }, + }, + }, + }} + if !reflect.DeepEqual(exp, got) { + t.Errorf("unexpected slack request:\nexp\n%+v\ngot\n%+v\n", exp, got) + } + } + // Validate victorops + { + vo.Close() + got := vo.Requests() + exp := []victoropstest.Request{{ + URL: "/api_key/key", + PostData: victoropstest.PostData{ + MessageType: "CRITICAL", + EntityID: "id", + StateMessage: "message", + Timestamp: 0, + MonitoringTool: "kapacitor", + Data: resultJSON, + }, + }} + if !reflect.DeepEqual(exp, got) { + 
t.Errorf("unexpected victorops request:\nexp\n%+v\ngot\n%+v\n", exp, got) + } + } +} diff --git a/services/alert/alerttest/alerttest.go b/services/alert/alerttest/alerttest.go new file mode 100644 index 000000000..353c7dcb4 --- /dev/null +++ b/services/alert/alerttest/alerttest.go @@ -0,0 +1,160 @@ +package alerttest + +import ( + "encoding/json" + "net" + "net/http" + "net/http/httptest" + "os" + "sync" + + "github.com/influxdata/kapacitor/command" + "github.com/influxdata/kapacitor/command/commandtest" + alertservice "github.com/influxdata/kapacitor/services/alert" +) + +type Log struct { + path string +} + +func NewLog(p string) *Log { + return &Log{ + path: p, + } +} + +func (l *Log) Data() ([]alertservice.AlertData, error) { + f, err := os.Open(l.path) + if err != nil { + return nil, err + } + defer f.Close() + dec := json.NewDecoder(f) + var data []alertservice.AlertData + for dec.More() { + ad := alertservice.AlertData{} + err := dec.Decode(&ad) + if err != nil { + return nil, err + } + data = append(data, ad) + } + return data, nil +} + +func (l *Log) Mode() (os.FileMode, error) { + stat, err := os.Stat(l.path) + if err != nil { + return 0, err + } + return stat.Mode(), nil +} + +type Exec struct { + ct *commandtest.Commander + Commander command.Commander +} + +func NewExec() *Exec { + ct := new(commandtest.Commander) + return &Exec{ + ct: ct, + Commander: ct, + } +} + +func (e *Exec) Commands() []*commandtest.Command { + return e.ct.Commands() +} + +type TCPServer struct { + Addr string + + l *net.TCPListener + + data []alertservice.AlertData + + wg sync.WaitGroup + closed bool +} + +func NewTCPServer() (*TCPServer, error) { + addr, err := net.ResolveTCPAddr("tcp", "localhost:0") + if err != nil { + return nil, err + } + l, err := net.ListenTCP("tcp", addr) + if err != nil { + return nil, err + } + s := &TCPServer{ + l: l, + Addr: l.Addr().String(), + } + s.wg.Add(1) + go func() { + defer s.wg.Done() + s.run() + }() + return s, nil +} + +func (s 
*TCPServer) Data() []alertservice.AlertData { + return s.data +} + +func (s *TCPServer) Close() { + if s.closed { + return + } + s.closed = true + s.l.Close() + s.wg.Wait() +} + +func (s *TCPServer) run() { + for { + conn, err := s.l.Accept() + if err != nil { + return + } + func() { + defer conn.Close() + ad := alertservice.AlertData{} + json.NewDecoder(conn).Decode(&ad) + s.data = append(s.data, ad) + }() + } +} + +type PostServer struct { + ts *httptest.Server + URL string + data []alertservice.AlertData + closed bool +} + +func NewPostServer() *PostServer { + s := new(PostServer) + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + ad := alertservice.AlertData{} + dec := json.NewDecoder(r.Body) + dec.Decode(&ad) + s.data = append(s.data, ad) + })) + s.ts = ts + s.URL = ts.URL + return s +} + +func (s *PostServer) Data() []alertservice.AlertData { + return s.data +} + +func (s *PostServer) Close() { + if s.closed { + return + } + s.closed = true + s.ts.Close() +} diff --git a/services/alert/config.go b/services/alert/config.go new file mode 100644 index 000000000..98a0de75f --- /dev/null +++ b/services/alert/config.go @@ -0,0 +1,8 @@ +package alert + +type Config struct { +} + +func NewConfig() Config { + return Config{} +} diff --git a/services/alert/dao.go b/services/alert/dao.go new file mode 100644 index 000000000..f6ebe67ce --- /dev/null +++ b/services/alert/dao.go @@ -0,0 +1,282 @@ +package alert + +import ( + "encoding/json" + "errors" + "fmt" + "regexp" + "time" + + "github.com/influxdata/kapacitor/alert" + "github.com/influxdata/kapacitor/services/storage" +) + +var ( + ErrHandlerSpecExists = errors.New("handler spec already exists") + ErrNoHandlerSpecExists = errors.New("no handler spec exists") +) + +// Data access object for HandlerSpec data. +type HandlerSpecDAO interface { + // Retrieve a handler + Get(id string) (HandlerSpec, error) + + // Create a handler. 
+ // ErrHandlerSpecExists is returned if a handler already exists with the same ID. + Create(h HandlerSpec) error + + // Replace an existing handler. + // ErrNoHandlerSpecExists is returned if the handler does not exist. + Replace(h HandlerSpec) error + + // Delete a handler. + // It is not an error to delete an non-existent handler. + Delete(id string) error + + // List handlers matching a pattern. + // The pattern is shell/glob matching see https://golang.org/pkg/path/#Match + // Offset and limit are pagination bounds. Offset is inclusive starting at index 0. + // More results may exist while the number of returned items is equal to limit. + List(pattern string, offset, limit int) ([]HandlerSpec, error) +} + +//-------------------------------------------------------------------- +// The following structures are stored in a database via gob encoding. +// Changes to the structures could break existing data. +// +// Many of these structures are exact copies of structures found elsewhere, +// this is intentional so that all structures stored in the database are +// defined here and nowhere else. So as to not accidentally change +// the gob serialization format in incompatible ways. + +// version is the current version of the HandlerSpec structure. +const handlerSpecVersion = 1 + +// HandlerSpec provides all the necessary information to create a handler. +type HandlerSpec struct { + ID string `json:"id"` + Topics []string `json:"topics"` + Actions []HandlerActionSpec `json:"actions"` +} + +var validHandlerID = regexp.MustCompile(`^[-\._\p{L}0-9]+$`) +var validTopicID = regexp.MustCompile(`^[-:\._\p{L}0-9]+$`) + +func (h HandlerSpec) Validate() error { + if !validHandlerID.MatchString(h.ID) { + return fmt.Errorf("handler ID must contain only letters, numbers, '-', '.' and '_'. %q", h.ID) + } + for _, t := range h.Topics { + if !validTopicID.MatchString(t) { + return fmt.Errorf("topic must contain only letters, numbers, '-', '.', ':' and '_'. 
%q", t) + } + } + if len(h.Actions) == 0 { + return errors.New("must provide at least one action") + } + return nil +} + +// HandlerActionSpec defines an action an handler can take. +type HandlerActionSpec struct { + Kind string `json:"kind"` + Options map[string]interface{} `json:"options"` +} + +func (h HandlerSpec) ObjectID() string { + return h.ID +} + +func (h HandlerSpec) MarshalBinary() ([]byte, error) { + return storage.VersionJSONEncode(handlerSpecVersion, h) +} + +func (h *HandlerSpec) UnmarshalBinary(data []byte) error { + return storage.VersionJSONDecode(data, func(version int, dec *json.Decoder) error { + return dec.Decode(h) + }) +} + +// Key/Value store based implementation of the HandlerSpecDAO +type handlerSpecKV struct { + store *storage.IndexedStore +} + +func newHandlerSpecKV(store storage.Interface) (*handlerSpecKV, error) { + c := storage.DefaultIndexedStoreConfig("handlers", func() storage.BinaryObject { + return new(HandlerSpec) + }) + istore, err := storage.NewIndexedStore(store, c) + if err != nil { + return nil, err + } + return &handlerSpecKV{ + store: istore, + }, nil +} + +func (kv *handlerSpecKV) error(err error) error { + if err == storage.ErrObjectExists { + return ErrHandlerSpecExists + } else if err == storage.ErrNoObjectExists { + return ErrNoHandlerSpecExists + } + return err +} + +func (kv *handlerSpecKV) Get(id string) (HandlerSpec, error) { + o, err := kv.store.Get(id) + if err != nil { + return HandlerSpec{}, kv.error(err) + } + h, ok := o.(*HandlerSpec) + if !ok { + return HandlerSpec{}, storage.ImpossibleTypeErr(h, o) + } + return *h, nil +} + +func (kv *handlerSpecKV) Create(h HandlerSpec) error { + return kv.store.Create(&h) +} + +func (kv *handlerSpecKV) Replace(h HandlerSpec) error { + return kv.store.Replace(&h) +} + +func (kv *handlerSpecKV) Delete(id string) error { + return kv.store.Delete(id) +} + +func (kv *handlerSpecKV) List(pattern string, offset, limit int) ([]HandlerSpec, error) { + objects, err := 
kv.store.List(storage.DefaultIDIndex, pattern, offset, limit) + if err != nil { + return nil, err + } + specs := make([]HandlerSpec, len(objects)) + for i, o := range objects { + h, ok := o.(*HandlerSpec) + if !ok { + return nil, storage.ImpossibleTypeErr(h, o) + } + specs[i] = *h + } + return specs, nil +} + +var ( + ErrNoTopicStateExists = errors.New("no topic state exists") +) + +// Data access object for TopicState data. +type TopicStateDAO interface { + // Retrieve a handler + Get(id string) (TopicState, error) + + // Put a topic state, replaces any existing state. + Put(h TopicState) error + + // Delete a handler. + // It is not an error to delete an non-existent handler. + Delete(id string) error + + // List handlers matching a pattern. + // The pattern is shell/glob matching see https://golang.org/pkg/path/#Match + // Offset and limit are pagination bounds. Offset is inclusive starting at index 0. + // More results may exist while the number of returned items is equal to limit. + List(pattern string, offset, limit int) ([]TopicState, error) +} + +const topicStateVersion = 1 + +type TopicState struct { + Topic string `json:"topic"` + EventStates map[string]EventState `json:"event-states"` +} + +type EventState struct { + Message string `json:"message"` + Details string `json:"details"` + Time time.Time `json:"time"` + Duration time.Duration `json:"duration"` + Level alert.Level `json:"level"` +} + +func (t TopicState) ObjectID() string { + return t.Topic +} + +func (t TopicState) MarshalBinary() ([]byte, error) { + return storage.VersionJSONEncode(topicStateVersion, t) +} + +func (t *TopicState) UnmarshalBinary(data []byte) error { + return storage.VersionJSONDecode(data, func(version int, dec *json.Decoder) error { + return dec.Decode(&t) + }) +} + +// Key/Value store based implementation of the TopicStateDAO +type topicStateKV struct { + store *storage.IndexedStore +} + +func newTopicStateKV(store storage.Interface) (*topicStateKV, error) { + c := 
storage.DefaultIndexedStoreConfig("topics", func() storage.BinaryObject { + return new(TopicState) + }) + istore, err := storage.NewIndexedStore(store, c) + if err != nil { + return nil, err + } + return &topicStateKV{ + store: istore, + }, nil +} + +func (kv *topicStateKV) error(err error) error { + if err == storage.ErrNoObjectExists { + return ErrNoTopicStateExists + } + return err +} + +func (kv *topicStateKV) Get(id string) (TopicState, error) { + o, err := kv.store.Get(id) + if err != nil { + return TopicState{}, kv.error(err) + } + t, ok := o.(*TopicState) + if !ok { + return TopicState{}, storage.ImpossibleTypeErr(t, o) + } + return *t, nil +} + +func (kv *topicStateKV) Put(t TopicState) error { + return kv.store.Put(&t) +} + +func (kv *topicStateKV) Replace(t TopicState) error { + return kv.store.Replace(&t) +} + +func (kv *topicStateKV) Delete(id string) error { + return kv.store.Delete(id) +} + +func (kv *topicStateKV) List(pattern string, offset, limit int) ([]TopicState, error) { + objects, err := kv.store.List(storage.DefaultIDIndex, pattern, offset, limit) + if err != nil { + return nil, err + } + specs := make([]TopicState, len(objects)) + for i, o := range objects { + t, ok := o.(*TopicState) + if !ok { + return nil, storage.ImpossibleTypeErr(t, o) + } + specs[i] = *t + } + return specs, nil +} diff --git a/services/alert/doc.go b/services/alert/doc.go new file mode 100644 index 000000000..5bc1297c7 --- /dev/null +++ b/services/alert/doc.go @@ -0,0 +1,19 @@ +/* + Alert provides an implementation of the HTTP API for managing alert topics, handlers and events. + + Responsibilities of this package include: + + * Providing and HTTP API for the management of handlers + * Storing handler definitions + * Providing implementations of several simple handlers + * Mapping external handler implementations to handler definitions + + The last item needs some more clarification. + Handlers can be implemented in external packages. 
package alert

import (
	"bytes"
	"encoding/json"
	"fmt"
	"log"
	"net"
	"net/http"
	"os"
	"path/filepath"
	"strings"
	"sync"
	"time"

	"github.com/influxdata/influxdb/influxql"
	"github.com/influxdata/kapacitor/alert"
	"github.com/influxdata/kapacitor/bufpool"
	"github.com/influxdata/kapacitor/command"
)

// AlertData is a structure that contains relevant data about an alert event.
// The structure is intended to be JSON encoded, providing a consistent data format.
type AlertData struct {
	ID       string          `json:"id"`
	Message  string          `json:"message"`
	Details  string          `json:"details"`
	Time     time.Time       `json:"time"`
	Duration time.Duration   `json:"duration"`
	Level    alert.Level     `json:"level"`
	Data     influxql.Result `json:"data"`
}

// alertDataFromEvent flattens an alert.Event into the JSON-friendly
// AlertData representation used by the handlers in this file.
func alertDataFromEvent(event alert.Event) AlertData {
	return AlertData{
		ID:       event.State.ID,
		Message:  event.State.Message,
		Details:  event.State.Details,
		Time:     event.State.Time,
		Duration: event.State.Duration,
		Level:    event.State.Level,
		Data:     event.Data.Result,
	}
}

// Default log mode for file (user read/write only).
const defaultLogFileMode = 0600

// LogHandlerConfig configures the file-logging alert handler.
type LogHandlerConfig struct {
	Path string      `mapstructure:"path"`
	Mode os.FileMode `mapstructure:"mode"`
}

// Validate checks that the configured file mode is user writable and
// that the log path is absolute.
func (c LogHandlerConfig) Validate() error {
	// 0200 is the user-write permission bit.
	if c.Mode.Perm()&0200 == 0 {
		return fmt.Errorf("invalid file mode %v, must be user writable", c.Mode)
	}
	if !filepath.IsAbs(c.Path) {
		return fmt.Errorf("log path must be absolute: %s is not absolute", c.Path)
	}
	return nil
}
os.FileMode + logger *log.Logger +} + +func DefaultLogHandlerConfig() LogHandlerConfig { + return LogHandlerConfig{ + Mode: defaultLogFileMode, + } +} + +func NewLogHandler(c LogHandlerConfig, l *log.Logger) (alert.Handler, error) { + if err := c.Validate(); err != nil { + return nil, err + } + return &logHandler{ + logpath: c.Path, + mode: c.Mode, + logger: l, + }, nil +} + +func (h *logHandler) Handle(event alert.Event) { + ad := alertDataFromEvent(event) + + f, err := os.OpenFile(h.logpath, os.O_WRONLY|os.O_APPEND|os.O_CREATE, h.mode) + if err != nil { + h.logger.Printf("E! failed to open file %s for alert logging: %v", h.logpath, err) + return + } + defer f.Close() + + err = json.NewEncoder(f).Encode(ad) + if err != nil { + h.logger.Printf("E! failed to marshal alert data json: %v", err) + } +} + +type ExecHandlerConfig struct { + Prog string `mapstructure:"prog"` + Args []string `mapstructure:"args"` + Commander command.Commander `mapstructure:"-"` +} + +type execHandler struct { + bp *bufpool.Pool + s command.Spec + commander command.Commander + logger *log.Logger +} + +func NewExecHandler(c ExecHandlerConfig, l *log.Logger) alert.Handler { + s := command.Spec{ + Prog: c.Prog, + Args: c.Args, + } + return &execHandler{ + bp: bufpool.New(), + s: s, + commander: c.Commander, + logger: l, + } +} + +func (h *execHandler) Handle(event alert.Event) { + buf := h.bp.Get() + defer h.bp.Put(buf) + ad := alertDataFromEvent(event) + + err := json.NewEncoder(buf).Encode(ad) + if err != nil { + h.logger.Printf("E! failed to marshal alert data json: %v", err) + return + } + + cmd := h.commander.NewCommand(h.s) + cmd.Stdin(buf) + var out bytes.Buffer + cmd.Stdout(&out) + cmd.Stderr(&out) + err = cmd.Start() + if err != nil { + h.logger.Printf("E! exec command failed: Output: %s: %v", out.String(), err) + return + } + err = cmd.Wait() + if err != nil { + h.logger.Printf("E! 
exec command failed: Output: %s: %v", out.String(), err) + return + } +} + +type TCPHandlerConfig struct { + Address string `mapstructure:"address"` +} + +type tcpHandler struct { + bp *bufpool.Pool + addr string + logger *log.Logger +} + +func NewTCPHandler(c TCPHandlerConfig, l *log.Logger) alert.Handler { + return &tcpHandler{ + bp: bufpool.New(), + addr: c.Address, + logger: l, + } +} + +func (h *tcpHandler) Handle(event alert.Event) { + buf := h.bp.Get() + defer h.bp.Put(buf) + ad := alertDataFromEvent(event) + + err := json.NewEncoder(buf).Encode(ad) + if err != nil { + h.logger.Printf("E! failed to marshal alert data json: %v", err) + return + } + + conn, err := net.Dial("tcp", h.addr) + if err != nil { + h.logger.Printf("E! failed to connect to %s: %v", h.addr, err) + return + } + defer conn.Close() + + buf.WriteByte('\n') + conn.Write(buf.Bytes()) +} + +type PostHandlerConfig struct { + URL string `mapstructure:"url"` +} + +type postHandler struct { + bp *bufpool.Pool + url string + logger *log.Logger +} + +func NewPostHandler(c PostHandlerConfig, l *log.Logger) alert.Handler { + return &postHandler{ + bp: bufpool.New(), + url: c.URL, + logger: l, + } +} + +func (h *postHandler) Handle(event alert.Event) { + body := h.bp.Get() + defer h.bp.Put(body) + ad := alertDataFromEvent(event) + + err := json.NewEncoder(body).Encode(ad) + if err != nil { + h.logger.Printf("E! failed to marshal alert data json: %v", err) + return + } + + resp, err := http.Post(h.url, "application/json", body) + if err != nil { + h.logger.Printf("E! 
failed to POST alert data: %v", err) + return + } + resp.Body.Close() +} + +type AggregateHandlerConfig struct { + Interval time.Duration `mapstructure:"interval"` +} + +type aggregateHandler struct { + interval time.Duration + next alert.Handler + + logger *log.Logger + events chan alert.Event + closing chan struct{} + + wg sync.WaitGroup +} + +func NewAggregateHandler(c AggregateHandlerConfig, l *log.Logger) handlerAction { + h := &aggregateHandler{ + interval: time.Duration(c.Interval), + logger: l, + events: make(chan alert.Event), + closing: make(chan struct{}), + } + h.wg.Add(1) + go func() { + defer h.wg.Done() + h.run() + }() + return h +} + +func (h *aggregateHandler) run() { + ticker := time.NewTicker(h.interval) + defer ticker.Stop() + var events []alert.Event + for { + select { + case <-h.closing: + return + case e := <-h.events: + events = append(events, e) + case <-ticker.C: + if len(events) == 0 { + continue + } + details := make([]string, len(events)) + agg := alert.Event{ + State: alert.EventState{ + ID: "aggregate", + Message: fmt.Sprintf("Received %d events in the last %v.", len(events), h.interval), + }, + } + for i, e := range events { + agg.Topic = e.Topic + if e.State.Level > agg.State.Level { + agg.State.Level = e.State.Level + } + if e.State.Time.After(agg.State.Time) { + agg.State.Time = e.State.Time + } + if e.State.Duration > agg.State.Duration { + agg.State.Duration = e.State.Duration + } + details[i] = e.State.Message + } + agg.State.Details = strings.Join(details, "\n") + h.next.Handle(agg) + events = events[0:0] + } + } +} + +func (h *aggregateHandler) Handle(event alert.Event) { + select { + case h.events <- event: + case <-h.closing: + } +} + +func (h *aggregateHandler) SetNext(n alert.Handler) { + h.next = n +} + +func (h *aggregateHandler) Close() { + close(h.closing) + h.wg.Wait() +} diff --git a/services/alert/service.go b/services/alert/service.go new file mode 100644 index 000000000..584049390 --- /dev/null +++ 
package alert

import (
	"encoding/json"
	"fmt"
	"io/ioutil"
	"log"
	"net/http"
	"path"
	"sort"
	"strings"
	"sync"

	jsonpatch "github.com/evanphx/json-patch"
	"github.com/influxdata/kapacitor/alert"
	client "github.com/influxdata/kapacitor/client/v1"
	"github.com/influxdata/kapacitor/command"
	"github.com/influxdata/kapacitor/services/alerta"
	"github.com/influxdata/kapacitor/services/hipchat"
	"github.com/influxdata/kapacitor/services/httpd"
	"github.com/influxdata/kapacitor/services/opsgenie"
	"github.com/influxdata/kapacitor/services/pagerduty"
	"github.com/influxdata/kapacitor/services/slack"
	"github.com/influxdata/kapacitor/services/smtp"
	"github.com/influxdata/kapacitor/services/storage"
	"github.com/influxdata/kapacitor/services/telegram"
	"github.com/influxdata/kapacitor/services/victorops"
	"github.com/mitchellh/mapstructure"
	"github.com/pkg/errors"
)

// URL path layout of the alert API.
const (
	alertsPath         = "/alerts"
	alertsPathAnchored = "/alerts/"

	topicsPath             = alertsPath + "/topics"
	topicsPathAnchored     = alertsPath + "/topics/"
	topicsBasePath         = httpd.BasePreviewPath + topicsPath
	topicsBasePathAnchored = httpd.BasePreviewPath + topicsPathAnchored

	handlersPath             = alertsPath + "/handlers"
	handlersPathAnchored     = alertsPath + "/handlers/"
	handlersBasePath         = httpd.BasePreviewPath + handlersPath
	handlersBasePathAnchored = httpd.BasePreviewPath + handlersPathAnchored

	topicEventsPath   = "events"
	topicHandlersPath = "handlers"

	// Glob patterns used to route topic sub-resources.
	eventsPattern   = "*/" + topicEventsPath
	eventPattern    = "*/" + topicEventsPath + "/*"
	handlersPattern = "*/" + topicHandlersPath

	eventsRelation   = "events"
	handlersRelation = "handlers"
)

// handler pairs a stored handler specification with its live
// alert.Handler implementation.
type handler struct {
	Spec    HandlerSpec
	Handler alert.Handler
}

// handlerAction is a handler that participates in a chain: it forwards
// events to a next handler and can be closed to release resources.
type handlerAction interface {
	alert.Handler
	SetNext(h alert.Handler)
	Close()
}

// Service implements the HTTP API for managing alert topics, handlers
// and events, and persists handler specs and topic state.
type Service struct {
	mu sync.RWMutex // guards handlers

	specsDAO  HandlerSpecDAO
	topicsDAO TopicStateDAO

	// handlers maps spec ID to the live handler for that spec.
	handlers map[string]handler

	topics *alert.Topics

	routes       []httpd.Route
	HTTPDService interface {
		AddPreviewRoutes([]httpd.Route) error
		DelRoutes([]httpd.Route)
	}
	StorageService interface {
		Store(namespace string) storage.Interface
	}

	Commander command.Commander

	logger *log.Logger

	// Third-party alerting services; each produces a concrete
	// alert.Handler from its own config type.
	AlertaService interface {
		DefaultHandlerConfig() alerta.HandlerConfig
		Handler(alerta.HandlerConfig, *log.Logger) (alert.Handler, error)
	}
	HipChatService interface {
		Handler(hipchat.HandlerConfig, *log.Logger) alert.Handler
	}
	OpsGenieService interface {
		Handler(opsgenie.HandlerConfig, *log.Logger) alert.Handler
	}
	PagerDutyService interface {
		Handler(pagerduty.HandlerConfig, *log.Logger) alert.Handler
	}
	SensuService interface {
		Handler(*log.Logger) alert.Handler
	}
	SlackService interface {
		Handler(slack.HandlerConfig, *log.Logger) alert.Handler
	}
	SMTPService interface {
		Handler(smtp.HandlerConfig, *log.Logger) alert.Handler
	}
	TalkService interface {
		Handler(*log.Logger) alert.Handler
	}
	TelegramService interface {
		Handler(telegram.HandlerConfig, *log.Logger) alert.Handler
	}
	VictorOpsService interface {
		Handler(victorops.HandlerConfig, *log.Logger) alert.Handler
	}
}

// NewService returns an unopened Service.
// NOTE(review): the Config argument is currently unused here — confirm
// whether c should seed any fields.
func NewService(c Config, l *log.Logger) *Service {
	s := &Service{
		handlers: make(map[string]handler),
		topics:   alert.NewTopics(l),
		logger:   l,
	}
	return s
}
// alertNamespace is the storage namespace for all alert service data.
const alertNamespace = "alert_store"

// Open initializes the storage-backed DAOs, restores persisted handler
// specs and topic state, and registers the HTTP API routes.
func (s *Service) Open() error {
	s.mu.Lock()
	defer s.mu.Unlock()

	// Create DAOs.
	store := s.StorageService.Store(alertNamespace)
	specsDAO, err := newHandlerSpecKV(store)
	if err != nil {
		return err
	}
	s.specsDAO = specsDAO
	topicsDAO, err := newTopicStateKV(store)
	if err != nil {
		return err
	}
	s.topicsDAO = topicsDAO

	// Load saved handlers.
	if err := s.loadSavedHandlerSpecs(); err != nil {
		return err
	}

	// Load saved topic state.
	if err := s.loadSavedTopicStates(); err != nil {
		return err
	}

	// Define API routes.
	s.routes = []httpd.Route{
		{
			Method:      "GET",
			Pattern:     topicsPath,
			HandlerFunc: s.handleListTopics,
		},
		{
			Method:      "GET",
			Pattern:     topicsPathAnchored,
			HandlerFunc: s.handleRouteTopic,
		},
		{
			Method:      "DELETE",
			Pattern:     topicsPathAnchored,
			HandlerFunc: s.handleDeleteTopic,
		},
		{
			Method:      "GET",
			Pattern:     handlersPath,
			HandlerFunc: s.handleListHandlers,
		},
		{
			Method:      "POST",
			Pattern:     handlersPath,
			HandlerFunc: s.handleCreateHandler,
		},
		{
			// Satisfy CORS checks.
			Method:      "OPTIONS",
			Pattern:     handlersPathAnchored,
			HandlerFunc: httpd.ServeOptions,
		},
		{
			Method:      "PATCH",
			Pattern:     handlersPathAnchored,
			HandlerFunc: s.handlePatchHandler,
		},
		{
			Method:      "PUT",
			Pattern:     handlersPathAnchored,
			HandlerFunc: s.handlePutHandler,
		},
		{
			Method:      "DELETE",
			Pattern:     handlersPathAnchored,
			HandlerFunc: s.handleDeleteHandler,
		},
		{
			Method:      "GET",
			Pattern:     handlersPathAnchored,
			HandlerFunc: s.handleGetHandler,
		},
	}

	return s.HTTPDService.AddPreviewRoutes(s.routes)
}

// loadSavedHandlerSpecs pages through all persisted handler specs and
// instantiates each one. Failure to load an individual handler is
// logged and skipped so one bad spec does not prevent startup.
func (s *Service) loadSavedHandlerSpecs() error {
	offset := 0
	limit := 100
	for {
		specs, err := s.specsDAO.List("", offset, limit)
		if err != nil {
			return err
		}

		for _, spec := range specs {
			if err := s.loadHandlerSpec(spec); err != nil {
				s.logger.Println("E! failed to load handler on startup", err)
			}
		}

		offset += limit
		// A short page means we have reached the end.
		if len(specs) != limit {
			break
		}
	}
	return nil
}
failed to load handler on startup", err) + } + } + + offset += limit + if len(specs) != limit { + break + } + } + return nil +} + +func (s *Service) loadSavedTopicStates() error { + offset := 0 + limit := 100 + for { + topicStates, err := s.topicsDAO.List("", offset, limit) + if err != nil { + return err + } + + for _, ts := range topicStates { + s.topics.RestoreTopic(ts.Topic, s.convertEventStatesToAlert(ts.EventStates)) + } + + offset += limit + if len(topicStates) != limit { + break + } + } + return nil +} + +func (s *Service) Close() error { + s.mu.Lock() + defer s.mu.Unlock() + s.topics.Close() + s.HTTPDService.DelRoutes(s.routes) + return nil +} + +func validatePattern(pattern string) error { + _, err := path.Match(pattern, "") + return err +} + +func (s *Service) handleListTopics(w http.ResponseWriter, r *http.Request) { + pattern := r.URL.Query().Get("pattern") + if err := validatePattern(pattern); err != nil { + httpd.HttpError(w, fmt.Sprint("invalide pattern: ", err.Error()), true, http.StatusBadRequest) + return + } + minLevelStr := r.URL.Query().Get("min-level") + minLevel, err := alert.ParseLevel(minLevelStr) + if err != nil { + httpd.HttpError(w, err.Error(), true, http.StatusBadRequest) + return + } + topics := client.Topics{ + Link: client.Link{Relation: client.Self, Href: r.URL.String()}, + Topics: s.TopicStatus(pattern, minLevel), + } + + w.WriteHeader(http.StatusOK) + w.Write(httpd.MarshalJSON(topics, true)) +} + +func (s *Service) topicIDFromPath(p string) (id string) { + d := p + for d != "." 
// pathMatch reports whether p matches the glob pattern, treating any
// match error as no match.
func pathMatch(pattern, p string) (match bool) {
	match, _ = path.Match(pattern, p)
	return
}

// handleDeleteTopic deletes a topic's in-memory and persisted state.
func (s *Service) handleDeleteTopic(w http.ResponseWriter, r *http.Request) {
	p := strings.TrimPrefix(r.URL.Path, topicsBasePathAnchored)
	id := s.topicIDFromPath(p)
	if err := s.DeleteTopic(id); err != nil {
		httpd.HttpError(w, fmt.Sprintf("failed to delete topic %q: %v", id, err), true, http.StatusInternalServerError)
		return
	}
	w.WriteHeader(http.StatusNoContent)
}

// handleRouteTopic dispatches GET requests for a topic and its
// sub-resources (events list, single event, handlers list).
func (s *Service) handleRouteTopic(w http.ResponseWriter, r *http.Request) {
	p := strings.TrimPrefix(r.URL.Path, topicsBasePathAnchored)
	id := s.topicIDFromPath(p)
	t, ok := s.topics.Topic(id)
	if !ok {
		httpd.HttpError(w, fmt.Sprintf("topic %q does not exist", id), true, http.StatusNotFound)
		return
	}

	switch {
	case pathMatch(eventsPattern, p):
		s.handleListTopicEvents(t, w, r)
	case pathMatch(eventPattern, p):
		s.handleTopicEvent(t, w, r)
	case pathMatch(handlersPattern, p):
		s.handleListTopicHandlers(t, w, r)
	default:
		s.handleTopic(t, w, r)
	}
}

// Link constructors for the various API resources.
func (s *Service) handlerLink(id string) client.Link {
	return client.Link{Relation: client.Self, Href: path.Join(handlersBasePath, id)}
}
func (s *Service) topicLink(id string) client.Link {
	return client.Link{Relation: client.Self, Href: path.Join(topicsBasePath, id)}
}
func (s *Service) topicEventsLink(id string, r client.Relation) client.Link {
	return client.Link{Relation: r, Href: path.Join(topicsBasePath, id, topicEventsPath)}
}
func (s *Service) topicEventLink(topic, event string) client.Link {
	return client.Link{Relation: client.Self, Href: path.Join(topicsBasePath, topic, topicEventsPath, event)}
}
func (s *Service) topicHandlersLink(id string, r client.Relation) client.Link {
	return client.Link{Relation: r, Href: path.Join(topicsBasePath, id, topicHandlersPath)}
}
func (s *Service) topicHandlerLink(topic, handler string) client.Link {
	return client.Link{Relation: client.Self, Href: path.Join(topicsBasePath, topic, topicHandlersPath, handler)}
}

// createClientTopic builds the API representation of a topic.
func (s *Service) createClientTopic(topic string, level alert.Level) client.Topic {
	return client.Topic{
		ID:           topic,
		Link:         s.topicLink(topic),
		Level:        level.String(),
		EventsLink:   s.topicEventsLink(topic, eventsRelation),
		HandlersLink: s.topicHandlersLink(topic, handlersRelation),
	}
}

// handleTopic returns the current max level of a single topic.
func (s *Service) handleTopic(t *alert.Topic, w http.ResponseWriter, r *http.Request) {
	topic := s.createClientTopic(t.ID(), t.MaxLevel())

	w.WriteHeader(http.StatusOK)
	w.Write(httpd.MarshalJSON(topic, true))
}

// convertEventStatesFromAlert maps in-memory event states to their
// persisted representation, keyed by event ID.
func (s *Service) convertEventStatesFromAlert(states map[string]alert.EventState) map[string]EventState {
	newStates := make(map[string]EventState, len(states))
	for id, state := range states {
		newStates[id] = s.convertEventStateFromAlert(state)
	}
	return newStates
}

// convertEventStatesToAlert maps persisted event states back to the
// in-memory representation, restoring event IDs from the map keys.
func (s *Service) convertEventStatesToAlert(states map[string]EventState) map[string]alert.EventState {
	newStates := make(map[string]alert.EventState, len(states))
	for id, state := range states {
		newStates[id] = s.convertEventStateToAlert(id, state)
	}
	return newStates
}

// convertEventStateFromAlert converts one event state for storage.
// The ID is dropped here; it is kept as the enclosing map key.
func (s *Service) convertEventStateFromAlert(state alert.EventState) EventState {
	return EventState{
		Message:  state.Message,
		Details:  state.Details,
		Time:     state.Time,
		Duration: state.Duration,
		Level:    state.Level,
	}
}

// convertEventStateToAlert converts one stored event state back to the
// in-memory form, reattaching its ID.
func (s *Service) convertEventStateToAlert(id string, state EventState) alert.EventState {
	return alert.EventState{
		ID:       id,
		Message:  state.Message,
		Details:  state.Details,
		Time:     state.Time,
		Duration: state.Duration,
		Level:    state.Level,
	}
}

// convertEventStateToClient converts an event state to its API
// representation.
func (s *Service) convertEventStateToClient(state alert.EventState) client.EventState {
	return client.EventState{
		Message:  state.Message,
		Details:  state.Details,
		Time:     state.Time,
		Duration: client.Duration(state.Duration),
		Level:    state.Level.String(),
	}
}
convertHandlerSpec(spec HandlerSpec) client.Handler { + actions := make([]client.HandlerAction, 0, len(spec.Actions)) + for _, actionSpec := range spec.Actions { + action := client.HandlerAction{ + Kind: actionSpec.Kind, + Options: actionSpec.Options, + } + actions = append(actions, action) + } + return client.Handler{ + Link: s.handlerLink(spec.ID), + ID: spec.ID, + Topics: spec.Topics, + Actions: actions, + } +} + +func (s *Service) handleListTopicEvents(t *alert.Topic, w http.ResponseWriter, r *http.Request) { + minLevelStr := r.URL.Query().Get("min-level") + minLevel, err := alert.ParseLevel(minLevelStr) + if err != nil { + httpd.HttpError(w, err.Error(), true, http.StatusBadRequest) + return + } + events := t.EventStates(minLevel) + res := client.TopicEvents{ + Link: s.topicEventsLink(t.ID(), client.Self), + Topic: t.ID(), + Events: make([]client.TopicEvent, 0, len(events)), + } + for id, state := range events { + res.Events = append(res.Events, client.TopicEvent{ + Link: s.topicEventLink(t.ID(), id), + ID: id, + State: s.convertEventStateToClient(state), + }) + } + w.WriteHeader(http.StatusOK) + w.Write(httpd.MarshalJSON(res, true)) +} + +func (s *Service) handleTopicEvent(t *alert.Topic, w http.ResponseWriter, r *http.Request) { + id := path.Base(r.URL.Path) + state, ok := t.EventState(id) + if !ok { + httpd.HttpError(w, fmt.Sprintf("event %q does not exist for topic %q", id, t.ID()), true, http.StatusNotFound) + return + } + event := client.TopicEvent{ + Link: s.topicEventLink(t.ID(), id), + ID: id, + State: s.convertEventStateToClient(state), + } + w.WriteHeader(http.StatusOK) + w.Write(httpd.MarshalJSON(event, true)) +} + +func (s *Service) handleListTopicHandlers(t *alert.Topic, w http.ResponseWriter, r *http.Request) { + var handlers []client.Handler + for _, h := range s.handlers { + match := false + for _, topic := range h.Spec.Topics { + if topic == t.ID() { + match = true + break + } + } + if match { + handlers = append(handlers, 
s.convertHandlerSpec(h.Spec)) + } + } + sort.Sort(sortedHandlers(handlers)) + th := client.TopicHandlers{ + Link: s.topicHandlersLink(t.ID(), client.Self), + Topic: t.ID(), + Handlers: handlers, + } + w.WriteHeader(http.StatusOK) + w.Write(httpd.MarshalJSON(th, true)) +} + +func (s *Service) handleListHandlers(w http.ResponseWriter, r *http.Request) { + pattern := r.URL.Query().Get("pattern") + if err := validatePattern(pattern); err != nil { + httpd.HttpError(w, fmt.Sprint("invalid pattern: ", err.Error()), true, http.StatusBadRequest) + return + } + handlers := client.Handlers{ + Link: client.Link{Relation: client.Self, Href: r.URL.String()}, + Handlers: s.Handlers(pattern), + } + + w.WriteHeader(http.StatusOK) + w.Write(httpd.MarshalJSON(handlers, true)) +} + +func (s *Service) handleCreateHandler(w http.ResponseWriter, r *http.Request) { + handlerSpec := HandlerSpec{} + err := json.NewDecoder(r.Body).Decode(&handlerSpec) + if err != nil { + httpd.HttpError(w, fmt.Sprint("invalid handler json: ", err.Error()), true, http.StatusBadRequest) + return + } + if err := handlerSpec.Validate(); err != nil { + httpd.HttpError(w, fmt.Sprint("invalid handler spec: ", err.Error()), true, http.StatusBadRequest) + return + } + + err = s.RegisterHandlerSpec(handlerSpec) + if err != nil { + httpd.HttpError(w, fmt.Sprint("failed to create handler: ", err.Error()), true, http.StatusInternalServerError) + return + } + + h := s.convertHandlerSpec(handlerSpec) + w.WriteHeader(http.StatusOK) + w.Write(httpd.MarshalJSON(h, true)) +} + +func (s *Service) handlePatchHandler(w http.ResponseWriter, r *http.Request) { + id := strings.TrimPrefix(r.URL.Path, handlersBasePathAnchored) + s.mu.RLock() + h, ok := s.handlers[id] + s.mu.RUnlock() + if !ok { + httpd.HttpError(w, fmt.Sprintf("unknown handler: %q", id), true, http.StatusNotFound) + return + } + + patchBytes, err := ioutil.ReadAll(r.Body) + if err != nil { + httpd.HttpError(w, fmt.Sprint("failed to read request body: ", err.Error()), 
true, http.StatusInternalServerError) + return + } + patch, err := jsonpatch.DecodePatch(patchBytes) + if err != nil { + httpd.HttpError(w, fmt.Sprint("invalid patch json: ", err.Error()), true, http.StatusBadRequest) + return + } + specBytes, err := json.Marshal(h.Spec) + if err != nil { + httpd.HttpError(w, fmt.Sprint("failed to marshal JSON: ", err.Error()), true, http.StatusInternalServerError) + return + } + newBytes, err := patch.Apply(specBytes) + if err != nil { + httpd.HttpError(w, fmt.Sprint("failed to apply patch: ", err.Error()), true, http.StatusBadRequest) + return + } + newSpec := HandlerSpec{} + if err := json.Unmarshal(newBytes, &newSpec); err != nil { + httpd.HttpError(w, fmt.Sprint("failed to unmarshal patched json: ", err.Error()), true, http.StatusInternalServerError) + return + } + if err := newSpec.Validate(); err != nil { + httpd.HttpError(w, fmt.Sprint("invalid handler spec: ", err.Error()), true, http.StatusBadRequest) + return + } + + if err := s.ReplaceHandlerSpec(h.Spec, newSpec); err != nil { + httpd.HttpError(w, fmt.Sprint("failed to update handler: ", err.Error()), true, http.StatusInternalServerError) + return + } + + ch := s.convertHandlerSpec(newSpec) + + w.WriteHeader(http.StatusOK) + w.Write(httpd.MarshalJSON(ch, true)) +} + +func (s *Service) handlePutHandler(w http.ResponseWriter, r *http.Request) { + id := strings.TrimPrefix(r.URL.Path, handlersBasePathAnchored) + s.mu.RLock() + h, ok := s.handlers[id] + s.mu.RUnlock() + if !ok { + httpd.HttpError(w, fmt.Sprintf("unknown handler: %q", id), true, http.StatusNotFound) + return + } + + newSpec := HandlerSpec{} + err := json.NewDecoder(r.Body).Decode(&newSpec) + if err != nil { + httpd.HttpError(w, fmt.Sprint("failed to unmar JSON: ", err.Error()), true, http.StatusBadRequest) + return + } + if err := newSpec.Validate(); err != nil { + httpd.HttpError(w, fmt.Sprint("invalid handler spec: ", err.Error()), true, http.StatusBadRequest) + return + } + + if err := 
s.ReplaceHandlerSpec(h.Spec, newSpec); err != nil { + httpd.HttpError(w, fmt.Sprint("failed to update handler: ", err.Error()), true, http.StatusInternalServerError) + return + } + + ch := s.convertHandlerSpec(newSpec) + + w.WriteHeader(http.StatusOK) + w.Write(httpd.MarshalJSON(ch, true)) +} + +func (s *Service) handleDeleteHandler(w http.ResponseWriter, r *http.Request) { + id := strings.TrimPrefix(r.URL.Path, handlersBasePathAnchored) + if err := s.DeregisterHandlerSpec(id); err != nil { + httpd.HttpError(w, fmt.Sprint("failed to delete handler: ", err.Error()), true, http.StatusInternalServerError) + return + } + w.WriteHeader(http.StatusNoContent) +} + +func (s *Service) handleGetHandler(w http.ResponseWriter, r *http.Request) { + id := strings.TrimPrefix(r.URL.Path, handlersBasePathAnchored) + s.mu.RLock() + h, ok := s.handlers[id] + s.mu.RUnlock() + if !ok { + httpd.HttpError(w, fmt.Sprintf("unknown handler: %q", id), true, http.StatusNotFound) + return + } + + ch := s.convertHandlerSpec(h.Spec) + + w.WriteHeader(http.StatusOK) + w.Write(httpd.MarshalJSON(ch, true)) +} + +func (s *Service) EventState(topic, event string) (alert.EventState, bool) { + t, ok := s.topics.Topic(topic) + if !ok { + return alert.EventState{}, false + } + return t.EventState(event) +} + +func (s *Service) Collect(event alert.Event) error { + err := s.topics.Collect(event) + if err != nil { + return err + } + t, ok := s.topics.Topic(event.Topic) + if !ok { + // Topic was deleted, nothing to do + return nil + } + + ts := TopicState{ + Topic: event.Topic, + EventStates: s.convertEventStatesFromAlert(t.EventStates(alert.OK)), + } + return s.topicsDAO.Put(ts) +} + +func (s *Service) DeleteTopic(topic string) error { + s.topics.DeleteTopic(topic) + return s.topicsDAO.Delete(topic) +} + +func (s *Service) RegisterHandler(topics []string, h alert.Handler) { + s.topics.RegisterHandler(topics, h) +} +func (s *Service) DeregisterHandler(topics []string, h alert.Handler) { + 
// loadHandlerSpec initializes a spec that already exists.
// Caller must have the write lock.
func (s *Service) loadHandlerSpec(spec HandlerSpec) error {
	h, err := s.createHandlerFromSpec(spec)
	if err != nil {
		return err
	}

	s.handlers[spec.ID] = h

	s.topics.RegisterHandler(spec.Topics, h.Handler)
	return nil
}

// RegisterHandlerSpec validates, persists and activates a new handler spec.
// NOTE(review): the existence check and the later insert run under
// separate lock acquisitions, so two concurrent registrations of the
// same ID could both pass the check; presumably specsDAO.Create rejects
// duplicates — confirm.
func (s *Service) RegisterHandlerSpec(spec HandlerSpec) error {
	if err := spec.Validate(); err != nil {
		return err
	}
	s.mu.RLock()
	_, ok := s.handlers[spec.ID]
	s.mu.RUnlock()
	if ok {
		return fmt.Errorf("cannot register handler, handler with ID %q already exists", spec.ID)
	}

	h, err := s.createHandlerFromSpec(spec)
	if err != nil {
		return err
	}

	// Persist handler spec.
	if err := s.specsDAO.Create(spec); err != nil {
		return err
	}

	s.mu.Lock()
	s.handlers[spec.ID] = h
	s.mu.Unlock()

	s.topics.RegisterHandler(spec.Topics, h.Handler)
	return nil
}

// DeregisterHandlerSpec removes a handler spec from storage and stops
// its live handler. Unknown IDs are a no-op.
func (s *Service) DeregisterHandlerSpec(id string) error {
	s.mu.RLock()
	h, ok := s.handlers[id]
	s.mu.RUnlock()

	if ok {
		// Delete handler spec.
		if err := s.specsDAO.Delete(id); err != nil {
			return err
		}
		s.topics.DeregisterHandler(h.Spec.Topics, h.Handler)

		// Only chained actions hold resources that need closing.
		if ha, ok := h.Handler.(handlerAction); ok {
			ha.Close()
		}

		s.mu.Lock()
		delete(s.handlers, id)
		s.mu.Unlock()
	}
	return nil
}

// ReplaceHandlerSpec swaps oldSpec for newSpec, updating storage, the
// live handler map and topic subscriptions.
// NOTE(review): if oldSpec.ID is absent, oldH is the zero handler and a
// nil Handler is passed to ReplaceHandler — confirm the topics layer
// tolerates that.
func (s *Service) ReplaceHandlerSpec(oldSpec, newSpec HandlerSpec) error {
	if err := newSpec.Validate(); err != nil {
		return err
	}
	newH, err := s.createHandlerFromSpec(newSpec)
	if err != nil {
		return err
	}

	s.mu.RLock()
	oldH := s.handlers[oldSpec.ID]
	s.mu.RUnlock()

	// Persist new handler specs.
	if newSpec.ID == oldSpec.ID {
		if err := s.specsDAO.Replace(newSpec); err != nil {
			return err
		}
	} else {
		if err := s.specsDAO.Create(newSpec); err != nil {
			return err
		}
		if err := s.specsDAO.Delete(oldSpec.ID); err != nil {
			return err
		}
	}

	s.mu.Lock()
	delete(s.handlers, oldSpec.ID)
	s.handlers[newSpec.ID] = newH
	s.mu.Unlock()

	s.topics.ReplaceHandler(oldSpec.Topics, newSpec.Topics, oldH.Handler, newH.Handler)
	return nil
}

// sortedTopics sorts client topics by ID.
type sortedTopics []client.Topic

func (s sortedTopics) Len() int               { return len(s) }
func (s sortedTopics) Less(i int, j int) bool { return s[i].ID < s[j].ID }
func (s sortedTopics) Swap(i int, j int)      { s[i], s[j] = s[j], s[i] }

// TopicStatus returns the max alert level for each topic matching 'pattern', not returning
// any topics with max alert levels less severe than 'minLevel'.
func (s *Service) TopicStatus(pattern string, minLevel alert.Level) []client.Topic {
	statuses := s.topics.TopicStatus(pattern, minLevel)
	topics := make([]client.Topic, 0, len(statuses))
	for topic, level := range statuses {
		topics = append(topics, s.createClientTopic(topic, level))
	}
	sort.Sort(sortedTopics(topics))
	return topics
}

// TopicStatusEvents is similar to TopicStatus, but additionally returns
// the event states of at least 'minLevel' severity for each topic.
func (s *Service) TopicStatusEvents(pattern string, minLevel alert.Level) map[string]map[string]alert.EventState {
	return s.topics.TopicStatusEvents(pattern, minLevel)
}

// sortedHandlers sorts client handlers by ID.
type sortedHandlers []client.Handler

func (s sortedHandlers) Len() int               { return len(s) }
func (s sortedHandlers) Less(i int, j int) bool { return s[i].ID < s[j].ID }
func (s sortedHandlers) Swap(i int, j int)      { s[i], s[j] = s[j], s[i] }

// Handlers returns all handler specs whose IDs match the glob pattern;
// the empty pattern matches everything.
func (s *Service) Handlers(pattern string) []client.Handler {
	s.mu.RLock()
	defer s.mu.RUnlock()

	handlers := make([]client.Handler, 0, len(s.handlers))
	for id, h := range s.handlers {
		if match(pattern, id) {
			handlers = append(handlers, s.convertHandlerSpec(h.Spec))
		}
	}
	sort.Sort(sortedHandlers(handlers))
	return handlers
}

// match reports whether id matches the glob pattern; the empty pattern
// matches every id.
func match(pattern, id string) bool {
	if pattern == "" {
		return true
	}
	matched, _ := path.Match(pattern, id)
	return matched
}
// createHandlerFromSpec builds a live handler chain from a spec.
// Actions are linked in order into a singly linked list; the final
// action forwards to a no-op terminator.
func (s *Service) createHandlerFromSpec(spec HandlerSpec) (handler, error) {
	if 0 == len(spec.Actions) {
		return handler{}, errors.New("invalid handler spec, must have at least one action")
	}

	// Create actions chained together in a singly linked list.
	var prev, first handlerAction
	for _, actionSpec := range spec.Actions {
		curr, err := s.createHandlerActionFromSpec(actionSpec)
		if err != nil {
			return handler{}, err
		}
		if first == nil {
			// Keep first action.
			first = curr
		}
		if prev != nil {
			// Link previous action to current action.
			prev.SetNext(curr)
		}
		prev = curr
	}

	// Set a noopHandler for the last action.
	prev.SetNext(noopHandler{})

	return handler{Spec: spec, Handler: first}, nil
}

// decodeOptions decodes a generic options map into the concrete config
// struct c, rejecting unknown keys and parsing duration strings.
func decodeOptions(options map[string]interface{}, c interface{}) error {
	dec, err := mapstructure.NewDecoder(&mapstructure.DecoderConfig{
		ErrorUnused: true,
		Result:      c,
		DecodeHook:  mapstructure.StringToTimeDurationHookFunc(),
	})
	if err != nil {
		return errors.Wrap(err, "failed to initialize mapstructure decoder")
	}
	if err := dec.Decode(options); err != nil {
		return errors.Wrapf(err, "failed to decode options into %T", c)
	}
	return nil
}

// createHandlerActionFromSpec maps an action spec kind to a concrete
// handler implementation, decoding the spec options into that
// implementation's config type. External handlers are wrapped in a
// pass-through action so they can participate in the chain.
func (s *Service) createHandlerActionFromSpec(spec HandlerActionSpec) (ha handlerAction, err error) {
	switch spec.Kind {
	case "aggregate":
		c := AggregateHandlerConfig{}
		err = decodeOptions(spec.Options, &c)
		if err != nil {
			return
		}
		ha = NewAggregateHandler(c, s.logger)
	case "alerta":
		c := s.AlertaService.DefaultHandlerConfig()
		err = decodeOptions(spec.Options, &c)
		if err != nil {
			return
		}
		h, err := s.AlertaService.Handler(c, s.logger)
		if err != nil {
			return nil, err
		}
		ha = newPassThroughHandler(h)
	case "exec":
		c := ExecHandlerConfig{
			Commander: s.Commander,
		}
		err = decodeOptions(spec.Options, &c)
		if err != nil {
			return
		}
		h := NewExecHandler(c, s.logger)
		ha = newPassThroughHandler(h)
	case "hipchat":
		c := hipchat.HandlerConfig{}
		err = decodeOptions(spec.Options, &c)
		if err != nil {
			return
		}
		h := s.HipChatService.Handler(c, s.logger)
		ha = newPassThroughHandler(h)
	case "log":
		c := DefaultLogHandlerConfig()
		err = decodeOptions(spec.Options, &c)
		if err != nil {
			return
		}
		h, err := NewLogHandler(c, s.logger)
		if err != nil {
			return nil, err
		}
		ha = newPassThroughHandler(h)
	case "opsgenie":
		c := opsgenie.HandlerConfig{}
		err = decodeOptions(spec.Options, &c)
		if err != nil {
			return
		}
		h := s.OpsGenieService.Handler(c, s.logger)
		ha = newPassThroughHandler(h)
	case "pagerduty":
		c := pagerduty.HandlerConfig{}
		err = decodeOptions(spec.Options, &c)
		if err != nil {
			return
		}
		h := s.PagerDutyService.Handler(c, s.logger)
		ha = newPassThroughHandler(h)
	case "post":
		c := PostHandlerConfig{}
		err = decodeOptions(spec.Options, &c)
		if err != nil {
			return
		}
		h := NewPostHandler(c, s.logger)
		ha = newPassThroughHandler(h)
	case "sensu":
		h := s.SensuService.Handler(s.logger)
		ha = newPassThroughHandler(h)
	case "slack":
		c := slack.HandlerConfig{}
		err = decodeOptions(spec.Options, &c)
		if err != nil {
			return
		}
		h := s.SlackService.Handler(c, s.logger)
		ha = newPassThroughHandler(h)
	case "smtp":
		c := smtp.HandlerConfig{}
		err = decodeOptions(spec.Options, &c)
		if err != nil {
			return
		}
		h := s.SMTPService.Handler(c, s.logger)
		ha = newPassThroughHandler(h)
	case "talk":
		h := s.TalkService.Handler(s.logger)
		ha = newPassThroughHandler(h)
	case "tcp":
		c := TCPHandlerConfig{}
		err = decodeOptions(spec.Options, &c)
		if err != nil {
			return
		}
		h := NewTCPHandler(c, s.logger)
		ha = newPassThroughHandler(h)
	case "telegram":
		c := telegram.HandlerConfig{}
		err = decodeOptions(spec.Options, &c)
		if err != nil {
			return
		}
		h := s.TelegramService.Handler(c, s.logger)
		ha = newPassThroughHandler(h)
	case "victorops":
		c := victorops.HandlerConfig{}
		err = decodeOptions(spec.Options, &c)
		if err != nil {
			return
		}
		h := s.VictorOpsService.Handler(c, s.logger)
		ha = newPassThroughHandler(h)
s.VictorOpsService.Handler(c, s.logger) + ha = newPassThroughHandler(h) + default: + err = fmt.Errorf("unsupported action kind %q", spec.Kind) + log.Println("error", err) + } + return +} + +// PassThroughHandler implements HandlerAction and passes through all events to the next handler. +type passThroughHandler struct { + h alert.Handler + next alert.Handler +} + +func newPassThroughHandler(h alert.Handler) *passThroughHandler { + return &passThroughHandler{ + h: h, + } +} + +func (h *passThroughHandler) Handle(event alert.Event) { + h.h.Handle(event) + h.next.Handle(event) +} + +func (h *passThroughHandler) SetNext(next alert.Handler) { + h.next = next +} +func (h *passThroughHandler) Close() { +} + +// NoopHandler implements Handler and does nothing with the event +type noopHandler struct{} + +func (h noopHandler) Handle(event alert.Event) {} diff --git a/services/alerta/alertatest/alertatest.go b/services/alerta/alertatest/alertatest.go new file mode 100644 index 000000000..ffc7f3d2f --- /dev/null +++ b/services/alerta/alertatest/alertatest.go @@ -0,0 +1,64 @@ +package alertatest + +import ( + "encoding/json" + "net/http" + "net/http/httptest" + "sync" +) + +type Server struct { + mu sync.Mutex + ts *httptest.Server + URL string + requests []Request + closed bool +} + +func NewServer() *Server { + s := new(Server) + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + ar := Request{ + URL: r.URL.String(), + Authorization: r.Header.Get("Authorization"), + } + dec := json.NewDecoder(r.Body) + dec.Decode(&ar.PostData) + s.mu.Lock() + s.requests = append(s.requests, ar) + s.mu.Unlock() + w.WriteHeader(http.StatusCreated) + })) + s.ts = ts + s.URL = ts.URL + return s +} +func (s *Server) Requests() []Request { + s.mu.Lock() + defer s.mu.Unlock() + return s.requests +} +func (s *Server) Close() { + if s.closed { + return + } + s.closed = true + s.ts.Close() +} + +type Request struct { + URL string + Authorization string + PostData 
PostData +} + +type PostData struct { + Resource string `json:"resource"` + Event string `json:"event"` + Group string `json:"group"` + Environment string `json:"environment"` + Text string `json:"text"` + Origin string `json:"origin"` + Service []string `json:"service"` + Value string `json:"value"` +} diff --git a/services/alerta/service.go b/services/alerta/service.go index 501d2c327..148583d7a 100644 --- a/services/alerta/service.go +++ b/services/alerta/service.go @@ -4,7 +4,6 @@ import ( "bytes" "crypto/tls" "encoding/json" - "errors" "fmt" "io/ioutil" "log" @@ -12,6 +11,16 @@ import ( "net/url" "path" "sync/atomic" + text "text/template" + + "github.com/influxdata/kapacitor/alert" + "github.com/pkg/errors" +) + +const ( + defaultResource = "{{ .Name }}" + defaultEvent = "{{ .ID }}" + defaultGroup = "{{ .Group }}" ) type Service struct { @@ -199,3 +208,197 @@ func (s *Service) preparePost(token, resource, event, environment, severity, gro return req, nil } + +type HandlerConfig struct { + // Alerta authentication token. + // If empty uses the token from the configuration. + Token string `mapstructure:"token"` + + // Alerta resource. + // Can be a template and has access to the same data as the AlertNode.Details property. + // Default: {{ .Name }} + Resource string `mapstructure:"resource"` + + // Alerta event. + // Can be a template and has access to the same data as the idInfo property. + // Default: {{ .ID }} + Event string `mapstructure:"event"` + + // Alerta environment. + // Can be a template and has access to the same data as the AlertNode.Details property. + // Default is set from the configuration. + Environment string `mapstructure:"environment"` + + // Alerta group. + // Can be a template and has access to the same data as the AlertNode.Details property. + // Default: {{ .Group }} + Group string `mapstructure:"group"` + + // Alerta value. + // Can be a template and has access to the same data as the AlertNode.Details property. 
+ // Default is an empty string. + Value string `mapstructure:"value"` + + // Alerta origin. + // If empty uses the origin from the configuration. + Origin string `mapstructure:"origin"` + + // List of affected Services + Service []string `mapstructure:"service"` +} + +type handler struct { + s *Service + c HandlerConfig + logger *log.Logger + + resourceTmpl *text.Template + eventTmpl *text.Template + environmentTmpl *text.Template + valueTmpl *text.Template + groupTmpl *text.Template +} + +func (s *Service) DefaultHandlerConfig() HandlerConfig { + return HandlerConfig{ + Resource: defaultResource, + Event: defaultEvent, + Group: defaultGroup, + } +} + +func (s *Service) Handler(c HandlerConfig, l *log.Logger) (alert.Handler, error) { + // Parse and validate alerta templates + rtmpl, err := text.New("resource").Parse(c.Resource) + if err != nil { + return nil, err + } + evtmpl, err := text.New("event").Parse(c.Event) + if err != nil { + return nil, err + } + etmpl, err := text.New("environment").Parse(c.Environment) + if err != nil { + return nil, err + } + gtmpl, err := text.New("group").Parse(c.Group) + if err != nil { + return nil, err + } + vtmpl, err := text.New("value").Parse(c.Value) + if err != nil { + return nil, err + } + return &handler{ + s: s, + c: c, + logger: l, + resourceTmpl: rtmpl, + eventTmpl: evtmpl, + environmentTmpl: etmpl, + groupTmpl: gtmpl, + valueTmpl: vtmpl, + }, nil +} + +type eventData struct { + ID string + // Measurement name + Name string + + // Task name + TaskName string + + // Concatenation of all group-by tags of the form [key=value,]+. + // If no groupBy is performed, equal to literal 'nil' + Group string + + // Map of tags + Tags map[string]string +} + +func (h *handler) Handle(event alert.Event) { + td := event.TemplateData() + var buf bytes.Buffer + err := h.resourceTmpl.Execute(&buf, td) + if err != nil { + h.logger.Printf("E! 
failed to evaluate Alerta Resource template %s: %v", h.c.Resource, err) + return + } + resource := buf.String() + buf.Reset() + + data := eventData{ + ID: td.ID, + Name: td.Name, + TaskName: td.TaskName, + Group: td.Group, + Tags: td.Tags, + } + err = h.eventTmpl.Execute(&buf, data) + if err != nil { + h.logger.Printf("E! failed to evaluate Alerta Event template %s: %v", h.c.Event, err) + return + } + eventStr := buf.String() + buf.Reset() + + err = h.environmentTmpl.Execute(&buf, td) + if err != nil { + h.logger.Printf("E! failed to evaluate Alerta Environment template %s: %v", h.c.Environment, err) + return + } + environment := buf.String() + buf.Reset() + + err = h.groupTmpl.Execute(&buf, td) + if err != nil { + h.logger.Printf("E! failed to evaluate Alerta Group template %s: %v", h.c.Group, err) + return + } + group := buf.String() + buf.Reset() + + err = h.valueTmpl.Execute(&buf, td) + if err != nil { + h.logger.Printf("E! failed to evaluate Alerta Value template %s: %v", h.c.Value, err) + return + } + value := buf.String() + + service := h.c.Service + if len(service) == 0 { + service = []string{td.Name} + } + + var severity string + + switch event.State.Level { + case alert.OK: + severity = "ok" + case alert.Info: + severity = "informational" + case alert.Warning: + severity = "warning" + case alert.Critical: + severity = "critical" + default: + severity = "indeterminate" + } + + if err := h.s.Alert( + h.c.Token, + resource, + eventStr, + environment, + severity, + group, + value, + event.State.Message, + h.c.Origin, + service, + event.Data.Result, + ); err != nil { + h.logger.Printf("E! 
failed to send event to Alerta: %v", err) + } +} diff --git a/services/config/dao.go b/services/config/dao.go index c184179e3..16ade9e26 100644 --- a/services/config/dao.go +++ b/services/config/dao.go @@ -1,7 +1,6 @@ package config import ( - "bytes" "encoding/json" "errors" @@ -52,138 +51,78 @@ type Override struct { Create bool `json:"create"` } -// versionWrapper wraps a structure with a version so that changes -// to the structure can be properly decoded. -type versionWrapper struct { - Version int `json:"version"` - Value *json.RawMessage `json:"value"` +func (o Override) ObjectID() string { + return o.ID } -const ( - overrideDataPrefix = "/overrides/data/" - overrideIndexesPrefix = "/overrides/indexes/" +func (o Override) MarshalBinary() ([]byte, error) { + return storage.VersionJSONEncode(version, o) +} - // Name of ID index - idIndex = "id/" -) +func (o *Override) UnmarshalBinary(data []byte) error { + return storage.VersionJSONDecode(data, func(version int, dec *json.Decoder) error { + dec.UseNumber() + return dec.Decode(o) + }) +} // Key/Value store based implementation of the OverrideDAO type overrideKV struct { - store storage.Interface -} - -func newOverrideKV(store storage.Interface) *overrideKV { - return &overrideKV{ - store: store, - } + store *storage.IndexedStore } -func encodeOverride(o Override) ([]byte, error) { - raw, err := json.Marshal(o) +func newOverrideKV(store storage.Interface) (*overrideKV, error) { + c := storage.DefaultIndexedStoreConfig("overrides", func() storage.BinaryObject { + return new(Override) + }) + istore, err := storage.NewIndexedStore(store, c) if err != nil { return nil, err } - rawCopy := make(json.RawMessage, len(raw)) - copy(rawCopy, raw) - wrapper := versionWrapper{ - Version: version, - Value: &rawCopy, - } - return json.Marshal(wrapper) -} - -func decodeOverride(data []byte) (Override, error) { - var wrapper versionWrapper - err := json.Unmarshal(data, &wrapper) - if err != nil { - return Override{}, err - } - 
var override Override - if wrapper.Value == nil { - return Override{}, errors.New("empty override") - } - dec := json.NewDecoder(bytes.NewReader(*wrapper.Value)) - // Do not convert all nums to float64, rather use json.Number which is a Stringer - dec.UseNumber() - err = dec.Decode(&override) - return override, err -} - -// Create a key for the override data -func (d *overrideKV) overrideDataKey(id string) string { - return overrideDataPrefix + id -} - -// Create a key for a given index and value. -// -// Indexes are maintained via a 'directory' like system: -// -// /overrides/data/ID -- contains encoded override data -// /overrides/index/id/ID -- contains the override ID -// -// As such to list all overrides in ID sorted order use the /overrides/index/id/ directory. -func (d *overrideKV) overrideIndexKey(index, value string) string { - return overrideIndexesPrefix + index + value + return &overrideKV{ + store: istore, + }, nil } -func (d *overrideKV) Get(id string) (Override, error) { - key := d.overrideDataKey(id) - if exists, err := d.store.Exists(key); err != nil { - return Override{}, err - } else if !exists { - return Override{}, ErrNoOverrideExists +func (kv *overrideKV) error(err error) error { + if err == storage.ErrNoObjectExists { + return ErrNoOverrideExists } - kv, err := d.store.Get(key) - if err != nil { - return Override{}, err - } - return decodeOverride(kv.Value) + return err } -func (d *overrideKV) Set(o Override) error { - key := d.overrideDataKey(o.ID) - - data, err := encodeOverride(o) +func (kv *overrideKV) Get(id string) (Override, error) { + obj, err := kv.store.Get(id) if err != nil { - return err + return Override{}, kv.error(err) } - // Put data - err = d.store.Put(key, data) - if err != nil { - return err + o, ok := obj.(*Override) + if !ok { + return Override{}, storage.ImpossibleTypeErr(o, obj) } - // Put ID index - indexKey := d.overrideIndexKey(idIndex, o.ID) - return d.store.Put(indexKey, []byte(o.ID)) + return *o, nil } -func (d 
*overrideKV) Delete(id string) error { - key := d.overrideDataKey(id) - indexKey := d.overrideIndexKey(idIndex, id) +func (kv *overrideKV) Set(o Override) error { + return kv.store.Put(&o) +} - dataErr := d.store.Delete(key) - indexErr := d.store.Delete(indexKey) - if dataErr != nil { - return dataErr - } - return indexErr +func (kv *overrideKV) Delete(id string) error { + return kv.store.Delete(id) } -func (d *overrideKV) List(prefix string) ([]Override, error) { - // List all override ids sorted by ID - ids, err := d.store.List(overrideIndexesPrefix + idIndex + prefix) +func (kv *overrideKV) List(prefix string) ([]Override, error) { + objects, err := kv.store.List(storage.DefaultIDIndex, "", 0, -1) if err != nil { return nil, err } - overrides := make([]Override, 0, len(ids)) - for _, kv := range ids { - id := string(kv.Value) - o, err := d.Get(id) - if err != nil { - return nil, err + overrides := make([]Override, len(objects)) + for i, object := range objects { + o, ok := object.(*Override) + if !ok { + return nil, storage.ImpossibleTypeErr(o, object) } - overrides = append(overrides, o) + overrides[i] = *o } - return overrides, nil } diff --git a/services/config/dao_test.go b/services/config/dao_test.go index fabfde17f..a3d6e18ab 100644 --- a/services/config/dao_test.go +++ b/services/config/dao_test.go @@ -43,19 +43,19 @@ func Test_encodeOverride_decodeOverride(t *testing.T) { }, } for _, tc := range testCases { - got, err := encodeOverride(tc.o) + got, err := tc.o.MarshalBinary() if err != nil { t.Fatal(err) } if !bytes.Equal(got, tc.exp) { t.Errorf("unexpected encoding:\ngot\n%s\nexp\n%s\n", string(got), string(tc.exp)) } - o, err := decodeOverride(got) - if err != nil { + o := new(Override) + if err := o.UnmarshalBinary(got); err != nil { t.Fatal(err) } - if !reflect.DeepEqual(o, tc.o) { - t.Errorf("unexpected decoding:\ngot\n%v\nexp\n%v\n", o, tc.o) + if !reflect.DeepEqual(*o, tc.o) { + t.Errorf("unexpected decoding:\ngot\n%v\nexp\n%v\n", *o, tc.o) } } } 
diff --git a/services/config/service.go b/services/config/service.go index a9fd11556..2893fca1b 100644 --- a/services/config/service.go +++ b/services/config/service.go @@ -63,7 +63,11 @@ const configNamespace = "config_overrides" func (s *Service) Open() error { store := s.StorageService.Store(configNamespace) - s.overrides = newOverrideKV(store) + overrides, err := newOverrideKV(store) + if err != nil { + return err + } + s.overrides = overrides // Cache element keys if elementKeys, err := override.ElementKeys(s.config); err != nil { @@ -75,26 +79,23 @@ func (s *Service) Open() error { // Define API routes s.routes = []httpd.Route{ { - Name: "config", Method: "GET", Pattern: configPath, HandlerFunc: s.handleGetConfig, }, { - Name: "config", Method: "GET", Pattern: configPathAnchored, HandlerFunc: s.handleGetConfig, }, { - Name: "config", Method: "POST", Pattern: configPathAnchored, HandlerFunc: s.handleUpdateSection, }, } - err := s.HTTPDService.AddRoutes(s.routes) + err = s.HTTPDService.AddRoutes(s.routes) return errors.Wrap(err, "failed to add API routes") } diff --git a/services/hipchat/hipchattest/hipchattest.go b/services/hipchat/hipchattest/hipchattest.go new file mode 100644 index 000000000..111e83011 --- /dev/null +++ b/services/hipchat/hipchattest/hipchattest.go @@ -0,0 +1,58 @@ +package hipchattest + +import ( + "encoding/json" + "net/http" + "net/http/httptest" + "sync" +) + +type Server struct { + mu sync.Mutex + ts *httptest.Server + URL string + requests []Request + closed bool +} + +func NewServer() *Server { + s := new(Server) + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + hr := Request{ + URL: r.URL.String(), + } + dec := json.NewDecoder(r.Body) + dec.Decode(&hr.PostData) + s.mu.Lock() + s.requests = append(s.requests, hr) + s.mu.Unlock() + })) + s.ts = ts + s.URL = ts.URL + return s +} + +func (s *Server) Requests() []Request { + s.mu.Lock() + defer s.mu.Unlock() + return s.requests +} +func (s 
*Server) Close() { + if s.closed { + return + } + s.closed = true + s.ts.Close() +} + +type Request struct { + URL string + PostData PostData +} + +type PostData struct { + From string `json:"from"` + Message string `json:"message"` + Color string `json:"color"` + Notify bool `json:"notify"` +} diff --git a/services/hipchat/service.go b/services/hipchat/service.go index 201d887d5..4ffb925ea 100644 --- a/services/hipchat/service.go +++ b/services/hipchat/service.go @@ -13,7 +13,7 @@ import ( "path" "sync/atomic" - "github.com/influxdata/kapacitor" + "github.com/influxdata/kapacitor/alert" ) type Service struct { @@ -64,9 +64,9 @@ func (s *Service) StateChangesOnly() bool { } type testOptions struct { - Room string `json:"room"` - Message string `json:"message"` - Level kapacitor.AlertLevel `json:"level"` + Room string `json:"room"` + Message string `json:"message"` + Level alert.Level `json:"level"` } func (s *Service) TestOptions() interface{} { @@ -74,7 +74,7 @@ func (s *Service) TestOptions() interface{} { return &testOptions{ Room: c.Room, Message: "test hipchat message", - Level: kapacitor.CritAlert, + Level: alert.Critical, } } @@ -87,7 +87,7 @@ func (s *Service) Test(options interface{}) error { return s.Alert(o.Room, c.Token, o.Message, o.Level) } -func (s *Service) Alert(room, token, message string, level kapacitor.AlertLevel) error { +func (s *Service) Alert(room, token, message string, level alert.Level) error { url, post, err := s.preparePost(room, token, message, level) if err != nil { return err @@ -115,7 +115,7 @@ func (s *Service) Alert(room, token, message string, level kapacitor.AlertLevel) return nil } -func (s *Service) preparePost(room, token, message string, level kapacitor.AlertLevel) (string, io.Reader, error) { +func (s *Service) preparePost(room, token, message string, level alert.Level) (string, io.Reader, error) { c := s.config() if !c.Enabled { @@ -140,16 +140,16 @@ func (s *Service) preparePost(room, token, message string, level 
kapacitor.Alert var color string switch level { - case kapacitor.WarnAlert: + case alert.Warning: color = "yellow" - case kapacitor.CritAlert: + case alert.Critical: color = "red" default: color = "green" } postData := make(map[string]interface{}) - postData["from"] = kapacitor.Product + postData["from"] = "kapacitor" postData["color"] = color postData["message"] = message postData["notify"] = true @@ -162,3 +162,38 @@ func (s *Service) preparePost(room, token, message string, level kapacitor.Alert } return u.String(), &post, nil } + +type HandlerConfig struct { + // HipChat room in which to post messages. + // If empty uses the channel from the configuration. + Room string `mapstructure:"room"` + + // HipChat authentication token. + // If empty uses the token from the configuration. + Token string `mapstructure:"token"` +} + +type handler struct { + s *Service + c HandlerConfig + logger *log.Logger +} + +func (s *Service) Handler(c HandlerConfig, l *log.Logger) alert.Handler { + return &handler{ + s: s, + c: c, + logger: l, + } +} + +func (h *handler) Handle(event alert.Event) { + if err := h.s.Alert( + h.c.Room, + h.c.Token, + event.State.Message, + event.State.Level, + ); err != nil { + h.logger.Println("E! 
failed to send event to HipChat", err) + } +} diff --git a/services/httpd/handler.go b/services/httpd/handler.go index 9ff94bd61..e1ce4d1f1 100644 --- a/services/httpd/handler.go +++ b/services/httpd/handler.go @@ -39,6 +39,8 @@ const ( const ( // Root path for the API BasePath = "/kapacitor/v1" + // Root path for the preview API + BasePreviewPath = "/kapacitor/v1preview" // Name of the special user for subscriptions SubscriptionUser = "~subscriber" ) @@ -56,7 +58,6 @@ const ( type AuthorizationHandler func(http.ResponseWriter, *http.Request, auth.User) type Route struct { - Name string Method string Pattern string HandlerFunc interface{} @@ -123,6 +124,7 @@ func NewHandler( "GET", "POST", "PATCH", + "PUT", "DELETE", "HEAD", "OPTIONS", @@ -132,108 +134,100 @@ func NewHandler( h.methodMux[method] = NewServeMux() route := Route{ // Catch all 404 - Name: "404", Method: method, Pattern: "/", HandlerFunc: h.serve404, } h.addRawRoute(route) + previewRoute := Route{ + // Catch all Rewrite+404 + Method: method, + Pattern: BasePreviewPath + "/", + HandlerFunc: h.rewritePreview, + } + h.addRawRoute(previewRoute) } h.addRawRoutes([]Route{ { // Ping - Name: "ping", Method: "GET", Pattern: BasePath + "/ping", HandlerFunc: h.servePing, }, { // Ping - Name: "ping-head", Method: "HEAD", Pattern: BasePath + "/ping", HandlerFunc: h.servePing, }, { // Data-ingest route. - Name: "write", Method: "POST", Pattern: BasePath + "/write", HandlerFunc: h.serveWrite, }, { // Satisfy CORS checks. - Name: "write", Method: "OPTIONS", Pattern: BasePath + "/write", HandlerFunc: ServeOptions, }, { // Data-ingest route for /write endpoint without base path - Name: "write-raw", Method: "POST", Pattern: "/write", HandlerFunc: h.serveWrite, }, { // Satisfy CORS checks. 
- Name: "write-raw", Method: "OPTIONS", Pattern: "/write", HandlerFunc: ServeOptions, }, { // Display current API routes - Name: "routes", Method: "GET", Pattern: BasePath + "/:routes", HandlerFunc: h.serveRoutes, }, { // Change current log level - Name: "log-level", Method: "POST", Pattern: BasePath + "/loglevel", HandlerFunc: h.serveLogLevel, }, { - Name: "pprof", Method: "GET", Pattern: BasePath + "/debug/pprof/", HandlerFunc: pprof.Index, noJSON: true, }, { - Name: "pprof/cmdline", Method: "GET", Pattern: BasePath + "/debug/pprof/cmdline", HandlerFunc: pprof.Cmdline, noJSON: true, }, { - Name: "pprof/profile", Method: "GET", Pattern: BasePath + "/debug/pprof/profile", HandlerFunc: pprof.Profile, noJSON: true, }, { - Name: "pprof/symbol", Method: "GET", Pattern: BasePath + "/debug/pprof/symbol", HandlerFunc: pprof.Symbol, noJSON: true, }, { - Name: "pprof/trace", Method: "GET", Pattern: BasePath + "/debug/pprof/trace", HandlerFunc: pprof.Trace, noJSON: true, }, { - Name: "debug/vars", Method: "GET", Pattern: BasePath + "/debug/vars", HandlerFunc: serveExpvar, @@ -261,6 +255,24 @@ func (h *Handler) AddRoute(r Route) error { return h.addRawRoute(r) } +func (h *Handler) AddPreviewRoutes(routes []Route) error { + for _, r := range routes { + err := h.AddPreviewRoute(r) + if err != nil { + return err + } + } + return nil +} + +func (h *Handler) AddPreviewRoute(r Route) error { + if len(r.Pattern) > 0 && r.Pattern[0] != '/' { + return fmt.Errorf("route patterns must begin with a '/' %s", r.Pattern) + } + r.Pattern = BasePreviewPath + r.Pattern + return h.addRawRoute(r) +} + func (h *Handler) addRawRoutes(routes []Route) error { for _, r := range routes { err := h.addRawRoute(r) @@ -299,9 +311,9 @@ func (h *Handler) addRawRoute(r Route) error { handler = requestID(handler) if h.loggingEnabled { - handler = logHandler(handler, r.Name, h.clfLogger) + handler = logHandler(handler, h.clfLogger) } - handler = recovery(handler, r.Name, h.logger) // make sure recovery is 
always last + handler = recovery(handler, h.logger) // make sure recovery is always last  mux, ok := h.methodMux[r.Method] if !ok { @@ -330,6 +342,17 @@ func (h *Handler) delRawRoute(r Route) { } }  +// rewritePreview rewrites the URL path from BasePreviewPath to BasePath, +// thus allowing any URI that exists on BasePath to be auto promoted to the BasePreviewPath. +func (h *Handler) rewritePreview(w http.ResponseWriter, r *http.Request) { + if strings.HasPrefix(r.URL.Path, BasePreviewPath) { + r.URL.Path = strings.Replace(r.URL.Path, BasePreviewPath, BasePath, 1) + h.ServeHTTP(w, r) + } else { + h.serve404(w, r) + } +} + // ServeHTTP responds to HTTP request to the handler. func (h *Handler) ServeHTTP(w http.ResponseWriter, r *http.Request) { h.statMap.Add(statRequest, 1) @@ -637,7 +660,7 @@ func requiredPrivilegeForHTTPMethod(method string) (auth.Privilege, error) { return auth.NoPrivileges, nil case "GET": return auth.ReadPrivilege, nil - case "POST", "PATCH": + case "POST", "PATCH", "PUT": return auth.WritePrivilege, nil case "DELETE": return auth.DeletePrivilege, nil @@ -840,7 +863,7 @@ func requestID(inner http.Handler) http.Handler { }) } -func logHandler(inner http.Handler, name string, weblog *log.Logger) http.Handler { +func logHandler(inner http.Handler, weblog *log.Logger) http.Handler { return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { start := time.Now() l := &responseLogger{w: w} @@ -849,7 +872,7 @@ func logHandler(inner http.Handler, name string, weblog *log.Logger) http.Handle }) } -func recovery(inner http.Handler, name string, weblog *log.Logger) http.Handler { +func recovery(inner http.Handler, weblog *log.Logger) http.Handler { return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { start := time.Now() l := &responseLogger{w: w} diff --git a/services/httpd/handler_test.go b/services/httpd/handler_test.go index 80e137763..a8d253e85 100644 --- a/services/httpd/handler_test.go +++ b/services/httpd/handler_test.go 
@@ -1,7 +1,6 @@ package httpd import ( - "errors" "testing" "github.com/influxdata/kapacitor/auth" @@ -75,7 +74,13 @@ func Test_RequiredPrilegeForHTTPMethod(t *testing.T) { }, { m: "PUT", - err: errors.New(`unknown method "PUT"`), + rp: auth.WritePrivilege, + err: nil, + }, + { + m: "put", + rp: auth.WritePrivilege, + err: nil, }, } diff --git a/services/httpd/service.go b/services/httpd/service.go index 52355415d..c649378a8 100644 --- a/services/httpd/service.go +++ b/services/httpd/service.go @@ -274,6 +274,10 @@ func (s *Service) AddRoutes(routes []Route) error { return s.Handler.AddRoutes(routes) } +func (s *Service) AddPreviewRoutes(routes []Route) error { + return s.Handler.AddPreviewRoutes(routes) +} + func (s *Service) DelRoutes(routes []Route) { s.Handler.DelRoutes(routes) } diff --git a/services/influxdb/service.go b/services/influxdb/service.go index 2f581cf39..d6df03c9f 100644 --- a/services/influxdb/service.go +++ b/services/influxdb/service.go @@ -115,7 +115,6 @@ func (s *Service) Open() error { // Define API routes s.routes = []httpd.Route{ { - Name: "subscriptions", Method: "POST", Pattern: subscriptionsPath, HandlerFunc: s.handleSubscriptions, diff --git a/services/opsgenie/opsgenietest/opsgenietest.go b/services/opsgenie/opsgenietest/opsgenietest.go new file mode 100644 index 000000000..9de6e2f03 --- /dev/null +++ b/services/opsgenie/opsgenietest/opsgenietest.go @@ -0,0 +1,62 @@ +package opsgenietest + +import ( + "encoding/json" + "net/http" + "net/http/httptest" + "sync" +) + +type Server struct { + mu sync.Mutex + ts *httptest.Server + URL string + requests []Request + closed bool +} + +func NewServer() *Server { + s := new(Server) + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + or := Request{ + URL: r.URL.String(), + } + dec := json.NewDecoder(r.Body) + dec.Decode(&or.PostData) + s.mu.Lock() + s.requests = append(s.requests, or) + s.mu.Unlock() + })) + s.ts = ts + s.URL = ts.URL + return s +} +func 
(s *Server) Requests() []Request { + s.mu.Lock() + defer s.mu.Unlock() + return s.requests +} +func (s *Server) Close() { + if s.closed { + return + } + s.closed = true + s.ts.Close() +} + +type Request struct { + URL string + PostData PostData +} + +type PostData struct { + ApiKey string `json:"apiKey"` + Message string `json:"message"` + Entity string `json:"entity"` + Alias string `json:"alias"` + Note string `json:"note"` + Details map[string]interface{} `json:"details"` + Description string `json:"description"` + Teams []string `json:"teams"` + Recipients []string `json:"recipients"` +} diff --git a/services/opsgenie/service.go b/services/opsgenie/service.go index b7441b04f..f2dcb47ca 100644 --- a/services/opsgenie/service.go +++ b/services/opsgenie/service.go @@ -12,7 +12,7 @@ import ( "sync/atomic" "time" - "github.com/influxdata/kapacitor" + "github.com/influxdata/kapacitor/alert" ) type Service struct { @@ -134,7 +134,7 @@ func (s *Service) preparePost(teams []string, recipients []string, messageType, ogData["alias"] = entityID ogData["message"] = message ogData["note"] = "" - ogData["monitoring_tool"] = kapacitor.Product + ogData["monitoring_tool"] = "kapacitor" //Extra Fields (can be used for filtering) ogDetails := make(map[string]interface{}) @@ -183,3 +183,46 @@ func (s *Service) preparePost(teams []string, recipients []string, messageType, return url, &post, nil } + +type HandlerConfig struct { + // OpsGenie Teams. + TeamsList []string `mapstructure:"teams-list"` + + // OpsGenie Recipients. 
+ RecipientsList []string `mapstructure:"recipients-list"` +} + +type handler struct { + s *Service + c HandlerConfig + logger *log.Logger +} + +func (s *Service) Handler(c HandlerConfig, l *log.Logger) alert.Handler { + return &handler{ + s: s, + c: c, + logger: l, + } +} + +func (h *handler) Handle(event alert.Event) { + var messageType string + switch event.State.Level { + case alert.OK: + messageType = "RECOVERY" + default: + messageType = event.State.Level.String() + } + if err := h.s.Alert( + h.c.TeamsList, + h.c.RecipientsList, + messageType, + event.State.Message, + event.State.ID, + event.State.Time, + event.Data.Result, + ); err != nil { + h.logger.Println("E! failed to send event to OpsGenie", err) + } +} diff --git a/services/pagerduty/pagerdutytest/pagerdutytest.go b/services/pagerduty/pagerdutytest/pagerdutytest.go new file mode 100644 index 000000000..df9c22ad8 --- /dev/null +++ b/services/pagerduty/pagerdutytest/pagerdutytest.go @@ -0,0 +1,59 @@ +package pagerdutytest + +import ( + "encoding/json" + "net/http" + "net/http/httptest" + "sync" +) + +type Server struct { + mu sync.Mutex + ts *httptest.Server + URL string + requests []Request + closed bool +} + +func NewServer() *Server { + s := new(Server) + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + pr := Request{ + URL: r.URL.String(), + } + dec := json.NewDecoder(r.Body) + dec.Decode(&pr.PostData) + s.mu.Lock() + s.requests = append(s.requests, pr) + s.mu.Unlock() + })) + s.ts = ts + s.URL = ts.URL + return s +} +func (s *Server) Requests() []Request { + s.mu.Lock() + defer s.mu.Unlock() + return s.requests +} +func (s *Server) Close() { + if s.closed { + return + } + s.closed = true + s.ts.Close() +} + +type Request struct { + URL string + PostData PostData +} + +type PostData struct { + ServiceKey string `json:"service_key"` + EventType string `json:"event_type"` + Description string `json:"description"` + Client string `json:"client"` + ClientURL 
string `json:"client_url"` + Details string `json:"details"` +} diff --git a/services/pagerduty/service.go b/services/pagerduty/service.go index eb269da4e..f2bea02e5 100644 --- a/services/pagerduty/service.go +++ b/services/pagerduty/service.go @@ -11,7 +11,7 @@ import ( "net/http" "sync/atomic" - "github.com/influxdata/kapacitor" + "github.com/influxdata/kapacitor/alert" ) type Service struct { @@ -61,16 +61,16 @@ func (s *Service) Global() bool { } type testOptions struct { - IncidentKey string `json:"incident-key"` - Description string `json:"description"` - Level kapacitor.AlertLevel `json:"level"` + IncidentKey string `json:"incident-key"` + Description string `json:"description"` + Level alert.Level `json:"level"` } func (s *Service) TestOptions() interface{} { return &testOptions{ IncidentKey: "testIncidentKey", Description: "test pagerduty message", - Level: kapacitor.CritAlert, + Level: alert.Critical, } } @@ -89,11 +89,12 @@ func (s *Service) Test(options interface{}) error { ) } -func (s *Service) Alert(serviceKey, incidentKey, desc string, level kapacitor.AlertLevel, details interface{}) error { +func (s *Service) Alert(serviceKey, incidentKey, desc string, level alert.Level, details interface{}) error { url, post, err := s.preparePost(serviceKey, incidentKey, desc, level, details) if err != nil { return err } + resp, err := http.Post(url, "application/json", post) if err != nil { return err @@ -116,7 +117,7 @@ func (s *Service) Alert(serviceKey, incidentKey, desc string, level kapacitor.Al return nil } -func (s *Service) preparePost(serviceKey, incidentKey, desc string, level kapacitor.AlertLevel, details interface{}) (string, io.Reader, error) { +func (s *Service) preparePost(serviceKey, incidentKey, desc string, level alert.Level, details interface{}) (string, io.Reader, error) { c := s.config() if !c.Enabled { @@ -125,9 +126,9 @@ func (s *Service) preparePost(serviceKey, incidentKey, desc string, level kapaci var eventType string switch level { - 
case kapacitor.WarnAlert, kapacitor.CritAlert: + case alert.Warning, alert.Critical: eventType = "trigger" - case kapacitor.InfoAlert: + case alert.Info: return "", nil, fmt.Errorf("AlertLevel 'info' is currently ignored by the PagerDuty service") default: eventType = "resolve" @@ -142,7 +143,7 @@ func (s *Service) preparePost(serviceKey, incidentKey, desc string, level kapaci pData["event_type"] = eventType pData["description"] = desc pData["incident_key"] = incidentKey - pData["client"] = kapacitor.Product + pData["client"] = "kapacitor" pData["client_url"] = s.HTTPDService.URL() if details != nil { b, err := json.Marshal(details) @@ -162,3 +163,35 @@ func (s *Service) preparePost(serviceKey, incidentKey, desc string, level kapaci return c.URL, &post, nil } + +type HandlerConfig struct { + // The service key to use for the alert. + // Defaults to the value in the configuration if empty. + ServiceKey string `mapstructure:"service-key"` +} + +type handler struct { + s *Service + c HandlerConfig + logger *log.Logger +} + +func (s *Service) Handler(c HandlerConfig, l *log.Logger) alert.Handler { + return &handler{ + s: s, + c: c, + logger: l, + } +} + +func (h *handler) Handle(event alert.Event) { + if err := h.s.Alert( + h.c.ServiceKey, + event.State.ID, + event.State.Message, + event.State.Level, + event.Data.Result, + ); err != nil { + h.logger.Println("E! 
failed to send event to PagerDuty", err) + } +} diff --git a/services/replay/dao.go b/services/replay/dao.go index 94eece279..ef2e5e9bd 100644 --- a/services/replay/dao.go +++ b/services/replay/dao.go @@ -4,7 +4,6 @@ import ( "bytes" "encoding/gob" "errors" - "path" "time" "github.com/influxdata/kapacitor/services/storage" @@ -78,209 +77,98 @@ type Recording struct { Progress float64 } -const ( - recordingDataPrefix = "/recordings/data/" - recordingIndexesPrefix = "/recordings/indexes/" - - // Name of ID index - recordingIdIndex = "id/" - // Name of Date index - recordingDateIndex = "date/" -) +type rawRecording Recording -// Key/Value based implementation of the RecordingDAO. -type recordingKV struct { - store storage.Interface +func (r Recording) ObjectID() string { + return r.ID } -func newRecordingKV(store storage.Interface) *recordingKV { - return &recordingKV{ - store: store, - } -} - -func (d *recordingKV) encodeRecording(r Recording) ([]byte, error) { +func (r Recording) MarshalBinary() ([]byte, error) { var buf bytes.Buffer - enc := gob.NewEncoder(&buf) - err := enc.Encode(r) + err := gob.NewEncoder(&buf).Encode((rawRecording)(r)) return buf.Bytes(), err } -func (d *recordingKV) decodeRecording(data []byte) (Recording, error) { - var recording Recording - dec := gob.NewDecoder(bytes.NewReader(data)) - err := dec.Decode(&recording) - return recording, err -} - -// Create a key for the recording data -func (d *recordingKV) recordingDataKey(id string) string { - return recordingDataPrefix + id +func (r *Recording) UnmarshalBinary(data []byte) error { + return gob.NewDecoder(bytes.NewReader(data)).Decode((*rawRecording)(r)) } -// Create a key for a given index and value. 
-// -// Indexes are maintained via a 'directory' like system: -// -// /recordings/data/ID -- contains encoded recording data -// /recordings/index/id/ID -- contains the recording ID -// /recordings/index/date/DATE/ID -- contains the recording ID -// -// As such to list all recordings in Date sorted order use the /recordings/index/date/ directory. -func (d *recordingKV) recordingIndexKey(index, value string) string { - return recordingIndexesPrefix + index + value -} +// Name of Date index +const recordingDateIndex = "date" -func (d *recordingKV) recordingIDIndexKey(r Recording) string { - return d.recordingIndexKey(replayIdIndex, r.ID) -} -func (d *recordingKV) recordingDateIndexKey(r Recording) string { - return d.recordingIndexKey(recordingDateIndex, r.Date.Format(time.RFC3339)+"/"+r.ID) +// Key/Value based implementation of the RecordingDAO. +type recordingKV struct { + store *storage.IndexedStore } -func (d *recordingKV) Get(id string) (Recording, error) { - key := d.recordingDataKey(id) - if exists, err := d.store.Exists(key); err != nil { - return Recording{}, err - } else if !exists { - return Recording{}, ErrNoRecordingExists - } - kv, err := d.store.Get(key) +func newRecordingKV(store storage.Interface) (*recordingKV, error) { + c := storage.DefaultIndexedStoreConfig("recordings", func() storage.BinaryObject { + return new(Recording) + }) + c.Indexes = append(c.Indexes, storage.Index{ + Name: recordingDateIndex, + ValueFunc: func(o storage.BinaryObject) (string, error) { + r, ok := o.(*Recording) + if !ok { + return "", storage.ImpossibleTypeErr(r, o) + } + return r.Date.UTC().Format(time.RFC3339), nil + }, + }) + istore, err := storage.NewIndexedStore(store, c) if err != nil { - return Recording{}, err + return nil, err } - return d.decodeRecording(kv.Value) + return &recordingKV{ + store: istore, + }, nil } -func (d *recordingKV) Create(r Recording) error { - key := d.recordingDataKey(r.ID) - - exists, err := d.store.Exists(key) - if err != nil { - 
return err - } - if exists { +func (kv *recordingKV) error(err error) error { + if err == storage.ErrNoObjectExists { + return ErrNoRecordingExists + } else if err == storage.ErrObjectExists { return ErrRecordingExists } - - data, err := d.encodeRecording(r) - if err != nil { - return err - } - // Put data - err = d.store.Put(key, data) - if err != nil { - return err - } - // Put ID index - indexKey := d.recordingIDIndexKey(r) - err = d.store.Put(indexKey, []byte(r.ID)) - if err != nil { - return err - } - // Put Date index - indexKey = d.recordingDateIndexKey(r) - return d.store.Put(indexKey, []byte(r.ID)) + return err } -func (d *recordingKV) Replace(r Recording) error { - key := d.recordingDataKey(r.ID) - - exists, err := d.store.Exists(key) +func (kv *recordingKV) Get(id string) (Recording, error) { + o, err := kv.store.Get(id) if err != nil { - return err - } - if !exists { - return ErrNoRecordingExists + return Recording{}, kv.error(err) } - - prev, err := d.Get(r.ID) - if err != nil { - return err + r, ok := o.(*Recording) + if !ok { + return Recording{}, storage.ImpossibleTypeErr(r, o) } - - data, err := d.encodeRecording(r) - if err != nil { - return err - } - // Put data - err = d.store.Put(key, data) - if err != nil { - return err - } - // Update Date index - prevIndexKey := d.recordingDateIndexKey(prev) - err = d.store.Delete(prevIndexKey) - if err != nil { - return err - } - currIndexKey := d.recordingDateIndexKey(r) - err = d.store.Put(currIndexKey, []byte(r.ID)) - if err != nil { - return err - } - return nil + return *r, nil } -func (d *recordingKV) Delete(id string) error { - key := d.recordingDataKey(id) - r, err := d.Get(id) - if err != nil { - if err == ErrNoRecordingExists { - return nil - } - return err - } - - idIndexKey := d.recordingIDIndexKey(r) - dateIndexKey := d.recordingDateIndexKey(r) +func (kv *recordingKV) Create(r Recording) error { + return kv.error(kv.store.Create(&r)) +} - dataErr := d.store.Delete(key) - idIndexErr := 
d.store.Delete(idIndexKey) - dateIndexErr := d.store.Delete(dateIndexKey) - if dataErr != nil { - return dataErr - } - if idIndexErr != nil { - return dataErr - } - return dateIndexErr +func (kv *recordingKV) Replace(r Recording) error { + return kv.error(kv.store.Replace(&r)) } -func (d *recordingKV) List(pattern string, offset, limit int) ([]Recording, error) { - // Recordings are indexed by their Date. - // This allows us to do offset/limits and filtering without having to read in all recording data. +func (kv *recordingKV) Delete(id string) error { + return kv.store.Delete(id) +} - // List all recording ids sorted by Date - ids, err := d.store.List(recordingIndexesPrefix + recordingDateIndex) +func (kv *recordingKV) List(pattern string, offset, limit int) ([]Recording, error) { + objects, err := kv.store.ReverseList(recordingDateIndex, pattern, offset, limit) if err != nil { return nil, err } - // Reverse to sort by newest first - for i, j := 0, len(ids)-1; i < j; i, j = i+1, j-1 { - ids[i], ids[j] = ids[j], ids[i] - } - - var match func([]byte) bool - if pattern != "" { - match = func(value []byte) bool { - id := string(value) - matched, _ := path.Match(pattern, id) - return matched - } - } else { - match = func([]byte) bool { return true } - } - matches := storage.DoListFunc(ids, match, offset, limit) - - recordings := make([]Recording, len(matches)) - for i, id := range matches { - data, err := d.store.Get(d.recordingDataKey(string(id))) - if err != nil { - return nil, err + recordings := make([]Recording, len(objects)) + for i, o := range objects { + r, ok := o.(*Recording) + if !ok { + return nil, storage.ImpossibleTypeErr(r, o) } - t, err := d.decodeRecording(data.Value) - recordings[i] = t + recordings[i] = *r } return recordings, nil } @@ -328,207 +216,98 @@ type Replay struct { Progress float64 } -const ( - replayDataPrefix = "/replays/data/" - replayIndexesPrefix = "/replays/indexes/" - - replayIdIndex = "id/" - replayDateIndex = "date/" -) +type 
rawReplay Replay -// Key/Value based implementation of the ReplayDAO. -type replayKV struct { - store storage.Interface +func (r Replay) ObjectID() string { + return r.ID } -func newReplayKV(store storage.Interface) *replayKV { - return &replayKV{ - store: store, - } -} - -func (d *replayKV) encodeReplay(r Replay) ([]byte, error) { +func (r Replay) MarshalBinary() ([]byte, error) { var buf bytes.Buffer - enc := gob.NewEncoder(&buf) - err := enc.Encode(r) + err := gob.NewEncoder(&buf).Encode(rawReplay(r)) return buf.Bytes(), err } -func (d *replayKV) decodeReplay(data []byte) (Replay, error) { - var replay Replay - dec := gob.NewDecoder(bytes.NewReader(data)) - err := dec.Decode(&replay) - return replay, err -} - -// Create a key for the replay data -func (d *replayKV) replayDataKey(id string) string { - return replayDataPrefix + id +func (r *Replay) UnmarshalBinary(data []byte) error { + return gob.NewDecoder(bytes.NewReader(data)).Decode((*rawReplay)(r)) } -// Create a key for a given index and value. -// -// Indexes are maintained via a 'directory' like system: -// -// /replays/data/ID -- contains encoded replay data -// /replays/index/id/ID -- contains the replay ID -// /replays/index/date/DATE/ID -- contains the replay ID -// -// As such to list all replays in Date sorted order use the /replays/index/date/ directory. -func (d *replayKV) replayIndexKey(index, value string) string { - return replayIndexesPrefix + index + value -} +// Name of the replay date index +const replayDateIndex = "date" -func (d *replayKV) replayIDIndexKey(r Replay) string { - return d.replayIndexKey(replayIdIndex, r.ID) -} -func (d *replayKV) replayDateIndexKey(r Replay) string { - return d.replayIndexKey(replayDateIndex, r.Date.Format(time.RFC3339)+"/"+r.ID) +// Key/Value based implementation of the ReplayDAO. 
+type replayKV struct { + store *storage.IndexedStore } -func (d *replayKV) Get(id string) (Replay, error) { - key := d.replayDataKey(id) - if exists, err := d.store.Exists(key); err != nil { - return Replay{}, err - } else if !exists { - return Replay{}, ErrNoReplayExists - } - kv, err := d.store.Get(key) +func newReplayKV(store storage.Interface) (*replayKV, error) { + c := storage.DefaultIndexedStoreConfig("replays", func() storage.BinaryObject { + return new(Replay) + }) + c.Indexes = append(c.Indexes, storage.Index{ + Name: replayDateIndex, + ValueFunc: func(o storage.BinaryObject) (string, error) { + r, ok := o.(*Replay) + if !ok { + return "", storage.ImpossibleTypeErr(r, o) + } + return r.Date.UTC().Format(time.RFC3339), nil + }, + }) + istore, err := storage.NewIndexedStore(store, c) if err != nil { - return Replay{}, err + return nil, err } - return d.decodeReplay(kv.Value) + return &replayKV{ + store: istore, + }, nil } -func (d *replayKV) Create(r Replay) error { - key := d.replayDataKey(r.ID) - - exists, err := d.store.Exists(key) - if err != nil { - return err - } - if exists { +func (kv *replayKV) error(err error) error { + if err == storage.ErrNoObjectExists { + return ErrNoReplayExists + } else if err == storage.ErrObjectExists { return ErrReplayExists } - - data, err := d.encodeReplay(r) - if err != nil { - return err - } - // Put data - err = d.store.Put(key, data) - if err != nil { - return err - } - // Put ID index - indexKey := d.replayIDIndexKey(r) - err = d.store.Put(indexKey, []byte(r.ID)) - if err != nil { - return err - } - // Put Date index - indexKey = d.replayDateIndexKey(r) - return d.store.Put(indexKey, []byte(r.ID)) + return err } -func (d *replayKV) Replace(r Replay) error { - key := d.replayDataKey(r.ID) - - exists, err := d.store.Exists(key) +func (kv *replayKV) Get(id string) (Replay, error) { + o, err := kv.store.Get(id) if err != nil { - return err - } - if !exists { - return ErrNoReplayExists + return Replay{}, kv.error(err) 
} - - prev, err := d.Get(r.ID) - if err != nil { - return err + r, ok := o.(*Replay) + if !ok { + return Replay{}, storage.ImpossibleTypeErr(r, o) } - - data, err := d.encodeReplay(r) - if err != nil { - return err - } - // Put data - err = d.store.Put(key, data) - if err != nil { - return err - } - // Update Date index - prevIndexKey := d.replayDateIndexKey(prev) - err = d.store.Delete(prevIndexKey) - if err != nil { - return err - } - currIndexKey := d.replayDateIndexKey(r) - err = d.store.Put(currIndexKey, []byte(r.ID)) - if err != nil { - return err - } - return nil + return *r, nil } -func (d *replayKV) Delete(id string) error { - key := d.replayDataKey(id) - r, err := d.Get(id) - if err != nil { - if err == ErrNoReplayExists { - return nil - } - return err - } - - idIndexKey := d.replayIDIndexKey(r) - dateIndexKey := d.replayDateIndexKey(r) +func (kv *replayKV) Create(r Replay) error { + return kv.error(kv.store.Create(&r)) +} - dataErr := d.store.Delete(key) - idIndexErr := d.store.Delete(idIndexKey) - dateIndexErr := d.store.Delete(dateIndexKey) - if dataErr != nil { - return dataErr - } - if idIndexErr != nil { - return dataErr - } - return dateIndexErr +func (kv *replayKV) Replace(r Replay) error { + return kv.error(kv.store.Replace(&r)) } -func (d *replayKV) List(pattern string, offset, limit int) ([]Replay, error) { - // Replays are indexed by their Date. - // This allows us to do offset/limits and filtering without having to read in all replay data. 
+func (kv *replayKV) Delete(id string) error { + return kv.store.Delete(id) +} - // List all replay ids sorted by Date - ids, err := d.store.List(replayIndexesPrefix + replayDateIndex) +func (kv *replayKV) List(pattern string, offset, limit int) ([]Replay, error) { + objects, err := kv.store.ReverseList(replayDateIndex, pattern, offset, limit) if err != nil { return nil, err } - // Reverse to sort by newest first - for i, j := 0, len(ids)-1; i < j; i, j = i+1, j-1 { - ids[i], ids[j] = ids[j], ids[i] - } - - var match func([]byte) bool - if pattern != "" { - match = func(value []byte) bool { - id := string(value) - matched, _ := path.Match(pattern, id) - return matched - } - } else { - match = func([]byte) bool { return true } - } - matches := storage.DoListFunc(ids, match, offset, limit) - - replays := make([]Replay, len(matches)) - for i, id := range matches { - data, err := d.store.Get(d.replayDataKey(string(id))) - if err != nil { - return nil, err + replays := make([]Replay, len(objects)) + for i, o := range objects { + r, ok := o.(*Replay) + if !ok { + return nil, storage.ImpossibleTypeErr(r, o) } - t, err := d.decodeReplay(data.Value) - replays[i] = t + replays[i] = *r } return replays, nil } diff --git a/services/replay/service.go b/services/replay/service.go index 15a28f4f6..06cfac16e 100644 --- a/services/replay/service.go +++ b/services/replay/service.go @@ -101,18 +101,24 @@ const replayNamespace = "replay_store" func (s *Service) Open() error { // Create DAO - s.recordings = newRecordingKV(s.StorageService.Store(recordingNamespace)) - s.replays = newReplayKV(s.StorageService.Store(replayNamespace)) - - err := os.MkdirAll(s.saveDir, 0755) + recordings, err := newRecordingKV(s.StorageService.Store(recordingNamespace)) if err != nil { return err } - - err = s.syncRecordingMetadata() + s.recordings = recordings + replays, err := newReplayKV(s.StorageService.Store(replayNamespace)) if err != nil { return err } + s.replays = replays + + if err := 
os.MkdirAll(s.saveDir, 0755); err != nil { + return err + } + + if err := s.syncRecordingMetadata(); err != nil { + return err + } // Mark all running replays or recordings as failed since // we are just starting and they cannot possibly be still running @@ -122,85 +128,71 @@ func (s *Service) Open() error { // Setup routes s.routes = []httpd.Route{ { - Name: "recording", Method: "GET", Pattern: recordingsPathAnchored, HandlerFunc: s.handleRecording, }, { - Name: "deleteRecording", Method: "DELETE", Pattern: recordingsPathAnchored, HandlerFunc: s.handleDeleteRecording, }, { - Name: "/recordings/-cors", Method: "OPTIONS", Pattern: recordingsPathAnchored, HandlerFunc: httpd.ServeOptions, }, { - Name: "listRecordings", Method: "GET", Pattern: recordingsPath, HandlerFunc: s.handleListRecordings, }, { - Name: "createRecording", Method: "POST", Pattern: recordStreamPath, HandlerFunc: s.handleRecordStream, }, { - Name: "createRecording", Method: "POST", Pattern: recordBatchPath, HandlerFunc: s.handleRecordBatch, }, { - Name: "createRecording", Method: "POST", Pattern: recordQueryPath, HandlerFunc: s.handleRecordQuery, }, { - Name: "replay", Method: "GET", Pattern: replaysPathAnchored, HandlerFunc: s.handleReplay, }, { - Name: "deleteReplay", Method: "DELETE", Pattern: replaysPathAnchored, HandlerFunc: s.handleDeleteReplay, }, { - Name: "/replays/-cors", Method: "OPTIONS", Pattern: replaysPathAnchored, HandlerFunc: httpd.ServeOptions, }, { - Name: "listReplays", Method: "GET", Pattern: replaysPath, HandlerFunc: s.handleListReplays, }, { - Name: "createReplay", Method: "POST", Pattern: replaysPath, HandlerFunc: s.handleCreateReplay, }, { - Name: "replayBatch", Method: "POST", Pattern: replayBatchPath, HandlerFunc: s.handleReplayBatch, }, { - Name: "replayQuery", Method: "POST", Pattern: replayQueryPath, HandlerFunc: s.handleReplayQuery, diff --git a/services/sensu/sensutest/sensutest.go b/services/sensu/sensutest/sensutest.go new file mode 100644 index 000000000..fd67794e3 
--- /dev/null +++ b/services/sensu/sensutest/sensutest.go @@ -0,0 +1,71 @@ +package sensutest + +import ( + "encoding/json" + "net" + "sync" +) + +type Server struct { + l *net.TCPListener + requests []Request + Addr string + wg sync.WaitGroup + closed bool +} + +func NewServer() (*Server, error) { + addr, err := net.ResolveTCPAddr("tcp", "localhost:0") + if err != nil { + return nil, err + } + l, err := net.ListenTCP("tcp", addr) + if err != nil { + return nil, err + } + s := &Server{ + l: l, + Addr: l.Addr().String(), + } + s.wg.Add(1) + go func() { + defer s.wg.Done() + s.run() + }() + return s, nil +} + +func (s *Server) Requests() []Request { + return s.requests +} + +func (s *Server) Close() { + if s.closed { + return + } + s.closed = true + s.l.Close() + s.wg.Wait() +} + +func (s *Server) run() { + for { + conn, err := s.l.Accept() + if err != nil { + return + } + func() { + defer conn.Close() + r := Request{} + json.NewDecoder(conn).Decode(&r) + s.requests = append(s.requests, r) + }() + } +} + +type Request struct { + Name string `json:"name"` + Source string `json:"source"` + Output string `json:"output"` + Status int `json:"status"` +} diff --git a/services/sensu/service.go b/services/sensu/service.go index 69224ba7a..a97055c82 100644 --- a/services/sensu/service.go +++ b/services/sensu/service.go @@ -10,7 +10,7 @@ import ( "regexp" "sync/atomic" - "github.com/influxdata/kapacitor" + "github.com/influxdata/kapacitor/alert" ) type Service struct { @@ -53,16 +53,16 @@ func (s *Service) Update(newConfig []interface{}) error { } type testOptions struct { - Name string `json:"name"` - Output string `json:"output"` - Level kapacitor.AlertLevel `json:"level"` + Name string `json:"name"` + Output string `json:"output"` + Level alert.Level `json:"level"` } func (s *Service) TestOptions() interface{} { return &testOptions{ Name: "testName", Output: "testOutput", - Level: kapacitor.CritAlert, + Level: alert.Critical, } } @@ -78,7 +78,7 @@ func (s *Service) 
Test(options interface{}) error { ) } -func (s *Service) Alert(name, output string, level kapacitor.AlertLevel) error { +func (s *Service) Alert(name, output string, level alert.Level) error { if !validNamePattern.MatchString(name) { return fmt.Errorf("invalid name %q for sensu alert. Must match %v", name, validNamePattern) } @@ -106,7 +106,7 @@ func (s *Service) Alert(name, output string, level kapacitor.AlertLevel) error { return nil } -func (s *Service) prepareData(name, output string, level kapacitor.AlertLevel) (*net.TCPAddr, map[string]interface{}, error) { +func (s *Service) prepareData(name, output string, level alert.Level) (*net.TCPAddr, map[string]interface{}, error) { c := s.config() @@ -116,13 +116,13 @@ func (s *Service) prepareData(name, output string, level kapacitor.AlertLevel) ( var status int switch level { - case kapacitor.OKAlert: + case alert.OK: status = 0 - case kapacitor.InfoAlert: + case alert.Info: status = 0 - case kapacitor.WarnAlert: + case alert.Warning: status = 1 - case kapacitor.CritAlert: + case alert.Critical: status = 2 default: status = 3 @@ -141,3 +141,25 @@ func (s *Service) prepareData(name, output string, level kapacitor.AlertLevel) ( return addr, postData, nil } + +type handler struct { + s *Service + logger *log.Logger +} + +func (s *Service) Handler(l *log.Logger) alert.Handler { + return &handler{ + s: s, + logger: l, + } +} + +func (h *handler) Handle(event alert.Event) { + if err := h.s.Alert( + event.State.ID, + event.State.Message, + event.State.Level, + ); err != nil { + h.logger.Println("E! 
failed to send event to Sensu", err) + } +} diff --git a/services/servicetest/service.go b/services/servicetest/service.go index c9c29cce5..6faa95596 100644 --- a/services/servicetest/service.go +++ b/services/servicetest/service.go @@ -51,19 +51,16 @@ func (s *Service) Open() error { // Define API routes s.routes = []httpd.Route{ { - Name: "tests-list", Method: "GET", Pattern: testPath, HandlerFunc: s.handleListTests, }, { - Name: "tests-options", Method: "GET", Pattern: testPathAnchored, HandlerFunc: s.handleTestOptions, }, { - Name: "do-test", Method: "POST", Pattern: testPathAnchored, HandlerFunc: s.handleTest, diff --git a/services/slack/config.go b/services/slack/config.go index 594424923..e258f3420 100644 --- a/services/slack/config.go +++ b/services/slack/config.go @@ -3,10 +3,11 @@ package slack import ( "net/url" - "github.com/influxdata/kapacitor" "github.com/pkg/errors" ) +const DefaultUsername = "kapacitor" + type Config struct { // Whether Slack integration is enabled. Enabled bool `toml:"enabled" override:"enabled"` @@ -29,7 +30,7 @@ type Config struct { func NewConfig() Config { return Config{ - Username: kapacitor.Product, + Username: DefaultUsername, } } diff --git a/services/slack/service.go b/services/slack/service.go index 7777bc918..672cc5f7d 100644 --- a/services/slack/service.go +++ b/services/slack/service.go @@ -10,7 +10,7 @@ import ( "net/http" "sync/atomic" - "github.com/influxdata/kapacitor" + "github.com/influxdata/kapacitor/alert" "github.com/pkg/errors" ) @@ -70,11 +70,11 @@ type attachment struct { } type testOptions struct { - Channel string `json:"channel"` - Message string `json:"message"` - Level kapacitor.AlertLevel `json:"level"` - Username string `json:"username"` - IconEmoji string `json:"icon-emoji"` + Channel string `json:"channel"` + Message string `json:"message"` + Level alert.Level `json:"level"` + Username string `json:"username"` + IconEmoji string `json:"icon-emoji"` } func (s *Service) TestOptions() interface{} { 
@@ -82,7 +82,7 @@ func (s *Service) TestOptions() interface{} { return &testOptions{ Channel: c.Channel, Message: "test slack message", - Level: kapacitor.CritAlert, + Level: alert.Critical, } } @@ -94,7 +94,7 @@ func (s *Service) Test(options interface{}) error { return s.Alert(o.Channel, o.Message, o.Username, o.IconEmoji, o.Level) } -func (s *Service) Alert(channel, message, username, iconEmoji string, level kapacitor.AlertLevel) error { +func (s *Service) Alert(channel, message, username, iconEmoji string, level alert.Level) error { url, post, err := s.preparePost(channel, message, username, iconEmoji, level) if err != nil { return err @@ -121,7 +121,7 @@ func (s *Service) Alert(channel, message, username, iconEmoji string, level kapa return nil } -func (s *Service) preparePost(channel, message, username, iconEmoji string, level kapacitor.AlertLevel) (string, io.Reader, error) { +func (s *Service) preparePost(channel, message, username, iconEmoji string, level alert.Level) (string, io.Reader, error) { c := s.config() if !c.Enabled { @@ -132,9 +132,9 @@ func (s *Service) preparePost(channel, message, username, iconEmoji string, leve } var color string switch level { - case kapacitor.WarnAlert: + case alert.Warning: color = "warning" - case kapacitor.CritAlert: + case alert.Critical: color = "danger" default: color = "good" @@ -170,3 +170,43 @@ func (s *Service) preparePost(channel, message, username, iconEmoji string, leve return c.URL, &post, nil } + +type HandlerConfig struct { + // Slack channel in which to post messages. + // If empty uses the channel from the configuration. + Channel string `mapstructure:"channel"` + + // Username of the Slack bot. + // If empty uses the username from the configuration. + Username string `mapstructure:"username"` + + // IconEmoji is an emoji name surrounded in ':' characters. + // The emoji image will replace the normal user icon for the slack bot. 
+ IconEmoji string `mapstructure:"icon-emoji"` +} + +type handler struct { + s *Service + c HandlerConfig + logger *log.Logger +} + +func (s *Service) Handler(c HandlerConfig, l *log.Logger) alert.Handler { + return &handler{ + s: s, + c: c, + logger: l, + } +} + +func (h *handler) Handle(event alert.Event) { + if err := h.s.Alert( + h.c.Channel, + event.State.Message, + h.c.Username, + h.c.IconEmoji, + event.State.Level, + ); err != nil { + h.logger.Println("E! failed to send event to Slack", err) + } +} diff --git a/services/slack/slacktest/slacktest.go b/services/slack/slacktest/slacktest.go new file mode 100644 index 000000000..9157de67c --- /dev/null +++ b/services/slack/slacktest/slacktest.go @@ -0,0 +1,64 @@ +package slacktest + +import ( + "encoding/json" + "net/http" + "net/http/httptest" + "sync" +) + +type Server struct { + mu sync.Mutex + ts *httptest.Server + URL string + requests []Request + closed bool +} + +func NewServer() *Server { + s := new(Server) + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + sr := Request{ + URL: r.URL.String(), + } + dec := json.NewDecoder(r.Body) + dec.Decode(&sr.PostData) + s.mu.Lock() + s.requests = append(s.requests, sr) + s.mu.Unlock() + })) + s.ts = ts + s.URL = ts.URL + return s +} +func (s *Server) Requests() []Request { + s.mu.Lock() + defer s.mu.Unlock() + return s.requests +} +func (s *Server) Close() { + if s.closed { + return + } + s.closed = true + s.ts.Close() +} + +type Request struct { + URL string + PostData PostData +} + +type PostData struct { + Channel string `json:"channel"` + Username string `json:"username"` + Text string `json:"text"` + Attachments []Attachment `json:"attachments"` +} + +type Attachment struct { + Fallback string `json:"fallback"` + Color string `json:"color"` + Text string `json:"text"` + Mrkdwn_in []string `json:"mrkdwn_in"` +} diff --git a/services/smtp/service.go b/services/smtp/service.go index ea7ff8884..da3cb9992 100644 --- 
a/services/smtp/service.go +++ b/services/smtp/service.go @@ -9,6 +9,8 @@ import ( "sync/atomic" "time" + "github.com/influxdata/kapacitor/alert" + "gopkg.in/gomail.v2" ) @@ -123,10 +125,15 @@ func (s *Service) runMailer() { d, idleTimeout = s.dialer() var conn gomail.SendCloser + defer func() { + if conn != nil { + conn.Close() + } + }() + var err error open := false - done := false - for !done { + for { timer := time.NewTimer(idleTimeout) select { case <-s.updates: @@ -142,8 +149,7 @@ func (s *Service) runMailer() { open = false case m, ok := <-s.mail: if !ok { - done = true - break + return } if !open { if conn, err = d.Dial(); err != nil { @@ -223,3 +229,32 @@ func (s *Service) Test(options interface{}) error { o.Body, ) } + +type HandlerConfig struct { + // List of email recipients. + To []string `mapstructure:"to"` +} + +type handler struct { + s *Service + c HandlerConfig + logger *log.Logger +} + +func (s *Service) Handler(c HandlerConfig, l *log.Logger) alert.Handler { + return &handler{ + s: s, + c: c, + logger: l, + } +} + +func (h *handler) Handle(event alert.Event) { + if err := h.s.SendMail( + h.c.To, + event.State.Message, + event.State.Details, + ); err != nil { + h.logger.Println("E! 
failed to send email", err) + } +} diff --git a/services/smtp/smtptest/smtptest.go b/services/smtp/smtptest/smtptest.go new file mode 100644 index 000000000..ae21f0a49 --- /dev/null +++ b/services/smtp/smtptest/smtptest.go @@ -0,0 +1,183 @@ +package smtptest + +import ( + "fmt" + "io/ioutil" + "net" + "net/mail" + "net/textproto" + "strconv" + "sync" +) + +type Server struct { + Host string + Port int + Err error + + l *net.TCPListener + wg sync.WaitGroup + mu sync.Mutex + sentMessages []*Message + errors []error +} + +func NewServer() (*Server, error) { + laddr, err := net.ResolveTCPAddr("tcp", "localhost:0") + if err != nil { + return nil, err + } + l, err := net.ListenTCP("tcp", laddr) + if err != nil { + return nil, err + } + + addr := l.Addr() + host, portStr, err := net.SplitHostPort(addr.String()) + if err != nil { + return nil, err + } + port, err := strconv.ParseInt(portStr, 10, 64) + if err != nil { + return nil, err + } + s := &Server{ + Host: host, + Port: int(port), + l: l, + } + s.wg.Add(1) + go func() { + defer s.wg.Done() + s.run() + }() + return s, nil +} + +func (s *Server) Errors() []error { + return s.errors +} + +func (s *Server) SentMessages() []*Message { + return s.sentMessages +} + +func (s *Server) Close() error { + s.l.Close() + s.wg.Wait() + return nil +} + +func (s *Server) run() { + for { + conn, err := s.l.Accept() + if err != nil { + return + } + s.wg.Add(1) + go func() { + defer s.wg.Done() + defer conn.Close() + s.handleConn(conn) + }() + } +} + +const ( + replyGreeting = "220 hello" + replyOK = "250 Ok" + replyData = "354 Go ahead" + replyGoodbye = "221 Goodbye" +) + +// handleConn takes a connection and implements a simplified SMTP protocol, +// while capturing the message contents. 
+func (s *Server) handleConn(conn net.Conn) { + var err error + var line string + tc := textproto.NewConn(conn) + err = tc.PrintfLine(replyGreeting) + if err != nil { + goto FAIL + } + for { + line, err = tc.ReadLine() + if err != nil { + goto FAIL + } + if len(line) < 4 { + err = fmt.Errorf("unexpected data %q", line) + goto FAIL + } + switch line[:4] { + case "EHLO", "MAIL", "RCPT": + tc.PrintfLine(replyOK) + case "DATA": + var message *mail.Message + var body []byte + err = tc.PrintfLine(replyData) + if err != nil { + goto FAIL + } + dotReader := tc.DotReader() + message, err = mail.ReadMessage(dotReader) + if err != nil { + goto FAIL + } + body, err = ioutil.ReadAll(message.Body) + if err != nil { + goto FAIL + } + s.mu.Lock() + s.sentMessages = append(s.sentMessages, &Message{ + Header: message.Header, + Body: string(body), + }) + s.mu.Unlock() + err = tc.PrintfLine(replyOK) + if err != nil { + goto FAIL + } + case "QUIT": + err = tc.PrintfLine(replyGoodbye) + if err != nil { + goto FAIL + } + return + } + } +FAIL: + tc.PrintfLine(replyGoodbye) + s.mu.Lock() + s.errors = append(s.errors, err) + s.mu.Unlock() +} + +type Message struct { + Header mail.Header + Body string +} + +// Compare returns a useful error message if the two message are note equal. +// Only headers that are present in the exp message are compared, thus ignoring any extra headers in the got message. +func (exp *Message) Compare(got *Message) error { + if exp.Body != got.Body { + return fmt.Errorf("unequal bodies:\ngot\n%q\nexp\n%q\n", got.Body, exp.Body) + } + // Compare only the header keys specified in the exp message. 
+ for k, ev := range exp.Header { + gv, ok := got.Header[k] + if !ok { + return fmt.Errorf("missing header %s", k) + } + if len(gv) != len(ev) { + return fmt.Errorf("unexpected header %s: got %v exp %v", k, gv, ev) + } + for i := range ev { + if gv[i] != ev[i] { + return fmt.Errorf("unexpected header %s: got %v exp %v", k, gv, ev) + } + } + } + return nil +} diff --git a/services/stats/service.go b/services/stats/service.go index d7eeb962e..b70202774 100644 --- a/services/stats/service.go +++ b/services/stats/service.go @@ -23,7 +23,6 @@ package stats import ( - "errors" "log" "sync" "time" @@ -88,7 +87,7 @@ func (s *Service) Close() error { s.mu.Lock() defer s.mu.Unlock() if !s.open { - return errors.New("error closing stats service: service not open") + return nil } s.open = false close(s.closing) diff --git a/services/storage/bolt.go b/services/storage/bolt.go index 57bc2c565..c8a541998 100644 --- a/services/storage/bolt.go +++ b/services/storage/bolt.go @@ -19,90 +19,168 @@ func NewBolt(db *bolt.DB, bucket string) *Bolt { } } +func (b *Bolt) View(f func(tx ReadOnlyTx) error) error { + return DoView(b, f) +} + +func (b *Bolt) Update(f func(tx Tx) error) error { + return DoUpdate(b, f) +} + +func (b *Bolt) put(tx *bolt.Tx, key string, value []byte) error { + bucket, err := tx.CreateBucketIfNotExists(b.bucket) + if err != nil { + return err + } + err = bucket.Put([]byte(key), value) + if err != nil { + return err + } + return nil +} + func (b *Bolt) Put(key string, value []byte) error { return b.db.Update(func(tx *bolt.Tx) error { - bucket, err := tx.CreateBucketIfNotExists(b.bucket) - if err != nil { - return err - } - err = bucket.Put([]byte(key), value) - if err != nil { - return err - } - return nil + return b.put(tx, key, value) }) } -func (b *Bolt) Get(key string) (*KeyValue, error) { - var value []byte - err := b.db.View(func(tx *bolt.Tx) error { - bucket := tx.Bucket(b.bucket) - if bucket == nil { - return ErrNoKeyExists - } - - val := 
bucket.Get([]byte(key)) - if val == nil { - return ErrNoKeyExists - } - value = make([]byte, len(val)) - copy(value, val) - return nil - }) - if err != nil { - return nil, err + +func (b *Bolt) get(tx *bolt.Tx, key string) (*KeyValue, error) { + bucket := tx.Bucket(b.bucket) + if bucket == nil { + return nil, ErrNoKeyExists } + val := bucket.Get([]byte(key)) + if val == nil { + return nil, ErrNoKeyExists + } + value := make([]byte, len(val)) + copy(value, val) return &KeyValue{ Key: key, Value: value, }, nil } +func (b *Bolt) Get(key string) (kv *KeyValue, err error) { + err = b.db.View(func(tx *bolt.Tx) error { + kv, err = b.get(tx, key) + return err + }) + return +} + +func (b *Bolt) delete(tx *bolt.Tx, key string) error { + bucket := tx.Bucket(b.bucket) + if bucket == nil { + return nil + } + return bucket.Delete([]byte(key)) +} + func (b *Bolt) Delete(key string) error { return b.db.Update(func(tx *bolt.Tx) error { - bucket := tx.Bucket(b.bucket) - if bucket == nil { - return nil - } - return bucket.Delete([]byte(key)) + return b.delete(tx, key) }) } -func (b *Bolt) Exists(key string) (bool, error) { - var exists bool - err := b.db.View(func(tx *bolt.Tx) error { - bucket := tx.Bucket(b.bucket) - if bucket == nil { - return nil - } +func (b *Bolt) exists(tx *bolt.Tx, key string) (bool, error) { + bucket := tx.Bucket(b.bucket) + if bucket == nil { + return false, nil + } + val := bucket.Get([]byte(key)) + exists := val != nil + return exists, nil +} - val := bucket.Get([]byte(key)) - exists = val != nil - return nil +func (b *Bolt) Exists(key string) (exists bool, err error) { + err = b.db.View(func(tx *bolt.Tx) error { + exists, err = b.exists(tx, key) + return err }) - return exists, err + return +} + +func (b *Bolt) list(tx *bolt.Tx, prefixStr string) (kvs []*KeyValue, err error) { + bucket := tx.Bucket(b.bucket) + if bucket == nil { + return + } + + cursor := bucket.Cursor() + prefix := []byte(prefixStr) + + for key, v := cursor.Seek(prefix); 
+// boltTx wraps an underlying bolt.Tx type to implement the Tx interface.
+// IndexedStore provides basic CRUD operations and maintains indexes.
+type IndexedStore struct { + store Interface + + dataPrefix string + indexesPrefix string + + indexes []Index + + newObject NewObjectF +} + +type IndexedStoreConfig struct { + Prefix string + DataPrefix string + IndexesPrefix string + NewObject NewObjectF + Indexes []Index +} + +func DefaultIndexedStoreConfig(prefix string, newObject NewObjectF) IndexedStoreConfig { + return IndexedStoreConfig{ + Prefix: prefix, + DataPrefix: defaultDataPrefix, + IndexesPrefix: defaultIndexesPrefix, + NewObject: newObject, + Indexes: []Index{{ + Name: DefaultIDIndex, + Unique: true, + ValueFunc: func(o BinaryObject) (string, error) { + return o.ObjectID(), nil + }, + }}, + } +} + +func validPath(p string) bool { + return !strings.Contains(p, "/") +} + +func (c IndexedStoreConfig) Validate() error { + if c.Prefix == "" { + return errors.New("must provide a prefix") + } + if !validPath(c.Prefix) { + return fmt.Errorf("invalid prefix %q", c.Prefix) + } + if !validPath(c.DataPrefix) { + return fmt.Errorf("invalid data prefix %q", c.DataPrefix) + } + if !validPath(c.IndexesPrefix) { + return fmt.Errorf("invalid indexes prefix %q", c.IndexesPrefix) + } + if c.IndexesPrefix == c.DataPrefix { + return fmt.Errorf("data prefix and indexes prefix must be different, both are %q", c.IndexesPrefix) + } + if c.NewObject == nil { + return errors.New("must provide a NewObject function") + } + for _, idx := range c.Indexes { + if !validPath(idx.Name) { + return fmt.Errorf("invalid index name %q", idx.Name) + } + if idx.ValueFunc == nil { + return fmt.Errorf("index %q does not have a ValueF function", idx.Name) + } + } + return nil +} + +func NewIndexedStore(store Interface, c IndexedStoreConfig) (*IndexedStore, error) { + if err := c.Validate(); err != nil { + return nil, err + } + return &IndexedStore{ + store: store, + dataPrefix: path.Join("/", c.Prefix, c.DataPrefix) + "/", + indexesPrefix: path.Join("/", c.Prefix, c.IndexesPrefix), + indexes: c.Indexes, + newObject: c.NewObject, + }, nil +} + 
+// Create a key for the object data +func (s *IndexedStore) dataKey(id string) string { + return s.dataPrefix + id +} + +// Create a key for a given index and value. +// +// Indexes are maintained via a 'directory' like system: +// +// // -- contains encoded object data +// //id/ -- contains the object ID +// +// As such to list all handlers in ID sorted order use the //id/ directory. +func (s *IndexedStore) indexKey(index, value string) string { + return path.Join(s.indexesPrefix, index, value) +} + +func (s *IndexedStore) get(tx ReadOnlyTx, id string) (BinaryObject, error) { + key := s.dataKey(id) + if exists, err := tx.Exists(key); err != nil { + return nil, err + } else if !exists { + return nil, ErrNoObjectExists + } + kv, err := tx.Get(key) + if err != nil { + return nil, err + } + o := s.newObject() + err = o.UnmarshalBinary(kv.Value) + return o, err + +} + +func (s *IndexedStore) Get(id string) (o BinaryObject, err error) { + err = s.store.View(func(tx ReadOnlyTx) error { + o, err = s.get(tx, id) + return err + }) + return +} + +func (s *IndexedStore) Create(o BinaryObject) error { + return s.put(o, false, false) +} + +func (s *IndexedStore) Put(o BinaryObject) error { + return s.put(o, true, false) +} + +func (s *IndexedStore) Replace(o BinaryObject) error { + return s.put(o, true, true) +} + +func (s *IndexedStore) put(o BinaryObject, allowReplace, requireReplace bool) error { + return s.store.Update(func(tx Tx) error { + key := s.dataKey(o.ObjectID()) + + replacing := false + old, err := s.get(tx, o.ObjectID()) + if err != nil { + if err != ErrNoObjectExists || (requireReplace && err == ErrNoObjectExists) { + return err + } + } else if !allowReplace { + return ErrObjectExists + } else { + replacing = true + } + + data, err := o.MarshalBinary() + if err != nil { + return err + } + + // Put data + err = tx.Put(key, data) + if err != nil { + return err + } + // Put all indexes + for _, idx := range s.indexes { + // Get new index key + newValue, err := 
idx.ValueOf(o) + if err != nil { + return err + } + newIndexKey := s.indexKey(idx.Name, newValue) + + // Get old index key, if we are replacing + var oldValue string + if replacing { + var err error + oldValue, err = idx.ValueOf(old) + if err != nil { + return err + } + } + oldIndexKey := s.indexKey(idx.Name, oldValue) + + if !replacing || (replacing && oldIndexKey != newIndexKey) { + // Update new key + err := tx.Put(newIndexKey, []byte(o.ObjectID())) + if err != nil { + return err + } + if replacing { + // Remove old key + err = tx.Delete(oldIndexKey) + if err != nil { + return err + } + } + } + } + return nil + }) +} + +func (s *IndexedStore) Delete(id string) error { + return s.store.Update(func(tx Tx) error { + o, err := s.get(tx, id) + if err == ErrNoObjectExists { + // Nothing to do + return nil + } else if err != nil { + return err + } + + // Delete object + key := s.dataKey(id) + err = tx.Delete(key) + if err != nil { + return err + } + + // Delete all indexes + for _, idx := range s.indexes { + value, err := idx.ValueOf(o) + if err != nil { + return err + } + indexKey := s.indexKey(idx.Name, value) + err = tx.Delete(indexKey) + if err != nil { + return err + } + } + return nil + }) +} + +// List returns a list of objects that match a given pattern. +// If limit < 0, then no limit is enforced. +func (s *IndexedStore) List(index, pattern string, offset, limit int) ([]BinaryObject, error) { + return s.list(index, pattern, offset, limit, false) +} + +// ReverseList returns a list of objects that match a given pattern, using reverse sort. +// If limit < 0, then no limit is enforced. 
+func (s *IndexedStore) ReverseList(index, pattern string, offset, limit int) ([]BinaryObject, error) { + return s.list(index, pattern, offset, limit, true) +} + +func (s *IndexedStore) list(index, pattern string, offset, limit int, reverse bool) (objects []BinaryObject, err error) { + err = s.store.View(func(tx ReadOnlyTx) error { + // List all object ids sorted by index + ids, err := tx.List(s.indexKey(index, "") + "/") + if err != nil { + return err + } + if reverse { + // Reverse to sort + for i, j := 0, len(ids)-1; i < j; i, j = i+1, j-1 { + ids[i], ids[j] = ids[j], ids[i] + } + } + + var match func([]byte) bool + if pattern != "" { + match = func(value []byte) bool { + id := string(value) + matched, _ := path.Match(pattern, id) + return matched + } + } else { + match = func([]byte) bool { return true } + } + var matches []string + if limit >= 0 { + matches = DoListFunc(ids, match, offset, limit) + } else { + matches = make([]string, len(ids)) + for i := range ids { + matches[i] = string(ids[i].Value) + } + } + + objects = make([]BinaryObject, len(matches)) + for i, id := range matches { + data, err := tx.Get(s.dataKey(id)) + if err != nil { + return err + } + o := s.newObject() + err = o.UnmarshalBinary(data.Value) + if err != nil { + return err + } + objects[i] = o + } + return nil + }) + return +} + +func ImpossibleTypeErr(exp interface{}, got interface{}) error { + return fmt.Errorf("impossible error, object not of type %T, got %T", exp, got) +} diff --git a/services/storage/indexed_test.go b/services/storage/indexed_test.go new file mode 100644 index 000000000..b5cc5ef2b --- /dev/null +++ b/services/storage/indexed_test.go @@ -0,0 +1,220 @@ +package storage_test + +import ( + "encoding/json" + "reflect" + "testing" + "time" + + "github.com/davecgh/go-spew/spew" + "github.com/influxdata/kapacitor/services/storage" +) + +type object struct { + ID string + Value string + Date time.Time +} + +func (o object) ObjectID() string { + return o.ID +} + +func (o 
object) MarshalBinary() ([]byte, error) { + return json.Marshal(o) +} + +func (o *object) UnmarshalBinary(data []byte) error { + return json.Unmarshal(data, o) +} + +func TestIndexedStore_CRUD(t *testing.T) { + for name, sc := range stores { + t.Run(name, func(t *testing.T) { + db, err := sc() + if err != nil { + t.Fatal(err) + } + defer db.Close() + + s := db.Store("crud") + c := storage.DefaultIndexedStoreConfig("crud", func() storage.BinaryObject { + return new(object) + }) + c.Indexes = append(c.Indexes, storage.Index{ + Name: "date", + ValueFunc: func(o storage.BinaryObject) (string, error) { + obj, ok := o.(*object) + if !ok { + return "", storage.ImpossibleTypeErr(obj, o) + } + return obj.Date.UTC().Format(time.RFC3339), nil + }, + }) + is, err := storage.NewIndexedStore(s, c) + if err != nil { + t.Fatal(err) + } + + // Create new object + o1 := &object{ + ID: "1", + Value: "obj1", + Date: time.Date(2017, 1, 1, 0, 0, 0, 0, time.UTC), + } + if err := is.Create(o1); err != nil { + t.Fatal(err) + } + if err := is.Create(o1); err != storage.ErrObjectExists { + t.Fatal("expected ErrObjectExists creating object1 got", err) + } + // Check o1 + got1, err := is.Get("1") + if err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(got1, o1) { + t.Errorf("unexpected object 1 retrieved:\ngot\n%s\nexp\n%s\n", spew.Sdump(got1), spew.Sdump(o1)) + } + // Check ID list + expIDList := []storage.BinaryObject{o1} + gotIDList, err := is.List("id", "", 0, 100) + if err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(gotIDList, expIDList) { + t.Errorf("unexpected object list by ID:\ngot\n%s\nexp\n%s\n", spew.Sdump(gotIDList), spew.Sdump(expIDList)) + } + // Check Date list + expDateList := []storage.BinaryObject{o1} + gotDateList, err := is.List("date", "", 0, 100) + if err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(gotDateList, expDateList) { + t.Errorf("unexpected object list by Date:\ngot\n%s\nexp\n%s\n", spew.Sdump(gotDateList), spew.Sdump(expDateList)) + } + + // 
Create second object, using put + o2 := &object{ + ID: "2", + Value: "obj2", + Date: time.Date(2016, 1, 1, 0, 0, 0, 0, time.UTC), + } + if err := is.Put(o2); err != nil { + t.Fatal(err) + } + if err := is.Create(o2); err != storage.ErrObjectExists { + t.Fatal("expected ErrObjectExists creating object2 got", err) + } + // Check o2 + got2, err := is.Get("2") + if err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(got2, o2) { + t.Errorf("unexpected object 2 retrieved:\ngot\n%s\nexp\n%s\n", spew.Sdump(got2), spew.Sdump(o2)) + } + // Check ID list + expIDList = []storage.BinaryObject{o1, o2} + gotIDList, err = is.List("id", "", 0, 100) + if err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(gotIDList, expIDList) { + t.Errorf("unexpected object list by ID:\ngot\n%s\nexp\n%s\n", spew.Sdump(gotIDList), spew.Sdump(expIDList)) + } + // Check Date list + expDateList = []storage.BinaryObject{o2, o1} + gotDateList, err = is.List("date", "", 0, 100) + if err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(gotDateList, expDateList) { + t.Errorf("unexpected object list by Date:\ngot\n%s\nexp\n%s\n", spew.Sdump(gotDateList), spew.Sdump(expDateList)) + } + + // Modify objects + o1.Value = "modified obj1" + is.Replace(o1) + o2.Date = time.Date(2018, 1, 1, 0, 0, 0, 0, time.UTC) + is.Put(o2) + + // Check o1 + got1, err = is.Get("1") + if err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(got1, o1) { + t.Errorf("unexpected object 1 retrieved after modification:\ngot\n%s\nexp\n%s\n", spew.Sdump(got1), spew.Sdump(o1)) + } + + // Check o2 + got2, err = is.Get("2") + if err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(got2, o2) { + t.Errorf("unexpected object 2 retrieved after modification:\ngot\n%s\nexp\n%s\n", spew.Sdump(got2), spew.Sdump(o2)) + } + + // Check ID list + expIDList = []storage.BinaryObject{o1, o2} + gotIDList, err = is.List("id", "", 0, 100) + if err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(gotIDList, expIDList) { + t.Errorf("unexpected 
object list by ID after modification:\ngot\n%s\nexp\n%s\n", spew.Sdump(gotIDList), spew.Sdump(expIDList)) + } + // Check Date list + expDateList = []storage.BinaryObject{o1, o2} + gotDateList, err = is.List("date", "", 0, 100) + if err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(gotDateList, expDateList) { + t.Errorf("unexpected object list by Date after modification:\ngot\n%s\nexp\n%s\n", spew.Sdump(gotDateList), spew.Sdump(expDateList)) + } + + // Delete object 2 + if err := is.Delete("2"); err != nil { + t.Fatal(err) + } + + // Check o2 + if _, err := is.Get("2"); err != storage.ErrNoObjectExists { + t.Error("expected ErrNoObjectExists for delete object 2, got:", err) + } + + // Check ID list + expIDList = []storage.BinaryObject{o1} + gotIDList, err = is.List("id", "", 0, 100) + if err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(gotIDList, expIDList) { + t.Errorf("unexpected object list by ID after modification:\ngot\n%s\nexp\n%s\n", spew.Sdump(gotIDList), spew.Sdump(expIDList)) + } + // Check Date list + expDateList = []storage.BinaryObject{o1} + gotDateList, err = is.List("date", "", 0, 100) + if err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(gotDateList, expDateList) { + t.Errorf("unexpected object list by Date after modification:\ngot\n%s\nexp\n%s\n", spew.Sdump(gotDateList), spew.Sdump(expDateList)) + } + + // Try to replace non existent object + o3 := &object{ + ID: "3", + Value: "obj3", + Date: time.Date(2016, 1, 1, 0, 0, 0, 0, time.UTC), + } + if err := is.Replace(o3); err != storage.ErrNoObjectExists { + t.Error("expected error replacing non existent object, got:", err) + } + }) + } +} diff --git a/services/storage/mem.go b/services/storage/mem.go new file mode 100644 index 000000000..f6b751280 --- /dev/null +++ b/services/storage/mem.go @@ -0,0 +1,171 @@ +package storage + +import ( + "fmt" + "sort" + "strings" + "sync" +) + +// MemStore is an in memory only implementation of the storage.Interface. 
+// It is intended to be used for testing use cases only.
+ s.mu.Lock() + store := make(map[string][]byte, len(s.store)) + for k, v := range s.store { + store[k] = v + } + return &memTx{ + m: s, + store: store, + }, nil +} + +type memTxState int + +const ( + unCommitted memTxState = iota + committed + rolledback +) + +type memTx struct { + state memTxState + m *MemStore + store map[string][]byte +} + +func (t *memTx) Get(key string) (*KeyValue, error) { + value, ok := t.store[key] + if !ok { + return nil, ErrNoKeyExists + } + return &KeyValue{Key: key, Value: value}, nil +} + +func (t *memTx) Exists(key string) (bool, error) { + _, ok := t.store[key] + return ok, nil +} + +func (t *memTx) List(prefix string) ([]*KeyValue, error) { + kvs := make([]*KeyValue, 0, len(t.store)) + for k, v := range t.store { + if strings.HasPrefix(k, prefix) { + kvs = append(kvs, &KeyValue{Key: k, Value: v}) + } + } + sort.Sort(keySortedKVs(kvs)) + return kvs, nil +} + +func (t *memTx) Put(key string, value []byte) error { + t.store[key] = value + return nil +} + +func (t *memTx) Delete(key string) error { + delete(t.store, key) + return nil +} + +func (t *memTx) Commit() error { + if t.state == unCommitted { + t.m.store = t.store + t.state = committed + t.m.mu.Unlock() + return nil + } + return fmt.Errorf("cannot commit transaction, transaction in state %v", t.state) +} + +func (t *memTx) Rollback() error { + if t.state == unCommitted { + t.state = rolledback + t.m.mu.Unlock() + } + return nil +} diff --git a/services/storage/storage.go b/services/storage/storage.go index 059e88a95..aef5376d2 100644 --- a/services/storage/storage.go +++ b/services/storage/storage.go @@ -7,28 +7,100 @@ var ( ErrNoKeyExists = errors.New("no key exists") ) -// Common interface for interacting with a simple Key/Value storage -type Interface interface { - // Store a value. - Put(key string, value []byte) error +// ReadOperator provides an interface for performing read operations. +type ReadOperator interface { // Retrieve a value. 
Get(key string) (*KeyValue, error) - // Delete a key. - // Deleting a non-existent key is not an error. - Delete(key string) error // Check if a key exists> Exists(key string) (bool, error) // List all values with given prefix. List(prefix string) ([]*KeyValue, error) } +// WriteOperator provides an interface for performing write operations. +type WriteOperator interface { + // Store a value. + Put(key string, value []byte) error + // Delete a key. + // Deleting a non-existent key is not an error. + Delete(key string) error +} + +// ReadOnlyTx provides an interface for performing read operations in a single transaction. +type ReadOnlyTx interface { + ReadOperator + + // Rollback signals that the transaction is complete. + // If the transaction was not committed, then all changes are reverted. + // Rollback must always be called for every transaction. + Rollback() error +} + +// Tx provides an interface for performing read and write storage operations in a single transaction. +type Tx interface { + ReadOnlyTx + WriteOperator + + // Commit finalizes the transaction. + // Once a transaction is committed, rolling back the transaction has no effect. + Commit() error +} + +type TxOperator interface { + // BeginReadOnlyTx starts a new read only transaction. The transaction must be rolledback. + // Leaving a transaction open can block other operations and otherwise + // significantly degrade the performance of the storage backend. + // A single go routine should only have one transaction open at a time. + BeginReadOnlyTx() (ReadOnlyTx, error) + // BeginTx starts a new transaction for reads and writes. The transaction must be committed or rolledback. + // Leaving a transaction open can block other operations and otherwise + // significantly degrade the performance of the storage backend. + // A single go routine should only have one transaction open at a time. 
+// DoView provides a complete implementation of Interface.View for a TxOperator.
-func DoListFunc(list []*KeyValue, match func(value []byte) bool, offset, limit int) [][]byte { +func DoListFunc(list []*KeyValue, match func(value []byte) bool, offset, limit int) []string { l := len(list) upper := offset + limit if upper > l { @@ -39,7 +111,7 @@ func DoListFunc(list []*KeyValue, match func(value []byte) bool, offset, limit i // No more results return nil } - matches := make([][]byte, 0, size) + matches := make([]string, 0, size) i := 0 for _, kv := range list { if !match(kv.Value) { @@ -53,7 +125,7 @@ func DoListFunc(list []*KeyValue, match func(value []byte) bool, offset, limit i continue } - matches = append(matches, kv.Value) + matches = append(matches, string(kv.Value)) // Stop once limit reached if len(matches) == size { diff --git a/services/storage/storage_test.go b/services/storage/storage_test.go new file mode 100644 index 000000000..5a1b21584 --- /dev/null +++ b/services/storage/storage_test.go @@ -0,0 +1,313 @@ +package storage_test + +import ( + "bytes" + "fmt" + "io/ioutil" + "os" + "path/filepath" + "testing" + + "github.com/boltdb/bolt" + "github.com/influxdata/kapacitor/services/storage" + "github.com/pkg/errors" +) + +// Error used to specifically trigger a rollback for tests. +var rollbackErr = errors.New("rollback") + +type createStoreCloser func() (storeCloser, error) + +// stores is a map of all storage implementations, +// each test will be run against the stores found in this map. 
+var stores = map[string]createStoreCloser{ + "bolt": newBolt, + "mem": newMemStore, +} + +type storeCloser interface { + Store(namespace string) storage.Interface + Close() +} + +type boltDB struct { + db *bolt.DB + dir string +} + +func (b boltDB) Close() { + b.db.Close() + os.RemoveAll(b.dir) +} + +func newBolt() (storeCloser, error) { + tmpDir, err := ioutil.TempDir("", "storage-bolt") + db, err := bolt.Open(filepath.Join(tmpDir, "bolt.db"), 0600, nil) + if err != nil { + return boltDB{}, err + } + return boltDB{ + db: db, + dir: tmpDir, + }, nil +} + +func (b boltDB) Store(bucket string) storage.Interface { + return storage.NewBolt(b.db, bucket) +} + +type memStore struct { + stores map[string]storage.Interface +} + +func newMemStore() (storeCloser, error) { + return memStore{ + stores: make(map[string]storage.Interface), + }, nil +} + +func (s memStore) Store(name string) storage.Interface { + m, ok := s.stores[name] + if ok { + return m + } + m = storage.NewMemStore(name) + s.stores[name] = m + return m +} + +func (s memStore) Close() { +} + +func TestStorage_CRUD(t *testing.T) { + for name, sc := range stores { + t.Run(name, func(t *testing.T) { + db, err := sc() + if err != nil { + t.Fatal(err) + } + defer db.Close() + + s := db.Store("crud") + s.Update(func(tx storage.Tx) error { + key := "key0" + value := []byte("test value") + if exists, err := tx.Exists(key); err != nil { + t.Fatal(err) + } else if exists { + t.Fatal("expected key to not exist") + } + + if err := tx.Put(key, value); err != nil { + t.Fatal(err) + } + if exists, err := tx.Exists(key); err != nil { + t.Fatal(err) + } else if !exists { + t.Fatal("expected key to exist") + } + + got, err := tx.Get(key) + if err != nil { + t.Fatal(err) + } + + if !bytes.Equal(got.Value, value) { + t.Fatalf("unexpected value got %q exp %q", string(got.Value), string(value)) + } + + if err := tx.Delete(key); err != nil { + t.Fatal(err) + } + + if exists, err := tx.Exists(key); err != nil { + t.Fatal(err) + } 
else if exists { + t.Fatal("expected key to not exist after delete") + } + return nil + }) + }) + } +} + +func TestStorage_Update(t *testing.T) { + for name, sc := range stores { + t.Run(name, func(t *testing.T) { + db, err := sc() + if err != nil { + t.Fatal(err) + } + defer db.Close() + + s := db.Store("commit") + value := []byte("test value") + err = s.Update(func(tx storage.Tx) error { + return tx.Put("key0", value) + }) + if err != nil { + t.Fatal(err) + } + + var got *storage.KeyValue + err = s.View(func(tx storage.ReadOnlyTx) error { + got, err = tx.Get("key0") + return err + }) + if err != nil { + t.Fatal(err) + } + + if !bytes.Equal(got.Value, value) { + t.Errorf("unexpected value got %q exp %q", string(got.Value), string(value)) + } + }) + } +} + +func TestStorage_Update_Rollback(t *testing.T) { + for name, sc := range stores { + t.Run(name, func(t *testing.T) { + db, err := sc() + if err != nil { + t.Fatal(err) + } + defer db.Close() + + s := db.Store("rollback") + value := []byte("test value") + + // Put value + err = s.Update(func(tx storage.Tx) error { + return tx.Put("key0", value) + }) + if err != nil { + t.Fatal(err) + } + + err = s.Update(func(tx storage.Tx) error { + if err := tx.Put("key0", []byte("overridden value is rolledback")); err != nil { + return err + } + return rollbackErr + }) + + if err == nil { + t.Fatal("expected error") + } else if err != rollbackErr { + t.Fatalf("unexpected error: got %v exp %v", err, rollbackErr) + } + + var got *storage.KeyValue + s.View(func(tx storage.ReadOnlyTx) error { + got, err = tx.Get("key0") + return err + }) + if err != nil { + t.Fatal(err) + } + + if !bytes.Equal(got.Value, value) { + t.Errorf("unexpected value got %q exp %q", string(got.Value), string(value)) + } + }) + } +} + +func TestStorage_Update_Concurrent(t *testing.T) { + for name, sc := range stores { + t.Run(name, func(t *testing.T) { + db, err := sc() + if err != nil { + t.Fatal(err) + } + defer db.Close() + + bucketFmt := func(w int) 
string { + return fmt.Sprintf("bucket%d", w) + } + valueFmt := func(w, i, k int) []byte { + return []byte(fmt.Sprintf("worker %d iteration %d key %d", w, i, k)) + } + keyFmt := func(w, i, k int) string { + return fmt.Sprintf("key%d", k) + } + + putLoop := func(s storage.Interface, w, i, k int) error { + // Begin new transaction + err := s.Update(func(tx storage.Tx) error { + // Put a set of values + for x := 0; x < k; x++ { + v := valueFmt(w, i, x) + k := keyFmt(w, i, x) + if err := tx.Put(k, v); err != nil { + return err + } + } + // Do not commit every third transaction + if i%3 == 0 { + return rollbackErr + } + return nil + }) + // Mask explicit rollback errors + if err == rollbackErr { + err = nil + } + return err + } + + testF := func(s storage.Interface, w, i, k int) error { + for x := 0; x < i; x++ { + if err := putLoop(s, w, x, k); err != nil { + return errors.Wrapf(err, "worker %d", w) + } + } + return nil + } + + // Concurrency counts + w := 10 // number of workers + i := 10 // number of iterations + k := 10 // number of keys to write + + errs := make(chan error, w) + for x := 0; x < w; x++ { + s := db.Store(bucketFmt(x)) + go func(s storage.Interface, w, i, k int) { + errs <- testF(s, w, i, k) + }(s, x, i, k) + } + for x := 0; x < w; x++ { + err := <-errs + if err != nil { + t.Fatal(err) + } + } + + for x := 0; x < w; x++ { + s := db.Store(bucketFmt(x)) + for z := 0; z < k; z++ { + y := i - 1 + if y%3 == 0 { + // The last iteration was not committed, expect the previous + y-- + } + key := keyFmt(x, y, z) + value := valueFmt(x, y, z) + var kv *storage.KeyValue + err := s.View(func(tx storage.ReadOnlyTx) error { + kv, err = tx.Get(key) + return err + }) + if err != nil { + t.Fatalf("%s err:%v", key, err) + } + if !bytes.Equal(kv.Value, value) { + t.Errorf("unexpected value for key %s: got %q exp %q", key, string(kv.Value), string(value)) + } + } + } + }) + } +} diff --git a/services/storage/storagetest/storage.go b/services/storage/storagetest/storage.go 
index fe42fab67..f2230e379 100644 --- a/services/storage/storagetest/storage.go +++ b/services/storage/storagetest/storage.go @@ -1,11 +1,6 @@ package storagetest -import ( - "strings" - "sync" - - "github.com/influxdata/kapacitor/services/storage" -) +import "github.com/influxdata/kapacitor/services/storage" type TestStore struct{} @@ -14,65 +9,5 @@ func New() TestStore { } func (s TestStore) Store(name string) storage.Interface { - return NewMemStore(name) -} - -// Common interface for interacting with a simple Key/Value storage -type MemStore struct { - sync.Mutex - Name string - store map[string][]byte -} - -func NewMemStore(name string) *MemStore { - return &MemStore{ - Name: name, - store: make(map[string][]byte), - } -} - -func (s *MemStore) Put(key string, value []byte) error { - s.Lock() - s.store[key] = value - s.Unlock() - return nil -} - -func (s *MemStore) Get(key string) (*storage.KeyValue, error) { - s.Lock() - value, ok := s.store[key] - s.Unlock() - if !ok { - return nil, storage.ErrNoKeyExists - } - return &storage.KeyValue{ - Key: key, - Value: value, - }, nil -} - -func (s *MemStore) Delete(key string) error { - s.Lock() - delete(s.store, key) - s.Unlock() - return nil -} - -func (s *MemStore) Exists(key string) (bool, error) { - s.Lock() - _, ok := s.store[key] - s.Unlock() - return ok, nil -} - -func (s *MemStore) List(prefix string) ([]*storage.KeyValue, error) { - s.Lock() - kvs := make([]*storage.KeyValue, 0, len(s.store)) - for k, v := range s.store { - if strings.HasPrefix(k, prefix) { - kvs = append(kvs, &storage.KeyValue{Key: k, Value: v}) - } - } - s.Unlock() - return kvs, nil + return storage.NewMemStore(name) } diff --git a/services/storage/version.go b/services/storage/version.go new file mode 100644 index 000000000..95ee6a92b --- /dev/null +++ b/services/storage/version.go @@ -0,0 +1,43 @@ +package storage + +import ( + "bytes" + "encoding/json" + "errors" +) + +// VersionWrapper wraps a structure with a version so that changes +// 
to the structure can be properly decoded. +type VersionWrapper struct { + Version int `json:"version"` + Value *json.RawMessage `json:"value"` +} + +// VersionJSONEncode encodes an object as json wrapping it in a VersionWrapper struct. +func VersionJSONEncode(version int, o interface{}) ([]byte, error) { + raw, err := json.Marshal(o) + if err != nil { + return nil, err + } + rawCopy := make(json.RawMessage, len(raw)) + copy(rawCopy, raw) + wrapper := VersionWrapper{ + Version: version, + Value: &rawCopy, + } + return json.Marshal(wrapper) +} + +// VersionJSONDecode decodes an object that was encoded using VersionJSONEncode. +func VersionJSONDecode(data []byte, decF func(version int, dec *json.Decoder) error) error { + var wrapper VersionWrapper + err := json.Unmarshal(data, &wrapper) + if err != nil { + return err + } + if wrapper.Value == nil { + return errors.New("empty value") + } + dec := json.NewDecoder(bytes.NewReader(*wrapper.Value)) + return decF(wrapper.Version, dec) +} diff --git a/services/talk/service.go b/services/talk/service.go index 8859c2c4a..4461d656e 100644 --- a/services/talk/service.go +++ b/services/talk/service.go @@ -10,6 +10,8 @@ import ( "log" "net/http" "sync/atomic" + + "github.com/influxdata/kapacitor/alert" ) type Service struct { @@ -74,6 +76,7 @@ func (s *Service) Alert(title, text string) error { if err != nil { return err } + resp, err := http.Post(url, "application/json", post) if err != nil { return err @@ -115,3 +118,24 @@ func (s *Service) preparePost(title, text string) (string, io.Reader, error) { return c.URL, &post, nil } + +type handler struct { + s *Service + logger *log.Logger +} + +func (s *Service) Handler(l *log.Logger) alert.Handler { + return &handler{ + s: s, + logger: l, + } +} + +func (h *handler) Handle(event alert.Event) { + if err := h.s.Alert( + event.State.ID, + event.State.Message, + ); err != nil { + h.logger.Println("E! 
failed to send event to Talk", err) + } +} diff --git a/services/talk/talktest/talktest.go b/services/talk/talktest/talktest.go new file mode 100644 index 000000000..c786c7738 --- /dev/null +++ b/services/talk/talktest/talktest.go @@ -0,0 +1,57 @@ +package talktest + +import ( + "encoding/json" + "net/http" + "net/http/httptest" + "sync" +) + +type Server struct { + mu sync.Mutex + ts *httptest.Server + URL string + requests []Request + closed bool +} + +func NewServer() *Server { + s := new(Server) + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + tr := Request{ + URL: r.URL.String(), + } + dec := json.NewDecoder(r.Body) + dec.Decode(&tr.PostData) + s.mu.Lock() + s.requests = append(s.requests, tr) + s.mu.Unlock() + + })) + s.ts = ts + s.URL = ts.URL + return s +} +func (s *Server) Requests() []Request { + s.mu.Lock() + defer s.mu.Unlock() + return s.requests +} +func (s *Server) Close() { + if s.closed { + return + } + s.closed = true + s.ts.Close() +} + +type Request struct { + URL string + PostData PostData +} + +type PostData struct { + Title string `json:"title"` + Text string `json:"text"` + AuthorName string `json:"authorName"` +} diff --git a/services/task_store/dao.go b/services/task_store/dao.go index 53e3fa4de..a3c9cf5e8 100644 --- a/services/task_store/dao.go +++ b/services/task_store/dao.go @@ -137,6 +137,24 @@ type Task struct { LastEnabled time.Time } +type rawTask Task + +func (t Task) ObjectID() string { + return t.ID +} + +func (t Task) MarshalBinary() ([]byte, error) { + var buf bytes.Buffer + enc := gob.NewEncoder(&buf) + err := enc.Encode(rawTask(t)) + return buf.Bytes(), err +} + +func (t *Task) UnmarshalBinary(data []byte) error { + dec := gob.NewDecoder(bytes.NewReader(data)) + return dec.Decode((*rawTask)(t)) +} + type Template struct { // Unique identifier for the task ID string @@ -161,161 +179,69 @@ type Snapshot struct { NodeSnapshots map[string][]byte } -const ( - taskDataPrefix = 
"/tasks/data/" - taskIndexesPrefix = "/tasks/indexes/" - - // Name of ID index - idIndex = "id/" -) - // Key/Value store based implementation of the TaskDAO type taskKV struct { - store storage.Interface + store *storage.IndexedStore } -func newTaskKV(store storage.Interface) *taskKV { - return &taskKV{ - store: store, - } -} - -func (d *taskKV) encodeTask(t Task) ([]byte, error) { - var buf bytes.Buffer - enc := gob.NewEncoder(&buf) - err := enc.Encode(t) - return buf.Bytes(), err -} - -func (d *taskKV) decodeTask(data []byte) (Task, error) { - var task Task - dec := gob.NewDecoder(bytes.NewReader(data)) - err := dec.Decode(&task) - return task, err -} - -// Create a key for the task data -func (d *taskKV) taskDataKey(id string) string { - return taskDataPrefix + id -} - -// Create a key for a given index and value. -// -// Indexes are maintained via a 'directory' like system: -// -// /tasks/data/ID -- contains encoded task data -// /tasks/index/id/ID -- contains the task ID -// -// As such to list all tasks in ID sorted order use the /tasks/index/id/ directory. 
-func (d *taskKV) taskIndexKey(index, value string) string { - return taskIndexesPrefix + index + value -} - -func (d *taskKV) Get(id string) (Task, error) { - key := d.taskDataKey(id) - if exists, err := d.store.Exists(key); err != nil { - return Task{}, err - } else if !exists { - return Task{}, ErrNoTaskExists - } - kv, err := d.store.Get(key) +func newTaskKV(store storage.Interface) (*taskKV, error) { + c := storage.DefaultIndexedStoreConfig("tasks", func() storage.BinaryObject { + return new(Task) + }) + istore, err := storage.NewIndexedStore(store, c) if err != nil { - return Task{}, err + return nil, err } - return d.decodeTask(kv.Value) + return &taskKV{ + store: istore, + }, nil } -func (d *taskKV) Create(t Task) error { - key := d.taskDataKey(t.ID) - - exists, err := d.store.Exists(key) - if err != nil { - return err - } - if exists { +func (kv *taskKV) error(err error) error { + if err == storage.ErrObjectExists { return ErrTaskExists + } else if err == storage.ErrNoObjectExists { + return ErrNoTaskExists } - - data, err := d.encodeTask(t) - if err != nil { - return err - } - // Put data - err = d.store.Put(key, data) - if err != nil { - return err - } - // Put ID index - indexKey := d.taskIndexKey(idIndex, t.ID) - return d.store.Put(indexKey, []byte(t.ID)) + return err } -func (d *taskKV) Replace(t Task) error { - key := d.taskDataKey(t.ID) - - exists, err := d.store.Exists(key) +func (kv *taskKV) Get(id string) (Task, error) { + o, err := kv.store.Get(id) if err != nil { - return err + return Task{}, kv.error(err) } - if !exists { - return ErrNoTaskExists - } - - data, err := d.encodeTask(t) - if err != nil { - return err + t, ok := o.(*Task) + if !ok { + return Task{}, fmt.Errorf("impossible error, object not a Task, got %T", o) } - // Put data - err = d.store.Put(key, data) - if err != nil { - return err - } - return nil + return *t, nil } -func (d *taskKV) Delete(id string) error { - key := d.taskDataKey(id) - indexKey := d.taskIndexKey(idIndex, id) 
+func (kv *taskKV) Create(t Task) error { + return kv.store.Create(&t) +} - dataErr := d.store.Delete(key) - indexErr := d.store.Delete(indexKey) - if dataErr != nil { - return dataErr - } - return indexErr +func (kv *taskKV) Replace(t Task) error { + return kv.store.Replace(&t) } -func (d *taskKV) List(pattern string, offset, limit int) ([]Task, error) { - // Tasks are indexed via their ID only. - // While tasks are sorted in the data section by their ID anyway - // this allows us to do offset/limits and filtering without having to read in all task data. +func (kv *taskKV) Delete(id string) error { + return kv.store.Delete(id) +} - // List all task ids sorted by ID - ids, err := d.store.List(taskIndexesPrefix + idIndex) +func (kv *taskKV) List(pattern string, offset, limit int) ([]Task, error) { + objects, err := kv.store.List(storage.DefaultIDIndex, pattern, offset, limit) if err != nil { return nil, err } - - var match func([]byte) bool - if pattern != "" { - match = func(value []byte) bool { - id := string(value) - matched, _ := path.Match(pattern, id) - return matched - } - } else { - match = func([]byte) bool { return true } - } - matches := storage.DoListFunc(ids, match, offset, limit) - - tasks := make([]Task, len(matches)) - for i, id := range matches { - data, err := d.store.Get(d.taskDataKey(string(id))) - if err != nil { - return nil, err + tasks := make([]Task, len(objects)) + for i, o := range objects { + t, ok := o.(*Task) + if !ok { + return nil, fmt.Errorf("impossible error, object not a Task, got %T", o) } - t, err := d.decodeTask(data.Value) - tasks[i] = t + tasks[i] = *t } return tasks, nil } @@ -325,6 +251,8 @@ const ( templateIndexesPrefix = "/templates/indexes/" // Associate tasks with a template templateTaskPrefix = "/templates/tasks/" + + idIndex = "id/" ) // Key/Value store based implementation of the TemplateDAO @@ -374,154 +302,169 @@ func (d *templateKV) templateTaskAssociationKey(templateId, taskId string) strin return 
templateTaskPrefix + templateId + "/" + taskId } -func (d *templateKV) Get(id string) (Template, error) { - key := d.templateDataKey(id) - if exists, err := d.store.Exists(key); err != nil { - return Template{}, err - } else if !exists { - return Template{}, ErrNoTemplateExists - } - kv, err := d.store.Get(key) - if err != nil { - return Template{}, err - } - return d.decodeTemplate(kv.Value) +func (d *templateKV) Get(id string) (t Template, err error) { + err = d.store.View(func(tx storage.ReadOnlyTx) error { + key := d.templateDataKey(id) + if exists, err := tx.Exists(key); err != nil { + return err + } else if !exists { + return ErrNoTemplateExists + } + kv, err := tx.Get(key) + if err != nil { + return err + } + t, err = d.decodeTemplate(kv.Value) + return err + }) + return } func (d *templateKV) Create(t Template) error { - key := d.templateDataKey(t.ID) + return d.store.Update(func(tx storage.Tx) error { + key := d.templateDataKey(t.ID) - exists, err := d.store.Exists(key) - if err != nil { - return err - } - if exists { - return ErrTemplateExists - } + exists, err := tx.Exists(key) + if err != nil { + return err + } + if exists { + return ErrTemplateExists + } - data, err := d.encodeTemplate(t) - if err != nil { - return err - } - // Put data - err = d.store.Put(key, data) - if err != nil { - return err - } - // Put ID index - indexKey := d.templateIndexKey(idIndex, t.ID) - return d.store.Put(indexKey, []byte(t.ID)) + data, err := d.encodeTemplate(t) + if err != nil { + return err + } + // Put data + err = tx.Put(key, data) + if err != nil { + return err + } + // Put ID index + indexKey := d.templateIndexKey(idIndex, t.ID) + return tx.Put(indexKey, []byte(t.ID)) + }) } func (d *templateKV) Replace(t Template) error { - key := d.templateDataKey(t.ID) + return d.store.Update(func(tx storage.Tx) error { + key := d.templateDataKey(t.ID) - exists, err := d.store.Exists(key) - if err != nil { - return err - } - if !exists { - return ErrNoTemplateExists - } + 
exists, err := tx.Exists(key) + if err != nil { + return err + } + if !exists { + return ErrNoTemplateExists + } - data, err := d.encodeTemplate(t) - if err != nil { - return err - } - // Put data - err = d.store.Put(key, data) - if err != nil { - return err - } - return nil + data, err := d.encodeTemplate(t) + if err != nil { + return err + } + // Put data + err = tx.Put(key, data) + if err != nil { + return err + } + return nil + }) } func (d *templateKV) Delete(id string) error { - key := d.templateDataKey(id) - indexKey := d.templateIndexKey(idIndex, id) + return d.store.Update(func(tx storage.Tx) error { + key := d.templateDataKey(id) + indexKey := d.templateIndexKey(idIndex, id) - // Try and delete everything ignore errors until after. + if err := tx.Delete(key); err != nil { + return err + } + if err := tx.Delete(indexKey); err != nil { + return err + } - dataErr := d.store.Delete(key) - indexErr := d.store.Delete(indexKey) + // Delete all associations + ids, err := tx.List(templateTaskPrefix + id + "/") + if err != nil { + return nil + } - // Delete all associations - var lastErr error - ids, err := d.store.List(templateTaskPrefix + id + "/") - if err != nil { - lastErr = err - } else { for _, id := range ids { - err := d.store.Delete(id.Key) + err := tx.Delete(id.Key) if err != nil { - lastErr = err + return err } } - } - if dataErr != nil { - return dataErr - } - if indexErr != nil { - return indexErr - } - return lastErr + return nil + }) } func (d *templateKV) AssociateTask(templateId, taskId string) error { - akey := d.templateTaskAssociationKey(templateId, taskId) - return d.store.Put(akey, []byte(taskId)) + return d.store.Update(func(tx storage.Tx) error { + akey := d.templateTaskAssociationKey(templateId, taskId) + return tx.Put(akey, []byte(taskId)) + }) } func (d *templateKV) DisassociateTask(templateId, taskId string) error { - akey := d.templateTaskAssociationKey(templateId, taskId) - return d.store.Delete(akey) + return d.store.Update(func(tx 
storage.Tx) error { + akey := d.templateTaskAssociationKey(templateId, taskId) + return tx.Delete(akey) + }) } -func (d *templateKV) ListAssociatedTasks(templateId string) ([]string, error) { - ids, err := d.store.List(templateTaskPrefix + templateId + "/") - if err != nil { - return nil, err - } - taskIds := make([]string, len(ids)) - for i, id := range ids { - taskIds[i] = string(id.Value) - } - return taskIds, nil +func (d *templateKV) ListAssociatedTasks(templateId string) (taskIds []string, err error) { + err = d.store.View(func(tx storage.ReadOnlyTx) error { + ids, err := tx.List(templateTaskPrefix + templateId + "/") + if err != nil { + return err + } + taskIds = make([]string, len(ids)) + for i, id := range ids { + taskIds[i] = string(id.Value) + } + return nil + }) + return } -func (d *templateKV) List(pattern string, offset, limit int) ([]Template, error) { - // Templates are indexed via their ID only. - // While templates are sorted in the data section by their ID anyway - // this allows us to do offset/limits and filtering without having to read in all template data. +func (d *templateKV) List(pattern string, offset, limit int) (templates []Template, err error) { + err = d.store.View(func(tx storage.ReadOnlyTx) error { + // Templates are indexed via their ID only. + // While templates are sorted in the data section by their ID anyway + // this allows us to do offset/limits and filtering without having to read in all template data. 
- // List all template ids sorted by ID - ids, err := d.store.List(templateIndexesPrefix + idIndex) - if err != nil { - return nil, err - } + // List all template ids sorted by ID + ids, err := tx.List(templateIndexesPrefix + idIndex) + if err != nil { + return err + } - var match func([]byte) bool - if pattern != "" { - match = func(value []byte) bool { - id := string(value) - matched, _ := path.Match(pattern, id) - return matched + var match func([]byte) bool + if pattern != "" { + match = func(value []byte) bool { + id := string(value) + matched, _ := path.Match(pattern, id) + return matched + } + } else { + match = func([]byte) bool { return true } } - } else { - match = func([]byte) bool { return true } - } - matches := storage.DoListFunc(ids, match, offset, limit) + matches := storage.DoListFunc(ids, match, offset, limit) - templates := make([]Template, len(matches)) - for i, id := range matches { - data, err := d.store.Get(d.templateDataKey(string(id))) - if err != nil { - return nil, err + templates = make([]Template, len(matches)) + for i, id := range matches { + data, err := tx.Get(d.templateDataKey(string(id))) + if err != nil { + return err + } + t, err := d.decodeTemplate(data.Value) + templates[i] = t } - t, err := d.decodeTemplate(data.Value) - templates[i] = t - } - return templates, nil + return nil + }) + return } const ( @@ -557,38 +500,50 @@ func (d *snapshotKV) snapshotDataKey(id string) string { } func (d *snapshotKV) Put(id string, snapshot *Snapshot) error { - key := d.snapshotDataKey(id) - data, err := d.encodeSnapshot(snapshot) - if err != nil { - return err - } - return d.store.Put(key, data) + return d.store.Update(func(tx storage.Tx) error { + key := d.snapshotDataKey(id) + data, err := d.encodeSnapshot(snapshot) + if err != nil { + return err + } + return tx.Put(key, data) + }) } func (d *snapshotKV) Delete(id string) error { - key := d.snapshotDataKey(id) - return d.store.Delete(key) + return d.store.Update(func(tx storage.Tx) error { 
+ key := d.snapshotDataKey(id) + return tx.Delete(key) + }) } -func (d *snapshotKV) Exists(id string) (bool, error) { - key := d.snapshotDataKey(id) - return d.store.Exists(key) +func (d *snapshotKV) Exists(id string) (exists bool, err error) { + err = d.store.View(func(tx storage.ReadOnlyTx) error { + key := d.snapshotDataKey(id) + exists, err = tx.Exists(key) + return err + }) + return } -func (d *snapshotKV) Get(id string) (*Snapshot, error) { - exists, err := d.Exists(id) - if err != nil { - return nil, err - } - if !exists { - return nil, ErrNoSnapshotExists - } - key := d.snapshotDataKey(id) - data, err := d.store.Get(key) - if err != nil { - return nil, err - } - return d.decodeSnapshot(data.Value) +func (d *snapshotKV) Get(id string) (snap *Snapshot, err error) { + err = d.store.View(func(tx storage.ReadOnlyTx) error { + exists, err := d.Exists(id) + if err != nil { + return err + } + if !exists { + return ErrNoSnapshotExists + } + key := d.snapshotDataKey(id) + data, err := tx.Get(key) + if err != nil { + return err + } + snap, err = d.decodeSnapshot(data.Value) + return err + }) + return } type VarType int diff --git a/services/task_store/service.go b/services/task_store/service.go index c940576a2..adbf8c18a 100644 --- a/services/task_store/service.go +++ b/services/task_store/service.go @@ -76,88 +76,79 @@ const taskNamespace = "task_store" func (ts *Service) Open() error { // Create DAO store := ts.StorageService.Store(taskNamespace) - ts.tasks = newTaskKV(store) + tasksDAO, err := newTaskKV(store) + if err != nil { + return err + } + ts.tasks = tasksDAO ts.templates = newTemplateKV(store) ts.snapshots = newSnapshotKV(store) // Perform migration to new storage service. 
- err := ts.migrate() - if err != nil { + if err := ts.migrate(); err != nil { return err } // Define API routes ts.routes = []httpd.Route{ { - Name: "task", Method: "GET", Pattern: tasksPathAnchored, HandlerFunc: ts.handleTask, }, { - Name: "deleteTask", Method: "DELETE", Pattern: tasksPathAnchored, HandlerFunc: ts.handleDeleteTask, }, { // Satisfy CORS checks. - Name: "/tasks/-cors", Method: "OPTIONS", Pattern: tasksPathAnchored, HandlerFunc: httpd.ServeOptions, }, { - Name: "updateTask", Method: "PATCH", Pattern: tasksPathAnchored, HandlerFunc: ts.handleUpdateTask, }, { - Name: "listTasks", Method: "GET", Pattern: tasksPath, HandlerFunc: ts.handleListTasks, }, { - Name: "createTask", Method: "POST", Pattern: tasksPath, HandlerFunc: ts.handleCreateTask, }, { - Name: "template", Method: "GET", Pattern: templatesPathAnchored, HandlerFunc: ts.handleTemplate, }, { - Name: "deleteTemplate", Method: "DELETE", Pattern: templatesPathAnchored, HandlerFunc: ts.handleDeleteTemplate, }, { // Satisfy CORS checks. - Name: "/templates/-cors", Method: "OPTIONS", Pattern: templatesPathAnchored, HandlerFunc: httpd.ServeOptions, }, { - Name: "updateTemplate", Method: "PATCH", Pattern: templatesPathAnchored, HandlerFunc: ts.handleUpdateTemplate, }, { - Name: "listTemplates", Method: "GET", Pattern: templatesPath, HandlerFunc: ts.handleListTemplates, }, { - Name: "createTemplate", Method: "POST", Pattern: templatesPath, HandlerFunc: ts.handleCreateTemplate, @@ -226,7 +217,7 @@ func (ts *Service) migrate() error { // Connect to old boltdb db, err := bolt.Open(filepath.Join(ts.oldDBDir, "task.db"), 0600, &bolt.Options{ReadOnly: true}) if err != nil { - ts.logger.Println("W! could not open old boltd for task_store. Not performing migration. Remove the `task_store.dir` configuration to disable migration.") + ts.logger.Println("D! could not open old boltd for task_store. Not performing migration. 
Remove the `task_store.dir` configuration to disable migration.") return nil } diff --git a/services/telegram/service.go b/services/telegram/service.go index 11ac5d862..f3028ec61 100644 --- a/services/telegram/service.go +++ b/services/telegram/service.go @@ -12,6 +12,7 @@ import ( "path" "sync/atomic" + "github.com/influxdata/kapacitor/alert" "github.com/pkg/errors" ) @@ -176,3 +177,47 @@ func (s *Service) preparePost(chatId, parseMode, message string, disableWebPageP u.Path = path.Join(u.Path+c.Token, "sendMessage") return u.String(), &post, nil } + +type HandlerConfig struct { + // Telegram user/group ID to post messages to. + // If empty uses the chat-id from the configuration. + ChatId string `mapstructure:"chat-id"` + + // Parse mode, defaults to Markdown + // If empty uses the parse-mode from the configuration. + ParseMode string `mapstructure:"parse-mode"` + + // Web Page preview + // If empty uses the disable-web-page-preview from the configuration. + DisableWebPagePreview bool `mapstructure:"disable-web-page-preview"` + + // Disables Notification + // If empty uses the disable-notification from the configuration. + DisableNotification bool `mapstructure:"disable-notification"` +} + +type handler struct { + s *Service + c HandlerConfig + logger *log.Logger +} + +func (s *Service) Handler(c HandlerConfig, l *log.Logger) alert.Handler { + return &handler{ + s: s, + c: c, + logger: l, + } +} + +func (h *handler) Handle(event alert.Event) { + if err := h.s.Alert( + h.c.ChatId, + h.c.ParseMode, + event.State.Message, + h.c.DisableWebPagePreview, + h.c.DisableNotification, + ); err != nil { + h.logger.Println("E! 
failed to send event to Telegram", err) + } +} diff --git a/services/telegram/telegramtest/telegramtest.go b/services/telegram/telegramtest/telegramtest.go new file mode 100644 index 000000000..502823d6f --- /dev/null +++ b/services/telegram/telegramtest/telegramtest.go @@ -0,0 +1,57 @@ +package telegramtest + +import ( + "encoding/json" + "net/http" + "net/http/httptest" + "sync" +) + +type Server struct { + mu sync.Mutex + ts *httptest.Server + URL string + requests []Request + closed bool +} + +func NewServer() *Server { + s := new(Server) + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + tr := Request{ + URL: r.URL.String(), + } + dec := json.NewDecoder(r.Body) + dec.Decode(&tr.PostData) + s.mu.Lock() + s.requests = append(s.requests, tr) + s.mu.Unlock() + })) + s.ts = ts + s.URL = ts.URL + return s +} +func (s *Server) Requests() []Request { + s.mu.Lock() + defer s.mu.Unlock() + return s.requests +} +func (s *Server) Close() { + if s.closed { + return + } + s.closed = true + s.ts.Close() +} + +type Request struct { + URL string + PostData PostData +} +type PostData struct { + ChatId string `json:"chat_id"` + Text string `json:"text"` + ParseMode string `json:"parse_mode"` + DisableWebPagePreview bool `json:"disable_web_page_preview"` + DisableNotification bool `json:"disable_notification"` +} diff --git a/services/udf/service.go b/services/udf/service.go index d68b85d4a..1545e8a74 100644 --- a/services/udf/service.go +++ b/services/udf/service.go @@ -8,6 +8,7 @@ import ( "time" "github.com/influxdata/kapacitor" + "github.com/influxdata/kapacitor/command" "github.com/influxdata/kapacitor/udf" ) @@ -80,13 +81,14 @@ func (s *Service) Create( for k, v := range conf.Env { env = append(env, fmt.Sprintf("%s=%s", k, v)) } - commander := kapacitor.CommandInfo{ + cmdSpec := command.Spec{ Prog: conf.Prog, Args: conf.Args, Env: env, } return kapacitor.NewUDFProcess( - commander, + command.ExecCommander, + cmdSpec, l, 
time.Duration(conf.Timeout), abortCallback, diff --git a/services/victorops/service.go b/services/victorops/service.go index 31b8c901c..b2f7a4c38 100644 --- a/services/victorops/service.go +++ b/services/victorops/service.go @@ -13,7 +13,7 @@ import ( "sync/atomic" "time" - "github.com/influxdata/kapacitor" + "github.com/influxdata/kapacitor/alert" "github.com/pkg/errors" ) @@ -133,7 +133,7 @@ func (s *Service) preparePost(routingKey, messageType, message, entityID string, voData["entity_id"] = entityID voData["state_message"] = message voData["timestamp"] = t.Unix() - voData["monitoring_tool"] = kapacitor.Product + voData["monitoring_tool"] = "kapacitor" if details != nil { b, err := json.Marshal(details) if err != nil { @@ -160,3 +160,43 @@ func (s *Service) preparePost(routingKey, messageType, message, entityID string, u.Path = path.Join(u.Path, c.APIKey, routingKey) return u.String(), &post, nil } + +type HandlerConfig struct { + // The routing key to use for the alert. + // Defaults to the value in the configuration if empty. + RoutingKey string `mapstructure:"routing-key"` +} + +type handler struct { + s *Service + c HandlerConfig + logger *log.Logger +} + +func (s *Service) Handler(c HandlerConfig, l *log.Logger) alert.Handler { + return &handler{ + s: s, + c: c, + logger: l, + } +} + +func (h *handler) Handle(event alert.Event) { + var messageType string + switch event.State.Level { + case alert.OK: + messageType = "RECOVERY" + default: + messageType = event.State.Level.String() + } + if err := h.s.Alert( + h.c.RoutingKey, + messageType, + event.State.Message, + event.State.ID, + event.State.Time, + event.Data.Result, + ); err != nil { + h.logger.Println("E! 
failed to send event to VictorOps", err) + } +} diff --git a/services/victorops/victoropstest/victoropstest.go b/services/victorops/victoropstest/victoropstest.go new file mode 100644 index 000000000..bc9980d4e --- /dev/null +++ b/services/victorops/victoropstest/victoropstest.go @@ -0,0 +1,58 @@ +package victoropstest + +import ( + "encoding/json" + "net/http" + "net/http/httptest" + "sync" +) + +type Server struct { + mu sync.Mutex + ts *httptest.Server + URL string + requests []Request + closed bool +} + +func NewServer() *Server { + s := new(Server) + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + vr := Request{ + URL: r.URL.String(), + } + dec := json.NewDecoder(r.Body) + dec.Decode(&vr.PostData) + s.mu.Lock() + s.requests = append(s.requests, vr) + s.mu.Unlock() + })) + s.ts = ts + s.URL = ts.URL + return s +} +func (s *Server) Requests() []Request { + s.mu.Lock() + defer s.mu.Unlock() + return s.requests +} +func (s *Server) Close() { + if s.closed { + return + } + s.closed = true + s.ts.Close() +} + +type Request struct { + URL string + PostData PostData +} +type PostData struct { + MessageType string `json:"message_type"` + EntityID string `json:"entity_id"` + StateMessage string `json:"state_message"` + Timestamp int `json:"timestamp"` + MonitoringTool string `json:"monitoring_tool"` + Data string `json:"data"` +} diff --git a/task_master.go b/task_master.go index e8af1c4e4..e4c4fee91 100644 --- a/task_master.go +++ b/task_master.go @@ -8,12 +8,22 @@ import ( "time" imodels "github.com/influxdata/influxdb/models" + "github.com/influxdata/kapacitor/alert" + "github.com/influxdata/kapacitor/command" "github.com/influxdata/kapacitor/expvar" "github.com/influxdata/kapacitor/influxdb" "github.com/influxdata/kapacitor/models" "github.com/influxdata/kapacitor/pipeline" + "github.com/influxdata/kapacitor/services/alerta" + "github.com/influxdata/kapacitor/services/hipchat" 
"github.com/influxdata/kapacitor/services/httpd" k8s "github.com/influxdata/kapacitor/services/k8s/client" + "github.com/influxdata/kapacitor/services/opsgenie" + "github.com/influxdata/kapacitor/services/pagerduty" + "github.com/influxdata/kapacitor/services/slack" + "github.com/influxdata/kapacitor/services/smtp" + "github.com/influxdata/kapacitor/services/telegram" + "github.com/influxdata/kapacitor/services/victorops" "github.com/influxdata/kapacitor/tick" "github.com/influxdata/kapacitor/tick/stateful" "github.com/influxdata/kapacitor/timer" @@ -57,59 +67,57 @@ type TaskMaster struct { UDFService UDFService + AlertService interface { + EventState(topic, event string) (alert.EventState, bool) + Collect(event alert.Event) error + RegisterHandler(topics []string, h alert.Handler) + DeregisterHandler(topics []string, h alert.Handler) + DeleteTopic(topic string) error + } InfluxDBService interface { NewNamedClient(name string) (influxdb.Client, error) } SMTPService interface { Global() bool StateChangesOnly() bool - SendMail(to []string, subject string, msg string) error + Handler(smtp.HandlerConfig, *log.Logger) alert.Handler } OpsGenieService interface { Global() bool - Alert(teams []string, recipients []string, messageType, message, entityID string, t time.Time, details interface{}) error + Handler(opsgenie.HandlerConfig, *log.Logger) alert.Handler } VictorOpsService interface { Global() bool - Alert(routingKey, messageType, message, entityID string, t time.Time, extra interface{}) error + Handler(victorops.HandlerConfig, *log.Logger) alert.Handler } PagerDutyService interface { Global() bool - Alert(serviceKey, incidentKey, desc string, level AlertLevel, details interface{}) error + Handler(pagerduty.HandlerConfig, *log.Logger) alert.Handler } SlackService interface { Global() bool StateChangesOnly() bool - Alert(channel, message, username, iconEmoji string, level AlertLevel) error + Handler(slack.HandlerConfig, *log.Logger) alert.Handler } TelegramService 
interface { Global() bool StateChangesOnly() bool - Alert(chatId, parseMode, message string, disableWebPagePreview, disableNotification bool) error + Handler(telegram.HandlerConfig, *log.Logger) alert.Handler } HipChatService interface { Global() bool StateChangesOnly() bool - Alert(room, token, message string, level AlertLevel) error + Handler(hipchat.HandlerConfig, *log.Logger) alert.Handler } AlertaService interface { - Alert(token, - resource, - event, - environment, - severity, - group, - value, - message, - origin string, - service []string, - data interface{}) error + DefaultHandlerConfig() alerta.HandlerConfig + Handler(alerta.HandlerConfig, *log.Logger) (alert.Handler, error) } SensuService interface { - Alert(name, output string, level AlertLevel) error + Handler(*log.Logger) alert.Handler } TalkService interface { - Alert(title, text string) error + Handler(*log.Logger) alert.Handler } TimingService interface { NewTimer(timer.Setter) timer.Timer @@ -119,6 +127,8 @@ type TaskMaster struct { } LogService LogService + Commander command.Commander + DefaultRetentionPolicy string // Incoming streams @@ -181,6 +191,7 @@ func (tm *TaskMaster) New(id string) *TaskMaster { n.TaskStore = tm.TaskStore n.DeadmanService = tm.DeadmanService n.UDFService = tm.UDFService + n.AlertService = tm.AlertService n.InfluxDBService = tm.InfluxDBService n.SMTPService = tm.SMTPService n.OpsGenieService = tm.OpsGenieService @@ -194,9 +205,14 @@ func (tm *TaskMaster) New(id string) *TaskMaster { n.TalkService = tm.TalkService n.TimingService = tm.TimingService n.K8sService = tm.K8sService + n.Commander = tm.Commander return n } +func (tm *TaskMaster) ID() string { + return tm.id +} + func (tm *TaskMaster) Open() (err error) { tm.mu.Lock() defer tm.mu.Unlock() @@ -242,7 +258,6 @@ func (tm *TaskMaster) Drain() { tm.mu.Lock() defer tm.mu.Unlock() - // TODO(yosia): handle this thing ;) for id, _ := range tm.taskToForkKeys { tm.delFork(id) } @@ -314,11 +329,22 @@ func (tm *TaskMaster) 
NewTask( } func (tm *TaskMaster) waitForForks() { - if tm.drained { + tm.mu.Lock() + drained := tm.drained + tm.mu.Unlock() + + if drained { return } + + tm.mu.Lock() tm.drained = true + tm.mu.Unlock() + + // Close the write points in stream tm.writePointsIn.Close() + + // Don't hold the lock while we wait tm.wg.Wait() } @@ -475,14 +501,15 @@ func (tm *TaskMaster) stream(name string) (StreamCollector, error) { return nil, ErrTaskMasterClosed } in := newEdge(fmt.Sprintf("task_master:%s", tm.id), name, "stream", pipeline.StreamEdge, defaultEdgeBufferSize, tm.LogService) - tm.drained = false tm.wg.Add(1) - go tm.runForking(in) + go func() { + defer tm.wg.Done() + tm.runForking(in) + }() return in, nil } func (tm *TaskMaster) runForking(in *Edge) { - defer tm.wg.Done() for p, ok := in.NextPoint(); ok; p, ok = in.NextPoint() { tm.forkPoint(p) } diff --git a/udf.go b/udf.go index d4de7644d..4304b620d 100644 --- a/udf.go +++ b/udf.go @@ -6,11 +6,11 @@ import ( "io" "log" "net" - "os/exec" "sync" "time" "github.com/cenkalti/backoff" + "github.com/influxdata/kapacitor/command" "github.com/influxdata/kapacitor/models" "github.com/influxdata/kapacitor/pipeline" "github.com/influxdata/kapacitor/udf" @@ -175,8 +175,9 @@ func (u *UDFNode) snapshot() ([]byte, error) { // via normal Kapacitor logging. 
type UDFProcess struct { server *udf.Server - commander Commander - cmd Command + commander command.Commander + cmdSpec command.Spec + cmd command.Command stderr io.Reader @@ -192,13 +193,15 @@ type UDFProcess struct { } func NewUDFProcess( - commander Commander, + commander command.Commander, + cmdSpec command.Spec, l *log.Logger, timeout time.Duration, abortCallback func(), ) *UDFProcess { return &UDFProcess{ commander: commander, + cmdSpec: cmdSpec, logger: l, timeout: timeout, abortCallback: abortCallback, @@ -210,7 +213,7 @@ func (p *UDFProcess) Open() error { p.mu.Lock() defer p.mu.Unlock() - cmd := p.commander.NewCommand() + cmd := p.commander.NewCommand(p.cmdSpec) stdin, err := cmd.StdinPipe() if err != nil { return err @@ -296,48 +299,6 @@ func (p *UDFProcess) PointOut() <-chan models.Point { return p.server.PointOu func (p *UDFProcess) BatchOut() <-chan models.Batch { return p.server.BatchOut() } func (p *UDFProcess) Info() (udf.Info, error) { return p.server.Info() } -type Command interface { - Start() error - Wait() error - - StdinPipe() (io.WriteCloser, error) - StdoutPipe() (io.Reader, error) - StderrPipe() (io.Reader, error) - - Kill() -} - -type Commander interface { - NewCommand() Command -} - -// Necessary information to create a new command -type CommandInfo struct { - Prog string - Args []string - Env []string -} - -// Create a new Command using golang exec package and the information. -func (ci CommandInfo) NewCommand() Command { - c := exec.Command(ci.Prog, ci.Args...) 
- c.Env = ci.Env - return cmd{c} -} - -type cmd struct { - *exec.Cmd -} - -func (c cmd) StdoutPipe() (io.Reader, error) { return c.Cmd.StdoutPipe() } -func (c cmd) StderrPipe() (io.Reader, error) { return c.Cmd.StderrPipe() } - -func (c cmd) Kill() { - if c.Cmd.Process != nil { - c.Cmd.Process.Kill() - } -} - type UDFSocket struct { server *udf.Server socket Socket diff --git a/udf/udf.pb.go b/udf/udf.pb.go index b53b1828e..2abbbfa47 100644 --- a/udf/udf.pb.go +++ b/udf/udf.pb.go @@ -1228,74 +1228,74 @@ func init() { func init() { proto.RegisterFile("udf.proto", fileDescriptor0) } var fileDescriptor0 = []byte{ - // 1091 bytes of a gzipped FileDescriptorProto + // 1100 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xc4, 0x56, 0x6d, 0x73, 0xdb, 0x44, - 0x10, 0xae, 0x2c, 0xcb, 0x96, 0xd6, 0x76, 0x22, 0x1f, 0x69, 0x11, 0x99, 0x4e, 0xa6, 0x15, 0xd3, - 0x24, 0x04, 0x30, 0x8c, 0xcb, 0x4b, 0x87, 0xe9, 0x00, 0x31, 0x71, 0xa9, 0xa7, 0x25, 0xee, 0x28, - 0x6e, 0xbf, 0xcb, 0xd1, 0xc5, 0xd5, 0xd4, 0x91, 0x8c, 0x24, 0x07, 0xcc, 0xcf, 0x81, 0xff, 0xc2, - 0x07, 0xfe, 0x07, 0x33, 0x7c, 0xe4, 0x27, 0x70, 0xb7, 0x27, 0x9d, 0xce, 0x2f, 0xd0, 0x29, 0x93, - 0x99, 0x7e, 0xd3, 0xed, 0x3e, 0xfb, 0xf6, 0xdc, 0xee, 0xad, 0xc0, 0x9a, 0x07, 0x17, 0x9d, 0x59, - 0x12, 0x67, 0x31, 0xd1, 0xd9, 0xa7, 0xdb, 0x82, 0xc6, 0x20, 0xba, 0x88, 0x3d, 0xfa, 0xe3, 0x9c, - 0xa6, 0x99, 0xfb, 0xa7, 0x06, 0x4d, 0x71, 0x4e, 0x67, 0x71, 0x94, 0x52, 0xf2, 0x3e, 0x18, 0x3f, - 0xf9, 0x51, 0x96, 0x3a, 0xda, 0x1d, 0xed, 0x70, 0xab, 0xdb, 0xea, 0x70, 0xfb, 0x7e, 0x30, 0xa1, - 0xa3, 0xc5, 0x8c, 0x7a, 0x42, 0x47, 0x3e, 0x00, 0x93, 0xb9, 0xbc, 0x0a, 0x03, 0x9a, 0x3a, 0x95, - 0x4d, 0x38, 0xa9, 0x26, 0x0f, 0xa0, 0x1e, 0xcf, 0xb2, 0x90, 0xf9, 0x76, 0xf4, 0x3b, 0xfa, 0x61, - 0xa3, 0xbb, 0x87, 0x48, 0x35, 0x66, 0x67, 0x28, 0x00, 0xfd, 0x28, 0x4b, 0x16, 0x5e, 0x01, 0xdf, - 0x7d, 0x02, 0x4d, 0x55, 0x41, 0x6c, 0xd0, 0x5f, 0xd1, 0x05, 0xe6, 0x65, 0x79, 0xfc, 0x93, 0xdc, - 0x03, 
0xe3, 0xca, 0x9f, 0xce, 0x29, 0xe6, 0xd0, 0xe8, 0x6e, 0xa3, 0x67, 0x61, 0x83, 0xfe, 0x85, - 0xf6, 0xab, 0xca, 0x03, 0xcd, 0x7d, 0x08, 0x50, 0x2a, 0x48, 0x07, 0x00, 0x55, 0x3c, 0x57, 0x5e, - 0xa9, 0xce, 0x2a, 0xd8, 0x42, 0xeb, 0x17, 0x85, 0xd8, 0x53, 0x10, 0xee, 0x67, 0x9c, 0xb4, 0x30, - 0xcb, 0x49, 0x63, 0x71, 0x65, 0x4d, 0x1a, 0xd6, 0xd4, 0x50, 0x22, 0xcb, 0x02, 0xdc, 0x47, 0x50, - 0x13, 0x22, 0x42, 0xa0, 0x1a, 0xf9, 0x97, 0x34, 0xcf, 0x1d, 0xbf, 0xc9, 0x21, 0xd4, 0x30, 0x02, - 0x67, 0x90, 0xfb, 0xb0, 0x15, 0x1f, 0x98, 0x85, 0x97, 0xeb, 0xdd, 0xbf, 0x34, 0x68, 0x28, 0x72, - 0xe2, 0x42, 0x35, 0x63, 0x69, 0xe5, 0x37, 0xb4, 0x9a, 0x37, 0xea, 0xc8, 0x1e, 0x58, 0xe3, 0x38, - 0x9e, 0xbe, 0x90, 0xf4, 0x98, 0x8f, 0x6f, 0x78, 0xa5, 0x88, 0xdc, 0x06, 0x33, 0x8c, 0x32, 0xa1, - 0xd6, 0x99, 0x5a, 0x67, 0x6a, 0x29, 0x61, 0x11, 0x1a, 0x41, 0x3c, 0x1f, 0x4f, 0xa9, 0x00, 0x54, - 0x19, 0x40, 0x63, 0x00, 0x55, 0xc8, 0x31, 0x69, 0x96, 0x84, 0xd1, 0x44, 0x60, 0x0c, 0x5e, 0x1a, - 0xc7, 0x28, 0x42, 0xb2, 0x0f, 0xad, 0x60, 0x9e, 0xf8, 0x32, 0x75, 0xa7, 0x96, 0x87, 0x5a, 0x16, - 0xf7, 0xea, 0xf9, 0x45, 0xba, 0x5f, 0xf3, 0x6e, 0xe4, 0x44, 0xe7, 0xdd, 0xe8, 0x40, 0x3d, 0x9d, - 0x9f, 0x9f, 0xd3, 0x54, 0xf4, 0xa3, 0xe9, 0x15, 0x47, 0xb2, 0x03, 0x06, 0x4d, 0x92, 0x38, 0xc1, - 0xe2, 0x2c, 0x4f, 0x1c, 0xdc, 0x36, 0x6c, 0x9f, 0x45, 0xfe, 0x2c, 0x7d, 0x19, 0x17, 0x97, 0xe5, - 0x76, 0xc0, 0x2e, 0x45, 0xb9, 0xdb, 0x5d, 0x30, 0xd3, 0x5c, 0x86, 0x7e, 0x9b, 0x9e, 0x3c, 0xbb, - 0x1f, 0xc1, 0x16, 0xc3, 0x65, 0x71, 0x42, 0x8b, 0xeb, 0xfe, 0x2f, 0xf4, 0x31, 0x6c, 0x4b, 0xf4, - 0xff, 0xcc, 0x79, 0x1f, 0xec, 0x27, 0x94, 0xce, 0xfc, 0x69, 0x78, 0x25, 0x43, 0xb2, 0x86, 0xc9, - 0xc2, 0xbc, 0x61, 0x74, 0x0f, 0xbf, 0xdd, 0x03, 0x68, 0x2b, 0xb8, 0x3c, 0xd8, 0x26, 0xe0, 0x3d, - 0x68, 0xf5, 0xb9, 0x67, 0x09, 0x92, 0x71, 0x35, 0x35, 0xee, 0x1f, 0x1a, 0x40, 0x8f, 0x4e, 0xc2, - 0xa8, 0xe7, 0x67, 0xe7, 0x2f, 0x37, 0xf6, 0x28, 0x33, 0x9c, 0x24, 0xf1, 0x7c, 0x56, 0x24, 0x8c, - 0x07, 0xf2, 0x31, 0x8b, 0xe9, 0x4f, 0x8a, 
0x79, 0x7e, 0x0f, 0xfb, 0xaf, 0x74, 0xd4, 0x19, 0x31, - 0x9d, 0x18, 0x65, 0x84, 0x71, 0xc7, 0x69, 0xf8, 0x8b, 0xe8, 0x22, 0x96, 0x22, 0xff, 0x26, 0xb7, - 0xa0, 0x36, 0x5e, 0x9c, 0xf2, 0x70, 0x06, 0x52, 0x94, 0x9f, 0x76, 0xbf, 0x04, 0x4b, 0x9a, 0x6f, - 0x18, 0xf8, 0x1d, 0x75, 0xe0, 0x2d, 0x75, 0xbe, 0x7f, 0x33, 0xc0, 0x78, 0x16, 0xb3, 0x06, 0xde, - 0xc4, 0x88, 0xac, 0xad, 0xa2, 0xd4, 0xc6, 0x6e, 0x35, 0xf0, 0x33, 0x7f, 0xec, 0xa7, 0x62, 0x02, - 0x2c, 0x4f, 0x9e, 0xd9, 0x6c, 0x6e, 0x27, 0x34, 0xa3, 0x11, 0xef, 0xd0, 0x67, 0xf1, 0x34, 0x3c, - 0x5f, 0x60, 0xf6, 0x96, 0xb7, 0x2a, 0x2e, 0x19, 0x32, 0x54, 0x86, 0xf6, 0x00, 0x02, 0x16, 0x37, - 0x4a, 0xf1, 0x8d, 0xa8, 0x31, 0x9e, 0x2c, 0x4f, 0x91, 0x30, 0xff, 0x82, 0xc1, 0x3a, 0x32, 0xb8, - 0x83, 0x0c, 0x62, 0xf6, 0x6b, 0xe4, 0x7d, 0x0b, 0xcd, 0x8b, 0x90, 0x4e, 0x83, 0xf4, 0x04, 0x47, - 0xcf, 0x31, 0xd1, 0xe2, 0xb6, 0x62, 0xf1, 0x48, 0x51, 0x0b, 0xcb, 0x25, 0x0b, 0xc2, 0x28, 0x15, - 0xe7, 0x41, 0x94, 0x39, 0x96, 0x72, 0x65, 0xaa, 0x39, 0xd3, 0x09, 0xdb, 0x12, 0x5b, 0x86, 0x3e, - 0xc3, 0x89, 0x76, 0xe0, 0x5f, 0x42, 0x0b, 0xf5, 0x52, 0x68, 0x21, 0x52, 0x6e, 0xb9, 0x71, 0x2d, - 0xb7, 0xbc, 0xfb, 0x0d, 0xb4, 0xd7, 0xca, 0x7d, 0x9d, 0x03, 0x4d, 0x75, 0xf0, 0x10, 0xb6, 0x96, - 0x0b, 0x7e, 0x9d, 0xb5, 0xbe, 0x31, 0xbc, 0x52, 0xf2, 0x1b, 0x75, 0xe9, 0xef, 0x1a, 0x98, 0xfd, - 0x28, 0x78, 0xd3, 0x81, 0xe3, 0x2d, 0x7d, 0xe9, 0xff, 0x2c, 0x1e, 0x6a, 0x0f, 0xbf, 0xc9, 0x87, - 0x79, 0x0b, 0x55, 0xf1, 0x56, 0xde, 0x15, 0xeb, 0x37, 0x77, 0xbd, 0xd6, 0x45, 0xd7, 0x3e, 0x6e, - 0x7f, 0x57, 0xa0, 0x5e, 0xbc, 0x55, 0xfb, 0x50, 0x0d, 0xd9, 0x52, 0x45, 0xc3, 0x62, 0x8d, 0x29, - 0xbf, 0x18, 0xec, 0xb5, 0x47, 0xbd, 0xc0, 0x85, 0x59, 0xbe, 0xac, 0x0b, 0x9c, 0xdc, 0xaa, 0x02, - 0x17, 0x66, 0xe4, 0x73, 0xb0, 0x5e, 0x15, 0xef, 0x1c, 0x96, 0xdc, 0xe8, 0xde, 0x44, 0xf0, 0xea, - 0x2b, 0xc9, 0x37, 0x9a, 0x44, 0x92, 0xae, 0xf2, 0x4a, 0x57, 0xd1, 0x4a, 0xcc, 0xd5, 0xca, 0x3e, - 0xe0, 0x7b, 0xae, 0xc0, 0x91, 0x4f, 0xa0, 0x9e, 0x88, 0xd7, 0x1b, 0x89, 0x69, 
0x74, 0xdf, 0x41, - 0x93, 0xe5, 0xf7, 0x9f, 0x59, 0x14, 0x28, 0x72, 0x00, 0xc6, 0x98, 0xbf, 0x74, 0x8e, 0xad, 0xfc, - 0x71, 0x94, 0x6f, 0x1f, 0x83, 0x0a, 0x3d, 0xdb, 0x8e, 0xc6, 0x8c, 0xcf, 0x88, 0xd3, 0x46, 0x20, - 0x94, 0x53, 0xc3, 0x31, 0xa8, 0x22, 0x77, 0x41, 0xa7, 0x51, 0xe0, 0x10, 0x44, 0xb4, 0x96, 0x6e, - 0x90, 0x81, 0xb8, 0xae, 0x67, 0x41, 0xfd, 0x92, 0x6d, 0x0e, 0x7f, 0x42, 0xdd, 0x5f, 0x75, 0x30, - 0xe5, 0x8b, 0x7e, 0xb0, 0xc4, 0x79, 0x7b, 0xed, 0x97, 0x4a, 0x92, 0x7e, 0xb0, 0x44, 0x7a, 0x5b, - 0x21, 0x5d, 0x05, 0x32, 0xd6, 0xbf, 0x58, 0x67, 0xfd, 0xd6, 0x2a, 0xeb, 0xd2, 0x44, 0xa1, 0xfd, - 0xfe, 0x1a, 0xed, 0x37, 0x57, 0x68, 0x97, 0x56, 0x25, 0xef, 0x9f, 0xae, 0xf2, 0xbe, 0xb3, 0xcc, - 0xbb, 0x34, 0x91, 0xc4, 0x1f, 0x15, 0x2b, 0xac, 0x86, 0x78, 0x22, 0xd8, 0x52, 0xb7, 0x1c, 0xe7, - 0x15, 0x21, 0x6f, 0xf1, 0x92, 0x8e, 0xee, 0xb2, 0xf9, 0xce, 0xff, 0x81, 0x09, 0x40, 0xed, 0x6c, - 0xe4, 0xf5, 0x8f, 0x7f, 0xb0, 0x6f, 0x10, 0x0b, 0x8c, 0xde, 0xf1, 0xe8, 0xbb, 0xc7, 0xb6, 0x76, - 0x74, 0x02, 0x96, 0xfc, 0x59, 0x23, 0x26, 0x54, 0x7b, 0xc3, 0xe1, 0x53, 0x86, 0xa8, 0x83, 0x3e, - 0x38, 0x1d, 0xd9, 0x1a, 0x37, 0x3b, 0x19, 0x3e, 0xef, 0x3d, 0xed, 0xdb, 0x95, 0xdc, 0xc5, 0xe0, - 0xf4, 0x7b, 0x5b, 0x27, 0x4d, 0x30, 0x4f, 0x9e, 0x7b, 0xc7, 0xa3, 0xc1, 0xf0, 0xd4, 0xae, 0x8e, - 0x6b, 0xf8, 0x4b, 0x7f, 0xff, 0x9f, 0x00, 0x00, 0x00, 0xff, 0xff, 0x29, 0x62, 0x27, 0x91, 0xdf, - 0x0b, 0x00, 0x00, + 0x10, 0xb6, 0x22, 0xcb, 0x96, 0xd6, 0x79, 0x91, 0x8f, 0xb4, 0x88, 0x4c, 0xa7, 0x93, 0x8a, 0x49, + 0x62, 0x02, 0x18, 0xc6, 0x05, 0xda, 0x61, 0x3a, 0x40, 0x4c, 0x5c, 0xec, 0x69, 0x49, 0x3a, 0x17, + 0xb7, 0xdf, 0xe5, 0xe8, 0xe2, 0x6a, 0xea, 0x48, 0x46, 0x3a, 0x07, 0xcc, 0xcf, 0x81, 0xff, 0xc2, + 0x07, 0xfe, 0x07, 0x33, 0x7c, 0xe4, 0x27, 0x30, 0xb7, 0x27, 0x9d, 0xce, 0x2f, 0xd0, 0x29, 0xd3, + 0x19, 0xbe, 0xe9, 0x76, 0x9f, 0x7d, 0x7b, 0x6e, 0x77, 0x4f, 0xe0, 0xcc, 0xc2, 0xab, 0xf6, 0x34, + 0x4d, 0x78, 0x42, 0xcc, 0x59, 0x78, 0xe5, 0x6f, 0x41, 0x63, 0x10, 0x5f, 0x25, 0x94, 0xfd, 0x30, + 
0x63, 0x19, 0xf7, 0xff, 0x30, 0x60, 0x53, 0x9e, 0xb3, 0x69, 0x12, 0x67, 0x8c, 0xbc, 0x0f, 0xd6, + 0x8f, 0x41, 0xcc, 0x33, 0xcf, 0xd8, 0x37, 0x5a, 0xdb, 0x9d, 0xad, 0xb6, 0xb0, 0xef, 0x85, 0x63, + 0x36, 0x9c, 0x4f, 0x19, 0x95, 0x3a, 0xf2, 0x01, 0xd8, 0xd3, 0x34, 0xb9, 0x89, 0x42, 0x96, 0x79, + 0x1b, 0xeb, 0x70, 0x4a, 0x4d, 0x1e, 0x42, 0x3d, 0x99, 0xf2, 0x28, 0x89, 0x33, 0xcf, 0xdc, 0x37, + 0x5b, 0x8d, 0xce, 0x5d, 0x44, 0xea, 0x31, 0xdb, 0xe7, 0x12, 0xd0, 0x8b, 0x79, 0x3a, 0xa7, 0x05, + 0x7c, 0xef, 0x09, 0x6c, 0xea, 0x0a, 0xe2, 0x82, 0xf9, 0x8a, 0xcd, 0x31, 0x2f, 0x87, 0x8a, 0x4f, + 0x72, 0x00, 0xd6, 0x4d, 0x30, 0x99, 0x31, 0xcc, 0xa1, 0xd1, 0xd9, 0x41, 0xcf, 0xd2, 0x06, 0xfd, + 0x4b, 0xed, 0x97, 0x1b, 0x0f, 0x0d, 0xff, 0x11, 0x40, 0xa9, 0x20, 0x6d, 0x00, 0x54, 0x89, 0x5c, + 0x45, 0xa5, 0x66, 0x6b, 0xbb, 0xb3, 0x8d, 0xd6, 0x2f, 0x0a, 0x31, 0xd5, 0x10, 0xfe, 0x67, 0x82, + 0xb4, 0x88, 0xe7, 0xa4, 0x91, 0x83, 0xb2, 0x26, 0x03, 0x6b, 0x6a, 0x68, 0x91, 0x55, 0x01, 0xfe, + 0x63, 0xa8, 0x49, 0x11, 0x21, 0x50, 0x8d, 0x83, 0x6b, 0x96, 0xe7, 0x8e, 0xdf, 0xa4, 0x05, 0x35, + 0x8c, 0x20, 0x18, 0x14, 0x3e, 0x5c, 0xcd, 0x07, 0x66, 0x41, 0x73, 0xbd, 0xff, 0xa7, 0x01, 0x0d, + 0x4d, 0x4e, 0x7c, 0xa8, 0xf2, 0xf9, 0x94, 0xe5, 0x37, 0xb4, 0x9c, 0x37, 0xea, 0xc8, 0x5d, 0x70, + 0x46, 0x49, 0x32, 0x79, 0xa1, 0xe8, 0xb1, 0xfb, 0x15, 0x5a, 0x8a, 0xc8, 0x1d, 0xb0, 0xa3, 0x98, + 0x4b, 0xb5, 0xb9, 0x6f, 0xb4, 0xcc, 0x7e, 0x85, 0x2a, 0x09, 0xf1, 0xa1, 0x11, 0x26, 0xb3, 0xd1, + 0x84, 0x49, 0x40, 0x75, 0xdf, 0x68, 0x19, 0xfd, 0x0a, 0xd5, 0x85, 0x02, 0x93, 0xf1, 0x34, 0x8a, + 0xc7, 0x12, 0x63, 0x89, 0xd2, 0x04, 0x46, 0x13, 0x92, 0x43, 0xd8, 0x0a, 0x67, 0x69, 0xa0, 0x52, + 0xf7, 0x6a, 0x79, 0xa8, 0x45, 0x71, 0xb7, 0x9e, 0x5f, 0xa4, 0xff, 0x95, 0xe8, 0x46, 0x41, 0x74, + 0xde, 0x8d, 0x1e, 0xd4, 0xb3, 0xd9, 0xe5, 0x25, 0xcb, 0x64, 0x3f, 0xda, 0xb4, 0x38, 0x92, 0x5d, + 0xb0, 0x58, 0x9a, 0x26, 0x29, 0x16, 0xe7, 0x50, 0x79, 0xf0, 0x9b, 0xb0, 0x73, 0x11, 0x07, 0xd3, + 0xec, 0x65, 0x52, 0x5c, 0x96, 0xdf, 
0x06, 0xb7, 0x14, 0xe5, 0x6e, 0xf7, 0xc0, 0xce, 0x72, 0x19, + 0xfa, 0xdd, 0xa4, 0xea, 0xec, 0x7f, 0x04, 0xdb, 0x94, 0x65, 0x3c, 0x49, 0x59, 0x71, 0xdd, 0xff, + 0x86, 0x3e, 0x81, 0x1d, 0x85, 0xfe, 0x8f, 0x39, 0x1f, 0x82, 0xfb, 0x84, 0xb1, 0x69, 0x30, 0x89, + 0x6e, 0x54, 0x48, 0x02, 0x55, 0x1e, 0xe5, 0x0d, 0x63, 0x52, 0xfc, 0xf6, 0x8f, 0xa0, 0xa9, 0xe1, + 0xf2, 0x60, 0xeb, 0x80, 0x07, 0xb0, 0xd5, 0x13, 0x9e, 0x15, 0x48, 0xc5, 0x35, 0xf4, 0xb8, 0xbf, + 0x1b, 0x00, 0x5d, 0x36, 0x8e, 0xe2, 0x6e, 0xc0, 0x2f, 0x5f, 0xae, 0xed, 0xd1, 0x5d, 0xb0, 0xc6, + 0x69, 0x32, 0x9b, 0x16, 0x09, 0xe3, 0x81, 0x7c, 0x0c, 0x55, 0x1e, 0x8c, 0x8b, 0x79, 0x7e, 0x0f, + 0xfb, 0xaf, 0x74, 0xd4, 0x1e, 0x06, 0xe3, 0x7c, 0x94, 0x11, 0x26, 0x1c, 0x67, 0xd1, 0xcf, 0xb2, + 0x8b, 0x4c, 0x8a, 0xdf, 0xe4, 0x36, 0xd4, 0x46, 0xf3, 0x33, 0x11, 0xce, 0x42, 0x8a, 0xf2, 0xd3, + 0xde, 0x03, 0x70, 0x94, 0xf9, 0x9a, 0x81, 0xdf, 0xd5, 0x07, 0xde, 0xd1, 0xe7, 0xfb, 0x57, 0x0b, + 0xac, 0x67, 0x49, 0x14, 0xaf, 0xa5, 0x4e, 0xd5, 0xb6, 0xa1, 0xd5, 0xb6, 0x07, 0x76, 0x18, 0xf0, + 0x60, 0x14, 0x64, 0x72, 0x02, 0x1c, 0xaa, 0xce, 0xa4, 0x05, 0x3b, 0x29, 0xe3, 0x2c, 0x16, 0x1d, + 0xfa, 0x2c, 0x99, 0x44, 0x97, 0x73, 0xcc, 0xde, 0xa1, 0xcb, 0xe2, 0x92, 0x21, 0x4b, 0x67, 0xe8, + 0x2e, 0x40, 0x18, 0x5d, 0xb3, 0x38, 0xc3, 0x1d, 0x51, 0xdb, 0x37, 0x5b, 0x0e, 0xd5, 0x24, 0xa4, + 0x95, 0x33, 0x58, 0x47, 0x06, 0x77, 0x91, 0x41, 0xcc, 0x7e, 0x85, 0xbc, 0x6f, 0x60, 0xf3, 0x2a, + 0x62, 0x93, 0x30, 0x3b, 0xc5, 0xd1, 0xf3, 0x6c, 0xb4, 0xb8, 0xa3, 0x59, 0x3c, 0xd6, 0xd4, 0xd2, + 0x72, 0xc1, 0x82, 0x3c, 0x00, 0x47, 0x9e, 0x07, 0x31, 0xf7, 0x1c, 0xed, 0xca, 0x74, 0xf3, 0x41, + 0xcc, 0xa5, 0x6d, 0x89, 0x2d, 0x43, 0x5f, 0xe0, 0x44, 0x7b, 0xf0, 0x0f, 0xa1, 0xa5, 0x7a, 0x21, + 0xb4, 0x14, 0x69, 0xb7, 0xdc, 0x78, 0x2b, 0xb7, 0xbc, 0xf7, 0x35, 0x34, 0x57, 0xca, 0x7d, 0x9d, + 0x03, 0x43, 0x77, 0xf0, 0x08, 0xb6, 0x17, 0x0b, 0x7e, 0x9d, 0xb5, 0xb9, 0x36, 0xbc, 0x56, 0xf2, + 0x1b, 0x75, 0xe9, 0x6f, 0x06, 0xd8, 0xbd, 0x38, 0x7c, 0xd3, 0x81, 0x13, 
0x2d, 0x7d, 0x1d, 0xfc, + 0x24, 0x17, 0x35, 0xc5, 0x6f, 0xf2, 0x61, 0xde, 0x42, 0x55, 0xbc, 0x95, 0x77, 0xe5, 0xf3, 0x9b, + 0xbb, 0x5e, 0xe9, 0xa2, 0xb7, 0x3e, 0x6e, 0x7f, 0x6d, 0x40, 0xbd, 0xd8, 0x55, 0x87, 0x50, 0x8d, + 0xe2, 0xab, 0x04, 0x0d, 0x8b, 0x67, 0x4c, 0xfb, 0xc5, 0xe8, 0x57, 0x28, 0xea, 0x25, 0x2e, 0xe2, + 0xf9, 0x63, 0x5d, 0xe0, 0xd4, 0xab, 0x2a, 0x71, 0x11, 0x27, 0x9f, 0x83, 0xf3, 0xaa, 0xd8, 0x73, + 0x58, 0x72, 0xa3, 0x73, 0x0b, 0xc1, 0xcb, 0x5b, 0x52, 0xbc, 0x68, 0x0a, 0x49, 0x3a, 0xda, 0x96, + 0xae, 0xa2, 0x95, 0x9c, 0xab, 0xa5, 0xf7, 0x40, 0xbc, 0x73, 0x05, 0x8e, 0x7c, 0x02, 0xf5, 0x54, + 0x6e, 0x6f, 0x24, 0xa6, 0xd1, 0x79, 0x07, 0x4d, 0x16, 0xf7, 0x7f, 0xbf, 0x42, 0x0b, 0x14, 0x39, + 0x02, 0x6b, 0x24, 0x36, 0x9d, 0xe7, 0x6a, 0x7f, 0x1c, 0xe5, 0xee, 0xeb, 0x57, 0xa8, 0xd4, 0x13, + 0x1f, 0xac, 0xa9, 0x98, 0x11, 0xaf, 0x89, 0x40, 0x28, 0xa7, 0x46, 0x60, 0x50, 0x45, 0xee, 0x81, + 0xc9, 0xe2, 0xd0, 0x23, 0x88, 0xd8, 0x5a, 0xb8, 0xc1, 0x7e, 0x85, 0x0a, 0x5d, 0xd7, 0x81, 0xfa, + 0x35, 0xcb, 0xb2, 0x60, 0xcc, 0xfc, 0x5f, 0x4c, 0xb0, 0xd5, 0x46, 0x3f, 0x5a, 0xe0, 0xbc, 0xb9, + 0xf2, 0x4b, 0xa5, 0x48, 0x3f, 0x5a, 0x20, 0xbd, 0xa9, 0x91, 0xae, 0x03, 0x23, 0x4e, 0xbe, 0x58, + 0x65, 0xfd, 0xf6, 0x32, 0xeb, 0xca, 0x44, 0xa3, 0xfd, 0xfe, 0x0a, 0xed, 0xb7, 0x96, 0x68, 0x57, + 0x56, 0x25, 0xef, 0x9f, 0x2e, 0xf3, 0xbe, 0xbb, 0xc8, 0xbb, 0x32, 0x51, 0xc4, 0x1f, 0x17, 0x4f, + 0x58, 0x0d, 0xf1, 0x44, 0xb2, 0xa5, 0xbf, 0x72, 0x82, 0x57, 0x84, 0xfc, 0x8f, 0x97, 0x74, 0x7c, + 0x0f, 0xec, 0xe2, 0x1f, 0x98, 0x00, 0xd4, 0x2e, 0x86, 0xb4, 0x77, 0xf2, 0xbd, 0x5b, 0x21, 0x0e, + 0x58, 0xdd, 0x93, 0xe1, 0xb7, 0x7d, 0xd7, 0x38, 0x3e, 0x05, 0x47, 0xfd, 0xac, 0x11, 0x1b, 0xaa, + 0xdd, 0xf3, 0xf3, 0xa7, 0x6e, 0x85, 0xd4, 0xc1, 0x1c, 0x9c, 0x0d, 0x5d, 0x43, 0x98, 0x9d, 0x9e, + 0x3f, 0xef, 0x3e, 0xed, 0xb9, 0x1b, 0xb9, 0x8b, 0xc1, 0xd9, 0x77, 0xae, 0x49, 0x36, 0xc1, 0x3e, + 0x7d, 0x4e, 0x4f, 0x86, 0x83, 0xf3, 0x33, 0xb7, 0x3a, 0xaa, 0xe1, 0x2f, 0xfd, 0xfd, 0xbf, 0x03, + 0x00, 0x00, 
0xff, 0xff, 0x29, 0x62, 0x27, 0x91, 0xdf, 0x0b, 0x00, 0x00, } diff --git a/udf_test.go b/udf_test.go index e1f106f06..8360bfc2b 100644 --- a/udf_test.go +++ b/udf_test.go @@ -11,6 +11,7 @@ import ( "time" "github.com/influxdata/kapacitor" + "github.com/influxdata/kapacitor/command" "github.com/influxdata/kapacitor/models" "github.com/influxdata/kapacitor/udf" udf_test "github.com/influxdata/kapacitor/udf/test" @@ -27,7 +28,7 @@ func newUDFProcess(name string) (*kapacitor.UDFProcess, *udf_test.IO) { uio := udf_test.NewIO() cmd := newTestCommander(uio) l := log.New(os.Stderr, fmt.Sprintf("[%s] ", name), log.LstdFlags) - u := kapacitor.NewUDFProcess(cmd, l, 0, nil) + u := kapacitor.NewUDFProcess(cmd, command.Spec{}, l, 0, nil) return u, uio } @@ -230,13 +231,13 @@ type testCommander struct { uio *udf_test.IO } -func newTestCommander(uio *udf_test.IO) kapacitor.Commander { +func newTestCommander(uio *udf_test.IO) command.Commander { return &testCommander{ uio: uio, } } -func (c *testCommander) NewCommand() kapacitor.Command { +func (c *testCommander) NewCommand(command.Spec) command.Command { return c } @@ -244,6 +245,10 @@ func (c *testCommander) Start() error { return nil } func (c *testCommander) Wait() error { return nil } +func (c *testCommander) Stdin(io.Reader) {} +func (c *testCommander) Stdout(io.Writer) {} +func (c *testCommander) Stderr(io.Writer) {} + func (c *testCommander) StdinPipe() (io.WriteCloser, error) { return c.uio.In(), nil } diff --git a/usr/share/bash-completion/completions/kapacitor b/usr/share/bash-completion/completions/kapacitor index 32d7b79e5..993181968 100644 --- a/usr/share/bash-completion/completions/kapacitor +++ b/usr/share/bash-completion/completions/kapacitor @@ -83,6 +83,9 @@ _kapacitor() esac fi ;; + define-handler) + _kapacitor_json_yaml_files "${cur}" + ;; replay) case "$prev" in -recording) @@ -148,17 +151,20 @@ _kapacitor() ;; delete|list) case "${COMP_WORDS[2]}" in - tasks|templates|recordings|replays|service-tests) + 
tasks|templates|recordings|replays|handlers|topics|service-tests) words=$(_kapacitor_list "${COMP_WORDS[2]}" "$cur") ;; *) - words='tasks templates recordings replays service-tests' + words='tasks templates recordings replays handlers topics service-tests' ;; esac ;; show-template) words=$(_kapacitor_list templates "$cur") ;; + show-handler) + words=$(_kapacitor_list handlers "$cur") + ;; level) words='debug info warn error' ;; @@ -172,8 +178,8 @@ _kapacitor() esac ;; *) - words='record define define-template replay replay-live enable disable \ - reload delete list show show-template level stats version vars service-tests help' + words='record define define-template define-handler replay replay-live enable disable \ + reload delete list show show-template show-handler level stats version vars service-tests help' ;; esac if [ -z "$COMPREPLY" ] @@ -216,6 +222,16 @@ _kapacitor_json_files() _kapacitor_files_add_postfix } +_kapacitor_json_yaml_files() +{ + local didi cur="$1" + COMPREPLY=($(compgen -o filenames -f -X '!*.json' -- "$cur") ) + COMPREPLY+=($(compgen -o filenames -f -X '!*.yaml' -- "$cur") ) + didi=$(compgen -d -- "$cur") + COMPREPLY=(${COMPREPLY[@]:-} $didi ) + _kapacitor_files_add_postfix +} + _kapacitor_list() { # List a certain kind of object and return a set of IDs diff --git a/vendor.list b/vendor.list index 96842cc5d..f64fa1973 100644 --- a/vendor.list +++ b/vendor.list @@ -1,8 +1,10 @@ github.com/BurntSushi/toml github.com/boltdb/bolt github.com/cenkalti/backoff +github.com/davecgh/go-spew github.com/dgrijalva/jwt-go v3.0.0 github.com/dustin/go-humanize +github.com/evanphx/json-patch github.com/gogo/protobuf github.com/golang/protobuf github.com/gorhill/cronexpr @@ -11,6 +13,7 @@ github.com/influxdata/usage-client github.com/kimor79/gollectd github.com/mattn/go-runewidth github.com/mitchellh/copystructure +github.com/mitchellh/mapstructure github.com/mitchellh/reflectwalk github.com/pkg/errors github.com/russross/blackfriday @@ -21,6 +24,7 @@ 
github.com/shurcooL/sanitized_anchor_name github.com/twinj/uuid golang.org/x/crypto master gopkg.in/gomail.v2 +gopkg.in/yaml.v2 # Generate deps github.com/benbjohnson/tmpl # Testing deps diff --git a/vendor/github.com/davecgh/go-spew/.gitignore b/vendor/github.com/davecgh/go-spew/.gitignore new file mode 100644 index 000000000..00268614f --- /dev/null +++ b/vendor/github.com/davecgh/go-spew/.gitignore @@ -0,0 +1,22 @@ +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe diff --git a/vendor/github.com/davecgh/go-spew/.gitrepo b/vendor/github.com/davecgh/go-spew/.gitrepo new file mode 100644 index 000000000..251de3277 --- /dev/null +++ b/vendor/github.com/davecgh/go-spew/.gitrepo @@ -0,0 +1,11 @@ +; DO NOT EDIT (unless you know what you are doing) +; +; This subdirectory is a git "subrepo", and this file is maintained by the +; git-subrepo command. 
See https://github.com/git-commands/git-subrepo#readme +; +[subrepo] + remote = https://github.com/davecgh/go-spew.git + branch = master + commit = 346938d642f2ec3594ed81d874461961cd0faa76 + parent = 8a66eb21ac7c30b9411cb23b70aa7bc0e161d2d2 + cmdver = 0.3.0 diff --git a/vendor/github.com/davecgh/go-spew/.travis.yml b/vendor/github.com/davecgh/go-spew/.travis.yml new file mode 100644 index 000000000..984e0736e --- /dev/null +++ b/vendor/github.com/davecgh/go-spew/.travis.yml @@ -0,0 +1,14 @@ +language: go +go: + - 1.5.4 + - 1.6.3 + - 1.7 +install: + - go get -v golang.org/x/tools/cmd/cover +script: + - go test -v -tags=safe ./spew + - go test -v -tags=testcgo ./spew -covermode=count -coverprofile=profile.cov +after_success: + - go get -v github.com/mattn/goveralls + - export PATH=$PATH:$HOME/gopath/bin + - goveralls -coverprofile=profile.cov -service=travis-ci diff --git a/vendor/github.com/davecgh/go-spew/LICENSE b/vendor/github.com/davecgh/go-spew/LICENSE new file mode 100644 index 000000000..c83641619 --- /dev/null +++ b/vendor/github.com/davecgh/go-spew/LICENSE @@ -0,0 +1,15 @@ +ISC License + +Copyright (c) 2012-2016 Dave Collins + +Permission to use, copy, modify, and distribute this software for any +purpose with or without fee is hereby granted, provided that the above +copyright notice and this permission notice appear in all copies. + +THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES +WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF +MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR +ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES +WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN +ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF +OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
diff --git a/vendor/github.com/davecgh/go-spew/README.md b/vendor/github.com/davecgh/go-spew/README.md new file mode 100644 index 000000000..262430449 --- /dev/null +++ b/vendor/github.com/davecgh/go-spew/README.md @@ -0,0 +1,205 @@ +go-spew +======= + +[![Build Status](https://img.shields.io/travis/davecgh/go-spew.svg)] +(https://travis-ci.org/davecgh/go-spew) [![ISC License] +(http://img.shields.io/badge/license-ISC-blue.svg)](http://copyfree.org) [![Coverage Status] +(https://img.shields.io/coveralls/davecgh/go-spew.svg)] +(https://coveralls.io/r/davecgh/go-spew?branch=master) + + +Go-spew implements a deep pretty printer for Go data structures to aid in +debugging. A comprehensive suite of tests with 100% test coverage is provided +to ensure proper functionality. See `test_coverage.txt` for the gocov coverage +report. Go-spew is licensed under the liberal ISC license, so it may be used in +open source or commercial projects. + +If you're interested in reading about how this package came to life and some +of the challenges involved in providing a deep pretty printer, there is a blog +post about it +[here](https://web.archive.org/web/20160304013555/https://blog.cyphertite.com/go-spew-a-journey-into-dumping-go-data-structures/). 
+ +## Documentation + +[![GoDoc](https://img.shields.io/badge/godoc-reference-blue.svg)] +(http://godoc.org/github.com/davecgh/go-spew/spew) + +Full `go doc` style documentation for the project can be viewed online without +installing this package by using the excellent GoDoc site here: +http://godoc.org/github.com/davecgh/go-spew/spew + +You can also view the documentation locally once the package is installed with +the `godoc` tool by running `godoc -http=":6060"` and pointing your browser to +http://localhost:6060/pkg/github.com/davecgh/go-spew/spew + +## Installation + +```bash +$ go get -u github.com/davecgh/go-spew/spew +``` + +## Quick Start + +Add this import line to the file you're working in: + +```Go +import "github.com/davecgh/go-spew/spew" +``` + +To dump a variable with full newlines, indentation, type, and pointer +information use Dump, Fdump, or Sdump: + +```Go +spew.Dump(myVar1, myVar2, ...) +spew.Fdump(someWriter, myVar1, myVar2, ...) +str := spew.Sdump(myVar1, myVar2, ...) +``` + +Alternatively, if you would prefer to use format strings with a compacted inline +printing style, use the convenience wrappers Printf, Fprintf, etc with %v (most +compact), %+v (adds pointer addresses), %#v (adds types), or %#+v (adds types +and pointer addresses): + +```Go +spew.Printf("myVar1: %v -- myVar2: %+v", myVar1, myVar2) +spew.Printf("myVar3: %#v -- myVar4: %#+v", myVar3, myVar4) +spew.Fprintf(someWriter, "myVar1: %v -- myVar2: %+v", myVar1, myVar2) +spew.Fprintf(someWriter, "myVar3: %#v -- myVar4: %#+v", myVar3, myVar4) +``` + +## Debugging a Web Application Example + +Here is an example of how you can use `spew.Sdump()` to help debug a web application. Please be sure to wrap your output using the `html.EscapeString()` function for safety reasons. You should also only use this debugging technique in a development environment, never in production. 
+ +```Go +package main + +import ( + "fmt" + "html" + "net/http" + + "github.com/davecgh/go-spew/spew" +) + +func handler(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "text/html") + fmt.Fprintf(w, "Hi there, %s!", r.URL.Path[1:]) + fmt.Fprintf(w, "") +} + +func main() { + http.HandleFunc("/", handler) + http.ListenAndServe(":8080", nil) +} +``` + +## Sample Dump Output + +``` +(main.Foo) { + unexportedField: (*main.Bar)(0xf84002e210)({ + flag: (main.Flag) flagTwo, + data: (uintptr) + }), + ExportedField: (map[interface {}]interface {}) { + (string) "one": (bool) true + } +} +([]uint8) { + 00000000 11 12 13 14 15 16 17 18 19 1a 1b 1c 1d 1e 1f 20 |............... | + 00000010 21 22 23 24 25 26 27 28 29 2a 2b 2c 2d 2e 2f 30 |!"#$%&'()*+,-./0| + 00000020 31 32 |12| +} +``` + +## Sample Formatter Output + +Double pointer to a uint8: +``` + %v: <**>5 + %+v: <**>(0xf8400420d0->0xf8400420c8)5 + %#v: (**uint8)5 + %#+v: (**uint8)(0xf8400420d0->0xf8400420c8)5 +``` + +Pointer to circular struct with a uint8 field and a pointer to itself: +``` + %v: <*>{1 <*>} + %+v: <*>(0xf84003e260){ui8:1 c:<*>(0xf84003e260)} + %#v: (*main.circular){ui8:(uint8)1 c:(*main.circular)} + %#+v: (*main.circular)(0xf84003e260){ui8:(uint8)1 c:(*main.circular)(0xf84003e260)} +``` + +## Configuration Options + +Configuration of spew is handled by fields in the ConfigState type. For +convenience, all of the top-level functions use a global state available via the +spew.Config global. + +It is also possible to create a ConfigState instance that provides methods +equivalent to the top-level functions. This allows concurrent configuration +options. See the ConfigState documentation for more details. + +``` +* Indent + String to use for each indentation level for Dump functions. + It is a single space by default. A popular alternative is "\t". + +* MaxDepth + Maximum number of levels to descend into nested data structures. + There is no limit by default. 
+ +* DisableMethods + Disables invocation of error and Stringer interface methods. + Method invocation is enabled by default. + +* DisablePointerMethods + Disables invocation of error and Stringer interface methods on types + which only accept pointer receivers from non-pointer variables. This option + relies on access to the unsafe package, so it will not have any effect when + running in environments without access to the unsafe package such as Google + App Engine or with the "safe" build tag specified. + Pointer method invocation is enabled by default. + +* DisablePointerAddresses + DisablePointerAddresses specifies whether to disable the printing of + pointer addresses. This is useful when diffing data structures in tests. + +* DisableCapacities + DisableCapacities specifies whether to disable the printing of capacities + for arrays, slices, maps and channels. This is useful when diffing data + structures in tests. + +* ContinueOnMethod + Enables recursion into types after invoking error and Stringer interface + methods. Recursion after method invocation is disabled by default. + +* SortKeys + Specifies map keys should be sorted before being printed. Use + this to have a more deterministic, diffable output. Note that + only native types (bool, int, uint, floats, uintptr and string) + and types which implement error or Stringer interfaces are supported, + with other types sorted according to the reflect.Value.String() output + which guarantees display stability. Natural map order is used by + default. + +* SpewKeys + SpewKeys specifies that, as a last resort attempt, map keys should be + spewed to strings and sorted by those strings. This is only considered + if SortKeys is true. + +``` + +## Unsafe Package Dependency + +This package relies on the unsafe package to perform some of the more advanced +features, however it also supports a "limited" mode which allows it to work in +environments where the unsafe package is not available. 
By default, it will +operate in this mode on Google App Engine and when compiled with GopherJS. The +"safe" build tag may also be specified to force the package to build without +using the unsafe package. + +## License + +Go-spew is licensed under the [copyfree](http://copyfree.org) ISC License. diff --git a/vendor/github.com/davecgh/go-spew/cov_report.sh b/vendor/github.com/davecgh/go-spew/cov_report.sh new file mode 100644 index 000000000..9579497e4 --- /dev/null +++ b/vendor/github.com/davecgh/go-spew/cov_report.sh @@ -0,0 +1,22 @@ +#!/bin/sh + +# This script uses gocov to generate a test coverage report. +# The gocov tool my be obtained with the following command: +# go get github.com/axw/gocov/gocov +# +# It will be installed to $GOPATH/bin, so ensure that location is in your $PATH. + +# Check for gocov. +if ! type gocov >/dev/null 2>&1; then + echo >&2 "This script requires the gocov tool." + echo >&2 "You may obtain it with the following command:" + echo >&2 "go get github.com/axw/gocov/gocov" + exit 1 +fi + +# Only run the cgo tests if gcc is installed. +if type gcc >/dev/null 2>&1; then + (cd spew && gocov test -tags testcgo | gocov report) +else + (cd spew && gocov test | gocov report) +fi diff --git a/vendor/github.com/davecgh/go-spew/spew/bypass.go b/vendor/github.com/davecgh/go-spew/spew/bypass.go new file mode 100644 index 000000000..8a4a6589a --- /dev/null +++ b/vendor/github.com/davecgh/go-spew/spew/bypass.go @@ -0,0 +1,152 @@ +// Copyright (c) 2015-2016 Dave Collins +// +// Permission to use, copy, modify, and distribute this software for any +// purpose with or without fee is hereby granted, provided that the above +// copyright notice and this permission notice appear in all copies. +// +// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES +// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF +// MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR +// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES +// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN +// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF +// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + +// NOTE: Due to the following build constraints, this file will only be compiled +// when the code is not running on Google App Engine, compiled by GopherJS, and +// "-tags safe" is not added to the go build command line. The "disableunsafe" +// tag is deprecated and thus should not be used. +// +build !js,!appengine,!safe,!disableunsafe + +package spew + +import ( + "reflect" + "unsafe" +) + +const ( + // UnsafeDisabled is a build-time constant which specifies whether or + // not access to the unsafe package is available. + UnsafeDisabled = false + + // ptrSize is the size of a pointer on the current arch. + ptrSize = unsafe.Sizeof((*byte)(nil)) +) + +var ( + // offsetPtr, offsetScalar, and offsetFlag are the offsets for the + // internal reflect.Value fields. These values are valid before golang + // commit ecccf07e7f9d which changed the format. The are also valid + // after commit 82f48826c6c7 which changed the format again to mirror + // the original format. Code in the init function updates these offsets + // as necessary. + offsetPtr = uintptr(ptrSize) + offsetScalar = uintptr(0) + offsetFlag = uintptr(ptrSize * 2) + + // flagKindWidth and flagKindShift indicate various bits that the + // reflect package uses internally to track kind information. + // + // flagRO indicates whether or not the value field of a reflect.Value is + // read-only. + // + // flagIndir indicates whether the value field of a reflect.Value is + // the actual data or a pointer to the data. + // + // These values are valid before golang commit 90a7c3c86944 which + // changed their positions. Code in the init function updates these + // flags as necessary. 
+ flagKindWidth = uintptr(5) + flagKindShift = uintptr(flagKindWidth - 1) + flagRO = uintptr(1 << 0) + flagIndir = uintptr(1 << 1) +) + +func init() { + // Older versions of reflect.Value stored small integers directly in the + // ptr field (which is named val in the older versions). Versions + // between commits ecccf07e7f9d and 82f48826c6c7 added a new field named + // scalar for this purpose which unfortunately came before the flag + // field, so the offset of the flag field is different for those + // versions. + // + // This code constructs a new reflect.Value from a known small integer + // and checks if the size of the reflect.Value struct indicates it has + // the scalar field. When it does, the offsets are updated accordingly. + vv := reflect.ValueOf(0xf00) + if unsafe.Sizeof(vv) == (ptrSize * 4) { + offsetScalar = ptrSize * 2 + offsetFlag = ptrSize * 3 + } + + // Commit 90a7c3c86944 changed the flag positions such that the low + // order bits are the kind. This code extracts the kind from the flags + // field and ensures it's the correct type. When it's not, the flag + // order has been changed to the newer format, so the flags are updated + // accordingly. + upf := unsafe.Pointer(uintptr(unsafe.Pointer(&vv)) + offsetFlag) + upfv := *(*uintptr)(upf) + flagKindMask := uintptr((1<>flagKindShift != uintptr(reflect.Int) { + flagKindShift = 0 + flagRO = 1 << 5 + flagIndir = 1 << 6 + + // Commit adf9b30e5594 modified the flags to separate the + // flagRO flag into two bits which specifies whether or not the + // field is embedded. This causes flagIndir to move over a bit + // and means that flagRO is the combination of either of the + // original flagRO bit and the new bit. + // + // This code detects the change by extracting what used to be + // the indirect bit to ensure it's set. When it's not, the flag + // order has been changed to the newer format, so the flags are + // updated accordingly. 
+ if upfv&flagIndir == 0 { + flagRO = 3 << 5 + flagIndir = 1 << 7 + } + } +} + +// unsafeReflectValue converts the passed reflect.Value into a one that bypasses +// the typical safety restrictions preventing access to unaddressable and +// unexported data. It works by digging the raw pointer to the underlying +// value out of the protected value and generating a new unprotected (unsafe) +// reflect.Value to it. +// +// This allows us to check for implementations of the Stringer and error +// interfaces to be used for pretty printing ordinarily unaddressable and +// inaccessible values such as unexported struct fields. +func unsafeReflectValue(v reflect.Value) (rv reflect.Value) { + indirects := 1 + vt := v.Type() + upv := unsafe.Pointer(uintptr(unsafe.Pointer(&v)) + offsetPtr) + rvf := *(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&v)) + offsetFlag)) + if rvf&flagIndir != 0 { + vt = reflect.PtrTo(v.Type()) + indirects++ + } else if offsetScalar != 0 { + // The value is in the scalar field when it's not one of the + // reference types. + switch vt.Kind() { + case reflect.Uintptr: + case reflect.Chan: + case reflect.Func: + case reflect.Map: + case reflect.Ptr: + case reflect.UnsafePointer: + default: + upv = unsafe.Pointer(uintptr(unsafe.Pointer(&v)) + + offsetScalar) + } + } + + pv := reflect.NewAt(vt, upv) + rv = pv + for i := 0; i < indirects; i++ { + rv = rv.Elem() + } + return rv +} diff --git a/vendor/github.com/davecgh/go-spew/spew/bypasssafe.go b/vendor/github.com/davecgh/go-spew/spew/bypasssafe.go new file mode 100644 index 000000000..1fe3cf3d5 --- /dev/null +++ b/vendor/github.com/davecgh/go-spew/spew/bypasssafe.go @@ -0,0 +1,38 @@ +// Copyright (c) 2015-2016 Dave Collins +// +// Permission to use, copy, modify, and distribute this software for any +// purpose with or without fee is hereby granted, provided that the above +// copyright notice and this permission notice appear in all copies. 
+// +// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES +// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF +// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR +// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES +// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN +// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF +// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + +// NOTE: Due to the following build constraints, this file will only be compiled +// when the code is running on Google App Engine, compiled by GopherJS, or +// "-tags safe" is added to the go build command line. The "disableunsafe" +// tag is deprecated and thus should not be used. +// +build js appengine safe disableunsafe + +package spew + +import "reflect" + +const ( + // UnsafeDisabled is a build-time constant which specifies whether or + // not access to the unsafe package is available. + UnsafeDisabled = true +) + +// unsafeReflectValue typically converts the passed reflect.Value into a one +// that bypasses the typical safety restrictions preventing access to +// unaddressable and unexported data. However, doing this relies on access to +// the unsafe package. This is a stub version which simply returns the passed +// reflect.Value when the unsafe package is not available. 
+func unsafeReflectValue(v reflect.Value) reflect.Value { + return v +} diff --git a/vendor/github.com/davecgh/go-spew/spew/common.go b/vendor/github.com/davecgh/go-spew/spew/common.go new file mode 100644 index 000000000..7c519ff47 --- /dev/null +++ b/vendor/github.com/davecgh/go-spew/spew/common.go @@ -0,0 +1,341 @@ +/* + * Copyright (c) 2013-2016 Dave Collins + * + * Permission to use, copy, modify, and distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +package spew + +import ( + "bytes" + "fmt" + "io" + "reflect" + "sort" + "strconv" +) + +// Some constants in the form of bytes to avoid string overhead. This mirrors +// the technique used in the fmt package. 
+var ( + panicBytes = []byte("(PANIC=") + plusBytes = []byte("+") + iBytes = []byte("i") + trueBytes = []byte("true") + falseBytes = []byte("false") + interfaceBytes = []byte("(interface {})") + commaNewlineBytes = []byte(",\n") + newlineBytes = []byte("\n") + openBraceBytes = []byte("{") + openBraceNewlineBytes = []byte("{\n") + closeBraceBytes = []byte("}") + asteriskBytes = []byte("*") + colonBytes = []byte(":") + colonSpaceBytes = []byte(": ") + openParenBytes = []byte("(") + closeParenBytes = []byte(")") + spaceBytes = []byte(" ") + pointerChainBytes = []byte("->") + nilAngleBytes = []byte("") + maxNewlineBytes = []byte("\n") + maxShortBytes = []byte("") + circularBytes = []byte("") + circularShortBytes = []byte("") + invalidAngleBytes = []byte("") + openBracketBytes = []byte("[") + closeBracketBytes = []byte("]") + percentBytes = []byte("%") + precisionBytes = []byte(".") + openAngleBytes = []byte("<") + closeAngleBytes = []byte(">") + openMapBytes = []byte("map[") + closeMapBytes = []byte("]") + lenEqualsBytes = []byte("len=") + capEqualsBytes = []byte("cap=") +) + +// hexDigits is used to map a decimal value to a hex digit. +var hexDigits = "0123456789abcdef" + +// catchPanic handles any panics that might occur during the handleMethods +// calls. +func catchPanic(w io.Writer, v reflect.Value) { + if err := recover(); err != nil { + w.Write(panicBytes) + fmt.Fprintf(w, "%v", err) + w.Write(closeParenBytes) + } +} + +// handleMethods attempts to call the Error and String methods on the underlying +// type the passed reflect.Value represents and outputes the result to Writer w. +// +// It handles panics in any called methods by catching and displaying the error +// as the formatted value. +func handleMethods(cs *ConfigState, w io.Writer, v reflect.Value) (handled bool) { + // We need an interface to check if the type implements the error or + // Stringer interface. 
However, the reflect package won't give us an + // interface on certain things like unexported struct fields in order + // to enforce visibility rules. We use unsafe, when it's available, + // to bypass these restrictions since this package does not mutate the + // values. + if !v.CanInterface() { + if UnsafeDisabled { + return false + } + + v = unsafeReflectValue(v) + } + + // Choose whether or not to do error and Stringer interface lookups against + // the base type or a pointer to the base type depending on settings. + // Technically calling one of these methods with a pointer receiver can + // mutate the value, however, types which choose to satisify an error or + // Stringer interface with a pointer receiver should not be mutating their + // state inside these interface methods. + if !cs.DisablePointerMethods && !UnsafeDisabled && !v.CanAddr() { + v = unsafeReflectValue(v) + } + if v.CanAddr() { + v = v.Addr() + } + + // Is it an error or Stringer? + switch iface := v.Interface().(type) { + case error: + defer catchPanic(w, v) + if cs.ContinueOnMethod { + w.Write(openParenBytes) + w.Write([]byte(iface.Error())) + w.Write(closeParenBytes) + w.Write(spaceBytes) + return false + } + + w.Write([]byte(iface.Error())) + return true + + case fmt.Stringer: + defer catchPanic(w, v) + if cs.ContinueOnMethod { + w.Write(openParenBytes) + w.Write([]byte(iface.String())) + w.Write(closeParenBytes) + w.Write(spaceBytes) + return false + } + w.Write([]byte(iface.String())) + return true + } + return false +} + +// printBool outputs a boolean value as true or false to Writer w. +func printBool(w io.Writer, val bool) { + if val { + w.Write(trueBytes) + } else { + w.Write(falseBytes) + } +} + +// printInt outputs a signed integer value to Writer w. +func printInt(w io.Writer, val int64, base int) { + w.Write([]byte(strconv.FormatInt(val, base))) +} + +// printUint outputs an unsigned integer value to Writer w. 
+func printUint(w io.Writer, val uint64, base int) { + w.Write([]byte(strconv.FormatUint(val, base))) +} + +// printFloat outputs a floating point value using the specified precision, +// which is expected to be 32 or 64bit, to Writer w. +func printFloat(w io.Writer, val float64, precision int) { + w.Write([]byte(strconv.FormatFloat(val, 'g', -1, precision))) +} + +// printComplex outputs a complex value using the specified float precision +// for the real and imaginary parts to Writer w. +func printComplex(w io.Writer, c complex128, floatPrecision int) { + r := real(c) + w.Write(openParenBytes) + w.Write([]byte(strconv.FormatFloat(r, 'g', -1, floatPrecision))) + i := imag(c) + if i >= 0 { + w.Write(plusBytes) + } + w.Write([]byte(strconv.FormatFloat(i, 'g', -1, floatPrecision))) + w.Write(iBytes) + w.Write(closeParenBytes) +} + +// printHexPtr outputs a uintptr formatted as hexidecimal with a leading '0x' +// prefix to Writer w. +func printHexPtr(w io.Writer, p uintptr) { + // Null pointer. + num := uint64(p) + if num == 0 { + w.Write(nilAngleBytes) + return + } + + // Max uint64 is 16 bytes in hex + 2 bytes for '0x' prefix + buf := make([]byte, 18) + + // It's simpler to construct the hex string right to left. + base := uint64(16) + i := len(buf) - 1 + for num >= base { + buf[i] = hexDigits[num%base] + num /= base + i-- + } + buf[i] = hexDigits[num] + + // Add '0x' prefix. + i-- + buf[i] = 'x' + i-- + buf[i] = '0' + + // Strip unused leading bytes. + buf = buf[i:] + w.Write(buf) +} + +// valuesSorter implements sort.Interface to allow a slice of reflect.Value +// elements to be sorted. +type valuesSorter struct { + values []reflect.Value + strings []string // either nil or same len and values + cs *ConfigState +} + +// newValuesSorter initializes a valuesSorter instance, which holds a set of +// surrogate keys on which the data should be sorted. It uses flags in +// ConfigState to decide if and how to populate those surrogate keys. 
+func newValuesSorter(values []reflect.Value, cs *ConfigState) sort.Interface { + vs := &valuesSorter{values: values, cs: cs} + if canSortSimply(vs.values[0].Kind()) { + return vs + } + if !cs.DisableMethods { + vs.strings = make([]string, len(values)) + for i := range vs.values { + b := bytes.Buffer{} + if !handleMethods(cs, &b, vs.values[i]) { + vs.strings = nil + break + } + vs.strings[i] = b.String() + } + } + if vs.strings == nil && cs.SpewKeys { + vs.strings = make([]string, len(values)) + for i := range vs.values { + vs.strings[i] = Sprintf("%#v", vs.values[i].Interface()) + } + } + return vs +} + +// canSortSimply tests whether a reflect.Kind is a primitive that can be sorted +// directly, or whether it should be considered for sorting by surrogate keys +// (if the ConfigState allows it). +func canSortSimply(kind reflect.Kind) bool { + // This switch parallels valueSortLess, except for the default case. + switch kind { + case reflect.Bool: + return true + case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int: + return true + case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint: + return true + case reflect.Float32, reflect.Float64: + return true + case reflect.String: + return true + case reflect.Uintptr: + return true + case reflect.Array: + return true + } + return false +} + +// Len returns the number of values in the slice. It is part of the +// sort.Interface implementation. +func (s *valuesSorter) Len() int { + return len(s.values) +} + +// Swap swaps the values at the passed indices. It is part of the +// sort.Interface implementation. +func (s *valuesSorter) Swap(i, j int) { + s.values[i], s.values[j] = s.values[j], s.values[i] + if s.strings != nil { + s.strings[i], s.strings[j] = s.strings[j], s.strings[i] + } +} + +// valueSortLess returns whether the first value should sort before the second +// value. It is used by valueSorter.Less as part of the sort.Interface +// implementation. 
+func valueSortLess(a, b reflect.Value) bool { + switch a.Kind() { + case reflect.Bool: + return !a.Bool() && b.Bool() + case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int: + return a.Int() < b.Int() + case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint: + return a.Uint() < b.Uint() + case reflect.Float32, reflect.Float64: + return a.Float() < b.Float() + case reflect.String: + return a.String() < b.String() + case reflect.Uintptr: + return a.Uint() < b.Uint() + case reflect.Array: + // Compare the contents of both arrays. + l := a.Len() + for i := 0; i < l; i++ { + av := a.Index(i) + bv := b.Index(i) + if av.Interface() == bv.Interface() { + continue + } + return valueSortLess(av, bv) + } + } + return a.String() < b.String() +} + +// Less returns whether the value at index i should sort before the +// value at index j. It is part of the sort.Interface implementation. +func (s *valuesSorter) Less(i, j int) bool { + if s.strings == nil { + return valueSortLess(s.values[i], s.values[j]) + } + return s.strings[i] < s.strings[j] +} + +// sortValues is a sort function that handles both native types and any type that +// can be converted to error or Stringer. Other inputs are sorted according to +// their Value.String() value to ensure display stability. +func sortValues(values []reflect.Value, cs *ConfigState) { + if len(values) == 0 { + return + } + sort.Sort(newValuesSorter(values, cs)) +} diff --git a/vendor/github.com/davecgh/go-spew/spew/common_test.go b/vendor/github.com/davecgh/go-spew/spew/common_test.go new file mode 100644 index 000000000..0f5ce47dc --- /dev/null +++ b/vendor/github.com/davecgh/go-spew/spew/common_test.go @@ -0,0 +1,298 @@ +/* + * Copyright (c) 2013-2016 Dave Collins + * + * Permission to use, copy, modify, and distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +package spew_test + +import ( + "fmt" + "reflect" + "testing" + + "github.com/davecgh/go-spew/spew" +) + +// custom type to test Stinger interface on non-pointer receiver. +type stringer string + +// String implements the Stringer interface for testing invocation of custom +// stringers on types with non-pointer receivers. +func (s stringer) String() string { + return "stringer " + string(s) +} + +// custom type to test Stinger interface on pointer receiver. +type pstringer string + +// String implements the Stringer interface for testing invocation of custom +// stringers on types with only pointer receivers. +func (s *pstringer) String() string { + return "stringer " + string(*s) +} + +// xref1 and xref2 are cross referencing structs for testing circular reference +// detection. +type xref1 struct { + ps2 *xref2 +} +type xref2 struct { + ps1 *xref1 +} + +// indirCir1, indirCir2, and indirCir3 are used to generate an indirect circular +// reference for testing detection. +type indirCir1 struct { + ps2 *indirCir2 +} +type indirCir2 struct { + ps3 *indirCir3 +} +type indirCir3 struct { + ps1 *indirCir1 +} + +// embed is used to test embedded structures. +type embed struct { + a string +} + +// embedwrap is used to test embedded structures. 
+type embedwrap struct { + *embed + e *embed +} + +// panicer is used to intentionally cause a panic for testing spew properly +// handles them +type panicer int + +func (p panicer) String() string { + panic("test panic") +} + +// customError is used to test custom error interface invocation. +type customError int + +func (e customError) Error() string { + return fmt.Sprintf("error: %d", int(e)) +} + +// stringizeWants converts a slice of wanted test output into a format suitable +// for a test error message. +func stringizeWants(wants []string) string { + s := "" + for i, want := range wants { + if i > 0 { + s += fmt.Sprintf("want%d: %s", i+1, want) + } else { + s += "want: " + want + } + } + return s +} + +// testFailed returns whether or not a test failed by checking if the result +// of the test is in the slice of wanted strings. +func testFailed(result string, wants []string) bool { + for _, want := range wants { + if result == want { + return false + } + } + return true +} + +type sortableStruct struct { + x int +} + +func (ss sortableStruct) String() string { + return fmt.Sprintf("ss.%d", ss.x) +} + +type unsortableStruct struct { + x int +} + +type sortTestCase struct { + input []reflect.Value + expected []reflect.Value +} + +func helpTestSortValues(tests []sortTestCase, cs *spew.ConfigState, t *testing.T) { + getInterfaces := func(values []reflect.Value) []interface{} { + interfaces := []interface{}{} + for _, v := range values { + interfaces = append(interfaces, v.Interface()) + } + return interfaces + } + + for _, test := range tests { + spew.SortValues(test.input, cs) + // reflect.DeepEqual cannot really make sense of reflect.Value, + // probably because of all the pointer tricks. For instance, + // v(2.0) != v(2.0) on a 32-bits system. Turn them into interface{} + // instead. 
+ input := getInterfaces(test.input) + expected := getInterfaces(test.expected) + if !reflect.DeepEqual(input, expected) { + t.Errorf("Sort mismatch:\n %v != %v", input, expected) + } + } +} + +// TestSortValues ensures the sort functionality for relect.Value based sorting +// works as intended. +func TestSortValues(t *testing.T) { + v := reflect.ValueOf + + a := v("a") + b := v("b") + c := v("c") + embedA := v(embed{"a"}) + embedB := v(embed{"b"}) + embedC := v(embed{"c"}) + tests := []sortTestCase{ + // No values. + { + []reflect.Value{}, + []reflect.Value{}, + }, + // Bools. + { + []reflect.Value{v(false), v(true), v(false)}, + []reflect.Value{v(false), v(false), v(true)}, + }, + // Ints. + { + []reflect.Value{v(2), v(1), v(3)}, + []reflect.Value{v(1), v(2), v(3)}, + }, + // Uints. + { + []reflect.Value{v(uint8(2)), v(uint8(1)), v(uint8(3))}, + []reflect.Value{v(uint8(1)), v(uint8(2)), v(uint8(3))}, + }, + // Floats. + { + []reflect.Value{v(2.0), v(1.0), v(3.0)}, + []reflect.Value{v(1.0), v(2.0), v(3.0)}, + }, + // Strings. + { + []reflect.Value{b, a, c}, + []reflect.Value{a, b, c}, + }, + // Array + { + []reflect.Value{v([3]int{3, 2, 1}), v([3]int{1, 3, 2}), v([3]int{1, 2, 3})}, + []reflect.Value{v([3]int{1, 2, 3}), v([3]int{1, 3, 2}), v([3]int{3, 2, 1})}, + }, + // Uintptrs. + { + []reflect.Value{v(uintptr(2)), v(uintptr(1)), v(uintptr(3))}, + []reflect.Value{v(uintptr(1)), v(uintptr(2)), v(uintptr(3))}, + }, + // SortableStructs. + { + // Note: not sorted - DisableMethods is set. + []reflect.Value{v(sortableStruct{2}), v(sortableStruct{1}), v(sortableStruct{3})}, + []reflect.Value{v(sortableStruct{2}), v(sortableStruct{1}), v(sortableStruct{3})}, + }, + // UnsortableStructs. + { + // Note: not sorted - SpewKeys is false. + []reflect.Value{v(unsortableStruct{2}), v(unsortableStruct{1}), v(unsortableStruct{3})}, + []reflect.Value{v(unsortableStruct{2}), v(unsortableStruct{1}), v(unsortableStruct{3})}, + }, + // Invalid. 
+ { + []reflect.Value{embedB, embedA, embedC}, + []reflect.Value{embedB, embedA, embedC}, + }, + } + cs := spew.ConfigState{DisableMethods: true, SpewKeys: false} + helpTestSortValues(tests, &cs, t) +} + +// TestSortValuesWithMethods ensures the sort functionality for relect.Value +// based sorting works as intended when using string methods. +func TestSortValuesWithMethods(t *testing.T) { + v := reflect.ValueOf + + a := v("a") + b := v("b") + c := v("c") + tests := []sortTestCase{ + // Ints. + { + []reflect.Value{v(2), v(1), v(3)}, + []reflect.Value{v(1), v(2), v(3)}, + }, + // Strings. + { + []reflect.Value{b, a, c}, + []reflect.Value{a, b, c}, + }, + // SortableStructs. + { + []reflect.Value{v(sortableStruct{2}), v(sortableStruct{1}), v(sortableStruct{3})}, + []reflect.Value{v(sortableStruct{1}), v(sortableStruct{2}), v(sortableStruct{3})}, + }, + // UnsortableStructs. + { + // Note: not sorted - SpewKeys is false. + []reflect.Value{v(unsortableStruct{2}), v(unsortableStruct{1}), v(unsortableStruct{3})}, + []reflect.Value{v(unsortableStruct{2}), v(unsortableStruct{1}), v(unsortableStruct{3})}, + }, + } + cs := spew.ConfigState{DisableMethods: false, SpewKeys: false} + helpTestSortValues(tests, &cs, t) +} + +// TestSortValuesWithSpew ensures the sort functionality for relect.Value +// based sorting works as intended when using spew to stringify keys. +func TestSortValuesWithSpew(t *testing.T) { + v := reflect.ValueOf + + a := v("a") + b := v("b") + c := v("c") + tests := []sortTestCase{ + // Ints. + { + []reflect.Value{v(2), v(1), v(3)}, + []reflect.Value{v(1), v(2), v(3)}, + }, + // Strings. + { + []reflect.Value{b, a, c}, + []reflect.Value{a, b, c}, + }, + // SortableStructs. + { + []reflect.Value{v(sortableStruct{2}), v(sortableStruct{1}), v(sortableStruct{3})}, + []reflect.Value{v(sortableStruct{1}), v(sortableStruct{2}), v(sortableStruct{3})}, + }, + // UnsortableStructs. 
+ { + []reflect.Value{v(unsortableStruct{2}), v(unsortableStruct{1}), v(unsortableStruct{3})}, + []reflect.Value{v(unsortableStruct{1}), v(unsortableStruct{2}), v(unsortableStruct{3})}, + }, + } + cs := spew.ConfigState{DisableMethods: true, SpewKeys: true} + helpTestSortValues(tests, &cs, t) +} diff --git a/vendor/github.com/davecgh/go-spew/spew/config.go b/vendor/github.com/davecgh/go-spew/spew/config.go new file mode 100644 index 000000000..2e3d22f31 --- /dev/null +++ b/vendor/github.com/davecgh/go-spew/spew/config.go @@ -0,0 +1,306 @@ +/* + * Copyright (c) 2013-2016 Dave Collins + * + * Permission to use, copy, modify, and distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +package spew + +import ( + "bytes" + "fmt" + "io" + "os" +) + +// ConfigState houses the configuration options used by spew to format and +// display values. There is a global instance, Config, that is used to control +// all top-level Formatter and Dump functionality. Each ConfigState instance +// provides methods equivalent to the top-level functions. +// +// The zero value for ConfigState provides no indentation. You would typically +// want to set it to a space or a tab. +// +// Alternatively, you can use NewDefaultConfig to get a ConfigState instance +// with default settings. See the documentation of NewDefaultConfig for default +// values. 
+type ConfigState struct { + // Indent specifies the string to use for each indentation level. The + // global config instance that all top-level functions use set this to a + // single space by default. If you would like more indentation, you might + // set this to a tab with "\t" or perhaps two spaces with " ". + Indent string + + // MaxDepth controls the maximum number of levels to descend into nested + // data structures. The default, 0, means there is no limit. + // + // NOTE: Circular data structures are properly detected, so it is not + // necessary to set this value unless you specifically want to limit deeply + // nested data structures. + MaxDepth int + + // DisableMethods specifies whether or not error and Stringer interfaces are + // invoked for types that implement them. + DisableMethods bool + + // DisablePointerMethods specifies whether or not to check for and invoke + // error and Stringer interfaces on types which only accept a pointer + // receiver when the current type is not a pointer. + // + // NOTE: This might be an unsafe action since calling one of these methods + // with a pointer receiver could technically mutate the value, however, + // in practice, types which choose to satisify an error or Stringer + // interface with a pointer receiver should not be mutating their state + // inside these interface methods. As a result, this option relies on + // access to the unsafe package, so it will not have any effect when + // running in environments without access to the unsafe package such as + // Google App Engine or with the "safe" build tag specified. + DisablePointerMethods bool + + // DisablePointerAddresses specifies whether to disable the printing of + // pointer addresses. This is useful when diffing data structures in tests. + DisablePointerAddresses bool + + // DisableCapacities specifies whether to disable the printing of capacities + // for arrays, slices, maps and channels. This is useful when diffing + // data structures in tests. 
+ DisableCapacities bool + + // ContinueOnMethod specifies whether or not recursion should continue once + // a custom error or Stringer interface is invoked. The default, false, + // means it will print the results of invoking the custom error or Stringer + // interface and return immediately instead of continuing to recurse into + // the internals of the data type. + // + // NOTE: This flag does not have any effect if method invocation is disabled + // via the DisableMethods or DisablePointerMethods options. + ContinueOnMethod bool + + // SortKeys specifies map keys should be sorted before being printed. Use + // this to have a more deterministic, diffable output. Note that only + // native types (bool, int, uint, floats, uintptr and string) and types + // that support the error or Stringer interfaces (if methods are + // enabled) are supported, with other types sorted according to the + // reflect.Value.String() output which guarantees display stability. + SortKeys bool + + // SpewKeys specifies that, as a last resort attempt, map keys should + // be spewed to strings and sorted by those strings. This is only + // considered if SortKeys is true. + SpewKeys bool +} + +// Config is the active configuration of the top-level functions. +// The configuration can be changed by modifying the contents of spew.Config. +var Config = ConfigState{Indent: " "} + +// Errorf is a wrapper for fmt.Errorf that treats each argument as if it were +// passed with a Formatter interface returned by c.NewFormatter. It returns +// the formatted string as a value that satisfies error. See NewFormatter +// for formatting details. +// +// This function is shorthand for the following syntax: +// +// fmt.Errorf(format, c.NewFormatter(a), c.NewFormatter(b)) +func (c *ConfigState) Errorf(format string, a ...interface{}) (err error) { + return fmt.Errorf(format, c.convertArgs(a)...) 
+} + +// Fprint is a wrapper for fmt.Fprint that treats each argument as if it were +// passed with a Formatter interface returned by c.NewFormatter. It returns +// the number of bytes written and any write error encountered. See +// NewFormatter for formatting details. +// +// This function is shorthand for the following syntax: +// +// fmt.Fprint(w, c.NewFormatter(a), c.NewFormatter(b)) +func (c *ConfigState) Fprint(w io.Writer, a ...interface{}) (n int, err error) { + return fmt.Fprint(w, c.convertArgs(a)...) +} + +// Fprintf is a wrapper for fmt.Fprintf that treats each argument as if it were +// passed with a Formatter interface returned by c.NewFormatter. It returns +// the number of bytes written and any write error encountered. See +// NewFormatter for formatting details. +// +// This function is shorthand for the following syntax: +// +// fmt.Fprintf(w, format, c.NewFormatter(a), c.NewFormatter(b)) +func (c *ConfigState) Fprintf(w io.Writer, format string, a ...interface{}) (n int, err error) { + return fmt.Fprintf(w, format, c.convertArgs(a)...) +} + +// Fprintln is a wrapper for fmt.Fprintln that treats each argument as if it +// passed with a Formatter interface returned by c.NewFormatter. See +// NewFormatter for formatting details. +// +// This function is shorthand for the following syntax: +// +// fmt.Fprintln(w, c.NewFormatter(a), c.NewFormatter(b)) +func (c *ConfigState) Fprintln(w io.Writer, a ...interface{}) (n int, err error) { + return fmt.Fprintln(w, c.convertArgs(a)...) +} + +// Print is a wrapper for fmt.Print that treats each argument as if it were +// passed with a Formatter interface returned by c.NewFormatter. It returns +// the number of bytes written and any write error encountered. See +// NewFormatter for formatting details. 
+// +// This function is shorthand for the following syntax: +// +// fmt.Print(c.NewFormatter(a), c.NewFormatter(b)) +func (c *ConfigState) Print(a ...interface{}) (n int, err error) { + return fmt.Print(c.convertArgs(a)...) +} + +// Printf is a wrapper for fmt.Printf that treats each argument as if it were +// passed with a Formatter interface returned by c.NewFormatter. It returns +// the number of bytes written and any write error encountered. See +// NewFormatter for formatting details. +// +// This function is shorthand for the following syntax: +// +// fmt.Printf(format, c.NewFormatter(a), c.NewFormatter(b)) +func (c *ConfigState) Printf(format string, a ...interface{}) (n int, err error) { + return fmt.Printf(format, c.convertArgs(a)...) +} + +// Println is a wrapper for fmt.Println that treats each argument as if it were +// passed with a Formatter interface returned by c.NewFormatter. It returns +// the number of bytes written and any write error encountered. See +// NewFormatter for formatting details. +// +// This function is shorthand for the following syntax: +// +// fmt.Println(c.NewFormatter(a), c.NewFormatter(b)) +func (c *ConfigState) Println(a ...interface{}) (n int, err error) { + return fmt.Println(c.convertArgs(a)...) +} + +// Sprint is a wrapper for fmt.Sprint that treats each argument as if it were +// passed with a Formatter interface returned by c.NewFormatter. It returns +// the resulting string. See NewFormatter for formatting details. +// +// This function is shorthand for the following syntax: +// +// fmt.Sprint(c.NewFormatter(a), c.NewFormatter(b)) +func (c *ConfigState) Sprint(a ...interface{}) string { + return fmt.Sprint(c.convertArgs(a)...) +} + +// Sprintf is a wrapper for fmt.Sprintf that treats each argument as if it were +// passed with a Formatter interface returned by c.NewFormatter. It returns +// the resulting string. See NewFormatter for formatting details. 
+// +// This function is shorthand for the following syntax: +// +// fmt.Sprintf(format, c.NewFormatter(a), c.NewFormatter(b)) +func (c *ConfigState) Sprintf(format string, a ...interface{}) string { + return fmt.Sprintf(format, c.convertArgs(a)...) +} + +// Sprintln is a wrapper for fmt.Sprintln that treats each argument as if it +// were passed with a Formatter interface returned by c.NewFormatter. It +// returns the resulting string. See NewFormatter for formatting details. +// +// This function is shorthand for the following syntax: +// +// fmt.Sprintln(c.NewFormatter(a), c.NewFormatter(b)) +func (c *ConfigState) Sprintln(a ...interface{}) string { + return fmt.Sprintln(c.convertArgs(a)...) +} + +/* +NewFormatter returns a custom formatter that satisfies the fmt.Formatter +interface. As a result, it integrates cleanly with standard fmt package +printing functions. The formatter is useful for inline printing of smaller data +types similar to the standard %v format specifier. + +The custom formatter only responds to the %v (most compact), %+v (adds pointer +addresses), %#v (adds types), and %#+v (adds types and pointer addresses) verb +combinations. Any other verbs such as %x and %q will be sent to the the +standard fmt package for formatting. In addition, the custom formatter ignores +the width and precision arguments (however they will still work on the format +specifiers not handled by the custom formatter). + +Typically this function shouldn't be called directly. It is much easier to make +use of the custom formatter by calling one of the convenience functions such as +c.Printf, c.Println, or c.Printf. +*/ +func (c *ConfigState) NewFormatter(v interface{}) fmt.Formatter { + return newFormatter(c, v) +} + +// Fdump formats and displays the passed arguments to io.Writer w. It formats +// exactly the same as Dump. +func (c *ConfigState) Fdump(w io.Writer, a ...interface{}) { + fdump(c, w, a...) 
+} + +/* +Dump displays the passed parameters to standard out with newlines, customizable +indentation, and additional debug information such as complete types and all +pointer addresses used to indirect to the final value. It provides the +following features over the built-in printing facilities provided by the fmt +package: + + * Pointers are dereferenced and followed + * Circular data structures are detected and handled properly + * Custom Stringer/error interfaces are optionally invoked, including + on unexported types + * Custom types which only implement the Stringer/error interfaces via + a pointer receiver are optionally invoked when passing non-pointer + variables + * Byte arrays and slices are dumped like the hexdump -C command which + includes offsets, byte values in hex, and ASCII output + +The configuration options are controlled by modifying the public members +of c. See ConfigState for options documentation. + +See Fdump if you would prefer dumping to an arbitrary io.Writer or Sdump to +get the formatted result as a string. +*/ +func (c *ConfigState) Dump(a ...interface{}) { + fdump(c, os.Stdout, a...) +} + +// Sdump returns a string with the passed arguments formatted exactly the same +// as Dump. +func (c *ConfigState) Sdump(a ...interface{}) string { + var buf bytes.Buffer + fdump(c, &buf, a...) + return buf.String() +} + +// convertArgs accepts a slice of arguments and returns a slice of the same +// length with each argument converted to a spew Formatter interface using +// the ConfigState associated with s. +func (c *ConfigState) convertArgs(args []interface{}) (formatters []interface{}) { + formatters = make([]interface{}, len(args)) + for index, arg := range args { + formatters[index] = newFormatter(c, arg) + } + return formatters +} + +// NewDefaultConfig returns a ConfigState with the following default settings. 
+// +// Indent: " " +// MaxDepth: 0 +// DisableMethods: false +// DisablePointerMethods: false +// ContinueOnMethod: false +// SortKeys: false +func NewDefaultConfig() *ConfigState { + return &ConfigState{Indent: " "} +} diff --git a/vendor/github.com/davecgh/go-spew/spew/doc.go b/vendor/github.com/davecgh/go-spew/spew/doc.go new file mode 100644 index 000000000..aacaac6f1 --- /dev/null +++ b/vendor/github.com/davecgh/go-spew/spew/doc.go @@ -0,0 +1,211 @@ +/* + * Copyright (c) 2013-2016 Dave Collins + * + * Permission to use, copy, modify, and distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +/* +Package spew implements a deep pretty printer for Go data structures to aid in +debugging. 
+ +A quick overview of the additional features spew provides over the built-in +printing facilities for Go data types are as follows: + + * Pointers are dereferenced and followed + * Circular data structures are detected and handled properly + * Custom Stringer/error interfaces are optionally invoked, including + on unexported types + * Custom types which only implement the Stringer/error interfaces via + a pointer receiver are optionally invoked when passing non-pointer + variables + * Byte arrays and slices are dumped like the hexdump -C command which + includes offsets, byte values in hex, and ASCII output (only when using + Dump style) + +There are two different approaches spew allows for dumping Go data structures: + + * Dump style which prints with newlines, customizable indentation, + and additional debug information such as types and all pointer addresses + used to indirect to the final value + * A custom Formatter interface that integrates cleanly with the standard fmt + package and replaces %v, %+v, %#v, and %#+v to provide inline printing + similar to the default %v while providing the additional functionality + outlined above and passing unsupported format verbs such as %x and %q + along to fmt + +Quick Start + +This section demonstrates how to quickly get started with spew. See the +sections below for further details on formatting and configuration options. + +To dump a variable with full newlines, indentation, type, and pointer +information use Dump, Fdump, or Sdump: + spew.Dump(myVar1, myVar2, ...) + spew.Fdump(someWriter, myVar1, myVar2, ...) + str := spew.Sdump(myVar1, myVar2, ...) 
+ +Alternatively, if you would prefer to use format strings with a compacted inline +printing style, use the convenience wrappers Printf, Fprintf, etc with +%v (most compact), %+v (adds pointer addresses), %#v (adds types), or +%#+v (adds types and pointer addresses): + spew.Printf("myVar1: %v -- myVar2: %+v", myVar1, myVar2) + spew.Printf("myVar3: %#v -- myVar4: %#+v", myVar3, myVar4) + spew.Fprintf(someWriter, "myVar1: %v -- myVar2: %+v", myVar1, myVar2) + spew.Fprintf(someWriter, "myVar3: %#v -- myVar4: %#+v", myVar3, myVar4) + +Configuration Options + +Configuration of spew is handled by fields in the ConfigState type. For +convenience, all of the top-level functions use a global state available +via the spew.Config global. + +It is also possible to create a ConfigState instance that provides methods +equivalent to the top-level functions. This allows concurrent configuration +options. See the ConfigState documentation for more details. + +The following configuration options are available: + * Indent + String to use for each indentation level for Dump functions. + It is a single space by default. A popular alternative is "\t". + + * MaxDepth + Maximum number of levels to descend into nested data structures. + There is no limit by default. + + * DisableMethods + Disables invocation of error and Stringer interface methods. + Method invocation is enabled by default. + + * DisablePointerMethods + Disables invocation of error and Stringer interface methods on types + which only accept pointer receivers from non-pointer variables. + Pointer method invocation is enabled by default. + + * DisablePointerAddresses + DisablePointerAddresses specifies whether to disable the printing of + pointer addresses. This is useful when diffing data structures in tests. + + * DisableCapacities + DisableCapacities specifies whether to disable the printing of + capacities for arrays, slices, maps and channels. This is useful when + diffing data structures in tests. 
+ + * ContinueOnMethod + Enables recursion into types after invoking error and Stringer interface + methods. Recursion after method invocation is disabled by default. + + * SortKeys + Specifies map keys should be sorted before being printed. Use + this to have a more deterministic, diffable output. Note that + only native types (bool, int, uint, floats, uintptr and string) + and types which implement error or Stringer interfaces are + supported with other types sorted according to the + reflect.Value.String() output which guarantees display + stability. Natural map order is used by default. + + * SpewKeys + Specifies that, as a last resort attempt, map keys should be + spewed to strings and sorted by those strings. This is only + considered if SortKeys is true. + +Dump Usage + +Simply call spew.Dump with a list of variables you want to dump: + + spew.Dump(myVar1, myVar2, ...) + +You may also call spew.Fdump if you would prefer to output to an arbitrary +io.Writer. For example, to dump to standard error: + + spew.Fdump(os.Stderr, myVar1, myVar2, ...) + +A third option is to call spew.Sdump to get the formatted output as a string: + + str := spew.Sdump(myVar1, myVar2, ...) + +Sample Dump Output + +See the Dump example for details on the setup of the types and variables being +shown here. + + (main.Foo) { + unexportedField: (*main.Bar)(0xf84002e210)({ + flag: (main.Flag) flagTwo, + data: (uintptr) + }), + ExportedField: (map[interface {}]interface {}) (len=1) { + (string) (len=3) "one": (bool) true + } + } + +Byte (and uint8) arrays and slices are displayed uniquely like the hexdump -C +command as shown. + ([]uint8) (len=32 cap=32) { + 00000000 11 12 13 14 15 16 17 18 19 1a 1b 1c 1d 1e 1f 20 |............... 
| + 00000010 21 22 23 24 25 26 27 28 29 2a 2b 2c 2d 2e 2f 30 |!"#$%&'()*+,-./0| + 00000020 31 32 |12| + } + +Custom Formatter + +Spew provides a custom formatter that implements the fmt.Formatter interface +so that it integrates cleanly with standard fmt package printing functions. The +formatter is useful for inline printing of smaller data types similar to the +standard %v format specifier. + +The custom formatter only responds to the %v (most compact), %+v (adds pointer +addresses), %#v (adds types), or %#+v (adds types and pointer addresses) verb +combinations. Any other verbs such as %x and %q will be sent to the the +standard fmt package for formatting. In addition, the custom formatter ignores +the width and precision arguments (however they will still work on the format +specifiers not handled by the custom formatter). + +Custom Formatter Usage + +The simplest way to make use of the spew custom formatter is to call one of the +convenience functions such as spew.Printf, spew.Println, or spew.Printf. The +functions have syntax you are most likely already familiar with: + + spew.Printf("myVar1: %v -- myVar2: %+v", myVar1, myVar2) + spew.Printf("myVar3: %#v -- myVar4: %#+v", myVar3, myVar4) + spew.Println(myVar, myVar2) + spew.Fprintf(os.Stderr, "myVar1: %v -- myVar2: %+v", myVar1, myVar2) + spew.Fprintf(os.Stderr, "myVar3: %#v -- myVar4: %#+v", myVar3, myVar4) + +See the Index for the full list convenience functions. + +Sample Formatter Output + +Double pointer to a uint8: + %v: <**>5 + %+v: <**>(0xf8400420d0->0xf8400420c8)5 + %#v: (**uint8)5 + %#+v: (**uint8)(0xf8400420d0->0xf8400420c8)5 + +Pointer to circular struct with a uint8 field and a pointer to itself: + %v: <*>{1 <*>} + %+v: <*>(0xf84003e260){ui8:1 c:<*>(0xf84003e260)} + %#v: (*main.circular){ui8:(uint8)1 c:(*main.circular)} + %#+v: (*main.circular)(0xf84003e260){ui8:(uint8)1 c:(*main.circular)(0xf84003e260)} + +See the Printf example for details on the setup of variables being shown +here. 
+ +Errors + +Since it is possible for custom Stringer/error interfaces to panic, spew +detects them and handles them internally by printing the panic information +inline with the output. Since spew is intended to provide deep pretty printing +capabilities on structures, it intentionally does not return any errors. +*/ +package spew diff --git a/vendor/github.com/davecgh/go-spew/spew/dump.go b/vendor/github.com/davecgh/go-spew/spew/dump.go new file mode 100644 index 000000000..df1d582a7 --- /dev/null +++ b/vendor/github.com/davecgh/go-spew/spew/dump.go @@ -0,0 +1,509 @@ +/* + * Copyright (c) 2013-2016 Dave Collins + * + * Permission to use, copy, modify, and distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +package spew + +import ( + "bytes" + "encoding/hex" + "fmt" + "io" + "os" + "reflect" + "regexp" + "strconv" + "strings" +) + +var ( + // uint8Type is a reflect.Type representing a uint8. It is used to + // convert cgo types to uint8 slices for hexdumping. + uint8Type = reflect.TypeOf(uint8(0)) + + // cCharRE is a regular expression that matches a cgo char. + // It is used to detect character arrays to hexdump them. + cCharRE = regexp.MustCompile("^.*\\._Ctype_char$") + + // cUnsignedCharRE is a regular expression that matches a cgo unsigned + // char. It is used to detect unsigned character arrays to hexdump + // them. 
+ cUnsignedCharRE = regexp.MustCompile("^.*\\._Ctype_unsignedchar$") + + // cUint8tCharRE is a regular expression that matches a cgo uint8_t. + // It is used to detect uint8_t arrays to hexdump them. + cUint8tCharRE = regexp.MustCompile("^.*\\._Ctype_uint8_t$") +) + +// dumpState contains information about the state of a dump operation. +type dumpState struct { + w io.Writer + depth int + pointers map[uintptr]int + ignoreNextType bool + ignoreNextIndent bool + cs *ConfigState +} + +// indent performs indentation according to the depth level and cs.Indent +// option. +func (d *dumpState) indent() { + if d.ignoreNextIndent { + d.ignoreNextIndent = false + return + } + d.w.Write(bytes.Repeat([]byte(d.cs.Indent), d.depth)) +} + +// unpackValue returns values inside of non-nil interfaces when possible. +// This is useful for data types like structs, arrays, slices, and maps which +// can contain varying types packed inside an interface. +func (d *dumpState) unpackValue(v reflect.Value) reflect.Value { + if v.Kind() == reflect.Interface && !v.IsNil() { + v = v.Elem() + } + return v +} + +// dumpPtr handles formatting of pointers by indirecting them as necessary. +func (d *dumpState) dumpPtr(v reflect.Value) { + // Remove pointers at or below the current depth from map used to detect + // circular refs. + for k, depth := range d.pointers { + if depth >= d.depth { + delete(d.pointers, k) + } + } + + // Keep list of all dereferenced pointers to show later. + pointerChain := make([]uintptr, 0) + + // Figure out how many levels of indirection there are by dereferencing + // pointers and unpacking interfaces down the chain while detecting circular + // references. 
+ nilFound := false + cycleFound := false + indirects := 0 + ve := v + for ve.Kind() == reflect.Ptr { + if ve.IsNil() { + nilFound = true + break + } + indirects++ + addr := ve.Pointer() + pointerChain = append(pointerChain, addr) + if pd, ok := d.pointers[addr]; ok && pd < d.depth { + cycleFound = true + indirects-- + break + } + d.pointers[addr] = d.depth + + ve = ve.Elem() + if ve.Kind() == reflect.Interface { + if ve.IsNil() { + nilFound = true + break + } + ve = ve.Elem() + } + } + + // Display type information. + d.w.Write(openParenBytes) + d.w.Write(bytes.Repeat(asteriskBytes, indirects)) + d.w.Write([]byte(ve.Type().String())) + d.w.Write(closeParenBytes) + + // Display pointer information. + if !d.cs.DisablePointerAddresses && len(pointerChain) > 0 { + d.w.Write(openParenBytes) + for i, addr := range pointerChain { + if i > 0 { + d.w.Write(pointerChainBytes) + } + printHexPtr(d.w, addr) + } + d.w.Write(closeParenBytes) + } + + // Display dereferenced value. + d.w.Write(openParenBytes) + switch { + case nilFound == true: + d.w.Write(nilAngleBytes) + + case cycleFound == true: + d.w.Write(circularBytes) + + default: + d.ignoreNextType = true + d.dump(ve) + } + d.w.Write(closeParenBytes) +} + +// dumpSlice handles formatting of arrays and slices. Byte (uint8 under +// reflection) arrays and slices are dumped in hexdump -C fashion. +func (d *dumpState) dumpSlice(v reflect.Value) { + // Determine whether this type should be hex dumped or not. Also, + // for types which should be hexdumped, try to use the underlying data + // first, then fall back to trying to convert them to a uint8 slice. + var buf []uint8 + doConvert := false + doHexDump := false + numEntries := v.Len() + if numEntries > 0 { + vt := v.Index(0).Type() + vts := vt.String() + switch { + // C types that need to be converted. 
+ case cCharRE.MatchString(vts): + fallthrough + case cUnsignedCharRE.MatchString(vts): + fallthrough + case cUint8tCharRE.MatchString(vts): + doConvert = true + + // Try to use existing uint8 slices and fall back to converting + // and copying if that fails. + case vt.Kind() == reflect.Uint8: + // We need an addressable interface to convert the type + // to a byte slice. However, the reflect package won't + // give us an interface on certain things like + // unexported struct fields in order to enforce + // visibility rules. We use unsafe, when available, to + // bypass these restrictions since this package does not + // mutate the values. + vs := v + if !vs.CanInterface() || !vs.CanAddr() { + vs = unsafeReflectValue(vs) + } + if !UnsafeDisabled { + vs = vs.Slice(0, numEntries) + + // Use the existing uint8 slice if it can be + // type asserted. + iface := vs.Interface() + if slice, ok := iface.([]uint8); ok { + buf = slice + doHexDump = true + break + } + } + + // The underlying data needs to be converted if it can't + // be type asserted to a uint8 slice. + doConvert = true + } + + // Copy and convert the underlying type if needed. + if doConvert && vt.ConvertibleTo(uint8Type) { + // Convert and copy each element into a uint8 byte + // slice. + buf = make([]uint8, numEntries) + for i := 0; i < numEntries; i++ { + vv := v.Index(i) + buf[i] = uint8(vv.Convert(uint8Type).Uint()) + } + doHexDump = true + } + } + + // Hexdump the entire slice as needed. + if doHexDump { + indent := strings.Repeat(d.cs.Indent, d.depth) + str := indent + hex.Dump(buf) + str = strings.Replace(str, "\n", "\n"+indent, -1) + str = strings.TrimRight(str, d.cs.Indent) + d.w.Write([]byte(str)) + return + } + + // Recursively call dump for each item. + for i := 0; i < numEntries; i++ { + d.dump(d.unpackValue(v.Index(i))) + if i < (numEntries - 1) { + d.w.Write(commaNewlineBytes) + } else { + d.w.Write(newlineBytes) + } + } +} + +// dump is the main workhorse for dumping a value. 
It uses the passed reflect +// value to figure out what kind of object we are dealing with and formats it +// appropriately. It is a recursive function, however circular data structures +// are detected and handled properly. +func (d *dumpState) dump(v reflect.Value) { + // Handle invalid reflect values immediately. + kind := v.Kind() + if kind == reflect.Invalid { + d.w.Write(invalidAngleBytes) + return + } + + // Handle pointers specially. + if kind == reflect.Ptr { + d.indent() + d.dumpPtr(v) + return + } + + // Print type information unless already handled elsewhere. + if !d.ignoreNextType { + d.indent() + d.w.Write(openParenBytes) + d.w.Write([]byte(v.Type().String())) + d.w.Write(closeParenBytes) + d.w.Write(spaceBytes) + } + d.ignoreNextType = false + + // Display length and capacity if the built-in len and cap functions + // work with the value's kind and the len/cap itself is non-zero. + valueLen, valueCap := 0, 0 + switch v.Kind() { + case reflect.Array, reflect.Slice, reflect.Chan: + valueLen, valueCap = v.Len(), v.Cap() + case reflect.Map, reflect.String: + valueLen = v.Len() + } + if valueLen != 0 || !d.cs.DisableCapacities && valueCap != 0 { + d.w.Write(openParenBytes) + if valueLen != 0 { + d.w.Write(lenEqualsBytes) + printInt(d.w, int64(valueLen), 10) + } + if !d.cs.DisableCapacities && valueCap != 0 { + if valueLen != 0 { + d.w.Write(spaceBytes) + } + d.w.Write(capEqualsBytes) + printInt(d.w, int64(valueCap), 10) + } + d.w.Write(closeParenBytes) + d.w.Write(spaceBytes) + } + + // Call Stringer/error interfaces if they exist and the handle methods flag + // is enabled + if !d.cs.DisableMethods { + if (kind != reflect.Invalid) && (kind != reflect.Interface) { + if handled := handleMethods(d.cs, d.w, v); handled { + return + } + } + } + + switch kind { + case reflect.Invalid: + // Do nothing. We should never get here since invalid has already + // been handled above. 
+ + case reflect.Bool: + printBool(d.w, v.Bool()) + + case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int: + printInt(d.w, v.Int(), 10) + + case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint: + printUint(d.w, v.Uint(), 10) + + case reflect.Float32: + printFloat(d.w, v.Float(), 32) + + case reflect.Float64: + printFloat(d.w, v.Float(), 64) + + case reflect.Complex64: + printComplex(d.w, v.Complex(), 32) + + case reflect.Complex128: + printComplex(d.w, v.Complex(), 64) + + case reflect.Slice: + if v.IsNil() { + d.w.Write(nilAngleBytes) + break + } + fallthrough + + case reflect.Array: + d.w.Write(openBraceNewlineBytes) + d.depth++ + if (d.cs.MaxDepth != 0) && (d.depth > d.cs.MaxDepth) { + d.indent() + d.w.Write(maxNewlineBytes) + } else { + d.dumpSlice(v) + } + d.depth-- + d.indent() + d.w.Write(closeBraceBytes) + + case reflect.String: + d.w.Write([]byte(strconv.Quote(v.String()))) + + case reflect.Interface: + // The only time we should get here is for nil interfaces due to + // unpackValue calls. + if v.IsNil() { + d.w.Write(nilAngleBytes) + } + + case reflect.Ptr: + // Do nothing. We should never get here since pointers have already + // been handled above. 
+ + case reflect.Map: + // nil maps should be indicated as different than empty maps + if v.IsNil() { + d.w.Write(nilAngleBytes) + break + } + + d.w.Write(openBraceNewlineBytes) + d.depth++ + if (d.cs.MaxDepth != 0) && (d.depth > d.cs.MaxDepth) { + d.indent() + d.w.Write(maxNewlineBytes) + } else { + numEntries := v.Len() + keys := v.MapKeys() + if d.cs.SortKeys { + sortValues(keys, d.cs) + } + for i, key := range keys { + d.dump(d.unpackValue(key)) + d.w.Write(colonSpaceBytes) + d.ignoreNextIndent = true + d.dump(d.unpackValue(v.MapIndex(key))) + if i < (numEntries - 1) { + d.w.Write(commaNewlineBytes) + } else { + d.w.Write(newlineBytes) + } + } + } + d.depth-- + d.indent() + d.w.Write(closeBraceBytes) + + case reflect.Struct: + d.w.Write(openBraceNewlineBytes) + d.depth++ + if (d.cs.MaxDepth != 0) && (d.depth > d.cs.MaxDepth) { + d.indent() + d.w.Write(maxNewlineBytes) + } else { + vt := v.Type() + numFields := v.NumField() + for i := 0; i < numFields; i++ { + d.indent() + vtf := vt.Field(i) + d.w.Write([]byte(vtf.Name)) + d.w.Write(colonSpaceBytes) + d.ignoreNextIndent = true + d.dump(d.unpackValue(v.Field(i))) + if i < (numFields - 1) { + d.w.Write(commaNewlineBytes) + } else { + d.w.Write(newlineBytes) + } + } + } + d.depth-- + d.indent() + d.w.Write(closeBraceBytes) + + case reflect.Uintptr: + printHexPtr(d.w, uintptr(v.Uint())) + + case reflect.UnsafePointer, reflect.Chan, reflect.Func: + printHexPtr(d.w, v.Pointer()) + + // There were not any other types at the time this code was written, but + // fall back to letting the default fmt package handle it in case any new + // types are added. + default: + if v.CanInterface() { + fmt.Fprintf(d.w, "%v", v.Interface()) + } else { + fmt.Fprintf(d.w, "%v", v.String()) + } + } +} + +// fdump is a helper function to consolidate the logic from the various public +// methods which take varying writers and config states. 
+func fdump(cs *ConfigState, w io.Writer, a ...interface{}) { + for _, arg := range a { + if arg == nil { + w.Write(interfaceBytes) + w.Write(spaceBytes) + w.Write(nilAngleBytes) + w.Write(newlineBytes) + continue + } + + d := dumpState{w: w, cs: cs} + d.pointers = make(map[uintptr]int) + d.dump(reflect.ValueOf(arg)) + d.w.Write(newlineBytes) + } +} + +// Fdump formats and displays the passed arguments to io.Writer w. It formats +// exactly the same as Dump. +func Fdump(w io.Writer, a ...interface{}) { + fdump(&Config, w, a...) +} + +// Sdump returns a string with the passed arguments formatted exactly the same +// as Dump. +func Sdump(a ...interface{}) string { + var buf bytes.Buffer + fdump(&Config, &buf, a...) + return buf.String() +} + +/* +Dump displays the passed parameters to standard out with newlines, customizable +indentation, and additional debug information such as complete types and all +pointer addresses used to indirect to the final value. It provides the +following features over the built-in printing facilities provided by the fmt +package: + + * Pointers are dereferenced and followed + * Circular data structures are detected and handled properly + * Custom Stringer/error interfaces are optionally invoked, including + on unexported types + * Custom types which only implement the Stringer/error interfaces via + a pointer receiver are optionally invoked when passing non-pointer + variables + * Byte arrays and slices are dumped like the hexdump -C command which + includes offsets, byte values in hex, and ASCII output + +The configuration options are controlled by an exported package global, +spew.Config. See ConfigState for options documentation. + +See Fdump if you would prefer dumping to an arbitrary io.Writer or Sdump to +get the formatted result as a string. +*/ +func Dump(a ...interface{}) { + fdump(&Config, os.Stdout, a...) 
+} diff --git a/vendor/github.com/davecgh/go-spew/spew/dump_test.go b/vendor/github.com/davecgh/go-spew/spew/dump_test.go new file mode 100644 index 000000000..5aad9c7af --- /dev/null +++ b/vendor/github.com/davecgh/go-spew/spew/dump_test.go @@ -0,0 +1,1042 @@ +/* + * Copyright (c) 2013-2016 Dave Collins + * + * Permission to use, copy, modify, and distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +/* +Test Summary: +NOTE: For each test, a nil pointer, a single pointer and double pointer to the +base test element are also tested to ensure proper indirection across all types. 
+ +- Max int8, int16, int32, int64, int +- Max uint8, uint16, uint32, uint64, uint +- Boolean true and false +- Standard complex64 and complex128 +- Array containing standard ints +- Array containing type with custom formatter on pointer receiver only +- Array containing interfaces +- Array containing bytes +- Slice containing standard float32 values +- Slice containing type with custom formatter on pointer receiver only +- Slice containing interfaces +- Slice containing bytes +- Nil slice +- Standard string +- Nil interface +- Sub-interface +- Map with string keys and int vals +- Map with custom formatter type on pointer receiver only keys and vals +- Map with interface keys and values +- Map with nil interface value +- Struct with primitives +- Struct that contains another struct +- Struct that contains custom type with Stringer pointer interface via both + exported and unexported fields +- Struct that contains embedded struct and field to same struct +- Uintptr to 0 (null pointer) +- Uintptr address of real variable +- Unsafe.Pointer to 0 (null pointer) +- Unsafe.Pointer to address of real variable +- Nil channel +- Standard int channel +- Function with no params and no returns +- Function with param and no returns +- Function with multiple params and multiple returns +- Struct that is circular through self referencing +- Structs that are circular through cross referencing +- Structs that are indirectly circular +- Type that panics in its Stringer interface +*/ + +package spew_test + +import ( + "bytes" + "fmt" + "testing" + "unsafe" + + "github.com/davecgh/go-spew/spew" +) + +// dumpTest is used to describe a test to be performed against the Dump method. +type dumpTest struct { + in interface{} + wants []string +} + +// dumpTests houses all of the tests to be performed against the Dump method. 
+var dumpTests = make([]dumpTest, 0) + +// addDumpTest is a helper method to append the passed input and desired result +// to dumpTests +func addDumpTest(in interface{}, wants ...string) { + test := dumpTest{in, wants} + dumpTests = append(dumpTests, test) +} + +func addIntDumpTests() { + // Max int8. + v := int8(127) + nv := (*int8)(nil) + pv := &v + vAddr := fmt.Sprintf("%p", pv) + pvAddr := fmt.Sprintf("%p", &pv) + vt := "int8" + vs := "127" + addDumpTest(v, "("+vt+") "+vs+"\n") + addDumpTest(pv, "(*"+vt+")("+vAddr+")("+vs+")\n") + addDumpTest(&pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")("+vs+")\n") + addDumpTest(nv, "(*"+vt+")()\n") + + // Max int16. + v2 := int16(32767) + nv2 := (*int16)(nil) + pv2 := &v2 + v2Addr := fmt.Sprintf("%p", pv2) + pv2Addr := fmt.Sprintf("%p", &pv2) + v2t := "int16" + v2s := "32767" + addDumpTest(v2, "("+v2t+") "+v2s+"\n") + addDumpTest(pv2, "(*"+v2t+")("+v2Addr+")("+v2s+")\n") + addDumpTest(&pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")("+v2s+")\n") + addDumpTest(nv2, "(*"+v2t+")()\n") + + // Max int32. + v3 := int32(2147483647) + nv3 := (*int32)(nil) + pv3 := &v3 + v3Addr := fmt.Sprintf("%p", pv3) + pv3Addr := fmt.Sprintf("%p", &pv3) + v3t := "int32" + v3s := "2147483647" + addDumpTest(v3, "("+v3t+") "+v3s+"\n") + addDumpTest(pv3, "(*"+v3t+")("+v3Addr+")("+v3s+")\n") + addDumpTest(&pv3, "(**"+v3t+")("+pv3Addr+"->"+v3Addr+")("+v3s+")\n") + addDumpTest(nv3, "(*"+v3t+")()\n") + + // Max int64. + v4 := int64(9223372036854775807) + nv4 := (*int64)(nil) + pv4 := &v4 + v4Addr := fmt.Sprintf("%p", pv4) + pv4Addr := fmt.Sprintf("%p", &pv4) + v4t := "int64" + v4s := "9223372036854775807" + addDumpTest(v4, "("+v4t+") "+v4s+"\n") + addDumpTest(pv4, "(*"+v4t+")("+v4Addr+")("+v4s+")\n") + addDumpTest(&pv4, "(**"+v4t+")("+pv4Addr+"->"+v4Addr+")("+v4s+")\n") + addDumpTest(nv4, "(*"+v4t+")()\n") + + // Max int. 
+ v5 := int(2147483647) + nv5 := (*int)(nil) + pv5 := &v5 + v5Addr := fmt.Sprintf("%p", pv5) + pv5Addr := fmt.Sprintf("%p", &pv5) + v5t := "int" + v5s := "2147483647" + addDumpTest(v5, "("+v5t+") "+v5s+"\n") + addDumpTest(pv5, "(*"+v5t+")("+v5Addr+")("+v5s+")\n") + addDumpTest(&pv5, "(**"+v5t+")("+pv5Addr+"->"+v5Addr+")("+v5s+")\n") + addDumpTest(nv5, "(*"+v5t+")()\n") +} + +func addUintDumpTests() { + // Max uint8. + v := uint8(255) + nv := (*uint8)(nil) + pv := &v + vAddr := fmt.Sprintf("%p", pv) + pvAddr := fmt.Sprintf("%p", &pv) + vt := "uint8" + vs := "255" + addDumpTest(v, "("+vt+") "+vs+"\n") + addDumpTest(pv, "(*"+vt+")("+vAddr+")("+vs+")\n") + addDumpTest(&pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")("+vs+")\n") + addDumpTest(nv, "(*"+vt+")()\n") + + // Max uint16. + v2 := uint16(65535) + nv2 := (*uint16)(nil) + pv2 := &v2 + v2Addr := fmt.Sprintf("%p", pv2) + pv2Addr := fmt.Sprintf("%p", &pv2) + v2t := "uint16" + v2s := "65535" + addDumpTest(v2, "("+v2t+") "+v2s+"\n") + addDumpTest(pv2, "(*"+v2t+")("+v2Addr+")("+v2s+")\n") + addDumpTest(&pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")("+v2s+")\n") + addDumpTest(nv2, "(*"+v2t+")()\n") + + // Max uint32. + v3 := uint32(4294967295) + nv3 := (*uint32)(nil) + pv3 := &v3 + v3Addr := fmt.Sprintf("%p", pv3) + pv3Addr := fmt.Sprintf("%p", &pv3) + v3t := "uint32" + v3s := "4294967295" + addDumpTest(v3, "("+v3t+") "+v3s+"\n") + addDumpTest(pv3, "(*"+v3t+")("+v3Addr+")("+v3s+")\n") + addDumpTest(&pv3, "(**"+v3t+")("+pv3Addr+"->"+v3Addr+")("+v3s+")\n") + addDumpTest(nv3, "(*"+v3t+")()\n") + + // Max uint64. + v4 := uint64(18446744073709551615) + nv4 := (*uint64)(nil) + pv4 := &v4 + v4Addr := fmt.Sprintf("%p", pv4) + pv4Addr := fmt.Sprintf("%p", &pv4) + v4t := "uint64" + v4s := "18446744073709551615" + addDumpTest(v4, "("+v4t+") "+v4s+"\n") + addDumpTest(pv4, "(*"+v4t+")("+v4Addr+")("+v4s+")\n") + addDumpTest(&pv4, "(**"+v4t+")("+pv4Addr+"->"+v4Addr+")("+v4s+")\n") + addDumpTest(nv4, "(*"+v4t+")()\n") + + // Max uint. 
+ v5 := uint(4294967295) + nv5 := (*uint)(nil) + pv5 := &v5 + v5Addr := fmt.Sprintf("%p", pv5) + pv5Addr := fmt.Sprintf("%p", &pv5) + v5t := "uint" + v5s := "4294967295" + addDumpTest(v5, "("+v5t+") "+v5s+"\n") + addDumpTest(pv5, "(*"+v5t+")("+v5Addr+")("+v5s+")\n") + addDumpTest(&pv5, "(**"+v5t+")("+pv5Addr+"->"+v5Addr+")("+v5s+")\n") + addDumpTest(nv5, "(*"+v5t+")()\n") +} + +func addBoolDumpTests() { + // Boolean true. + v := bool(true) + nv := (*bool)(nil) + pv := &v + vAddr := fmt.Sprintf("%p", pv) + pvAddr := fmt.Sprintf("%p", &pv) + vt := "bool" + vs := "true" + addDumpTest(v, "("+vt+") "+vs+"\n") + addDumpTest(pv, "(*"+vt+")("+vAddr+")("+vs+")\n") + addDumpTest(&pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")("+vs+")\n") + addDumpTest(nv, "(*"+vt+")()\n") + + // Boolean false. + v2 := bool(false) + pv2 := &v2 + v2Addr := fmt.Sprintf("%p", pv2) + pv2Addr := fmt.Sprintf("%p", &pv2) + v2t := "bool" + v2s := "false" + addDumpTest(v2, "("+v2t+") "+v2s+"\n") + addDumpTest(pv2, "(*"+v2t+")("+v2Addr+")("+v2s+")\n") + addDumpTest(&pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")("+v2s+")\n") +} + +func addFloatDumpTests() { + // Standard float32. + v := float32(3.1415) + nv := (*float32)(nil) + pv := &v + vAddr := fmt.Sprintf("%p", pv) + pvAddr := fmt.Sprintf("%p", &pv) + vt := "float32" + vs := "3.1415" + addDumpTest(v, "("+vt+") "+vs+"\n") + addDumpTest(pv, "(*"+vt+")("+vAddr+")("+vs+")\n") + addDumpTest(&pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")("+vs+")\n") + addDumpTest(nv, "(*"+vt+")()\n") + + // Standard float64. + v2 := float64(3.1415926) + nv2 := (*float64)(nil) + pv2 := &v2 + v2Addr := fmt.Sprintf("%p", pv2) + pv2Addr := fmt.Sprintf("%p", &pv2) + v2t := "float64" + v2s := "3.1415926" + addDumpTest(v2, "("+v2t+") "+v2s+"\n") + addDumpTest(pv2, "(*"+v2t+")("+v2Addr+")("+v2s+")\n") + addDumpTest(&pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")("+v2s+")\n") + addDumpTest(nv2, "(*"+v2t+")()\n") +} + +func addComplexDumpTests() { + // Standard complex64. 
+ v := complex(float32(6), -2) + nv := (*complex64)(nil) + pv := &v + vAddr := fmt.Sprintf("%p", pv) + pvAddr := fmt.Sprintf("%p", &pv) + vt := "complex64" + vs := "(6-2i)" + addDumpTest(v, "("+vt+") "+vs+"\n") + addDumpTest(pv, "(*"+vt+")("+vAddr+")("+vs+")\n") + addDumpTest(&pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")("+vs+")\n") + addDumpTest(nv, "(*"+vt+")()\n") + + // Standard complex128. + v2 := complex(float64(-6), 2) + nv2 := (*complex128)(nil) + pv2 := &v2 + v2Addr := fmt.Sprintf("%p", pv2) + pv2Addr := fmt.Sprintf("%p", &pv2) + v2t := "complex128" + v2s := "(-6+2i)" + addDumpTest(v2, "("+v2t+") "+v2s+"\n") + addDumpTest(pv2, "(*"+v2t+")("+v2Addr+")("+v2s+")\n") + addDumpTest(&pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")("+v2s+")\n") + addDumpTest(nv2, "(*"+v2t+")()\n") +} + +func addArrayDumpTests() { + // Array containing standard ints. + v := [3]int{1, 2, 3} + vLen := fmt.Sprintf("%d", len(v)) + vCap := fmt.Sprintf("%d", cap(v)) + nv := (*[3]int)(nil) + pv := &v + vAddr := fmt.Sprintf("%p", pv) + pvAddr := fmt.Sprintf("%p", &pv) + vt := "int" + vs := "(len=" + vLen + " cap=" + vCap + ") {\n (" + vt + ") 1,\n (" + + vt + ") 2,\n (" + vt + ") 3\n}" + addDumpTest(v, "([3]"+vt+") "+vs+"\n") + addDumpTest(pv, "(*[3]"+vt+")("+vAddr+")("+vs+")\n") + addDumpTest(&pv, "(**[3]"+vt+")("+pvAddr+"->"+vAddr+")("+vs+")\n") + addDumpTest(nv, "(*[3]"+vt+")()\n") + + // Array containing type with custom formatter on pointer receiver only. 
+ v2i0 := pstringer("1") + v2i1 := pstringer("2") + v2i2 := pstringer("3") + v2 := [3]pstringer{v2i0, v2i1, v2i2} + v2i0Len := fmt.Sprintf("%d", len(v2i0)) + v2i1Len := fmt.Sprintf("%d", len(v2i1)) + v2i2Len := fmt.Sprintf("%d", len(v2i2)) + v2Len := fmt.Sprintf("%d", len(v2)) + v2Cap := fmt.Sprintf("%d", cap(v2)) + nv2 := (*[3]pstringer)(nil) + pv2 := &v2 + v2Addr := fmt.Sprintf("%p", pv2) + pv2Addr := fmt.Sprintf("%p", &pv2) + v2t := "spew_test.pstringer" + v2sp := "(len=" + v2Len + " cap=" + v2Cap + ") {\n (" + v2t + + ") (len=" + v2i0Len + ") stringer 1,\n (" + v2t + + ") (len=" + v2i1Len + ") stringer 2,\n (" + v2t + + ") (len=" + v2i2Len + ") " + "stringer 3\n}" + v2s := v2sp + if spew.UnsafeDisabled { + v2s = "(len=" + v2Len + " cap=" + v2Cap + ") {\n (" + v2t + + ") (len=" + v2i0Len + ") \"1\",\n (" + v2t + ") (len=" + + v2i1Len + ") \"2\",\n (" + v2t + ") (len=" + v2i2Len + + ") " + "\"3\"\n}" + } + addDumpTest(v2, "([3]"+v2t+") "+v2s+"\n") + addDumpTest(pv2, "(*[3]"+v2t+")("+v2Addr+")("+v2sp+")\n") + addDumpTest(&pv2, "(**[3]"+v2t+")("+pv2Addr+"->"+v2Addr+")("+v2sp+")\n") + addDumpTest(nv2, "(*[3]"+v2t+")()\n") + + // Array containing interfaces. + v3i0 := "one" + v3 := [3]interface{}{v3i0, int(2), uint(3)} + v3i0Len := fmt.Sprintf("%d", len(v3i0)) + v3Len := fmt.Sprintf("%d", len(v3)) + v3Cap := fmt.Sprintf("%d", cap(v3)) + nv3 := (*[3]interface{})(nil) + pv3 := &v3 + v3Addr := fmt.Sprintf("%p", pv3) + pv3Addr := fmt.Sprintf("%p", &pv3) + v3t := "[3]interface {}" + v3t2 := "string" + v3t3 := "int" + v3t4 := "uint" + v3s := "(len=" + v3Len + " cap=" + v3Cap + ") {\n (" + v3t2 + ") " + + "(len=" + v3i0Len + ") \"one\",\n (" + v3t3 + ") 2,\n (" + + v3t4 + ") 3\n}" + addDumpTest(v3, "("+v3t+") "+v3s+"\n") + addDumpTest(pv3, "(*"+v3t+")("+v3Addr+")("+v3s+")\n") + addDumpTest(&pv3, "(**"+v3t+")("+pv3Addr+"->"+v3Addr+")("+v3s+")\n") + addDumpTest(nv3, "(*"+v3t+")()\n") + + // Array containing bytes. 
+ v4 := [34]byte{ + 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, + 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, 0x20, + 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, 0x28, + 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, 0x30, + 0x31, 0x32, + } + v4Len := fmt.Sprintf("%d", len(v4)) + v4Cap := fmt.Sprintf("%d", cap(v4)) + nv4 := (*[34]byte)(nil) + pv4 := &v4 + v4Addr := fmt.Sprintf("%p", pv4) + pv4Addr := fmt.Sprintf("%p", &pv4) + v4t := "[34]uint8" + v4s := "(len=" + v4Len + " cap=" + v4Cap + ") " + + "{\n 00000000 11 12 13 14 15 16 17 18 19 1a 1b 1c 1d 1e 1f 20" + + " |............... |\n" + + " 00000010 21 22 23 24 25 26 27 28 29 2a 2b 2c 2d 2e 2f 30" + + " |!\"#$%&'()*+,-./0|\n" + + " 00000020 31 32 " + + " |12|\n}" + addDumpTest(v4, "("+v4t+") "+v4s+"\n") + addDumpTest(pv4, "(*"+v4t+")("+v4Addr+")("+v4s+")\n") + addDumpTest(&pv4, "(**"+v4t+")("+pv4Addr+"->"+v4Addr+")("+v4s+")\n") + addDumpTest(nv4, "(*"+v4t+")()\n") +} + +func addSliceDumpTests() { + // Slice containing standard float32 values. + v := []float32{3.14, 6.28, 12.56} + vLen := fmt.Sprintf("%d", len(v)) + vCap := fmt.Sprintf("%d", cap(v)) + nv := (*[]float32)(nil) + pv := &v + vAddr := fmt.Sprintf("%p", pv) + pvAddr := fmt.Sprintf("%p", &pv) + vt := "float32" + vs := "(len=" + vLen + " cap=" + vCap + ") {\n (" + vt + ") 3.14,\n (" + + vt + ") 6.28,\n (" + vt + ") 12.56\n}" + addDumpTest(v, "([]"+vt+") "+vs+"\n") + addDumpTest(pv, "(*[]"+vt+")("+vAddr+")("+vs+")\n") + addDumpTest(&pv, "(**[]"+vt+")("+pvAddr+"->"+vAddr+")("+vs+")\n") + addDumpTest(nv, "(*[]"+vt+")()\n") + + // Slice containing type with custom formatter on pointer receiver only. 
+ v2i0 := pstringer("1") + v2i1 := pstringer("2") + v2i2 := pstringer("3") + v2 := []pstringer{v2i0, v2i1, v2i2} + v2i0Len := fmt.Sprintf("%d", len(v2i0)) + v2i1Len := fmt.Sprintf("%d", len(v2i1)) + v2i2Len := fmt.Sprintf("%d", len(v2i2)) + v2Len := fmt.Sprintf("%d", len(v2)) + v2Cap := fmt.Sprintf("%d", cap(v2)) + nv2 := (*[]pstringer)(nil) + pv2 := &v2 + v2Addr := fmt.Sprintf("%p", pv2) + pv2Addr := fmt.Sprintf("%p", &pv2) + v2t := "spew_test.pstringer" + v2s := "(len=" + v2Len + " cap=" + v2Cap + ") {\n (" + v2t + ") (len=" + + v2i0Len + ") stringer 1,\n (" + v2t + ") (len=" + v2i1Len + + ") stringer 2,\n (" + v2t + ") (len=" + v2i2Len + ") " + + "stringer 3\n}" + addDumpTest(v2, "([]"+v2t+") "+v2s+"\n") + addDumpTest(pv2, "(*[]"+v2t+")("+v2Addr+")("+v2s+")\n") + addDumpTest(&pv2, "(**[]"+v2t+")("+pv2Addr+"->"+v2Addr+")("+v2s+")\n") + addDumpTest(nv2, "(*[]"+v2t+")()\n") + + // Slice containing interfaces. + v3i0 := "one" + v3 := []interface{}{v3i0, int(2), uint(3), nil} + v3i0Len := fmt.Sprintf("%d", len(v3i0)) + v3Len := fmt.Sprintf("%d", len(v3)) + v3Cap := fmt.Sprintf("%d", cap(v3)) + nv3 := (*[]interface{})(nil) + pv3 := &v3 + v3Addr := fmt.Sprintf("%p", pv3) + pv3Addr := fmt.Sprintf("%p", &pv3) + v3t := "[]interface {}" + v3t2 := "string" + v3t3 := "int" + v3t4 := "uint" + v3t5 := "interface {}" + v3s := "(len=" + v3Len + " cap=" + v3Cap + ") {\n (" + v3t2 + ") " + + "(len=" + v3i0Len + ") \"one\",\n (" + v3t3 + ") 2,\n (" + + v3t4 + ") 3,\n (" + v3t5 + ") \n}" + addDumpTest(v3, "("+v3t+") "+v3s+"\n") + addDumpTest(pv3, "(*"+v3t+")("+v3Addr+")("+v3s+")\n") + addDumpTest(&pv3, "(**"+v3t+")("+pv3Addr+"->"+v3Addr+")("+v3s+")\n") + addDumpTest(nv3, "(*"+v3t+")()\n") + + // Slice containing bytes. 
+ v4 := []byte{ + 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, + 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, 0x20, + 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, 0x28, + 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, 0x30, + 0x31, 0x32, + } + v4Len := fmt.Sprintf("%d", len(v4)) + v4Cap := fmt.Sprintf("%d", cap(v4)) + nv4 := (*[]byte)(nil) + pv4 := &v4 + v4Addr := fmt.Sprintf("%p", pv4) + pv4Addr := fmt.Sprintf("%p", &pv4) + v4t := "[]uint8" + v4s := "(len=" + v4Len + " cap=" + v4Cap + ") " + + "{\n 00000000 11 12 13 14 15 16 17 18 19 1a 1b 1c 1d 1e 1f 20" + + " |............... |\n" + + " 00000010 21 22 23 24 25 26 27 28 29 2a 2b 2c 2d 2e 2f 30" + + " |!\"#$%&'()*+,-./0|\n" + + " 00000020 31 32 " + + " |12|\n}" + addDumpTest(v4, "("+v4t+") "+v4s+"\n") + addDumpTest(pv4, "(*"+v4t+")("+v4Addr+")("+v4s+")\n") + addDumpTest(&pv4, "(**"+v4t+")("+pv4Addr+"->"+v4Addr+")("+v4s+")\n") + addDumpTest(nv4, "(*"+v4t+")()\n") + + // Nil slice. + v5 := []int(nil) + nv5 := (*[]int)(nil) + pv5 := &v5 + v5Addr := fmt.Sprintf("%p", pv5) + pv5Addr := fmt.Sprintf("%p", &pv5) + v5t := "[]int" + v5s := "" + addDumpTest(v5, "("+v5t+") "+v5s+"\n") + addDumpTest(pv5, "(*"+v5t+")("+v5Addr+")("+v5s+")\n") + addDumpTest(&pv5, "(**"+v5t+")("+pv5Addr+"->"+v5Addr+")("+v5s+")\n") + addDumpTest(nv5, "(*"+v5t+")()\n") +} + +func addStringDumpTests() { + // Standard string. + v := "test" + vLen := fmt.Sprintf("%d", len(v)) + nv := (*string)(nil) + pv := &v + vAddr := fmt.Sprintf("%p", pv) + pvAddr := fmt.Sprintf("%p", &pv) + vt := "string" + vs := "(len=" + vLen + ") \"test\"" + addDumpTest(v, "("+vt+") "+vs+"\n") + addDumpTest(pv, "(*"+vt+")("+vAddr+")("+vs+")\n") + addDumpTest(&pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")("+vs+")\n") + addDumpTest(nv, "(*"+vt+")()\n") +} + +func addInterfaceDumpTests() { + // Nil interface. 
+ var v interface{} + nv := (*interface{})(nil) + pv := &v + vAddr := fmt.Sprintf("%p", pv) + pvAddr := fmt.Sprintf("%p", &pv) + vt := "interface {}" + vs := "" + addDumpTest(v, "("+vt+") "+vs+"\n") + addDumpTest(pv, "(*"+vt+")("+vAddr+")("+vs+")\n") + addDumpTest(&pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")("+vs+")\n") + addDumpTest(nv, "(*"+vt+")()\n") + + // Sub-interface. + v2 := interface{}(uint16(65535)) + pv2 := &v2 + v2Addr := fmt.Sprintf("%p", pv2) + pv2Addr := fmt.Sprintf("%p", &pv2) + v2t := "uint16" + v2s := "65535" + addDumpTest(v2, "("+v2t+") "+v2s+"\n") + addDumpTest(pv2, "(*"+v2t+")("+v2Addr+")("+v2s+")\n") + addDumpTest(&pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")("+v2s+")\n") +} + +func addMapDumpTests() { + // Map with string keys and int vals. + k := "one" + kk := "two" + m := map[string]int{k: 1, kk: 2} + klen := fmt.Sprintf("%d", len(k)) // not kLen to shut golint up + kkLen := fmt.Sprintf("%d", len(kk)) + mLen := fmt.Sprintf("%d", len(m)) + nilMap := map[string]int(nil) + nm := (*map[string]int)(nil) + pm := &m + mAddr := fmt.Sprintf("%p", pm) + pmAddr := fmt.Sprintf("%p", &pm) + mt := "map[string]int" + mt1 := "string" + mt2 := "int" + ms := "(len=" + mLen + ") {\n (" + mt1 + ") (len=" + klen + ") " + + "\"one\": (" + mt2 + ") 1,\n (" + mt1 + ") (len=" + kkLen + + ") \"two\": (" + mt2 + ") 2\n}" + ms2 := "(len=" + mLen + ") {\n (" + mt1 + ") (len=" + kkLen + ") " + + "\"two\": (" + mt2 + ") 2,\n (" + mt1 + ") (len=" + klen + + ") \"one\": (" + mt2 + ") 1\n}" + addDumpTest(m, "("+mt+") "+ms+"\n", "("+mt+") "+ms2+"\n") + addDumpTest(pm, "(*"+mt+")("+mAddr+")("+ms+")\n", + "(*"+mt+")("+mAddr+")("+ms2+")\n") + addDumpTest(&pm, "(**"+mt+")("+pmAddr+"->"+mAddr+")("+ms+")\n", + "(**"+mt+")("+pmAddr+"->"+mAddr+")("+ms2+")\n") + addDumpTest(nm, "(*"+mt+")()\n") + addDumpTest(nilMap, "("+mt+") \n") + + // Map with custom formatter type on pointer receiver only keys and vals. 
+ k2 := pstringer("one") + v2 := pstringer("1") + m2 := map[pstringer]pstringer{k2: v2} + k2Len := fmt.Sprintf("%d", len(k2)) + v2Len := fmt.Sprintf("%d", len(v2)) + m2Len := fmt.Sprintf("%d", len(m2)) + nilMap2 := map[pstringer]pstringer(nil) + nm2 := (*map[pstringer]pstringer)(nil) + pm2 := &m2 + m2Addr := fmt.Sprintf("%p", pm2) + pm2Addr := fmt.Sprintf("%p", &pm2) + m2t := "map[spew_test.pstringer]spew_test.pstringer" + m2t1 := "spew_test.pstringer" + m2t2 := "spew_test.pstringer" + m2s := "(len=" + m2Len + ") {\n (" + m2t1 + ") (len=" + k2Len + ") " + + "stringer one: (" + m2t2 + ") (len=" + v2Len + ") stringer 1\n}" + if spew.UnsafeDisabled { + m2s = "(len=" + m2Len + ") {\n (" + m2t1 + ") (len=" + k2Len + + ") " + "\"one\": (" + m2t2 + ") (len=" + v2Len + + ") \"1\"\n}" + } + addDumpTest(m2, "("+m2t+") "+m2s+"\n") + addDumpTest(pm2, "(*"+m2t+")("+m2Addr+")("+m2s+")\n") + addDumpTest(&pm2, "(**"+m2t+")("+pm2Addr+"->"+m2Addr+")("+m2s+")\n") + addDumpTest(nm2, "(*"+m2t+")()\n") + addDumpTest(nilMap2, "("+m2t+") \n") + + // Map with interface keys and values. + k3 := "one" + k3Len := fmt.Sprintf("%d", len(k3)) + m3 := map[interface{}]interface{}{k3: 1} + m3Len := fmt.Sprintf("%d", len(m3)) + nilMap3 := map[interface{}]interface{}(nil) + nm3 := (*map[interface{}]interface{})(nil) + pm3 := &m3 + m3Addr := fmt.Sprintf("%p", pm3) + pm3Addr := fmt.Sprintf("%p", &pm3) + m3t := "map[interface {}]interface {}" + m3t1 := "string" + m3t2 := "int" + m3s := "(len=" + m3Len + ") {\n (" + m3t1 + ") (len=" + k3Len + ") " + + "\"one\": (" + m3t2 + ") 1\n}" + addDumpTest(m3, "("+m3t+") "+m3s+"\n") + addDumpTest(pm3, "(*"+m3t+")("+m3Addr+")("+m3s+")\n") + addDumpTest(&pm3, "(**"+m3t+")("+pm3Addr+"->"+m3Addr+")("+m3s+")\n") + addDumpTest(nm3, "(*"+m3t+")()\n") + addDumpTest(nilMap3, "("+m3t+") \n") + + // Map with nil interface value. 
+ k4 := "nil" + k4Len := fmt.Sprintf("%d", len(k4)) + m4 := map[string]interface{}{k4: nil} + m4Len := fmt.Sprintf("%d", len(m4)) + nilMap4 := map[string]interface{}(nil) + nm4 := (*map[string]interface{})(nil) + pm4 := &m4 + m4Addr := fmt.Sprintf("%p", pm4) + pm4Addr := fmt.Sprintf("%p", &pm4) + m4t := "map[string]interface {}" + m4t1 := "string" + m4t2 := "interface {}" + m4s := "(len=" + m4Len + ") {\n (" + m4t1 + ") (len=" + k4Len + ")" + + " \"nil\": (" + m4t2 + ") \n}" + addDumpTest(m4, "("+m4t+") "+m4s+"\n") + addDumpTest(pm4, "(*"+m4t+")("+m4Addr+")("+m4s+")\n") + addDumpTest(&pm4, "(**"+m4t+")("+pm4Addr+"->"+m4Addr+")("+m4s+")\n") + addDumpTest(nm4, "(*"+m4t+")()\n") + addDumpTest(nilMap4, "("+m4t+") \n") +} + +func addStructDumpTests() { + // Struct with primitives. + type s1 struct { + a int8 + b uint8 + } + v := s1{127, 255} + nv := (*s1)(nil) + pv := &v + vAddr := fmt.Sprintf("%p", pv) + pvAddr := fmt.Sprintf("%p", &pv) + vt := "spew_test.s1" + vt2 := "int8" + vt3 := "uint8" + vs := "{\n a: (" + vt2 + ") 127,\n b: (" + vt3 + ") 255\n}" + addDumpTest(v, "("+vt+") "+vs+"\n") + addDumpTest(pv, "(*"+vt+")("+vAddr+")("+vs+")\n") + addDumpTest(&pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")("+vs+")\n") + addDumpTest(nv, "(*"+vt+")()\n") + + // Struct that contains another struct. 
+ type s2 struct { + s1 s1 + b bool + } + v2 := s2{s1{127, 255}, true} + nv2 := (*s2)(nil) + pv2 := &v2 + v2Addr := fmt.Sprintf("%p", pv2) + pv2Addr := fmt.Sprintf("%p", &pv2) + v2t := "spew_test.s2" + v2t2 := "spew_test.s1" + v2t3 := "int8" + v2t4 := "uint8" + v2t5 := "bool" + v2s := "{\n s1: (" + v2t2 + ") {\n a: (" + v2t3 + ") 127,\n b: (" + + v2t4 + ") 255\n },\n b: (" + v2t5 + ") true\n}" + addDumpTest(v2, "("+v2t+") "+v2s+"\n") + addDumpTest(pv2, "(*"+v2t+")("+v2Addr+")("+v2s+")\n") + addDumpTest(&pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")("+v2s+")\n") + addDumpTest(nv2, "(*"+v2t+")()\n") + + // Struct that contains custom type with Stringer pointer interface via both + // exported and unexported fields. + type s3 struct { + s pstringer + S pstringer + } + v3 := s3{"test", "test2"} + nv3 := (*s3)(nil) + pv3 := &v3 + v3Addr := fmt.Sprintf("%p", pv3) + pv3Addr := fmt.Sprintf("%p", &pv3) + v3t := "spew_test.s3" + v3t2 := "spew_test.pstringer" + v3s := "{\n s: (" + v3t2 + ") (len=4) stringer test,\n S: (" + v3t2 + + ") (len=5) stringer test2\n}" + v3sp := v3s + if spew.UnsafeDisabled { + v3s = "{\n s: (" + v3t2 + ") (len=4) \"test\",\n S: (" + + v3t2 + ") (len=5) \"test2\"\n}" + v3sp = "{\n s: (" + v3t2 + ") (len=4) \"test\",\n S: (" + + v3t2 + ") (len=5) stringer test2\n}" + } + addDumpTest(v3, "("+v3t+") "+v3s+"\n") + addDumpTest(pv3, "(*"+v3t+")("+v3Addr+")("+v3sp+")\n") + addDumpTest(&pv3, "(**"+v3t+")("+pv3Addr+"->"+v3Addr+")("+v3sp+")\n") + addDumpTest(nv3, "(*"+v3t+")()\n") + + // Struct that contains embedded struct and field to same struct. 
+ e := embed{"embedstr"} + eLen := fmt.Sprintf("%d", len("embedstr")) + v4 := embedwrap{embed: &e, e: &e} + nv4 := (*embedwrap)(nil) + pv4 := &v4 + eAddr := fmt.Sprintf("%p", &e) + v4Addr := fmt.Sprintf("%p", pv4) + pv4Addr := fmt.Sprintf("%p", &pv4) + v4t := "spew_test.embedwrap" + v4t2 := "spew_test.embed" + v4t3 := "string" + v4s := "{\n embed: (*" + v4t2 + ")(" + eAddr + ")({\n a: (" + v4t3 + + ") (len=" + eLen + ") \"embedstr\"\n }),\n e: (*" + v4t2 + + ")(" + eAddr + ")({\n a: (" + v4t3 + ") (len=" + eLen + ")" + + " \"embedstr\"\n })\n}" + addDumpTest(v4, "("+v4t+") "+v4s+"\n") + addDumpTest(pv4, "(*"+v4t+")("+v4Addr+")("+v4s+")\n") + addDumpTest(&pv4, "(**"+v4t+")("+pv4Addr+"->"+v4Addr+")("+v4s+")\n") + addDumpTest(nv4, "(*"+v4t+")()\n") +} + +func addUintptrDumpTests() { + // Null pointer. + v := uintptr(0) + pv := &v + vAddr := fmt.Sprintf("%p", pv) + pvAddr := fmt.Sprintf("%p", &pv) + vt := "uintptr" + vs := "" + addDumpTest(v, "("+vt+") "+vs+"\n") + addDumpTest(pv, "(*"+vt+")("+vAddr+")("+vs+")\n") + addDumpTest(&pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")("+vs+")\n") + + // Address of real variable. + i := 1 + v2 := uintptr(unsafe.Pointer(&i)) + nv2 := (*uintptr)(nil) + pv2 := &v2 + v2Addr := fmt.Sprintf("%p", pv2) + pv2Addr := fmt.Sprintf("%p", &pv2) + v2t := "uintptr" + v2s := fmt.Sprintf("%p", &i) + addDumpTest(v2, "("+v2t+") "+v2s+"\n") + addDumpTest(pv2, "(*"+v2t+")("+v2Addr+")("+v2s+")\n") + addDumpTest(&pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")("+v2s+")\n") + addDumpTest(nv2, "(*"+v2t+")()\n") +} + +func addUnsafePointerDumpTests() { + // Null pointer. + v := unsafe.Pointer(uintptr(0)) + nv := (*unsafe.Pointer)(nil) + pv := &v + vAddr := fmt.Sprintf("%p", pv) + pvAddr := fmt.Sprintf("%p", &pv) + vt := "unsafe.Pointer" + vs := "" + addDumpTest(v, "("+vt+") "+vs+"\n") + addDumpTest(pv, "(*"+vt+")("+vAddr+")("+vs+")\n") + addDumpTest(&pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")("+vs+")\n") + addDumpTest(nv, "(*"+vt+")()\n") + + // Address of real variable. 
+ i := 1 + v2 := unsafe.Pointer(&i) + pv2 := &v2 + v2Addr := fmt.Sprintf("%p", pv2) + pv2Addr := fmt.Sprintf("%p", &pv2) + v2t := "unsafe.Pointer" + v2s := fmt.Sprintf("%p", &i) + addDumpTest(v2, "("+v2t+") "+v2s+"\n") + addDumpTest(pv2, "(*"+v2t+")("+v2Addr+")("+v2s+")\n") + addDumpTest(&pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")("+v2s+")\n") + addDumpTest(nv, "(*"+vt+")()\n") +} + +func addChanDumpTests() { + // Nil channel. + var v chan int + pv := &v + nv := (*chan int)(nil) + vAddr := fmt.Sprintf("%p", pv) + pvAddr := fmt.Sprintf("%p", &pv) + vt := "chan int" + vs := "" + addDumpTest(v, "("+vt+") "+vs+"\n") + addDumpTest(pv, "(*"+vt+")("+vAddr+")("+vs+")\n") + addDumpTest(&pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")("+vs+")\n") + addDumpTest(nv, "(*"+vt+")()\n") + + // Real channel. + v2 := make(chan int) + pv2 := &v2 + v2Addr := fmt.Sprintf("%p", pv2) + pv2Addr := fmt.Sprintf("%p", &pv2) + v2t := "chan int" + v2s := fmt.Sprintf("%p", v2) + addDumpTest(v2, "("+v2t+") "+v2s+"\n") + addDumpTest(pv2, "(*"+v2t+")("+v2Addr+")("+v2s+")\n") + addDumpTest(&pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")("+v2s+")\n") +} + +func addFuncDumpTests() { + // Function with no params and no returns. + v := addIntDumpTests + nv := (*func())(nil) + pv := &v + vAddr := fmt.Sprintf("%p", pv) + pvAddr := fmt.Sprintf("%p", &pv) + vt := "func()" + vs := fmt.Sprintf("%p", v) + addDumpTest(v, "("+vt+") "+vs+"\n") + addDumpTest(pv, "(*"+vt+")("+vAddr+")("+vs+")\n") + addDumpTest(&pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")("+vs+")\n") + addDumpTest(nv, "(*"+vt+")()\n") + + // Function with param and no returns. 
+ v2 := TestDump + nv2 := (*func(*testing.T))(nil) + pv2 := &v2 + v2Addr := fmt.Sprintf("%p", pv2) + pv2Addr := fmt.Sprintf("%p", &pv2) + v2t := "func(*testing.T)" + v2s := fmt.Sprintf("%p", v2) + addDumpTest(v2, "("+v2t+") "+v2s+"\n") + addDumpTest(pv2, "(*"+v2t+")("+v2Addr+")("+v2s+")\n") + addDumpTest(&pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")("+v2s+")\n") + addDumpTest(nv2, "(*"+v2t+")()\n") + + // Function with multiple params and multiple returns. + var v3 = func(i int, s string) (b bool, err error) { + return true, nil + } + nv3 := (*func(int, string) (bool, error))(nil) + pv3 := &v3 + v3Addr := fmt.Sprintf("%p", pv3) + pv3Addr := fmt.Sprintf("%p", &pv3) + v3t := "func(int, string) (bool, error)" + v3s := fmt.Sprintf("%p", v3) + addDumpTest(v3, "("+v3t+") "+v3s+"\n") + addDumpTest(pv3, "(*"+v3t+")("+v3Addr+")("+v3s+")\n") + addDumpTest(&pv3, "(**"+v3t+")("+pv3Addr+"->"+v3Addr+")("+v3s+")\n") + addDumpTest(nv3, "(*"+v3t+")()\n") +} + +func addCircularDumpTests() { + // Struct that is circular through self referencing. + type circular struct { + c *circular + } + v := circular{nil} + v.c = &v + pv := &v + vAddr := fmt.Sprintf("%p", pv) + pvAddr := fmt.Sprintf("%p", &pv) + vt := "spew_test.circular" + vs := "{\n c: (*" + vt + ")(" + vAddr + ")({\n c: (*" + vt + ")(" + + vAddr + ")()\n })\n}" + vs2 := "{\n c: (*" + vt + ")(" + vAddr + ")()\n}" + addDumpTest(v, "("+vt+") "+vs+"\n") + addDumpTest(pv, "(*"+vt+")("+vAddr+")("+vs2+")\n") + addDumpTest(&pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")("+vs2+")\n") + + // Structs that are circular through cross referencing. 
+ v2 := xref1{nil} + ts2 := xref2{&v2} + v2.ps2 = &ts2 + pv2 := &v2 + ts2Addr := fmt.Sprintf("%p", &ts2) + v2Addr := fmt.Sprintf("%p", pv2) + pv2Addr := fmt.Sprintf("%p", &pv2) + v2t := "spew_test.xref1" + v2t2 := "spew_test.xref2" + v2s := "{\n ps2: (*" + v2t2 + ")(" + ts2Addr + ")({\n ps1: (*" + v2t + + ")(" + v2Addr + ")({\n ps2: (*" + v2t2 + ")(" + ts2Addr + + ")()\n })\n })\n}" + v2s2 := "{\n ps2: (*" + v2t2 + ")(" + ts2Addr + ")({\n ps1: (*" + v2t + + ")(" + v2Addr + ")()\n })\n}" + addDumpTest(v2, "("+v2t+") "+v2s+"\n") + addDumpTest(pv2, "(*"+v2t+")("+v2Addr+")("+v2s2+")\n") + addDumpTest(&pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")("+v2s2+")\n") + + // Structs that are indirectly circular. + v3 := indirCir1{nil} + tic2 := indirCir2{nil} + tic3 := indirCir3{&v3} + tic2.ps3 = &tic3 + v3.ps2 = &tic2 + pv3 := &v3 + tic2Addr := fmt.Sprintf("%p", &tic2) + tic3Addr := fmt.Sprintf("%p", &tic3) + v3Addr := fmt.Sprintf("%p", pv3) + pv3Addr := fmt.Sprintf("%p", &pv3) + v3t := "spew_test.indirCir1" + v3t2 := "spew_test.indirCir2" + v3t3 := "spew_test.indirCir3" + v3s := "{\n ps2: (*" + v3t2 + ")(" + tic2Addr + ")({\n ps3: (*" + v3t3 + + ")(" + tic3Addr + ")({\n ps1: (*" + v3t + ")(" + v3Addr + + ")({\n ps2: (*" + v3t2 + ")(" + tic2Addr + + ")()\n })\n })\n })\n}" + v3s2 := "{\n ps2: (*" + v3t2 + ")(" + tic2Addr + ")({\n ps3: (*" + v3t3 + + ")(" + tic3Addr + ")({\n ps1: (*" + v3t + ")(" + v3Addr + + ")()\n })\n })\n}" + addDumpTest(v3, "("+v3t+") "+v3s+"\n") + addDumpTest(pv3, "(*"+v3t+")("+v3Addr+")("+v3s2+")\n") + addDumpTest(&pv3, "(**"+v3t+")("+pv3Addr+"->"+v3Addr+")("+v3s2+")\n") +} + +func addPanicDumpTests() { + // Type that panics in its Stringer interface. 
+ v := panicer(127) + nv := (*panicer)(nil) + pv := &v + vAddr := fmt.Sprintf("%p", pv) + pvAddr := fmt.Sprintf("%p", &pv) + vt := "spew_test.panicer" + vs := "(PANIC=test panic)127" + addDumpTest(v, "("+vt+") "+vs+"\n") + addDumpTest(pv, "(*"+vt+")("+vAddr+")("+vs+")\n") + addDumpTest(&pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")("+vs+")\n") + addDumpTest(nv, "(*"+vt+")()\n") +} + +func addErrorDumpTests() { + // Type that has a custom Error interface. + v := customError(127) + nv := (*customError)(nil) + pv := &v + vAddr := fmt.Sprintf("%p", pv) + pvAddr := fmt.Sprintf("%p", &pv) + vt := "spew_test.customError" + vs := "error: 127" + addDumpTest(v, "("+vt+") "+vs+"\n") + addDumpTest(pv, "(*"+vt+")("+vAddr+")("+vs+")\n") + addDumpTest(&pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")("+vs+")\n") + addDumpTest(nv, "(*"+vt+")()\n") +} + +// TestDump executes all of the tests described by dumpTests. +func TestDump(t *testing.T) { + // Setup tests. + addIntDumpTests() + addUintDumpTests() + addBoolDumpTests() + addFloatDumpTests() + addComplexDumpTests() + addArrayDumpTests() + addSliceDumpTests() + addStringDumpTests() + addInterfaceDumpTests() + addMapDumpTests() + addStructDumpTests() + addUintptrDumpTests() + addUnsafePointerDumpTests() + addChanDumpTests() + addFuncDumpTests() + addCircularDumpTests() + addPanicDumpTests() + addErrorDumpTests() + addCgoDumpTests() + + t.Logf("Running %d tests", len(dumpTests)) + for i, test := range dumpTests { + buf := new(bytes.Buffer) + spew.Fdump(buf, test.in) + s := buf.String() + if testFailed(s, test.wants) { + t.Errorf("Dump #%d\n got: %s %s", i, s, stringizeWants(test.wants)) + continue + } + } +} + +func TestDumpSortedKeys(t *testing.T) { + cfg := spew.ConfigState{SortKeys: true} + s := cfg.Sdump(map[int]string{1: "1", 3: "3", 2: "2"}) + expected := "(map[int]string) (len=3) {\n(int) 1: (string) (len=1) " + + "\"1\",\n(int) 2: (string) (len=1) \"2\",\n(int) 3: (string) " + + "(len=1) \"3\"\n" + + "}\n" + if s != expected { + 
t.Errorf("Sorted keys mismatch:\n %v %v", s, expected) + } + + s = cfg.Sdump(map[stringer]int{"1": 1, "3": 3, "2": 2}) + expected = "(map[spew_test.stringer]int) (len=3) {\n" + + "(spew_test.stringer) (len=1) stringer 1: (int) 1,\n" + + "(spew_test.stringer) (len=1) stringer 2: (int) 2,\n" + + "(spew_test.stringer) (len=1) stringer 3: (int) 3\n" + + "}\n" + if s != expected { + t.Errorf("Sorted keys mismatch:\n %v %v", s, expected) + } + + s = cfg.Sdump(map[pstringer]int{pstringer("1"): 1, pstringer("3"): 3, pstringer("2"): 2}) + expected = "(map[spew_test.pstringer]int) (len=3) {\n" + + "(spew_test.pstringer) (len=1) stringer 1: (int) 1,\n" + + "(spew_test.pstringer) (len=1) stringer 2: (int) 2,\n" + + "(spew_test.pstringer) (len=1) stringer 3: (int) 3\n" + + "}\n" + if spew.UnsafeDisabled { + expected = "(map[spew_test.pstringer]int) (len=3) {\n" + + "(spew_test.pstringer) (len=1) \"1\": (int) 1,\n" + + "(spew_test.pstringer) (len=1) \"2\": (int) 2,\n" + + "(spew_test.pstringer) (len=1) \"3\": (int) 3\n" + + "}\n" + } + if s != expected { + t.Errorf("Sorted keys mismatch:\n %v %v", s, expected) + } + + s = cfg.Sdump(map[customError]int{customError(1): 1, customError(3): 3, customError(2): 2}) + expected = "(map[spew_test.customError]int) (len=3) {\n" + + "(spew_test.customError) error: 1: (int) 1,\n" + + "(spew_test.customError) error: 2: (int) 2,\n" + + "(spew_test.customError) error: 3: (int) 3\n" + + "}\n" + if s != expected { + t.Errorf("Sorted keys mismatch:\n %v %v", s, expected) + } + +} diff --git a/vendor/github.com/davecgh/go-spew/spew/dumpcgo_test.go b/vendor/github.com/davecgh/go-spew/spew/dumpcgo_test.go new file mode 100644 index 000000000..6ab180809 --- /dev/null +++ b/vendor/github.com/davecgh/go-spew/spew/dumpcgo_test.go @@ -0,0 +1,99 @@ +// Copyright (c) 2013-2016 Dave Collins +// +// Permission to use, copy, modify, and distribute this software for any +// purpose with or without fee is hereby granted, provided that the above +// copyright 
notice and this permission notice appear in all copies. +// +// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES +// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF +// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR +// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES +// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN +// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF +// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + +// NOTE: Due to the following build constraints, this file will only be compiled +// when both cgo is supported and "-tags testcgo" is added to the go test +// command line. This means the cgo tests are only added (and hence run) when +// specifially requested. This configuration is used because spew itself +// does not require cgo to run even though it does handle certain cgo types +// specially. Rather than forcing all clients to require cgo and an external +// C compiler just to run the tests, this scheme makes them optional. +// +build cgo,testcgo + +package spew_test + +import ( + "fmt" + + "github.com/davecgh/go-spew/spew/testdata" +) + +func addCgoDumpTests() { + // C char pointer. + v := testdata.GetCgoCharPointer() + nv := testdata.GetCgoNullCharPointer() + pv := &v + vcAddr := fmt.Sprintf("%p", v) + vAddr := fmt.Sprintf("%p", pv) + pvAddr := fmt.Sprintf("%p", &pv) + vt := "*testdata._Ctype_char" + vs := "116" + addDumpTest(v, "("+vt+")("+vcAddr+")("+vs+")\n") + addDumpTest(pv, "(*"+vt+")("+vAddr+"->"+vcAddr+")("+vs+")\n") + addDumpTest(&pv, "(**"+vt+")("+pvAddr+"->"+vAddr+"->"+vcAddr+")("+vs+")\n") + addDumpTest(nv, "("+vt+")()\n") + + // C char array. 
+ v2, v2l, v2c := testdata.GetCgoCharArray() + v2Len := fmt.Sprintf("%d", v2l) + v2Cap := fmt.Sprintf("%d", v2c) + v2t := "[6]testdata._Ctype_char" + v2s := "(len=" + v2Len + " cap=" + v2Cap + ") " + + "{\n 00000000 74 65 73 74 32 00 " + + " |test2.|\n}" + addDumpTest(v2, "("+v2t+") "+v2s+"\n") + + // C unsigned char array. + v3, v3l, v3c := testdata.GetCgoUnsignedCharArray() + v3Len := fmt.Sprintf("%d", v3l) + v3Cap := fmt.Sprintf("%d", v3c) + v3t := "[6]testdata._Ctype_unsignedchar" + v3t2 := "[6]testdata._Ctype_uchar" + v3s := "(len=" + v3Len + " cap=" + v3Cap + ") " + + "{\n 00000000 74 65 73 74 33 00 " + + " |test3.|\n}" + addDumpTest(v3, "("+v3t+") "+v3s+"\n", "("+v3t2+") "+v3s+"\n") + + // C signed char array. + v4, v4l, v4c := testdata.GetCgoSignedCharArray() + v4Len := fmt.Sprintf("%d", v4l) + v4Cap := fmt.Sprintf("%d", v4c) + v4t := "[6]testdata._Ctype_schar" + v4t2 := "testdata._Ctype_schar" + v4s := "(len=" + v4Len + " cap=" + v4Cap + ") " + + "{\n (" + v4t2 + ") 116,\n (" + v4t2 + ") 101,\n (" + v4t2 + + ") 115,\n (" + v4t2 + ") 116,\n (" + v4t2 + ") 52,\n (" + v4t2 + + ") 0\n}" + addDumpTest(v4, "("+v4t+") "+v4s+"\n") + + // C uint8_t array. + v5, v5l, v5c := testdata.GetCgoUint8tArray() + v5Len := fmt.Sprintf("%d", v5l) + v5Cap := fmt.Sprintf("%d", v5c) + v5t := "[6]testdata._Ctype_uint8_t" + v5s := "(len=" + v5Len + " cap=" + v5Cap + ") " + + "{\n 00000000 74 65 73 74 35 00 " + + " |test5.|\n}" + addDumpTest(v5, "("+v5t+") "+v5s+"\n") + + // C typedefed unsigned char array. 
+ v6, v6l, v6c := testdata.GetCgoTypdefedUnsignedCharArray() + v6Len := fmt.Sprintf("%d", v6l) + v6Cap := fmt.Sprintf("%d", v6c) + v6t := "[6]testdata._Ctype_custom_uchar_t" + v6s := "(len=" + v6Len + " cap=" + v6Cap + ") " + + "{\n 00000000 74 65 73 74 36 00 " + + " |test6.|\n}" + addDumpTest(v6, "("+v6t+") "+v6s+"\n") +} diff --git a/vendor/github.com/davecgh/go-spew/spew/dumpnocgo_test.go b/vendor/github.com/davecgh/go-spew/spew/dumpnocgo_test.go new file mode 100644 index 000000000..52a0971fb --- /dev/null +++ b/vendor/github.com/davecgh/go-spew/spew/dumpnocgo_test.go @@ -0,0 +1,26 @@ +// Copyright (c) 2013 Dave Collins +// +// Permission to use, copy, modify, and distribute this software for any +// purpose with or without fee is hereby granted, provided that the above +// copyright notice and this permission notice appear in all copies. +// +// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES +// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF +// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR +// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES +// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN +// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF +// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + +// NOTE: Due to the following build constraints, this file will only be compiled +// when either cgo is not supported or "-tags testcgo" is not added to the go +// test command line. This file intentionally does not setup any cgo tests in +// this scenario. +// +build !cgo !testcgo + +package spew_test + +func addCgoDumpTests() { + // Don't add any tests for cgo since this file is only compiled when + // there should not be any cgo tests. 
+} diff --git a/vendor/github.com/davecgh/go-spew/spew/example_test.go b/vendor/github.com/davecgh/go-spew/spew/example_test.go new file mode 100644 index 000000000..c6ec8c6d5 --- /dev/null +++ b/vendor/github.com/davecgh/go-spew/spew/example_test.go @@ -0,0 +1,226 @@ +/* + * Copyright (c) 2013-2016 Dave Collins + * + * Permission to use, copy, modify, and distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +package spew_test + +import ( + "fmt" + + "github.com/davecgh/go-spew/spew" +) + +type Flag int + +const ( + flagOne Flag = iota + flagTwo +) + +var flagStrings = map[Flag]string{ + flagOne: "flagOne", + flagTwo: "flagTwo", +} + +func (f Flag) String() string { + if s, ok := flagStrings[f]; ok { + return s + } + return fmt.Sprintf("Unknown flag (%d)", int(f)) +} + +type Bar struct { + data uintptr +} + +type Foo struct { + unexportedField Bar + ExportedField map[interface{}]interface{} +} + +// This example demonstrates how to use Dump to dump variables to stdout. 
+func ExampleDump() { + // The following package level declarations are assumed for this example: + /* + type Flag int + + const ( + flagOne Flag = iota + flagTwo + ) + + var flagStrings = map[Flag]string{ + flagOne: "flagOne", + flagTwo: "flagTwo", + } + + func (f Flag) String() string { + if s, ok := flagStrings[f]; ok { + return s + } + return fmt.Sprintf("Unknown flag (%d)", int(f)) + } + + type Bar struct { + data uintptr + } + + type Foo struct { + unexportedField Bar + ExportedField map[interface{}]interface{} + } + */ + + // Setup some sample data structures for the example. + bar := Bar{uintptr(0)} + s1 := Foo{bar, map[interface{}]interface{}{"one": true}} + f := Flag(5) + b := []byte{ + 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, + 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, 0x20, + 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, 0x28, + 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, 0x30, + 0x31, 0x32, + } + + // Dump! + spew.Dump(s1, f, b) + + // Output: + // (spew_test.Foo) { + // unexportedField: (spew_test.Bar) { + // data: (uintptr) + // }, + // ExportedField: (map[interface {}]interface {}) (len=1) { + // (string) (len=3) "one": (bool) true + // } + // } + // (spew_test.Flag) Unknown flag (5) + // ([]uint8) (len=34 cap=34) { + // 00000000 11 12 13 14 15 16 17 18 19 1a 1b 1c 1d 1e 1f 20 |............... | + // 00000010 21 22 23 24 25 26 27 28 29 2a 2b 2c 2d 2e 2f 30 |!"#$%&'()*+,-./0| + // 00000020 31 32 |12| + // } + // +} + +// This example demonstrates how to use Printf to display a variable with a +// format string and inline formatting. +func ExamplePrintf() { + // Create a double pointer to a uint 8. + ui8 := uint8(5) + pui8 := &ui8 + ppui8 := &pui8 + + // Create a circular data type. + type circular struct { + ui8 uint8 + c *circular + } + c := circular{ui8: 1} + c.c = &c + + // Print! 
+ spew.Printf("ppui8: %v\n", ppui8) + spew.Printf("circular: %v\n", c) + + // Output: + // ppui8: <**>5 + // circular: {1 <*>{1 <*>}} +} + +// This example demonstrates how to use a ConfigState. +func ExampleConfigState() { + // Modify the indent level of the ConfigState only. The global + // configuration is not modified. + scs := spew.ConfigState{Indent: "\t"} + + // Output using the ConfigState instance. + v := map[string]int{"one": 1} + scs.Printf("v: %v\n", v) + scs.Dump(v) + + // Output: + // v: map[one:1] + // (map[string]int) (len=1) { + // (string) (len=3) "one": (int) 1 + // } +} + +// This example demonstrates how to use ConfigState.Dump to dump variables to +// stdout +func ExampleConfigState_Dump() { + // See the top-level Dump example for details on the types used in this + // example. + + // Create two ConfigState instances with different indentation. + scs := spew.ConfigState{Indent: "\t"} + scs2 := spew.ConfigState{Indent: " "} + + // Setup some sample data structures for the example. + bar := Bar{uintptr(0)} + s1 := Foo{bar, map[interface{}]interface{}{"one": true}} + + // Dump using the ConfigState instances. + scs.Dump(s1) + scs2.Dump(s1) + + // Output: + // (spew_test.Foo) { + // unexportedField: (spew_test.Bar) { + // data: (uintptr) + // }, + // ExportedField: (map[interface {}]interface {}) (len=1) { + // (string) (len=3) "one": (bool) true + // } + // } + // (spew_test.Foo) { + // unexportedField: (spew_test.Bar) { + // data: (uintptr) + // }, + // ExportedField: (map[interface {}]interface {}) (len=1) { + // (string) (len=3) "one": (bool) true + // } + // } + // +} + +// This example demonstrates how to use ConfigState.Printf to display a variable +// with a format string and inline formatting. +func ExampleConfigState_Printf() { + // See the top-level Dump example for details on the types used in this + // example. + + // Create two ConfigState instances and modify the method handling of the + // first ConfigState only. 
+ scs := spew.NewDefaultConfig() + scs2 := spew.NewDefaultConfig() + scs.DisableMethods = true + + // Alternatively + // scs := spew.ConfigState{Indent: " ", DisableMethods: true} + // scs2 := spew.ConfigState{Indent: " "} + + // This is of type Flag which implements a Stringer and has raw value 1. + f := flagTwo + + // Dump using the ConfigState instances. + scs.Printf("f: %v\n", f) + scs2.Printf("f: %v\n", f) + + // Output: + // f: 1 + // f: flagTwo +} diff --git a/vendor/github.com/davecgh/go-spew/spew/format.go b/vendor/github.com/davecgh/go-spew/spew/format.go new file mode 100644 index 000000000..c49875bac --- /dev/null +++ b/vendor/github.com/davecgh/go-spew/spew/format.go @@ -0,0 +1,419 @@ +/* + * Copyright (c) 2013-2016 Dave Collins + * + * Permission to use, copy, modify, and distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +package spew + +import ( + "bytes" + "fmt" + "reflect" + "strconv" + "strings" +) + +// supportedFlags is a list of all the character flags supported by fmt package. +const supportedFlags = "0-+# " + +// formatState implements the fmt.Formatter interface and contains information +// about the state of a formatting operation. The NewFormatter function can +// be used to get a new Formatter which can be used directly as arguments +// in standard fmt package printing calls. 
+type formatState struct { + value interface{} + fs fmt.State + depth int + pointers map[uintptr]int + ignoreNextType bool + cs *ConfigState +} + +// buildDefaultFormat recreates the original format string without precision +// and width information to pass in to fmt.Sprintf in the case of an +// unrecognized type. Unless new types are added to the language, this +// function won't ever be called. +func (f *formatState) buildDefaultFormat() (format string) { + buf := bytes.NewBuffer(percentBytes) + + for _, flag := range supportedFlags { + if f.fs.Flag(int(flag)) { + buf.WriteRune(flag) + } + } + + buf.WriteRune('v') + + format = buf.String() + return format +} + +// constructOrigFormat recreates the original format string including precision +// and width information to pass along to the standard fmt package. This allows +// automatic deferral of all format strings this package doesn't support. +func (f *formatState) constructOrigFormat(verb rune) (format string) { + buf := bytes.NewBuffer(percentBytes) + + for _, flag := range supportedFlags { + if f.fs.Flag(int(flag)) { + buf.WriteRune(flag) + } + } + + if width, ok := f.fs.Width(); ok { + buf.WriteString(strconv.Itoa(width)) + } + + if precision, ok := f.fs.Precision(); ok { + buf.Write(precisionBytes) + buf.WriteString(strconv.Itoa(precision)) + } + + buf.WriteRune(verb) + + format = buf.String() + return format +} + +// unpackValue returns values inside of non-nil interfaces when possible and +// ensures that types for values which have been unpacked from an interface +// are displayed when the show types flag is also set. +// This is useful for data types like structs, arrays, slices, and maps which +// can contain varying types packed inside an interface. 
+func (f *formatState) unpackValue(v reflect.Value) reflect.Value { + if v.Kind() == reflect.Interface { + f.ignoreNextType = false + if !v.IsNil() { + v = v.Elem() + } + } + return v +} + +// formatPtr handles formatting of pointers by indirecting them as necessary. +func (f *formatState) formatPtr(v reflect.Value) { + // Display nil if top level pointer is nil. + showTypes := f.fs.Flag('#') + if v.IsNil() && (!showTypes || f.ignoreNextType) { + f.fs.Write(nilAngleBytes) + return + } + + // Remove pointers at or below the current depth from map used to detect + // circular refs. + for k, depth := range f.pointers { + if depth >= f.depth { + delete(f.pointers, k) + } + } + + // Keep list of all dereferenced pointers to possibly show later. + pointerChain := make([]uintptr, 0) + + // Figure out how many levels of indirection there are by derferencing + // pointers and unpacking interfaces down the chain while detecting circular + // references. + nilFound := false + cycleFound := false + indirects := 0 + ve := v + for ve.Kind() == reflect.Ptr { + if ve.IsNil() { + nilFound = true + break + } + indirects++ + addr := ve.Pointer() + pointerChain = append(pointerChain, addr) + if pd, ok := f.pointers[addr]; ok && pd < f.depth { + cycleFound = true + indirects-- + break + } + f.pointers[addr] = f.depth + + ve = ve.Elem() + if ve.Kind() == reflect.Interface { + if ve.IsNil() { + nilFound = true + break + } + ve = ve.Elem() + } + } + + // Display type or indirection level depending on flags. + if showTypes && !f.ignoreNextType { + f.fs.Write(openParenBytes) + f.fs.Write(bytes.Repeat(asteriskBytes, indirects)) + f.fs.Write([]byte(ve.Type().String())) + f.fs.Write(closeParenBytes) + } else { + if nilFound || cycleFound { + indirects += strings.Count(ve.Type().String(), "*") + } + f.fs.Write(openAngleBytes) + f.fs.Write([]byte(strings.Repeat("*", indirects))) + f.fs.Write(closeAngleBytes) + } + + // Display pointer information depending on flags. 
+ if f.fs.Flag('+') && (len(pointerChain) > 0) { + f.fs.Write(openParenBytes) + for i, addr := range pointerChain { + if i > 0 { + f.fs.Write(pointerChainBytes) + } + printHexPtr(f.fs, addr) + } + f.fs.Write(closeParenBytes) + } + + // Display dereferenced value. + switch { + case nilFound == true: + f.fs.Write(nilAngleBytes) + + case cycleFound == true: + f.fs.Write(circularShortBytes) + + default: + f.ignoreNextType = true + f.format(ve) + } +} + +// format is the main workhorse for providing the Formatter interface. It +// uses the passed reflect value to figure out what kind of object we are +// dealing with and formats it appropriately. It is a recursive function, +// however circular data structures are detected and handled properly. +func (f *formatState) format(v reflect.Value) { + // Handle invalid reflect values immediately. + kind := v.Kind() + if kind == reflect.Invalid { + f.fs.Write(invalidAngleBytes) + return + } + + // Handle pointers specially. + if kind == reflect.Ptr { + f.formatPtr(v) + return + } + + // Print type information unless already handled elsewhere. + if !f.ignoreNextType && f.fs.Flag('#') { + f.fs.Write(openParenBytes) + f.fs.Write([]byte(v.Type().String())) + f.fs.Write(closeParenBytes) + } + f.ignoreNextType = false + + // Call Stringer/error interfaces if they exist and the handle methods + // flag is enabled. + if !f.cs.DisableMethods { + if (kind != reflect.Invalid) && (kind != reflect.Interface) { + if handled := handleMethods(f.cs, f.fs, v); handled { + return + } + } + } + + switch kind { + case reflect.Invalid: + // Do nothing. We should never get here since invalid has already + // been handled above. 
+ + case reflect.Bool: + printBool(f.fs, v.Bool()) + + case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int: + printInt(f.fs, v.Int(), 10) + + case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint: + printUint(f.fs, v.Uint(), 10) + + case reflect.Float32: + printFloat(f.fs, v.Float(), 32) + + case reflect.Float64: + printFloat(f.fs, v.Float(), 64) + + case reflect.Complex64: + printComplex(f.fs, v.Complex(), 32) + + case reflect.Complex128: + printComplex(f.fs, v.Complex(), 64) + + case reflect.Slice: + if v.IsNil() { + f.fs.Write(nilAngleBytes) + break + } + fallthrough + + case reflect.Array: + f.fs.Write(openBracketBytes) + f.depth++ + if (f.cs.MaxDepth != 0) && (f.depth > f.cs.MaxDepth) { + f.fs.Write(maxShortBytes) + } else { + numEntries := v.Len() + for i := 0; i < numEntries; i++ { + if i > 0 { + f.fs.Write(spaceBytes) + } + f.ignoreNextType = true + f.format(f.unpackValue(v.Index(i))) + } + } + f.depth-- + f.fs.Write(closeBracketBytes) + + case reflect.String: + f.fs.Write([]byte(v.String())) + + case reflect.Interface: + // The only time we should get here is for nil interfaces due to + // unpackValue calls. + if v.IsNil() { + f.fs.Write(nilAngleBytes) + } + + case reflect.Ptr: + // Do nothing. We should never get here since pointers have already + // been handled above. 
+ + case reflect.Map: + // nil maps should be indicated as different than empty maps + if v.IsNil() { + f.fs.Write(nilAngleBytes) + break + } + + f.fs.Write(openMapBytes) + f.depth++ + if (f.cs.MaxDepth != 0) && (f.depth > f.cs.MaxDepth) { + f.fs.Write(maxShortBytes) + } else { + keys := v.MapKeys() + if f.cs.SortKeys { + sortValues(keys, f.cs) + } + for i, key := range keys { + if i > 0 { + f.fs.Write(spaceBytes) + } + f.ignoreNextType = true + f.format(f.unpackValue(key)) + f.fs.Write(colonBytes) + f.ignoreNextType = true + f.format(f.unpackValue(v.MapIndex(key))) + } + } + f.depth-- + f.fs.Write(closeMapBytes) + + case reflect.Struct: + numFields := v.NumField() + f.fs.Write(openBraceBytes) + f.depth++ + if (f.cs.MaxDepth != 0) && (f.depth > f.cs.MaxDepth) { + f.fs.Write(maxShortBytes) + } else { + vt := v.Type() + for i := 0; i < numFields; i++ { + if i > 0 { + f.fs.Write(spaceBytes) + } + vtf := vt.Field(i) + if f.fs.Flag('+') || f.fs.Flag('#') { + f.fs.Write([]byte(vtf.Name)) + f.fs.Write(colonBytes) + } + f.format(f.unpackValue(v.Field(i))) + } + } + f.depth-- + f.fs.Write(closeBraceBytes) + + case reflect.Uintptr: + printHexPtr(f.fs, uintptr(v.Uint())) + + case reflect.UnsafePointer, reflect.Chan, reflect.Func: + printHexPtr(f.fs, v.Pointer()) + + // There were not any other types at the time this code was written, but + // fall back to letting the default fmt package handle it if any get added. + default: + format := f.buildDefaultFormat() + if v.CanInterface() { + fmt.Fprintf(f.fs, format, v.Interface()) + } else { + fmt.Fprintf(f.fs, format, v.String()) + } + } +} + +// Format satisfies the fmt.Formatter interface. See NewFormatter for usage +// details. +func (f *formatState) Format(fs fmt.State, verb rune) { + f.fs = fs + + // Use standard formatting for verbs that are not v. 
+ if verb != 'v' { + format := f.constructOrigFormat(verb) + fmt.Fprintf(fs, format, f.value) + return + } + + if f.value == nil { + if fs.Flag('#') { + fs.Write(interfaceBytes) + } + fs.Write(nilAngleBytes) + return + } + + f.format(reflect.ValueOf(f.value)) +} + +// newFormatter is a helper function to consolidate the logic from the various +// public methods which take varying config states. +func newFormatter(cs *ConfigState, v interface{}) fmt.Formatter { + fs := &formatState{value: v, cs: cs} + fs.pointers = make(map[uintptr]int) + return fs +} + +/* +NewFormatter returns a custom formatter that satisfies the fmt.Formatter +interface. As a result, it integrates cleanly with standard fmt package +printing functions. The formatter is useful for inline printing of smaller data +types similar to the standard %v format specifier. + +The custom formatter only responds to the %v (most compact), %+v (adds pointer +addresses), %#v (adds types), or %#+v (adds types and pointer addresses) verb +combinations. Any other verbs such as %x and %q will be sent to the the +standard fmt package for formatting. In addition, the custom formatter ignores +the width and precision arguments (however they will still work on the format +specifiers not handled by the custom formatter). + +Typically this function shouldn't be called directly. It is much easier to make +use of the custom formatter by calling one of the convenience functions such as +Printf, Println, or Fprintf. 
+*/ +func NewFormatter(v interface{}) fmt.Formatter { + return newFormatter(&Config, v) +} diff --git a/vendor/github.com/davecgh/go-spew/spew/format_test.go b/vendor/github.com/davecgh/go-spew/spew/format_test.go new file mode 100644 index 000000000..f9b93abe8 --- /dev/null +++ b/vendor/github.com/davecgh/go-spew/spew/format_test.go @@ -0,0 +1,1558 @@ +/* + * Copyright (c) 2013-2016 Dave Collins + * + * Permission to use, copy, modify, and distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +/* +Test Summary: +NOTE: For each test, a nil pointer, a single pointer and double pointer to the +base test element are also tested to ensure proper indirection across all types. 
+ +- Max int8, int16, int32, int64, int +- Max uint8, uint16, uint32, uint64, uint +- Boolean true and false +- Standard complex64 and complex128 +- Array containing standard ints +- Array containing type with custom formatter on pointer receiver only +- Array containing interfaces +- Slice containing standard float32 values +- Slice containing type with custom formatter on pointer receiver only +- Slice containing interfaces +- Nil slice +- Standard string +- Nil interface +- Sub-interface +- Map with string keys and int vals +- Map with custom formatter type on pointer receiver only keys and vals +- Map with interface keys and values +- Map with nil interface value +- Struct with primitives +- Struct that contains another struct +- Struct that contains custom type with Stringer pointer interface via both + exported and unexported fields +- Struct that contains embedded struct and field to same struct +- Uintptr to 0 (null pointer) +- Uintptr address of real variable +- Unsafe.Pointer to 0 (null pointer) +- Unsafe.Pointer to address of real variable +- Nil channel +- Standard int channel +- Function with no params and no returns +- Function with param and no returns +- Function with multiple params and multiple returns +- Struct that is circular through self referencing +- Structs that are circular through cross referencing +- Structs that are indirectly circular +- Type that panics in its Stringer interface +- Type that has a custom Error interface +- %x passthrough with uint +- %#x passthrough with uint +- %f passthrough with precision +- %f passthrough with width and precision +- %d passthrough with width +- %q passthrough with string +*/ + +package spew_test + +import ( + "bytes" + "fmt" + "testing" + "unsafe" + + "github.com/davecgh/go-spew/spew" +) + +// formatterTest is used to describe a test to be performed against NewFormatter. 
+type formatterTest struct { + format string + in interface{} + wants []string +} + +// formatterTests houses all of the tests to be performed against NewFormatter. +var formatterTests = make([]formatterTest, 0) + +// addFormatterTest is a helper method to append the passed input and desired +// result to formatterTests. +func addFormatterTest(format string, in interface{}, wants ...string) { + test := formatterTest{format, in, wants} + formatterTests = append(formatterTests, test) +} + +func addIntFormatterTests() { + // Max int8. + v := int8(127) + nv := (*int8)(nil) + pv := &v + vAddr := fmt.Sprintf("%p", pv) + pvAddr := fmt.Sprintf("%p", &pv) + vt := "int8" + vs := "127" + addFormatterTest("%v", v, vs) + addFormatterTest("%v", pv, "<*>"+vs) + addFormatterTest("%v", &pv, "<**>"+vs) + addFormatterTest("%v", nv, "") + addFormatterTest("%+v", v, vs) + addFormatterTest("%+v", pv, "<*>("+vAddr+")"+vs) + addFormatterTest("%+v", &pv, "<**>("+pvAddr+"->"+vAddr+")"+vs) + addFormatterTest("%+v", nv, "") + addFormatterTest("%#v", v, "("+vt+")"+vs) + addFormatterTest("%#v", pv, "(*"+vt+")"+vs) + addFormatterTest("%#v", &pv, "(**"+vt+")"+vs) + addFormatterTest("%#v", nv, "(*"+vt+")"+"") + addFormatterTest("%#+v", v, "("+vt+")"+vs) + addFormatterTest("%#+v", pv, "(*"+vt+")("+vAddr+")"+vs) + addFormatterTest("%#+v", &pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")"+vs) + addFormatterTest("%#+v", nv, "(*"+vt+")"+"") + + // Max int16. 
+ v2 := int16(32767) + nv2 := (*int16)(nil) + pv2 := &v2 + v2Addr := fmt.Sprintf("%p", pv2) + pv2Addr := fmt.Sprintf("%p", &pv2) + v2t := "int16" + v2s := "32767" + addFormatterTest("%v", v2, v2s) + addFormatterTest("%v", pv2, "<*>"+v2s) + addFormatterTest("%v", &pv2, "<**>"+v2s) + addFormatterTest("%v", nv2, "") + addFormatterTest("%+v", v2, v2s) + addFormatterTest("%+v", pv2, "<*>("+v2Addr+")"+v2s) + addFormatterTest("%+v", &pv2, "<**>("+pv2Addr+"->"+v2Addr+")"+v2s) + addFormatterTest("%+v", nv2, "") + addFormatterTest("%#v", v2, "("+v2t+")"+v2s) + addFormatterTest("%#v", pv2, "(*"+v2t+")"+v2s) + addFormatterTest("%#v", &pv2, "(**"+v2t+")"+v2s) + addFormatterTest("%#v", nv2, "(*"+v2t+")"+"") + addFormatterTest("%#+v", v2, "("+v2t+")"+v2s) + addFormatterTest("%#+v", pv2, "(*"+v2t+")("+v2Addr+")"+v2s) + addFormatterTest("%#+v", &pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")"+v2s) + addFormatterTest("%#+v", nv2, "(*"+v2t+")"+"") + + // Max int32. + v3 := int32(2147483647) + nv3 := (*int32)(nil) + pv3 := &v3 + v3Addr := fmt.Sprintf("%p", pv3) + pv3Addr := fmt.Sprintf("%p", &pv3) + v3t := "int32" + v3s := "2147483647" + addFormatterTest("%v", v3, v3s) + addFormatterTest("%v", pv3, "<*>"+v3s) + addFormatterTest("%v", &pv3, "<**>"+v3s) + addFormatterTest("%v", nv3, "") + addFormatterTest("%+v", v3, v3s) + addFormatterTest("%+v", pv3, "<*>("+v3Addr+")"+v3s) + addFormatterTest("%+v", &pv3, "<**>("+pv3Addr+"->"+v3Addr+")"+v3s) + addFormatterTest("%+v", nv3, "") + addFormatterTest("%#v", v3, "("+v3t+")"+v3s) + addFormatterTest("%#v", pv3, "(*"+v3t+")"+v3s) + addFormatterTest("%#v", &pv3, "(**"+v3t+")"+v3s) + addFormatterTest("%#v", nv3, "(*"+v3t+")"+"") + addFormatterTest("%#+v", v3, "("+v3t+")"+v3s) + addFormatterTest("%#+v", pv3, "(*"+v3t+")("+v3Addr+")"+v3s) + addFormatterTest("%#+v", &pv3, "(**"+v3t+")("+pv3Addr+"->"+v3Addr+")"+v3s) + addFormatterTest("%#v", nv3, "(*"+v3t+")"+"") + + // Max int64. 
+ v4 := int64(9223372036854775807) + nv4 := (*int64)(nil) + pv4 := &v4 + v4Addr := fmt.Sprintf("%p", pv4) + pv4Addr := fmt.Sprintf("%p", &pv4) + v4t := "int64" + v4s := "9223372036854775807" + addFormatterTest("%v", v4, v4s) + addFormatterTest("%v", pv4, "<*>"+v4s) + addFormatterTest("%v", &pv4, "<**>"+v4s) + addFormatterTest("%v", nv4, "") + addFormatterTest("%+v", v4, v4s) + addFormatterTest("%+v", pv4, "<*>("+v4Addr+")"+v4s) + addFormatterTest("%+v", &pv4, "<**>("+pv4Addr+"->"+v4Addr+")"+v4s) + addFormatterTest("%+v", nv4, "") + addFormatterTest("%#v", v4, "("+v4t+")"+v4s) + addFormatterTest("%#v", pv4, "(*"+v4t+")"+v4s) + addFormatterTest("%#v", &pv4, "(**"+v4t+")"+v4s) + addFormatterTest("%#v", nv4, "(*"+v4t+")"+"") + addFormatterTest("%#+v", v4, "("+v4t+")"+v4s) + addFormatterTest("%#+v", pv4, "(*"+v4t+")("+v4Addr+")"+v4s) + addFormatterTest("%#+v", &pv4, "(**"+v4t+")("+pv4Addr+"->"+v4Addr+")"+v4s) + addFormatterTest("%#+v", nv4, "(*"+v4t+")"+"") + + // Max int. + v5 := int(2147483647) + nv5 := (*int)(nil) + pv5 := &v5 + v5Addr := fmt.Sprintf("%p", pv5) + pv5Addr := fmt.Sprintf("%p", &pv5) + v5t := "int" + v5s := "2147483647" + addFormatterTest("%v", v5, v5s) + addFormatterTest("%v", pv5, "<*>"+v5s) + addFormatterTest("%v", &pv5, "<**>"+v5s) + addFormatterTest("%v", nv5, "") + addFormatterTest("%+v", v5, v5s) + addFormatterTest("%+v", pv5, "<*>("+v5Addr+")"+v5s) + addFormatterTest("%+v", &pv5, "<**>("+pv5Addr+"->"+v5Addr+")"+v5s) + addFormatterTest("%+v", nv5, "") + addFormatterTest("%#v", v5, "("+v5t+")"+v5s) + addFormatterTest("%#v", pv5, "(*"+v5t+")"+v5s) + addFormatterTest("%#v", &pv5, "(**"+v5t+")"+v5s) + addFormatterTest("%#v", nv5, "(*"+v5t+")"+"") + addFormatterTest("%#+v", v5, "("+v5t+")"+v5s) + addFormatterTest("%#+v", pv5, "(*"+v5t+")("+v5Addr+")"+v5s) + addFormatterTest("%#+v", &pv5, "(**"+v5t+")("+pv5Addr+"->"+v5Addr+")"+v5s) + addFormatterTest("%#+v", nv5, "(*"+v5t+")"+"") +} + +func addUintFormatterTests() { + // Max uint8. 
+ v := uint8(255) + nv := (*uint8)(nil) + pv := &v + vAddr := fmt.Sprintf("%p", pv) + pvAddr := fmt.Sprintf("%p", &pv) + vt := "uint8" + vs := "255" + addFormatterTest("%v", v, vs) + addFormatterTest("%v", pv, "<*>"+vs) + addFormatterTest("%v", &pv, "<**>"+vs) + addFormatterTest("%v", nv, "") + addFormatterTest("%+v", v, vs) + addFormatterTest("%+v", pv, "<*>("+vAddr+")"+vs) + addFormatterTest("%+v", &pv, "<**>("+pvAddr+"->"+vAddr+")"+vs) + addFormatterTest("%+v", nv, "") + addFormatterTest("%#v", v, "("+vt+")"+vs) + addFormatterTest("%#v", pv, "(*"+vt+")"+vs) + addFormatterTest("%#v", &pv, "(**"+vt+")"+vs) + addFormatterTest("%#v", nv, "(*"+vt+")"+"") + addFormatterTest("%#+v", v, "("+vt+")"+vs) + addFormatterTest("%#+v", pv, "(*"+vt+")("+vAddr+")"+vs) + addFormatterTest("%#+v", &pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")"+vs) + addFormatterTest("%#+v", nv, "(*"+vt+")"+"") + + // Max uint16. + v2 := uint16(65535) + nv2 := (*uint16)(nil) + pv2 := &v2 + v2Addr := fmt.Sprintf("%p", pv2) + pv2Addr := fmt.Sprintf("%p", &pv2) + v2t := "uint16" + v2s := "65535" + addFormatterTest("%v", v2, v2s) + addFormatterTest("%v", pv2, "<*>"+v2s) + addFormatterTest("%v", &pv2, "<**>"+v2s) + addFormatterTest("%v", nv2, "") + addFormatterTest("%+v", v2, v2s) + addFormatterTest("%+v", pv2, "<*>("+v2Addr+")"+v2s) + addFormatterTest("%+v", &pv2, "<**>("+pv2Addr+"->"+v2Addr+")"+v2s) + addFormatterTest("%+v", nv2, "") + addFormatterTest("%#v", v2, "("+v2t+")"+v2s) + addFormatterTest("%#v", pv2, "(*"+v2t+")"+v2s) + addFormatterTest("%#v", &pv2, "(**"+v2t+")"+v2s) + addFormatterTest("%#v", nv2, "(*"+v2t+")"+"") + addFormatterTest("%#+v", v2, "("+v2t+")"+v2s) + addFormatterTest("%#+v", pv2, "(*"+v2t+")("+v2Addr+")"+v2s) + addFormatterTest("%#+v", &pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")"+v2s) + addFormatterTest("%#+v", nv2, "(*"+v2t+")"+"") + + // Max uint32. 
+ v3 := uint32(4294967295) + nv3 := (*uint32)(nil) + pv3 := &v3 + v3Addr := fmt.Sprintf("%p", pv3) + pv3Addr := fmt.Sprintf("%p", &pv3) + v3t := "uint32" + v3s := "4294967295" + addFormatterTest("%v", v3, v3s) + addFormatterTest("%v", pv3, "<*>"+v3s) + addFormatterTest("%v", &pv3, "<**>"+v3s) + addFormatterTest("%v", nv3, "") + addFormatterTest("%+v", v3, v3s) + addFormatterTest("%+v", pv3, "<*>("+v3Addr+")"+v3s) + addFormatterTest("%+v", &pv3, "<**>("+pv3Addr+"->"+v3Addr+")"+v3s) + addFormatterTest("%+v", nv3, "") + addFormatterTest("%#v", v3, "("+v3t+")"+v3s) + addFormatterTest("%#v", pv3, "(*"+v3t+")"+v3s) + addFormatterTest("%#v", &pv3, "(**"+v3t+")"+v3s) + addFormatterTest("%#v", nv3, "(*"+v3t+")"+"") + addFormatterTest("%#+v", v3, "("+v3t+")"+v3s) + addFormatterTest("%#+v", pv3, "(*"+v3t+")("+v3Addr+")"+v3s) + addFormatterTest("%#+v", &pv3, "(**"+v3t+")("+pv3Addr+"->"+v3Addr+")"+v3s) + addFormatterTest("%#v", nv3, "(*"+v3t+")"+"") + + // Max uint64. + v4 := uint64(18446744073709551615) + nv4 := (*uint64)(nil) + pv4 := &v4 + v4Addr := fmt.Sprintf("%p", pv4) + pv4Addr := fmt.Sprintf("%p", &pv4) + v4t := "uint64" + v4s := "18446744073709551615" + addFormatterTest("%v", v4, v4s) + addFormatterTest("%v", pv4, "<*>"+v4s) + addFormatterTest("%v", &pv4, "<**>"+v4s) + addFormatterTest("%v", nv4, "") + addFormatterTest("%+v", v4, v4s) + addFormatterTest("%+v", pv4, "<*>("+v4Addr+")"+v4s) + addFormatterTest("%+v", &pv4, "<**>("+pv4Addr+"->"+v4Addr+")"+v4s) + addFormatterTest("%+v", nv4, "") + addFormatterTest("%#v", v4, "("+v4t+")"+v4s) + addFormatterTest("%#v", pv4, "(*"+v4t+")"+v4s) + addFormatterTest("%#v", &pv4, "(**"+v4t+")"+v4s) + addFormatterTest("%#v", nv4, "(*"+v4t+")"+"") + addFormatterTest("%#+v", v4, "("+v4t+")"+v4s) + addFormatterTest("%#+v", pv4, "(*"+v4t+")("+v4Addr+")"+v4s) + addFormatterTest("%#+v", &pv4, "(**"+v4t+")("+pv4Addr+"->"+v4Addr+")"+v4s) + addFormatterTest("%#+v", nv4, "(*"+v4t+")"+"") + + // Max uint. 
+ v5 := uint(4294967295) + nv5 := (*uint)(nil) + pv5 := &v5 + v5Addr := fmt.Sprintf("%p", pv5) + pv5Addr := fmt.Sprintf("%p", &pv5) + v5t := "uint" + v5s := "4294967295" + addFormatterTest("%v", v5, v5s) + addFormatterTest("%v", pv5, "<*>"+v5s) + addFormatterTest("%v", &pv5, "<**>"+v5s) + addFormatterTest("%v", nv5, "") + addFormatterTest("%+v", v5, v5s) + addFormatterTest("%+v", pv5, "<*>("+v5Addr+")"+v5s) + addFormatterTest("%+v", &pv5, "<**>("+pv5Addr+"->"+v5Addr+")"+v5s) + addFormatterTest("%+v", nv5, "") + addFormatterTest("%#v", v5, "("+v5t+")"+v5s) + addFormatterTest("%#v", pv5, "(*"+v5t+")"+v5s) + addFormatterTest("%#v", &pv5, "(**"+v5t+")"+v5s) + addFormatterTest("%#v", nv5, "(*"+v5t+")"+"") + addFormatterTest("%#+v", v5, "("+v5t+")"+v5s) + addFormatterTest("%#+v", pv5, "(*"+v5t+")("+v5Addr+")"+v5s) + addFormatterTest("%#+v", &pv5, "(**"+v5t+")("+pv5Addr+"->"+v5Addr+")"+v5s) + addFormatterTest("%#v", nv5, "(*"+v5t+")"+"") +} + +func addBoolFormatterTests() { + // Boolean true. + v := bool(true) + nv := (*bool)(nil) + pv := &v + vAddr := fmt.Sprintf("%p", pv) + pvAddr := fmt.Sprintf("%p", &pv) + vt := "bool" + vs := "true" + addFormatterTest("%v", v, vs) + addFormatterTest("%v", pv, "<*>"+vs) + addFormatterTest("%v", &pv, "<**>"+vs) + addFormatterTest("%v", nv, "") + addFormatterTest("%+v", v, vs) + addFormatterTest("%+v", pv, "<*>("+vAddr+")"+vs) + addFormatterTest("%+v", &pv, "<**>("+pvAddr+"->"+vAddr+")"+vs) + addFormatterTest("%+v", nv, "") + addFormatterTest("%#v", v, "("+vt+")"+vs) + addFormatterTest("%#v", pv, "(*"+vt+")"+vs) + addFormatterTest("%#v", &pv, "(**"+vt+")"+vs) + addFormatterTest("%#v", nv, "(*"+vt+")"+"") + addFormatterTest("%#+v", v, "("+vt+")"+vs) + addFormatterTest("%#+v", pv, "(*"+vt+")("+vAddr+")"+vs) + addFormatterTest("%#+v", &pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")"+vs) + addFormatterTest("%#+v", nv, "(*"+vt+")"+"") + + // Boolean false. 
+ v2 := bool(false) + pv2 := &v2 + v2Addr := fmt.Sprintf("%p", pv2) + pv2Addr := fmt.Sprintf("%p", &pv2) + v2t := "bool" + v2s := "false" + addFormatterTest("%v", v2, v2s) + addFormatterTest("%v", pv2, "<*>"+v2s) + addFormatterTest("%v", &pv2, "<**>"+v2s) + addFormatterTest("%+v", v2, v2s) + addFormatterTest("%+v", pv2, "<*>("+v2Addr+")"+v2s) + addFormatterTest("%+v", &pv2, "<**>("+pv2Addr+"->"+v2Addr+")"+v2s) + addFormatterTest("%#v", v2, "("+v2t+")"+v2s) + addFormatterTest("%#v", pv2, "(*"+v2t+")"+v2s) + addFormatterTest("%#v", &pv2, "(**"+v2t+")"+v2s) + addFormatterTest("%#+v", v2, "("+v2t+")"+v2s) + addFormatterTest("%#+v", pv2, "(*"+v2t+")("+v2Addr+")"+v2s) + addFormatterTest("%#+v", &pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")"+v2s) +} + +func addFloatFormatterTests() { + // Standard float32. + v := float32(3.1415) + nv := (*float32)(nil) + pv := &v + vAddr := fmt.Sprintf("%p", pv) + pvAddr := fmt.Sprintf("%p", &pv) + vt := "float32" + vs := "3.1415" + addFormatterTest("%v", v, vs) + addFormatterTest("%v", pv, "<*>"+vs) + addFormatterTest("%v", &pv, "<**>"+vs) + addFormatterTest("%v", nv, "") + addFormatterTest("%+v", v, vs) + addFormatterTest("%+v", pv, "<*>("+vAddr+")"+vs) + addFormatterTest("%+v", &pv, "<**>("+pvAddr+"->"+vAddr+")"+vs) + addFormatterTest("%+v", nv, "") + addFormatterTest("%#v", v, "("+vt+")"+vs) + addFormatterTest("%#v", pv, "(*"+vt+")"+vs) + addFormatterTest("%#v", &pv, "(**"+vt+")"+vs) + addFormatterTest("%#v", nv, "(*"+vt+")"+"") + addFormatterTest("%#+v", v, "("+vt+")"+vs) + addFormatterTest("%#+v", pv, "(*"+vt+")("+vAddr+")"+vs) + addFormatterTest("%#+v", &pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")"+vs) + addFormatterTest("%#+v", nv, "(*"+vt+")"+"") + + // Standard float64. 
+ v2 := float64(3.1415926) + nv2 := (*float64)(nil) + pv2 := &v2 + v2Addr := fmt.Sprintf("%p", pv2) + pv2Addr := fmt.Sprintf("%p", &pv2) + v2t := "float64" + v2s := "3.1415926" + addFormatterTest("%v", v2, v2s) + addFormatterTest("%v", pv2, "<*>"+v2s) + addFormatterTest("%v", &pv2, "<**>"+v2s) + addFormatterTest("%+v", nv2, "") + addFormatterTest("%+v", v2, v2s) + addFormatterTest("%+v", pv2, "<*>("+v2Addr+")"+v2s) + addFormatterTest("%+v", &pv2, "<**>("+pv2Addr+"->"+v2Addr+")"+v2s) + addFormatterTest("%+v", nv2, "") + addFormatterTest("%#v", v2, "("+v2t+")"+v2s) + addFormatterTest("%#v", pv2, "(*"+v2t+")"+v2s) + addFormatterTest("%#v", &pv2, "(**"+v2t+")"+v2s) + addFormatterTest("%#v", nv2, "(*"+v2t+")"+"") + addFormatterTest("%#+v", v2, "("+v2t+")"+v2s) + addFormatterTest("%#+v", pv2, "(*"+v2t+")("+v2Addr+")"+v2s) + addFormatterTest("%#+v", &pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")"+v2s) + addFormatterTest("%#+v", nv2, "(*"+v2t+")"+"") +} + +func addComplexFormatterTests() { + // Standard complex64. + v := complex(float32(6), -2) + nv := (*complex64)(nil) + pv := &v + vAddr := fmt.Sprintf("%p", pv) + pvAddr := fmt.Sprintf("%p", &pv) + vt := "complex64" + vs := "(6-2i)" + addFormatterTest("%v", v, vs) + addFormatterTest("%v", pv, "<*>"+vs) + addFormatterTest("%v", &pv, "<**>"+vs) + addFormatterTest("%+v", nv, "") + addFormatterTest("%+v", v, vs) + addFormatterTest("%+v", pv, "<*>("+vAddr+")"+vs) + addFormatterTest("%+v", &pv, "<**>("+pvAddr+"->"+vAddr+")"+vs) + addFormatterTest("%+v", nv, "") + addFormatterTest("%#v", v, "("+vt+")"+vs) + addFormatterTest("%#v", pv, "(*"+vt+")"+vs) + addFormatterTest("%#v", &pv, "(**"+vt+")"+vs) + addFormatterTest("%#v", nv, "(*"+vt+")"+"") + addFormatterTest("%#+v", v, "("+vt+")"+vs) + addFormatterTest("%#+v", pv, "(*"+vt+")("+vAddr+")"+vs) + addFormatterTest("%#+v", &pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")"+vs) + addFormatterTest("%#+v", nv, "(*"+vt+")"+"") + + // Standard complex128. 
+ v2 := complex(float64(-6), 2) + nv2 := (*complex128)(nil) + pv2 := &v2 + v2Addr := fmt.Sprintf("%p", pv2) + pv2Addr := fmt.Sprintf("%p", &pv2) + v2t := "complex128" + v2s := "(-6+2i)" + addFormatterTest("%v", v2, v2s) + addFormatterTest("%v", pv2, "<*>"+v2s) + addFormatterTest("%v", &pv2, "<**>"+v2s) + addFormatterTest("%+v", nv2, "") + addFormatterTest("%+v", v2, v2s) + addFormatterTest("%+v", pv2, "<*>("+v2Addr+")"+v2s) + addFormatterTest("%+v", &pv2, "<**>("+pv2Addr+"->"+v2Addr+")"+v2s) + addFormatterTest("%+v", nv2, "") + addFormatterTest("%#v", v2, "("+v2t+")"+v2s) + addFormatterTest("%#v", pv2, "(*"+v2t+")"+v2s) + addFormatterTest("%#v", &pv2, "(**"+v2t+")"+v2s) + addFormatterTest("%#v", nv2, "(*"+v2t+")"+"") + addFormatterTest("%#+v", v2, "("+v2t+")"+v2s) + addFormatterTest("%#+v", pv2, "(*"+v2t+")("+v2Addr+")"+v2s) + addFormatterTest("%#+v", &pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")"+v2s) + addFormatterTest("%#+v", nv2, "(*"+v2t+")"+"") +} + +func addArrayFormatterTests() { + // Array containing standard ints. 
+ v := [3]int{1, 2, 3} + nv := (*[3]int)(nil) + pv := &v + vAddr := fmt.Sprintf("%p", pv) + pvAddr := fmt.Sprintf("%p", &pv) + vt := "[3]int" + vs := "[1 2 3]" + addFormatterTest("%v", v, vs) + addFormatterTest("%v", pv, "<*>"+vs) + addFormatterTest("%v", &pv, "<**>"+vs) + addFormatterTest("%+v", nv, "") + addFormatterTest("%+v", v, vs) + addFormatterTest("%+v", pv, "<*>("+vAddr+")"+vs) + addFormatterTest("%+v", &pv, "<**>("+pvAddr+"->"+vAddr+")"+vs) + addFormatterTest("%+v", nv, "") + addFormatterTest("%#v", v, "("+vt+")"+vs) + addFormatterTest("%#v", pv, "(*"+vt+")"+vs) + addFormatterTest("%#v", &pv, "(**"+vt+")"+vs) + addFormatterTest("%#v", nv, "(*"+vt+")"+"") + addFormatterTest("%#+v", v, "("+vt+")"+vs) + addFormatterTest("%#+v", pv, "(*"+vt+")("+vAddr+")"+vs) + addFormatterTest("%#+v", &pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")"+vs) + addFormatterTest("%#+v", nv, "(*"+vt+")"+"") + + // Array containing type with custom formatter on pointer receiver only. + v2 := [3]pstringer{"1", "2", "3"} + nv2 := (*[3]pstringer)(nil) + pv2 := &v2 + v2Addr := fmt.Sprintf("%p", pv2) + pv2Addr := fmt.Sprintf("%p", &pv2) + v2t := "[3]spew_test.pstringer" + v2sp := "[stringer 1 stringer 2 stringer 3]" + v2s := v2sp + if spew.UnsafeDisabled { + v2s = "[1 2 3]" + } + addFormatterTest("%v", v2, v2s) + addFormatterTest("%v", pv2, "<*>"+v2sp) + addFormatterTest("%v", &pv2, "<**>"+v2sp) + addFormatterTest("%+v", nv2, "") + addFormatterTest("%+v", v2, v2s) + addFormatterTest("%+v", pv2, "<*>("+v2Addr+")"+v2sp) + addFormatterTest("%+v", &pv2, "<**>("+pv2Addr+"->"+v2Addr+")"+v2sp) + addFormatterTest("%+v", nv2, "") + addFormatterTest("%#v", v2, "("+v2t+")"+v2s) + addFormatterTest("%#v", pv2, "(*"+v2t+")"+v2sp) + addFormatterTest("%#v", &pv2, "(**"+v2t+")"+v2sp) + addFormatterTest("%#v", nv2, "(*"+v2t+")"+"") + addFormatterTest("%#+v", v2, "("+v2t+")"+v2s) + addFormatterTest("%#+v", pv2, "(*"+v2t+")("+v2Addr+")"+v2sp) + addFormatterTest("%#+v", &pv2, 
"(**"+v2t+")("+pv2Addr+"->"+v2Addr+")"+v2sp) + addFormatterTest("%#+v", nv2, "(*"+v2t+")"+"") + + // Array containing interfaces. + v3 := [3]interface{}{"one", int(2), uint(3)} + nv3 := (*[3]interface{})(nil) + pv3 := &v3 + v3Addr := fmt.Sprintf("%p", pv3) + pv3Addr := fmt.Sprintf("%p", &pv3) + v3t := "[3]interface {}" + v3t2 := "string" + v3t3 := "int" + v3t4 := "uint" + v3s := "[one 2 3]" + v3s2 := "[(" + v3t2 + ")one (" + v3t3 + ")2 (" + v3t4 + ")3]" + addFormatterTest("%v", v3, v3s) + addFormatterTest("%v", pv3, "<*>"+v3s) + addFormatterTest("%v", &pv3, "<**>"+v3s) + addFormatterTest("%+v", nv3, "") + addFormatterTest("%+v", v3, v3s) + addFormatterTest("%+v", pv3, "<*>("+v3Addr+")"+v3s) + addFormatterTest("%+v", &pv3, "<**>("+pv3Addr+"->"+v3Addr+")"+v3s) + addFormatterTest("%+v", nv3, "") + addFormatterTest("%#v", v3, "("+v3t+")"+v3s2) + addFormatterTest("%#v", pv3, "(*"+v3t+")"+v3s2) + addFormatterTest("%#v", &pv3, "(**"+v3t+")"+v3s2) + addFormatterTest("%#v", nv3, "(*"+v3t+")"+"") + addFormatterTest("%#+v", v3, "("+v3t+")"+v3s2) + addFormatterTest("%#+v", pv3, "(*"+v3t+")("+v3Addr+")"+v3s2) + addFormatterTest("%#+v", &pv3, "(**"+v3t+")("+pv3Addr+"->"+v3Addr+")"+v3s2) + addFormatterTest("%#+v", nv3, "(*"+v3t+")"+"") +} + +func addSliceFormatterTests() { + // Slice containing standard float32 values. 
+ v := []float32{3.14, 6.28, 12.56} + nv := (*[]float32)(nil) + pv := &v + vAddr := fmt.Sprintf("%p", pv) + pvAddr := fmt.Sprintf("%p", &pv) + vt := "[]float32" + vs := "[3.14 6.28 12.56]" + addFormatterTest("%v", v, vs) + addFormatterTest("%v", pv, "<*>"+vs) + addFormatterTest("%v", &pv, "<**>"+vs) + addFormatterTest("%+v", nv, "") + addFormatterTest("%+v", v, vs) + addFormatterTest("%+v", pv, "<*>("+vAddr+")"+vs) + addFormatterTest("%+v", &pv, "<**>("+pvAddr+"->"+vAddr+")"+vs) + addFormatterTest("%+v", nv, "") + addFormatterTest("%#v", v, "("+vt+")"+vs) + addFormatterTest("%#v", pv, "(*"+vt+")"+vs) + addFormatterTest("%#v", &pv, "(**"+vt+")"+vs) + addFormatterTest("%#v", nv, "(*"+vt+")"+"") + addFormatterTest("%#+v", v, "("+vt+")"+vs) + addFormatterTest("%#+v", pv, "(*"+vt+")("+vAddr+")"+vs) + addFormatterTest("%#+v", &pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")"+vs) + addFormatterTest("%#+v", nv, "(*"+vt+")"+"") + + // Slice containing type with custom formatter on pointer receiver only. + v2 := []pstringer{"1", "2", "3"} + nv2 := (*[]pstringer)(nil) + pv2 := &v2 + v2Addr := fmt.Sprintf("%p", pv2) + pv2Addr := fmt.Sprintf("%p", &pv2) + v2t := "[]spew_test.pstringer" + v2s := "[stringer 1 stringer 2 stringer 3]" + addFormatterTest("%v", v2, v2s) + addFormatterTest("%v", pv2, "<*>"+v2s) + addFormatterTest("%v", &pv2, "<**>"+v2s) + addFormatterTest("%+v", nv2, "") + addFormatterTest("%+v", v2, v2s) + addFormatterTest("%+v", pv2, "<*>("+v2Addr+")"+v2s) + addFormatterTest("%+v", &pv2, "<**>("+pv2Addr+"->"+v2Addr+")"+v2s) + addFormatterTest("%+v", nv2, "") + addFormatterTest("%#v", v2, "("+v2t+")"+v2s) + addFormatterTest("%#v", pv2, "(*"+v2t+")"+v2s) + addFormatterTest("%#v", &pv2, "(**"+v2t+")"+v2s) + addFormatterTest("%#v", nv2, "(*"+v2t+")"+"") + addFormatterTest("%#+v", v2, "("+v2t+")"+v2s) + addFormatterTest("%#+v", pv2, "(*"+v2t+")("+v2Addr+")"+v2s) + addFormatterTest("%#+v", &pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")"+v2s) + addFormatterTest("%#+v", nv2, 
"(*"+v2t+")"+"") + + // Slice containing interfaces. + v3 := []interface{}{"one", int(2), uint(3), nil} + nv3 := (*[]interface{})(nil) + pv3 := &v3 + v3Addr := fmt.Sprintf("%p", pv3) + pv3Addr := fmt.Sprintf("%p", &pv3) + v3t := "[]interface {}" + v3t2 := "string" + v3t3 := "int" + v3t4 := "uint" + v3t5 := "interface {}" + v3s := "[one 2 3 ]" + v3s2 := "[(" + v3t2 + ")one (" + v3t3 + ")2 (" + v3t4 + ")3 (" + v3t5 + + ")]" + addFormatterTest("%v", v3, v3s) + addFormatterTest("%v", pv3, "<*>"+v3s) + addFormatterTest("%v", &pv3, "<**>"+v3s) + addFormatterTest("%+v", nv3, "") + addFormatterTest("%+v", v3, v3s) + addFormatterTest("%+v", pv3, "<*>("+v3Addr+")"+v3s) + addFormatterTest("%+v", &pv3, "<**>("+pv3Addr+"->"+v3Addr+")"+v3s) + addFormatterTest("%+v", nv3, "") + addFormatterTest("%#v", v3, "("+v3t+")"+v3s2) + addFormatterTest("%#v", pv3, "(*"+v3t+")"+v3s2) + addFormatterTest("%#v", &pv3, "(**"+v3t+")"+v3s2) + addFormatterTest("%#v", nv3, "(*"+v3t+")"+"") + addFormatterTest("%#+v", v3, "("+v3t+")"+v3s2) + addFormatterTest("%#+v", pv3, "(*"+v3t+")("+v3Addr+")"+v3s2) + addFormatterTest("%#+v", &pv3, "(**"+v3t+")("+pv3Addr+"->"+v3Addr+")"+v3s2) + addFormatterTest("%#+v", nv3, "(*"+v3t+")"+"") + + // Nil slice. 
+ var v4 []int + nv4 := (*[]int)(nil) + pv4 := &v4 + v4Addr := fmt.Sprintf("%p", pv4) + pv4Addr := fmt.Sprintf("%p", &pv4) + v4t := "[]int" + v4s := "" + addFormatterTest("%v", v4, v4s) + addFormatterTest("%v", pv4, "<*>"+v4s) + addFormatterTest("%v", &pv4, "<**>"+v4s) + addFormatterTest("%+v", nv4, "") + addFormatterTest("%+v", v4, v4s) + addFormatterTest("%+v", pv4, "<*>("+v4Addr+")"+v4s) + addFormatterTest("%+v", &pv4, "<**>("+pv4Addr+"->"+v4Addr+")"+v4s) + addFormatterTest("%+v", nv4, "") + addFormatterTest("%#v", v4, "("+v4t+")"+v4s) + addFormatterTest("%#v", pv4, "(*"+v4t+")"+v4s) + addFormatterTest("%#v", &pv4, "(**"+v4t+")"+v4s) + addFormatterTest("%#v", nv4, "(*"+v4t+")"+"") + addFormatterTest("%#+v", v4, "("+v4t+")"+v4s) + addFormatterTest("%#+v", pv4, "(*"+v4t+")("+v4Addr+")"+v4s) + addFormatterTest("%#+v", &pv4, "(**"+v4t+")("+pv4Addr+"->"+v4Addr+")"+v4s) + addFormatterTest("%#+v", nv4, "(*"+v4t+")"+"") +} + +func addStringFormatterTests() { + // Standard string. + v := "test" + nv := (*string)(nil) + pv := &v + vAddr := fmt.Sprintf("%p", pv) + pvAddr := fmt.Sprintf("%p", &pv) + vt := "string" + vs := "test" + addFormatterTest("%v", v, vs) + addFormatterTest("%v", pv, "<*>"+vs) + addFormatterTest("%v", &pv, "<**>"+vs) + addFormatterTest("%+v", nv, "") + addFormatterTest("%+v", v, vs) + addFormatterTest("%+v", pv, "<*>("+vAddr+")"+vs) + addFormatterTest("%+v", &pv, "<**>("+pvAddr+"->"+vAddr+")"+vs) + addFormatterTest("%+v", nv, "") + addFormatterTest("%#v", v, "("+vt+")"+vs) + addFormatterTest("%#v", pv, "(*"+vt+")"+vs) + addFormatterTest("%#v", &pv, "(**"+vt+")"+vs) + addFormatterTest("%#v", nv, "(*"+vt+")"+"") + addFormatterTest("%#+v", v, "("+vt+")"+vs) + addFormatterTest("%#+v", pv, "(*"+vt+")("+vAddr+")"+vs) + addFormatterTest("%#+v", &pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")"+vs) + addFormatterTest("%#+v", nv, "(*"+vt+")"+"") +} + +func addInterfaceFormatterTests() { + // Nil interface. 
+ var v interface{} + nv := (*interface{})(nil) + pv := &v + vAddr := fmt.Sprintf("%p", pv) + pvAddr := fmt.Sprintf("%p", &pv) + vt := "interface {}" + vs := "" + addFormatterTest("%v", v, vs) + addFormatterTest("%v", pv, "<*>"+vs) + addFormatterTest("%v", &pv, "<**>"+vs) + addFormatterTest("%+v", nv, "") + addFormatterTest("%+v", v, vs) + addFormatterTest("%+v", pv, "<*>("+vAddr+")"+vs) + addFormatterTest("%+v", &pv, "<**>("+pvAddr+"->"+vAddr+")"+vs) + addFormatterTest("%+v", nv, "") + addFormatterTest("%#v", v, "("+vt+")"+vs) + addFormatterTest("%#v", pv, "(*"+vt+")"+vs) + addFormatterTest("%#v", &pv, "(**"+vt+")"+vs) + addFormatterTest("%#v", nv, "(*"+vt+")"+"") + addFormatterTest("%#+v", v, "("+vt+")"+vs) + addFormatterTest("%#+v", pv, "(*"+vt+")("+vAddr+")"+vs) + addFormatterTest("%#+v", &pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")"+vs) + addFormatterTest("%#+v", nv, "(*"+vt+")"+"") + + // Sub-interface. + v2 := interface{}(uint16(65535)) + pv2 := &v2 + v2Addr := fmt.Sprintf("%p", pv2) + pv2Addr := fmt.Sprintf("%p", &pv2) + v2t := "uint16" + v2s := "65535" + addFormatterTest("%v", v2, v2s) + addFormatterTest("%v", pv2, "<*>"+v2s) + addFormatterTest("%v", &pv2, "<**>"+v2s) + addFormatterTest("%+v", v2, v2s) + addFormatterTest("%+v", pv2, "<*>("+v2Addr+")"+v2s) + addFormatterTest("%+v", &pv2, "<**>("+pv2Addr+"->"+v2Addr+")"+v2s) + addFormatterTest("%#v", v2, "("+v2t+")"+v2s) + addFormatterTest("%#v", pv2, "(*"+v2t+")"+v2s) + addFormatterTest("%#v", &pv2, "(**"+v2t+")"+v2s) + addFormatterTest("%#+v", v2, "("+v2t+")"+v2s) + addFormatterTest("%#+v", pv2, "(*"+v2t+")("+v2Addr+")"+v2s) + addFormatterTest("%#+v", &pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")"+v2s) +} + +func addMapFormatterTests() { + // Map with string keys and int vals. 
+ v := map[string]int{"one": 1, "two": 2} + nilMap := map[string]int(nil) + nv := (*map[string]int)(nil) + pv := &v + vAddr := fmt.Sprintf("%p", pv) + pvAddr := fmt.Sprintf("%p", &pv) + vt := "map[string]int" + vs := "map[one:1 two:2]" + vs2 := "map[two:2 one:1]" + addFormatterTest("%v", v, vs, vs2) + addFormatterTest("%v", pv, "<*>"+vs, "<*>"+vs2) + addFormatterTest("%v", &pv, "<**>"+vs, "<**>"+vs2) + addFormatterTest("%+v", nilMap, "") + addFormatterTest("%+v", nv, "") + addFormatterTest("%+v", v, vs, vs2) + addFormatterTest("%+v", pv, "<*>("+vAddr+")"+vs, "<*>("+vAddr+")"+vs2) + addFormatterTest("%+v", &pv, "<**>("+pvAddr+"->"+vAddr+")"+vs, + "<**>("+pvAddr+"->"+vAddr+")"+vs2) + addFormatterTest("%+v", nilMap, "") + addFormatterTest("%+v", nv, "") + addFormatterTest("%#v", v, "("+vt+")"+vs, "("+vt+")"+vs2) + addFormatterTest("%#v", pv, "(*"+vt+")"+vs, "(*"+vt+")"+vs2) + addFormatterTest("%#v", &pv, "(**"+vt+")"+vs, "(**"+vt+")"+vs2) + addFormatterTest("%#v", nilMap, "("+vt+")"+"") + addFormatterTest("%#v", nv, "(*"+vt+")"+"") + addFormatterTest("%#+v", v, "("+vt+")"+vs, "("+vt+")"+vs2) + addFormatterTest("%#+v", pv, "(*"+vt+")("+vAddr+")"+vs, + "(*"+vt+")("+vAddr+")"+vs2) + addFormatterTest("%#+v", &pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")"+vs, + "(**"+vt+")("+pvAddr+"->"+vAddr+")"+vs2) + addFormatterTest("%#+v", nilMap, "("+vt+")"+"") + addFormatterTest("%#+v", nv, "(*"+vt+")"+"") + + // Map with custom formatter type on pointer receiver only keys and vals. 
+ v2 := map[pstringer]pstringer{"one": "1"} + nv2 := (*map[pstringer]pstringer)(nil) + pv2 := &v2 + v2Addr := fmt.Sprintf("%p", pv2) + pv2Addr := fmt.Sprintf("%p", &pv2) + v2t := "map[spew_test.pstringer]spew_test.pstringer" + v2s := "map[stringer one:stringer 1]" + if spew.UnsafeDisabled { + v2s = "map[one:1]" + } + addFormatterTest("%v", v2, v2s) + addFormatterTest("%v", pv2, "<*>"+v2s) + addFormatterTest("%v", &pv2, "<**>"+v2s) + addFormatterTest("%+v", nv2, "") + addFormatterTest("%+v", v2, v2s) + addFormatterTest("%+v", pv2, "<*>("+v2Addr+")"+v2s) + addFormatterTest("%+v", &pv2, "<**>("+pv2Addr+"->"+v2Addr+")"+v2s) + addFormatterTest("%+v", nv2, "") + addFormatterTest("%#v", v2, "("+v2t+")"+v2s) + addFormatterTest("%#v", pv2, "(*"+v2t+")"+v2s) + addFormatterTest("%#v", &pv2, "(**"+v2t+")"+v2s) + addFormatterTest("%#v", nv2, "(*"+v2t+")"+"") + addFormatterTest("%#+v", v2, "("+v2t+")"+v2s) + addFormatterTest("%#+v", pv2, "(*"+v2t+")("+v2Addr+")"+v2s) + addFormatterTest("%#+v", &pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")"+v2s) + addFormatterTest("%#+v", nv2, "(*"+v2t+")"+"") + + // Map with interface keys and values. 
+ v3 := map[interface{}]interface{}{"one": 1} + nv3 := (*map[interface{}]interface{})(nil) + pv3 := &v3 + v3Addr := fmt.Sprintf("%p", pv3) + pv3Addr := fmt.Sprintf("%p", &pv3) + v3t := "map[interface {}]interface {}" + v3t1 := "string" + v3t2 := "int" + v3s := "map[one:1]" + v3s2 := "map[(" + v3t1 + ")one:(" + v3t2 + ")1]" + addFormatterTest("%v", v3, v3s) + addFormatterTest("%v", pv3, "<*>"+v3s) + addFormatterTest("%v", &pv3, "<**>"+v3s) + addFormatterTest("%+v", nv3, "") + addFormatterTest("%+v", v3, v3s) + addFormatterTest("%+v", pv3, "<*>("+v3Addr+")"+v3s) + addFormatterTest("%+v", &pv3, "<**>("+pv3Addr+"->"+v3Addr+")"+v3s) + addFormatterTest("%+v", nv3, "") + addFormatterTest("%#v", v3, "("+v3t+")"+v3s2) + addFormatterTest("%#v", pv3, "(*"+v3t+")"+v3s2) + addFormatterTest("%#v", &pv3, "(**"+v3t+")"+v3s2) + addFormatterTest("%#v", nv3, "(*"+v3t+")"+"") + addFormatterTest("%#+v", v3, "("+v3t+")"+v3s2) + addFormatterTest("%#+v", pv3, "(*"+v3t+")("+v3Addr+")"+v3s2) + addFormatterTest("%#+v", &pv3, "(**"+v3t+")("+pv3Addr+"->"+v3Addr+")"+v3s2) + addFormatterTest("%#+v", nv3, "(*"+v3t+")"+"") + + // Map with nil interface value + v4 := map[string]interface{}{"nil": nil} + nv4 := (*map[string]interface{})(nil) + pv4 := &v4 + v4Addr := fmt.Sprintf("%p", pv4) + pv4Addr := fmt.Sprintf("%p", &pv4) + v4t := "map[string]interface {}" + v4t1 := "interface {}" + v4s := "map[nil:]" + v4s2 := "map[nil:(" + v4t1 + ")]" + addFormatterTest("%v", v4, v4s) + addFormatterTest("%v", pv4, "<*>"+v4s) + addFormatterTest("%v", &pv4, "<**>"+v4s) + addFormatterTest("%+v", nv4, "") + addFormatterTest("%+v", v4, v4s) + addFormatterTest("%+v", pv4, "<*>("+v4Addr+")"+v4s) + addFormatterTest("%+v", &pv4, "<**>("+pv4Addr+"->"+v4Addr+")"+v4s) + addFormatterTest("%+v", nv4, "") + addFormatterTest("%#v", v4, "("+v4t+")"+v4s2) + addFormatterTest("%#v", pv4, "(*"+v4t+")"+v4s2) + addFormatterTest("%#v", &pv4, "(**"+v4t+")"+v4s2) + addFormatterTest("%#v", nv4, "(*"+v4t+")"+"") + addFormatterTest("%#+v", 
v4, "("+v4t+")"+v4s2) + addFormatterTest("%#+v", pv4, "(*"+v4t+")("+v4Addr+")"+v4s2) + addFormatterTest("%#+v", &pv4, "(**"+v4t+")("+pv4Addr+"->"+v4Addr+")"+v4s2) + addFormatterTest("%#+v", nv4, "(*"+v4t+")"+"") +} + +func addStructFormatterTests() { + // Struct with primitives. + type s1 struct { + a int8 + b uint8 + } + v := s1{127, 255} + nv := (*s1)(nil) + pv := &v + vAddr := fmt.Sprintf("%p", pv) + pvAddr := fmt.Sprintf("%p", &pv) + vt := "spew_test.s1" + vt2 := "int8" + vt3 := "uint8" + vs := "{127 255}" + vs2 := "{a:127 b:255}" + vs3 := "{a:(" + vt2 + ")127 b:(" + vt3 + ")255}" + addFormatterTest("%v", v, vs) + addFormatterTest("%v", pv, "<*>"+vs) + addFormatterTest("%v", &pv, "<**>"+vs) + addFormatterTest("%+v", nv, "") + addFormatterTest("%+v", v, vs2) + addFormatterTest("%+v", pv, "<*>("+vAddr+")"+vs2) + addFormatterTest("%+v", &pv, "<**>("+pvAddr+"->"+vAddr+")"+vs2) + addFormatterTest("%+v", nv, "") + addFormatterTest("%#v", v, "("+vt+")"+vs3) + addFormatterTest("%#v", pv, "(*"+vt+")"+vs3) + addFormatterTest("%#v", &pv, "(**"+vt+")"+vs3) + addFormatterTest("%#v", nv, "(*"+vt+")"+"") + addFormatterTest("%#+v", v, "("+vt+")"+vs3) + addFormatterTest("%#+v", pv, "(*"+vt+")("+vAddr+")"+vs3) + addFormatterTest("%#+v", &pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")"+vs3) + addFormatterTest("%#+v", nv, "(*"+vt+")"+"") + + // Struct that contains another struct. 
+ type s2 struct { + s1 s1 + b bool + } + v2 := s2{s1{127, 255}, true} + nv2 := (*s2)(nil) + pv2 := &v2 + v2Addr := fmt.Sprintf("%p", pv2) + pv2Addr := fmt.Sprintf("%p", &pv2) + v2t := "spew_test.s2" + v2t2 := "spew_test.s1" + v2t3 := "int8" + v2t4 := "uint8" + v2t5 := "bool" + v2s := "{{127 255} true}" + v2s2 := "{s1:{a:127 b:255} b:true}" + v2s3 := "{s1:(" + v2t2 + "){a:(" + v2t3 + ")127 b:(" + v2t4 + ")255} b:(" + + v2t5 + ")true}" + addFormatterTest("%v", v2, v2s) + addFormatterTest("%v", pv2, "<*>"+v2s) + addFormatterTest("%v", &pv2, "<**>"+v2s) + addFormatterTest("%+v", nv2, "") + addFormatterTest("%+v", v2, v2s2) + addFormatterTest("%+v", pv2, "<*>("+v2Addr+")"+v2s2) + addFormatterTest("%+v", &pv2, "<**>("+pv2Addr+"->"+v2Addr+")"+v2s2) + addFormatterTest("%+v", nv2, "") + addFormatterTest("%#v", v2, "("+v2t+")"+v2s3) + addFormatterTest("%#v", pv2, "(*"+v2t+")"+v2s3) + addFormatterTest("%#v", &pv2, "(**"+v2t+")"+v2s3) + addFormatterTest("%#v", nv2, "(*"+v2t+")"+"") + addFormatterTest("%#+v", v2, "("+v2t+")"+v2s3) + addFormatterTest("%#+v", pv2, "(*"+v2t+")("+v2Addr+")"+v2s3) + addFormatterTest("%#+v", &pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")"+v2s3) + addFormatterTest("%#+v", nv2, "(*"+v2t+")"+"") + + // Struct that contains custom type with Stringer pointer interface via both + // exported and unexported fields. 
+ type s3 struct { + s pstringer + S pstringer + } + v3 := s3{"test", "test2"} + nv3 := (*s3)(nil) + pv3 := &v3 + v3Addr := fmt.Sprintf("%p", pv3) + pv3Addr := fmt.Sprintf("%p", &pv3) + v3t := "spew_test.s3" + v3t2 := "spew_test.pstringer" + v3s := "{stringer test stringer test2}" + v3sp := v3s + v3s2 := "{s:stringer test S:stringer test2}" + v3s2p := v3s2 + v3s3 := "{s:(" + v3t2 + ")stringer test S:(" + v3t2 + ")stringer test2}" + v3s3p := v3s3 + if spew.UnsafeDisabled { + v3s = "{test test2}" + v3sp = "{test stringer test2}" + v3s2 = "{s:test S:test2}" + v3s2p = "{s:test S:stringer test2}" + v3s3 = "{s:(" + v3t2 + ")test S:(" + v3t2 + ")test2}" + v3s3p = "{s:(" + v3t2 + ")test S:(" + v3t2 + ")stringer test2}" + } + addFormatterTest("%v", v3, v3s) + addFormatterTest("%v", pv3, "<*>"+v3sp) + addFormatterTest("%v", &pv3, "<**>"+v3sp) + addFormatterTest("%+v", nv3, "") + addFormatterTest("%+v", v3, v3s2) + addFormatterTest("%+v", pv3, "<*>("+v3Addr+")"+v3s2p) + addFormatterTest("%+v", &pv3, "<**>("+pv3Addr+"->"+v3Addr+")"+v3s2p) + addFormatterTest("%+v", nv3, "") + addFormatterTest("%#v", v3, "("+v3t+")"+v3s3) + addFormatterTest("%#v", pv3, "(*"+v3t+")"+v3s3p) + addFormatterTest("%#v", &pv3, "(**"+v3t+")"+v3s3p) + addFormatterTest("%#v", nv3, "(*"+v3t+")"+"") + addFormatterTest("%#+v", v3, "("+v3t+")"+v3s3) + addFormatterTest("%#+v", pv3, "(*"+v3t+")("+v3Addr+")"+v3s3p) + addFormatterTest("%#+v", &pv3, "(**"+v3t+")("+pv3Addr+"->"+v3Addr+")"+v3s3p) + addFormatterTest("%#+v", nv3, "(*"+v3t+")"+"") + + // Struct that contains embedded struct and field to same struct. 
+ e := embed{"embedstr"} + v4 := embedwrap{embed: &e, e: &e} + nv4 := (*embedwrap)(nil) + pv4 := &v4 + eAddr := fmt.Sprintf("%p", &e) + v4Addr := fmt.Sprintf("%p", pv4) + pv4Addr := fmt.Sprintf("%p", &pv4) + v4t := "spew_test.embedwrap" + v4t2 := "spew_test.embed" + v4t3 := "string" + v4s := "{<*>{embedstr} <*>{embedstr}}" + v4s2 := "{embed:<*>(" + eAddr + "){a:embedstr} e:<*>(" + eAddr + + "){a:embedstr}}" + v4s3 := "{embed:(*" + v4t2 + "){a:(" + v4t3 + ")embedstr} e:(*" + v4t2 + + "){a:(" + v4t3 + ")embedstr}}" + v4s4 := "{embed:(*" + v4t2 + ")(" + eAddr + "){a:(" + v4t3 + + ")embedstr} e:(*" + v4t2 + ")(" + eAddr + "){a:(" + v4t3 + ")embedstr}}" + addFormatterTest("%v", v4, v4s) + addFormatterTest("%v", pv4, "<*>"+v4s) + addFormatterTest("%v", &pv4, "<**>"+v4s) + addFormatterTest("%+v", nv4, "") + addFormatterTest("%+v", v4, v4s2) + addFormatterTest("%+v", pv4, "<*>("+v4Addr+")"+v4s2) + addFormatterTest("%+v", &pv4, "<**>("+pv4Addr+"->"+v4Addr+")"+v4s2) + addFormatterTest("%+v", nv4, "") + addFormatterTest("%#v", v4, "("+v4t+")"+v4s3) + addFormatterTest("%#v", pv4, "(*"+v4t+")"+v4s3) + addFormatterTest("%#v", &pv4, "(**"+v4t+")"+v4s3) + addFormatterTest("%#v", nv4, "(*"+v4t+")"+"") + addFormatterTest("%#+v", v4, "("+v4t+")"+v4s4) + addFormatterTest("%#+v", pv4, "(*"+v4t+")("+v4Addr+")"+v4s4) + addFormatterTest("%#+v", &pv4, "(**"+v4t+")("+pv4Addr+"->"+v4Addr+")"+v4s4) + addFormatterTest("%#+v", nv4, "(*"+v4t+")"+"") +} + +func addUintptrFormatterTests() { + // Null pointer. 
+ v := uintptr(0) + nv := (*uintptr)(nil) + pv := &v + vAddr := fmt.Sprintf("%p", pv) + pvAddr := fmt.Sprintf("%p", &pv) + vt := "uintptr" + vs := "" + addFormatterTest("%v", v, vs) + addFormatterTest("%v", pv, "<*>"+vs) + addFormatterTest("%v", &pv, "<**>"+vs) + addFormatterTest("%+v", nv, "") + addFormatterTest("%+v", v, vs) + addFormatterTest("%+v", pv, "<*>("+vAddr+")"+vs) + addFormatterTest("%+v", &pv, "<**>("+pvAddr+"->"+vAddr+")"+vs) + addFormatterTest("%+v", nv, "") + addFormatterTest("%#v", v, "("+vt+")"+vs) + addFormatterTest("%#v", pv, "(*"+vt+")"+vs) + addFormatterTest("%#v", &pv, "(**"+vt+")"+vs) + addFormatterTest("%#v", nv, "(*"+vt+")"+"") + addFormatterTest("%#+v", v, "("+vt+")"+vs) + addFormatterTest("%#+v", pv, "(*"+vt+")("+vAddr+")"+vs) + addFormatterTest("%#+v", &pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")"+vs) + addFormatterTest("%#+v", nv, "(*"+vt+")"+"") + + // Address of real variable. + i := 1 + v2 := uintptr(unsafe.Pointer(&i)) + pv2 := &v2 + v2Addr := fmt.Sprintf("%p", pv2) + pv2Addr := fmt.Sprintf("%p", &pv2) + v2t := "uintptr" + v2s := fmt.Sprintf("%p", &i) + addFormatterTest("%v", v2, v2s) + addFormatterTest("%v", pv2, "<*>"+v2s) + addFormatterTest("%v", &pv2, "<**>"+v2s) + addFormatterTest("%+v", v2, v2s) + addFormatterTest("%+v", pv2, "<*>("+v2Addr+")"+v2s) + addFormatterTest("%+v", &pv2, "<**>("+pv2Addr+"->"+v2Addr+")"+v2s) + addFormatterTest("%#v", v2, "("+v2t+")"+v2s) + addFormatterTest("%#v", pv2, "(*"+v2t+")"+v2s) + addFormatterTest("%#v", &pv2, "(**"+v2t+")"+v2s) + addFormatterTest("%#+v", v2, "("+v2t+")"+v2s) + addFormatterTest("%#+v", pv2, "(*"+v2t+")("+v2Addr+")"+v2s) + addFormatterTest("%#+v", &pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")"+v2s) +} + +func addUnsafePointerFormatterTests() { + // Null pointer. 
+ v := unsafe.Pointer(uintptr(0)) + nv := (*unsafe.Pointer)(nil) + pv := &v + vAddr := fmt.Sprintf("%p", pv) + pvAddr := fmt.Sprintf("%p", &pv) + vt := "unsafe.Pointer" + vs := "" + addFormatterTest("%v", v, vs) + addFormatterTest("%v", pv, "<*>"+vs) + addFormatterTest("%v", &pv, "<**>"+vs) + addFormatterTest("%+v", nv, "") + addFormatterTest("%+v", v, vs) + addFormatterTest("%+v", pv, "<*>("+vAddr+")"+vs) + addFormatterTest("%+v", &pv, "<**>("+pvAddr+"->"+vAddr+")"+vs) + addFormatterTest("%+v", nv, "") + addFormatterTest("%#v", v, "("+vt+")"+vs) + addFormatterTest("%#v", pv, "(*"+vt+")"+vs) + addFormatterTest("%#v", &pv, "(**"+vt+")"+vs) + addFormatterTest("%#v", nv, "(*"+vt+")"+"") + addFormatterTest("%#+v", v, "("+vt+")"+vs) + addFormatterTest("%#+v", pv, "(*"+vt+")("+vAddr+")"+vs) + addFormatterTest("%#+v", &pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")"+vs) + addFormatterTest("%#+v", nv, "(*"+vt+")"+"") + + // Address of real variable. + i := 1 + v2 := unsafe.Pointer(&i) + pv2 := &v2 + v2Addr := fmt.Sprintf("%p", pv2) + pv2Addr := fmt.Sprintf("%p", &pv2) + v2t := "unsafe.Pointer" + v2s := fmt.Sprintf("%p", &i) + addFormatterTest("%v", v2, v2s) + addFormatterTest("%v", pv2, "<*>"+v2s) + addFormatterTest("%v", &pv2, "<**>"+v2s) + addFormatterTest("%+v", v2, v2s) + addFormatterTest("%+v", pv2, "<*>("+v2Addr+")"+v2s) + addFormatterTest("%+v", &pv2, "<**>("+pv2Addr+"->"+v2Addr+")"+v2s) + addFormatterTest("%#v", v2, "("+v2t+")"+v2s) + addFormatterTest("%#v", pv2, "(*"+v2t+")"+v2s) + addFormatterTest("%#v", &pv2, "(**"+v2t+")"+v2s) + addFormatterTest("%#+v", v2, "("+v2t+")"+v2s) + addFormatterTest("%#+v", pv2, "(*"+v2t+")("+v2Addr+")"+v2s) + addFormatterTest("%#+v", &pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")"+v2s) +} + +func addChanFormatterTests() { + // Nil channel. 
+ var v chan int + pv := &v + nv := (*chan int)(nil) + vAddr := fmt.Sprintf("%p", pv) + pvAddr := fmt.Sprintf("%p", &pv) + vt := "chan int" + vs := "" + addFormatterTest("%v", v, vs) + addFormatterTest("%v", pv, "<*>"+vs) + addFormatterTest("%v", &pv, "<**>"+vs) + addFormatterTest("%+v", nv, "") + addFormatterTest("%+v", v, vs) + addFormatterTest("%+v", pv, "<*>("+vAddr+")"+vs) + addFormatterTest("%+v", &pv, "<**>("+pvAddr+"->"+vAddr+")"+vs) + addFormatterTest("%+v", nv, "") + addFormatterTest("%#v", v, "("+vt+")"+vs) + addFormatterTest("%#v", pv, "(*"+vt+")"+vs) + addFormatterTest("%#v", &pv, "(**"+vt+")"+vs) + addFormatterTest("%#v", nv, "(*"+vt+")"+"") + addFormatterTest("%#+v", v, "("+vt+")"+vs) + addFormatterTest("%#+v", pv, "(*"+vt+")("+vAddr+")"+vs) + addFormatterTest("%#+v", &pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")"+vs) + addFormatterTest("%#+v", nv, "(*"+vt+")"+"") + + // Real channel. + v2 := make(chan int) + pv2 := &v2 + v2Addr := fmt.Sprintf("%p", pv2) + pv2Addr := fmt.Sprintf("%p", &pv2) + v2t := "chan int" + v2s := fmt.Sprintf("%p", v2) + addFormatterTest("%v", v2, v2s) + addFormatterTest("%v", pv2, "<*>"+v2s) + addFormatterTest("%v", &pv2, "<**>"+v2s) + addFormatterTest("%+v", v2, v2s) + addFormatterTest("%+v", pv2, "<*>("+v2Addr+")"+v2s) + addFormatterTest("%+v", &pv2, "<**>("+pv2Addr+"->"+v2Addr+")"+v2s) + addFormatterTest("%#v", v2, "("+v2t+")"+v2s) + addFormatterTest("%#v", pv2, "(*"+v2t+")"+v2s) + addFormatterTest("%#v", &pv2, "(**"+v2t+")"+v2s) + addFormatterTest("%#+v", v2, "("+v2t+")"+v2s) + addFormatterTest("%#+v", pv2, "(*"+v2t+")("+v2Addr+")"+v2s) + addFormatterTest("%#+v", &pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")"+v2s) +} + +func addFuncFormatterTests() { + // Function with no params and no returns. 
+ v := addIntFormatterTests + nv := (*func())(nil) + pv := &v + vAddr := fmt.Sprintf("%p", pv) + pvAddr := fmt.Sprintf("%p", &pv) + vt := "func()" + vs := fmt.Sprintf("%p", v) + addFormatterTest("%v", v, vs) + addFormatterTest("%v", pv, "<*>"+vs) + addFormatterTest("%v", &pv, "<**>"+vs) + addFormatterTest("%+v", nv, "") + addFormatterTest("%+v", v, vs) + addFormatterTest("%+v", pv, "<*>("+vAddr+")"+vs) + addFormatterTest("%+v", &pv, "<**>("+pvAddr+"->"+vAddr+")"+vs) + addFormatterTest("%+v", nv, "") + addFormatterTest("%#v", v, "("+vt+")"+vs) + addFormatterTest("%#v", pv, "(*"+vt+")"+vs) + addFormatterTest("%#v", &pv, "(**"+vt+")"+vs) + addFormatterTest("%#v", nv, "(*"+vt+")"+"") + addFormatterTest("%#+v", v, "("+vt+")"+vs) + addFormatterTest("%#+v", pv, "(*"+vt+")("+vAddr+")"+vs) + addFormatterTest("%#+v", &pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")"+vs) + addFormatterTest("%#+v", nv, "(*"+vt+")"+"") + + // Function with param and no returns. + v2 := TestFormatter + nv2 := (*func(*testing.T))(nil) + pv2 := &v2 + v2Addr := fmt.Sprintf("%p", pv2) + pv2Addr := fmt.Sprintf("%p", &pv2) + v2t := "func(*testing.T)" + v2s := fmt.Sprintf("%p", v2) + addFormatterTest("%v", v2, v2s) + addFormatterTest("%v", pv2, "<*>"+v2s) + addFormatterTest("%v", &pv2, "<**>"+v2s) + addFormatterTest("%+v", nv2, "") + addFormatterTest("%+v", v2, v2s) + addFormatterTest("%+v", pv2, "<*>("+v2Addr+")"+v2s) + addFormatterTest("%+v", &pv2, "<**>("+pv2Addr+"->"+v2Addr+")"+v2s) + addFormatterTest("%+v", nv2, "") + addFormatterTest("%#v", v2, "("+v2t+")"+v2s) + addFormatterTest("%#v", pv2, "(*"+v2t+")"+v2s) + addFormatterTest("%#v", &pv2, "(**"+v2t+")"+v2s) + addFormatterTest("%#v", nv2, "(*"+v2t+")"+"") + addFormatterTest("%#+v", v2, "("+v2t+")"+v2s) + addFormatterTest("%#+v", pv2, "(*"+v2t+")("+v2Addr+")"+v2s) + addFormatterTest("%#+v", &pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")"+v2s) + addFormatterTest("%#+v", nv2, "(*"+v2t+")"+"") + + // Function with multiple params and multiple returns. 
+ var v3 = func(i int, s string) (b bool, err error) { + return true, nil + } + nv3 := (*func(int, string) (bool, error))(nil) + pv3 := &v3 + v3Addr := fmt.Sprintf("%p", pv3) + pv3Addr := fmt.Sprintf("%p", &pv3) + v3t := "func(int, string) (bool, error)" + v3s := fmt.Sprintf("%p", v3) + addFormatterTest("%v", v3, v3s) + addFormatterTest("%v", pv3, "<*>"+v3s) + addFormatterTest("%v", &pv3, "<**>"+v3s) + addFormatterTest("%+v", nv3, "") + addFormatterTest("%+v", v3, v3s) + addFormatterTest("%+v", pv3, "<*>("+v3Addr+")"+v3s) + addFormatterTest("%+v", &pv3, "<**>("+pv3Addr+"->"+v3Addr+")"+v3s) + addFormatterTest("%+v", nv3, "") + addFormatterTest("%#v", v3, "("+v3t+")"+v3s) + addFormatterTest("%#v", pv3, "(*"+v3t+")"+v3s) + addFormatterTest("%#v", &pv3, "(**"+v3t+")"+v3s) + addFormatterTest("%#v", nv3, "(*"+v3t+")"+"") + addFormatterTest("%#+v", v3, "("+v3t+")"+v3s) + addFormatterTest("%#+v", pv3, "(*"+v3t+")("+v3Addr+")"+v3s) + addFormatterTest("%#+v", &pv3, "(**"+v3t+")("+pv3Addr+"->"+v3Addr+")"+v3s) + addFormatterTest("%#+v", nv3, "(*"+v3t+")"+"") +} + +func addCircularFormatterTests() { + // Struct that is circular through self referencing. 
+ type circular struct { + c *circular + } + v := circular{nil} + v.c = &v + pv := &v + vAddr := fmt.Sprintf("%p", pv) + pvAddr := fmt.Sprintf("%p", &pv) + vt := "spew_test.circular" + vs := "{<*>{<*>}}" + vs2 := "{<*>}" + vs3 := "{c:<*>(" + vAddr + "){c:<*>(" + vAddr + ")}}" + vs4 := "{c:<*>(" + vAddr + ")}" + vs5 := "{c:(*" + vt + "){c:(*" + vt + ")}}" + vs6 := "{c:(*" + vt + ")}" + vs7 := "{c:(*" + vt + ")(" + vAddr + "){c:(*" + vt + ")(" + vAddr + + ")}}" + vs8 := "{c:(*" + vt + ")(" + vAddr + ")}" + addFormatterTest("%v", v, vs) + addFormatterTest("%v", pv, "<*>"+vs2) + addFormatterTest("%v", &pv, "<**>"+vs2) + addFormatterTest("%+v", v, vs3) + addFormatterTest("%+v", pv, "<*>("+vAddr+")"+vs4) + addFormatterTest("%+v", &pv, "<**>("+pvAddr+"->"+vAddr+")"+vs4) + addFormatterTest("%#v", v, "("+vt+")"+vs5) + addFormatterTest("%#v", pv, "(*"+vt+")"+vs6) + addFormatterTest("%#v", &pv, "(**"+vt+")"+vs6) + addFormatterTest("%#+v", v, "("+vt+")"+vs7) + addFormatterTest("%#+v", pv, "(*"+vt+")("+vAddr+")"+vs8) + addFormatterTest("%#+v", &pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")"+vs8) + + // Structs that are circular through cross referencing. 
+ v2 := xref1{nil} + ts2 := xref2{&v2} + v2.ps2 = &ts2 + pv2 := &v2 + ts2Addr := fmt.Sprintf("%p", &ts2) + v2Addr := fmt.Sprintf("%p", pv2) + pv2Addr := fmt.Sprintf("%p", &pv2) + v2t := "spew_test.xref1" + v2t2 := "spew_test.xref2" + v2s := "{<*>{<*>{<*>}}}" + v2s2 := "{<*>{<*>}}" + v2s3 := "{ps2:<*>(" + ts2Addr + "){ps1:<*>(" + v2Addr + "){ps2:<*>(" + + ts2Addr + ")}}}" + v2s4 := "{ps2:<*>(" + ts2Addr + "){ps1:<*>(" + v2Addr + ")}}" + v2s5 := "{ps2:(*" + v2t2 + "){ps1:(*" + v2t + "){ps2:(*" + v2t2 + + ")}}}" + v2s6 := "{ps2:(*" + v2t2 + "){ps1:(*" + v2t + ")}}" + v2s7 := "{ps2:(*" + v2t2 + ")(" + ts2Addr + "){ps1:(*" + v2t + + ")(" + v2Addr + "){ps2:(*" + v2t2 + ")(" + ts2Addr + + ")}}}" + v2s8 := "{ps2:(*" + v2t2 + ")(" + ts2Addr + "){ps1:(*" + v2t + + ")(" + v2Addr + ")}}" + addFormatterTest("%v", v2, v2s) + addFormatterTest("%v", pv2, "<*>"+v2s2) + addFormatterTest("%v", &pv2, "<**>"+v2s2) + addFormatterTest("%+v", v2, v2s3) + addFormatterTest("%+v", pv2, "<*>("+v2Addr+")"+v2s4) + addFormatterTest("%+v", &pv2, "<**>("+pv2Addr+"->"+v2Addr+")"+v2s4) + addFormatterTest("%#v", v2, "("+v2t+")"+v2s5) + addFormatterTest("%#v", pv2, "(*"+v2t+")"+v2s6) + addFormatterTest("%#v", &pv2, "(**"+v2t+")"+v2s6) + addFormatterTest("%#+v", v2, "("+v2t+")"+v2s7) + addFormatterTest("%#+v", pv2, "(*"+v2t+")("+v2Addr+")"+v2s8) + addFormatterTest("%#+v", &pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")"+v2s8) + + // Structs that are indirectly circular. 
+ v3 := indirCir1{nil} + tic2 := indirCir2{nil} + tic3 := indirCir3{&v3} + tic2.ps3 = &tic3 + v3.ps2 = &tic2 + pv3 := &v3 + tic2Addr := fmt.Sprintf("%p", &tic2) + tic3Addr := fmt.Sprintf("%p", &tic3) + v3Addr := fmt.Sprintf("%p", pv3) + pv3Addr := fmt.Sprintf("%p", &pv3) + v3t := "spew_test.indirCir1" + v3t2 := "spew_test.indirCir2" + v3t3 := "spew_test.indirCir3" + v3s := "{<*>{<*>{<*>{<*>}}}}" + v3s2 := "{<*>{<*>{<*>}}}" + v3s3 := "{ps2:<*>(" + tic2Addr + "){ps3:<*>(" + tic3Addr + "){ps1:<*>(" + + v3Addr + "){ps2:<*>(" + tic2Addr + ")}}}}" + v3s4 := "{ps2:<*>(" + tic2Addr + "){ps3:<*>(" + tic3Addr + "){ps1:<*>(" + + v3Addr + ")}}}" + v3s5 := "{ps2:(*" + v3t2 + "){ps3:(*" + v3t3 + "){ps1:(*" + v3t + + "){ps2:(*" + v3t2 + ")}}}}" + v3s6 := "{ps2:(*" + v3t2 + "){ps3:(*" + v3t3 + "){ps1:(*" + v3t + + ")}}}" + v3s7 := "{ps2:(*" + v3t2 + ")(" + tic2Addr + "){ps3:(*" + v3t3 + ")(" + + tic3Addr + "){ps1:(*" + v3t + ")(" + v3Addr + "){ps2:(*" + v3t2 + + ")(" + tic2Addr + ")}}}}" + v3s8 := "{ps2:(*" + v3t2 + ")(" + tic2Addr + "){ps3:(*" + v3t3 + ")(" + + tic3Addr + "){ps1:(*" + v3t + ")(" + v3Addr + ")}}}" + addFormatterTest("%v", v3, v3s) + addFormatterTest("%v", pv3, "<*>"+v3s2) + addFormatterTest("%v", &pv3, "<**>"+v3s2) + addFormatterTest("%+v", v3, v3s3) + addFormatterTest("%+v", pv3, "<*>("+v3Addr+")"+v3s4) + addFormatterTest("%+v", &pv3, "<**>("+pv3Addr+"->"+v3Addr+")"+v3s4) + addFormatterTest("%#v", v3, "("+v3t+")"+v3s5) + addFormatterTest("%#v", pv3, "(*"+v3t+")"+v3s6) + addFormatterTest("%#v", &pv3, "(**"+v3t+")"+v3s6) + addFormatterTest("%#+v", v3, "("+v3t+")"+v3s7) + addFormatterTest("%#+v", pv3, "(*"+v3t+")("+v3Addr+")"+v3s8) + addFormatterTest("%#+v", &pv3, "(**"+v3t+")("+pv3Addr+"->"+v3Addr+")"+v3s8) +} + +func addPanicFormatterTests() { + // Type that panics in its Stringer interface. 
+ v := panicer(127) + nv := (*panicer)(nil) + pv := &v + vAddr := fmt.Sprintf("%p", pv) + pvAddr := fmt.Sprintf("%p", &pv) + vt := "spew_test.panicer" + vs := "(PANIC=test panic)127" + addFormatterTest("%v", v, vs) + addFormatterTest("%v", pv, "<*>"+vs) + addFormatterTest("%v", &pv, "<**>"+vs) + addFormatterTest("%v", nv, "") + addFormatterTest("%+v", v, vs) + addFormatterTest("%+v", pv, "<*>("+vAddr+")"+vs) + addFormatterTest("%+v", &pv, "<**>("+pvAddr+"->"+vAddr+")"+vs) + addFormatterTest("%+v", nv, "") + addFormatterTest("%#v", v, "("+vt+")"+vs) + addFormatterTest("%#v", pv, "(*"+vt+")"+vs) + addFormatterTest("%#v", &pv, "(**"+vt+")"+vs) + addFormatterTest("%#v", nv, "(*"+vt+")"+"") + addFormatterTest("%#+v", v, "("+vt+")"+vs) + addFormatterTest("%#+v", pv, "(*"+vt+")("+vAddr+")"+vs) + addFormatterTest("%#+v", &pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")"+vs) + addFormatterTest("%#+v", nv, "(*"+vt+")"+"") +} + +func addErrorFormatterTests() { + // Type that has a custom Error interface. + v := customError(127) + nv := (*customError)(nil) + pv := &v + vAddr := fmt.Sprintf("%p", pv) + pvAddr := fmt.Sprintf("%p", &pv) + vt := "spew_test.customError" + vs := "error: 127" + addFormatterTest("%v", v, vs) + addFormatterTest("%v", pv, "<*>"+vs) + addFormatterTest("%v", &pv, "<**>"+vs) + addFormatterTest("%v", nv, "") + addFormatterTest("%+v", v, vs) + addFormatterTest("%+v", pv, "<*>("+vAddr+")"+vs) + addFormatterTest("%+v", &pv, "<**>("+pvAddr+"->"+vAddr+")"+vs) + addFormatterTest("%+v", nv, "") + addFormatterTest("%#v", v, "("+vt+")"+vs) + addFormatterTest("%#v", pv, "(*"+vt+")"+vs) + addFormatterTest("%#v", &pv, "(**"+vt+")"+vs) + addFormatterTest("%#v", nv, "(*"+vt+")"+"") + addFormatterTest("%#+v", v, "("+vt+")"+vs) + addFormatterTest("%#+v", pv, "(*"+vt+")("+vAddr+")"+vs) + addFormatterTest("%#+v", &pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")"+vs) + addFormatterTest("%#+v", nv, "(*"+vt+")"+"") +} + +func addPassthroughFormatterTests() { + // %x passthrough with uint. 
+ v := uint(4294967295) + pv := &v + vAddr := fmt.Sprintf("%x", pv) + pvAddr := fmt.Sprintf("%x", &pv) + vs := "ffffffff" + addFormatterTest("%x", v, vs) + addFormatterTest("%x", pv, vAddr) + addFormatterTest("%x", &pv, pvAddr) + + // %#x passthrough with uint. + v2 := int(2147483647) + pv2 := &v2 + v2Addr := fmt.Sprintf("%#x", pv2) + pv2Addr := fmt.Sprintf("%#x", &pv2) + v2s := "0x7fffffff" + addFormatterTest("%#x", v2, v2s) + addFormatterTest("%#x", pv2, v2Addr) + addFormatterTest("%#x", &pv2, pv2Addr) + + // %f passthrough with precision. + addFormatterTest("%.2f", 3.1415, "3.14") + addFormatterTest("%.3f", 3.1415, "3.142") + addFormatterTest("%.4f", 3.1415, "3.1415") + + // %f passthrough with width and precision. + addFormatterTest("%5.2f", 3.1415, " 3.14") + addFormatterTest("%6.3f", 3.1415, " 3.142") + addFormatterTest("%7.4f", 3.1415, " 3.1415") + + // %d passthrough with width. + addFormatterTest("%3d", 127, "127") + addFormatterTest("%4d", 127, " 127") + addFormatterTest("%5d", 127, " 127") + + // %q passthrough with string. + addFormatterTest("%q", "test", "\"test\"") +} + +// TestFormatter executes all of the tests described by formatterTests. +func TestFormatter(t *testing.T) { + // Setup tests. 
+ addIntFormatterTests() + addUintFormatterTests() + addBoolFormatterTests() + addFloatFormatterTests() + addComplexFormatterTests() + addArrayFormatterTests() + addSliceFormatterTests() + addStringFormatterTests() + addInterfaceFormatterTests() + addMapFormatterTests() + addStructFormatterTests() + addUintptrFormatterTests() + addUnsafePointerFormatterTests() + addChanFormatterTests() + addFuncFormatterTests() + addCircularFormatterTests() + addPanicFormatterTests() + addErrorFormatterTests() + addPassthroughFormatterTests() + + t.Logf("Running %d tests", len(formatterTests)) + for i, test := range formatterTests { + buf := new(bytes.Buffer) + spew.Fprintf(buf, test.format, test.in) + s := buf.String() + if testFailed(s, test.wants) { + t.Errorf("Formatter #%d format: %s got: %s %s", i, test.format, s, + stringizeWants(test.wants)) + continue + } + } +} + +type testStruct struct { + x int +} + +func (ts testStruct) String() string { + return fmt.Sprintf("ts.%d", ts.x) +} + +type testStructP struct { + x int +} + +func (ts *testStructP) String() string { + return fmt.Sprintf("ts.%d", ts.x) +} + +func TestPrintSortedKeys(t *testing.T) { + cfg := spew.ConfigState{SortKeys: true} + s := cfg.Sprint(map[int]string{1: "1", 3: "3", 2: "2"}) + expected := "map[1:1 2:2 3:3]" + if s != expected { + t.Errorf("Sorted keys mismatch 1:\n %v %v", s, expected) + } + + s = cfg.Sprint(map[stringer]int{"1": 1, "3": 3, "2": 2}) + expected = "map[stringer 1:1 stringer 2:2 stringer 3:3]" + if s != expected { + t.Errorf("Sorted keys mismatch 2:\n %v %v", s, expected) + } + + s = cfg.Sprint(map[pstringer]int{pstringer("1"): 1, pstringer("3"): 3, pstringer("2"): 2}) + expected = "map[stringer 1:1 stringer 2:2 stringer 3:3]" + if spew.UnsafeDisabled { + expected = "map[1:1 2:2 3:3]" + } + if s != expected { + t.Errorf("Sorted keys mismatch 3:\n %v %v", s, expected) + } + + s = cfg.Sprint(map[testStruct]int{testStruct{1}: 1, testStruct{3}: 3, testStruct{2}: 2}) + expected = "map[ts.1:1 
ts.2:2 ts.3:3]" + if s != expected { + t.Errorf("Sorted keys mismatch 4:\n %v %v", s, expected) + } + + if !spew.UnsafeDisabled { + s = cfg.Sprint(map[testStructP]int{testStructP{1}: 1, testStructP{3}: 3, testStructP{2}: 2}) + expected = "map[ts.1:1 ts.2:2 ts.3:3]" + if s != expected { + t.Errorf("Sorted keys mismatch 5:\n %v %v", s, expected) + } + } + + s = cfg.Sprint(map[customError]int{customError(1): 1, customError(3): 3, customError(2): 2}) + expected = "map[error: 1:1 error: 2:2 error: 3:3]" + if s != expected { + t.Errorf("Sorted keys mismatch 6:\n %v %v", s, expected) + } +} diff --git a/vendor/github.com/davecgh/go-spew/spew/internal_test.go b/vendor/github.com/davecgh/go-spew/spew/internal_test.go new file mode 100644 index 000000000..20a9cfefc --- /dev/null +++ b/vendor/github.com/davecgh/go-spew/spew/internal_test.go @@ -0,0 +1,87 @@ +/* + * Copyright (c) 2013-2016 Dave Collins + * + * Permission to use, copy, modify, and distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +/* +This test file is part of the spew package rather than than the spew_test +package because it needs access to internals to properly test certain cases +which are not possible via the public interface since they should never happen. 
+*/ + +package spew + +import ( + "bytes" + "reflect" + "testing" +) + +// dummyFmtState implements a fake fmt.State to use for testing invalid +// reflect.Value handling. This is necessary because the fmt package catches +// invalid values before invoking the formatter on them. +type dummyFmtState struct { + bytes.Buffer +} + +func (dfs *dummyFmtState) Flag(f int) bool { + if f == int('+') { + return true + } + return false +} + +func (dfs *dummyFmtState) Precision() (int, bool) { + return 0, false +} + +func (dfs *dummyFmtState) Width() (int, bool) { + return 0, false +} + +// TestInvalidReflectValue ensures the dump and formatter code handles an +// invalid reflect value properly. This needs access to internal state since it +// should never happen in real code and therefore can't be tested via the public +// API. +func TestInvalidReflectValue(t *testing.T) { + i := 1 + + // Dump invalid reflect value. + v := new(reflect.Value) + buf := new(bytes.Buffer) + d := dumpState{w: buf, cs: &Config} + d.dump(*v) + s := buf.String() + want := "" + if s != want { + t.Errorf("InvalidReflectValue #%d\n got: %s want: %s", i, s, want) + } + i++ + + // Formatter invalid reflect value. + buf2 := new(dummyFmtState) + f := formatState{value: *v, cs: &Config, fs: buf2} + f.format(*v) + s = buf2.String() + want = "" + if s != want { + t.Errorf("InvalidReflectValue #%d got: %s want: %s", i, s, want) + } +} + +// SortValues makes the internal sortValues function available to the test +// package. 
+func SortValues(values []reflect.Value, cs *ConfigState) { + sortValues(values, cs) +} diff --git a/vendor/github.com/davecgh/go-spew/spew/internalunsafe_test.go b/vendor/github.com/davecgh/go-spew/spew/internalunsafe_test.go new file mode 100644 index 000000000..a0c612ec3 --- /dev/null +++ b/vendor/github.com/davecgh/go-spew/spew/internalunsafe_test.go @@ -0,0 +1,102 @@ +// Copyright (c) 2013-2016 Dave Collins + +// Permission to use, copy, modify, and distribute this software for any +// purpose with or without fee is hereby granted, provided that the above +// copyright notice and this permission notice appear in all copies. + +// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES +// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF +// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR +// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES +// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN +// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF +// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + +// NOTE: Due to the following build constraints, this file will only be compiled +// when the code is not running on Google App Engine, compiled by GopherJS, and +// "-tags safe" is not added to the go build command line. The "disableunsafe" +// tag is deprecated and thus should not be used. +// +build !js,!appengine,!safe,!disableunsafe + +/* +This test file is part of the spew package rather than than the spew_test +package because it needs access to internals to properly test certain cases +which are not possible via the public interface since they should never happen. +*/ + +package spew + +import ( + "bytes" + "reflect" + "testing" + "unsafe" +) + +// changeKind uses unsafe to intentionally change the kind of a reflect.Value to +// the maximum kind value which does not exist. 
This is needed to test the +// fallback code which punts to the standard fmt library for new types that +// might get added to the language. +func changeKind(v *reflect.Value, readOnly bool) { + rvf := (*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(v)) + offsetFlag)) + *rvf = *rvf | ((1< + * + * Permission to use, copy, modify, and distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +package spew + +import ( + "fmt" + "io" +) + +// Errorf is a wrapper for fmt.Errorf that treats each argument as if it were +// passed with a default Formatter interface returned by NewFormatter. It +// returns the formatted string as a value that satisfies error. See +// NewFormatter for formatting details. +// +// This function is shorthand for the following syntax: +// +// fmt.Errorf(format, spew.NewFormatter(a), spew.NewFormatter(b)) +func Errorf(format string, a ...interface{}) (err error) { + return fmt.Errorf(format, convertArgs(a)...) +} + +// Fprint is a wrapper for fmt.Fprint that treats each argument as if it were +// passed with a default Formatter interface returned by NewFormatter. It +// returns the number of bytes written and any write error encountered. See +// NewFormatter for formatting details. 
+// +// This function is shorthand for the following syntax: +// +// fmt.Fprint(w, spew.NewFormatter(a), spew.NewFormatter(b)) +func Fprint(w io.Writer, a ...interface{}) (n int, err error) { + return fmt.Fprint(w, convertArgs(a)...) +} + +// Fprintf is a wrapper for fmt.Fprintf that treats each argument as if it were +// passed with a default Formatter interface returned by NewFormatter. It +// returns the number of bytes written and any write error encountered. See +// NewFormatter for formatting details. +// +// This function is shorthand for the following syntax: +// +// fmt.Fprintf(w, format, spew.NewFormatter(a), spew.NewFormatter(b)) +func Fprintf(w io.Writer, format string, a ...interface{}) (n int, err error) { + return fmt.Fprintf(w, format, convertArgs(a)...) +} + +// Fprintln is a wrapper for fmt.Fprintln that treats each argument as if it +// passed with a default Formatter interface returned by NewFormatter. See +// NewFormatter for formatting details. +// +// This function is shorthand for the following syntax: +// +// fmt.Fprintln(w, spew.NewFormatter(a), spew.NewFormatter(b)) +func Fprintln(w io.Writer, a ...interface{}) (n int, err error) { + return fmt.Fprintln(w, convertArgs(a)...) +} + +// Print is a wrapper for fmt.Print that treats each argument as if it were +// passed with a default Formatter interface returned by NewFormatter. It +// returns the number of bytes written and any write error encountered. See +// NewFormatter for formatting details. +// +// This function is shorthand for the following syntax: +// +// fmt.Print(spew.NewFormatter(a), spew.NewFormatter(b)) +func Print(a ...interface{}) (n int, err error) { + return fmt.Print(convertArgs(a)...) +} + +// Printf is a wrapper for fmt.Printf that treats each argument as if it were +// passed with a default Formatter interface returned by NewFormatter. It +// returns the number of bytes written and any write error encountered. See +// NewFormatter for formatting details. 
+// +// This function is shorthand for the following syntax: +// +// fmt.Printf(format, spew.NewFormatter(a), spew.NewFormatter(b)) +func Printf(format string, a ...interface{}) (n int, err error) { + return fmt.Printf(format, convertArgs(a)...) +} + +// Println is a wrapper for fmt.Println that treats each argument as if it were +// passed with a default Formatter interface returned by NewFormatter. It +// returns the number of bytes written and any write error encountered. See +// NewFormatter for formatting details. +// +// This function is shorthand for the following syntax: +// +// fmt.Println(spew.NewFormatter(a), spew.NewFormatter(b)) +func Println(a ...interface{}) (n int, err error) { + return fmt.Println(convertArgs(a)...) +} + +// Sprint is a wrapper for fmt.Sprint that treats each argument as if it were +// passed with a default Formatter interface returned by NewFormatter. It +// returns the resulting string. See NewFormatter for formatting details. +// +// This function is shorthand for the following syntax: +// +// fmt.Sprint(spew.NewFormatter(a), spew.NewFormatter(b)) +func Sprint(a ...interface{}) string { + return fmt.Sprint(convertArgs(a)...) +} + +// Sprintf is a wrapper for fmt.Sprintf that treats each argument as if it were +// passed with a default Formatter interface returned by NewFormatter. It +// returns the resulting string. See NewFormatter for formatting details. +// +// This function is shorthand for the following syntax: +// +// fmt.Sprintf(format, spew.NewFormatter(a), spew.NewFormatter(b)) +func Sprintf(format string, a ...interface{}) string { + return fmt.Sprintf(format, convertArgs(a)...) +} + +// Sprintln is a wrapper for fmt.Sprintln that treats each argument as if it +// were passed with a default Formatter interface returned by NewFormatter. It +// returns the resulting string. See NewFormatter for formatting details. 
+// +// This function is shorthand for the following syntax: +// +// fmt.Sprintln(spew.NewFormatter(a), spew.NewFormatter(b)) +func Sprintln(a ...interface{}) string { + return fmt.Sprintln(convertArgs(a)...) +} + +// convertArgs accepts a slice of arguments and returns a slice of the same +// length with each argument converted to a default spew Formatter interface. +func convertArgs(args []interface{}) (formatters []interface{}) { + formatters = make([]interface{}, len(args)) + for index, arg := range args { + formatters[index] = NewFormatter(arg) + } + return formatters +} diff --git a/vendor/github.com/davecgh/go-spew/spew/spew_test.go b/vendor/github.com/davecgh/go-spew/spew/spew_test.go new file mode 100644 index 000000000..b70466c69 --- /dev/null +++ b/vendor/github.com/davecgh/go-spew/spew/spew_test.go @@ -0,0 +1,320 @@ +/* + * Copyright (c) 2013-2016 Dave Collins + * + * Permission to use, copy, modify, and distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +package spew_test + +import ( + "bytes" + "fmt" + "io/ioutil" + "os" + "testing" + + "github.com/davecgh/go-spew/spew" +) + +// spewFunc is used to identify which public function of the spew package or +// ConfigState a test applies to. 
+type spewFunc int + +const ( + fCSFdump spewFunc = iota + fCSFprint + fCSFprintf + fCSFprintln + fCSPrint + fCSPrintln + fCSSdump + fCSSprint + fCSSprintf + fCSSprintln + fCSErrorf + fCSNewFormatter + fErrorf + fFprint + fFprintln + fPrint + fPrintln + fSdump + fSprint + fSprintf + fSprintln +) + +// Map of spewFunc values to names for pretty printing. +var spewFuncStrings = map[spewFunc]string{ + fCSFdump: "ConfigState.Fdump", + fCSFprint: "ConfigState.Fprint", + fCSFprintf: "ConfigState.Fprintf", + fCSFprintln: "ConfigState.Fprintln", + fCSSdump: "ConfigState.Sdump", + fCSPrint: "ConfigState.Print", + fCSPrintln: "ConfigState.Println", + fCSSprint: "ConfigState.Sprint", + fCSSprintf: "ConfigState.Sprintf", + fCSSprintln: "ConfigState.Sprintln", + fCSErrorf: "ConfigState.Errorf", + fCSNewFormatter: "ConfigState.NewFormatter", + fErrorf: "spew.Errorf", + fFprint: "spew.Fprint", + fFprintln: "spew.Fprintln", + fPrint: "spew.Print", + fPrintln: "spew.Println", + fSdump: "spew.Sdump", + fSprint: "spew.Sprint", + fSprintf: "spew.Sprintf", + fSprintln: "spew.Sprintln", +} + +func (f spewFunc) String() string { + if s, ok := spewFuncStrings[f]; ok { + return s + } + return fmt.Sprintf("Unknown spewFunc (%d)", int(f)) +} + +// spewTest is used to describe a test to be performed against the public +// functions of the spew package or ConfigState. +type spewTest struct { + cs *spew.ConfigState + f spewFunc + format string + in interface{} + want string +} + +// spewTests houses the tests to be performed against the public functions of +// the spew package and ConfigState. +// +// These tests are only intended to ensure the public functions are exercised +// and are intentionally not exhaustive of types. The exhaustive type +// tests are handled in the dump and format tests. +var spewTests []spewTest + +// redirStdout is a helper function to return the standard output from f as a +// byte slice. 
+func redirStdout(f func()) ([]byte, error) { + tempFile, err := ioutil.TempFile("", "ss-test") + if err != nil { + return nil, err + } + fileName := tempFile.Name() + defer os.Remove(fileName) // Ignore error + + origStdout := os.Stdout + os.Stdout = tempFile + f() + os.Stdout = origStdout + tempFile.Close() + + return ioutil.ReadFile(fileName) +} + +func initSpewTests() { + // Config states with various settings. + scsDefault := spew.NewDefaultConfig() + scsNoMethods := &spew.ConfigState{Indent: " ", DisableMethods: true} + scsNoPmethods := &spew.ConfigState{Indent: " ", DisablePointerMethods: true} + scsMaxDepth := &spew.ConfigState{Indent: " ", MaxDepth: 1} + scsContinue := &spew.ConfigState{Indent: " ", ContinueOnMethod: true} + scsNoPtrAddr := &spew.ConfigState{DisablePointerAddresses: true} + scsNoCap := &spew.ConfigState{DisableCapacities: true} + + // Variables for tests on types which implement Stringer interface with and + // without a pointer receiver. + ts := stringer("test") + tps := pstringer("test") + + type ptrTester struct { + s *struct{} + } + tptr := &ptrTester{s: &struct{}{}} + + // depthTester is used to test max depth handling for structs, array, slices + // and maps. + type depthTester struct { + ic indirCir1 + arr [1]string + slice []string + m map[string]int + } + dt := depthTester{indirCir1{nil}, [1]string{"arr"}, []string{"slice"}, + map[string]int{"one": 1}} + + // Variable for tests on types which implement error interface. 
+ te := customError(10) + + spewTests = []spewTest{ + {scsDefault, fCSFdump, "", int8(127), "(int8) 127\n"}, + {scsDefault, fCSFprint, "", int16(32767), "32767"}, + {scsDefault, fCSFprintf, "%v", int32(2147483647), "2147483647"}, + {scsDefault, fCSFprintln, "", int(2147483647), "2147483647\n"}, + {scsDefault, fCSPrint, "", int64(9223372036854775807), "9223372036854775807"}, + {scsDefault, fCSPrintln, "", uint8(255), "255\n"}, + {scsDefault, fCSSdump, "", uint8(64), "(uint8) 64\n"}, + {scsDefault, fCSSprint, "", complex(1, 2), "(1+2i)"}, + {scsDefault, fCSSprintf, "%v", complex(float32(3), 4), "(3+4i)"}, + {scsDefault, fCSSprintln, "", complex(float64(5), 6), "(5+6i)\n"}, + {scsDefault, fCSErrorf, "%#v", uint16(65535), "(uint16)65535"}, + {scsDefault, fCSNewFormatter, "%v", uint32(4294967295), "4294967295"}, + {scsDefault, fErrorf, "%v", uint64(18446744073709551615), "18446744073709551615"}, + {scsDefault, fFprint, "", float32(3.14), "3.14"}, + {scsDefault, fFprintln, "", float64(6.28), "6.28\n"}, + {scsDefault, fPrint, "", true, "true"}, + {scsDefault, fPrintln, "", false, "false\n"}, + {scsDefault, fSdump, "", complex(-10, -20), "(complex128) (-10-20i)\n"}, + {scsDefault, fSprint, "", complex(-1, -2), "(-1-2i)"}, + {scsDefault, fSprintf, "%v", complex(float32(-3), -4), "(-3-4i)"}, + {scsDefault, fSprintln, "", complex(float64(-5), -6), "(-5-6i)\n"}, + {scsNoMethods, fCSFprint, "", ts, "test"}, + {scsNoMethods, fCSFprint, "", &ts, "<*>test"}, + {scsNoMethods, fCSFprint, "", tps, "test"}, + {scsNoMethods, fCSFprint, "", &tps, "<*>test"}, + {scsNoPmethods, fCSFprint, "", ts, "stringer test"}, + {scsNoPmethods, fCSFprint, "", &ts, "<*>stringer test"}, + {scsNoPmethods, fCSFprint, "", tps, "test"}, + {scsNoPmethods, fCSFprint, "", &tps, "<*>stringer test"}, + {scsMaxDepth, fCSFprint, "", dt, "{{} [] [] map[]}"}, + {scsMaxDepth, fCSFdump, "", dt, "(spew_test.depthTester) {\n" + + " ic: (spew_test.indirCir1) {\n \n },\n" + + " arr: ([1]string) (len=1 cap=1) {\n \n },\n" 
+ + " slice: ([]string) (len=1 cap=1) {\n \n },\n" + + " m: (map[string]int) (len=1) {\n \n }\n}\n"}, + {scsContinue, fCSFprint, "", ts, "(stringer test) test"}, + {scsContinue, fCSFdump, "", ts, "(spew_test.stringer) " + + "(len=4) (stringer test) \"test\"\n"}, + {scsContinue, fCSFprint, "", te, "(error: 10) 10"}, + {scsContinue, fCSFdump, "", te, "(spew_test.customError) " + + "(error: 10) 10\n"}, + {scsNoPtrAddr, fCSFprint, "", tptr, "<*>{<*>{}}"}, + {scsNoPtrAddr, fCSSdump, "", tptr, "(*spew_test.ptrTester)({\ns: (*struct {})({\n})\n})\n"}, + {scsNoCap, fCSSdump, "", make([]string, 0, 10), "([]string) {\n}\n"}, + {scsNoCap, fCSSdump, "", make([]string, 1, 10), "([]string) (len=1) {\n(string) \"\"\n}\n"}, + } +} + +// TestSpew executes all of the tests described by spewTests. +func TestSpew(t *testing.T) { + initSpewTests() + + t.Logf("Running %d tests", len(spewTests)) + for i, test := range spewTests { + buf := new(bytes.Buffer) + switch test.f { + case fCSFdump: + test.cs.Fdump(buf, test.in) + + case fCSFprint: + test.cs.Fprint(buf, test.in) + + case fCSFprintf: + test.cs.Fprintf(buf, test.format, test.in) + + case fCSFprintln: + test.cs.Fprintln(buf, test.in) + + case fCSPrint: + b, err := redirStdout(func() { test.cs.Print(test.in) }) + if err != nil { + t.Errorf("%v #%d %v", test.f, i, err) + continue + } + buf.Write(b) + + case fCSPrintln: + b, err := redirStdout(func() { test.cs.Println(test.in) }) + if err != nil { + t.Errorf("%v #%d %v", test.f, i, err) + continue + } + buf.Write(b) + + case fCSSdump: + str := test.cs.Sdump(test.in) + buf.WriteString(str) + + case fCSSprint: + str := test.cs.Sprint(test.in) + buf.WriteString(str) + + case fCSSprintf: + str := test.cs.Sprintf(test.format, test.in) + buf.WriteString(str) + + case fCSSprintln: + str := test.cs.Sprintln(test.in) + buf.WriteString(str) + + case fCSErrorf: + err := test.cs.Errorf(test.format, test.in) + buf.WriteString(err.Error()) + + case fCSNewFormatter: + fmt.Fprintf(buf, test.format, 
test.cs.NewFormatter(test.in)) + + case fErrorf: + err := spew.Errorf(test.format, test.in) + buf.WriteString(err.Error()) + + case fFprint: + spew.Fprint(buf, test.in) + + case fFprintln: + spew.Fprintln(buf, test.in) + + case fPrint: + b, err := redirStdout(func() { spew.Print(test.in) }) + if err != nil { + t.Errorf("%v #%d %v", test.f, i, err) + continue + } + buf.Write(b) + + case fPrintln: + b, err := redirStdout(func() { spew.Println(test.in) }) + if err != nil { + t.Errorf("%v #%d %v", test.f, i, err) + continue + } + buf.Write(b) + + case fSdump: + str := spew.Sdump(test.in) + buf.WriteString(str) + + case fSprint: + str := spew.Sprint(test.in) + buf.WriteString(str) + + case fSprintf: + str := spew.Sprintf(test.format, test.in) + buf.WriteString(str) + + case fSprintln: + str := spew.Sprintln(test.in) + buf.WriteString(str) + + default: + t.Errorf("%v #%d unrecognized function", test.f, i) + continue + } + s := buf.String() + if test.want != s { + t.Errorf("ConfigState #%d\n got: %s want: %s", i, s, test.want) + continue + } + } +} diff --git a/vendor/github.com/davecgh/go-spew/spew/testdata/dumpcgo.go b/vendor/github.com/davecgh/go-spew/spew/testdata/dumpcgo.go new file mode 100644 index 000000000..5c87dd456 --- /dev/null +++ b/vendor/github.com/davecgh/go-spew/spew/testdata/dumpcgo.go @@ -0,0 +1,82 @@ +// Copyright (c) 2013 Dave Collins +// +// Permission to use, copy, modify, and distribute this software for any +// purpose with or without fee is hereby granted, provided that the above +// copyright notice and this permission notice appear in all copies. +// +// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES +// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF +// MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR +// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES +// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN +// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF +// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + +// NOTE: Due to the following build constraints, this file will only be compiled +// when both cgo is supported and "-tags testcgo" is added to the go test +// command line. This code should really only be in the dumpcgo_test.go file, +// but unfortunately Go will not allow cgo in test files, so this is a +// workaround to allow cgo types to be tested. This configuration is used +// because spew itself does not require cgo to run even though it does handle +// certain cgo types specially. Rather than forcing all clients to require cgo +// and an external C compiler just to run the tests, this scheme makes them +// optional. +// +build cgo,testcgo + +package testdata + +/* +#include +typedef unsigned char custom_uchar_t; + +char *ncp = 0; +char *cp = "test"; +char ca[6] = {'t', 'e', 's', 't', '2', '\0'}; +unsigned char uca[6] = {'t', 'e', 's', 't', '3', '\0'}; +signed char sca[6] = {'t', 'e', 's', 't', '4', '\0'}; +uint8_t ui8ta[6] = {'t', 'e', 's', 't', '5', '\0'}; +custom_uchar_t tuca[6] = {'t', 'e', 's', 't', '6', '\0'}; +*/ +import "C" + +// GetCgoNullCharPointer returns a null char pointer via cgo. This is only +// used for tests. +func GetCgoNullCharPointer() interface{} { + return C.ncp +} + +// GetCgoCharPointer returns a char pointer via cgo. This is only used for +// tests. +func GetCgoCharPointer() interface{} { + return C.cp +} + +// GetCgoCharArray returns a char array via cgo and the array's len and cap. +// This is only used for tests. 
+func GetCgoCharArray() (interface{}, int, int) { + return C.ca, len(C.ca), cap(C.ca) +} + +// GetCgoUnsignedCharArray returns an unsigned char array via cgo and the +// array's len and cap. This is only used for tests. +func GetCgoUnsignedCharArray() (interface{}, int, int) { + return C.uca, len(C.uca), cap(C.uca) +} + +// GetCgoSignedCharArray returns a signed char array via cgo and the array's len +// and cap. This is only used for tests. +func GetCgoSignedCharArray() (interface{}, int, int) { + return C.sca, len(C.sca), cap(C.sca) +} + +// GetCgoUint8tArray returns a uint8_t array via cgo and the array's len and +// cap. This is only used for tests. +func GetCgoUint8tArray() (interface{}, int, int) { + return C.ui8ta, len(C.ui8ta), cap(C.ui8ta) +} + +// GetCgoTypdefedUnsignedCharArray returns a typedefed unsigned char array via +// cgo and the array's len and cap. This is only used for tests. +func GetCgoTypdefedUnsignedCharArray() (interface{}, int, int) { + return C.tuca, len(C.tuca), cap(C.tuca) +} diff --git a/vendor/github.com/davecgh/go-spew/test_coverage.txt b/vendor/github.com/davecgh/go-spew/test_coverage.txt new file mode 100644 index 000000000..2cd087a2a --- /dev/null +++ b/vendor/github.com/davecgh/go-spew/test_coverage.txt @@ -0,0 +1,61 @@ + +github.com/davecgh/go-spew/spew/dump.go dumpState.dump 100.00% (88/88) +github.com/davecgh/go-spew/spew/format.go formatState.format 100.00% (82/82) +github.com/davecgh/go-spew/spew/format.go formatState.formatPtr 100.00% (52/52) +github.com/davecgh/go-spew/spew/dump.go dumpState.dumpPtr 100.00% (44/44) +github.com/davecgh/go-spew/spew/dump.go dumpState.dumpSlice 100.00% (39/39) +github.com/davecgh/go-spew/spew/common.go handleMethods 100.00% (30/30) +github.com/davecgh/go-spew/spew/common.go printHexPtr 100.00% (18/18) +github.com/davecgh/go-spew/spew/common.go unsafeReflectValue 100.00% (13/13) +github.com/davecgh/go-spew/spew/format.go formatState.constructOrigFormat 100.00% (12/12) 
+github.com/davecgh/go-spew/spew/dump.go fdump 100.00% (11/11) +github.com/davecgh/go-spew/spew/format.go formatState.Format 100.00% (11/11) +github.com/davecgh/go-spew/spew/common.go init 100.00% (10/10) +github.com/davecgh/go-spew/spew/common.go printComplex 100.00% (9/9) +github.com/davecgh/go-spew/spew/common.go valuesSorter.Less 100.00% (8/8) +github.com/davecgh/go-spew/spew/format.go formatState.buildDefaultFormat 100.00% (7/7) +github.com/davecgh/go-spew/spew/format.go formatState.unpackValue 100.00% (5/5) +github.com/davecgh/go-spew/spew/dump.go dumpState.indent 100.00% (4/4) +github.com/davecgh/go-spew/spew/common.go catchPanic 100.00% (4/4) +github.com/davecgh/go-spew/spew/config.go ConfigState.convertArgs 100.00% (4/4) +github.com/davecgh/go-spew/spew/spew.go convertArgs 100.00% (4/4) +github.com/davecgh/go-spew/spew/format.go newFormatter 100.00% (3/3) +github.com/davecgh/go-spew/spew/dump.go Sdump 100.00% (3/3) +github.com/davecgh/go-spew/spew/common.go printBool 100.00% (3/3) +github.com/davecgh/go-spew/spew/common.go sortValues 100.00% (3/3) +github.com/davecgh/go-spew/spew/config.go ConfigState.Sdump 100.00% (3/3) +github.com/davecgh/go-spew/spew/dump.go dumpState.unpackValue 100.00% (3/3) +github.com/davecgh/go-spew/spew/spew.go Printf 100.00% (1/1) +github.com/davecgh/go-spew/spew/spew.go Println 100.00% (1/1) +github.com/davecgh/go-spew/spew/spew.go Sprint 100.00% (1/1) +github.com/davecgh/go-spew/spew/spew.go Sprintf 100.00% (1/1) +github.com/davecgh/go-spew/spew/spew.go Sprintln 100.00% (1/1) +github.com/davecgh/go-spew/spew/common.go printFloat 100.00% (1/1) +github.com/davecgh/go-spew/spew/config.go NewDefaultConfig 100.00% (1/1) +github.com/davecgh/go-spew/spew/common.go printInt 100.00% (1/1) +github.com/davecgh/go-spew/spew/common.go printUint 100.00% (1/1) +github.com/davecgh/go-spew/spew/common.go valuesSorter.Len 100.00% (1/1) +github.com/davecgh/go-spew/spew/common.go valuesSorter.Swap 100.00% (1/1) 
+github.com/davecgh/go-spew/spew/config.go ConfigState.Errorf 100.00% (1/1) +github.com/davecgh/go-spew/spew/config.go ConfigState.Fprint 100.00% (1/1) +github.com/davecgh/go-spew/spew/config.go ConfigState.Fprintf 100.00% (1/1) +github.com/davecgh/go-spew/spew/config.go ConfigState.Fprintln 100.00% (1/1) +github.com/davecgh/go-spew/spew/config.go ConfigState.Print 100.00% (1/1) +github.com/davecgh/go-spew/spew/config.go ConfigState.Printf 100.00% (1/1) +github.com/davecgh/go-spew/spew/config.go ConfigState.Println 100.00% (1/1) +github.com/davecgh/go-spew/spew/config.go ConfigState.Sprint 100.00% (1/1) +github.com/davecgh/go-spew/spew/config.go ConfigState.Sprintf 100.00% (1/1) +github.com/davecgh/go-spew/spew/config.go ConfigState.Sprintln 100.00% (1/1) +github.com/davecgh/go-spew/spew/config.go ConfigState.NewFormatter 100.00% (1/1) +github.com/davecgh/go-spew/spew/config.go ConfigState.Fdump 100.00% (1/1) +github.com/davecgh/go-spew/spew/config.go ConfigState.Dump 100.00% (1/1) +github.com/davecgh/go-spew/spew/dump.go Fdump 100.00% (1/1) +github.com/davecgh/go-spew/spew/dump.go Dump 100.00% (1/1) +github.com/davecgh/go-spew/spew/spew.go Fprintln 100.00% (1/1) +github.com/davecgh/go-spew/spew/format.go NewFormatter 100.00% (1/1) +github.com/davecgh/go-spew/spew/spew.go Errorf 100.00% (1/1) +github.com/davecgh/go-spew/spew/spew.go Fprint 100.00% (1/1) +github.com/davecgh/go-spew/spew/spew.go Fprintf 100.00% (1/1) +github.com/davecgh/go-spew/spew/spew.go Print 100.00% (1/1) +github.com/davecgh/go-spew/spew ------------------------------- 100.00% (505/505) + diff --git a/vendor/github.com/evanphx/json-patch/.gitrepo b/vendor/github.com/evanphx/json-patch/.gitrepo new file mode 100644 index 000000000..399afaf2a --- /dev/null +++ b/vendor/github.com/evanphx/json-patch/.gitrepo @@ -0,0 +1,11 @@ +; DO NOT EDIT (unless you know what you are doing) +; +; This subdirectory is a git "subrepo", and this file is maintained by the +; git-subrepo command. 
See https://github.com/git-commands/git-subrepo#readme +; +[subrepo] + remote = https://github.com/evanphx/json-patch.git + branch = master + commit = 30afec6a1650c11c861dc1fb58e100cd5219e490 + parent = 793eec80022a4099c569b56a2a62a5f39f0d9e3a + cmdver = 0.3.0 diff --git a/vendor/github.com/evanphx/json-patch/.travis.yml b/vendor/github.com/evanphx/json-patch/.travis.yml new file mode 100644 index 000000000..ed5cb244c --- /dev/null +++ b/vendor/github.com/evanphx/json-patch/.travis.yml @@ -0,0 +1,14 @@ +language: go + +go: + - 1.4 + - 1.3 + +install: + - if ! go get code.google.com/p/go.tools/cmd/cover; then go get golang.org/x/tools/cmd/cover; fi + +script: + - go test -cover ./... + +notifications: + email: false diff --git a/vendor/github.com/evanphx/json-patch/LICENSE b/vendor/github.com/evanphx/json-patch/LICENSE new file mode 100644 index 000000000..0eb9b72d8 --- /dev/null +++ b/vendor/github.com/evanphx/json-patch/LICENSE @@ -0,0 +1,25 @@ +Copyright (c) 2014, Evan Phoenix +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +* Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. +* Redistributions in binary form must reproduce the above copyright notice + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. +* Neither the name of the Evan Phoenix nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/evanphx/json-patch/README.md b/vendor/github.com/evanphx/json-patch/README.md new file mode 100644 index 000000000..d0d826bac --- /dev/null +++ b/vendor/github.com/evanphx/json-patch/README.md @@ -0,0 +1,29 @@ +## JSON-Patch + +Provides the ability to modify and test a JSON according to a +[RFC6902 JSON patch](http://tools.ietf.org/html/rfc6902) and [RFC7396 JSON Merge Patch](https://tools.ietf.org/html/rfc7396). + +*Version*: **1.0** + +[![GoDoc](https://godoc.org/github.com/evanphx/json-patch?status.svg)](http://godoc.org/github.com/evanphx/json-patch) + +[![Build Status](https://travis-ci.org/evanphx/json-patch.svg?branch=master)](https://travis-ci.org/evanphx/json-patch) + +### API Usage + +* Given a `[]byte`, obtain a Patch object + + `obj, err := jsonpatch.DecodePatch(patch)` + +* Apply the patch and get a new document back + + `out, err := obj.Apply(doc)` + +* Create a JSON Merge Patch document based on two json documents (a to b): + + `mergeDoc, err := jsonpatch.CreateMergePatch(a, b)` + +* Bonus API: compare documents for structural equality + + `jsonpatch.Equal(doca, docb)` + diff --git a/vendor/github.com/evanphx/json-patch/merge.go b/vendor/github.com/evanphx/json-patch/merge.go new file mode 100644 index 000000000..330b9b528 --- /dev/null +++ b/vendor/github.com/evanphx/json-patch/merge.go @@ -0,0 +1,306 @@ +package jsonpatch + +import ( + "encoding/json" + "fmt" + "reflect" + "strings" +) + +func merge(cur, 
patch *lazyNode) *lazyNode { + curDoc, err := cur.intoDoc() + + if err != nil { + pruneNulls(patch) + return patch + } + + patchDoc, err := patch.intoDoc() + + if err != nil { + return patch + } + + mergeDocs(curDoc, patchDoc) + + return cur +} + +func mergeDocs(doc, patch *partialDoc) { + for k, v := range *patch { + k := decodePatchKey(k) + if v == nil { + delete(*doc, k) + } else { + cur, ok := (*doc)[k] + + if !ok || cur == nil { + pruneNulls(v) + (*doc)[k] = v + } else { + (*doc)[k] = merge(cur, v) + } + } + } +} + +func pruneNulls(n *lazyNode) { + sub, err := n.intoDoc() + + if err == nil { + pruneDocNulls(sub) + } else { + ary, err := n.intoAry() + + if err == nil { + pruneAryNulls(ary) + } + } +} + +func pruneDocNulls(doc *partialDoc) *partialDoc { + for k, v := range *doc { + if v == nil { + delete(*doc, k) + } else { + pruneNulls(v) + } + } + + return doc +} + +func pruneAryNulls(ary *partialArray) *partialArray { + newAry := []*lazyNode{} + + for _, v := range *ary { + if v != nil { + pruneNulls(v) + newAry = append(newAry, v) + } + } + + *ary = newAry + + return ary +} + +var errBadJSONDoc = fmt.Errorf("Invalid JSON Document") +var errBadJSONPatch = fmt.Errorf("Invalid JSON Patch") + +// MergePatch merges the patchData into the docData. 
+func MergePatch(docData, patchData []byte) ([]byte, error) { + doc := &partialDoc{} + + docErr := json.Unmarshal(docData, doc) + + patch := &partialDoc{} + + patchErr := json.Unmarshal(patchData, patch) + + if _, ok := docErr.(*json.SyntaxError); ok { + return nil, errBadJSONDoc + } + + if _, ok := patchErr.(*json.SyntaxError); ok { + return nil, errBadJSONPatch + } + + if docErr == nil && *doc == nil { + return nil, errBadJSONDoc + } + + if patchErr == nil && *patch == nil { + return nil, errBadJSONPatch + } + + if docErr != nil || patchErr != nil { + // Not an error, just not a doc, so we turn straight into the patch + if patchErr == nil { + doc = pruneDocNulls(patch) + } else { + patchAry := &partialArray{} + patchErr = json.Unmarshal(patchData, patchAry) + + if patchErr != nil { + return nil, errBadJSONPatch + } + + pruneAryNulls(patchAry) + + out, patchErr := json.Marshal(patchAry) + + if patchErr != nil { + return nil, errBadJSONPatch + } + + return out, nil + } + } else { + mergeDocs(doc, patch) + } + + return json.Marshal(doc) +} + +// CreateMergePatch creates a merge patch as specified in http://tools.ietf.org/html/draft-ietf-appsawg-json-merge-patch-07 +// +// 'a' is original, 'b' is the modified document. Both are to be given as json encoded content. +// The function will return a mergeable json document with differences from a to b. +// +// An error will be returned if any of the two documents are invalid. +func CreateMergePatch(a, b []byte) ([]byte, error) { + aI := map[string]interface{}{} + bI := map[string]interface{}{} + err := json.Unmarshal(a, &aI) + if err != nil { + return nil, errBadJSONDoc + } + err = json.Unmarshal(b, &bI) + if err != nil { + return nil, errBadJSONDoc + } + dest, err := getDiff(aI, bI) + if err != nil { + return nil, err + } + return json.Marshal(dest) +} + +// Returns true if the array matches (must be json types). +// As is idiomatic for go, an empty array is not the same as a nil array. 
+func matchesArray(a, b []interface{}) bool { + if len(a) != len(b) { + return false + } + if (a == nil && b != nil) || (a != nil && b == nil) { + return false + } + for i := range a { + if !matchesValue(a[i], b[i]) { + return false + } + } + return true +} + +// Returns true if the values matches (must be json types) +// The types of the values must match, otherwise it will always return false +// If two map[string]interface{} are given, all elements must match. +func matchesValue(av, bv interface{}) bool { + if reflect.TypeOf(av) != reflect.TypeOf(bv) { + return false + } + switch at := av.(type) { + case string: + bt := bv.(string) + if bt == at { + return true + } + case float64: + bt := bv.(float64) + if bt == at { + return true + } + case bool: + bt := bv.(bool) + if bt == at { + return true + } + case map[string]interface{}: + bt := bv.(map[string]interface{}) + for key := range at { + if !matchesValue(at[key], bt[key]) { + return false + } + } + for key := range bt { + if !matchesValue(at[key], bt[key]) { + return false + } + } + return true + case []interface{}: + bt := bv.([]interface{}) + return matchesArray(at, bt) + } + return false +} + +// getDiff returns the (recursive) difference between a and b as a map[string]interface{}. 
+func getDiff(a, b map[string]interface{}) (map[string]interface{}, error) { + into := map[string]interface{}{} + for key, bv := range b { + escapedKey := encodePatchKey(key) + av, ok := a[key] + // value was added + if !ok { + into[escapedKey] = bv + continue + } + // If types have changed, replace completely + if reflect.TypeOf(av) != reflect.TypeOf(bv) { + into[escapedKey] = bv + continue + } + // Types are the same, compare values + switch at := av.(type) { + case map[string]interface{}: + bt := bv.(map[string]interface{}) + dst := make(map[string]interface{}, len(bt)) + dst, err := getDiff(at, bt) + if err != nil { + return nil, err + } + if len(dst) > 0 { + into[escapedKey] = dst + } + case string, float64, bool: + if !matchesValue(av, bv) { + into[escapedKey] = bv + } + case []interface{}: + bt := bv.([]interface{}) + if !matchesArray(at, bt) { + into[escapedKey] = bv + } + case nil: + switch bv.(type) { + case nil: + // Both nil, fine. + default: + into[escapedKey] = bv + } + default: + panic(fmt.Sprintf("Unknown type:%T in key %s", av, key)) + } + } + // Now add all deleted values as nil + for key := range a { + _, found := b[key] + if !found { + into[key] = nil + } + } + return into, nil +} + +// From http://tools.ietf.org/html/rfc6901#section-4 : +// +// Evaluation of each reference token begins by decoding any escaped +// character sequence. This is performed by first transforming any +// occurrence of the sequence '~1' to '/', and then transforming any +// occurrence of the sequence '~0' to '~'. 
+ +var ( + rfc6901Encoder = strings.NewReplacer("~", "~0", "/", "~1") + rfc6901Decoder = strings.NewReplacer("~1", "/", "~0", "~") +) + +func decodePatchKey(k string) string { + return rfc6901Decoder.Replace(k) +} + +func encodePatchKey(k string) string { + return rfc6901Encoder.Replace(k) +} diff --git a/vendor/github.com/evanphx/json-patch/merge_test.go b/vendor/github.com/evanphx/json-patch/merge_test.go new file mode 100644 index 000000000..a840b9373 --- /dev/null +++ b/vendor/github.com/evanphx/json-patch/merge_test.go @@ -0,0 +1,393 @@ +package jsonpatch + +import ( + "strings" + "testing" +) + +func mergePatch(doc, patch string) string { + out, err := MergePatch([]byte(doc), []byte(patch)) + + if err != nil { + panic(err) + } + + return string(out) +} + +func TestMergePatchReplaceKey(t *testing.T) { + doc := `{ "title": "hello" }` + pat := `{ "title": "goodbye" }` + + res := mergePatch(doc, pat) + + if !compareJSON(pat, res) { + t.Fatalf("Key was not replaced") + } +} + +func TestMergePatchIgnoresOtherValues(t *testing.T) { + doc := `{ "title": "hello", "age": 18 }` + pat := `{ "title": "goodbye" }` + + res := mergePatch(doc, pat) + + exp := `{ "title": "goodbye", "age": 18 }` + + if !compareJSON(exp, res) { + t.Fatalf("Key was not replaced") + } +} + +func TestMergePatchNilDoc(t *testing.T) { + doc := `{ "title": null }` + pat := `{ "title": {"foo": "bar"} }` + + res := mergePatch(doc, pat) + + exp := `{ "title": {"foo": "bar"} }` + + if !compareJSON(exp, res) { + t.Fatalf("Key was not replaced") + } +} + +func TestMergePatchRecursesIntoObjects(t *testing.T) { + doc := `{ "person": { "title": "hello", "age": 18 } }` + pat := `{ "person": { "title": "goodbye" } }` + + res := mergePatch(doc, pat) + + exp := `{ "person": { "title": "goodbye", "age": 18 } }` + + if !compareJSON(exp, res) { + t.Fatalf("Key was not replaced") + } +} + +type nonObjectCases struct { + doc, pat, res string +} + +func TestMergePatchReplacesNonObjectsWholesale(t *testing.T) { + a1 := 
`[1]` + a2 := `[2]` + o1 := `{ "a": 1 }` + o2 := `{ "a": 2 }` + o3 := `{ "a": 1, "b": 1 }` + o4 := `{ "a": 2, "b": 1 }` + + cases := []nonObjectCases{ + {a1, a2, a2}, + {o1, a2, a2}, + {a1, o1, o1}, + {o3, o2, o4}, + } + + for _, c := range cases { + act := mergePatch(c.doc, c.pat) + + if !compareJSON(c.res, act) { + t.Errorf("whole object replacement failed") + } + } +} + +func TestMergePatchReturnsErrorOnBadJSON(t *testing.T) { + _, err := MergePatch([]byte(`[[[[`), []byte(`1`)) + + if err == nil { + t.Errorf("Did not return an error for bad json: %s", err) + } + + _, err = MergePatch([]byte(`1`), []byte(`[[[[`)) + + if err == nil { + t.Errorf("Did not return an error for bad json: %s", err) + } +} + +func TestMergePatchReturnsEmptyArrayOnEmptyArray(t *testing.T) { + doc := `{ "array": ["one", "two"] }` + pat := `{ "array": [] }` + + exp := `{ "array": [] }` + + res, err := MergePatch([]byte(doc), []byte(pat)) + + if err != nil { + t.Errorf("Unexpected error: %s, %s", err, string(res)) + } + + if !compareJSON(exp, string(res)) { + t.Fatalf("Emtpy array did not return not return as empty array") + } +} + +var rfcTests = []struct { + target string + patch string + expected string +}{ + // test cases from https://tools.ietf.org/html/rfc7386#appendix-A + {target: `{"a":"b"}`, patch: `{"a":"c"}`, expected: `{"a":"c"}`}, + {target: `{"a":"b"}`, patch: `{"b":"c"}`, expected: `{"a":"b","b":"c"}`}, + {target: `{"a":"b"}`, patch: `{"a":null}`, expected: `{}`}, + {target: `{"a":"b","b":"c"}`, patch: `{"a":null}`, expected: `{"b":"c"}`}, + {target: `{"a":["b"]}`, patch: `{"a":"c"}`, expected: `{"a":"c"}`}, + {target: `{"a":"c"}`, patch: `{"a":["b"]}`, expected: `{"a":["b"]}`}, + {target: `{"a":{"b": "c"}}`, patch: `{"a": {"b": "d","c": null}}`, expected: `{"a":{"b":"d"}}`}, + {target: `{"a":[{"b":"c"}]}`, patch: `{"a":[1]}`, expected: `{"a":[1]}`}, + {target: `["a","b"]`, patch: `["c","d"]`, expected: `["c","d"]`}, + {target: `{"a":"b"}`, patch: `["c"]`, expected: `["c"]`}, 
+ // {target: `{"a":"foo"}`, patch: `null`, expected: `null`}, + // {target: `{"a":"foo"}`, patch: `"bar"`, expected: `"bar"`}, + {target: `{"e":null}`, patch: `{"a":1}`, expected: `{"a":1,"e":null}`}, + {target: `[1,2]`, patch: `{"a":"b","c":null}`, expected: `{"a":"b"}`}, + {target: `{}`, patch: `{"a":{"bb":{"ccc":null}}}`, expected: `{"a":{"bb":{}}}`}, +} + +func TestMergePatchRFCCases(t *testing.T) { + for i, c := range rfcTests { + out := mergePatch(c.target, c.patch) + + if !compareJSON(out, c.expected) { + t.Errorf("case[%d], patch '%s' did not apply properly to '%s'. expected:\n'%s'\ngot:\n'%s'", i, c.patch, c.target, c.expected, out) + } + } +} + +var rfcFailTests = ` + {"a":"foo"} | null + {"a":"foo"} | "bar" +` + +func TestMergePatchFailRFCCases(t *testing.T) { + tests := strings.Split(rfcFailTests, "\n") + + for _, c := range tests { + if strings.TrimSpace(c) == "" { + continue + } + + parts := strings.SplitN(c, "|", 2) + + doc := strings.TrimSpace(parts[0]) + pat := strings.TrimSpace(parts[1]) + + out, err := MergePatch([]byte(doc), []byte(pat)) + + if err != errBadJSONPatch { + t.Errorf("error not returned properly: %s, %s", err, string(out)) + } + } + +} + +func TestMergeReplaceKey(t *testing.T) { + doc := `{ "title": "hello", "nested": {"one": 1, "two": 2} }` + pat := `{ "title": "goodbye", "nested": {"one": 2, "two": 2} }` + + exp := `{ "title": "goodbye", "nested": {"one": 2} }` + + res, err := CreateMergePatch([]byte(doc), []byte(pat)) + + if err != nil { + t.Errorf("Unexpected error: %s, %s", err, string(res)) + } + + if !compareJSON(exp, string(res)) { + t.Fatalf("Key was not replaced") + } +} + +func TestMergeGetArray(t *testing.T) { + doc := `{ "title": "hello", "array": ["one", "two"], "notmatch": [1, 2, 3] }` + pat := `{ "title": "hello", "array": ["one", "two", "three"], "notmatch": [1, 2, 3] }` + + exp := `{ "array": ["one", "two", "three"] }` + + res, err := CreateMergePatch([]byte(doc), []byte(pat)) + + if err != nil { + 
t.Errorf("Unexpected error: %s, %s", err, string(res)) + } + + if !compareJSON(exp, string(res)) { + t.Fatalf("Array was not added") + } +} + +func TestMergeGetObjArray(t *testing.T) { + doc := `{ "title": "hello", "array": [{"banana": true}, {"evil": false}], "notmatch": [{"one":1}, {"two":2}, {"three":3}] }` + pat := `{ "title": "hello", "array": [{"banana": false}, {"evil": true}], "notmatch": [{"one":1}, {"two":2}, {"three":3}] }` + + exp := `{ "array": [{"banana": false}, {"evil": true}] }` + + res, err := CreateMergePatch([]byte(doc), []byte(pat)) + + if err != nil { + t.Errorf("Unexpected error: %s, %s", err, string(res)) + } + + if !compareJSON(exp, string(res)) { + t.Fatalf("Object array was not added") + } +} + +func TestMergeDeleteKey(t *testing.T) { + doc := `{ "title": "hello", "nested": {"one": 1, "two": 2} }` + pat := `{ "title": "hello", "nested": {"one": 1} }` + + exp := `{"nested":{"two":null}}` + + res, err := CreateMergePatch([]byte(doc), []byte(pat)) + + if err != nil { + t.Errorf("Unexpected error: %s, %s", err, string(res)) + } + + // We cannot use "compareJSON", since Equals does not report a difference if the value is null + if exp != string(res) { + t.Fatalf("Key was not removed") + } +} + +func TestMergeEmptyArray(t *testing.T) { + doc := `{ "array": null }` + pat := `{ "array": [] }` + + exp := `{"array":[]}` + + res, err := CreateMergePatch([]byte(doc), []byte(pat)) + + if err != nil { + t.Errorf("Unexpected error: %s, %s", err, string(res)) + } + + // We cannot use "compareJSON", since Equals does not report a difference if the value is null + if exp != string(res) { + t.Fatalf("Key was not removed") + } +} + +func TestMergeObjArray(t *testing.T) { + doc := `{ "array": [ {"a": {"b": 2}}, {"a": {"b": 3}} ]}` + exp := `{}` + + res, err := CreateMergePatch([]byte(doc), []byte(doc)) + + if err != nil { + t.Errorf("Unexpected error: %s, %s", err, string(res)) + } + + // We cannot use "compareJSON", since Equals does not report a difference 
if the value is null + if exp != string(res) { + t.Fatalf("Array was not empty, was " + string(res)) + } +} + +func TestMergeComplexMatch(t *testing.T) { + doc := `{"hello": "world","t": true ,"f": false, "n": null,"i": 123,"pi": 3.1416,"a": [1, 2, 3, 4], "nested": {"hello": "world","t": true ,"f": false, "n": null,"i": 123,"pi": 3.1416,"a": [1, 2, 3, 4]} }` + empty := `{}` + res, err := CreateMergePatch([]byte(doc), []byte(doc)) + + if err != nil { + t.Errorf("Unexpected error: %s, %s", err, string(res)) + } + + // We cannot use "compareJSON", since Equals does not report a difference if the value is null + if empty != string(res) { + t.Fatalf("Did not get empty result, was:%s", string(res)) + } +} + +func TestMergeComplexAddAll(t *testing.T) { + doc := `{"hello": "world","t": true ,"f": false, "n": null,"i": 123,"pi": 3.1416,"a": [1, 2, 3, 4], "nested": {"hello": "world","t": true ,"f": false, "n": null,"i": 123,"pi": 3.1416,"a": [1, 2, 3, 4]} }` + empty := `{}` + res, err := CreateMergePatch([]byte(empty), []byte(doc)) + + if err != nil { + t.Errorf("Unexpected error: %s, %s", err, string(res)) + } + + if !compareJSON(doc, string(res)) { + t.Fatalf("Did not get everything as, it was:\n%s", string(res)) + } +} + +func TestMergeComplexRemoveAll(t *testing.T) { + doc := `{"hello": "world","t": true ,"f": false, "n": null,"i": 123,"pi": 3.1416,"a": [1, 2, 3, 4], "nested": {"hello": "world","t": true ,"f": false, "n": null,"i": 123,"pi": 3.1416,"a": [1, 2, 3, 4]} }` + exp := `{"a":null,"f":null,"hello":null,"i":null,"n":null,"nested":null,"pi":null,"t":null}` + empty := `{}` + res, err := CreateMergePatch([]byte(doc), []byte(empty)) + + if err != nil { + t.Errorf("Unexpected error: %s, %s", err, string(res)) + } + + if exp != string(res) { + t.Fatalf("Did not get result, was:%s", string(res)) + } + + // FIXME: Crashes if using compareJSON like this: + /* + if !compareJSON(doc, string(res)) { + t.Fatalf("Did not get everything as, it was:\n%s", string(res)) + } + */ 
+} + +func TestMergeObjectWithInnerArray(t *testing.T) { + stateString := `{ + "OuterArray": [ + { + "InnerArray": [ + { + "StringAttr": "abc123" + } + ], + "StringAttr": "def456" + } + ] + }` + + patch, err := CreateMergePatch([]byte(stateString), []byte(stateString)) + if err != nil { + t.Fatal(err) + } + + if string(patch) != "{}" { + t.Fatalf("Patch should have been {} but was: %v", string(patch)) + } +} + +func TestMergeReplaceKeyRequiringEscape(t *testing.T) { + doc := `{ "title": "hello", "nested": {"title/escaped": 1, "two": 2} }` + pat := `{ "title": "goodbye", "nested": {"title/escaped": 2, "two": 2} }` + + exp := `{ "title": "goodbye", "nested": {"title~1escaped": 2} }` + + res, err := CreateMergePatch([]byte(doc), []byte(pat)) + + if err != nil { + t.Errorf("Unexpected error: %s, %s", err, string(res)) + } + + if !compareJSON(exp, string(res)) { + t.Log(string(res)) + t.Fatalf("Key was not replaced") + } +} + +func TestMergePatchReplaceKeyRequiringEscaping(t *testing.T) { + doc := `{ "obj": { "title/escaped": "hello" } }` + pat := `{ "obj": { "title~1escaped": "goodbye" } }` + exp := `{ "obj": { "title/escaped": "goodbye" } }` + + res := mergePatch(doc, pat) + + if !compareJSON(exp, res) { + t.Fatalf("Key was not replaced") + } +} diff --git a/vendor/github.com/evanphx/json-patch/patch.go b/vendor/github.com/evanphx/json-patch/patch.go new file mode 100644 index 000000000..691380de6 --- /dev/null +++ b/vendor/github.com/evanphx/json-patch/patch.go @@ -0,0 +1,586 @@ +package jsonpatch + +import ( + "bytes" + "encoding/json" + "fmt" + "strconv" + "strings" +) + +const ( + eRaw = iota + eDoc + eAry +) + +const LeftBrace byte = 91 // []byte("[") + +type lazyNode struct { + raw *json.RawMessage + doc partialDoc + ary partialArray + which int +} + +type operation map[string]*json.RawMessage + +// Patch is an ordered collection of operations. 
+type Patch []operation + +type partialDoc map[string]*lazyNode +type partialArray []*lazyNode + +type container interface { + get(key string) (*lazyNode, error) + set(key string, val *lazyNode) error + add(key string, val *lazyNode) error + remove(key string) error +} + +func newLazyNode(raw *json.RawMessage) *lazyNode { + return &lazyNode{raw: raw, doc: nil, ary: nil, which: eRaw} +} + +func (n *lazyNode) MarshalJSON() ([]byte, error) { + switch n.which { + case eRaw: + return json.Marshal(n.raw) + case eDoc: + return json.Marshal(n.doc) + case eAry: + return json.Marshal(n.ary) + default: + return nil, fmt.Errorf("Unknown type") + } +} + +func (n *lazyNode) UnmarshalJSON(data []byte) error { + dest := make(json.RawMessage, len(data)) + copy(dest, data) + n.raw = &dest + n.which = eRaw + return nil +} + +func (n *lazyNode) intoDoc() (*partialDoc, error) { + if n.which == eDoc { + return &n.doc, nil + } + + err := json.Unmarshal(*n.raw, &n.doc) + + if err != nil { + return nil, err + } + + n.which = eDoc + return &n.doc, nil +} + +func (n *lazyNode) intoAry() (*partialArray, error) { + if n.which == eAry { + return &n.ary, nil + } + + err := json.Unmarshal(*n.raw, &n.ary) + + if err != nil { + return nil, err + } + + n.which = eAry + return &n.ary, nil +} + +func (n *lazyNode) compact() []byte { + buf := &bytes.Buffer{} + + err := json.Compact(buf, *n.raw) + + if err != nil { + return *n.raw + } + + return buf.Bytes() +} + +func (n *lazyNode) tryDoc() bool { + err := json.Unmarshal(*n.raw, &n.doc) + + if err != nil { + return false + } + + n.which = eDoc + return true +} + +func (n *lazyNode) tryAry() bool { + err := json.Unmarshal(*n.raw, &n.ary) + + if err != nil { + return false + } + + n.which = eAry + return true +} + +func (n *lazyNode) equal(o *lazyNode) bool { + if n.which == eRaw { + if !n.tryDoc() && !n.tryAry() { + if o.which != eRaw { + return false + } + + return bytes.Equal(n.compact(), o.compact()) + } + } + + if n.which == eDoc { + if o.which == 
eRaw { + if !o.tryDoc() { + return false + } + } + + if o.which != eDoc { + return false + } + + for k, v := range n.doc { + ov, ok := o.doc[k] + + if !ok { + return false + } + + if v == nil && ov == nil { + continue + } + + if !v.equal(ov) { + return false + } + } + + return true + } + + if o.which != eAry && !o.tryAry() { + return false + } + + if len(n.ary) != len(o.ary) { + return false + } + + for idx, val := range n.ary { + if !val.equal(o.ary[idx]) { + return false + } + } + + return true +} + +func (o operation) kind() string { + if obj, ok := o["op"]; ok { + var op string + + err := json.Unmarshal(*obj, &op) + + if err != nil { + return "unknown" + } + + return op + } + + return "unknown" +} + +func (o operation) path() string { + if obj, ok := o["path"]; ok { + var op string + + err := json.Unmarshal(*obj, &op) + + if err != nil { + return "unknown" + } + + return op + } + + return "unknown" +} + +func (o operation) from() string { + if obj, ok := o["from"]; ok { + var op string + + err := json.Unmarshal(*obj, &op) + + if err != nil { + return "unknown" + } + + return op + } + + return "unknown" +} + +func (o operation) value() *lazyNode { + if obj, ok := o["value"]; ok { + return newLazyNode(obj) + } + + return nil +} + +func isArray(buf []byte) bool { +Loop: + for _, c := range buf { + switch c { + case ' ': + case '\n': + case '\t': + continue + case '[': + return true + default: + break Loop + } + } + + return false +} + +func findObject(pd *container, path string) (container, string) { + doc := *pd + + split := strings.Split(path, "/") + + parts := split[1 : len(split)-1] + + key := split[len(split)-1] + + var err error + + for _, part := range parts { + + next, ok := doc.get(decodePatchKey(part)) + + if next == nil || ok != nil { + return nil, "" + } + + if isArray(*next.raw) { + doc, err = next.intoAry() + + if err != nil { + return nil, "" + } + } else { + doc, err = next.intoDoc() + + if err != nil { + return nil, "" + } + } + } + + return doc, 
decodePatchKey(key) +} + +func (d *partialDoc) set(key string, val *lazyNode) error { + (*d)[key] = val + return nil +} + +func (d *partialDoc) add(key string, val *lazyNode) error { + (*d)[key] = val + return nil +} + +func (d *partialDoc) get(key string) (*lazyNode, error) { + return (*d)[key], nil +} + +func (d *partialDoc) remove(key string) error { + _, ok := (*d)[key] + if !ok { + return fmt.Errorf("Unable to remove nonexistent key: %s", key) + } + + delete(*d, key) + return nil +} + +func (d *partialArray) set(key string, val *lazyNode) error { + if key == "-" { + *d = append(*d, val) + return nil + } + + idx, err := strconv.Atoi(key) + if err != nil { + return err + } + + sz := len(*d) + if idx+1 > sz { + sz = idx + 1 + } + + ary := make([]*lazyNode, sz) + + cur := *d + + copy(ary, cur) + + if idx >= len(ary) { + return fmt.Errorf("Unable to access invalid index: %d", idx) + } + + ary[idx] = val + + *d = ary + return nil +} + +func (d *partialArray) add(key string, val *lazyNode) error { + if key == "-" { + *d = append(*d, val) + return nil + } + + idx, err := strconv.Atoi(key) + if err != nil { + return err + } + + ary := make([]*lazyNode, len(*d)+1) + + cur := *d + + copy(ary[0:idx], cur[0:idx]) + ary[idx] = val + copy(ary[idx+1:], cur[idx:]) + + *d = ary + return nil +} + +func (d *partialArray) get(key string) (*lazyNode, error) { + idx, err := strconv.Atoi(key) + + if err != nil { + return nil, err + } + + if idx >= len(*d) { + return nil, fmt.Errorf("Unable to access invalid index: %d", idx) + } + + return (*d)[idx], nil +} + +func (d *partialArray) remove(key string) error { + idx, err := strconv.Atoi(key) + if err != nil { + return err + } + + cur := *d + + if idx >= len(cur) { + return fmt.Errorf("Unable to remove invalid index: %d", idx) + } + + ary := make([]*lazyNode, len(cur)-1) + + copy(ary[0:idx], cur[0:idx]) + copy(ary[idx:], cur[idx+1:]) + + *d = ary + return nil + +} + +func (p Patch) add(doc *container, op operation) error { + path := 
op.path() + + con, key := findObject(doc, path) + + if con == nil { + return fmt.Errorf("jsonpatch add operation does not apply: doc is missing path: %s", path) + } + + return con.add(key, op.value()) +} + +func (p Patch) remove(doc *container, op operation) error { + path := op.path() + + con, key := findObject(doc, path) + + if con == nil { + return fmt.Errorf("jsonpatch remove operation does not apply: doc is missing path: %s", path) + } + + return con.remove(key) +} + +func (p Patch) replace(doc *container, op operation) error { + path := op.path() + + con, key := findObject(doc, path) + + if con == nil { + return fmt.Errorf("jsonpatch replace operation does not apply: doc is missing path: %s", path) + } + + return con.set(key, op.value()) +} + +func (p Patch) move(doc *container, op operation) error { + from := op.from() + + con, key := findObject(doc, from) + + if con == nil { + return fmt.Errorf("jsonpatch move operation does not apply: doc is missing from path: %s", from) + } + + val, err := con.get(key) + if err != nil { + return err + } + + err = con.remove(key) + if err != nil { + return err + } + + path := op.path() + + con, key = findObject(doc, path) + + if con == nil { + return fmt.Errorf("jsonpatch move operation does not apply: doc is missing destination path: %s", path) + } + + return con.set(key, val) +} + +func (p Patch) test(doc *container, op operation) error { + path := op.path() + + con, key := findObject(doc, path) + + if con == nil { + return fmt.Errorf("jsonpatch test operation does not apply: is missing path: %s", path) + } + + val, err := con.get(key) + + if err != nil { + return err + } + + if val == nil { + if op.value().raw == nil { + return nil + } else { + return fmt.Errorf("Testing value %s failed", path) + } + } + + if val.equal(op.value()) { + return nil + } + + return fmt.Errorf("Testing value %s failed", path) +} + +// Equal indicates if 2 JSON documents have the same structural equality. 
+func Equal(a, b []byte) bool { + ra := make(json.RawMessage, len(a)) + copy(ra, a) + la := newLazyNode(&ra) + + rb := make(json.RawMessage, len(b)) + copy(rb, b) + lb := newLazyNode(&rb) + + return la.equal(lb) +} + +// DecodePatch decodes the passed JSON document as an RFC 6902 patch. +func DecodePatch(buf []byte) (Patch, error) { + var p Patch + + err := json.Unmarshal(buf, &p) + + if err != nil { + return nil, err + } + + return p, nil +} + +// Apply mutates a JSON document according to the patch, and returns the new +// document. +func (p Patch) Apply(doc []byte) ([]byte, error) { + return p.ApplyIndent(doc, "") +} + +// ApplyIndent mutates a JSON document according to the patch, and returns the new +// document indented. +func (p Patch) ApplyIndent(doc []byte, indent string) ([]byte, error) { + var pd container + if (doc[0] == LeftBrace) { + pd = &partialArray{} + } else { + pd = &partialDoc{} + } + + err := json.Unmarshal(doc, pd) + + if err != nil { + return nil, err + } + + err = nil + + for _, op := range p { + switch op.kind() { + case "add": + err = p.add(&pd, op) + case "remove": + err = p.remove(&pd, op) + case "replace": + err = p.replace(&pd, op) + case "move": + err = p.move(&pd, op) + case "test": + err = p.test(&pd, op) + default: + err = fmt.Errorf("Unexpected kind: %s", op.kind()) + } + + if err != nil { + return nil, err + } + } + + if indent != "" { + return json.MarshalIndent(pd, "", indent) + } + + return json.Marshal(pd) +} diff --git a/vendor/github.com/evanphx/json-patch/patch_test.go b/vendor/github.com/evanphx/json-patch/patch_test.go new file mode 100644 index 000000000..417163e7e --- /dev/null +++ b/vendor/github.com/evanphx/json-patch/patch_test.go @@ -0,0 +1,297 @@ +package jsonpatch + +import ( + "bytes" + "encoding/json" + "fmt" + "reflect" + "testing" +) + +func reformatJSON(j string) string { + buf := new(bytes.Buffer) + + json.Indent(buf, []byte(j), "", " ") + + return buf.String() +} + +func compareJSON(a, b string) bool { + 
// return Equal([]byte(a), []byte(b)) + + var obj_a, obj_b map[string]interface{} + json.Unmarshal([]byte(a), &obj_a) + json.Unmarshal([]byte(b), &obj_b) + + // fmt.Printf("Comparing %#v\nagainst %#v\n", obj_a, obj_b) + return reflect.DeepEqual(obj_a, obj_b) +} + +func applyPatch(doc, patch string) (string, error) { + obj, err := DecodePatch([]byte(patch)) + + if err != nil { + panic(err) + } + + out, err := obj.Apply([]byte(doc)) + + if err != nil { + return "", err + } + + return string(out), nil +} + +type Case struct { + doc, patch, result string +} + +var Cases = []Case{ + { + `{ "foo": "bar"}`, + `[ + { "op": "add", "path": "/baz", "value": "qux" } + ]`, + `{ + "baz": "qux", + "foo": "bar" + }`, + }, + { + `{ "foo": [ "bar", "baz" ] }`, + `[ + { "op": "add", "path": "/foo/1", "value": "qux" } + ]`, + `{ "foo": [ "bar", "qux", "baz" ] }`, + }, + { + `{ "baz": "qux", "foo": "bar" }`, + `[ { "op": "remove", "path": "/baz" } ]`, + `{ "foo": "bar" }`, + }, + { + `{ "foo": [ "bar", "qux", "baz" ] }`, + `[ { "op": "remove", "path": "/foo/1" } ]`, + `{ "foo": [ "bar", "baz" ] }`, + }, + { + `{ "baz": "qux", "foo": "bar" }`, + `[ { "op": "replace", "path": "/baz", "value": "boo" } ]`, + `{ "baz": "boo", "foo": "bar" }`, + }, + { + `{ + "foo": { + "bar": "baz", + "waldo": "fred" + }, + "qux": { + "corge": "grault" + } + }`, + `[ { "op": "move", "from": "/foo/waldo", "path": "/qux/thud" } ]`, + `{ + "foo": { + "bar": "baz" + }, + "qux": { + "corge": "grault", + "thud": "fred" + } + }`, + }, + { + `{ "foo": [ "all", "grass", "cows", "eat" ] }`, + `[ { "op": "move", "from": "/foo/1", "path": "/foo/3" } ]`, + `{ "foo": [ "all", "cows", "eat", "grass" ] }`, + }, + { + `{ "foo": "bar" }`, + `[ { "op": "add", "path": "/child", "value": { "grandchild": { } } } ]`, + `{ "foo": "bar", "child": { "grandchild": { } } }`, + }, + { + `{ "foo": ["bar"] }`, + `[ { "op": "add", "path": "/foo/-", "value": ["abc", "def"] } ]`, + `{ "foo": ["bar", ["abc", "def"]] }`, + }, + { + `{ "foo": 
"bar", "qux": { "baz": 1, "bar": null } }`, + `[ { "op": "remove", "path": "/qux/bar" } ]`, + `{ "foo": "bar", "qux": { "baz": 1 } }`, + }, + { + `{ "foo": "bar" }`, + `[ { "op": "add", "path": "/baz", "value": null } ]`, + `{ "baz": null, "foo": "bar" }`, + }, + { + `{ "foo": ["bar"]}`, + `[ { "op": "replace", "path": "/foo/0", "value": "baz"}]`, + `{ "foo": ["baz"]}`, + }, + { + `{ "foo": ["bar","baz"]}`, + `[ { "op": "replace", "path": "/foo/0", "value": "bum"}]`, + `{ "foo": ["bum","baz"]}`, + }, + { + `{ "foo": ["bar","qux","baz"]}`, + `[ { "op": "replace", "path": "/foo/1", "value": "bum"}]`, + `{ "foo": ["bar", "bum","baz"]}`, + }, + { + `[ {"foo": ["bar","qux","baz"]}]`, + `[ { "op": "replace", "path": "/0/foo/0", "value": "bum"}]`, + `[ {"foo": ["bum","qux","baz"]}]`, + }, +} + +type BadCase struct { + doc, patch string +} + +var MutationTestCases = []BadCase{ + { + `{ "foo": "bar", "qux": { "baz": 1, "bar": null } }`, + `[ { "op": "remove", "path": "/qux/bar" } ]`, + }, + { + `{ "foo": "bar", "qux": { "baz": 1, "bar": null } }`, + `[ { "op": "replace", "path": "/qux/baz", "value": null } ]`, + }, +} + +var BadCases = []BadCase{ + { + `{ "foo": "bar" }`, + `[ { "op": "add", "path": "/baz/bat", "value": "qux" } ]`, + }, + { + `{ "a": { "b": { "d": 1 } } }`, + `[ { "op": "remove", "path": "/a/b/c" } ]`, + }, + { + `{ "a": { "b": { "d": 1 } } }`, + `[ { "op": "move", "from": "/a/b/c", "path": "/a/b/e" } ]`, + }, + { + `{ "a": { "b": [1] } }`, + `[ { "op": "remove", "path": "/a/b/1" } ]`, + }, + { + `{ "a": { "b": [1] } }`, + `[ { "op": "move", "from": "/a/b/1", "path": "/a/b/2" } ]`, + }, +} + +func TestAllCases(t *testing.T) { + for _, c := range Cases { + out, err := applyPatch(c.doc, c.patch) + + if err != nil { + t.Errorf("Unable to apply patch: %s", err) + } + + if !compareJSON(out, c.result) { + t.Errorf("Patch did not apply. 
Expected:\n%s\n\nActual:\n%s", + reformatJSON(c.result), reformatJSON(out)) + } + } + + for _, c := range MutationTestCases { + out, err := applyPatch(c.doc, c.patch) + + if err != nil { + t.Errorf("Unable to apply patch: %s", err) + } + + if compareJSON(out, c.doc) { + t.Errorf("Patch did not apply. Original:\n%s\n\nPatched:\n%s", + reformatJSON(c.doc), reformatJSON(out)) + } + } + + for _, c := range BadCases { + _, err := applyPatch(c.doc, c.patch) + + if err == nil { + t.Errorf("Patch should have failed to apply but it did not") + } + } +} + +type TestCase struct { + doc, patch string + result bool + failedPath string +} + +var TestCases = []TestCase{ + { + `{ + "baz": "qux", + "foo": [ "a", 2, "c" ] + }`, + `[ + { "op": "test", "path": "/baz", "value": "qux" }, + { "op": "test", "path": "/foo/1", "value": 2 } + ]`, + true, + "", + }, + { + `{ "baz": "qux" }`, + `[ { "op": "test", "path": "/baz", "value": "bar" } ]`, + false, + "/baz", + }, + { + `{ + "baz": "qux", + "foo": ["a", 2, "c"] + }`, + `[ + { "op": "test", "path": "/baz", "value": "qux" }, + { "op": "test", "path": "/foo/1", "value": "c" } + ]`, + false, + "/foo/1", + }, + { + `{ "baz": "qux" }`, + `[ { "op": "test", "path": "/foo", "value": 42 } ]`, + false, + "/foo", + }, + { + `{ "baz": "qux" }`, + `[ { "op": "test", "path": "/foo", "value": null } ]`, + true, + "", + }, + { + `{ "baz/foo": "qux" }`, + `[ { "op": "test", "path": "/baz~1foo", "value": "qux"} ]`, + true, + "", + }, +} + +func TestAllTest(t *testing.T) { + for _, c := range TestCases { + _, err := applyPatch(c.doc, c.patch) + + if c.result && err != nil { + t.Errorf("Testing failed when it should have passed: %s", err) + } else if !c.result && err == nil { + t.Errorf("Testing passed when it should have faild: %s", err) + } else if !c.result { + expected := fmt.Sprintf("Testing value %s failed", c.failedPath) + if err.Error() != expected { + t.Errorf("Testing failed as expected but invalid message: expected [%s], got [%s]", expected, 
err) + } + } + } +} diff --git a/vendor/github.com/mitchellh/mapstructure/.gitrepo b/vendor/github.com/mitchellh/mapstructure/.gitrepo new file mode 100644 index 000000000..f5fec2c7c --- /dev/null +++ b/vendor/github.com/mitchellh/mapstructure/.gitrepo @@ -0,0 +1,11 @@ +; DO NOT EDIT (unless you know what you are doing) +; +; This subdirectory is a git "subrepo", and this file is maintained by the +; git-subrepo command. See https://github.com/git-commands/git-subrepo#readme +; +[subrepo] + remote = https://github.com/mitchellh/mapstructure.git + branch = master + commit = bfdb1a85537d60bc7e954e600c250219ea497417 + parent = 8b9817a7a00044d68272b133d27ba93e2e55d4be + cmdver = 0.3.0 diff --git a/vendor/github.com/mitchellh/mapstructure/.travis.yml b/vendor/github.com/mitchellh/mapstructure/.travis.yml new file mode 100644 index 000000000..7f3fe9a96 --- /dev/null +++ b/vendor/github.com/mitchellh/mapstructure/.travis.yml @@ -0,0 +1,7 @@ +language: go + +go: + - 1.4 + +script: + - go test diff --git a/vendor/github.com/mitchellh/mapstructure/LICENSE b/vendor/github.com/mitchellh/mapstructure/LICENSE new file mode 100644 index 000000000..f9c841a51 --- /dev/null +++ b/vendor/github.com/mitchellh/mapstructure/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2013 Mitchell Hashimoto + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. 
+ +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. diff --git a/vendor/github.com/mitchellh/mapstructure/README.md b/vendor/github.com/mitchellh/mapstructure/README.md new file mode 100644 index 000000000..659d6885f --- /dev/null +++ b/vendor/github.com/mitchellh/mapstructure/README.md @@ -0,0 +1,46 @@ +# mapstructure + +mapstructure is a Go library for decoding generic map values to structures +and vice versa, while providing helpful error handling. + +This library is most useful when decoding values from some data stream (JSON, +Gob, etc.) where you don't _quite_ know the structure of the underlying data +until you read a part of it. You can therefore read a `map[string]interface{}` +and use this library to decode it into the proper underlying native Go +structure. + +## Installation + +Standard `go get`: + +``` +$ go get github.com/mitchellh/mapstructure +``` + +## Usage & Example + +For usage and examples see the [Godoc](http://godoc.org/github.com/mitchellh/mapstructure). + +The `Decode` function has examples associated with it there. + +## But Why?! + +Go offers fantastic standard libraries for decoding formats such as JSON. +The standard method is to have a struct pre-created, and populate that struct +from the bytes of the encoded format. This is great, but the problem is if +you have configuration or an encoding that changes slightly depending on +specific fields. 
For example, consider this JSON: + +```json +{ + "type": "person", + "name": "Mitchell" +} +``` + +Perhaps we can't populate a specific structure without first reading +the "type" field from the JSON. We could always do two passes over the +decoding of the JSON (reading the "type" first, and the rest later). +However, it is much simpler to just decode this into a `map[string]interface{}` +structure, read the "type" key, then use something like this library +to decode it into the proper structure. diff --git a/vendor/github.com/mitchellh/mapstructure/decode_hooks.go b/vendor/github.com/mitchellh/mapstructure/decode_hooks.go new file mode 100644 index 000000000..115ae67c1 --- /dev/null +++ b/vendor/github.com/mitchellh/mapstructure/decode_hooks.go @@ -0,0 +1,154 @@ +package mapstructure + +import ( + "errors" + "reflect" + "strconv" + "strings" + "time" +) + +// typedDecodeHook takes a raw DecodeHookFunc (an interface{}) and turns +// it into the proper DecodeHookFunc type, such as DecodeHookFuncType. +func typedDecodeHook(h DecodeHookFunc) DecodeHookFunc { + // Create variables here so we can reference them with the reflect pkg + var f1 DecodeHookFuncType + var f2 DecodeHookFuncKind + + // Fill in the variables into this interface and the rest is done + // automatically using the reflect package. + potential := []interface{}{f1, f2} + + v := reflect.ValueOf(h) + vt := v.Type() + for _, raw := range potential { + pt := reflect.ValueOf(raw).Type() + if vt.ConvertibleTo(pt) { + return v.Convert(pt).Interface() + } + } + + return nil +} + +// DecodeHookExec executes the given decode hook. This should be used +// since it'll naturally degrade to the older backwards compatible DecodeHookFunc +// that took reflect.Kind instead of reflect.Type. 
+func DecodeHookExec( + raw DecodeHookFunc, + from reflect.Type, to reflect.Type, + data interface{}) (interface{}, error) { + // Build our arguments that reflect expects + argVals := make([]reflect.Value, 3) + argVals[0] = reflect.ValueOf(from) + argVals[1] = reflect.ValueOf(to) + argVals[2] = reflect.ValueOf(data) + + switch f := typedDecodeHook(raw).(type) { + case DecodeHookFuncType: + return f(from, to, data) + case DecodeHookFuncKind: + return f(from.Kind(), to.Kind(), data) + default: + return nil, errors.New("invalid decode hook signature") + } +} + +// ComposeDecodeHookFunc creates a single DecodeHookFunc that +// automatically composes multiple DecodeHookFuncs. +// +// The composed funcs are called in order, with the result of the +// previous transformation. +func ComposeDecodeHookFunc(fs ...DecodeHookFunc) DecodeHookFunc { + return func( + f reflect.Type, + t reflect.Type, + data interface{}) (interface{}, error) { + var err error + for _, f1 := range fs { + data, err = DecodeHookExec(f1, f, t, data) + if err != nil { + return nil, err + } + + // Modify the from kind to be correct with the new data + f = nil + if val := reflect.ValueOf(data); val.IsValid() { + f = val.Type() + } + } + + return data, nil + } +} + +// StringToSliceHookFunc returns a DecodeHookFunc that converts +// string to []string by splitting on the given sep. +func StringToSliceHookFunc(sep string) DecodeHookFunc { + return func( + f reflect.Kind, + t reflect.Kind, + data interface{}) (interface{}, error) { + if f != reflect.String || t != reflect.Slice { + return data, nil + } + + raw := data.(string) + if raw == "" { + return []string{}, nil + } + + return strings.Split(raw, sep), nil + } +} + +// StringToTimeDurationHookFunc returns a DecodeHookFunc that converts +// strings to time.Duration. 
+func StringToTimeDurationHookFunc() DecodeHookFunc { + return func( + f reflect.Type, + t reflect.Type, + data interface{}) (interface{}, error) { + if f.Kind() != reflect.String { + return data, nil + } + if t != reflect.TypeOf(time.Duration(5)) { + return data, nil + } + + // Convert it by parsing + return time.ParseDuration(data.(string)) + } +} + +func WeaklyTypedHook( + f reflect.Kind, + t reflect.Kind, + data interface{}) (interface{}, error) { + dataVal := reflect.ValueOf(data) + switch t { + case reflect.String: + switch f { + case reflect.Bool: + if dataVal.Bool() { + return "1", nil + } else { + return "0", nil + } + case reflect.Float32: + return strconv.FormatFloat(dataVal.Float(), 'f', -1, 64), nil + case reflect.Int: + return strconv.FormatInt(dataVal.Int(), 10), nil + case reflect.Slice: + dataType := dataVal.Type() + elemKind := dataType.Elem().Kind() + if elemKind == reflect.Uint8 { + return string(dataVal.Interface().([]uint8)), nil + } + case reflect.Uint: + return strconv.FormatUint(dataVal.Uint(), 10), nil + } + } + + return data, nil +} diff --git a/vendor/github.com/mitchellh/mapstructure/decode_hooks_test.go b/vendor/github.com/mitchellh/mapstructure/decode_hooks_test.go new file mode 100644 index 000000000..53289afcf --- /dev/null +++ b/vendor/github.com/mitchellh/mapstructure/decode_hooks_test.go @@ -0,0 +1,229 @@ +package mapstructure + +import ( + "errors" + "reflect" + "testing" + "time" +) + +func TestComposeDecodeHookFunc(t *testing.T) { + f1 := func( + f reflect.Kind, + t reflect.Kind, + data interface{}) (interface{}, error) { + return data.(string) + "foo", nil + } + + f2 := func( + f reflect.Kind, + t reflect.Kind, + data interface{}) (interface{}, error) { + return data.(string) + "bar", nil + } + + f := ComposeDecodeHookFunc(f1, f2) + + result, err := DecodeHookExec( + f, reflect.TypeOf(""), reflect.TypeOf([]byte("")), "") + if err != nil { + t.Fatalf("bad: %s", err) + } + if result.(string) != "foobar" { + t.Fatalf("bad: %#v", 
result) + } +} + +func TestComposeDecodeHookFunc_err(t *testing.T) { + f1 := func(reflect.Kind, reflect.Kind, interface{}) (interface{}, error) { + return nil, errors.New("foo") + } + + f2 := func(reflect.Kind, reflect.Kind, interface{}) (interface{}, error) { + panic("NOPE") + } + + f := ComposeDecodeHookFunc(f1, f2) + + _, err := DecodeHookExec( + f, reflect.TypeOf(""), reflect.TypeOf([]byte("")), 42) + if err.Error() != "foo" { + t.Fatalf("bad: %s", err) + } +} + +func TestComposeDecodeHookFunc_kinds(t *testing.T) { + var f2From reflect.Kind + + f1 := func( + f reflect.Kind, + t reflect.Kind, + data interface{}) (interface{}, error) { + return int(42), nil + } + + f2 := func( + f reflect.Kind, + t reflect.Kind, + data interface{}) (interface{}, error) { + f2From = f + return data, nil + } + + f := ComposeDecodeHookFunc(f1, f2) + + _, err := DecodeHookExec( + f, reflect.TypeOf(""), reflect.TypeOf([]byte("")), "") + if err != nil { + t.Fatalf("bad: %s", err) + } + if f2From != reflect.Int { + t.Fatalf("bad: %#v", f2From) + } +} + +func TestStringToSliceHookFunc(t *testing.T) { + f := StringToSliceHookFunc(",") + + strType := reflect.TypeOf("") + sliceType := reflect.TypeOf([]byte("")) + cases := []struct { + f, t reflect.Type + data interface{} + result interface{} + err bool + }{ + {sliceType, sliceType, 42, 42, false}, + {strType, strType, 42, 42, false}, + { + strType, + sliceType, + "foo,bar,baz", + []string{"foo", "bar", "baz"}, + false, + }, + { + strType, + sliceType, + "", + []string{}, + false, + }, + } + + for i, tc := range cases { + actual, err := DecodeHookExec(f, tc.f, tc.t, tc.data) + if tc.err != (err != nil) { + t.Fatalf("case %d: expected err %#v", i, tc.err) + } + if !reflect.DeepEqual(actual, tc.result) { + t.Fatalf( + "case %d: expected %#v, got %#v", + i, tc.result, actual) + } + } +} + +func TestStringToTimeDurationHookFunc(t *testing.T) { + f := StringToTimeDurationHookFunc() + + strType := reflect.TypeOf("") + timeType := 
reflect.TypeOf(time.Duration(5)) + cases := []struct { + f, t reflect.Type + data interface{} + result interface{} + err bool + }{ + {strType, timeType, "5s", 5 * time.Second, false}, + {strType, timeType, "5", time.Duration(0), true}, + {strType, strType, "5", "5", false}, + } + + for i, tc := range cases { + actual, err := DecodeHookExec(f, tc.f, tc.t, tc.data) + if tc.err != (err != nil) { + t.Fatalf("case %d: expected err %#v", i, tc.err) + } + if !reflect.DeepEqual(actual, tc.result) { + t.Fatalf( + "case %d: expected %#v, got %#v", + i, tc.result, actual) + } + } +} + +func TestWeaklyTypedHook(t *testing.T) { + var f DecodeHookFunc = WeaklyTypedHook + + boolType := reflect.TypeOf(true) + strType := reflect.TypeOf("") + sliceType := reflect.TypeOf([]byte("")) + cases := []struct { + f, t reflect.Type + data interface{} + result interface{} + err bool + }{ + // TO STRING + { + boolType, + strType, + false, + "0", + false, + }, + + { + boolType, + strType, + true, + "1", + false, + }, + + { + reflect.TypeOf(float32(1)), + strType, + float32(7), + "7", + false, + }, + + { + reflect.TypeOf(int(1)), + strType, + int(7), + "7", + false, + }, + + { + sliceType, + strType, + []uint8("foo"), + "foo", + false, + }, + + { + reflect.TypeOf(uint(1)), + strType, + uint(7), + "7", + false, + }, + } + + for i, tc := range cases { + actual, err := DecodeHookExec(f, tc.f, tc.t, tc.data) + if tc.err != (err != nil) { + t.Fatalf("case %d: expected err %#v", i, tc.err) + } + if !reflect.DeepEqual(actual, tc.result) { + t.Fatalf( + "case %d: expected %#v, got %#v", + i, tc.result, actual) + } + } +} diff --git a/vendor/github.com/mitchellh/mapstructure/error.go b/vendor/github.com/mitchellh/mapstructure/error.go new file mode 100644 index 000000000..47a99e5af --- /dev/null +++ b/vendor/github.com/mitchellh/mapstructure/error.go @@ -0,0 +1,50 @@ +package mapstructure + +import ( + "errors" + "fmt" + "sort" + "strings" +) + +// Error implements the error interface and can represents 
multiple +// errors that occur in the course of a single decode. +type Error struct { + Errors []string +} + +func (e *Error) Error() string { + points := make([]string, len(e.Errors)) + for i, err := range e.Errors { + points[i] = fmt.Sprintf("* %s", err) + } + + sort.Strings(points) + return fmt.Sprintf( + "%d error(s) decoding:\n\n%s", + len(e.Errors), strings.Join(points, "\n")) +} + +// WrappedErrors implements the errwrap.Wrapper interface to make this +// return value more useful with the errwrap and go-multierror libraries. +func (e *Error) WrappedErrors() []error { + if e == nil { + return nil + } + + result := make([]error, len(e.Errors)) + for i, e := range e.Errors { + result[i] = errors.New(e) + } + + return result +} + +func appendErrors(errors []string, err error) []string { + switch e := err.(type) { + case *Error: + return append(errors, e.Errors...) + default: + return append(errors, e.Error()) + } +} diff --git a/vendor/github.com/mitchellh/mapstructure/mapstructure.go b/vendor/github.com/mitchellh/mapstructure/mapstructure.go new file mode 100644 index 000000000..b0ab9a3e0 --- /dev/null +++ b/vendor/github.com/mitchellh/mapstructure/mapstructure.go @@ -0,0 +1,791 @@ +// The mapstructure package exposes functionality to convert an +// arbitrary map[string]interface{} into a native Go structure. +// +// The Go structure can be arbitrarily complex, containing slices, +// other structs, etc. and the decoder will properly decode nested +// maps and so on into the proper structures in the native Go struct. +// See the examples to see what the decoder is capable of. +package mapstructure + +import ( + "encoding/json" + "errors" + "fmt" + "reflect" + "sort" + "strconv" + "strings" +) + +// DecodeHookFunc is the callback function that can be used for +// data transformations. See "DecodeHook" in the DecoderConfig +// struct. +// +// The type should be DecodeHookFuncType or DecodeHookFuncKind. +// Either is accepted. 
Types are a superset of Kinds (Types can return +// Kinds) and are generally a richer thing to use, but Kinds are simpler +// if you only need those. +// +// The reason DecodeHookFunc is multi-typed is for backwards compatibility: +// we started with Kinds and then realized Types were the better solution, +// but have a promise to not break backwards compat so we now support +// both. +type DecodeHookFunc interface{} + +type DecodeHookFuncType func(reflect.Type, reflect.Type, interface{}) (interface{}, error) +type DecodeHookFuncKind func(reflect.Kind, reflect.Kind, interface{}) (interface{}, error) + +// DecoderConfig is the configuration that is used to create a new decoder +// and allows customization of various aspects of decoding. +type DecoderConfig struct { + // DecodeHook, if set, will be called before any decoding and any + // type conversion (if WeaklyTypedInput is on). This lets you modify + // the values before they're set down onto the resulting struct. + // + // If an error is returned, the entire decode will fail with that + // error. + DecodeHook DecodeHookFunc + + // If ErrorUnused is true, then it is an error for there to exist + // keys in the original map that were unused in the decoding process + // (extra keys). + ErrorUnused bool + + // ZeroFields, if set to true, will zero fields before writing them. + // For example, a map will be emptied before decoded values are put in + // it. If this is false, a map will be merged. + ZeroFields bool + + // If WeaklyTypedInput is true, the decoder will make the following + // "weak" conversions: + // + // - bools to string (true = "1", false = "0") + // - numbers to string (base 10) + // - bools to int/uint (true = 1, false = 0) + // - strings to int/uint (base implied by prefix) + // - int to bool (true if value != 0) + // - string to bool (accepts: 1, t, T, TRUE, true, True, 0, f, F, + // FALSE, false, False. 
Anything else is an error) + // - empty array = empty map and vice versa + // - negative numbers to overflowed uint values (base 10) + // - slice of maps to a merged map + // + WeaklyTypedInput bool + + // Metadata is the struct that will contain extra metadata about + // the decoding. If this is nil, then no metadata will be tracked. + Metadata *Metadata + + // Result is a pointer to the struct that will contain the decoded + // value. + Result interface{} + + // The tag name that mapstructure reads for field names. This + // defaults to "mapstructure" + TagName string +} + +// A Decoder takes a raw interface value and turns it into structured +// data, keeping track of rich error information along the way in case +// anything goes wrong. Unlike the basic top-level Decode method, you can +// more finely control how the Decoder behaves using the DecoderConfig +// structure. The top-level Decode method is just a convenience that sets +// up the most basic Decoder. +type Decoder struct { + config *DecoderConfig +} + +// Metadata contains information about decoding a structure that +// is tedious or difficult to get otherwise. +type Metadata struct { + // Keys are the keys of the structure which were successfully decoded + Keys []string + + // Unused is a slice of keys that were found in the raw value but + // weren't decoded since there was no matching field in the result interface + Unused []string +} + +// Decode takes a map and uses reflection to convert it into the +// given Go native structure. val must be a pointer to a struct. +func Decode(m interface{}, rawVal interface{}) error { + config := &DecoderConfig{ + Metadata: nil, + Result: rawVal, + } + + decoder, err := NewDecoder(config) + if err != nil { + return err + } + + return decoder.Decode(m) +} + +// WeakDecode is the same as Decode but is shorthand to enable +// WeaklyTypedInput. See DecoderConfig for more info. 
+func WeakDecode(input, output interface{}) error { + config := &DecoderConfig{ + Metadata: nil, + Result: output, + WeaklyTypedInput: true, + } + + decoder, err := NewDecoder(config) + if err != nil { + return err + } + + return decoder.Decode(input) +} + +// NewDecoder returns a new decoder for the given configuration. Once +// a decoder has been returned, the same configuration must not be used +// again. +func NewDecoder(config *DecoderConfig) (*Decoder, error) { + val := reflect.ValueOf(config.Result) + if val.Kind() != reflect.Ptr { + return nil, errors.New("result must be a pointer") + } + + val = val.Elem() + if !val.CanAddr() { + return nil, errors.New("result must be addressable (a pointer)") + } + + if config.Metadata != nil { + if config.Metadata.Keys == nil { + config.Metadata.Keys = make([]string, 0) + } + + if config.Metadata.Unused == nil { + config.Metadata.Unused = make([]string, 0) + } + } + + if config.TagName == "" { + config.TagName = "mapstructure" + } + + result := &Decoder{ + config: config, + } + + return result, nil +} + +// Decode decodes the given raw interface to the target pointer specified +// by the configuration. +func (d *Decoder) Decode(raw interface{}) error { + return d.decode("", raw, reflect.ValueOf(d.config.Result).Elem()) +} + +// Decodes an unknown data type into a specific reflection value. +func (d *Decoder) decode(name string, data interface{}, val reflect.Value) error { + if data == nil { + // If the data is nil, then we don't set anything. + return nil + } + + dataVal := reflect.ValueOf(data) + if !dataVal.IsValid() { + // If the data value is invalid, then we just set the value + // to be the zero value. + val.Set(reflect.Zero(val.Type())) + return nil + } + + if d.config.DecodeHook != nil { + // We have a DecodeHook, so let's pre-process the data. 
+ var err error + data, err = DecodeHookExec( + d.config.DecodeHook, + dataVal.Type(), val.Type(), data) + if err != nil { + return err + } + } + + var err error + dataKind := getKind(val) + switch dataKind { + case reflect.Bool: + err = d.decodeBool(name, data, val) + case reflect.Interface: + err = d.decodeBasic(name, data, val) + case reflect.String: + err = d.decodeString(name, data, val) + case reflect.Int: + err = d.decodeInt(name, data, val) + case reflect.Uint: + err = d.decodeUint(name, data, val) + case reflect.Float32: + err = d.decodeFloat(name, data, val) + case reflect.Struct: + err = d.decodeStruct(name, data, val) + case reflect.Map: + err = d.decodeMap(name, data, val) + case reflect.Ptr: + err = d.decodePtr(name, data, val) + case reflect.Slice: + err = d.decodeSlice(name, data, val) + default: + // If we reached this point then we weren't able to decode it + return fmt.Errorf("%s: unsupported type: %s", name, dataKind) + } + + // If we reached here, then we successfully decoded SOMETHING, so + // mark the key as used if we're tracking metadata. + if d.config.Metadata != nil && name != "" { + d.config.Metadata.Keys = append(d.config.Metadata.Keys, name) + } + + return err +} + +// This decodes a basic type (bool, int, string, etc.) and sets the +// value to "data" of that type. 
+func (d *Decoder) decodeBasic(name string, data interface{}, val reflect.Value) error { + dataVal := reflect.ValueOf(data) + if !dataVal.IsValid() { + dataVal = reflect.Zero(val.Type()) + } + + dataValType := dataVal.Type() + if !dataValType.AssignableTo(val.Type()) { + return fmt.Errorf( + "'%s' expected type '%s', got '%s'", + name, val.Type(), dataValType) + } + + val.Set(dataVal) + return nil +} + +func (d *Decoder) decodeString(name string, data interface{}, val reflect.Value) error { + dataVal := reflect.ValueOf(data) + dataKind := getKind(dataVal) + + converted := true + switch { + case dataKind == reflect.String: + val.SetString(dataVal.String()) + case dataKind == reflect.Bool && d.config.WeaklyTypedInput: + if dataVal.Bool() { + val.SetString("1") + } else { + val.SetString("0") + } + case dataKind == reflect.Int && d.config.WeaklyTypedInput: + val.SetString(strconv.FormatInt(dataVal.Int(), 10)) + case dataKind == reflect.Uint && d.config.WeaklyTypedInput: + val.SetString(strconv.FormatUint(dataVal.Uint(), 10)) + case dataKind == reflect.Float32 && d.config.WeaklyTypedInput: + val.SetString(strconv.FormatFloat(dataVal.Float(), 'f', -1, 64)) + case dataKind == reflect.Slice && d.config.WeaklyTypedInput: + dataType := dataVal.Type() + elemKind := dataType.Elem().Kind() + switch { + case elemKind == reflect.Uint8: + val.SetString(string(dataVal.Interface().([]uint8))) + default: + converted = false + } + default: + converted = false + } + + if !converted { + return fmt.Errorf( + "'%s' expected type '%s', got unconvertible type '%s'", + name, val.Type(), dataVal.Type()) + } + + return nil +} + +func (d *Decoder) decodeInt(name string, data interface{}, val reflect.Value) error { + dataVal := reflect.ValueOf(data) + dataKind := getKind(dataVal) + dataType := dataVal.Type() + + switch { + case dataKind == reflect.Int: + val.SetInt(dataVal.Int()) + case dataKind == reflect.Uint: + val.SetInt(int64(dataVal.Uint())) + case dataKind == reflect.Float32: + 
val.SetInt(int64(dataVal.Float())) + case dataKind == reflect.Bool && d.config.WeaklyTypedInput: + if dataVal.Bool() { + val.SetInt(1) + } else { + val.SetInt(0) + } + case dataKind == reflect.String && d.config.WeaklyTypedInput: + i, err := strconv.ParseInt(dataVal.String(), 0, val.Type().Bits()) + if err == nil { + val.SetInt(i) + } else { + return fmt.Errorf("cannot parse '%s' as int: %s", name, err) + } + case dataType.PkgPath() == "encoding/json" && dataType.Name() == "Number": + jn := data.(json.Number) + i, err := jn.Int64() + if err != nil { + return fmt.Errorf( + "error decoding json.Number into %s: %s", name, err) + } + val.SetInt(i) + default: + return fmt.Errorf( + "'%s' expected type '%s', got unconvertible type '%s'", + name, val.Type(), dataVal.Type()) + } + + return nil +} + +func (d *Decoder) decodeUint(name string, data interface{}, val reflect.Value) error { + dataVal := reflect.ValueOf(data) + dataKind := getKind(dataVal) + + switch { + case dataKind == reflect.Int: + i := dataVal.Int() + if i < 0 && !d.config.WeaklyTypedInput { + return fmt.Errorf("cannot parse '%s', %d overflows uint", + name, i) + } + val.SetUint(uint64(i)) + case dataKind == reflect.Uint: + val.SetUint(dataVal.Uint()) + case dataKind == reflect.Float32: + f := dataVal.Float() + if f < 0 && !d.config.WeaklyTypedInput { + return fmt.Errorf("cannot parse '%s', %f overflows uint", + name, f) + } + val.SetUint(uint64(f)) + case dataKind == reflect.Bool && d.config.WeaklyTypedInput: + if dataVal.Bool() { + val.SetUint(1) + } else { + val.SetUint(0) + } + case dataKind == reflect.String && d.config.WeaklyTypedInput: + i, err := strconv.ParseUint(dataVal.String(), 0, val.Type().Bits()) + if err == nil { + val.SetUint(i) + } else { + return fmt.Errorf("cannot parse '%s' as uint: %s", name, err) + } + default: + return fmt.Errorf( + "'%s' expected type '%s', got unconvertible type '%s'", + name, val.Type(), dataVal.Type()) + } + + return nil +} + +func (d *Decoder) decodeBool(name 
string, data interface{}, val reflect.Value) error { + dataVal := reflect.ValueOf(data) + dataKind := getKind(dataVal) + + switch { + case dataKind == reflect.Bool: + val.SetBool(dataVal.Bool()) + case dataKind == reflect.Int && d.config.WeaklyTypedInput: + val.SetBool(dataVal.Int() != 0) + case dataKind == reflect.Uint && d.config.WeaklyTypedInput: + val.SetBool(dataVal.Uint() != 0) + case dataKind == reflect.Float32 && d.config.WeaklyTypedInput: + val.SetBool(dataVal.Float() != 0) + case dataKind == reflect.String && d.config.WeaklyTypedInput: + b, err := strconv.ParseBool(dataVal.String()) + if err == nil { + val.SetBool(b) + } else if dataVal.String() == "" { + val.SetBool(false) + } else { + return fmt.Errorf("cannot parse '%s' as bool: %s", name, err) + } + default: + return fmt.Errorf( + "'%s' expected type '%s', got unconvertible type '%s'", + name, val.Type(), dataVal.Type()) + } + + return nil +} + +func (d *Decoder) decodeFloat(name string, data interface{}, val reflect.Value) error { + dataVal := reflect.ValueOf(data) + dataKind := getKind(dataVal) + dataType := dataVal.Type() + + switch { + case dataKind == reflect.Int: + val.SetFloat(float64(dataVal.Int())) + case dataKind == reflect.Uint: + val.SetFloat(float64(dataVal.Uint())) + case dataKind == reflect.Float32: + val.SetFloat(float64(dataVal.Float())) + case dataKind == reflect.Bool && d.config.WeaklyTypedInput: + if dataVal.Bool() { + val.SetFloat(1) + } else { + val.SetFloat(0) + } + case dataKind == reflect.String && d.config.WeaklyTypedInput: + f, err := strconv.ParseFloat(dataVal.String(), val.Type().Bits()) + if err == nil { + val.SetFloat(f) + } else { + return fmt.Errorf("cannot parse '%s' as float: %s", name, err) + } + case dataType.PkgPath() == "encoding/json" && dataType.Name() == "Number": + jn := data.(json.Number) + i, err := jn.Float64() + if err != nil { + return fmt.Errorf( + "error decoding json.Number into %s: %s", name, err) + } + val.SetFloat(i) + default: + return fmt.Errorf( 
+ "'%s' expected type '%s', got unconvertible type '%s'", + name, val.Type(), dataVal.Type()) + } + + return nil +} + +func (d *Decoder) decodeMap(name string, data interface{}, val reflect.Value) error { + valType := val.Type() + valKeyType := valType.Key() + valElemType := valType.Elem() + + // By default we overwrite keys in the current map + valMap := val + + // If the map is nil or we're purposely zeroing fields, make a new map + if valMap.IsNil() || d.config.ZeroFields { + // Make a new map to hold our result + mapType := reflect.MapOf(valKeyType, valElemType) + valMap = reflect.MakeMap(mapType) + } + + // Check input type + dataVal := reflect.Indirect(reflect.ValueOf(data)) + if dataVal.Kind() != reflect.Map { + // In weak mode, we accept a slice of maps as an input... + if d.config.WeaklyTypedInput { + switch dataVal.Kind() { + case reflect.Array, reflect.Slice: + // Special case for BC reasons (covered by tests) + if dataVal.Len() == 0 { + val.Set(valMap) + return nil + } + + for i := 0; i < dataVal.Len(); i++ { + err := d.decode( + fmt.Sprintf("%s[%d]", name, i), + dataVal.Index(i).Interface(), val) + if err != nil { + return err + } + } + + return nil + } + } + + return fmt.Errorf("'%s' expected a map, got '%s'", name, dataVal.Kind()) + } + + // Accumulate errors + errors := make([]string, 0) + + for _, k := range dataVal.MapKeys() { + fieldName := fmt.Sprintf("%s[%s]", name, k) + + // First decode the key into the proper type + currentKey := reflect.Indirect(reflect.New(valKeyType)) + if err := d.decode(fieldName, k.Interface(), currentKey); err != nil { + errors = appendErrors(errors, err) + continue + } + + // Next decode the data into the proper type + v := dataVal.MapIndex(k).Interface() + currentVal := reflect.Indirect(reflect.New(valElemType)) + if err := d.decode(fieldName, v, currentVal); err != nil { + errors = appendErrors(errors, err) + continue + } + + valMap.SetMapIndex(currentKey, currentVal) + } + + // Set the built up map to the value + 
val.Set(valMap) + + // If we had errors, return those + if len(errors) > 0 { + return &Error{errors} + } + + return nil +} + +func (d *Decoder) decodePtr(name string, data interface{}, val reflect.Value) error { + // Create an element of the concrete (non pointer) type and decode + // into that. Then set the value of the pointer to this type. + valType := val.Type() + valElemType := valType.Elem() + + realVal:=val + if realVal.IsNil() || d.config.ZeroFields { + realVal = reflect.New(valElemType) + } + + if err := d.decode(name, data, reflect.Indirect(realVal)); err != nil { + return err + } + + val.Set(realVal) + return nil +} + +func (d *Decoder) decodeSlice(name string, data interface{}, val reflect.Value) error { + dataVal := reflect.Indirect(reflect.ValueOf(data)) + dataValKind := dataVal.Kind() + valType := val.Type() + valElemType := valType.Elem() + sliceType := reflect.SliceOf(valElemType) + + valSlice:=val + if valSlice.IsNil() || d.config.ZeroFields { + + // Check input type + if dataValKind != reflect.Array && dataValKind != reflect.Slice { + // Accept empty map instead of array/slice in weakly typed mode + if d.config.WeaklyTypedInput && dataVal.Kind() == reflect.Map && dataVal.Len() == 0 { + val.Set(reflect.MakeSlice(sliceType, 0, 0)) + return nil + } else { + return fmt.Errorf( + "'%s': source data must be an array or slice, got %s", name, dataValKind) + } + } + + // Make a new slice to hold our result, same size as the original data. 
+ valSlice = reflect.MakeSlice(sliceType, dataVal.Len(), dataVal.Len()) + } + + // Accumulate any errors + errors := make([]string, 0) + + for i := 0; i < dataVal.Len(); i++ { + currentData := dataVal.Index(i).Interface() + currentField := valSlice.Index(i) + + fieldName := fmt.Sprintf("%s[%d]", name, i) + if err := d.decode(fieldName, currentData, currentField); err != nil { + errors = appendErrors(errors, err) + } + } + + // Finally, set the value to the slice we built up + val.Set(valSlice) + + // If there were errors, we return those + if len(errors) > 0 { + return &Error{errors} + } + + return nil +} + +func (d *Decoder) decodeStruct(name string, data interface{}, val reflect.Value) error { + dataVal := reflect.Indirect(reflect.ValueOf(data)) + + // If the type of the value to write to and the data match directly, + // then we just set it directly instead of recursing into the structure. + if dataVal.Type() == val.Type() { + val.Set(dataVal) + return nil + } + + dataValKind := dataVal.Kind() + if dataValKind != reflect.Map { + return fmt.Errorf("'%s' expected a map, got '%s'", name, dataValKind) + } + + dataValType := dataVal.Type() + if kind := dataValType.Key().Kind(); kind != reflect.String && kind != reflect.Interface { + return fmt.Errorf( + "'%s' needs a map with string keys, has '%s' keys", + name, dataValType.Key().Kind()) + } + + dataValKeys := make(map[reflect.Value]struct{}) + dataValKeysUnused := make(map[interface{}]struct{}) + for _, dataValKey := range dataVal.MapKeys() { + dataValKeys[dataValKey] = struct{}{} + dataValKeysUnused[dataValKey.Interface()] = struct{}{} + } + + errors := make([]string, 0) + + // This slice will keep track of all the structs we'll be decoding. + // There can be more than one struct if there are embedded structs + // that are squashed. + structs := make([]reflect.Value, 1, 5) + structs[0] = val + + // Compile the list of all the fields that we're going to be decoding + // from all the structs. 
+ fields := make(map[*reflect.StructField]reflect.Value) + for len(structs) > 0 { + structVal := structs[0] + structs = structs[1:] + + structType := structVal.Type() + + for i := 0; i < structType.NumField(); i++ { + fieldType := structType.Field(i) + fieldKind := fieldType.Type.Kind() + + // If "squash" is specified in the tag, we squash the field down. + squash := false + tagParts := strings.Split(fieldType.Tag.Get(d.config.TagName), ",") + for _, tag := range tagParts[1:] { + if tag == "squash" { + squash = true + break + } + } + + if squash { + if fieldKind != reflect.Struct { + errors = appendErrors(errors, + fmt.Errorf("%s: unsupported type for squash: %s", fieldType.Name, fieldKind)) + } else { + structs = append(structs, val.FieldByName(fieldType.Name)) + } + continue + } + + // Normal struct field, store it away + fields[&fieldType] = structVal.Field(i) + } + } + + for fieldType, field := range fields { + fieldName := fieldType.Name + + tagValue := fieldType.Tag.Get(d.config.TagName) + tagValue = strings.SplitN(tagValue, ",", 2)[0] + if tagValue != "" { + fieldName = tagValue + } + + rawMapKey := reflect.ValueOf(fieldName) + rawMapVal := dataVal.MapIndex(rawMapKey) + if !rawMapVal.IsValid() { + // Do a slower search by iterating over each key and + // doing case-insensitive search. + for dataValKey := range dataValKeys { + mK, ok := dataValKey.Interface().(string) + if !ok { + // Not a string key + continue + } + + if strings.EqualFold(mK, fieldName) { + rawMapKey = dataValKey + rawMapVal = dataVal.MapIndex(dataValKey) + break + } + } + + if !rawMapVal.IsValid() { + // There was no matching key in the map for the value in + // the struct. Just ignore. 
+ continue + } + } + + // Delete the key we're using from the unused map so we stop tracking + delete(dataValKeysUnused, rawMapKey.Interface()) + + if !field.IsValid() { + // This should never happen + panic("field is not valid") + } + + // If we can't set the field, then it is unexported or something, + // and we just continue onwards. + if !field.CanSet() { + continue + } + + // If the name is empty string, then we're at the root, and we + // don't dot-join the fields. + if name != "" { + fieldName = fmt.Sprintf("%s.%s", name, fieldName) + } + + if err := d.decode(fieldName, rawMapVal.Interface(), field); err != nil { + errors = appendErrors(errors, err) + } + } + + if d.config.ErrorUnused && len(dataValKeysUnused) > 0 { + keys := make([]string, 0, len(dataValKeysUnused)) + for rawKey := range dataValKeysUnused { + keys = append(keys, rawKey.(string)) + } + sort.Strings(keys) + + err := fmt.Errorf("'%s' has invalid keys: %s", name, strings.Join(keys, ", ")) + errors = appendErrors(errors, err) + } + + if len(errors) > 0 { + return &Error{errors} + } + + // Add the unused keys to the list of unused keys if we're tracking metadata + if d.config.Metadata != nil { + for rawKey := range dataValKeysUnused { + key := rawKey.(string) + if name != "" { + key = fmt.Sprintf("%s.%s", name, key) + } + + d.config.Metadata.Unused = append(d.config.Metadata.Unused, key) + } + } + + return nil +} + +func getKind(val reflect.Value) reflect.Kind { + kind := val.Kind() + + switch { + case kind >= reflect.Int && kind <= reflect.Int64: + return reflect.Int + case kind >= reflect.Uint && kind <= reflect.Uint64: + return reflect.Uint + case kind >= reflect.Float32 && kind <= reflect.Float64: + return reflect.Float32 + default: + return kind + } +} diff --git a/vendor/github.com/mitchellh/mapstructure/mapstructure_benchmark_test.go b/vendor/github.com/mitchellh/mapstructure/mapstructure_benchmark_test.go new file mode 100644 index 000000000..41d2a41f7 --- /dev/null +++ 
b/vendor/github.com/mitchellh/mapstructure/mapstructure_benchmark_test.go @@ -0,0 +1,279 @@ +package mapstructure + +import ( + "encoding/json" + "testing" +) + +func Benchmark_Decode(b *testing.B) { + type Person struct { + Name string + Age int + Emails []string + Extra map[string]string + } + + input := map[string]interface{}{ + "name": "Mitchell", + "age": 91, + "emails": []string{"one", "two", "three"}, + "extra": map[string]string{ + "twitter": "mitchellh", + }, + } + + var result Person + for i := 0; i < b.N; i++ { + Decode(input, &result) + } +} + +// decodeViaJSON takes the map data and passes it through encoding/json to convert it into the +// given Go native structure pointed to by v. v must be a pointer to a struct. +func decodeViaJSON(data interface{}, v interface{}) error { + // Perform the task by simply marshalling the input into JSON, + // then unmarshalling it into target native Go struct. + b, err := json.Marshal(data) + if err != nil { + return err + } + return json.Unmarshal(b, v) +} + +func Benchmark_DecodeViaJSON(b *testing.B) { + type Person struct { + Name string + Age int + Emails []string + Extra map[string]string + } + + input := map[string]interface{}{ + "name": "Mitchell", + "age": 91, + "emails": []string{"one", "two", "three"}, + "extra": map[string]string{ + "twitter": "mitchellh", + }, + } + + var result Person + for i := 0; i < b.N; i++ { + decodeViaJSON(input, &result) + } +} + +func Benchmark_DecodeBasic(b *testing.B) { + input := map[string]interface{}{ + "vstring": "foo", + "vint": 42, + "Vuint": 42, + "vbool": true, + "Vfloat": 42.42, + "vsilent": true, + "vdata": 42, + } + + var result Basic + for i := 0; i < b.N; i++ { + Decode(input, &result) + } +} + +func Benchmark_DecodeEmbedded(b *testing.B) { + input := map[string]interface{}{ + "vstring": "foo", + "Basic": map[string]interface{}{ + "vstring": "innerfoo", + }, + "vunique": "bar", + } + + var result Embedded + for i := 0; i < b.N; i++ { + Decode(input, &result) + } +} 
+ +func Benchmark_DecodeTypeConversion(b *testing.B) { + input := map[string]interface{}{ + "IntToFloat": 42, + "IntToUint": 42, + "IntToBool": 1, + "IntToString": 42, + "UintToInt": 42, + "UintToFloat": 42, + "UintToBool": 42, + "UintToString": 42, + "BoolToInt": true, + "BoolToUint": true, + "BoolToFloat": true, + "BoolToString": true, + "FloatToInt": 42.42, + "FloatToUint": 42.42, + "FloatToBool": 42.42, + "FloatToString": 42.42, + "StringToInt": "42", + "StringToUint": "42", + "StringToBool": "1", + "StringToFloat": "42.42", + "SliceToMap": []interface{}{}, + "MapToSlice": map[string]interface{}{}, + } + + var resultStrict TypeConversionResult + for i := 0; i < b.N; i++ { + Decode(input, &resultStrict) + } +} + +func Benchmark_DecodeMap(b *testing.B) { + input := map[string]interface{}{ + "vfoo": "foo", + "vother": map[interface{}]interface{}{ + "foo": "foo", + "bar": "bar", + }, + } + + var result Map + for i := 0; i < b.N; i++ { + Decode(input, &result) + } +} + +func Benchmark_DecodeMapOfStruct(b *testing.B) { + input := map[string]interface{}{ + "value": map[string]interface{}{ + "foo": map[string]string{"vstring": "one"}, + "bar": map[string]string{"vstring": "two"}, + }, + } + + var result MapOfStruct + for i := 0; i < b.N; i++ { + Decode(input, &result) + } +} + +func Benchmark_DecodeSlice(b *testing.B) { + input := map[string]interface{}{ + "vfoo": "foo", + "vbar": []string{"foo", "bar", "baz"}, + } + + var result Slice + for i := 0; i < b.N; i++ { + Decode(input, &result) + } +} + +func Benchmark_DecodeSliceOfStruct(b *testing.B) { + input := map[string]interface{}{ + "value": []map[string]interface{}{ + {"vstring": "one"}, + {"vstring": "two"}, + }, + } + + var result SliceOfStruct + for i := 0; i < b.N; i++ { + Decode(input, &result) + } +} + +func Benchmark_DecodeWeaklyTypedInput(b *testing.B) { + type Person struct { + Name string + Age int + Emails []string + } + + // This input can come from anywhere, but typically comes from + // something like 
decoding JSON, generated by a weakly typed language + // such as PHP. + input := map[string]interface{}{ + "name": 123, // number => string + "age": "42", // string => number + "emails": map[string]interface{}{}, // empty map => empty array + } + + var result Person + config := &DecoderConfig{ + WeaklyTypedInput: true, + Result: &result, + } + + decoder, err := NewDecoder(config) + if err != nil { + panic(err) + } + + for i := 0; i < b.N; i++ { + decoder.Decode(input) + } +} + +func Benchmark_DecodeMetadata(b *testing.B) { + type Person struct { + Name string + Age int + } + + input := map[string]interface{}{ + "name": "Mitchell", + "age": 91, + "email": "foo@bar.com", + } + + var md Metadata + var result Person + config := &DecoderConfig{ + Metadata: &md, + Result: &result, + } + + decoder, err := NewDecoder(config) + if err != nil { + panic(err) + } + + for i := 0; i < b.N; i++ { + decoder.Decode(input) + } +} + +func Benchmark_DecodeMetadataEmbedded(b *testing.B) { + input := map[string]interface{}{ + "vstring": "foo", + "vunique": "bar", + } + + var md Metadata + var result EmbeddedSquash + config := &DecoderConfig{ + Metadata: &md, + Result: &result, + } + + decoder, err := NewDecoder(config) + if err != nil { + b.Fatalf("err: %s", err) + } + + for i := 0; i < b.N; i++ { + decoder.Decode(input) + } +} + +func Benchmark_DecodeTagged(b *testing.B) { + input := map[string]interface{}{ + "foo": "bar", + "bar": "value", + } + + var result Tagged + for i := 0; i < b.N; i++ { + Decode(input, &result) + } +} diff --git a/vendor/github.com/mitchellh/mapstructure/mapstructure_bugs_test.go b/vendor/github.com/mitchellh/mapstructure/mapstructure_bugs_test.go new file mode 100644 index 000000000..2e210ca24 --- /dev/null +++ b/vendor/github.com/mitchellh/mapstructure/mapstructure_bugs_test.go @@ -0,0 +1,195 @@ +package mapstructure + +import "testing" + +// GH-1 +func TestDecode_NilValue(t *testing.T) { + input := map[string]interface{}{ + "vfoo": nil, + "vother": nil, + } 
+ + var result Map + err := Decode(input, &result) + if err != nil { + t.Fatalf("should not error: %s", err) + } + + if result.Vfoo != "" { + t.Fatalf("value should be default: %s", result.Vfoo) + } + + if result.Vother != nil { + t.Fatalf("Vother should be nil: %s", result.Vother) + } +} + +// GH-10 +func TestDecode_mapInterfaceInterface(t *testing.T) { + input := map[interface{}]interface{}{ + "vfoo": nil, + "vother": nil, + } + + var result Map + err := Decode(input, &result) + if err != nil { + t.Fatalf("should not error: %s", err) + } + + if result.Vfoo != "" { + t.Fatalf("value should be default: %s", result.Vfoo) + } + + if result.Vother != nil { + t.Fatalf("Vother should be nil: %s", result.Vother) + } +} + +// #48 +func TestNestedTypePointerWithDefaults(t *testing.T) { + t.Parallel() + + input := map[string]interface{}{ + "vfoo": "foo", + "vbar": map[string]interface{}{ + "vstring": "foo", + "vint": 42, + "vbool": true, + }, + } + + result:=NestedPointer{ + Vbar: &Basic{ + Vuint: 42, + }, + } + err := Decode(input, &result) + if err != nil { + t.Fatalf("got an err: %s", err.Error()) + } + + if result.Vfoo != "foo" { + t.Errorf("vfoo value should be 'foo': %#v", result.Vfoo) + } + + if result.Vbar.Vstring != "foo" { + t.Errorf("vstring value should be 'foo': %#v", result.Vbar.Vstring) + } + + if result.Vbar.Vint != 42 { + t.Errorf("vint value should be 42: %#v", result.Vbar.Vint) + } + + if result.Vbar.Vbool != true { + t.Errorf("vbool value should be true: %#v", result.Vbar.Vbool) + } + + if result.Vbar.Vextra != "" { + t.Errorf("vextra value should be empty: %#v", result.Vbar.Vextra) + } + + // this is the error + if result.Vbar.Vuint != 42 { + t.Errorf("vuint value should be 42: %#v", result.Vbar.Vuint) + } + +} + + +type NestedSlice struct { + Vfoo string + Vbars []Basic + Vempty []Basic +} + +// #48 +func TestNestedTypeSliceWithDefaults(t *testing.T) { + t.Parallel() + + input := map[string]interface{}{ + "vfoo": "foo", + "vbars": 
[]map[string]interface{}{ + { "vstring": "foo", "vint": 42, "vbool": true }, + { "vint": 42, "vbool": true }, + }, + "vempty": []map[string]interface{}{ + { "vstring": "foo", "vint": 42, "vbool": true }, + { "vint": 42, "vbool": true }, + }, + } + + result:=NestedSlice{ + Vbars: []Basic{ + {Vuint: 42}, + {Vstring: "foo"}, + }, + } + err := Decode(input, &result) + if err != nil { + t.Fatalf("got an err: %s", err.Error()) + } + + if result.Vfoo != "foo" { + t.Errorf("vfoo value should be 'foo': %#v", result.Vfoo) + } + + if result.Vbars[0].Vstring != "foo" { + t.Errorf("vstring value should be 'foo': %#v", result.Vbars[0].Vstring) + } + // this is the error + if result.Vbars[0].Vuint != 42 { + t.Errorf("vuint value should be 42: %#v", result.Vbars[0].Vuint) + } + +} + +// #48 workaround +func TestNestedTypeWithDefaults(t *testing.T) { + t.Parallel() + + input := map[string]interface{}{ + "vfoo": "foo", + "vbar": map[string]interface{}{ + "vstring": "foo", + "vint": 42, + "vbool": true, + }, + } + + result:=Nested{ + Vbar: Basic{ + Vuint: 42, + }, + } + err := Decode(input, &result) + if err != nil { + t.Fatalf("got an err: %s", err.Error()) + } + + if result.Vfoo != "foo" { + t.Errorf("vfoo value should be 'foo': %#v", result.Vfoo) + } + + if result.Vbar.Vstring != "foo" { + t.Errorf("vstring value should be 'foo': %#v", result.Vbar.Vstring) + } + + if result.Vbar.Vint != 42 { + t.Errorf("vint value should be 42: %#v", result.Vbar.Vint) + } + + if result.Vbar.Vbool != true { + t.Errorf("vbool value should be true: %#v", result.Vbar.Vbool) + } + + if result.Vbar.Vextra != "" { + t.Errorf("vextra value should be empty: %#v", result.Vbar.Vextra) + } + + // this is the error + if result.Vbar.Vuint != 42 { + t.Errorf("vuint value should be 42: %#v", result.Vbar.Vuint) + } + +} \ No newline at end of file diff --git a/vendor/github.com/mitchellh/mapstructure/mapstructure_examples_test.go b/vendor/github.com/mitchellh/mapstructure/mapstructure_examples_test.go new file 
mode 100644 index 000000000..f17c214a8 --- /dev/null +++ b/vendor/github.com/mitchellh/mapstructure/mapstructure_examples_test.go @@ -0,0 +1,203 @@ +package mapstructure + +import ( + "fmt" +) + +func ExampleDecode() { + type Person struct { + Name string + Age int + Emails []string + Extra map[string]string + } + + // This input can come from anywhere, but typically comes from + // something like decoding JSON where we're not quite sure of the + // struct initially. + input := map[string]interface{}{ + "name": "Mitchell", + "age": 91, + "emails": []string{"one", "two", "three"}, + "extra": map[string]string{ + "twitter": "mitchellh", + }, + } + + var result Person + err := Decode(input, &result) + if err != nil { + panic(err) + } + + fmt.Printf("%#v", result) + // Output: + // mapstructure.Person{Name:"Mitchell", Age:91, Emails:[]string{"one", "two", "three"}, Extra:map[string]string{"twitter":"mitchellh"}} +} + +func ExampleDecode_errors() { + type Person struct { + Name string + Age int + Emails []string + Extra map[string]string + } + + // This input can come from anywhere, but typically comes from + // something like decoding JSON where we're not quite sure of the + // struct initially. 
+ input := map[string]interface{}{ + "name": 123, + "age": "bad value", + "emails": []int{1, 2, 3}, + } + + var result Person + err := Decode(input, &result) + if err == nil { + panic("should have an error") + } + + fmt.Println(err.Error()) + // Output: + // 5 error(s) decoding: + // + // * 'Age' expected type 'int', got unconvertible type 'string' + // * 'Emails[0]' expected type 'string', got unconvertible type 'int' + // * 'Emails[1]' expected type 'string', got unconvertible type 'int' + // * 'Emails[2]' expected type 'string', got unconvertible type 'int' + // * 'Name' expected type 'string', got unconvertible type 'int' +} + +func ExampleDecode_metadata() { + type Person struct { + Name string + Age int + } + + // This input can come from anywhere, but typically comes from + // something like decoding JSON where we're not quite sure of the + // struct initially. + input := map[string]interface{}{ + "name": "Mitchell", + "age": 91, + "email": "foo@bar.com", + } + + // For metadata, we make a more advanced DecoderConfig so we can + // more finely configure the decoder that is used. In this case, we + // just tell the decoder we want to track metadata. + var md Metadata + var result Person + config := &DecoderConfig{ + Metadata: &md, + Result: &result, + } + + decoder, err := NewDecoder(config) + if err != nil { + panic(err) + } + + if err := decoder.Decode(input); err != nil { + panic(err) + } + + fmt.Printf("Unused keys: %#v", md.Unused) + // Output: + // Unused keys: []string{"email"} +} + +func ExampleDecode_weaklyTypedInput() { + type Person struct { + Name string + Age int + Emails []string + } + + // This input can come from anywhere, but typically comes from + // something like decoding JSON, generated by a weakly typed language + // such as PHP. 
+ input := map[string]interface{}{ + "name": 123, // number => string + "age": "42", // string => number + "emails": map[string]interface{}{}, // empty map => empty array + } + + var result Person + config := &DecoderConfig{ + WeaklyTypedInput: true, + Result: &result, + } + + decoder, err := NewDecoder(config) + if err != nil { + panic(err) + } + + err = decoder.Decode(input) + if err != nil { + panic(err) + } + + fmt.Printf("%#v", result) + // Output: mapstructure.Person{Name:"123", Age:42, Emails:[]string{}} +} + +func ExampleDecode_tags() { + // Note that the mapstructure tags defined in the struct type + // can indicate which fields the values are mapped to. + type Person struct { + Name string `mapstructure:"person_name"` + Age int `mapstructure:"person_age"` + } + + input := map[string]interface{}{ + "person_name": "Mitchell", + "person_age": 91, + } + + var result Person + err := Decode(input, &result) + if err != nil { + panic(err) + } + + fmt.Printf("%#v", result) + // Output: + // mapstructure.Person{Name:"Mitchell", Age:91} +} + +func ExampleDecode_embeddedStruct() { + // Squashing multiple embedded structs is allowed using the squash tag. + // This is demonstrated by creating a composite struct of multiple types + // and decoding into it. In this case, a person can carry with it both + // a Family and a Location, as well as their own FirstName. 
+ type Family struct { + LastName string + } + type Location struct { + City string + } + type Person struct { + Family `mapstructure:",squash"` + Location `mapstructure:",squash"` + FirstName string + } + + input := map[string]interface{}{ + "FirstName": "Mitchell", + "LastName": "Hashimoto", + "City": "San Francisco", + } + + var result Person + err := Decode(input, &result) + if err != nil { + panic(err) + } + + fmt.Printf("%s %s, %s", result.FirstName, result.LastName, result.City) + // Output: + // Mitchell Hashimoto, San Francisco +} diff --git a/vendor/github.com/mitchellh/mapstructure/mapstructure_test.go b/vendor/github.com/mitchellh/mapstructure/mapstructure_test.go new file mode 100644 index 000000000..f9495727a --- /dev/null +++ b/vendor/github.com/mitchellh/mapstructure/mapstructure_test.go @@ -0,0 +1,1147 @@ +package mapstructure + +import ( + "encoding/json" + "io" + "reflect" + "sort" + "strings" + "testing" +) + +type Basic struct { + Vstring string + Vint int + Vuint uint + Vbool bool + Vfloat float64 + Vextra string + vsilent bool + Vdata interface{} + VjsonInt int + VjsonFloat float64 + VjsonNumber json.Number +} + +type BasicSquash struct { + Test Basic `mapstructure:",squash"` +} + +type Embedded struct { + Basic + Vunique string +} + +type EmbeddedPointer struct { + *Basic + Vunique string +} + +type EmbeddedSquash struct { + Basic `mapstructure:",squash"` + Vunique string +} + +type SliceAlias []string + +type EmbeddedSlice struct { + SliceAlias `mapstructure:"slice_alias"` + Vunique string +} + +type SquashOnNonStructType struct { + InvalidSquashType int `mapstructure:",squash"` +} + +type Map struct { + Vfoo string + Vother map[string]string +} + +type MapOfStruct struct { + Value map[string]Basic +} + +type Nested struct { + Vfoo string + Vbar Basic +} + +type NestedPointer struct { + Vfoo string + Vbar *Basic +} + +type NilInterface struct { + W io.Writer +} + +type Slice struct { + Vfoo string + Vbar []string +} + +type SliceOfStruct 
struct { + Value []Basic +} + +type Tagged struct { + Extra string `mapstructure:"bar,what,what"` + Value string `mapstructure:"foo"` +} + +type TypeConversionResult struct { + IntToFloat float32 + IntToUint uint + IntToBool bool + IntToString string + UintToInt int + UintToFloat float32 + UintToBool bool + UintToString string + BoolToInt int + BoolToUint uint + BoolToFloat float32 + BoolToString string + FloatToInt int + FloatToUint uint + FloatToBool bool + FloatToString string + SliceUint8ToString string + StringToInt int + StringToUint uint + StringToBool bool + StringToFloat float32 + SliceToMap map[string]interface{} + MapToSlice []interface{} +} + +func TestBasicTypes(t *testing.T) { + t.Parallel() + + input := map[string]interface{}{ + "vstring": "foo", + "vint": 42, + "Vuint": 42, + "vbool": true, + "Vfloat": 42.42, + "vsilent": true, + "vdata": 42, + "vjsonInt": json.Number("1234"), + "vjsonFloat": json.Number("1234.5"), + "vjsonNumber": json.Number("1234.5"), + } + + var result Basic + err := Decode(input, &result) + if err != nil { + t.Errorf("got an err: %s", err.Error()) + t.FailNow() + } + + if result.Vstring != "foo" { + t.Errorf("vstring value should be 'foo': %#v", result.Vstring) + } + + if result.Vint != 42 { + t.Errorf("vint value should be 42: %#v", result.Vint) + } + + if result.Vuint != 42 { + t.Errorf("vuint value should be 42: %#v", result.Vuint) + } + + if result.Vbool != true { + t.Errorf("vbool value should be true: %#v", result.Vbool) + } + + if result.Vfloat != 42.42 { + t.Errorf("vfloat value should be 42.42: %#v", result.Vfloat) + } + + if result.Vextra != "" { + t.Errorf("vextra value should be empty: %#v", result.Vextra) + } + + if result.vsilent != false { + t.Error("vsilent should not be set, it is unexported") + } + + if result.Vdata != 42 { + t.Error("vdata should be valid") + } + + if result.VjsonInt != 1234 { + t.Errorf("vjsonint value should be 1234: %#v", result.VjsonInt) + } + + if result.VjsonFloat != 1234.5 { + 
t.Errorf("vjsonfloat value should be 1234.5: %#v", result.VjsonFloat) + } + + if !reflect.DeepEqual(result.VjsonNumber, json.Number("1234.5")) { + t.Errorf("vjsonnumber value should be '1234.5': %T, %#v", result.VjsonNumber, result.VjsonNumber) + } +} + +func TestBasic_IntWithFloat(t *testing.T) { + t.Parallel() + + input := map[string]interface{}{ + "vint": float64(42), + } + + var result Basic + err := Decode(input, &result) + if err != nil { + t.Fatalf("got an err: %s", err) + } +} + +func TestBasic_Merge(t *testing.T) { + t.Parallel() + + input := map[string]interface{}{ + "vint": 42, + } + + var result Basic + result.Vuint = 100 + err := Decode(input, &result) + if err != nil { + t.Fatalf("got an err: %s", err) + } + + expected := Basic{ + Vint: 42, + Vuint: 100, + } + if !reflect.DeepEqual(result, expected) { + t.Fatalf("bad: %#v", result) + } +} + +func TestDecode_BasicSquash(t *testing.T) { + t.Parallel() + + input := map[string]interface{}{ + "vstring": "foo", + } + + var result BasicSquash + err := Decode(input, &result) + if err != nil { + t.Fatalf("got an err: %s", err.Error()) + } + + if result.Test.Vstring != "foo" { + t.Errorf("vstring value should be 'foo': %#v", result.Test.Vstring) + } +} + +func TestDecode_Embedded(t *testing.T) { + t.Parallel() + + input := map[string]interface{}{ + "vstring": "foo", + "Basic": map[string]interface{}{ + "vstring": "innerfoo", + }, + "vunique": "bar", + } + + var result Embedded + err := Decode(input, &result) + if err != nil { + t.Fatalf("got an err: %s", err.Error()) + } + + if result.Vstring != "innerfoo" { + t.Errorf("vstring value should be 'innerfoo': %#v", result.Vstring) + } + + if result.Vunique != "bar" { + t.Errorf("vunique value should be 'bar': %#v", result.Vunique) + } +} + +func TestDecode_EmbeddedPointer(t *testing.T) { + t.Parallel() + + input := map[string]interface{}{ + "vstring": "foo", + "Basic": map[string]interface{}{ + "vstring": "innerfoo", + }, + "vunique": "bar", + } + + var result 
EmbeddedPointer + err := Decode(input, &result) + if err != nil { + t.Fatalf("err: %s", err) + } + + expected := EmbeddedPointer{ + Basic: &Basic{ + Vstring: "innerfoo", + }, + Vunique: "bar", + } + if !reflect.DeepEqual(result, expected) { + t.Fatalf("bad: %#v", result) + } +} + +func TestDecode_EmbeddedSlice(t *testing.T) { + t.Parallel() + + input := map[string]interface{}{ + "slice_alias": []string{"foo", "bar"}, + "vunique": "bar", + } + + var result EmbeddedSlice + err := Decode(input, &result) + if err != nil { + t.Fatalf("got an err: %s", err.Error()) + } + + if !reflect.DeepEqual(result.SliceAlias, SliceAlias([]string{"foo", "bar"})) { + t.Errorf("slice value: %#v", result.SliceAlias) + } + + if result.Vunique != "bar" { + t.Errorf("vunique value should be 'bar': %#v", result.Vunique) + } +} + +func TestDecode_EmbeddedSquash(t *testing.T) { + t.Parallel() + + input := map[string]interface{}{ + "vstring": "foo", + "vunique": "bar", + } + + var result EmbeddedSquash + err := Decode(input, &result) + if err != nil { + t.Fatalf("got an err: %s", err.Error()) + } + + if result.Vstring != "foo" { + t.Errorf("vstring value should be 'foo': %#v", result.Vstring) + } + + if result.Vunique != "bar" { + t.Errorf("vunique value should be 'bar': %#v", result.Vunique) + } +} + +func TestDecode_SquashOnNonStructType(t *testing.T) { + t.Parallel() + + input := map[string]interface{}{ + "InvalidSquashType": 42, + } + + var result SquashOnNonStructType + err := Decode(input, &result) + if err == nil { + t.Fatal("unexpected success decoding invalid squash field type") + } else if !strings.Contains(err.Error(), "unsupported type for squash") { + t.Fatalf("unexpected error message for invalid squash field type: %s", err) + } +} + +func TestDecode_DecodeHook(t *testing.T) { + t.Parallel() + + input := map[string]interface{}{ + "vint": "WHAT", + } + + decodeHook := func(from reflect.Kind, to reflect.Kind, v interface{}) (interface{}, error) { + if from == reflect.String && to != 
reflect.String { + return 5, nil + } + + return v, nil + } + + var result Basic + config := &DecoderConfig{ + DecodeHook: decodeHook, + Result: &result, + } + + decoder, err := NewDecoder(config) + if err != nil { + t.Fatalf("err: %s", err) + } + + err = decoder.Decode(input) + if err != nil { + t.Fatalf("got an err: %s", err) + } + + if result.Vint != 5 { + t.Errorf("vint should be 5: %#v", result.Vint) + } +} + +func TestDecode_DecodeHookType(t *testing.T) { + t.Parallel() + + input := map[string]interface{}{ + "vint": "WHAT", + } + + decodeHook := func(from reflect.Type, to reflect.Type, v interface{}) (interface{}, error) { + if from.Kind() == reflect.String && + to.Kind() != reflect.String { + return 5, nil + } + + return v, nil + } + + var result Basic + config := &DecoderConfig{ + DecodeHook: decodeHook, + Result: &result, + } + + decoder, err := NewDecoder(config) + if err != nil { + t.Fatalf("err: %s", err) + } + + err = decoder.Decode(input) + if err != nil { + t.Fatalf("got an err: %s", err) + } + + if result.Vint != 5 { + t.Errorf("vint should be 5: %#v", result.Vint) + } +} + +func TestDecode_Nil(t *testing.T) { + t.Parallel() + + var input interface{} = nil + result := Basic{ + Vstring: "foo", + } + + err := Decode(input, &result) + if err != nil { + t.Fatalf("err: %s", err) + } + + if result.Vstring != "foo" { + t.Fatalf("bad: %#v", result.Vstring) + } +} + +func TestDecode_NilInterfaceHook(t *testing.T) { + t.Parallel() + + input := map[string]interface{}{ + "w": "", + } + + decodeHook := func(f, t reflect.Type, v interface{}) (interface{}, error) { + if t.String() == "io.Writer" { + return nil, nil + } + + return v, nil + } + + var result NilInterface + config := &DecoderConfig{ + DecodeHook: decodeHook, + Result: &result, + } + + decoder, err := NewDecoder(config) + if err != nil { + t.Fatalf("err: %s", err) + } + + err = decoder.Decode(input) + if err != nil { + t.Fatalf("got an err: %s", err) + } + + if result.W != nil { + t.Errorf("W should be 
nil: %#v", result.W) + } +} + +func TestDecode_NonStruct(t *testing.T) { + t.Parallel() + + input := map[string]interface{}{ + "foo": "bar", + "bar": "baz", + } + + var result map[string]string + err := Decode(input, &result) + if err != nil { + t.Fatalf("err: %s", err) + } + + if result["foo"] != "bar" { + t.Fatal("foo is not bar") + } +} + +func TestDecode_StructMatch(t *testing.T) { + t.Parallel() + + input := map[string]interface{}{ + "vbar": Basic{ + Vstring: "foo", + }, + } + + var result Nested + err := Decode(input, &result) + if err != nil { + t.Fatalf("got an err: %s", err.Error()) + } + + if result.Vbar.Vstring != "foo" { + t.Errorf("bad: %#v", result) + } +} + +func TestDecode_TypeConversion(t *testing.T) { + input := map[string]interface{}{ + "IntToFloat": 42, + "IntToUint": 42, + "IntToBool": 1, + "IntToString": 42, + "UintToInt": 42, + "UintToFloat": 42, + "UintToBool": 42, + "UintToString": 42, + "BoolToInt": true, + "BoolToUint": true, + "BoolToFloat": true, + "BoolToString": true, + "FloatToInt": 42.42, + "FloatToUint": 42.42, + "FloatToBool": 42.42, + "FloatToString": 42.42, + "SliceUint8ToString": []uint8("foo"), + "StringToInt": "42", + "StringToUint": "42", + "StringToBool": "1", + "StringToFloat": "42.42", + "SliceToMap": []interface{}{}, + "MapToSlice": map[string]interface{}{}, + } + + expectedResultStrict := TypeConversionResult{ + IntToFloat: 42.0, + IntToUint: 42, + UintToInt: 42, + UintToFloat: 42, + BoolToInt: 0, + BoolToUint: 0, + BoolToFloat: 0, + FloatToInt: 42, + FloatToUint: 42, + } + + expectedResultWeak := TypeConversionResult{ + IntToFloat: 42.0, + IntToUint: 42, + IntToBool: true, + IntToString: "42", + UintToInt: 42, + UintToFloat: 42, + UintToBool: true, + UintToString: "42", + BoolToInt: 1, + BoolToUint: 1, + BoolToFloat: 1, + BoolToString: "1", + FloatToInt: 42, + FloatToUint: 42, + FloatToBool: true, + FloatToString: "42.42", + SliceUint8ToString: "foo", + StringToInt: 42, + StringToUint: 42, + StringToBool: true, + 
StringToFloat: 42.42, + SliceToMap: map[string]interface{}{}, + MapToSlice: []interface{}{}, + } + + // Test strict type conversion + var resultStrict TypeConversionResult + err := Decode(input, &resultStrict) + if err == nil { + t.Errorf("should return an error") + } + if !reflect.DeepEqual(resultStrict, expectedResultStrict) { + t.Errorf("expected %v, got: %v", expectedResultStrict, resultStrict) + } + + // Test weak type conversion + var decoder *Decoder + var resultWeak TypeConversionResult + + config := &DecoderConfig{ + WeaklyTypedInput: true, + Result: &resultWeak, + } + + decoder, err = NewDecoder(config) + if err != nil { + t.Fatalf("err: %s", err) + } + + err = decoder.Decode(input) + if err != nil { + t.Fatalf("got an err: %s", err) + } + + if !reflect.DeepEqual(resultWeak, expectedResultWeak) { + t.Errorf("expected \n%#v, got: \n%#v", expectedResultWeak, resultWeak) + } +} + +func TestDecoder_ErrorUnused(t *testing.T) { + t.Parallel() + + input := map[string]interface{}{ + "vstring": "hello", + "foo": "bar", + } + + var result Basic + config := &DecoderConfig{ + ErrorUnused: true, + Result: &result, + } + + decoder, err := NewDecoder(config) + if err != nil { + t.Fatalf("err: %s", err) + } + + err = decoder.Decode(input) + if err == nil { + t.Fatal("expected error") + } +} + +func TestMap(t *testing.T) { + t.Parallel() + + input := map[string]interface{}{ + "vfoo": "foo", + "vother": map[interface{}]interface{}{ + "foo": "foo", + "bar": "bar", + }, + } + + var result Map + err := Decode(input, &result) + if err != nil { + t.Fatalf("got an error: %s", err) + } + + if result.Vfoo != "foo" { + t.Errorf("vfoo value should be 'foo': %#v", result.Vfoo) + } + + if result.Vother == nil { + t.Fatal("vother should not be nil") + } + + if len(result.Vother) != 2 { + t.Error("vother should have two items") + } + + if result.Vother["foo"] != "foo" { + t.Errorf("'foo' key should be foo, got: %#v", result.Vother["foo"]) + } + + if result.Vother["bar"] != "bar" { + 
t.Errorf("'bar' key should be bar, got: %#v", result.Vother["bar"]) + } +} + +func TestMapMerge(t *testing.T) { + t.Parallel() + + input := map[string]interface{}{ + "vfoo": "foo", + "vother": map[interface{}]interface{}{ + "foo": "foo", + "bar": "bar", + }, + } + + var result Map + result.Vother = map[string]string{"hello": "world"} + err := Decode(input, &result) + if err != nil { + t.Fatalf("got an error: %s", err) + } + + if result.Vfoo != "foo" { + t.Errorf("vfoo value should be 'foo': %#v", result.Vfoo) + } + + expected := map[string]string{ + "foo": "foo", + "bar": "bar", + "hello": "world", + } + if !reflect.DeepEqual(result.Vother, expected) { + t.Errorf("bad: %#v", result.Vother) + } +} + +func TestMapOfStruct(t *testing.T) { + t.Parallel() + + input := map[string]interface{}{ + "value": map[string]interface{}{ + "foo": map[string]string{"vstring": "one"}, + "bar": map[string]string{"vstring": "two"}, + }, + } + + var result MapOfStruct + err := Decode(input, &result) + if err != nil { + t.Fatalf("got an err: %s", err) + } + + if result.Value == nil { + t.Fatal("value should not be nil") + } + + if len(result.Value) != 2 { + t.Error("value should have two items") + } + + if result.Value["foo"].Vstring != "one" { + t.Errorf("foo value should be 'one', got: %s", result.Value["foo"].Vstring) + } + + if result.Value["bar"].Vstring != "two" { + t.Errorf("bar value should be 'two', got: %s", result.Value["bar"].Vstring) + } +} + +func TestNestedType(t *testing.T) { + t.Parallel() + + input := map[string]interface{}{ + "vfoo": "foo", + "vbar": map[string]interface{}{ + "vstring": "foo", + "vint": 42, + "vbool": true, + }, + } + + var result Nested + err := Decode(input, &result) + if err != nil { + t.Fatalf("got an err: %s", err.Error()) + } + + if result.Vfoo != "foo" { + t.Errorf("vfoo value should be 'foo': %#v", result.Vfoo) + } + + if result.Vbar.Vstring != "foo" { + t.Errorf("vstring value should be 'foo': %#v", result.Vbar.Vstring) + } + + if 
result.Vbar.Vint != 42 { + t.Errorf("vint value should be 42: %#v", result.Vbar.Vint) + } + + if result.Vbar.Vbool != true { + t.Errorf("vbool value should be true: %#v", result.Vbar.Vbool) + } + + if result.Vbar.Vextra != "" { + t.Errorf("vextra value should be empty: %#v", result.Vbar.Vextra) + } +} + +func TestNestedTypePointer(t *testing.T) { + t.Parallel() + + input := map[string]interface{}{ + "vfoo": "foo", + "vbar": &map[string]interface{}{ + "vstring": "foo", + "vint": 42, + "vbool": true, + }, + } + + var result NestedPointer + err := Decode(input, &result) + if err != nil { + t.Fatalf("got an err: %s", err.Error()) + } + + if result.Vfoo != "foo" { + t.Errorf("vfoo value should be 'foo': %#v", result.Vfoo) + } + + if result.Vbar.Vstring != "foo" { + t.Errorf("vstring value should be 'foo': %#v", result.Vbar.Vstring) + } + + if result.Vbar.Vint != 42 { + t.Errorf("vint value should be 42: %#v", result.Vbar.Vint) + } + + if result.Vbar.Vbool != true { + t.Errorf("vbool value should be true: %#v", result.Vbar.Vbool) + } + + if result.Vbar.Vextra != "" { + t.Errorf("vextra value should be empty: %#v", result.Vbar.Vextra) + } +} + +func TestSlice(t *testing.T) { + t.Parallel() + + inputStringSlice := map[string]interface{}{ + "vfoo": "foo", + "vbar": []string{"foo", "bar", "baz"}, + } + + inputStringSlicePointer := map[string]interface{}{ + "vfoo": "foo", + "vbar": &[]string{"foo", "bar", "baz"}, + } + + outputStringSlice := &Slice{ + "foo", + []string{"foo", "bar", "baz"}, + } + + testSliceInput(t, inputStringSlice, outputStringSlice) + testSliceInput(t, inputStringSlicePointer, outputStringSlice) +} + +func TestInvalidSlice(t *testing.T) { + t.Parallel() + + input := map[string]interface{}{ + "vfoo": "foo", + "vbar": 42, + } + + result := Slice{} + err := Decode(input, &result) + if err == nil { + t.Errorf("expected failure") + } +} + +func TestSliceOfStruct(t *testing.T) { + t.Parallel() + + input := map[string]interface{}{ + "value": 
[]map[string]interface{}{ + {"vstring": "one"}, + {"vstring": "two"}, + }, + } + + var result SliceOfStruct + err := Decode(input, &result) + if err != nil { + t.Fatalf("got unexpected error: %s", err) + } + + if len(result.Value) != 2 { + t.Fatalf("expected two values, got %d", len(result.Value)) + } + + if result.Value[0].Vstring != "one" { + t.Errorf("first value should be 'one', got: %s", result.Value[0].Vstring) + } + + if result.Value[1].Vstring != "two" { + t.Errorf("second value should be 'two', got: %s", result.Value[1].Vstring) + } +} + +func TestSliceToMap(t *testing.T) { + t.Parallel() + + input := []map[string]interface{}{ + { + "foo": "bar", + }, + { + "bar": "baz", + }, + } + + var result map[string]interface{} + err := WeakDecode(input, &result) + if err != nil { + t.Fatalf("got an error: %s", err) + } + + expected := map[string]interface{}{ + "foo": "bar", + "bar": "baz", + } + if !reflect.DeepEqual(result, expected) { + t.Errorf("bad: %#v", result) + } +} + +func TestInvalidType(t *testing.T) { + t.Parallel() + + input := map[string]interface{}{ + "vstring": 42, + } + + var result Basic + err := Decode(input, &result) + if err == nil { + t.Fatal("error should exist") + } + + derr, ok := err.(*Error) + if !ok { + t.Fatalf("error should be kind of Error, instead: %#v", err) + } + + if derr.Errors[0] != "'Vstring' expected type 'string', got unconvertible type 'int'" { + t.Errorf("got unexpected error: %s", err) + } + + inputNegIntUint := map[string]interface{}{ + "vuint": -42, + } + + err = Decode(inputNegIntUint, &result) + if err == nil { + t.Fatal("error should exist") + } + + derr, ok = err.(*Error) + if !ok { + t.Fatalf("error should be kind of Error, instead: %#v", err) + } + + if derr.Errors[0] != "cannot parse 'Vuint', -42 overflows uint" { + t.Errorf("got unexpected error: %s", err) + } + + inputNegFloatUint := map[string]interface{}{ + "vuint": -42.0, + } + + err = Decode(inputNegFloatUint, &result) + if err == nil { + t.Fatal("error 
should exist") + } + + derr, ok = err.(*Error) + if !ok { + t.Fatalf("error should be kind of Error, instead: %#v", err) + } + + if derr.Errors[0] != "cannot parse 'Vuint', -42.000000 overflows uint" { + t.Errorf("got unexpected error: %s", err) + } +} + +func TestMetadata(t *testing.T) { + t.Parallel() + + input := map[string]interface{}{ + "vfoo": "foo", + "vbar": map[string]interface{}{ + "vstring": "foo", + "Vuint": 42, + "foo": "bar", + }, + "bar": "nil", + } + + var md Metadata + var result Nested + config := &DecoderConfig{ + Metadata: &md, + Result: &result, + } + + decoder, err := NewDecoder(config) + if err != nil { + t.Fatalf("err: %s", err) + } + + err = decoder.Decode(input) + if err != nil { + t.Fatalf("err: %s", err.Error()) + } + + expectedKeys := []string{"Vbar", "Vbar.Vstring", "Vbar.Vuint", "Vfoo"} + sort.Strings(md.Keys) + if !reflect.DeepEqual(md.Keys, expectedKeys) { + t.Fatalf("bad keys: %#v", md.Keys) + } + + expectedUnused := []string{"Vbar.foo", "bar"} + if !reflect.DeepEqual(md.Unused, expectedUnused) { + t.Fatalf("bad unused: %#v", md.Unused) + } +} + +func TestMetadata_Embedded(t *testing.T) { + t.Parallel() + + input := map[string]interface{}{ + "vstring": "foo", + "vunique": "bar", + } + + var md Metadata + var result EmbeddedSquash + config := &DecoderConfig{ + Metadata: &md, + Result: &result, + } + + decoder, err := NewDecoder(config) + if err != nil { + t.Fatalf("err: %s", err) + } + + err = decoder.Decode(input) + if err != nil { + t.Fatalf("err: %s", err.Error()) + } + + expectedKeys := []string{"Vstring", "Vunique"} + + sort.Strings(md.Keys) + if !reflect.DeepEqual(md.Keys, expectedKeys) { + t.Fatalf("bad keys: %#v", md.Keys) + } + + expectedUnused := []string{} + if !reflect.DeepEqual(md.Unused, expectedUnused) { + t.Fatalf("bad unused: %#v", md.Unused) + } +} + +func TestNonPtrValue(t *testing.T) { + t.Parallel() + + err := Decode(map[string]interface{}{}, Basic{}) + if err == nil { + t.Fatal("error should exist") + } + + if 
err.Error() != "result must be a pointer" { + t.Errorf("got unexpected error: %s", err) + } +} + +func TestTagged(t *testing.T) { + t.Parallel() + + input := map[string]interface{}{ + "foo": "bar", + "bar": "value", + } + + var result Tagged + err := Decode(input, &result) + if err != nil { + t.Fatalf("unexpected error: %s", err) + } + + if result.Value != "bar" { + t.Errorf("value should be 'bar', got: %#v", result.Value) + } + + if result.Extra != "value" { + t.Errorf("extra should be 'value', got: %#v", result.Extra) + } +} + +func TestWeakDecode(t *testing.T) { + t.Parallel() + + input := map[string]interface{}{ + "foo": "4", + "bar": "value", + } + + var result struct { + Foo int + Bar string + } + + if err := WeakDecode(input, &result); err != nil { + t.Fatalf("err: %s", err) + } + if result.Foo != 4 { + t.Fatalf("bad: %#v", result) + } + if result.Bar != "value" { + t.Fatalf("bad: %#v", result) + } +} + +func testSliceInput(t *testing.T, input map[string]interface{}, expected *Slice) { + var result Slice + err := Decode(input, &result) + if err != nil { + t.Fatalf("got error: %s", err) + } + + if result.Vfoo != expected.Vfoo { + t.Errorf("Vfoo expected '%s', got '%s'", expected.Vfoo, result.Vfoo) + } + + if result.Vbar == nil { + t.Fatalf("Vbar a slice, got '%#v'", result.Vbar) + } + + if len(result.Vbar) != len(expected.Vbar) { + t.Errorf("Vbar length should be %d, got %d", len(expected.Vbar), len(result.Vbar)) + } + + for i, v := range result.Vbar { + if v != expected.Vbar[i] { + t.Errorf( + "Vbar[%d] should be '%#v', got '%#v'", + i, expected.Vbar[i], v) + } + } +} diff --git a/vendor/gopkg.in/yaml.v2/.gitrepo b/vendor/gopkg.in/yaml.v2/.gitrepo new file mode 100644 index 000000000..30bc27bab --- /dev/null +++ b/vendor/gopkg.in/yaml.v2/.gitrepo @@ -0,0 +1,11 @@ +; DO NOT EDIT (unless you know what you are doing) +; +; This subdirectory is a git "subrepo", and this file is maintained by the +; git-subrepo command. 
See https://github.com/git-commands/git-subrepo#readme +; +[subrepo] + remote = https://gopkg.in/yaml.v2.git + branch = master + commit = a5b47d31c556af34a302ce5d659e6fea44d90de0 + parent = e2999d5f77a3444f68a7fbbcfb5b79b0c22e89f3 + cmdver = 0.3.0 diff --git a/vendor/gopkg.in/yaml.v2/.travis.yml b/vendor/gopkg.in/yaml.v2/.travis.yml new file mode 100644 index 000000000..004172a2e --- /dev/null +++ b/vendor/gopkg.in/yaml.v2/.travis.yml @@ -0,0 +1,9 @@ +language: go + +go: + - 1.4 + - 1.5 + - 1.6 + - tip + +go_import_path: gopkg.in/yaml.v2 diff --git a/vendor/gopkg.in/yaml.v2/LICENSE b/vendor/gopkg.in/yaml.v2/LICENSE new file mode 100644 index 000000000..866d74a7a --- /dev/null +++ b/vendor/gopkg.in/yaml.v2/LICENSE @@ -0,0 +1,13 @@ +Copyright 2011-2016 Canonical Ltd. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
diff --git a/vendor/gopkg.in/yaml.v2/LICENSE.libyaml b/vendor/gopkg.in/yaml.v2/LICENSE.libyaml new file mode 100644 index 000000000..8da58fbf6 --- /dev/null +++ b/vendor/gopkg.in/yaml.v2/LICENSE.libyaml @@ -0,0 +1,31 @@ +The following files were ported to Go from C files of libyaml, and thus +are still covered by their original copyright and license: + + apic.go + emitterc.go + parserc.go + readerc.go + scannerc.go + writerc.go + yamlh.go + yamlprivateh.go + +Copyright (c) 2006 Kirill Simonov + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies +of the Software, and to permit persons to whom the Software is furnished to do +so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/vendor/gopkg.in/yaml.v2/README.md b/vendor/gopkg.in/yaml.v2/README.md new file mode 100644 index 000000000..1884de6a7 --- /dev/null +++ b/vendor/gopkg.in/yaml.v2/README.md @@ -0,0 +1,131 @@ +# YAML support for the Go language + +Introduction +------------ + +The yaml package enables Go programs to comfortably encode and decode YAML +values. 
It was developed within [Canonical](https://www.canonical.com) as +part of the [juju](https://juju.ubuntu.com) project, and is based on a +pure Go port of the well-known [libyaml](http://pyyaml.org/wiki/LibYAML) +C library to parse and generate YAML data quickly and reliably. + +Compatibility +------------- + +The yaml package supports most of YAML 1.1 and 1.2, including support for +anchors, tags, map merging, etc. Multi-document unmarshalling is not yet +implemented, and base-60 floats from YAML 1.1 are purposefully not +supported since they're a poor design and are gone in YAML 1.2. + +Installation and usage +---------------------- + +The import path for the package is *gopkg.in/yaml.v2*. + +To install it, run: + + go get gopkg.in/yaml.v2 + +API documentation +----------------- + +If opened in a browser, the import path itself leads to the API documentation: + + * [https://gopkg.in/yaml.v2](https://gopkg.in/yaml.v2) + +API stability +------------- + +The package API for yaml v2 will remain stable as described in [gopkg.in](https://gopkg.in). + + +License +------- + +The yaml package is licensed under the Apache License 2.0. Please see the LICENSE file for details. + + +Example +------- + +```Go +package main + +import ( + "fmt" + "log" + + "gopkg.in/yaml.v2" +) + +var data = ` +a: Easy! 
+b: + c: 2 + d: [3, 4] +` + +type T struct { + A string + B struct { + RenamedC int `yaml:"c"` + D []int `yaml:",flow"` + } +} + +func main() { + t := T{} + + err := yaml.Unmarshal([]byte(data), &t) + if err != nil { + log.Fatalf("error: %v", err) + } + fmt.Printf("--- t:\n%v\n\n", t) + + d, err := yaml.Marshal(&t) + if err != nil { + log.Fatalf("error: %v", err) + } + fmt.Printf("--- t dump:\n%s\n\n", string(d)) + + m := make(map[interface{}]interface{}) + + err = yaml.Unmarshal([]byte(data), &m) + if err != nil { + log.Fatalf("error: %v", err) + } + fmt.Printf("--- m:\n%v\n\n", m) + + d, err = yaml.Marshal(&m) + if err != nil { + log.Fatalf("error: %v", err) + } + fmt.Printf("--- m dump:\n%s\n\n", string(d)) +} +``` + +This example will generate the following output: + +``` +--- t: +{Easy! {2 [3 4]}} + +--- t dump: +a: Easy! +b: + c: 2 + d: [3, 4] + + +--- m: +map[a:Easy! b:map[c:2 d:[3 4]]] + +--- m dump: +a: Easy! +b: + c: 2 + d: + - 3 + - 4 +``` + diff --git a/vendor/gopkg.in/yaml.v2/apic.go b/vendor/gopkg.in/yaml.v2/apic.go new file mode 100644 index 000000000..95ec014e8 --- /dev/null +++ b/vendor/gopkg.in/yaml.v2/apic.go @@ -0,0 +1,742 @@ +package yaml + +import ( + "io" + "os" +) + +func yaml_insert_token(parser *yaml_parser_t, pos int, token *yaml_token_t) { + //fmt.Println("yaml_insert_token", "pos:", pos, "typ:", token.typ, "head:", parser.tokens_head, "len:", len(parser.tokens)) + + // Check if we can move the queue at the beginning of the buffer. 
+ if parser.tokens_head > 0 && len(parser.tokens) == cap(parser.tokens) { + if parser.tokens_head != len(parser.tokens) { + copy(parser.tokens, parser.tokens[parser.tokens_head:]) + } + parser.tokens = parser.tokens[:len(parser.tokens)-parser.tokens_head] + parser.tokens_head = 0 + } + parser.tokens = append(parser.tokens, *token) + if pos < 0 { + return + } + copy(parser.tokens[parser.tokens_head+pos+1:], parser.tokens[parser.tokens_head+pos:]) + parser.tokens[parser.tokens_head+pos] = *token +} + +// Create a new parser object. +func yaml_parser_initialize(parser *yaml_parser_t) bool { + *parser = yaml_parser_t{ + raw_buffer: make([]byte, 0, input_raw_buffer_size), + buffer: make([]byte, 0, input_buffer_size), + } + return true +} + +// Destroy a parser object. +func yaml_parser_delete(parser *yaml_parser_t) { + *parser = yaml_parser_t{} +} + +// String read handler. +func yaml_string_read_handler(parser *yaml_parser_t, buffer []byte) (n int, err error) { + if parser.input_pos == len(parser.input) { + return 0, io.EOF + } + n = copy(buffer, parser.input[parser.input_pos:]) + parser.input_pos += n + return n, nil +} + +// File read handler. +func yaml_file_read_handler(parser *yaml_parser_t, buffer []byte) (n int, err error) { + return parser.input_file.Read(buffer) +} + +// Set a string input. +func yaml_parser_set_input_string(parser *yaml_parser_t, input []byte) { + if parser.read_handler != nil { + panic("must set the input source only once") + } + parser.read_handler = yaml_string_read_handler + parser.input = input + parser.input_pos = 0 +} + +// Set a file input. +func yaml_parser_set_input_file(parser *yaml_parser_t, file *os.File) { + if parser.read_handler != nil { + panic("must set the input source only once") + } + parser.read_handler = yaml_file_read_handler + parser.input_file = file +} + +// Set the source encoding. 
+func yaml_parser_set_encoding(parser *yaml_parser_t, encoding yaml_encoding_t) { + if parser.encoding != yaml_ANY_ENCODING { + panic("must set the encoding only once") + } + parser.encoding = encoding +} + +// Create a new emitter object. +func yaml_emitter_initialize(emitter *yaml_emitter_t) bool { + *emitter = yaml_emitter_t{ + buffer: make([]byte, output_buffer_size), + raw_buffer: make([]byte, 0, output_raw_buffer_size), + states: make([]yaml_emitter_state_t, 0, initial_stack_size), + events: make([]yaml_event_t, 0, initial_queue_size), + } + return true +} + +// Destroy an emitter object. +func yaml_emitter_delete(emitter *yaml_emitter_t) { + *emitter = yaml_emitter_t{} +} + +// String write handler. +func yaml_string_write_handler(emitter *yaml_emitter_t, buffer []byte) error { + *emitter.output_buffer = append(*emitter.output_buffer, buffer...) + return nil +} + +// File write handler. +func yaml_file_write_handler(emitter *yaml_emitter_t, buffer []byte) error { + _, err := emitter.output_file.Write(buffer) + return err +} + +// Set a string output. +func yaml_emitter_set_output_string(emitter *yaml_emitter_t, output_buffer *[]byte) { + if emitter.write_handler != nil { + panic("must set the output target only once") + } + emitter.write_handler = yaml_string_write_handler + emitter.output_buffer = output_buffer +} + +// Set a file output. +func yaml_emitter_set_output_file(emitter *yaml_emitter_t, file io.Writer) { + if emitter.write_handler != nil { + panic("must set the output target only once") + } + emitter.write_handler = yaml_file_write_handler + emitter.output_file = file +} + +// Set the output encoding. +func yaml_emitter_set_encoding(emitter *yaml_emitter_t, encoding yaml_encoding_t) { + if emitter.encoding != yaml_ANY_ENCODING { + panic("must set the output encoding only once") + } + emitter.encoding = encoding +} + +// Set the canonical output style. 
+func yaml_emitter_set_canonical(emitter *yaml_emitter_t, canonical bool) { + emitter.canonical = canonical +} + +//// Set the indentation increment. +func yaml_emitter_set_indent(emitter *yaml_emitter_t, indent int) { + if indent < 2 || indent > 9 { + indent = 2 + } + emitter.best_indent = indent +} + +// Set the preferred line width. +func yaml_emitter_set_width(emitter *yaml_emitter_t, width int) { + if width < 0 { + width = -1 + } + emitter.best_width = width +} + +// Set if unescaped non-ASCII characters are allowed. +func yaml_emitter_set_unicode(emitter *yaml_emitter_t, unicode bool) { + emitter.unicode = unicode +} + +// Set the preferred line break character. +func yaml_emitter_set_break(emitter *yaml_emitter_t, line_break yaml_break_t) { + emitter.line_break = line_break +} + +///* +// * Destroy a token object. +// */ +// +//YAML_DECLARE(void) +//yaml_token_delete(yaml_token_t *token) +//{ +// assert(token); // Non-NULL token object expected. +// +// switch (token.type) +// { +// case YAML_TAG_DIRECTIVE_TOKEN: +// yaml_free(token.data.tag_directive.handle); +// yaml_free(token.data.tag_directive.prefix); +// break; +// +// case YAML_ALIAS_TOKEN: +// yaml_free(token.data.alias.value); +// break; +// +// case YAML_ANCHOR_TOKEN: +// yaml_free(token.data.anchor.value); +// break; +// +// case YAML_TAG_TOKEN: +// yaml_free(token.data.tag.handle); +// yaml_free(token.data.tag.suffix); +// break; +// +// case YAML_SCALAR_TOKEN: +// yaml_free(token.data.scalar.value); +// break; +// +// default: +// break; +// } +// +// memset(token, 0, sizeof(yaml_token_t)); +//} +// +///* +// * Check if a string is a valid UTF-8 sequence. +// * +// * Check 'reader.c' for more details on UTF-8 encoding. 
+// */ +// +//static int +//yaml_check_utf8(yaml_char_t *start, size_t length) +//{ +// yaml_char_t *end = start+length; +// yaml_char_t *pointer = start; +// +// while (pointer < end) { +// unsigned char octet; +// unsigned int width; +// unsigned int value; +// size_t k; +// +// octet = pointer[0]; +// width = (octet & 0x80) == 0x00 ? 1 : +// (octet & 0xE0) == 0xC0 ? 2 : +// (octet & 0xF0) == 0xE0 ? 3 : +// (octet & 0xF8) == 0xF0 ? 4 : 0; +// value = (octet & 0x80) == 0x00 ? octet & 0x7F : +// (octet & 0xE0) == 0xC0 ? octet & 0x1F : +// (octet & 0xF0) == 0xE0 ? octet & 0x0F : +// (octet & 0xF8) == 0xF0 ? octet & 0x07 : 0; +// if (!width) return 0; +// if (pointer+width > end) return 0; +// for (k = 1; k < width; k ++) { +// octet = pointer[k]; +// if ((octet & 0xC0) != 0x80) return 0; +// value = (value << 6) + (octet & 0x3F); +// } +// if (!((width == 1) || +// (width == 2 && value >= 0x80) || +// (width == 3 && value >= 0x800) || +// (width == 4 && value >= 0x10000))) return 0; +// +// pointer += width; +// } +// +// return 1; +//} +// + +// Create STREAM-START. +func yaml_stream_start_event_initialize(event *yaml_event_t, encoding yaml_encoding_t) bool { + *event = yaml_event_t{ + typ: yaml_STREAM_START_EVENT, + encoding: encoding, + } + return true +} + +// Create STREAM-END. +func yaml_stream_end_event_initialize(event *yaml_event_t) bool { + *event = yaml_event_t{ + typ: yaml_STREAM_END_EVENT, + } + return true +} + +// Create DOCUMENT-START. +func yaml_document_start_event_initialize(event *yaml_event_t, version_directive *yaml_version_directive_t, + tag_directives []yaml_tag_directive_t, implicit bool) bool { + *event = yaml_event_t{ + typ: yaml_DOCUMENT_START_EVENT, + version_directive: version_directive, + tag_directives: tag_directives, + implicit: implicit, + } + return true +} + +// Create DOCUMENT-END. 
+func yaml_document_end_event_initialize(event *yaml_event_t, implicit bool) bool { + *event = yaml_event_t{ + typ: yaml_DOCUMENT_END_EVENT, + implicit: implicit, + } + return true +} + +///* +// * Create ALIAS. +// */ +// +//YAML_DECLARE(int) +//yaml_alias_event_initialize(event *yaml_event_t, anchor *yaml_char_t) +//{ +// mark yaml_mark_t = { 0, 0, 0 } +// anchor_copy *yaml_char_t = NULL +// +// assert(event) // Non-NULL event object is expected. +// assert(anchor) // Non-NULL anchor is expected. +// +// if (!yaml_check_utf8(anchor, strlen((char *)anchor))) return 0 +// +// anchor_copy = yaml_strdup(anchor) +// if (!anchor_copy) +// return 0 +// +// ALIAS_EVENT_INIT(*event, anchor_copy, mark, mark) +// +// return 1 +//} + +// Create SCALAR. +func yaml_scalar_event_initialize(event *yaml_event_t, anchor, tag, value []byte, plain_implicit, quoted_implicit bool, style yaml_scalar_style_t) bool { + *event = yaml_event_t{ + typ: yaml_SCALAR_EVENT, + anchor: anchor, + tag: tag, + value: value, + implicit: plain_implicit, + quoted_implicit: quoted_implicit, + style: yaml_style_t(style), + } + return true +} + +// Create SEQUENCE-START. +func yaml_sequence_start_event_initialize(event *yaml_event_t, anchor, tag []byte, implicit bool, style yaml_sequence_style_t) bool { + *event = yaml_event_t{ + typ: yaml_SEQUENCE_START_EVENT, + anchor: anchor, + tag: tag, + implicit: implicit, + style: yaml_style_t(style), + } + return true +} + +// Create SEQUENCE-END. +func yaml_sequence_end_event_initialize(event *yaml_event_t) bool { + *event = yaml_event_t{ + typ: yaml_SEQUENCE_END_EVENT, + } + return true +} + +// Create MAPPING-START. +func yaml_mapping_start_event_initialize(event *yaml_event_t, anchor, tag []byte, implicit bool, style yaml_mapping_style_t) bool { + *event = yaml_event_t{ + typ: yaml_MAPPING_START_EVENT, + anchor: anchor, + tag: tag, + implicit: implicit, + style: yaml_style_t(style), + } + return true +} + +// Create MAPPING-END. 
+func yaml_mapping_end_event_initialize(event *yaml_event_t) bool { + *event = yaml_event_t{ + typ: yaml_MAPPING_END_EVENT, + } + return true +} + +// Destroy an event object. +func yaml_event_delete(event *yaml_event_t) { + *event = yaml_event_t{} +} + +///* +// * Create a document object. +// */ +// +//YAML_DECLARE(int) +//yaml_document_initialize(document *yaml_document_t, +// version_directive *yaml_version_directive_t, +// tag_directives_start *yaml_tag_directive_t, +// tag_directives_end *yaml_tag_directive_t, +// start_implicit int, end_implicit int) +//{ +// struct { +// error yaml_error_type_t +// } context +// struct { +// start *yaml_node_t +// end *yaml_node_t +// top *yaml_node_t +// } nodes = { NULL, NULL, NULL } +// version_directive_copy *yaml_version_directive_t = NULL +// struct { +// start *yaml_tag_directive_t +// end *yaml_tag_directive_t +// top *yaml_tag_directive_t +// } tag_directives_copy = { NULL, NULL, NULL } +// value yaml_tag_directive_t = { NULL, NULL } +// mark yaml_mark_t = { 0, 0, 0 } +// +// assert(document) // Non-NULL document object is expected. +// assert((tag_directives_start && tag_directives_end) || +// (tag_directives_start == tag_directives_end)) +// // Valid tag directives are expected. 
+// +// if (!STACK_INIT(&context, nodes, INITIAL_STACK_SIZE)) goto error +// +// if (version_directive) { +// version_directive_copy = yaml_malloc(sizeof(yaml_version_directive_t)) +// if (!version_directive_copy) goto error +// version_directive_copy.major = version_directive.major +// version_directive_copy.minor = version_directive.minor +// } +// +// if (tag_directives_start != tag_directives_end) { +// tag_directive *yaml_tag_directive_t +// if (!STACK_INIT(&context, tag_directives_copy, INITIAL_STACK_SIZE)) +// goto error +// for (tag_directive = tag_directives_start +// tag_directive != tag_directives_end; tag_directive ++) { +// assert(tag_directive.handle) +// assert(tag_directive.prefix) +// if (!yaml_check_utf8(tag_directive.handle, +// strlen((char *)tag_directive.handle))) +// goto error +// if (!yaml_check_utf8(tag_directive.prefix, +// strlen((char *)tag_directive.prefix))) +// goto error +// value.handle = yaml_strdup(tag_directive.handle) +// value.prefix = yaml_strdup(tag_directive.prefix) +// if (!value.handle || !value.prefix) goto error +// if (!PUSH(&context, tag_directives_copy, value)) +// goto error +// value.handle = NULL +// value.prefix = NULL +// } +// } +// +// DOCUMENT_INIT(*document, nodes.start, nodes.end, version_directive_copy, +// tag_directives_copy.start, tag_directives_copy.top, +// start_implicit, end_implicit, mark, mark) +// +// return 1 +// +//error: +// STACK_DEL(&context, nodes) +// yaml_free(version_directive_copy) +// while (!STACK_EMPTY(&context, tag_directives_copy)) { +// value yaml_tag_directive_t = POP(&context, tag_directives_copy) +// yaml_free(value.handle) +// yaml_free(value.prefix) +// } +// STACK_DEL(&context, tag_directives_copy) +// yaml_free(value.handle) +// yaml_free(value.prefix) +// +// return 0 +//} +// +///* +// * Destroy a document object. 
+// */ +// +//YAML_DECLARE(void) +//yaml_document_delete(document *yaml_document_t) +//{ +// struct { +// error yaml_error_type_t +// } context +// tag_directive *yaml_tag_directive_t +// +// context.error = YAML_NO_ERROR // Eliminate a compliler warning. +// +// assert(document) // Non-NULL document object is expected. +// +// while (!STACK_EMPTY(&context, document.nodes)) { +// node yaml_node_t = POP(&context, document.nodes) +// yaml_free(node.tag) +// switch (node.type) { +// case YAML_SCALAR_NODE: +// yaml_free(node.data.scalar.value) +// break +// case YAML_SEQUENCE_NODE: +// STACK_DEL(&context, node.data.sequence.items) +// break +// case YAML_MAPPING_NODE: +// STACK_DEL(&context, node.data.mapping.pairs) +// break +// default: +// assert(0) // Should not happen. +// } +// } +// STACK_DEL(&context, document.nodes) +// +// yaml_free(document.version_directive) +// for (tag_directive = document.tag_directives.start +// tag_directive != document.tag_directives.end +// tag_directive++) { +// yaml_free(tag_directive.handle) +// yaml_free(tag_directive.prefix) +// } +// yaml_free(document.tag_directives.start) +// +// memset(document, 0, sizeof(yaml_document_t)) +//} +// +///** +// * Get a document node. +// */ +// +//YAML_DECLARE(yaml_node_t *) +//yaml_document_get_node(document *yaml_document_t, index int) +//{ +// assert(document) // Non-NULL document object is expected. +// +// if (index > 0 && document.nodes.start + index <= document.nodes.top) { +// return document.nodes.start + index - 1 +// } +// return NULL +//} +// +///** +// * Get the root object. +// */ +// +//YAML_DECLARE(yaml_node_t *) +//yaml_document_get_root_node(document *yaml_document_t) +//{ +// assert(document) // Non-NULL document object is expected. +// +// if (document.nodes.top != document.nodes.start) { +// return document.nodes.start +// } +// return NULL +//} +// +///* +// * Add a scalar node to a document. 
+// */ +// +//YAML_DECLARE(int) +//yaml_document_add_scalar(document *yaml_document_t, +// tag *yaml_char_t, value *yaml_char_t, length int, +// style yaml_scalar_style_t) +//{ +// struct { +// error yaml_error_type_t +// } context +// mark yaml_mark_t = { 0, 0, 0 } +// tag_copy *yaml_char_t = NULL +// value_copy *yaml_char_t = NULL +// node yaml_node_t +// +// assert(document) // Non-NULL document object is expected. +// assert(value) // Non-NULL value is expected. +// +// if (!tag) { +// tag = (yaml_char_t *)YAML_DEFAULT_SCALAR_TAG +// } +// +// if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error +// tag_copy = yaml_strdup(tag) +// if (!tag_copy) goto error +// +// if (length < 0) { +// length = strlen((char *)value) +// } +// +// if (!yaml_check_utf8(value, length)) goto error +// value_copy = yaml_malloc(length+1) +// if (!value_copy) goto error +// memcpy(value_copy, value, length) +// value_copy[length] = '\0' +// +// SCALAR_NODE_INIT(node, tag_copy, value_copy, length, style, mark, mark) +// if (!PUSH(&context, document.nodes, node)) goto error +// +// return document.nodes.top - document.nodes.start +// +//error: +// yaml_free(tag_copy) +// yaml_free(value_copy) +// +// return 0 +//} +// +///* +// * Add a sequence node to a document. +// */ +// +//YAML_DECLARE(int) +//yaml_document_add_sequence(document *yaml_document_t, +// tag *yaml_char_t, style yaml_sequence_style_t) +//{ +// struct { +// error yaml_error_type_t +// } context +// mark yaml_mark_t = { 0, 0, 0 } +// tag_copy *yaml_char_t = NULL +// struct { +// start *yaml_node_item_t +// end *yaml_node_item_t +// top *yaml_node_item_t +// } items = { NULL, NULL, NULL } +// node yaml_node_t +// +// assert(document) // Non-NULL document object is expected. 
+// +// if (!tag) { +// tag = (yaml_char_t *)YAML_DEFAULT_SEQUENCE_TAG +// } +// +// if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error +// tag_copy = yaml_strdup(tag) +// if (!tag_copy) goto error +// +// if (!STACK_INIT(&context, items, INITIAL_STACK_SIZE)) goto error +// +// SEQUENCE_NODE_INIT(node, tag_copy, items.start, items.end, +// style, mark, mark) +// if (!PUSH(&context, document.nodes, node)) goto error +// +// return document.nodes.top - document.nodes.start +// +//error: +// STACK_DEL(&context, items) +// yaml_free(tag_copy) +// +// return 0 +//} +// +///* +// * Add a mapping node to a document. +// */ +// +//YAML_DECLARE(int) +//yaml_document_add_mapping(document *yaml_document_t, +// tag *yaml_char_t, style yaml_mapping_style_t) +//{ +// struct { +// error yaml_error_type_t +// } context +// mark yaml_mark_t = { 0, 0, 0 } +// tag_copy *yaml_char_t = NULL +// struct { +// start *yaml_node_pair_t +// end *yaml_node_pair_t +// top *yaml_node_pair_t +// } pairs = { NULL, NULL, NULL } +// node yaml_node_t +// +// assert(document) // Non-NULL document object is expected. +// +// if (!tag) { +// tag = (yaml_char_t *)YAML_DEFAULT_MAPPING_TAG +// } +// +// if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error +// tag_copy = yaml_strdup(tag) +// if (!tag_copy) goto error +// +// if (!STACK_INIT(&context, pairs, INITIAL_STACK_SIZE)) goto error +// +// MAPPING_NODE_INIT(node, tag_copy, pairs.start, pairs.end, +// style, mark, mark) +// if (!PUSH(&context, document.nodes, node)) goto error +// +// return document.nodes.top - document.nodes.start +// +//error: +// STACK_DEL(&context, pairs) +// yaml_free(tag_copy) +// +// return 0 +//} +// +///* +// * Append an item to a sequence node. +// */ +// +//YAML_DECLARE(int) +//yaml_document_append_sequence_item(document *yaml_document_t, +// sequence int, item int) +//{ +// struct { +// error yaml_error_type_t +// } context +// +// assert(document) // Non-NULL document is required. 
+// assert(sequence > 0 +// && document.nodes.start + sequence <= document.nodes.top) +// // Valid sequence id is required. +// assert(document.nodes.start[sequence-1].type == YAML_SEQUENCE_NODE) +// // A sequence node is required. +// assert(item > 0 && document.nodes.start + item <= document.nodes.top) +// // Valid item id is required. +// +// if (!PUSH(&context, +// document.nodes.start[sequence-1].data.sequence.items, item)) +// return 0 +// +// return 1 +//} +// +///* +// * Append a pair of a key and a value to a mapping node. +// */ +// +//YAML_DECLARE(int) +//yaml_document_append_mapping_pair(document *yaml_document_t, +// mapping int, key int, value int) +//{ +// struct { +// error yaml_error_type_t +// } context +// +// pair yaml_node_pair_t +// +// assert(document) // Non-NULL document is required. +// assert(mapping > 0 +// && document.nodes.start + mapping <= document.nodes.top) +// // Valid mapping id is required. +// assert(document.nodes.start[mapping-1].type == YAML_MAPPING_NODE) +// // A mapping node is required. +// assert(key > 0 && document.nodes.start + key <= document.nodes.top) +// // Valid key id is required. +// assert(value > 0 && document.nodes.start + value <= document.nodes.top) +// // Valid value id is required. 
+// +// pair.key = key +// pair.value = value +// +// if (!PUSH(&context, +// document.nodes.start[mapping-1].data.mapping.pairs, pair)) +// return 0 +// +// return 1 +//} +// +// diff --git a/vendor/gopkg.in/yaml.v2/decode.go b/vendor/gopkg.in/yaml.v2/decode.go new file mode 100644 index 000000000..b13ab9f07 --- /dev/null +++ b/vendor/gopkg.in/yaml.v2/decode.go @@ -0,0 +1,683 @@ +package yaml + +import ( + "encoding" + "encoding/base64" + "fmt" + "math" + "reflect" + "strconv" + "time" +) + +const ( + documentNode = 1 << iota + mappingNode + sequenceNode + scalarNode + aliasNode +) + +type node struct { + kind int + line, column int + tag string + value string + implicit bool + children []*node + anchors map[string]*node +} + +// ---------------------------------------------------------------------------- +// Parser, produces a node tree out of a libyaml event stream. + +type parser struct { + parser yaml_parser_t + event yaml_event_t + doc *node +} + +func newParser(b []byte) *parser { + p := parser{} + if !yaml_parser_initialize(&p.parser) { + panic("failed to initialize YAML emitter") + } + + if len(b) == 0 { + b = []byte{'\n'} + } + + yaml_parser_set_input_string(&p.parser, b) + + p.skip() + if p.event.typ != yaml_STREAM_START_EVENT { + panic("expected stream start event, got " + strconv.Itoa(int(p.event.typ))) + } + p.skip() + return &p +} + +func (p *parser) destroy() { + if p.event.typ != yaml_NO_EVENT { + yaml_event_delete(&p.event) + } + yaml_parser_delete(&p.parser) +} + +func (p *parser) skip() { + if p.event.typ != yaml_NO_EVENT { + if p.event.typ == yaml_STREAM_END_EVENT { + failf("attempted to go past the end of stream; corrupted value?") + } + yaml_event_delete(&p.event) + } + if !yaml_parser_parse(&p.parser, &p.event) { + p.fail() + } +} + +func (p *parser) fail() { + var where string + var line int + if p.parser.problem_mark.line != 0 { + line = p.parser.problem_mark.line + } else if p.parser.context_mark.line != 0 { + line = 
p.parser.context_mark.line + } + if line != 0 { + where = "line " + strconv.Itoa(line) + ": " + } + var msg string + if len(p.parser.problem) > 0 { + msg = p.parser.problem + } else { + msg = "unknown problem parsing YAML content" + } + failf("%s%s", where, msg) +} + +func (p *parser) anchor(n *node, anchor []byte) { + if anchor != nil { + p.doc.anchors[string(anchor)] = n + } +} + +func (p *parser) parse() *node { + switch p.event.typ { + case yaml_SCALAR_EVENT: + return p.scalar() + case yaml_ALIAS_EVENT: + return p.alias() + case yaml_MAPPING_START_EVENT: + return p.mapping() + case yaml_SEQUENCE_START_EVENT: + return p.sequence() + case yaml_DOCUMENT_START_EVENT: + return p.document() + case yaml_STREAM_END_EVENT: + // Happens when attempting to decode an empty buffer. + return nil + default: + panic("attempted to parse unknown event: " + strconv.Itoa(int(p.event.typ))) + } + panic("unreachable") +} + +func (p *parser) node(kind int) *node { + return &node{ + kind: kind, + line: p.event.start_mark.line, + column: p.event.start_mark.column, + } +} + +func (p *parser) document() *node { + n := p.node(documentNode) + n.anchors = make(map[string]*node) + p.doc = n + p.skip() + n.children = append(n.children, p.parse()) + if p.event.typ != yaml_DOCUMENT_END_EVENT { + panic("expected end of document event but got " + strconv.Itoa(int(p.event.typ))) + } + p.skip() + return n +} + +func (p *parser) alias() *node { + n := p.node(aliasNode) + n.value = string(p.event.anchor) + p.skip() + return n +} + +func (p *parser) scalar() *node { + n := p.node(scalarNode) + n.value = string(p.event.value) + n.tag = string(p.event.tag) + n.implicit = p.event.implicit + p.anchor(n, p.event.anchor) + p.skip() + return n +} + +func (p *parser) sequence() *node { + n := p.node(sequenceNode) + p.anchor(n, p.event.anchor) + p.skip() + for p.event.typ != yaml_SEQUENCE_END_EVENT { + n.children = append(n.children, p.parse()) + } + p.skip() + return n +} + +func (p *parser) mapping() *node { 
+ n := p.node(mappingNode) + p.anchor(n, p.event.anchor) + p.skip() + for p.event.typ != yaml_MAPPING_END_EVENT { + n.children = append(n.children, p.parse(), p.parse()) + } + p.skip() + return n +} + +// ---------------------------------------------------------------------------- +// Decoder, unmarshals a node into a provided value. + +type decoder struct { + doc *node + aliases map[string]bool + mapType reflect.Type + terrors []string +} + +var ( + mapItemType = reflect.TypeOf(MapItem{}) + durationType = reflect.TypeOf(time.Duration(0)) + defaultMapType = reflect.TypeOf(map[interface{}]interface{}{}) + ifaceType = defaultMapType.Elem() +) + +func newDecoder() *decoder { + d := &decoder{mapType: defaultMapType} + d.aliases = make(map[string]bool) + return d +} + +func (d *decoder) terror(n *node, tag string, out reflect.Value) { + if n.tag != "" { + tag = n.tag + } + value := n.value + if tag != yaml_SEQ_TAG && tag != yaml_MAP_TAG { + if len(value) > 10 { + value = " `" + value[:7] + "...`" + } else { + value = " `" + value + "`" + } + } + d.terrors = append(d.terrors, fmt.Sprintf("line %d: cannot unmarshal %s%s into %s", n.line+1, shortTag(tag), value, out.Type())) +} + +func (d *decoder) callUnmarshaler(n *node, u Unmarshaler) (good bool) { + terrlen := len(d.terrors) + err := u.UnmarshalYAML(func(v interface{}) (err error) { + defer handleErr(&err) + d.unmarshal(n, reflect.ValueOf(v)) + if len(d.terrors) > terrlen { + issues := d.terrors[terrlen:] + d.terrors = d.terrors[:terrlen] + return &TypeError{issues} + } + return nil + }) + if e, ok := err.(*TypeError); ok { + d.terrors = append(d.terrors, e.Errors...) + return false + } + if err != nil { + fail(err) + } + return true +} + +// d.prepare initializes and dereferences pointers and calls UnmarshalYAML +// if a value is found to implement it. 
+// It returns the initialized and dereferenced out value, whether +// unmarshalling was already done by UnmarshalYAML, and if so whether +// its types unmarshalled appropriately. +// +// If n holds a null value, prepare returns before doing anything. +func (d *decoder) prepare(n *node, out reflect.Value) (newout reflect.Value, unmarshaled, good bool) { + if n.tag == yaml_NULL_TAG || n.kind == scalarNode && n.tag == "" && (n.value == "null" || n.value == "" && n.implicit) { + return out, false, false + } + again := true + for again { + again = false + if out.Kind() == reflect.Ptr { + if out.IsNil() { + out.Set(reflect.New(out.Type().Elem())) + } + out = out.Elem() + again = true + } + if out.CanAddr() { + if u, ok := out.Addr().Interface().(Unmarshaler); ok { + good = d.callUnmarshaler(n, u) + return out, true, good + } + } + } + return out, false, false +} + +func (d *decoder) unmarshal(n *node, out reflect.Value) (good bool) { + switch n.kind { + case documentNode: + return d.document(n, out) + case aliasNode: + return d.alias(n, out) + } + out, unmarshaled, good := d.prepare(n, out) + if unmarshaled { + return good + } + switch n.kind { + case scalarNode: + good = d.scalar(n, out) + case mappingNode: + good = d.mapping(n, out) + case sequenceNode: + good = d.sequence(n, out) + default: + panic("internal error: unknown node kind: " + strconv.Itoa(n.kind)) + } + return good +} + +func (d *decoder) document(n *node, out reflect.Value) (good bool) { + if len(n.children) == 1 { + d.doc = n + d.unmarshal(n.children[0], out) + return true + } + return false +} + +func (d *decoder) alias(n *node, out reflect.Value) (good bool) { + an, ok := d.doc.anchors[n.value] + if !ok { + failf("unknown anchor '%s' referenced", n.value) + } + if d.aliases[n.value] { + failf("anchor '%s' value contains itself", n.value) + } + d.aliases[n.value] = true + good = d.unmarshal(an, out) + delete(d.aliases, n.value) + return good +} + +var zeroValue reflect.Value + +func resetMap(out 
reflect.Value) { + for _, k := range out.MapKeys() { + out.SetMapIndex(k, zeroValue) + } +} + +func (d *decoder) scalar(n *node, out reflect.Value) (good bool) { + var tag string + var resolved interface{} + if n.tag == "" && !n.implicit { + tag = yaml_STR_TAG + resolved = n.value + } else { + tag, resolved = resolve(n.tag, n.value) + if tag == yaml_BINARY_TAG { + data, err := base64.StdEncoding.DecodeString(resolved.(string)) + if err != nil { + failf("!!binary value contains invalid base64 data") + } + resolved = string(data) + } + } + if resolved == nil { + if out.Kind() == reflect.Map && !out.CanAddr() { + resetMap(out) + } else { + out.Set(reflect.Zero(out.Type())) + } + return true + } + if s, ok := resolved.(string); ok && out.CanAddr() { + if u, ok := out.Addr().Interface().(encoding.TextUnmarshaler); ok { + err := u.UnmarshalText([]byte(s)) + if err != nil { + fail(err) + } + return true + } + } + switch out.Kind() { + case reflect.String: + if tag == yaml_BINARY_TAG { + out.SetString(resolved.(string)) + good = true + } else if resolved != nil { + out.SetString(n.value) + good = true + } + case reflect.Interface: + if resolved == nil { + out.Set(reflect.Zero(out.Type())) + } else { + out.Set(reflect.ValueOf(resolved)) + } + good = true + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + switch resolved := resolved.(type) { + case int: + if !out.OverflowInt(int64(resolved)) { + out.SetInt(int64(resolved)) + good = true + } + case int64: + if !out.OverflowInt(resolved) { + out.SetInt(resolved) + good = true + } + case uint64: + if resolved <= math.MaxInt64 && !out.OverflowInt(int64(resolved)) { + out.SetInt(int64(resolved)) + good = true + } + case float64: + if resolved <= math.MaxInt64 && !out.OverflowInt(int64(resolved)) { + out.SetInt(int64(resolved)) + good = true + } + case string: + if out.Type() == durationType { + d, err := time.ParseDuration(resolved) + if err == nil { + out.SetInt(int64(d)) + good = true + } + } + } + 
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + switch resolved := resolved.(type) { + case int: + if resolved >= 0 && !out.OverflowUint(uint64(resolved)) { + out.SetUint(uint64(resolved)) + good = true + } + case int64: + if resolved >= 0 && !out.OverflowUint(uint64(resolved)) { + out.SetUint(uint64(resolved)) + good = true + } + case uint64: + if !out.OverflowUint(uint64(resolved)) { + out.SetUint(uint64(resolved)) + good = true + } + case float64: + if resolved <= math.MaxUint64 && !out.OverflowUint(uint64(resolved)) { + out.SetUint(uint64(resolved)) + good = true + } + } + case reflect.Bool: + switch resolved := resolved.(type) { + case bool: + out.SetBool(resolved) + good = true + } + case reflect.Float32, reflect.Float64: + switch resolved := resolved.(type) { + case int: + out.SetFloat(float64(resolved)) + good = true + case int64: + out.SetFloat(float64(resolved)) + good = true + case uint64: + out.SetFloat(float64(resolved)) + good = true + case float64: + out.SetFloat(resolved) + good = true + } + case reflect.Ptr: + if out.Type().Elem() == reflect.TypeOf(resolved) { + // TODO DOes this make sense? When is out a Ptr except when decoding a nil value? + elem := reflect.New(out.Type().Elem()) + elem.Elem().Set(reflect.ValueOf(resolved)) + out.Set(elem) + good = true + } + } + if !good { + d.terror(n, tag, out) + } + return good +} + +func settableValueOf(i interface{}) reflect.Value { + v := reflect.ValueOf(i) + sv := reflect.New(v.Type()).Elem() + sv.Set(v) + return sv +} + +func (d *decoder) sequence(n *node, out reflect.Value) (good bool) { + l := len(n.children) + + var iface reflect.Value + switch out.Kind() { + case reflect.Slice: + out.Set(reflect.MakeSlice(out.Type(), l, l)) + case reflect.Interface: + // No type hints. Will have to use a generic sequence. 
+ iface = out + out = settableValueOf(make([]interface{}, l)) + default: + d.terror(n, yaml_SEQ_TAG, out) + return false + } + et := out.Type().Elem() + + j := 0 + for i := 0; i < l; i++ { + e := reflect.New(et).Elem() + if ok := d.unmarshal(n.children[i], e); ok { + out.Index(j).Set(e) + j++ + } + } + out.Set(out.Slice(0, j)) + if iface.IsValid() { + iface.Set(out) + } + return true +} + +func (d *decoder) mapping(n *node, out reflect.Value) (good bool) { + switch out.Kind() { + case reflect.Struct: + return d.mappingStruct(n, out) + case reflect.Slice: + return d.mappingSlice(n, out) + case reflect.Map: + // okay + case reflect.Interface: + if d.mapType.Kind() == reflect.Map { + iface := out + out = reflect.MakeMap(d.mapType) + iface.Set(out) + } else { + slicev := reflect.New(d.mapType).Elem() + if !d.mappingSlice(n, slicev) { + return false + } + out.Set(slicev) + return true + } + default: + d.terror(n, yaml_MAP_TAG, out) + return false + } + outt := out.Type() + kt := outt.Key() + et := outt.Elem() + + mapType := d.mapType + if outt.Key() == ifaceType && outt.Elem() == ifaceType { + d.mapType = outt + } + + if out.IsNil() { + out.Set(reflect.MakeMap(outt)) + } + l := len(n.children) + for i := 0; i < l; i += 2 { + if isMerge(n.children[i]) { + d.merge(n.children[i+1], out) + continue + } + k := reflect.New(kt).Elem() + if d.unmarshal(n.children[i], k) { + kkind := k.Kind() + if kkind == reflect.Interface { + kkind = k.Elem().Kind() + } + if kkind == reflect.Map || kkind == reflect.Slice { + failf("invalid map key: %#v", k.Interface()) + } + e := reflect.New(et).Elem() + if d.unmarshal(n.children[i+1], e) { + out.SetMapIndex(k, e) + } + } + } + d.mapType = mapType + return true +} + +func (d *decoder) mappingSlice(n *node, out reflect.Value) (good bool) { + outt := out.Type() + if outt.Elem() != mapItemType { + d.terror(n, yaml_MAP_TAG, out) + return false + } + + mapType := d.mapType + d.mapType = outt + + var slice []MapItem + var l = len(n.children) + for i 
:= 0; i < l; i += 2 { + if isMerge(n.children[i]) { + d.merge(n.children[i+1], out) + continue + } + item := MapItem{} + k := reflect.ValueOf(&item.Key).Elem() + if d.unmarshal(n.children[i], k) { + v := reflect.ValueOf(&item.Value).Elem() + if d.unmarshal(n.children[i+1], v) { + slice = append(slice, item) + } + } + } + out.Set(reflect.ValueOf(slice)) + d.mapType = mapType + return true +} + +func (d *decoder) mappingStruct(n *node, out reflect.Value) (good bool) { + sinfo, err := getStructInfo(out.Type()) + if err != nil { + panic(err) + } + name := settableValueOf("") + l := len(n.children) + + var inlineMap reflect.Value + var elemType reflect.Type + if sinfo.InlineMap != -1 { + inlineMap = out.Field(sinfo.InlineMap) + inlineMap.Set(reflect.New(inlineMap.Type()).Elem()) + elemType = inlineMap.Type().Elem() + } + + for i := 0; i < l; i += 2 { + ni := n.children[i] + if isMerge(ni) { + d.merge(n.children[i+1], out) + continue + } + if !d.unmarshal(ni, name) { + continue + } + if info, ok := sinfo.FieldsMap[name.String()]; ok { + var field reflect.Value + if info.Inline == nil { + field = out.Field(info.Num) + } else { + field = out.FieldByIndex(info.Inline) + } + d.unmarshal(n.children[i+1], field) + } else if sinfo.InlineMap != -1 { + if inlineMap.IsNil() { + inlineMap.Set(reflect.MakeMap(inlineMap.Type())) + } + value := reflect.New(elemType).Elem() + d.unmarshal(n.children[i+1], value) + inlineMap.SetMapIndex(name, value) + } + } + return true +} + +func failWantMap() { + failf("map merge requires map or sequence of maps as the value") +} + +func (d *decoder) merge(n *node, out reflect.Value) { + switch n.kind { + case mappingNode: + d.unmarshal(n, out) + case aliasNode: + an, ok := d.doc.anchors[n.value] + if ok && an.kind != mappingNode { + failWantMap() + } + d.unmarshal(n, out) + case sequenceNode: + // Step backwards as earlier nodes take precedence. 
+ for i := len(n.children) - 1; i >= 0; i-- { + ni := n.children[i] + if ni.kind == aliasNode { + an, ok := d.doc.anchors[ni.value] + if ok && an.kind != mappingNode { + failWantMap() + } + } else if ni.kind != mappingNode { + failWantMap() + } + d.unmarshal(ni, out) + } + default: + failWantMap() + } +} + +func isMerge(n *node) bool { + return n.kind == scalarNode && n.value == "<<" && (n.implicit == true || n.tag == yaml_MERGE_TAG) +} diff --git a/vendor/gopkg.in/yaml.v2/decode_test.go b/vendor/gopkg.in/yaml.v2/decode_test.go new file mode 100644 index 000000000..3da6fadf8 --- /dev/null +++ b/vendor/gopkg.in/yaml.v2/decode_test.go @@ -0,0 +1,989 @@ +package yaml_test + +import ( + "errors" + . "gopkg.in/check.v1" + "gopkg.in/yaml.v2" + "math" + "net" + "reflect" + "strings" + "time" +) + +var unmarshalIntTest = 123 + +var unmarshalTests = []struct { + data string + value interface{} +}{ + { + "", + &struct{}{}, + }, { + "{}", &struct{}{}, + }, { + "v: hi", + map[string]string{"v": "hi"}, + }, { + "v: hi", map[string]interface{}{"v": "hi"}, + }, { + "v: true", + map[string]string{"v": "true"}, + }, { + "v: true", + map[string]interface{}{"v": true}, + }, { + "v: 10", + map[string]interface{}{"v": 10}, + }, { + "v: 0b10", + map[string]interface{}{"v": 2}, + }, { + "v: 0xA", + map[string]interface{}{"v": 10}, + }, { + "v: 4294967296", + map[string]int64{"v": 4294967296}, + }, { + "v: 0.1", + map[string]interface{}{"v": 0.1}, + }, { + "v: .1", + map[string]interface{}{"v": 0.1}, + }, { + "v: .Inf", + map[string]interface{}{"v": math.Inf(+1)}, + }, { + "v: -.Inf", + map[string]interface{}{"v": math.Inf(-1)}, + }, { + "v: -10", + map[string]interface{}{"v": -10}, + }, { + "v: -.1", + map[string]interface{}{"v": -0.1}, + }, + + // Simple values. 
+ { + "123", + &unmarshalIntTest, + }, + + // Floats from spec + { + "canonical: 6.8523e+5", + map[string]interface{}{"canonical": 6.8523e+5}, + }, { + "expo: 685.230_15e+03", + map[string]interface{}{"expo": 685.23015e+03}, + }, { + "fixed: 685_230.15", + map[string]interface{}{"fixed": 685230.15}, + }, { + "neginf: -.inf", + map[string]interface{}{"neginf": math.Inf(-1)}, + }, { + "fixed: 685_230.15", + map[string]float64{"fixed": 685230.15}, + }, + //{"sexa: 190:20:30.15", map[string]interface{}{"sexa": 0}}, // Unsupported + //{"notanum: .NaN", map[string]interface{}{"notanum": math.NaN()}}, // Equality of NaN fails. + + // Bools from spec + { + "canonical: y", + map[string]interface{}{"canonical": true}, + }, { + "answer: NO", + map[string]interface{}{"answer": false}, + }, { + "logical: True", + map[string]interface{}{"logical": true}, + }, { + "option: on", + map[string]interface{}{"option": true}, + }, { + "option: on", + map[string]bool{"option": true}, + }, + // Ints from spec + { + "canonical: 685230", + map[string]interface{}{"canonical": 685230}, + }, { + "decimal: +685_230", + map[string]interface{}{"decimal": 685230}, + }, { + "octal: 02472256", + map[string]interface{}{"octal": 685230}, + }, { + "hexa: 0x_0A_74_AE", + map[string]interface{}{"hexa": 685230}, + }, { + "bin: 0b1010_0111_0100_1010_1110", + map[string]interface{}{"bin": 685230}, + }, { + "bin: -0b101010", + map[string]interface{}{"bin": -42}, + }, { + "decimal: +685_230", + map[string]int{"decimal": 685230}, + }, + + //{"sexa: 190:20:30", map[string]interface{}{"sexa": 0}}, // Unsupported + + // Nulls from spec + { + "empty:", + map[string]interface{}{"empty": nil}, + }, { + "canonical: ~", + map[string]interface{}{"canonical": nil}, + }, { + "english: null", + map[string]interface{}{"english": nil}, + }, { + "~: null key", + map[interface{}]string{nil: "null key"}, + }, { + "empty:", + map[string]*bool{"empty": nil}, + }, + + // Flow sequence + { + "seq: [A,B]", + 
map[string]interface{}{"seq": []interface{}{"A", "B"}}, + }, { + "seq: [A,B,C,]", + map[string][]string{"seq": []string{"A", "B", "C"}}, + }, { + "seq: [A,1,C]", + map[string][]string{"seq": []string{"A", "1", "C"}}, + }, { + "seq: [A,1,C]", + map[string][]int{"seq": []int{1}}, + }, { + "seq: [A,1,C]", + map[string]interface{}{"seq": []interface{}{"A", 1, "C"}}, + }, + // Block sequence + { + "seq:\n - A\n - B", + map[string]interface{}{"seq": []interface{}{"A", "B"}}, + }, { + "seq:\n - A\n - B\n - C", + map[string][]string{"seq": []string{"A", "B", "C"}}, + }, { + "seq:\n - A\n - 1\n - C", + map[string][]string{"seq": []string{"A", "1", "C"}}, + }, { + "seq:\n - A\n - 1\n - C", + map[string][]int{"seq": []int{1}}, + }, { + "seq:\n - A\n - 1\n - C", + map[string]interface{}{"seq": []interface{}{"A", 1, "C"}}, + }, + + // Literal block scalar + { + "scalar: | # Comment\n\n literal\n\n \ttext\n\n", + map[string]string{"scalar": "\nliteral\n\n\ttext\n"}, + }, + + // Folded block scalar + { + "scalar: > # Comment\n\n folded\n line\n \n next\n line\n * one\n * two\n\n last\n line\n\n", + map[string]string{"scalar": "\nfolded line\nnext line\n * one\n * two\n\nlast line\n"}, + }, + + // Map inside interface with no type hints. + { + "a: {b: c}", + map[interface{}]interface{}{"a": map[interface{}]interface{}{"b": "c"}}, + }, + + // Structs and type conversions. 
+ { + "hello: world", + &struct{ Hello string }{"world"}, + }, { + "a: {b: c}", + &struct{ A struct{ B string } }{struct{ B string }{"c"}}, + }, { + "a: {b: c}", + &struct{ A *struct{ B string } }{&struct{ B string }{"c"}}, + }, { + "a: {b: c}", + &struct{ A map[string]string }{map[string]string{"b": "c"}}, + }, { + "a: {b: c}", + &struct{ A *map[string]string }{&map[string]string{"b": "c"}}, + }, { + "a:", + &struct{ A map[string]string }{}, + }, { + "a: 1", + &struct{ A int }{1}, + }, { + "a: 1", + &struct{ A float64 }{1}, + }, { + "a: 1.0", + &struct{ A int }{1}, + }, { + "a: 1.0", + &struct{ A uint }{1}, + }, { + "a: [1, 2]", + &struct{ A []int }{[]int{1, 2}}, + }, { + "a: 1", + &struct{ B int }{0}, + }, { + "a: 1", + &struct { + B int "a" + }{1}, + }, { + "a: y", + &struct{ A bool }{true}, + }, + + // Some cross type conversions + { + "v: 42", + map[string]uint{"v": 42}, + }, { + "v: -42", + map[string]uint{}, + }, { + "v: 4294967296", + map[string]uint64{"v": 4294967296}, + }, { + "v: -4294967296", + map[string]uint64{}, + }, + + // int + { + "int_max: 2147483647", + map[string]int{"int_max": math.MaxInt32}, + }, + { + "int_min: -2147483648", + map[string]int{"int_min": math.MinInt32}, + }, + { + "int_overflow: 9223372036854775808", // math.MaxInt64 + 1 + map[string]int{}, + }, + + // int64 + { + "int64_max: 9223372036854775807", + map[string]int64{"int64_max": math.MaxInt64}, + }, + { + "int64_max_base2: 0b111111111111111111111111111111111111111111111111111111111111111", + map[string]int64{"int64_max_base2": math.MaxInt64}, + }, + { + "int64_min: -9223372036854775808", + map[string]int64{"int64_min": math.MinInt64}, + }, + { + "int64_neg_base2: -0b111111111111111111111111111111111111111111111111111111111111111", + map[string]int64{"int64_neg_base2": -math.MaxInt64}, + }, + { + "int64_overflow: 9223372036854775808", // math.MaxInt64 + 1 + map[string]int64{}, + }, + + // uint + { + "uint_min: 0", + map[string]uint{"uint_min": 0}, + }, + { + "uint_max: 
4294967295", + map[string]uint{"uint_max": math.MaxUint32}, + }, + { + "uint_underflow: -1", + map[string]uint{}, + }, + + // uint64 + { + "uint64_min: 0", + map[string]uint{"uint64_min": 0}, + }, + { + "uint64_max: 18446744073709551615", + map[string]uint64{"uint64_max": math.MaxUint64}, + }, + { + "uint64_max_base2: 0b1111111111111111111111111111111111111111111111111111111111111111", + map[string]uint64{"uint64_max_base2": math.MaxUint64}, + }, + { + "uint64_maxint64: 9223372036854775807", + map[string]uint64{"uint64_maxint64": math.MaxInt64}, + }, + { + "uint64_underflow: -1", + map[string]uint64{}, + }, + + // float32 + { + "float32_max: 3.40282346638528859811704183484516925440e+38", + map[string]float32{"float32_max": math.MaxFloat32}, + }, + { + "float32_nonzero: 1.401298464324817070923729583289916131280e-45", + map[string]float32{"float32_nonzero": math.SmallestNonzeroFloat32}, + }, + { + "float32_maxuint64: 18446744073709551615", + map[string]float32{"float32_maxuint64": float32(math.MaxUint64)}, + }, + { + "float32_maxuint64+1: 18446744073709551616", + map[string]float32{"float32_maxuint64+1": float32(math.MaxUint64 + 1)}, + }, + + // float64 + { + "float64_max: 1.797693134862315708145274237317043567981e+308", + map[string]float64{"float64_max": math.MaxFloat64}, + }, + { + "float64_nonzero: 4.940656458412465441765687928682213723651e-324", + map[string]float64{"float64_nonzero": math.SmallestNonzeroFloat64}, + }, + { + "float64_maxuint64: 18446744073709551615", + map[string]float64{"float64_maxuint64": float64(math.MaxUint64)}, + }, + { + "float64_maxuint64+1: 18446744073709551616", + map[string]float64{"float64_maxuint64+1": float64(math.MaxUint64 + 1)}, + }, + + // Overflow cases. + { + "v: 4294967297", + map[string]int32{}, + }, { + "v: 128", + map[string]int8{}, + }, + + // Quoted values. 
+ { + "'1': '\"2\"'", + map[interface{}]interface{}{"1": "\"2\""}, + }, { + "v:\n- A\n- 'B\n\n C'\n", + map[string][]string{"v": []string{"A", "B\nC"}}, + }, + + // Explicit tags. + { + "v: !!float '1.1'", + map[string]interface{}{"v": 1.1}, + }, { + "v: !!null ''", + map[string]interface{}{"v": nil}, + }, { + "%TAG !y! tag:yaml.org,2002:\n---\nv: !y!int '1'", + map[string]interface{}{"v": 1}, + }, + + // Anchors and aliases. + { + "a: &x 1\nb: &y 2\nc: *x\nd: *y\n", + &struct{ A, B, C, D int }{1, 2, 1, 2}, + }, { + "a: &a {c: 1}\nb: *a", + &struct { + A, B struct { + C int + } + }{struct{ C int }{1}, struct{ C int }{1}}, + }, { + "a: &a [1, 2]\nb: *a", + &struct{ B []int }{[]int{1, 2}}, + }, { + "b: *a\na: &a {c: 1}", + &struct { + A, B struct { + C int + } + }{struct{ C int }{1}, struct{ C int }{1}}, + }, + + // Bug #1133337 + { + "foo: ''", + map[string]*string{"foo": new(string)}, + }, { + "foo: null", + map[string]string{"foo": ""}, + }, { + "foo: null", + map[string]interface{}{"foo": nil}, + }, + + // Ignored field + { + "a: 1\nb: 2\n", + &struct { + A int + B int "-" + }{1, 0}, + }, + + // Bug #1191981 + { + "" + + "%YAML 1.1\n" + + "--- !!str\n" + + `"Generic line break (no glyph)\n\` + "\n" + + ` Generic line break (glyphed)\n\` + "\n" + + ` Line separator\u2028\` + "\n" + + ` Paragraph separator\u2029"` + "\n", + "" + + "Generic line break (no glyph)\n" + + "Generic line break (glyphed)\n" + + "Line separator\u2028Paragraph separator\u2029", + }, + + // Struct inlining + { + "a: 1\nb: 2\nc: 3\n", + &struct { + A int + C inlineB `yaml:",inline"` + }{1, inlineB{2, inlineC{3}}}, + }, + + // Map inlining + { + "a: 1\nb: 2\nc: 3\n", + &struct { + A int + C map[string]int `yaml:",inline"` + }{1, map[string]int{"b": 2, "c": 3}}, + }, + + // bug 1243827 + { + "a: -b_c", + map[string]interface{}{"a": "-b_c"}, + }, + { + "a: +b_c", + map[string]interface{}{"a": "+b_c"}, + }, + { + "a: 50cent_of_dollar", + map[string]interface{}{"a": "50cent_of_dollar"}, + }, + + 
// Duration + { + "a: 3s", + map[string]time.Duration{"a": 3 * time.Second}, + }, + + // Issue #24. + { + "a: ", + map[string]string{"a": ""}, + }, + + // Base 60 floats are obsolete and unsupported. + { + "a: 1:1\n", + map[string]string{"a": "1:1"}, + }, + + // Binary data. + { + "a: !!binary gIGC\n", + map[string]string{"a": "\x80\x81\x82"}, + }, { + "a: !!binary |\n " + strings.Repeat("kJCQ", 17) + "kJ\n CQ\n", + map[string]string{"a": strings.Repeat("\x90", 54)}, + }, { + "a: !!binary |\n " + strings.Repeat("A", 70) + "\n ==\n", + map[string]string{"a": strings.Repeat("\x00", 52)}, + }, + + // Ordered maps. + { + "{b: 2, a: 1, d: 4, c: 3, sub: {e: 5}}", + &yaml.MapSlice{{"b", 2}, {"a", 1}, {"d", 4}, {"c", 3}, {"sub", yaml.MapSlice{{"e", 5}}}}, + }, + + // Issue #39. + { + "a:\n b:\n c: d\n", + map[string]struct{ B interface{} }{"a": {map[interface{}]interface{}{"c": "d"}}}, + }, + + // Custom map type. + { + "a: {b: c}", + M{"a": M{"b": "c"}}, + }, + + // Support encoding.TextUnmarshaler. + { + "a: 1.2.3.4\n", + map[string]net.IP{"a": net.IPv4(1, 2, 3, 4)}, + }, + { + "a: 2015-02-24T18:19:39Z\n", + map[string]time.Time{"a": time.Unix(1424801979, 0)}, + }, + + // Encode empty lists as zero-length slices. + { + "a: []", + &struct{ A []int }{[]int{}}, + }, + + // UTF-16-LE + { + "\xff\xfe\xf1\x00o\x00\xf1\x00o\x00:\x00 \x00v\x00e\x00r\x00y\x00 \x00y\x00e\x00s\x00\n\x00", + M{"ñoño": "very yes"}, + }, + // UTF-16-LE with surrogate. + { + "\xff\xfe\xf1\x00o\x00\xf1\x00o\x00:\x00 \x00v\x00e\x00r\x00y\x00 \x00y\x00e\x00s\x00 \x00=\xd8\xd4\xdf\n\x00", + M{"ñoño": "very yes 🟔"}, + }, + + // UTF-16-BE + { + "\xfe\xff\x00\xf1\x00o\x00\xf1\x00o\x00:\x00 \x00v\x00e\x00r\x00y\x00 \x00y\x00e\x00s\x00\n", + M{"ñoño": "very yes"}, + }, + // UTF-16-BE with surrogate. 
+ { + "\xfe\xff\x00\xf1\x00o\x00\xf1\x00o\x00:\x00 \x00v\x00e\x00r\x00y\x00 \x00y\x00e\x00s\x00 \xd8=\xdf\xd4\x00\n", + M{"ñoño": "very yes 🟔"}, + }, +} + +type M map[interface{}]interface{} + +type inlineB struct { + B int + inlineC `yaml:",inline"` +} + +type inlineC struct { + C int +} + +func (s *S) TestUnmarshal(c *C) { + for _, item := range unmarshalTests { + t := reflect.ValueOf(item.value).Type() + var value interface{} + switch t.Kind() { + case reflect.Map: + value = reflect.MakeMap(t).Interface() + case reflect.String: + value = reflect.New(t).Interface() + case reflect.Ptr: + value = reflect.New(t.Elem()).Interface() + default: + c.Fatalf("missing case for %s", t) + } + err := yaml.Unmarshal([]byte(item.data), value) + if _, ok := err.(*yaml.TypeError); !ok { + c.Assert(err, IsNil) + } + if t.Kind() == reflect.String { + c.Assert(*value.(*string), Equals, item.value) + } else { + c.Assert(value, DeepEquals, item.value) + } + } +} + +func (s *S) TestUnmarshalNaN(c *C) { + value := map[string]interface{}{} + err := yaml.Unmarshal([]byte("notanum: .NaN"), &value) + c.Assert(err, IsNil) + c.Assert(math.IsNaN(value["notanum"].(float64)), Equals, true) +} + +var unmarshalErrorTests = []struct { + data, error string +}{ + {"v: !!float 'error'", "yaml: cannot decode !!str `error` as a !!float"}, + {"v: [A,", "yaml: line 1: did not find expected node content"}, + {"v:\n- [A,", "yaml: line 2: did not find expected node content"}, + {"a: *b\n", "yaml: unknown anchor 'b' referenced"}, + {"a: &a\n b: *a\n", "yaml: anchor 'a' value contains itself"}, + {"value: -", "yaml: block sequence entries are not allowed in this context"}, + {"a: !!binary ==", "yaml: !!binary value contains invalid base64 data"}, + {"{[.]}", `yaml: invalid map key: \[\]interface \{\}\{"\."\}`}, + {"{{.}}", `yaml: invalid map key: map\[interface\ \{\}\]interface \{\}\{".":interface \{\}\(nil\)\}`}, +} + +func (s *S) TestUnmarshalErrors(c *C) { + for _, item := range unmarshalErrorTests { + var 
value interface{} + err := yaml.Unmarshal([]byte(item.data), &value) + c.Assert(err, ErrorMatches, item.error, Commentf("Partial unmarshal: %#v", value)) + } +} + +var unmarshalerTests = []struct { + data, tag string + value interface{} +}{ + {"_: {hi: there}", "!!map", map[interface{}]interface{}{"hi": "there"}}, + {"_: [1,A]", "!!seq", []interface{}{1, "A"}}, + {"_: 10", "!!int", 10}, + {"_: null", "!!null", nil}, + {`_: BAR!`, "!!str", "BAR!"}, + {`_: "BAR!"`, "!!str", "BAR!"}, + {"_: !!foo 'BAR!'", "!!foo", "BAR!"}, + {`_: ""`, "!!str", ""}, +} + +var unmarshalerResult = map[int]error{} + +type unmarshalerType struct { + value interface{} +} + +func (o *unmarshalerType) UnmarshalYAML(unmarshal func(v interface{}) error) error { + if err := unmarshal(&o.value); err != nil { + return err + } + if i, ok := o.value.(int); ok { + if result, ok := unmarshalerResult[i]; ok { + return result + } + } + return nil +} + +type unmarshalerPointer struct { + Field *unmarshalerType "_" +} + +type unmarshalerValue struct { + Field unmarshalerType "_" +} + +func (s *S) TestUnmarshalerPointerField(c *C) { + for _, item := range unmarshalerTests { + obj := &unmarshalerPointer{} + err := yaml.Unmarshal([]byte(item.data), obj) + c.Assert(err, IsNil) + if item.value == nil { + c.Assert(obj.Field, IsNil) + } else { + c.Assert(obj.Field, NotNil, Commentf("Pointer not initialized (%#v)", item.value)) + c.Assert(obj.Field.value, DeepEquals, item.value) + } + } +} + +func (s *S) TestUnmarshalerValueField(c *C) { + for _, item := range unmarshalerTests { + obj := &unmarshalerValue{} + err := yaml.Unmarshal([]byte(item.data), obj) + c.Assert(err, IsNil) + c.Assert(obj.Field, NotNil, Commentf("Pointer not initialized (%#v)", item.value)) + c.Assert(obj.Field.value, DeepEquals, item.value) + } +} + +func (s *S) TestUnmarshalerWholeDocument(c *C) { + obj := &unmarshalerType{} + err := yaml.Unmarshal([]byte(unmarshalerTests[0].data), obj) + c.Assert(err, IsNil) + value, ok := 
obj.value.(map[interface{}]interface{}) + c.Assert(ok, Equals, true, Commentf("value: %#v", obj.value)) + c.Assert(value["_"], DeepEquals, unmarshalerTests[0].value) +} + +func (s *S) TestUnmarshalerTypeError(c *C) { + unmarshalerResult[2] = &yaml.TypeError{[]string{"foo"}} + unmarshalerResult[4] = &yaml.TypeError{[]string{"bar"}} + defer func() { + delete(unmarshalerResult, 2) + delete(unmarshalerResult, 4) + }() + + type T struct { + Before int + After int + M map[string]*unmarshalerType + } + var v T + data := `{before: A, m: {abc: 1, def: 2, ghi: 3, jkl: 4}, after: B}` + err := yaml.Unmarshal([]byte(data), &v) + c.Assert(err, ErrorMatches, ""+ + "yaml: unmarshal errors:\n"+ + " line 1: cannot unmarshal !!str `A` into int\n"+ + " foo\n"+ + " bar\n"+ + " line 1: cannot unmarshal !!str `B` into int") + c.Assert(v.M["abc"], NotNil) + c.Assert(v.M["def"], IsNil) + c.Assert(v.M["ghi"], NotNil) + c.Assert(v.M["jkl"], IsNil) + + c.Assert(v.M["abc"].value, Equals, 1) + c.Assert(v.M["ghi"].value, Equals, 3) +} + +type proxyTypeError struct{} + +func (v *proxyTypeError) UnmarshalYAML(unmarshal func(interface{}) error) error { + var s string + var a int32 + var b int64 + if err := unmarshal(&s); err != nil { + panic(err) + } + if s == "a" { + if err := unmarshal(&b); err == nil { + panic("should have failed") + } + return unmarshal(&a) + } + if err := unmarshal(&a); err == nil { + panic("should have failed") + } + return unmarshal(&b) +} + +func (s *S) TestUnmarshalerTypeErrorProxying(c *C) { + type T struct { + Before int + After int + M map[string]*proxyTypeError + } + var v T + data := `{before: A, m: {abc: a, def: b}, after: B}` + err := yaml.Unmarshal([]byte(data), &v) + c.Assert(err, ErrorMatches, ""+ + "yaml: unmarshal errors:\n"+ + " line 1: cannot unmarshal !!str `A` into int\n"+ + " line 1: cannot unmarshal !!str `a` into int32\n"+ + " line 1: cannot unmarshal !!str `b` into int64\n"+ + " line 1: cannot unmarshal !!str `B` into int") +} + +type failingUnmarshaler 
struct{} + +var failingErr = errors.New("failingErr") + +func (ft *failingUnmarshaler) UnmarshalYAML(unmarshal func(interface{}) error) error { + return failingErr +} + +func (s *S) TestUnmarshalerError(c *C) { + err := yaml.Unmarshal([]byte("a: b"), &failingUnmarshaler{}) + c.Assert(err, Equals, failingErr) +} + +type sliceUnmarshaler []int + +func (su *sliceUnmarshaler) UnmarshalYAML(unmarshal func(interface{}) error) error { + var slice []int + err := unmarshal(&slice) + if err == nil { + *su = slice + return nil + } + + var intVal int + err = unmarshal(&intVal) + if err == nil { + *su = []int{intVal} + return nil + } + + return err +} + +func (s *S) TestUnmarshalerRetry(c *C) { + var su sliceUnmarshaler + err := yaml.Unmarshal([]byte("[1, 2, 3]"), &su) + c.Assert(err, IsNil) + c.Assert(su, DeepEquals, sliceUnmarshaler([]int{1, 2, 3})) + + err = yaml.Unmarshal([]byte("1"), &su) + c.Assert(err, IsNil) + c.Assert(su, DeepEquals, sliceUnmarshaler([]int{1})) +} + +// From http://yaml.org/type/merge.html +var mergeTests = ` +anchors: + list: + - &CENTER { "x": 1, "y": 2 } + - &LEFT { "x": 0, "y": 2 } + - &BIG { "r": 10 } + - &SMALL { "r": 1 } + +# All the following maps are equal: + +plain: + # Explicit keys + "x": 1 + "y": 2 + "r": 10 + label: center/big + +mergeOne: + # Merge one map + << : *CENTER + "r": 10 + label: center/big + +mergeMultiple: + # Merge multiple maps + << : [ *CENTER, *BIG ] + label: center/big + +override: + # Override + << : [ *BIG, *LEFT, *SMALL ] + "x": 1 + label: center/big + +shortTag: + # Explicit short merge tag + !!merge "<<" : [ *CENTER, *BIG ] + label: center/big + +longTag: + # Explicit merge long tag + ! 
"<<" : [ *CENTER, *BIG ] + label: center/big + +inlineMap: + # Inlined map + << : {"x": 1, "y": 2, "r": 10} + label: center/big + +inlineSequenceMap: + # Inlined map in sequence + << : [ *CENTER, {"r": 10} ] + label: center/big +` + +func (s *S) TestMerge(c *C) { + var want = map[interface{}]interface{}{ + "x": 1, + "y": 2, + "r": 10, + "label": "center/big", + } + + var m map[interface{}]interface{} + err := yaml.Unmarshal([]byte(mergeTests), &m) + c.Assert(err, IsNil) + for name, test := range m { + if name == "anchors" { + continue + } + c.Assert(test, DeepEquals, want, Commentf("test %q failed", name)) + } +} + +func (s *S) TestMergeStruct(c *C) { + type Data struct { + X, Y, R int + Label string + } + want := Data{1, 2, 10, "center/big"} + + var m map[string]Data + err := yaml.Unmarshal([]byte(mergeTests), &m) + c.Assert(err, IsNil) + for name, test := range m { + if name == "anchors" { + continue + } + c.Assert(test, Equals, want, Commentf("test %q failed", name)) + } +} + +var unmarshalNullTests = []func() interface{}{ + func() interface{} { var v interface{}; v = "v"; return &v }, + func() interface{} { var s = "s"; return &s }, + func() interface{} { var s = "s"; sptr := &s; return &sptr }, + func() interface{} { var i = 1; return &i }, + func() interface{} { var i = 1; iptr := &i; return &iptr }, + func() interface{} { m := map[string]int{"s": 1}; return &m }, + func() interface{} { m := map[string]int{"s": 1}; return m }, +} + +func (s *S) TestUnmarshalNull(c *C) { + for _, test := range unmarshalNullTests { + item := test() + zero := reflect.Zero(reflect.TypeOf(item).Elem()).Interface() + err := yaml.Unmarshal([]byte("null"), item) + c.Assert(err, IsNil) + if reflect.TypeOf(item).Kind() == reflect.Map { + c.Assert(reflect.ValueOf(item).Interface(), DeepEquals, reflect.MakeMap(reflect.TypeOf(item)).Interface()) + } else { + c.Assert(reflect.ValueOf(item).Elem().Interface(), DeepEquals, zero) + } + } +} + +func (s *S) TestUnmarshalSliceOnPreset(c *C) { + 
// Issue #48. + v := struct{ A []int }{[]int{1}} + yaml.Unmarshal([]byte("a: [2]"), &v) + c.Assert(v.A, DeepEquals, []int{2}) +} + +//var data []byte +//func init() { +// var err error +// data, err = ioutil.ReadFile("/tmp/file.yaml") +// if err != nil { +// panic(err) +// } +//} +// +//func (s *S) BenchmarkUnmarshal(c *C) { +// var err error +// for i := 0; i < c.N; i++ { +// var v map[string]interface{} +// err = yaml.Unmarshal(data, &v) +// } +// if err != nil { +// panic(err) +// } +//} +// +//func (s *S) BenchmarkMarshal(c *C) { +// var v map[string]interface{} +// yaml.Unmarshal(data, &v) +// c.ResetTimer() +// for i := 0; i < c.N; i++ { +// yaml.Marshal(&v) +// } +//} diff --git a/vendor/gopkg.in/yaml.v2/emitterc.go b/vendor/gopkg.in/yaml.v2/emitterc.go new file mode 100644 index 000000000..2befd553e --- /dev/null +++ b/vendor/gopkg.in/yaml.v2/emitterc.go @@ -0,0 +1,1685 @@ +package yaml + +import ( + "bytes" +) + +// Flush the buffer if needed. +func flush(emitter *yaml_emitter_t) bool { + if emitter.buffer_pos+5 >= len(emitter.buffer) { + return yaml_emitter_flush(emitter) + } + return true +} + +// Put a character to the output buffer. +func put(emitter *yaml_emitter_t, value byte) bool { + if emitter.buffer_pos+5 >= len(emitter.buffer) && !yaml_emitter_flush(emitter) { + return false + } + emitter.buffer[emitter.buffer_pos] = value + emitter.buffer_pos++ + emitter.column++ + return true +} + +// Put a line break to the output buffer. 
+func put_break(emitter *yaml_emitter_t) bool { + if emitter.buffer_pos+5 >= len(emitter.buffer) && !yaml_emitter_flush(emitter) { + return false + } + switch emitter.line_break { + case yaml_CR_BREAK: + emitter.buffer[emitter.buffer_pos] = '\r' + emitter.buffer_pos += 1 + case yaml_LN_BREAK: + emitter.buffer[emitter.buffer_pos] = '\n' + emitter.buffer_pos += 1 + case yaml_CRLN_BREAK: + emitter.buffer[emitter.buffer_pos+0] = '\r' + emitter.buffer[emitter.buffer_pos+1] = '\n' + emitter.buffer_pos += 2 + default: + panic("unknown line break setting") + } + emitter.column = 0 + emitter.line++ + return true +} + +// Copy a character from a string into buffer. +func write(emitter *yaml_emitter_t, s []byte, i *int) bool { + if emitter.buffer_pos+5 >= len(emitter.buffer) && !yaml_emitter_flush(emitter) { + return false + } + p := emitter.buffer_pos + w := width(s[*i]) + switch w { + case 4: + emitter.buffer[p+3] = s[*i+3] + fallthrough + case 3: + emitter.buffer[p+2] = s[*i+2] + fallthrough + case 2: + emitter.buffer[p+1] = s[*i+1] + fallthrough + case 1: + emitter.buffer[p+0] = s[*i+0] + default: + panic("unknown character width") + } + emitter.column++ + emitter.buffer_pos += w + *i += w + return true +} + +// Write a whole string into buffer. +func write_all(emitter *yaml_emitter_t, s []byte) bool { + for i := 0; i < len(s); { + if !write(emitter, s, &i) { + return false + } + } + return true +} + +// Copy a line break character from a string into buffer. +func write_break(emitter *yaml_emitter_t, s []byte, i *int) bool { + if s[*i] == '\n' { + if !put_break(emitter) { + return false + } + *i++ + } else { + if !write(emitter, s, i) { + return false + } + emitter.column = 0 + emitter.line++ + } + return true +} + +// Set an emitter error and return false. +func yaml_emitter_set_emitter_error(emitter *yaml_emitter_t, problem string) bool { + emitter.error = yaml_EMITTER_ERROR + emitter.problem = problem + return false +} + +// Emit an event. 
+func yaml_emitter_emit(emitter *yaml_emitter_t, event *yaml_event_t) bool { + emitter.events = append(emitter.events, *event) + for !yaml_emitter_need_more_events(emitter) { + event := &emitter.events[emitter.events_head] + if !yaml_emitter_analyze_event(emitter, event) { + return false + } + if !yaml_emitter_state_machine(emitter, event) { + return false + } + yaml_event_delete(event) + emitter.events_head++ + } + return true +} + +// Check if we need to accumulate more events before emitting. +// +// We accumulate extra +// - 1 event for DOCUMENT-START +// - 2 events for SEQUENCE-START +// - 3 events for MAPPING-START +// +func yaml_emitter_need_more_events(emitter *yaml_emitter_t) bool { + if emitter.events_head == len(emitter.events) { + return true + } + var accumulate int + switch emitter.events[emitter.events_head].typ { + case yaml_DOCUMENT_START_EVENT: + accumulate = 1 + break + case yaml_SEQUENCE_START_EVENT: + accumulate = 2 + break + case yaml_MAPPING_START_EVENT: + accumulate = 3 + break + default: + return false + } + if len(emitter.events)-emitter.events_head > accumulate { + return false + } + var level int + for i := emitter.events_head; i < len(emitter.events); i++ { + switch emitter.events[i].typ { + case yaml_STREAM_START_EVENT, yaml_DOCUMENT_START_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT: + level++ + case yaml_STREAM_END_EVENT, yaml_DOCUMENT_END_EVENT, yaml_SEQUENCE_END_EVENT, yaml_MAPPING_END_EVENT: + level-- + } + if level == 0 { + return false + } + } + return true +} + +// Append a directive to the directives stack. 
+func yaml_emitter_append_tag_directive(emitter *yaml_emitter_t, value *yaml_tag_directive_t, allow_duplicates bool) bool { + for i := 0; i < len(emitter.tag_directives); i++ { + if bytes.Equal(value.handle, emitter.tag_directives[i].handle) { + if allow_duplicates { + return true + } + return yaml_emitter_set_emitter_error(emitter, "duplicate %TAG directive") + } + } + + // [Go] Do we actually need to copy this given garbage collection + // and the lack of deallocating destructors? + tag_copy := yaml_tag_directive_t{ + handle: make([]byte, len(value.handle)), + prefix: make([]byte, len(value.prefix)), + } + copy(tag_copy.handle, value.handle) + copy(tag_copy.prefix, value.prefix) + emitter.tag_directives = append(emitter.tag_directives, tag_copy) + return true +} + +// Increase the indentation level. +func yaml_emitter_increase_indent(emitter *yaml_emitter_t, flow, indentless bool) bool { + emitter.indents = append(emitter.indents, emitter.indent) + if emitter.indent < 0 { + if flow { + emitter.indent = emitter.best_indent + } else { + emitter.indent = 0 + } + } else if !indentless { + emitter.indent += emitter.best_indent + } + return true +} + +// State dispatcher. 
+func yaml_emitter_state_machine(emitter *yaml_emitter_t, event *yaml_event_t) bool { + switch emitter.state { + default: + case yaml_EMIT_STREAM_START_STATE: + return yaml_emitter_emit_stream_start(emitter, event) + + case yaml_EMIT_FIRST_DOCUMENT_START_STATE: + return yaml_emitter_emit_document_start(emitter, event, true) + + case yaml_EMIT_DOCUMENT_START_STATE: + return yaml_emitter_emit_document_start(emitter, event, false) + + case yaml_EMIT_DOCUMENT_CONTENT_STATE: + return yaml_emitter_emit_document_content(emitter, event) + + case yaml_EMIT_DOCUMENT_END_STATE: + return yaml_emitter_emit_document_end(emitter, event) + + case yaml_EMIT_FLOW_SEQUENCE_FIRST_ITEM_STATE: + return yaml_emitter_emit_flow_sequence_item(emitter, event, true) + + case yaml_EMIT_FLOW_SEQUENCE_ITEM_STATE: + return yaml_emitter_emit_flow_sequence_item(emitter, event, false) + + case yaml_EMIT_FLOW_MAPPING_FIRST_KEY_STATE: + return yaml_emitter_emit_flow_mapping_key(emitter, event, true) + + case yaml_EMIT_FLOW_MAPPING_KEY_STATE: + return yaml_emitter_emit_flow_mapping_key(emitter, event, false) + + case yaml_EMIT_FLOW_MAPPING_SIMPLE_VALUE_STATE: + return yaml_emitter_emit_flow_mapping_value(emitter, event, true) + + case yaml_EMIT_FLOW_MAPPING_VALUE_STATE: + return yaml_emitter_emit_flow_mapping_value(emitter, event, false) + + case yaml_EMIT_BLOCK_SEQUENCE_FIRST_ITEM_STATE: + return yaml_emitter_emit_block_sequence_item(emitter, event, true) + + case yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE: + return yaml_emitter_emit_block_sequence_item(emitter, event, false) + + case yaml_EMIT_BLOCK_MAPPING_FIRST_KEY_STATE: + return yaml_emitter_emit_block_mapping_key(emitter, event, true) + + case yaml_EMIT_BLOCK_MAPPING_KEY_STATE: + return yaml_emitter_emit_block_mapping_key(emitter, event, false) + + case yaml_EMIT_BLOCK_MAPPING_SIMPLE_VALUE_STATE: + return yaml_emitter_emit_block_mapping_value(emitter, event, true) + + case yaml_EMIT_BLOCK_MAPPING_VALUE_STATE: + return 
yaml_emitter_emit_block_mapping_value(emitter, event, false) + + case yaml_EMIT_END_STATE: + return yaml_emitter_set_emitter_error(emitter, "expected nothing after STREAM-END") + } + panic("invalid emitter state") +} + +// Expect STREAM-START. +func yaml_emitter_emit_stream_start(emitter *yaml_emitter_t, event *yaml_event_t) bool { + if event.typ != yaml_STREAM_START_EVENT { + return yaml_emitter_set_emitter_error(emitter, "expected STREAM-START") + } + if emitter.encoding == yaml_ANY_ENCODING { + emitter.encoding = event.encoding + if emitter.encoding == yaml_ANY_ENCODING { + emitter.encoding = yaml_UTF8_ENCODING + } + } + if emitter.best_indent < 2 || emitter.best_indent > 9 { + emitter.best_indent = 2 + } + if emitter.best_width >= 0 && emitter.best_width <= emitter.best_indent*2 { + emitter.best_width = 80 + } + if emitter.best_width < 0 { + emitter.best_width = 1<<31 - 1 + } + if emitter.line_break == yaml_ANY_BREAK { + emitter.line_break = yaml_LN_BREAK + } + + emitter.indent = -1 + emitter.line = 0 + emitter.column = 0 + emitter.whitespace = true + emitter.indention = true + + if emitter.encoding != yaml_UTF8_ENCODING { + if !yaml_emitter_write_bom(emitter) { + return false + } + } + emitter.state = yaml_EMIT_FIRST_DOCUMENT_START_STATE + return true +} + +// Expect DOCUMENT-START or STREAM-END. 
+func yaml_emitter_emit_document_start(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool { + + if event.typ == yaml_DOCUMENT_START_EVENT { + + if event.version_directive != nil { + if !yaml_emitter_analyze_version_directive(emitter, event.version_directive) { + return false + } + } + + for i := 0; i < len(event.tag_directives); i++ { + tag_directive := &event.tag_directives[i] + if !yaml_emitter_analyze_tag_directive(emitter, tag_directive) { + return false + } + if !yaml_emitter_append_tag_directive(emitter, tag_directive, false) { + return false + } + } + + for i := 0; i < len(default_tag_directives); i++ { + tag_directive := &default_tag_directives[i] + if !yaml_emitter_append_tag_directive(emitter, tag_directive, true) { + return false + } + } + + implicit := event.implicit + if !first || emitter.canonical { + implicit = false + } + + if emitter.open_ended && (event.version_directive != nil || len(event.tag_directives) > 0) { + if !yaml_emitter_write_indicator(emitter, []byte("..."), true, false, false) { + return false + } + if !yaml_emitter_write_indent(emitter) { + return false + } + } + + if event.version_directive != nil { + implicit = false + if !yaml_emitter_write_indicator(emitter, []byte("%YAML"), true, false, false) { + return false + } + if !yaml_emitter_write_indicator(emitter, []byte("1.1"), true, false, false) { + return false + } + if !yaml_emitter_write_indent(emitter) { + return false + } + } + + if len(event.tag_directives) > 0 { + implicit = false + for i := 0; i < len(event.tag_directives); i++ { + tag_directive := &event.tag_directives[i] + if !yaml_emitter_write_indicator(emitter, []byte("%TAG"), true, false, false) { + return false + } + if !yaml_emitter_write_tag_handle(emitter, tag_directive.handle) { + return false + } + if !yaml_emitter_write_tag_content(emitter, tag_directive.prefix, true) { + return false + } + if !yaml_emitter_write_indent(emitter) { + return false + } + } + } + + if 
yaml_emitter_check_empty_document(emitter) { + implicit = false + } + if !implicit { + if !yaml_emitter_write_indent(emitter) { + return false + } + if !yaml_emitter_write_indicator(emitter, []byte("---"), true, false, false) { + return false + } + if emitter.canonical { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + } + + emitter.state = yaml_EMIT_DOCUMENT_CONTENT_STATE + return true + } + + if event.typ == yaml_STREAM_END_EVENT { + if emitter.open_ended { + if !yaml_emitter_write_indicator(emitter, []byte("..."), true, false, false) { + return false + } + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if !yaml_emitter_flush(emitter) { + return false + } + emitter.state = yaml_EMIT_END_STATE + return true + } + + return yaml_emitter_set_emitter_error(emitter, "expected DOCUMENT-START or STREAM-END") +} + +// Expect the root node. +func yaml_emitter_emit_document_content(emitter *yaml_emitter_t, event *yaml_event_t) bool { + emitter.states = append(emitter.states, yaml_EMIT_DOCUMENT_END_STATE) + return yaml_emitter_emit_node(emitter, event, true, false, false, false) +} + +// Expect DOCUMENT-END. +func yaml_emitter_emit_document_end(emitter *yaml_emitter_t, event *yaml_event_t) bool { + if event.typ != yaml_DOCUMENT_END_EVENT { + return yaml_emitter_set_emitter_error(emitter, "expected DOCUMENT-END") + } + if !yaml_emitter_write_indent(emitter) { + return false + } + if !event.implicit { + // [Go] Allocate the slice elsewhere. + if !yaml_emitter_write_indicator(emitter, []byte("..."), true, false, false) { + return false + } + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if !yaml_emitter_flush(emitter) { + return false + } + emitter.state = yaml_EMIT_DOCUMENT_START_STATE + emitter.tag_directives = emitter.tag_directives[:0] + return true +} + +// Expect a flow item node. 
+func yaml_emitter_emit_flow_sequence_item(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool { + if first { + if !yaml_emitter_write_indicator(emitter, []byte{'['}, true, true, false) { + return false + } + if !yaml_emitter_increase_indent(emitter, true, false) { + return false + } + emitter.flow_level++ + } + + if event.typ == yaml_SEQUENCE_END_EVENT { + emitter.flow_level-- + emitter.indent = emitter.indents[len(emitter.indents)-1] + emitter.indents = emitter.indents[:len(emitter.indents)-1] + if emitter.canonical && !first { + if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) { + return false + } + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if !yaml_emitter_write_indicator(emitter, []byte{']'}, false, false, false) { + return false + } + emitter.state = emitter.states[len(emitter.states)-1] + emitter.states = emitter.states[:len(emitter.states)-1] + + return true + } + + if !first { + if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) { + return false + } + } + + if emitter.canonical || emitter.column > emitter.best_width { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + emitter.states = append(emitter.states, yaml_EMIT_FLOW_SEQUENCE_ITEM_STATE) + return yaml_emitter_emit_node(emitter, event, false, true, false, false) +} + +// Expect a flow key node. 
+func yaml_emitter_emit_flow_mapping_key(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool { + if first { + if !yaml_emitter_write_indicator(emitter, []byte{'{'}, true, true, false) { + return false + } + if !yaml_emitter_increase_indent(emitter, true, false) { + return false + } + emitter.flow_level++ + } + + if event.typ == yaml_MAPPING_END_EVENT { + emitter.flow_level-- + emitter.indent = emitter.indents[len(emitter.indents)-1] + emitter.indents = emitter.indents[:len(emitter.indents)-1] + if emitter.canonical && !first { + if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) { + return false + } + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if !yaml_emitter_write_indicator(emitter, []byte{'}'}, false, false, false) { + return false + } + emitter.state = emitter.states[len(emitter.states)-1] + emitter.states = emitter.states[:len(emitter.states)-1] + return true + } + + if !first { + if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) { + return false + } + } + if emitter.canonical || emitter.column > emitter.best_width { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + + if !emitter.canonical && yaml_emitter_check_simple_key(emitter) { + emitter.states = append(emitter.states, yaml_EMIT_FLOW_MAPPING_SIMPLE_VALUE_STATE) + return yaml_emitter_emit_node(emitter, event, false, false, true, true) + } + if !yaml_emitter_write_indicator(emitter, []byte{'?'}, true, false, false) { + return false + } + emitter.states = append(emitter.states, yaml_EMIT_FLOW_MAPPING_VALUE_STATE) + return yaml_emitter_emit_node(emitter, event, false, false, true, false) +} + +// Expect a flow value node. 
+func yaml_emitter_emit_flow_mapping_value(emitter *yaml_emitter_t, event *yaml_event_t, simple bool) bool { + if simple { + if !yaml_emitter_write_indicator(emitter, []byte{':'}, false, false, false) { + return false + } + } else { + if emitter.canonical || emitter.column > emitter.best_width { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if !yaml_emitter_write_indicator(emitter, []byte{':'}, true, false, false) { + return false + } + } + emitter.states = append(emitter.states, yaml_EMIT_FLOW_MAPPING_KEY_STATE) + return yaml_emitter_emit_node(emitter, event, false, false, true, false) +} + +// Expect a block item node. +func yaml_emitter_emit_block_sequence_item(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool { + if first { + if !yaml_emitter_increase_indent(emitter, false, emitter.mapping_context && !emitter.indention) { + return false + } + } + if event.typ == yaml_SEQUENCE_END_EVENT { + emitter.indent = emitter.indents[len(emitter.indents)-1] + emitter.indents = emitter.indents[:len(emitter.indents)-1] + emitter.state = emitter.states[len(emitter.states)-1] + emitter.states = emitter.states[:len(emitter.states)-1] + return true + } + if !yaml_emitter_write_indent(emitter) { + return false + } + if !yaml_emitter_write_indicator(emitter, []byte{'-'}, true, false, true) { + return false + } + emitter.states = append(emitter.states, yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE) + return yaml_emitter_emit_node(emitter, event, false, true, false, false) +} + +// Expect a block key node. 
+func yaml_emitter_emit_block_mapping_key(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool { + if first { + if !yaml_emitter_increase_indent(emitter, false, false) { + return false + } + } + if event.typ == yaml_MAPPING_END_EVENT { + emitter.indent = emitter.indents[len(emitter.indents)-1] + emitter.indents = emitter.indents[:len(emitter.indents)-1] + emitter.state = emitter.states[len(emitter.states)-1] + emitter.states = emitter.states[:len(emitter.states)-1] + return true + } + if !yaml_emitter_write_indent(emitter) { + return false + } + if yaml_emitter_check_simple_key(emitter) { + emitter.states = append(emitter.states, yaml_EMIT_BLOCK_MAPPING_SIMPLE_VALUE_STATE) + return yaml_emitter_emit_node(emitter, event, false, false, true, true) + } + if !yaml_emitter_write_indicator(emitter, []byte{'?'}, true, false, true) { + return false + } + emitter.states = append(emitter.states, yaml_EMIT_BLOCK_MAPPING_VALUE_STATE) + return yaml_emitter_emit_node(emitter, event, false, false, true, false) +} + +// Expect a block value node. +func yaml_emitter_emit_block_mapping_value(emitter *yaml_emitter_t, event *yaml_event_t, simple bool) bool { + if simple { + if !yaml_emitter_write_indicator(emitter, []byte{':'}, false, false, false) { + return false + } + } else { + if !yaml_emitter_write_indent(emitter) { + return false + } + if !yaml_emitter_write_indicator(emitter, []byte{':'}, true, false, true) { + return false + } + } + emitter.states = append(emitter.states, yaml_EMIT_BLOCK_MAPPING_KEY_STATE) + return yaml_emitter_emit_node(emitter, event, false, false, true, false) +} + +// Expect a node. 
+func yaml_emitter_emit_node(emitter *yaml_emitter_t, event *yaml_event_t, + root bool, sequence bool, mapping bool, simple_key bool) bool { + + emitter.root_context = root + emitter.sequence_context = sequence + emitter.mapping_context = mapping + emitter.simple_key_context = simple_key + + switch event.typ { + case yaml_ALIAS_EVENT: + return yaml_emitter_emit_alias(emitter, event) + case yaml_SCALAR_EVENT: + return yaml_emitter_emit_scalar(emitter, event) + case yaml_SEQUENCE_START_EVENT: + return yaml_emitter_emit_sequence_start(emitter, event) + case yaml_MAPPING_START_EVENT: + return yaml_emitter_emit_mapping_start(emitter, event) + default: + return yaml_emitter_set_emitter_error(emitter, + "expected SCALAR, SEQUENCE-START, MAPPING-START, or ALIAS") + } + return false +} + +// Expect ALIAS. +func yaml_emitter_emit_alias(emitter *yaml_emitter_t, event *yaml_event_t) bool { + if !yaml_emitter_process_anchor(emitter) { + return false + } + emitter.state = emitter.states[len(emitter.states)-1] + emitter.states = emitter.states[:len(emitter.states)-1] + return true +} + +// Expect SCALAR. +func yaml_emitter_emit_scalar(emitter *yaml_emitter_t, event *yaml_event_t) bool { + if !yaml_emitter_select_scalar_style(emitter, event) { + return false + } + if !yaml_emitter_process_anchor(emitter) { + return false + } + if !yaml_emitter_process_tag(emitter) { + return false + } + if !yaml_emitter_increase_indent(emitter, true, false) { + return false + } + if !yaml_emitter_process_scalar(emitter) { + return false + } + emitter.indent = emitter.indents[len(emitter.indents)-1] + emitter.indents = emitter.indents[:len(emitter.indents)-1] + emitter.state = emitter.states[len(emitter.states)-1] + emitter.states = emitter.states[:len(emitter.states)-1] + return true +} + +// Expect SEQUENCE-START. 
+func yaml_emitter_emit_sequence_start(emitter *yaml_emitter_t, event *yaml_event_t) bool { + if !yaml_emitter_process_anchor(emitter) { + return false + } + if !yaml_emitter_process_tag(emitter) { + return false + } + if emitter.flow_level > 0 || emitter.canonical || event.sequence_style() == yaml_FLOW_SEQUENCE_STYLE || + yaml_emitter_check_empty_sequence(emitter) { + emitter.state = yaml_EMIT_FLOW_SEQUENCE_FIRST_ITEM_STATE + } else { + emitter.state = yaml_EMIT_BLOCK_SEQUENCE_FIRST_ITEM_STATE + } + return true +} + +// Expect MAPPING-START. +func yaml_emitter_emit_mapping_start(emitter *yaml_emitter_t, event *yaml_event_t) bool { + if !yaml_emitter_process_anchor(emitter) { + return false + } + if !yaml_emitter_process_tag(emitter) { + return false + } + if emitter.flow_level > 0 || emitter.canonical || event.mapping_style() == yaml_FLOW_MAPPING_STYLE || + yaml_emitter_check_empty_mapping(emitter) { + emitter.state = yaml_EMIT_FLOW_MAPPING_FIRST_KEY_STATE + } else { + emitter.state = yaml_EMIT_BLOCK_MAPPING_FIRST_KEY_STATE + } + return true +} + +// Check if the document content is an empty scalar. +func yaml_emitter_check_empty_document(emitter *yaml_emitter_t) bool { + return false // [Go] Huh? +} + +// Check if the next events represent an empty sequence. +func yaml_emitter_check_empty_sequence(emitter *yaml_emitter_t) bool { + if len(emitter.events)-emitter.events_head < 2 { + return false + } + return emitter.events[emitter.events_head].typ == yaml_SEQUENCE_START_EVENT && + emitter.events[emitter.events_head+1].typ == yaml_SEQUENCE_END_EVENT +} + +// Check if the next events represent an empty mapping. +func yaml_emitter_check_empty_mapping(emitter *yaml_emitter_t) bool { + if len(emitter.events)-emitter.events_head < 2 { + return false + } + return emitter.events[emitter.events_head].typ == yaml_MAPPING_START_EVENT && + emitter.events[emitter.events_head+1].typ == yaml_MAPPING_END_EVENT +} + +// Check if the next node can be expressed as a simple key. 
+func yaml_emitter_check_simple_key(emitter *yaml_emitter_t) bool { + length := 0 + switch emitter.events[emitter.events_head].typ { + case yaml_ALIAS_EVENT: + length += len(emitter.anchor_data.anchor) + case yaml_SCALAR_EVENT: + if emitter.scalar_data.multiline { + return false + } + length += len(emitter.anchor_data.anchor) + + len(emitter.tag_data.handle) + + len(emitter.tag_data.suffix) + + len(emitter.scalar_data.value) + case yaml_SEQUENCE_START_EVENT: + if !yaml_emitter_check_empty_sequence(emitter) { + return false + } + length += len(emitter.anchor_data.anchor) + + len(emitter.tag_data.handle) + + len(emitter.tag_data.suffix) + case yaml_MAPPING_START_EVENT: + if !yaml_emitter_check_empty_mapping(emitter) { + return false + } + length += len(emitter.anchor_data.anchor) + + len(emitter.tag_data.handle) + + len(emitter.tag_data.suffix) + default: + return false + } + return length <= 128 +} + +// Determine an acceptable scalar style. +func yaml_emitter_select_scalar_style(emitter *yaml_emitter_t, event *yaml_event_t) bool { + + no_tag := len(emitter.tag_data.handle) == 0 && len(emitter.tag_data.suffix) == 0 + if no_tag && !event.implicit && !event.quoted_implicit { + return yaml_emitter_set_emitter_error(emitter, "neither tag nor implicit flags are specified") + } + + style := event.scalar_style() + if style == yaml_ANY_SCALAR_STYLE { + style = yaml_PLAIN_SCALAR_STYLE + } + if emitter.canonical { + style = yaml_DOUBLE_QUOTED_SCALAR_STYLE + } + if emitter.simple_key_context && emitter.scalar_data.multiline { + style = yaml_DOUBLE_QUOTED_SCALAR_STYLE + } + + if style == yaml_PLAIN_SCALAR_STYLE { + if emitter.flow_level > 0 && !emitter.scalar_data.flow_plain_allowed || + emitter.flow_level == 0 && !emitter.scalar_data.block_plain_allowed { + style = yaml_SINGLE_QUOTED_SCALAR_STYLE + } + if len(emitter.scalar_data.value) == 0 && (emitter.flow_level > 0 || emitter.simple_key_context) { + style = yaml_SINGLE_QUOTED_SCALAR_STYLE + } + if no_tag && !event.implicit { 
+ style = yaml_SINGLE_QUOTED_SCALAR_STYLE + } + } + if style == yaml_SINGLE_QUOTED_SCALAR_STYLE { + if !emitter.scalar_data.single_quoted_allowed { + style = yaml_DOUBLE_QUOTED_SCALAR_STYLE + } + } + if style == yaml_LITERAL_SCALAR_STYLE || style == yaml_FOLDED_SCALAR_STYLE { + if !emitter.scalar_data.block_allowed || emitter.flow_level > 0 || emitter.simple_key_context { + style = yaml_DOUBLE_QUOTED_SCALAR_STYLE + } + } + + if no_tag && !event.quoted_implicit && style != yaml_PLAIN_SCALAR_STYLE { + emitter.tag_data.handle = []byte{'!'} + } + emitter.scalar_data.style = style + return true +} + +// Write an achor. +func yaml_emitter_process_anchor(emitter *yaml_emitter_t) bool { + if emitter.anchor_data.anchor == nil { + return true + } + c := []byte{'&'} + if emitter.anchor_data.alias { + c[0] = '*' + } + if !yaml_emitter_write_indicator(emitter, c, true, false, false) { + return false + } + return yaml_emitter_write_anchor(emitter, emitter.anchor_data.anchor) +} + +// Write a tag. +func yaml_emitter_process_tag(emitter *yaml_emitter_t) bool { + if len(emitter.tag_data.handle) == 0 && len(emitter.tag_data.suffix) == 0 { + return true + } + if len(emitter.tag_data.handle) > 0 { + if !yaml_emitter_write_tag_handle(emitter, emitter.tag_data.handle) { + return false + } + if len(emitter.tag_data.suffix) > 0 { + if !yaml_emitter_write_tag_content(emitter, emitter.tag_data.suffix, false) { + return false + } + } + } else { + // [Go] Allocate these slices elsewhere. + if !yaml_emitter_write_indicator(emitter, []byte("!<"), true, false, false) { + return false + } + if !yaml_emitter_write_tag_content(emitter, emitter.tag_data.suffix, false) { + return false + } + if !yaml_emitter_write_indicator(emitter, []byte{'>'}, false, false, false) { + return false + } + } + return true +} + +// Write a scalar. 
+func yaml_emitter_process_scalar(emitter *yaml_emitter_t) bool { + switch emitter.scalar_data.style { + case yaml_PLAIN_SCALAR_STYLE: + return yaml_emitter_write_plain_scalar(emitter, emitter.scalar_data.value, !emitter.simple_key_context) + + case yaml_SINGLE_QUOTED_SCALAR_STYLE: + return yaml_emitter_write_single_quoted_scalar(emitter, emitter.scalar_data.value, !emitter.simple_key_context) + + case yaml_DOUBLE_QUOTED_SCALAR_STYLE: + return yaml_emitter_write_double_quoted_scalar(emitter, emitter.scalar_data.value, !emitter.simple_key_context) + + case yaml_LITERAL_SCALAR_STYLE: + return yaml_emitter_write_literal_scalar(emitter, emitter.scalar_data.value) + + case yaml_FOLDED_SCALAR_STYLE: + return yaml_emitter_write_folded_scalar(emitter, emitter.scalar_data.value) + } + panic("unknown scalar style") +} + +// Check if a %YAML directive is valid. +func yaml_emitter_analyze_version_directive(emitter *yaml_emitter_t, version_directive *yaml_version_directive_t) bool { + if version_directive.major != 1 || version_directive.minor != 1 { + return yaml_emitter_set_emitter_error(emitter, "incompatible %YAML directive") + } + return true +} + +// Check if a %TAG directive is valid. +func yaml_emitter_analyze_tag_directive(emitter *yaml_emitter_t, tag_directive *yaml_tag_directive_t) bool { + handle := tag_directive.handle + prefix := tag_directive.prefix + if len(handle) == 0 { + return yaml_emitter_set_emitter_error(emitter, "tag handle must not be empty") + } + if handle[0] != '!' { + return yaml_emitter_set_emitter_error(emitter, "tag handle must start with '!'") + } + if handle[len(handle)-1] != '!' 
{ + return yaml_emitter_set_emitter_error(emitter, "tag handle must end with '!'") + } + for i := 1; i < len(handle)-1; i += width(handle[i]) { + if !is_alpha(handle, i) { + return yaml_emitter_set_emitter_error(emitter, "tag handle must contain alphanumerical characters only") + } + } + if len(prefix) == 0 { + return yaml_emitter_set_emitter_error(emitter, "tag prefix must not be empty") + } + return true +} + +// Check if an anchor is valid. +func yaml_emitter_analyze_anchor(emitter *yaml_emitter_t, anchor []byte, alias bool) bool { + if len(anchor) == 0 { + problem := "anchor value must not be empty" + if alias { + problem = "alias value must not be empty" + } + return yaml_emitter_set_emitter_error(emitter, problem) + } + for i := 0; i < len(anchor); i += width(anchor[i]) { + if !is_alpha(anchor, i) { + problem := "anchor value must contain alphanumerical characters only" + if alias { + problem = "alias value must contain alphanumerical characters only" + } + return yaml_emitter_set_emitter_error(emitter, problem) + } + } + emitter.anchor_data.anchor = anchor + emitter.anchor_data.alias = alias + return true +} + +// Check if a tag is valid. +func yaml_emitter_analyze_tag(emitter *yaml_emitter_t, tag []byte) bool { + if len(tag) == 0 { + return yaml_emitter_set_emitter_error(emitter, "tag value must not be empty") + } + for i := 0; i < len(emitter.tag_directives); i++ { + tag_directive := &emitter.tag_directives[i] + if bytes.HasPrefix(tag, tag_directive.prefix) { + emitter.tag_data.handle = tag_directive.handle + emitter.tag_data.suffix = tag[len(tag_directive.prefix):] + return true + } + } + emitter.tag_data.suffix = tag + return true +} + +// Check if a scalar is valid. 
+func yaml_emitter_analyze_scalar(emitter *yaml_emitter_t, value []byte) bool { + var ( + block_indicators = false + flow_indicators = false + line_breaks = false + special_characters = false + + leading_space = false + leading_break = false + trailing_space = false + trailing_break = false + break_space = false + space_break = false + + preceeded_by_whitespace = false + followed_by_whitespace = false + previous_space = false + previous_break = false + ) + + emitter.scalar_data.value = value + + if len(value) == 0 { + emitter.scalar_data.multiline = false + emitter.scalar_data.flow_plain_allowed = false + emitter.scalar_data.block_plain_allowed = true + emitter.scalar_data.single_quoted_allowed = true + emitter.scalar_data.block_allowed = false + return true + } + + if len(value) >= 3 && ((value[0] == '-' && value[1] == '-' && value[2] == '-') || (value[0] == '.' && value[1] == '.' && value[2] == '.')) { + block_indicators = true + flow_indicators = true + } + + preceeded_by_whitespace = true + for i, w := 0, 0; i < len(value); i += w { + w = width(value[i]) + followed_by_whitespace = i+w >= len(value) || is_blank(value, i+w) + + if i == 0 { + switch value[i] { + case '#', ',', '[', ']', '{', '}', '&', '*', '!', '|', '>', '\'', '"', '%', '@', '`': + flow_indicators = true + block_indicators = true + case '?', ':': + flow_indicators = true + if followed_by_whitespace { + block_indicators = true + } + case '-': + if followed_by_whitespace { + flow_indicators = true + block_indicators = true + } + } + } else { + switch value[i] { + case ',', '?', '[', ']', '{', '}': + flow_indicators = true + case ':': + flow_indicators = true + if followed_by_whitespace { + block_indicators = true + } + case '#': + if preceeded_by_whitespace { + flow_indicators = true + block_indicators = true + } + } + } + + if !is_printable(value, i) || !is_ascii(value, i) && !emitter.unicode { + special_characters = true + } + if is_space(value, i) { + if i == 0 { + leading_space = true + } + if 
i+width(value[i]) == len(value) { + trailing_space = true + } + if previous_break { + break_space = true + } + previous_space = true + previous_break = false + } else if is_break(value, i) { + line_breaks = true + if i == 0 { + leading_break = true + } + if i+width(value[i]) == len(value) { + trailing_break = true + } + if previous_space { + space_break = true + } + previous_space = false + previous_break = true + } else { + previous_space = false + previous_break = false + } + + // [Go]: Why 'z'? Couldn't be the end of the string as that's the loop condition. + preceeded_by_whitespace = is_blankz(value, i) + } + + emitter.scalar_data.multiline = line_breaks + emitter.scalar_data.flow_plain_allowed = true + emitter.scalar_data.block_plain_allowed = true + emitter.scalar_data.single_quoted_allowed = true + emitter.scalar_data.block_allowed = true + + if leading_space || leading_break || trailing_space || trailing_break { + emitter.scalar_data.flow_plain_allowed = false + emitter.scalar_data.block_plain_allowed = false + } + if trailing_space { + emitter.scalar_data.block_allowed = false + } + if break_space { + emitter.scalar_data.flow_plain_allowed = false + emitter.scalar_data.block_plain_allowed = false + emitter.scalar_data.single_quoted_allowed = false + } + if space_break || special_characters { + emitter.scalar_data.flow_plain_allowed = false + emitter.scalar_data.block_plain_allowed = false + emitter.scalar_data.single_quoted_allowed = false + emitter.scalar_data.block_allowed = false + } + if line_breaks { + emitter.scalar_data.flow_plain_allowed = false + emitter.scalar_data.block_plain_allowed = false + } + if flow_indicators { + emitter.scalar_data.flow_plain_allowed = false + } + if block_indicators { + emitter.scalar_data.block_plain_allowed = false + } + return true +} + +// Check if the event data is valid. 
+func yaml_emitter_analyze_event(emitter *yaml_emitter_t, event *yaml_event_t) bool { + + emitter.anchor_data.anchor = nil + emitter.tag_data.handle = nil + emitter.tag_data.suffix = nil + emitter.scalar_data.value = nil + + switch event.typ { + case yaml_ALIAS_EVENT: + if !yaml_emitter_analyze_anchor(emitter, event.anchor, true) { + return false + } + + case yaml_SCALAR_EVENT: + if len(event.anchor) > 0 { + if !yaml_emitter_analyze_anchor(emitter, event.anchor, false) { + return false + } + } + if len(event.tag) > 0 && (emitter.canonical || (!event.implicit && !event.quoted_implicit)) { + if !yaml_emitter_analyze_tag(emitter, event.tag) { + return false + } + } + if !yaml_emitter_analyze_scalar(emitter, event.value) { + return false + } + + case yaml_SEQUENCE_START_EVENT: + if len(event.anchor) > 0 { + if !yaml_emitter_analyze_anchor(emitter, event.anchor, false) { + return false + } + } + if len(event.tag) > 0 && (emitter.canonical || !event.implicit) { + if !yaml_emitter_analyze_tag(emitter, event.tag) { + return false + } + } + + case yaml_MAPPING_START_EVENT: + if len(event.anchor) > 0 { + if !yaml_emitter_analyze_anchor(emitter, event.anchor, false) { + return false + } + } + if len(event.tag) > 0 && (emitter.canonical || !event.implicit) { + if !yaml_emitter_analyze_tag(emitter, event.tag) { + return false + } + } + } + return true +} + +// Write the BOM character. 
+func yaml_emitter_write_bom(emitter *yaml_emitter_t) bool { + if !flush(emitter) { + return false + } + pos := emitter.buffer_pos + emitter.buffer[pos+0] = '\xEF' + emitter.buffer[pos+1] = '\xBB' + emitter.buffer[pos+2] = '\xBF' + emitter.buffer_pos += 3 + return true +} + +func yaml_emitter_write_indent(emitter *yaml_emitter_t) bool { + indent := emitter.indent + if indent < 0 { + indent = 0 + } + if !emitter.indention || emitter.column > indent || (emitter.column == indent && !emitter.whitespace) { + if !put_break(emitter) { + return false + } + } + for emitter.column < indent { + if !put(emitter, ' ') { + return false + } + } + emitter.whitespace = true + emitter.indention = true + return true +} + +func yaml_emitter_write_indicator(emitter *yaml_emitter_t, indicator []byte, need_whitespace, is_whitespace, is_indention bool) bool { + if need_whitespace && !emitter.whitespace { + if !put(emitter, ' ') { + return false + } + } + if !write_all(emitter, indicator) { + return false + } + emitter.whitespace = is_whitespace + emitter.indention = (emitter.indention && is_indention) + emitter.open_ended = false + return true +} + +func yaml_emitter_write_anchor(emitter *yaml_emitter_t, value []byte) bool { + if !write_all(emitter, value) { + return false + } + emitter.whitespace = false + emitter.indention = false + return true +} + +func yaml_emitter_write_tag_handle(emitter *yaml_emitter_t, value []byte) bool { + if !emitter.whitespace { + if !put(emitter, ' ') { + return false + } + } + if !write_all(emitter, value) { + return false + } + emitter.whitespace = false + emitter.indention = false + return true +} + +func yaml_emitter_write_tag_content(emitter *yaml_emitter_t, value []byte, need_whitespace bool) bool { + if need_whitespace && !emitter.whitespace { + if !put(emitter, ' ') { + return false + } + } + for i := 0; i < len(value); { + var must_write bool + switch value[i] { + case ';', '/', '?', ':', '@', '&', '=', '+', '$', ',', '_', '.', '~', '*', '\'', '(', 
')', '[', ']': + must_write = true + default: + must_write = is_alpha(value, i) + } + if must_write { + if !write(emitter, value, &i) { + return false + } + } else { + w := width(value[i]) + for k := 0; k < w; k++ { + octet := value[i] + i++ + if !put(emitter, '%') { + return false + } + + c := octet >> 4 + if c < 10 { + c += '0' + } else { + c += 'A' - 10 + } + if !put(emitter, c) { + return false + } + + c = octet & 0x0f + if c < 10 { + c += '0' + } else { + c += 'A' - 10 + } + if !put(emitter, c) { + return false + } + } + } + } + emitter.whitespace = false + emitter.indention = false + return true +} + +func yaml_emitter_write_plain_scalar(emitter *yaml_emitter_t, value []byte, allow_breaks bool) bool { + if !emitter.whitespace { + if !put(emitter, ' ') { + return false + } + } + + spaces := false + breaks := false + for i := 0; i < len(value); { + if is_space(value, i) { + if allow_breaks && !spaces && emitter.column > emitter.best_width && !is_space(value, i+1) { + if !yaml_emitter_write_indent(emitter) { + return false + } + i += width(value[i]) + } else { + if !write(emitter, value, &i) { + return false + } + } + spaces = true + } else if is_break(value, i) { + if !breaks && value[i] == '\n' { + if !put_break(emitter) { + return false + } + } + if !write_break(emitter, value, &i) { + return false + } + emitter.indention = true + breaks = true + } else { + if breaks { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if !write(emitter, value, &i) { + return false + } + emitter.indention = false + spaces = false + breaks = false + } + } + + emitter.whitespace = false + emitter.indention = false + if emitter.root_context { + emitter.open_ended = true + } + + return true +} + +func yaml_emitter_write_single_quoted_scalar(emitter *yaml_emitter_t, value []byte, allow_breaks bool) bool { + + if !yaml_emitter_write_indicator(emitter, []byte{'\''}, true, false, false) { + return false + } + + spaces := false + breaks := false + for i := 0; i < 
len(value); { + if is_space(value, i) { + if allow_breaks && !spaces && emitter.column > emitter.best_width && i > 0 && i < len(value)-1 && !is_space(value, i+1) { + if !yaml_emitter_write_indent(emitter) { + return false + } + i += width(value[i]) + } else { + if !write(emitter, value, &i) { + return false + } + } + spaces = true + } else if is_break(value, i) { + if !breaks && value[i] == '\n' { + if !put_break(emitter) { + return false + } + } + if !write_break(emitter, value, &i) { + return false + } + emitter.indention = true + breaks = true + } else { + if breaks { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if value[i] == '\'' { + if !put(emitter, '\'') { + return false + } + } + if !write(emitter, value, &i) { + return false + } + emitter.indention = false + spaces = false + breaks = false + } + } + if !yaml_emitter_write_indicator(emitter, []byte{'\''}, false, false, false) { + return false + } + emitter.whitespace = false + emitter.indention = false + return true +} + +func yaml_emitter_write_double_quoted_scalar(emitter *yaml_emitter_t, value []byte, allow_breaks bool) bool { + spaces := false + if !yaml_emitter_write_indicator(emitter, []byte{'"'}, true, false, false) { + return false + } + + for i := 0; i < len(value); { + if !is_printable(value, i) || (!emitter.unicode && !is_ascii(value, i)) || + is_bom(value, i) || is_break(value, i) || + value[i] == '"' || value[i] == '\\' { + + octet := value[i] + + var w int + var v rune + switch { + case octet&0x80 == 0x00: + w, v = 1, rune(octet&0x7F) + case octet&0xE0 == 0xC0: + w, v = 2, rune(octet&0x1F) + case octet&0xF0 == 0xE0: + w, v = 3, rune(octet&0x0F) + case octet&0xF8 == 0xF0: + w, v = 4, rune(octet&0x07) + } + for k := 1; k < w; k++ { + octet = value[i+k] + v = (v << 6) + (rune(octet) & 0x3F) + } + i += w + + if !put(emitter, '\\') { + return false + } + + var ok bool + switch v { + case 0x00: + ok = put(emitter, '0') + case 0x07: + ok = put(emitter, 'a') + case 0x08: + ok = 
put(emitter, 'b') + case 0x09: + ok = put(emitter, 't') + case 0x0A: + ok = put(emitter, 'n') + case 0x0b: + ok = put(emitter, 'v') + case 0x0c: + ok = put(emitter, 'f') + case 0x0d: + ok = put(emitter, 'r') + case 0x1b: + ok = put(emitter, 'e') + case 0x22: + ok = put(emitter, '"') + case 0x5c: + ok = put(emitter, '\\') + case 0x85: + ok = put(emitter, 'N') + case 0xA0: + ok = put(emitter, '_') + case 0x2028: + ok = put(emitter, 'L') + case 0x2029: + ok = put(emitter, 'P') + default: + if v <= 0xFF { + ok = put(emitter, 'x') + w = 2 + } else if v <= 0xFFFF { + ok = put(emitter, 'u') + w = 4 + } else { + ok = put(emitter, 'U') + w = 8 + } + for k := (w - 1) * 4; ok && k >= 0; k -= 4 { + digit := byte((v >> uint(k)) & 0x0F) + if digit < 10 { + ok = put(emitter, digit+'0') + } else { + ok = put(emitter, digit+'A'-10) + } + } + } + if !ok { + return false + } + spaces = false + } else if is_space(value, i) { + if allow_breaks && !spaces && emitter.column > emitter.best_width && i > 0 && i < len(value)-1 { + if !yaml_emitter_write_indent(emitter) { + return false + } + if is_space(value, i+1) { + if !put(emitter, '\\') { + return false + } + } + i += width(value[i]) + } else if !write(emitter, value, &i) { + return false + } + spaces = true + } else { + if !write(emitter, value, &i) { + return false + } + spaces = false + } + } + if !yaml_emitter_write_indicator(emitter, []byte{'"'}, false, false, false) { + return false + } + emitter.whitespace = false + emitter.indention = false + return true +} + +func yaml_emitter_write_block_scalar_hints(emitter *yaml_emitter_t, value []byte) bool { + if is_space(value, 0) || is_break(value, 0) { + indent_hint := []byte{'0' + byte(emitter.best_indent)} + if !yaml_emitter_write_indicator(emitter, indent_hint, false, false, false) { + return false + } + } + + emitter.open_ended = false + + var chomp_hint [1]byte + if len(value) == 0 { + chomp_hint[0] = '-' + } else { + i := len(value) - 1 + for value[i]&0xC0 == 0x80 { + i-- + } + if 
!is_break(value, i) { + chomp_hint[0] = '-' + } else if i == 0 { + chomp_hint[0] = '+' + emitter.open_ended = true + } else { + i-- + for value[i]&0xC0 == 0x80 { + i-- + } + if is_break(value, i) { + chomp_hint[0] = '+' + emitter.open_ended = true + } + } + } + if chomp_hint[0] != 0 { + if !yaml_emitter_write_indicator(emitter, chomp_hint[:], false, false, false) { + return false + } + } + return true +} + +func yaml_emitter_write_literal_scalar(emitter *yaml_emitter_t, value []byte) bool { + if !yaml_emitter_write_indicator(emitter, []byte{'|'}, true, false, false) { + return false + } + if !yaml_emitter_write_block_scalar_hints(emitter, value) { + return false + } + if !put_break(emitter) { + return false + } + emitter.indention = true + emitter.whitespace = true + breaks := true + for i := 0; i < len(value); { + if is_break(value, i) { + if !write_break(emitter, value, &i) { + return false + } + emitter.indention = true + breaks = true + } else { + if breaks { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if !write(emitter, value, &i) { + return false + } + emitter.indention = false + breaks = false + } + } + + return true +} + +func yaml_emitter_write_folded_scalar(emitter *yaml_emitter_t, value []byte) bool { + if !yaml_emitter_write_indicator(emitter, []byte{'>'}, true, false, false) { + return false + } + if !yaml_emitter_write_block_scalar_hints(emitter, value) { + return false + } + + if !put_break(emitter) { + return false + } + emitter.indention = true + emitter.whitespace = true + + breaks := true + leading_spaces := true + for i := 0; i < len(value); { + if is_break(value, i) { + if !breaks && !leading_spaces && value[i] == '\n' { + k := 0 + for is_break(value, k) { + k += width(value[k]) + } + if !is_blankz(value, k) { + if !put_break(emitter) { + return false + } + } + } + if !write_break(emitter, value, &i) { + return false + } + emitter.indention = true + breaks = true + } else { + if breaks { + if 
!yaml_emitter_write_indent(emitter) { + return false + } + leading_spaces = is_blank(value, i) + } + if !breaks && is_space(value, i) && !is_space(value, i+1) && emitter.column > emitter.best_width { + if !yaml_emitter_write_indent(emitter) { + return false + } + i += width(value[i]) + } else { + if !write(emitter, value, &i) { + return false + } + } + emitter.indention = false + breaks = false + } + } + return true +} diff --git a/vendor/gopkg.in/yaml.v2/encode.go b/vendor/gopkg.in/yaml.v2/encode.go new file mode 100644 index 000000000..84f849955 --- /dev/null +++ b/vendor/gopkg.in/yaml.v2/encode.go @@ -0,0 +1,306 @@ +package yaml + +import ( + "encoding" + "fmt" + "reflect" + "regexp" + "sort" + "strconv" + "strings" + "time" +) + +type encoder struct { + emitter yaml_emitter_t + event yaml_event_t + out []byte + flow bool +} + +func newEncoder() (e *encoder) { + e = &encoder{} + e.must(yaml_emitter_initialize(&e.emitter)) + yaml_emitter_set_output_string(&e.emitter, &e.out) + yaml_emitter_set_unicode(&e.emitter, true) + e.must(yaml_stream_start_event_initialize(&e.event, yaml_UTF8_ENCODING)) + e.emit() + e.must(yaml_document_start_event_initialize(&e.event, nil, nil, true)) + e.emit() + return e +} + +func (e *encoder) finish() { + e.must(yaml_document_end_event_initialize(&e.event, true)) + e.emit() + e.emitter.open_ended = false + e.must(yaml_stream_end_event_initialize(&e.event)) + e.emit() +} + +func (e *encoder) destroy() { + yaml_emitter_delete(&e.emitter) +} + +func (e *encoder) emit() { + // This will internally delete the e.event value. 
+ if !yaml_emitter_emit(&e.emitter, &e.event) && e.event.typ != yaml_DOCUMENT_END_EVENT && e.event.typ != yaml_STREAM_END_EVENT { + e.must(false) + } +} + +func (e *encoder) must(ok bool) { + if !ok { + msg := e.emitter.problem + if msg == "" { + msg = "unknown problem generating YAML content" + } + failf("%s", msg) + } +} + +func (e *encoder) marshal(tag string, in reflect.Value) { + if !in.IsValid() { + e.nilv() + return + } + iface := in.Interface() + if m, ok := iface.(Marshaler); ok { + v, err := m.MarshalYAML() + if err != nil { + fail(err) + } + if v == nil { + e.nilv() + return + } + in = reflect.ValueOf(v) + } else if m, ok := iface.(encoding.TextMarshaler); ok { + text, err := m.MarshalText() + if err != nil { + fail(err) + } + in = reflect.ValueOf(string(text)) + } + switch in.Kind() { + case reflect.Interface: + if in.IsNil() { + e.nilv() + } else { + e.marshal(tag, in.Elem()) + } + case reflect.Map: + e.mapv(tag, in) + case reflect.Ptr: + if in.IsNil() { + e.nilv() + } else { + e.marshal(tag, in.Elem()) + } + case reflect.Struct: + e.structv(tag, in) + case reflect.Slice: + if in.Type().Elem() == mapItemType { + e.itemsv(tag, in) + } else { + e.slicev(tag, in) + } + case reflect.String: + e.stringv(tag, in) + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + if in.Type() == durationType { + e.stringv(tag, reflect.ValueOf(iface.(time.Duration).String())) + } else { + e.intv(tag, in) + } + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + e.uintv(tag, in) + case reflect.Float32, reflect.Float64: + e.floatv(tag, in) + case reflect.Bool: + e.boolv(tag, in) + default: + panic("cannot marshal type: " + in.Type().String()) + } +} + +func (e *encoder) mapv(tag string, in reflect.Value) { + e.mappingv(tag, func() { + keys := keyList(in.MapKeys()) + sort.Sort(keys) + for _, k := range keys { + e.marshal("", k) + e.marshal("", in.MapIndex(k)) + } + }) +} + +func (e *encoder) 
itemsv(tag string, in reflect.Value) { + e.mappingv(tag, func() { + slice := in.Convert(reflect.TypeOf([]MapItem{})).Interface().([]MapItem) + for _, item := range slice { + e.marshal("", reflect.ValueOf(item.Key)) + e.marshal("", reflect.ValueOf(item.Value)) + } + }) +} + +func (e *encoder) structv(tag string, in reflect.Value) { + sinfo, err := getStructInfo(in.Type()) + if err != nil { + panic(err) + } + e.mappingv(tag, func() { + for _, info := range sinfo.FieldsList { + var value reflect.Value + if info.Inline == nil { + value = in.Field(info.Num) + } else { + value = in.FieldByIndex(info.Inline) + } + if info.OmitEmpty && isZero(value) { + continue + } + e.marshal("", reflect.ValueOf(info.Key)) + e.flow = info.Flow + e.marshal("", value) + } + if sinfo.InlineMap >= 0 { + m := in.Field(sinfo.InlineMap) + if m.Len() > 0 { + e.flow = false + keys := keyList(m.MapKeys()) + sort.Sort(keys) + for _, k := range keys { + if _, found := sinfo.FieldsMap[k.String()]; found { + panic(fmt.Sprintf("Can't have key %q in inlined map; conflicts with struct field", k.String())) + } + e.marshal("", k) + e.flow = false + e.marshal("", m.MapIndex(k)) + } + } + } + }) +} + +func (e *encoder) mappingv(tag string, f func()) { + implicit := tag == "" + style := yaml_BLOCK_MAPPING_STYLE + if e.flow { + e.flow = false + style = yaml_FLOW_MAPPING_STYLE + } + e.must(yaml_mapping_start_event_initialize(&e.event, nil, []byte(tag), implicit, style)) + e.emit() + f() + e.must(yaml_mapping_end_event_initialize(&e.event)) + e.emit() +} + +func (e *encoder) slicev(tag string, in reflect.Value) { + implicit := tag == "" + style := yaml_BLOCK_SEQUENCE_STYLE + if e.flow { + e.flow = false + style = yaml_FLOW_SEQUENCE_STYLE + } + e.must(yaml_sequence_start_event_initialize(&e.event, nil, []byte(tag), implicit, style)) + e.emit() + n := in.Len() + for i := 0; i < n; i++ { + e.marshal("", in.Index(i)) + } + e.must(yaml_sequence_end_event_initialize(&e.event)) + e.emit() +} + +// isBase60 returns 
whether s is in base 60 notation as defined in YAML 1.1. +// +// The base 60 float notation in YAML 1.1 is a terrible idea and is unsupported +// in YAML 1.2 and by this package, but these should be marshalled quoted for +// the time being for compatibility with other parsers. +func isBase60Float(s string) (result bool) { + // Fast path. + if s == "" { + return false + } + c := s[0] + if !(c == '+' || c == '-' || c >= '0' && c <= '9') || strings.IndexByte(s, ':') < 0 { + return false + } + // Do the full match. + return base60float.MatchString(s) +} + +// From http://yaml.org/type/float.html, except the regular expression there +// is bogus. In practice parsers do not enforce the "\.[0-9_]*" suffix. +var base60float = regexp.MustCompile(`^[-+]?[0-9][0-9_]*(?::[0-5]?[0-9])+(?:\.[0-9_]*)?$`) + +func (e *encoder) stringv(tag string, in reflect.Value) { + var style yaml_scalar_style_t + s := in.String() + rtag, rs := resolve("", s) + if rtag == yaml_BINARY_TAG { + if tag == "" || tag == yaml_STR_TAG { + tag = rtag + s = rs.(string) + } else if tag == yaml_BINARY_TAG { + failf("explicitly tagged !!binary data must be base64-encoded") + } else { + failf("cannot marshal invalid UTF-8 data as %s", shortTag(tag)) + } + } + if tag == "" && (rtag != yaml_STR_TAG || isBase60Float(s)) { + style = yaml_DOUBLE_QUOTED_SCALAR_STYLE + } else if strings.Contains(s, "\n") { + style = yaml_LITERAL_SCALAR_STYLE + } else { + style = yaml_PLAIN_SCALAR_STYLE + } + e.emitScalar(s, "", tag, style) +} + +func (e *encoder) boolv(tag string, in reflect.Value) { + var s string + if in.Bool() { + s = "true" + } else { + s = "false" + } + e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE) +} + +func (e *encoder) intv(tag string, in reflect.Value) { + s := strconv.FormatInt(in.Int(), 10) + e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE) +} + +func (e *encoder) uintv(tag string, in reflect.Value) { + s := strconv.FormatUint(in.Uint(), 10) + e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE) +} + 
+func (e *encoder) floatv(tag string, in reflect.Value) { + // FIXME: Handle 64 bits here. + s := strconv.FormatFloat(float64(in.Float()), 'g', -1, 32) + switch s { + case "+Inf": + s = ".inf" + case "-Inf": + s = "-.inf" + case "NaN": + s = ".nan" + } + e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE) +} + +func (e *encoder) nilv() { + e.emitScalar("null", "", "", yaml_PLAIN_SCALAR_STYLE) +} + +func (e *encoder) emitScalar(value, anchor, tag string, style yaml_scalar_style_t) { + implicit := tag == "" + e.must(yaml_scalar_event_initialize(&e.event, []byte(anchor), []byte(tag), []byte(value), implicit, implicit, style)) + e.emit() +} diff --git a/vendor/gopkg.in/yaml.v2/encode_test.go b/vendor/gopkg.in/yaml.v2/encode_test.go new file mode 100644 index 000000000..84099bd38 --- /dev/null +++ b/vendor/gopkg.in/yaml.v2/encode_test.go @@ -0,0 +1,501 @@ +package yaml_test + +import ( + "fmt" + "math" + "strconv" + "strings" + "time" + + . "gopkg.in/check.v1" + "gopkg.in/yaml.v2" + "net" + "os" +) + +var marshalIntTest = 123 + +var marshalTests = []struct { + value interface{} + data string +}{ + { + nil, + "null\n", + }, { + &struct{}{}, + "{}\n", + }, { + map[string]string{"v": "hi"}, + "v: hi\n", + }, { + map[string]interface{}{"v": "hi"}, + "v: hi\n", + }, { + map[string]string{"v": "true"}, + "v: \"true\"\n", + }, { + map[string]string{"v": "false"}, + "v: \"false\"\n", + }, { + map[string]interface{}{"v": true}, + "v: true\n", + }, { + map[string]interface{}{"v": false}, + "v: false\n", + }, { + map[string]interface{}{"v": 10}, + "v: 10\n", + }, { + map[string]interface{}{"v": -10}, + "v: -10\n", + }, { + map[string]uint{"v": 42}, + "v: 42\n", + }, { + map[string]interface{}{"v": int64(4294967296)}, + "v: 4294967296\n", + }, { + map[string]int64{"v": int64(4294967296)}, + "v: 4294967296\n", + }, { + map[string]uint64{"v": 4294967296}, + "v: 4294967296\n", + }, { + map[string]interface{}{"v": "10"}, + "v: \"10\"\n", + }, { + map[string]interface{}{"v": 0.1}, + "v: 
0.1\n", + }, { + map[string]interface{}{"v": float64(0.1)}, + "v: 0.1\n", + }, { + map[string]interface{}{"v": -0.1}, + "v: -0.1\n", + }, { + map[string]interface{}{"v": math.Inf(+1)}, + "v: .inf\n", + }, { + map[string]interface{}{"v": math.Inf(-1)}, + "v: -.inf\n", + }, { + map[string]interface{}{"v": math.NaN()}, + "v: .nan\n", + }, { + map[string]interface{}{"v": nil}, + "v: null\n", + }, { + map[string]interface{}{"v": ""}, + "v: \"\"\n", + }, { + map[string][]string{"v": []string{"A", "B"}}, + "v:\n- A\n- B\n", + }, { + map[string][]string{"v": []string{"A", "B\nC"}}, + "v:\n- A\n- |-\n B\n C\n", + }, { + map[string][]interface{}{"v": []interface{}{"A", 1, map[string][]int{"B": []int{2, 3}}}}, + "v:\n- A\n- 1\n- B:\n - 2\n - 3\n", + }, { + map[string]interface{}{"a": map[interface{}]interface{}{"b": "c"}}, + "a:\n b: c\n", + }, { + map[string]interface{}{"a": "-"}, + "a: '-'\n", + }, + + // Simple values. + { + &marshalIntTest, + "123\n", + }, + + // Structures + { + &struct{ Hello string }{"world"}, + "hello: world\n", + }, { + &struct { + A struct { + B string + } + }{struct{ B string }{"c"}}, + "a:\n b: c\n", + }, { + &struct { + A *struct { + B string + } + }{&struct{ B string }{"c"}}, + "a:\n b: c\n", + }, { + &struct { + A *struct { + B string + } + }{}, + "a: null\n", + }, { + &struct{ A int }{1}, + "a: 1\n", + }, { + &struct{ A []int }{[]int{1, 2}}, + "a:\n- 1\n- 2\n", + }, { + &struct { + B int "a" + }{1}, + "a: 1\n", + }, { + &struct{ A bool }{true}, + "a: true\n", + }, + + // Conditional flag + { + &struct { + A int "a,omitempty" + B int "b,omitempty" + }{1, 0}, + "a: 1\n", + }, { + &struct { + A int "a,omitempty" + B int "b,omitempty" + }{0, 0}, + "{}\n", + }, { + &struct { + A *struct{ X, y int } "a,omitempty,flow" + }{&struct{ X, y int }{1, 2}}, + "a: {x: 1}\n", + }, { + &struct { + A *struct{ X, y int } "a,omitempty,flow" + }{nil}, + "{}\n", + }, { + &struct { + A *struct{ X, y int } "a,omitempty,flow" + }{&struct{ X, y int }{}}, + "a: {x: 
0}\n", + }, { + &struct { + A struct{ X, y int } "a,omitempty,flow" + }{struct{ X, y int }{1, 2}}, + "a: {x: 1}\n", + }, { + &struct { + A struct{ X, y int } "a,omitempty,flow" + }{struct{ X, y int }{0, 1}}, + "{}\n", + }, { + &struct { + A float64 "a,omitempty" + B float64 "b,omitempty" + }{1, 0}, + "a: 1\n", + }, + + // Flow flag + { + &struct { + A []int "a,flow" + }{[]int{1, 2}}, + "a: [1, 2]\n", + }, { + &struct { + A map[string]string "a,flow" + }{map[string]string{"b": "c", "d": "e"}}, + "a: {b: c, d: e}\n", + }, { + &struct { + A struct { + B, D string + } "a,flow" + }{struct{ B, D string }{"c", "e"}}, + "a: {b: c, d: e}\n", + }, + + // Unexported field + { + &struct { + u int + A int + }{0, 1}, + "a: 1\n", + }, + + // Ignored field + { + &struct { + A int + B int "-" + }{1, 2}, + "a: 1\n", + }, + + // Struct inlining + { + &struct { + A int + C inlineB `yaml:",inline"` + }{1, inlineB{2, inlineC{3}}}, + "a: 1\nb: 2\nc: 3\n", + }, + + // Map inlining + { + &struct { + A int + C map[string]int `yaml:",inline"` + }{1, map[string]int{"b": 2, "c": 3}}, + "a: 1\nb: 2\nc: 3\n", + }, + + // Duration + { + map[string]time.Duration{"a": 3 * time.Second}, + "a: 3s\n", + }, + + // Issue #24: bug in map merging logic. + { + map[string]string{"a": ""}, + "a: \n", + }, + + // Issue #34: marshal unsupported base 60 floats quoted for compatibility + // with old YAML 1.1 parsers. + { + map[string]string{"a": "1:1"}, + "a: \"1:1\"\n", + }, + + // Binary data. + { + map[string]string{"a": "\x00"}, + "a: \"\\0\"\n", + }, { + map[string]string{"a": "\x80\x81\x82"}, + "a: !!binary gIGC\n", + }, { + map[string]string{"a": strings.Repeat("\x90", 54)}, + "a: !!binary |\n " + strings.Repeat("kJCQ", 17) + "kJ\n CQ\n", + }, + + // Ordered maps. + { + &yaml.MapSlice{{"b", 2}, {"a", 1}, {"d", 4}, {"c", 3}, {"sub", yaml.MapSlice{{"e", 5}}}}, + "b: 2\na: 1\nd: 4\nc: 3\nsub:\n e: 5\n", + }, + + // Encode unicode as utf-8 rather than in escaped form. 
+ { + map[string]string{"a": "你好"}, + "a: 你好\n", + }, + + // Support encoding.TextMarshaler. + { + map[string]net.IP{"a": net.IPv4(1, 2, 3, 4)}, + "a: 1.2.3.4\n", + }, + { + map[string]time.Time{"a": time.Unix(1424801979, 0)}, + "a: 2015-02-24T18:19:39Z\n", + }, + + // Ensure strings containing ": " are quoted (reported as PR #43, but not reproducible). + { + map[string]string{"a": "b: c"}, + "a: 'b: c'\n", + }, + + // Containing hash mark ('#') in string should be quoted + { + map[string]string{"a": "Hello #comment"}, + "a: 'Hello #comment'\n", + }, + { + map[string]string{"a": "你好 #comment"}, + "a: '你好 #comment'\n", + }, +} + +func (s *S) TestMarshal(c *C) { + defer os.Setenv("TZ", os.Getenv("TZ")) + os.Setenv("TZ", "UTC") + for _, item := range marshalTests { + data, err := yaml.Marshal(item.value) + c.Assert(err, IsNil) + c.Assert(string(data), Equals, item.data) + } +} + +var marshalErrorTests = []struct { + value interface{} + error string + panic string +}{{ + value: &struct { + B int + inlineB ",inline" + }{1, inlineB{2, inlineC{3}}}, + panic: `Duplicated key 'b' in struct struct \{ B int; .*`, +}, { + value: &struct { + A int + B map[string]int ",inline" + }{1, map[string]int{"a": 2}}, + panic: `Can't have key "a" in inlined map; conflicts with struct field`, +}} + +func (s *S) TestMarshalErrors(c *C) { + for _, item := range marshalErrorTests { + if item.panic != "" { + c.Assert(func() { yaml.Marshal(item.value) }, PanicMatches, item.panic) + } else { + _, err := yaml.Marshal(item.value) + c.Assert(err, ErrorMatches, item.error) + } + } +} + +func (s *S) TestMarshalTypeCache(c *C) { + var data []byte + var err error + func() { + type T struct{ A int } + data, err = yaml.Marshal(&T{}) + c.Assert(err, IsNil) + }() + func() { + type T struct{ B int } + data, err = yaml.Marshal(&T{}) + c.Assert(err, IsNil) + }() + c.Assert(string(data), Equals, "b: 0\n") +} + +var marshalerTests = []struct { + data string + value interface{} +}{ + {"_:\n hi: there\n", 
map[interface{}]interface{}{"hi": "there"}}, + {"_:\n- 1\n- A\n", []interface{}{1, "A"}}, + {"_: 10\n", 10}, + {"_: null\n", nil}, + {"_: BAR!\n", "BAR!"}, +} + +type marshalerType struct { + value interface{} +} + +func (o marshalerType) MarshalText() ([]byte, error) { + panic("MarshalText called on type with MarshalYAML") +} + +func (o marshalerType) MarshalYAML() (interface{}, error) { + return o.value, nil +} + +type marshalerValue struct { + Field marshalerType "_" +} + +func (s *S) TestMarshaler(c *C) { + for _, item := range marshalerTests { + obj := &marshalerValue{} + obj.Field.value = item.value + data, err := yaml.Marshal(obj) + c.Assert(err, IsNil) + c.Assert(string(data), Equals, string(item.data)) + } +} + +func (s *S) TestMarshalerWholeDocument(c *C) { + obj := &marshalerType{} + obj.value = map[string]string{"hello": "world!"} + data, err := yaml.Marshal(obj) + c.Assert(err, IsNil) + c.Assert(string(data), Equals, "hello: world!\n") +} + +type failingMarshaler struct{} + +func (ft *failingMarshaler) MarshalYAML() (interface{}, error) { + return nil, failingErr +} + +func (s *S) TestMarshalerError(c *C) { + _, err := yaml.Marshal(&failingMarshaler{}) + c.Assert(err, Equals, failingErr) +} + +func (s *S) TestSortedOutput(c *C) { + order := []interface{}{ + false, + true, + 1, + uint(1), + 1.0, + 1.1, + 1.2, + 2, + uint(2), + 2.0, + 2.1, + "", + ".1", + ".2", + ".a", + "1", + "2", + "a!10", + "a/2", + "a/10", + "a~10", + "ab/1", + "b/1", + "b/01", + "b/2", + "b/02", + "b/3", + "b/03", + "b1", + "b01", + "b3", + "c2.10", + "c10.2", + "d1", + "d12", + "d12a", + } + m := make(map[interface{}]int) + for _, k := range order { + m[k] = 1 + } + data, err := yaml.Marshal(m) + c.Assert(err, IsNil) + out := "\n" + string(data) + last := 0 + for i, k := range order { + repr := fmt.Sprint(k) + if s, ok := k.(string); ok { + if _, err = strconv.ParseFloat(repr, 32); s == "" || err == nil { + repr = `"` + repr + `"` + } + } + index := strings.Index(out, 
"\n"+repr+":") + if index == -1 { + c.Fatalf("%#v is not in the output: %#v", k, out) + } + if index < last { + c.Fatalf("%#v was generated before %#v: %q", k, order[i-1], out) + } + last = index + } +} diff --git a/vendor/gopkg.in/yaml.v2/parserc.go b/vendor/gopkg.in/yaml.v2/parserc.go new file mode 100644 index 000000000..0a7037ad1 --- /dev/null +++ b/vendor/gopkg.in/yaml.v2/parserc.go @@ -0,0 +1,1096 @@ +package yaml + +import ( + "bytes" +) + +// The parser implements the following grammar: +// +// stream ::= STREAM-START implicit_document? explicit_document* STREAM-END +// implicit_document ::= block_node DOCUMENT-END* +// explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END* +// block_node_or_indentless_sequence ::= +// ALIAS +// | properties (block_content | indentless_block_sequence)? +// | block_content +// | indentless_block_sequence +// block_node ::= ALIAS +// | properties block_content? +// | block_content +// flow_node ::= ALIAS +// | properties flow_content? +// | flow_content +// properties ::= TAG ANCHOR? | ANCHOR TAG? +// block_content ::= block_collection | flow_collection | SCALAR +// flow_content ::= flow_collection | SCALAR +// block_collection ::= block_sequence | block_mapping +// flow_collection ::= flow_sequence | flow_mapping +// block_sequence ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)* BLOCK-END +// indentless_sequence ::= (BLOCK-ENTRY block_node?)+ +// block_mapping ::= BLOCK-MAPPING_START +// ((KEY block_node_or_indentless_sequence?)? +// (VALUE block_node_or_indentless_sequence?)?)* +// BLOCK-END +// flow_sequence ::= FLOW-SEQUENCE-START +// (flow_sequence_entry FLOW-ENTRY)* +// flow_sequence_entry? +// FLOW-SEQUENCE-END +// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? +// flow_mapping ::= FLOW-MAPPING-START +// (flow_mapping_entry FLOW-ENTRY)* +// flow_mapping_entry? +// FLOW-MAPPING-END +// flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? 
+ +// Peek the next token in the token queue. +func peek_token(parser *yaml_parser_t) *yaml_token_t { + if parser.token_available || yaml_parser_fetch_more_tokens(parser) { + return &parser.tokens[parser.tokens_head] + } + return nil +} + +// Remove the next token from the queue (must be called after peek_token). +func skip_token(parser *yaml_parser_t) { + parser.token_available = false + parser.tokens_parsed++ + parser.stream_end_produced = parser.tokens[parser.tokens_head].typ == yaml_STREAM_END_TOKEN + parser.tokens_head++ +} + +// Get the next event. +func yaml_parser_parse(parser *yaml_parser_t, event *yaml_event_t) bool { + // Erase the event object. + *event = yaml_event_t{} + + // No events after the end of the stream or error. + if parser.stream_end_produced || parser.error != yaml_NO_ERROR || parser.state == yaml_PARSE_END_STATE { + return true + } + + // Generate the next event. + return yaml_parser_state_machine(parser, event) +} + +// Set parser error. +func yaml_parser_set_parser_error(parser *yaml_parser_t, problem string, problem_mark yaml_mark_t) bool { + parser.error = yaml_PARSER_ERROR + parser.problem = problem + parser.problem_mark = problem_mark + return false +} + +func yaml_parser_set_parser_error_context(parser *yaml_parser_t, context string, context_mark yaml_mark_t, problem string, problem_mark yaml_mark_t) bool { + parser.error = yaml_PARSER_ERROR + parser.context = context + parser.context_mark = context_mark + parser.problem = problem + parser.problem_mark = problem_mark + return false +} + +// State dispatcher. 
+func yaml_parser_state_machine(parser *yaml_parser_t, event *yaml_event_t) bool { + //trace("yaml_parser_state_machine", "state:", parser.state.String()) + + switch parser.state { + case yaml_PARSE_STREAM_START_STATE: + return yaml_parser_parse_stream_start(parser, event) + + case yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE: + return yaml_parser_parse_document_start(parser, event, true) + + case yaml_PARSE_DOCUMENT_START_STATE: + return yaml_parser_parse_document_start(parser, event, false) + + case yaml_PARSE_DOCUMENT_CONTENT_STATE: + return yaml_parser_parse_document_content(parser, event) + + case yaml_PARSE_DOCUMENT_END_STATE: + return yaml_parser_parse_document_end(parser, event) + + case yaml_PARSE_BLOCK_NODE_STATE: + return yaml_parser_parse_node(parser, event, true, false) + + case yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE: + return yaml_parser_parse_node(parser, event, true, true) + + case yaml_PARSE_FLOW_NODE_STATE: + return yaml_parser_parse_node(parser, event, false, false) + + case yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE: + return yaml_parser_parse_block_sequence_entry(parser, event, true) + + case yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE: + return yaml_parser_parse_block_sequence_entry(parser, event, false) + + case yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE: + return yaml_parser_parse_indentless_sequence_entry(parser, event) + + case yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE: + return yaml_parser_parse_block_mapping_key(parser, event, true) + + case yaml_PARSE_BLOCK_MAPPING_KEY_STATE: + return yaml_parser_parse_block_mapping_key(parser, event, false) + + case yaml_PARSE_BLOCK_MAPPING_VALUE_STATE: + return yaml_parser_parse_block_mapping_value(parser, event) + + case yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE: + return yaml_parser_parse_flow_sequence_entry(parser, event, true) + + case yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE: + return yaml_parser_parse_flow_sequence_entry(parser, event, false) + + case 
yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE: + return yaml_parser_parse_flow_sequence_entry_mapping_key(parser, event) + + case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE: + return yaml_parser_parse_flow_sequence_entry_mapping_value(parser, event) + + case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE: + return yaml_parser_parse_flow_sequence_entry_mapping_end(parser, event) + + case yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE: + return yaml_parser_parse_flow_mapping_key(parser, event, true) + + case yaml_PARSE_FLOW_MAPPING_KEY_STATE: + return yaml_parser_parse_flow_mapping_key(parser, event, false) + + case yaml_PARSE_FLOW_MAPPING_VALUE_STATE: + return yaml_parser_parse_flow_mapping_value(parser, event, false) + + case yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE: + return yaml_parser_parse_flow_mapping_value(parser, event, true) + + default: + panic("invalid parser state") + } + return false +} + +// Parse the production: +// stream ::= STREAM-START implicit_document? explicit_document* STREAM-END +// ************ +func yaml_parser_parse_stream_start(parser *yaml_parser_t, event *yaml_event_t) bool { + token := peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_STREAM_START_TOKEN { + return yaml_parser_set_parser_error(parser, "did not find expected ", token.start_mark) + } + parser.state = yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE + *event = yaml_event_t{ + typ: yaml_STREAM_START_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + encoding: token.encoding, + } + skip_token(parser) + return true +} + +// Parse the productions: +// implicit_document ::= block_node DOCUMENT-END* +// * +// explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END* +// ************************* +func yaml_parser_parse_document_start(parser *yaml_parser_t, event *yaml_event_t, implicit bool) bool { + + token := peek_token(parser) + if token == nil { + return false + } + + // Parse extra document end indicators. 
+ if !implicit { + for token.typ == yaml_DOCUMENT_END_TOKEN { + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + } + } + + if implicit && token.typ != yaml_VERSION_DIRECTIVE_TOKEN && + token.typ != yaml_TAG_DIRECTIVE_TOKEN && + token.typ != yaml_DOCUMENT_START_TOKEN && + token.typ != yaml_STREAM_END_TOKEN { + // Parse an implicit document. + if !yaml_parser_process_directives(parser, nil, nil) { + return false + } + parser.states = append(parser.states, yaml_PARSE_DOCUMENT_END_STATE) + parser.state = yaml_PARSE_BLOCK_NODE_STATE + + *event = yaml_event_t{ + typ: yaml_DOCUMENT_START_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + } + + } else if token.typ != yaml_STREAM_END_TOKEN { + // Parse an explicit document. + var version_directive *yaml_version_directive_t + var tag_directives []yaml_tag_directive_t + start_mark := token.start_mark + if !yaml_parser_process_directives(parser, &version_directive, &tag_directives) { + return false + } + token = peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_DOCUMENT_START_TOKEN { + yaml_parser_set_parser_error(parser, + "did not find expected ", token.start_mark) + return false + } + parser.states = append(parser.states, yaml_PARSE_DOCUMENT_END_STATE) + parser.state = yaml_PARSE_DOCUMENT_CONTENT_STATE + end_mark := token.end_mark + + *event = yaml_event_t{ + typ: yaml_DOCUMENT_START_EVENT, + start_mark: start_mark, + end_mark: end_mark, + version_directive: version_directive, + tag_directives: tag_directives, + implicit: false, + } + skip_token(parser) + + } else { + // Parse the stream end. + parser.state = yaml_PARSE_END_STATE + *event = yaml_event_t{ + typ: yaml_STREAM_END_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + } + skip_token(parser) + } + + return true +} + +// Parse the productions: +// explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? 
DOCUMENT-END* +// *********** +// +func yaml_parser_parse_document_content(parser *yaml_parser_t, event *yaml_event_t) bool { + token := peek_token(parser) + if token == nil { + return false + } + if token.typ == yaml_VERSION_DIRECTIVE_TOKEN || + token.typ == yaml_TAG_DIRECTIVE_TOKEN || + token.typ == yaml_DOCUMENT_START_TOKEN || + token.typ == yaml_DOCUMENT_END_TOKEN || + token.typ == yaml_STREAM_END_TOKEN { + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + return yaml_parser_process_empty_scalar(parser, event, + token.start_mark) + } + return yaml_parser_parse_node(parser, event, true, false) +} + +// Parse the productions: +// implicit_document ::= block_node DOCUMENT-END* +// ************* +// explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END* +// +func yaml_parser_parse_document_end(parser *yaml_parser_t, event *yaml_event_t) bool { + token := peek_token(parser) + if token == nil { + return false + } + + start_mark := token.start_mark + end_mark := token.start_mark + + implicit := true + if token.typ == yaml_DOCUMENT_END_TOKEN { + end_mark = token.end_mark + skip_token(parser) + implicit = false + } + + parser.tag_directives = parser.tag_directives[:0] + + parser.state = yaml_PARSE_DOCUMENT_START_STATE + *event = yaml_event_t{ + typ: yaml_DOCUMENT_END_EVENT, + start_mark: start_mark, + end_mark: end_mark, + implicit: implicit, + } + return true +} + +// Parse the productions: +// block_node_or_indentless_sequence ::= +// ALIAS +// ***** +// | properties (block_content | indentless_block_sequence)? +// ********** * +// | block_content | indentless_block_sequence +// * +// block_node ::= ALIAS +// ***** +// | properties block_content? +// ********** * +// | block_content +// * +// flow_node ::= ALIAS +// ***** +// | properties flow_content? +// ********** * +// | flow_content +// * +// properties ::= TAG ANCHOR? | ANCHOR TAG? 
+// ************************* +// block_content ::= block_collection | flow_collection | SCALAR +// ****** +// flow_content ::= flow_collection | SCALAR +// ****** +func yaml_parser_parse_node(parser *yaml_parser_t, event *yaml_event_t, block, indentless_sequence bool) bool { + //defer trace("yaml_parser_parse_node", "block:", block, "indentless_sequence:", indentless_sequence)() + + token := peek_token(parser) + if token == nil { + return false + } + + if token.typ == yaml_ALIAS_TOKEN { + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + *event = yaml_event_t{ + typ: yaml_ALIAS_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + anchor: token.value, + } + skip_token(parser) + return true + } + + start_mark := token.start_mark + end_mark := token.start_mark + + var tag_token bool + var tag_handle, tag_suffix, anchor []byte + var tag_mark yaml_mark_t + if token.typ == yaml_ANCHOR_TOKEN { + anchor = token.value + start_mark = token.start_mark + end_mark = token.end_mark + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ == yaml_TAG_TOKEN { + tag_token = true + tag_handle = token.value + tag_suffix = token.suffix + tag_mark = token.start_mark + end_mark = token.end_mark + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + } + } else if token.typ == yaml_TAG_TOKEN { + tag_token = true + tag_handle = token.value + tag_suffix = token.suffix + start_mark = token.start_mark + tag_mark = token.start_mark + end_mark = token.end_mark + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ == yaml_ANCHOR_TOKEN { + anchor = token.value + end_mark = token.end_mark + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + } + } + + var tag []byte + if tag_token { + if len(tag_handle) == 0 { + tag = tag_suffix + tag_suffix = nil + } else 
{ + for i := range parser.tag_directives { + if bytes.Equal(parser.tag_directives[i].handle, tag_handle) { + tag = append([]byte(nil), parser.tag_directives[i].prefix...) + tag = append(tag, tag_suffix...) + break + } + } + if len(tag) == 0 { + yaml_parser_set_parser_error_context(parser, + "while parsing a node", start_mark, + "found undefined tag handle", tag_mark) + return false + } + } + } + + implicit := len(tag) == 0 + if indentless_sequence && token.typ == yaml_BLOCK_ENTRY_TOKEN { + end_mark = token.end_mark + parser.state = yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE + *event = yaml_event_t{ + typ: yaml_SEQUENCE_START_EVENT, + start_mark: start_mark, + end_mark: end_mark, + anchor: anchor, + tag: tag, + implicit: implicit, + style: yaml_style_t(yaml_BLOCK_SEQUENCE_STYLE), + } + return true + } + if token.typ == yaml_SCALAR_TOKEN { + var plain_implicit, quoted_implicit bool + end_mark = token.end_mark + if (len(tag) == 0 && token.style == yaml_PLAIN_SCALAR_STYLE) || (len(tag) == 1 && tag[0] == '!') { + plain_implicit = true + } else if len(tag) == 0 { + quoted_implicit = true + } + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + + *event = yaml_event_t{ + typ: yaml_SCALAR_EVENT, + start_mark: start_mark, + end_mark: end_mark, + anchor: anchor, + tag: tag, + value: token.value, + implicit: plain_implicit, + quoted_implicit: quoted_implicit, + style: yaml_style_t(token.style), + } + skip_token(parser) + return true + } + if token.typ == yaml_FLOW_SEQUENCE_START_TOKEN { + // [Go] Some of the events below can be merged as they differ only on style. 
+ end_mark = token.end_mark + parser.state = yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE + *event = yaml_event_t{ + typ: yaml_SEQUENCE_START_EVENT, + start_mark: start_mark, + end_mark: end_mark, + anchor: anchor, + tag: tag, + implicit: implicit, + style: yaml_style_t(yaml_FLOW_SEQUENCE_STYLE), + } + return true + } + if token.typ == yaml_FLOW_MAPPING_START_TOKEN { + end_mark = token.end_mark + parser.state = yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE + *event = yaml_event_t{ + typ: yaml_MAPPING_START_EVENT, + start_mark: start_mark, + end_mark: end_mark, + anchor: anchor, + tag: tag, + implicit: implicit, + style: yaml_style_t(yaml_FLOW_MAPPING_STYLE), + } + return true + } + if block && token.typ == yaml_BLOCK_SEQUENCE_START_TOKEN { + end_mark = token.end_mark + parser.state = yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE + *event = yaml_event_t{ + typ: yaml_SEQUENCE_START_EVENT, + start_mark: start_mark, + end_mark: end_mark, + anchor: anchor, + tag: tag, + implicit: implicit, + style: yaml_style_t(yaml_BLOCK_SEQUENCE_STYLE), + } + return true + } + if block && token.typ == yaml_BLOCK_MAPPING_START_TOKEN { + end_mark = token.end_mark + parser.state = yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE + *event = yaml_event_t{ + typ: yaml_MAPPING_START_EVENT, + start_mark: start_mark, + end_mark: end_mark, + anchor: anchor, + tag: tag, + implicit: implicit, + style: yaml_style_t(yaml_BLOCK_MAPPING_STYLE), + } + return true + } + if len(anchor) > 0 || len(tag) > 0 { + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + + *event = yaml_event_t{ + typ: yaml_SCALAR_EVENT, + start_mark: start_mark, + end_mark: end_mark, + anchor: anchor, + tag: tag, + implicit: implicit, + quoted_implicit: false, + style: yaml_style_t(yaml_PLAIN_SCALAR_STYLE), + } + return true + } + + context := "while parsing a flow node" + if block { + context = "while parsing a block node" + } + yaml_parser_set_parser_error_context(parser, context, 
start_mark, + "did not find expected node content", token.start_mark) + return false +} + +// Parse the productions: +// block_sequence ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)* BLOCK-END +// ******************** *********** * ********* +// +func yaml_parser_parse_block_sequence_entry(parser *yaml_parser_t, event *yaml_event_t, first bool) bool { + if first { + token := peek_token(parser) + parser.marks = append(parser.marks, token.start_mark) + skip_token(parser) + } + + token := peek_token(parser) + if token == nil { + return false + } + + if token.typ == yaml_BLOCK_ENTRY_TOKEN { + mark := token.end_mark + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_BLOCK_ENTRY_TOKEN && token.typ != yaml_BLOCK_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE) + return yaml_parser_parse_node(parser, event, true, false) + } else { + parser.state = yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE + return yaml_parser_process_empty_scalar(parser, event, mark) + } + } + if token.typ == yaml_BLOCK_END_TOKEN { + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + + *event = yaml_event_t{ + typ: yaml_SEQUENCE_END_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + } + + skip_token(parser) + return true + } + + context_mark := parser.marks[len(parser.marks)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + return yaml_parser_set_parser_error_context(parser, + "while parsing a block collection", context_mark, + "did not find expected '-' indicator", token.start_mark) +} + +// Parse the productions: +// indentless_sequence ::= (BLOCK-ENTRY block_node?)+ +// *********** * +func yaml_parser_parse_indentless_sequence_entry(parser *yaml_parser_t, event *yaml_event_t) bool { + token := peek_token(parser) + if token == nil { + return false + } + + if token.typ == 
yaml_BLOCK_ENTRY_TOKEN { + mark := token.end_mark + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_BLOCK_ENTRY_TOKEN && + token.typ != yaml_KEY_TOKEN && + token.typ != yaml_VALUE_TOKEN && + token.typ != yaml_BLOCK_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE) + return yaml_parser_parse_node(parser, event, true, false) + } + parser.state = yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE + return yaml_parser_process_empty_scalar(parser, event, mark) + } + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + + *event = yaml_event_t{ + typ: yaml_SEQUENCE_END_EVENT, + start_mark: token.start_mark, + end_mark: token.start_mark, // [Go] Shouldn't this be token.end_mark? + } + return true +} + +// Parse the productions: +// block_mapping ::= BLOCK-MAPPING_START +// ******************* +// ((KEY block_node_or_indentless_sequence?)? +// *** * +// (VALUE block_node_or_indentless_sequence?)?)* +// +// BLOCK-END +// ********* +// +func yaml_parser_parse_block_mapping_key(parser *yaml_parser_t, event *yaml_event_t, first bool) bool { + if first { + token := peek_token(parser) + parser.marks = append(parser.marks, token.start_mark) + skip_token(parser) + } + + token := peek_token(parser) + if token == nil { + return false + } + + if token.typ == yaml_KEY_TOKEN { + mark := token.end_mark + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_KEY_TOKEN && + token.typ != yaml_VALUE_TOKEN && + token.typ != yaml_BLOCK_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_BLOCK_MAPPING_VALUE_STATE) + return yaml_parser_parse_node(parser, event, true, true) + } else { + parser.state = yaml_PARSE_BLOCK_MAPPING_VALUE_STATE + return yaml_parser_process_empty_scalar(parser, event, mark) + } + } else if token.typ == yaml_BLOCK_END_TOKEN { + parser.state = 
parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + *event = yaml_event_t{ + typ: yaml_MAPPING_END_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + } + skip_token(parser) + return true + } + + context_mark := parser.marks[len(parser.marks)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + return yaml_parser_set_parser_error_context(parser, + "while parsing a block mapping", context_mark, + "did not find expected key", token.start_mark) +} + +// Parse the productions: +// block_mapping ::= BLOCK-MAPPING_START +// +// ((KEY block_node_or_indentless_sequence?)? +// +// (VALUE block_node_or_indentless_sequence?)?)* +// ***** * +// BLOCK-END +// +// +func yaml_parser_parse_block_mapping_value(parser *yaml_parser_t, event *yaml_event_t) bool { + token := peek_token(parser) + if token == nil { + return false + } + if token.typ == yaml_VALUE_TOKEN { + mark := token.end_mark + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_KEY_TOKEN && + token.typ != yaml_VALUE_TOKEN && + token.typ != yaml_BLOCK_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_BLOCK_MAPPING_KEY_STATE) + return yaml_parser_parse_node(parser, event, true, true) + } + parser.state = yaml_PARSE_BLOCK_MAPPING_KEY_STATE + return yaml_parser_process_empty_scalar(parser, event, mark) + } + parser.state = yaml_PARSE_BLOCK_MAPPING_KEY_STATE + return yaml_parser_process_empty_scalar(parser, event, token.start_mark) +} + +// Parse the productions: +// flow_sequence ::= FLOW-SEQUENCE-START +// ******************* +// (flow_sequence_entry FLOW-ENTRY)* +// * ********** +// flow_sequence_entry? +// * +// FLOW-SEQUENCE-END +// ***************** +// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? 
+// * +// +func yaml_parser_parse_flow_sequence_entry(parser *yaml_parser_t, event *yaml_event_t, first bool) bool { + if first { + token := peek_token(parser) + parser.marks = append(parser.marks, token.start_mark) + skip_token(parser) + } + token := peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_FLOW_SEQUENCE_END_TOKEN { + if !first { + if token.typ == yaml_FLOW_ENTRY_TOKEN { + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + } else { + context_mark := parser.marks[len(parser.marks)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + return yaml_parser_set_parser_error_context(parser, + "while parsing a flow sequence", context_mark, + "did not find expected ',' or ']'", token.start_mark) + } + } + + if token.typ == yaml_KEY_TOKEN { + parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE + *event = yaml_event_t{ + typ: yaml_MAPPING_START_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + implicit: true, + style: yaml_style_t(yaml_FLOW_MAPPING_STYLE), + } + skip_token(parser) + return true + } else if token.typ != yaml_FLOW_SEQUENCE_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE) + return yaml_parser_parse_node(parser, event, false, false) + } + } + + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + + *event = yaml_event_t{ + typ: yaml_SEQUENCE_END_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + } + + skip_token(parser) + return true +} + +// +// Parse the productions: +// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? 
+// *** * +// +func yaml_parser_parse_flow_sequence_entry_mapping_key(parser *yaml_parser_t, event *yaml_event_t) bool { + token := peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_VALUE_TOKEN && + token.typ != yaml_FLOW_ENTRY_TOKEN && + token.typ != yaml_FLOW_SEQUENCE_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE) + return yaml_parser_parse_node(parser, event, false, false) + } + mark := token.end_mark + skip_token(parser) + parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE + return yaml_parser_process_empty_scalar(parser, event, mark) +} + +// Parse the productions: +// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? +// ***** * +// +func yaml_parser_parse_flow_sequence_entry_mapping_value(parser *yaml_parser_t, event *yaml_event_t) bool { + token := peek_token(parser) + if token == nil { + return false + } + if token.typ == yaml_VALUE_TOKEN { + skip_token(parser) + token := peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_FLOW_ENTRY_TOKEN && token.typ != yaml_FLOW_SEQUENCE_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE) + return yaml_parser_parse_node(parser, event, false, false) + } + } + parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE + return yaml_parser_process_empty_scalar(parser, event, token.start_mark) +} + +// Parse the productions: +// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? +// * +// +func yaml_parser_parse_flow_sequence_entry_mapping_end(parser *yaml_parser_t, event *yaml_event_t) bool { + token := peek_token(parser) + if token == nil { + return false + } + parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE + *event = yaml_event_t{ + typ: yaml_MAPPING_END_EVENT, + start_mark: token.start_mark, + end_mark: token.start_mark, // [Go] Shouldn't this be end_mark? 
+ } + return true +} + +// Parse the productions: +// flow_mapping ::= FLOW-MAPPING-START +// ****************** +// (flow_mapping_entry FLOW-ENTRY)* +// * ********** +// flow_mapping_entry? +// ****************** +// FLOW-MAPPING-END +// **************** +// flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? +// * *** * +// +func yaml_parser_parse_flow_mapping_key(parser *yaml_parser_t, event *yaml_event_t, first bool) bool { + if first { + token := peek_token(parser) + parser.marks = append(parser.marks, token.start_mark) + skip_token(parser) + } + + token := peek_token(parser) + if token == nil { + return false + } + + if token.typ != yaml_FLOW_MAPPING_END_TOKEN { + if !first { + if token.typ == yaml_FLOW_ENTRY_TOKEN { + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + } else { + context_mark := parser.marks[len(parser.marks)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + return yaml_parser_set_parser_error_context(parser, + "while parsing a flow mapping", context_mark, + "did not find expected ',' or '}'", token.start_mark) + } + } + + if token.typ == yaml_KEY_TOKEN { + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_VALUE_TOKEN && + token.typ != yaml_FLOW_ENTRY_TOKEN && + token.typ != yaml_FLOW_MAPPING_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_FLOW_MAPPING_VALUE_STATE) + return yaml_parser_parse_node(parser, event, false, false) + } else { + parser.state = yaml_PARSE_FLOW_MAPPING_VALUE_STATE + return yaml_parser_process_empty_scalar(parser, event, token.start_mark) + } + } else if token.typ != yaml_FLOW_MAPPING_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE) + return yaml_parser_parse_node(parser, event, false, false) + } + } + + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + parser.marks = 
parser.marks[:len(parser.marks)-1] + *event = yaml_event_t{ + typ: yaml_MAPPING_END_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + } + skip_token(parser) + return true +} + +// Parse the productions: +// flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? +// * ***** * +// +func yaml_parser_parse_flow_mapping_value(parser *yaml_parser_t, event *yaml_event_t, empty bool) bool { + token := peek_token(parser) + if token == nil { + return false + } + if empty { + parser.state = yaml_PARSE_FLOW_MAPPING_KEY_STATE + return yaml_parser_process_empty_scalar(parser, event, token.start_mark) + } + if token.typ == yaml_VALUE_TOKEN { + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_FLOW_ENTRY_TOKEN && token.typ != yaml_FLOW_MAPPING_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_FLOW_MAPPING_KEY_STATE) + return yaml_parser_parse_node(parser, event, false, false) + } + } + parser.state = yaml_PARSE_FLOW_MAPPING_KEY_STATE + return yaml_parser_process_empty_scalar(parser, event, token.start_mark) +} + +// Generate an empty scalar event. +func yaml_parser_process_empty_scalar(parser *yaml_parser_t, event *yaml_event_t, mark yaml_mark_t) bool { + *event = yaml_event_t{ + typ: yaml_SCALAR_EVENT, + start_mark: mark, + end_mark: mark, + value: nil, // Empty + implicit: true, + style: yaml_style_t(yaml_PLAIN_SCALAR_STYLE), + } + return true +} + +var default_tag_directives = []yaml_tag_directive_t{ + {[]byte("!"), []byte("!")}, + {[]byte("!!"), []byte("tag:yaml.org,2002:")}, +} + +// Parse directives. 
+func yaml_parser_process_directives(parser *yaml_parser_t, + version_directive_ref **yaml_version_directive_t, + tag_directives_ref *[]yaml_tag_directive_t) bool { + + var version_directive *yaml_version_directive_t + var tag_directives []yaml_tag_directive_t + + token := peek_token(parser) + if token == nil { + return false + } + + for token.typ == yaml_VERSION_DIRECTIVE_TOKEN || token.typ == yaml_TAG_DIRECTIVE_TOKEN { + if token.typ == yaml_VERSION_DIRECTIVE_TOKEN { + if version_directive != nil { + yaml_parser_set_parser_error(parser, + "found duplicate %YAML directive", token.start_mark) + return false + } + if token.major != 1 || token.minor != 1 { + yaml_parser_set_parser_error(parser, + "found incompatible YAML document", token.start_mark) + return false + } + version_directive = &yaml_version_directive_t{ + major: token.major, + minor: token.minor, + } + } else if token.typ == yaml_TAG_DIRECTIVE_TOKEN { + value := yaml_tag_directive_t{ + handle: token.value, + prefix: token.prefix, + } + if !yaml_parser_append_tag_directive(parser, value, false, token.start_mark) { + return false + } + tag_directives = append(tag_directives, value) + } + + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + } + + for i := range default_tag_directives { + if !yaml_parser_append_tag_directive(parser, default_tag_directives[i], true, token.start_mark) { + return false + } + } + + if version_directive_ref != nil { + *version_directive_ref = version_directive + } + if tag_directives_ref != nil { + *tag_directives_ref = tag_directives + } + return true +} + +// Append a tag directive to the directives stack. 
+func yaml_parser_append_tag_directive(parser *yaml_parser_t, value yaml_tag_directive_t, allow_duplicates bool, mark yaml_mark_t) bool { + for i := range parser.tag_directives { + if bytes.Equal(value.handle, parser.tag_directives[i].handle) { + if allow_duplicates { + return true + } + return yaml_parser_set_parser_error(parser, "found duplicate %TAG directive", mark) + } + } + + // [Go] I suspect the copy is unnecessary. This was likely done + // because there was no way to track ownership of the data. + value_copy := yaml_tag_directive_t{ + handle: make([]byte, len(value.handle)), + prefix: make([]byte, len(value.prefix)), + } + copy(value_copy.handle, value.handle) + copy(value_copy.prefix, value.prefix) + parser.tag_directives = append(parser.tag_directives, value_copy) + return true +} diff --git a/vendor/gopkg.in/yaml.v2/readerc.go b/vendor/gopkg.in/yaml.v2/readerc.go new file mode 100644 index 000000000..f45079171 --- /dev/null +++ b/vendor/gopkg.in/yaml.v2/readerc.go @@ -0,0 +1,394 @@ +package yaml + +import ( + "io" +) + +// Set the reader error and return 0. +func yaml_parser_set_reader_error(parser *yaml_parser_t, problem string, offset int, value int) bool { + parser.error = yaml_READER_ERROR + parser.problem = problem + parser.problem_offset = offset + parser.problem_value = value + return false +} + +// Byte order marks. +const ( + bom_UTF8 = "\xef\xbb\xbf" + bom_UTF16LE = "\xff\xfe" + bom_UTF16BE = "\xfe\xff" +) + +// Determine the input stream encoding by checking the BOM symbol. If no BOM is +// found, the UTF-8 encoding is assumed. Return 1 on success, 0 on failure. +func yaml_parser_determine_encoding(parser *yaml_parser_t) bool { + // Ensure that we had enough bytes in the raw buffer. + for !parser.eof && len(parser.raw_buffer)-parser.raw_buffer_pos < 3 { + if !yaml_parser_update_raw_buffer(parser) { + return false + } + } + + // Determine the encoding. 
+ buf := parser.raw_buffer + pos := parser.raw_buffer_pos + avail := len(buf) - pos + if avail >= 2 && buf[pos] == bom_UTF16LE[0] && buf[pos+1] == bom_UTF16LE[1] { + parser.encoding = yaml_UTF16LE_ENCODING + parser.raw_buffer_pos += 2 + parser.offset += 2 + } else if avail >= 2 && buf[pos] == bom_UTF16BE[0] && buf[pos+1] == bom_UTF16BE[1] { + parser.encoding = yaml_UTF16BE_ENCODING + parser.raw_buffer_pos += 2 + parser.offset += 2 + } else if avail >= 3 && buf[pos] == bom_UTF8[0] && buf[pos+1] == bom_UTF8[1] && buf[pos+2] == bom_UTF8[2] { + parser.encoding = yaml_UTF8_ENCODING + parser.raw_buffer_pos += 3 + parser.offset += 3 + } else { + parser.encoding = yaml_UTF8_ENCODING + } + return true +} + +// Update the raw buffer. +func yaml_parser_update_raw_buffer(parser *yaml_parser_t) bool { + size_read := 0 + + // Return if the raw buffer is full. + if parser.raw_buffer_pos == 0 && len(parser.raw_buffer) == cap(parser.raw_buffer) { + return true + } + + // Return on EOF. + if parser.eof { + return true + } + + // Move the remaining bytes in the raw buffer to the beginning. + if parser.raw_buffer_pos > 0 && parser.raw_buffer_pos < len(parser.raw_buffer) { + copy(parser.raw_buffer, parser.raw_buffer[parser.raw_buffer_pos:]) + } + parser.raw_buffer = parser.raw_buffer[:len(parser.raw_buffer)-parser.raw_buffer_pos] + parser.raw_buffer_pos = 0 + + // Call the read handler to fill the buffer. + size_read, err := parser.read_handler(parser, parser.raw_buffer[len(parser.raw_buffer):cap(parser.raw_buffer)]) + parser.raw_buffer = parser.raw_buffer[:len(parser.raw_buffer)+size_read] + if err == io.EOF { + parser.eof = true + } else if err != nil { + return yaml_parser_set_reader_error(parser, "input error: "+err.Error(), parser.offset, -1) + } + return true +} + +// Ensure that the buffer contains at least `length` characters. +// Return true on success, false on failure. +// +// The length is supposed to be significantly less that the buffer size. 
+func yaml_parser_update_buffer(parser *yaml_parser_t, length int) bool { + if parser.read_handler == nil { + panic("read handler must be set") + } + + // If the EOF flag is set and the raw buffer is empty, do nothing. + if parser.eof && parser.raw_buffer_pos == len(parser.raw_buffer) { + return true + } + + // Return if the buffer contains enough characters. + if parser.unread >= length { + return true + } + + // Determine the input encoding if it is not known yet. + if parser.encoding == yaml_ANY_ENCODING { + if !yaml_parser_determine_encoding(parser) { + return false + } + } + + // Move the unread characters to the beginning of the buffer. + buffer_len := len(parser.buffer) + if parser.buffer_pos > 0 && parser.buffer_pos < buffer_len { + copy(parser.buffer, parser.buffer[parser.buffer_pos:]) + buffer_len -= parser.buffer_pos + parser.buffer_pos = 0 + } else if parser.buffer_pos == buffer_len { + buffer_len = 0 + parser.buffer_pos = 0 + } + + // Open the whole buffer for writing, and cut it before returning. + parser.buffer = parser.buffer[:cap(parser.buffer)] + + // Fill the buffer until it has enough characters. + first := true + for parser.unread < length { + + // Fill the raw buffer if necessary. + if !first || parser.raw_buffer_pos == len(parser.raw_buffer) { + if !yaml_parser_update_raw_buffer(parser) { + parser.buffer = parser.buffer[:buffer_len] + return false + } + } + first = false + + // Decode the raw buffer. + inner: + for parser.raw_buffer_pos != len(parser.raw_buffer) { + var value rune + var width int + + raw_unread := len(parser.raw_buffer) - parser.raw_buffer_pos + + // Decode the next character. + switch parser.encoding { + case yaml_UTF8_ENCODING: + // Decode a UTF-8 character. Check RFC 3629 + // (http://www.ietf.org/rfc/rfc3629.txt) for more details. + // + // The following table (taken from the RFC) is used for + // decoding. + // + // Char. 
number range | UTF-8 octet sequence + // (hexadecimal) | (binary) + // --------------------+------------------------------------ + // 0000 0000-0000 007F | 0xxxxxxx + // 0000 0080-0000 07FF | 110xxxxx 10xxxxxx + // 0000 0800-0000 FFFF | 1110xxxx 10xxxxxx 10xxxxxx + // 0001 0000-0010 FFFF | 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx + // + // Additionally, the characters in the range 0xD800-0xDFFF + // are prohibited as they are reserved for use with UTF-16 + // surrogate pairs. + + // Determine the length of the UTF-8 sequence. + octet := parser.raw_buffer[parser.raw_buffer_pos] + switch { + case octet&0x80 == 0x00: + width = 1 + case octet&0xE0 == 0xC0: + width = 2 + case octet&0xF0 == 0xE0: + width = 3 + case octet&0xF8 == 0xF0: + width = 4 + default: + // The leading octet is invalid. + return yaml_parser_set_reader_error(parser, + "invalid leading UTF-8 octet", + parser.offset, int(octet)) + } + + // Check if the raw buffer contains an incomplete character. + if width > raw_unread { + if parser.eof { + return yaml_parser_set_reader_error(parser, + "incomplete UTF-8 octet sequence", + parser.offset, -1) + } + break inner + } + + // Decode the leading octet. + switch { + case octet&0x80 == 0x00: + value = rune(octet & 0x7F) + case octet&0xE0 == 0xC0: + value = rune(octet & 0x1F) + case octet&0xF0 == 0xE0: + value = rune(octet & 0x0F) + case octet&0xF8 == 0xF0: + value = rune(octet & 0x07) + default: + value = 0 + } + + // Check and decode the trailing octets. + for k := 1; k < width; k++ { + octet = parser.raw_buffer[parser.raw_buffer_pos+k] + + // Check if the octet is valid. + if (octet & 0xC0) != 0x80 { + return yaml_parser_set_reader_error(parser, + "invalid trailing UTF-8 octet", + parser.offset+k, int(octet)) + } + + // Decode the octet. + value = (value << 6) + rune(octet&0x3F) + } + + // Check the length of the sequence against the value. 
+ switch { + case width == 1: + case width == 2 && value >= 0x80: + case width == 3 && value >= 0x800: + case width == 4 && value >= 0x10000: + default: + return yaml_parser_set_reader_error(parser, + "invalid length of a UTF-8 sequence", + parser.offset, -1) + } + + // Check the range of the value. + if value >= 0xD800 && value <= 0xDFFF || value > 0x10FFFF { + return yaml_parser_set_reader_error(parser, + "invalid Unicode character", + parser.offset, int(value)) + } + + case yaml_UTF16LE_ENCODING, yaml_UTF16BE_ENCODING: + var low, high int + if parser.encoding == yaml_UTF16LE_ENCODING { + low, high = 0, 1 + } else { + low, high = 1, 0 + } + + // The UTF-16 encoding is not as simple as one might + // naively think. Check RFC 2781 + // (http://www.ietf.org/rfc/rfc2781.txt). + // + // Normally, two subsequent bytes describe a Unicode + // character. However a special technique (called a + // surrogate pair) is used for specifying character + // values larger than 0xFFFF. + // + // A surrogate pair consists of two pseudo-characters: + // high surrogate area (0xD800-0xDBFF) + // low surrogate area (0xDC00-0xDFFF) + // + // The following formulas are used for decoding + // and encoding characters using surrogate pairs: + // + // U = U' + 0x10000 (0x01 00 00 <= U <= 0x10 FF FF) + // U' = yyyyyyyyyyxxxxxxxxxx (0 <= U' <= 0x0F FF FF) + // W1 = 110110yyyyyyyyyy + // W2 = 110111xxxxxxxxxx + // + // where U is the character value, W1 is the high surrogate + // area, W2 is the low surrogate area. + + // Check for incomplete UTF-16 character. + if raw_unread < 2 { + if parser.eof { + return yaml_parser_set_reader_error(parser, + "incomplete UTF-16 character", + parser.offset, -1) + } + break inner + } + + // Get the character. + value = rune(parser.raw_buffer[parser.raw_buffer_pos+low]) + + (rune(parser.raw_buffer[parser.raw_buffer_pos+high]) << 8) + + // Check for unexpected low surrogate area. 
+ if value&0xFC00 == 0xDC00 { + return yaml_parser_set_reader_error(parser, + "unexpected low surrogate area", + parser.offset, int(value)) + } + + // Check for a high surrogate area. + if value&0xFC00 == 0xD800 { + width = 4 + + // Check for incomplete surrogate pair. + if raw_unread < 4 { + if parser.eof { + return yaml_parser_set_reader_error(parser, + "incomplete UTF-16 surrogate pair", + parser.offset, -1) + } + break inner + } + + // Get the next character. + value2 := rune(parser.raw_buffer[parser.raw_buffer_pos+low+2]) + + (rune(parser.raw_buffer[parser.raw_buffer_pos+high+2]) << 8) + + // Check for a low surrogate area. + if value2&0xFC00 != 0xDC00 { + return yaml_parser_set_reader_error(parser, + "expected low surrogate area", + parser.offset+2, int(value2)) + } + + // Generate the value of the surrogate pair. + value = 0x10000 + ((value & 0x3FF) << 10) + (value2 & 0x3FF) + } else { + width = 2 + } + + default: + panic("impossible") + } + + // Check if the character is in the allowed range: + // #x9 | #xA | #xD | [#x20-#x7E] (8 bit) + // | #x85 | [#xA0-#xD7FF] | [#xE000-#xFFFD] (16 bit) + // | [#x10000-#x10FFFF] (32 bit) + switch { + case value == 0x09: + case value == 0x0A: + case value == 0x0D: + case value >= 0x20 && value <= 0x7E: + case value == 0x85: + case value >= 0xA0 && value <= 0xD7FF: + case value >= 0xE000 && value <= 0xFFFD: + case value >= 0x10000 && value <= 0x10FFFF: + default: + return yaml_parser_set_reader_error(parser, + "control characters are not allowed", + parser.offset, int(value)) + } + + // Move the raw pointers. + parser.raw_buffer_pos += width + parser.offset += width + + // Finally put the character into the buffer. + if value <= 0x7F { + // 0000 0000-0000 007F . 0xxxxxxx + parser.buffer[buffer_len+0] = byte(value) + buffer_len += 1 + } else if value <= 0x7FF { + // 0000 0080-0000 07FF . 
110xxxxx 10xxxxxx + parser.buffer[buffer_len+0] = byte(0xC0 + (value >> 6)) + parser.buffer[buffer_len+1] = byte(0x80 + (value & 0x3F)) + buffer_len += 2 + } else if value <= 0xFFFF { + // 0000 0800-0000 FFFF . 1110xxxx 10xxxxxx 10xxxxxx + parser.buffer[buffer_len+0] = byte(0xE0 + (value >> 12)) + parser.buffer[buffer_len+1] = byte(0x80 + ((value >> 6) & 0x3F)) + parser.buffer[buffer_len+2] = byte(0x80 + (value & 0x3F)) + buffer_len += 3 + } else { + // 0001 0000-0010 FFFF . 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx + parser.buffer[buffer_len+0] = byte(0xF0 + (value >> 18)) + parser.buffer[buffer_len+1] = byte(0x80 + ((value >> 12) & 0x3F)) + parser.buffer[buffer_len+2] = byte(0x80 + ((value >> 6) & 0x3F)) + parser.buffer[buffer_len+3] = byte(0x80 + (value & 0x3F)) + buffer_len += 4 + } + + parser.unread++ + } + + // On EOF, put NUL into the buffer and return. + if parser.eof { + parser.buffer[buffer_len] = 0 + buffer_len++ + parser.unread++ + break + } + } + parser.buffer = parser.buffer[:buffer_len] + return true +} diff --git a/vendor/gopkg.in/yaml.v2/resolve.go b/vendor/gopkg.in/yaml.v2/resolve.go new file mode 100644 index 000000000..93a863274 --- /dev/null +++ b/vendor/gopkg.in/yaml.v2/resolve.go @@ -0,0 +1,203 @@ +package yaml + +import ( + "encoding/base64" + "math" + "strconv" + "strings" + "unicode/utf8" +) + +type resolveMapItem struct { + value interface{} + tag string +} + +var resolveTable = make([]byte, 256) +var resolveMap = make(map[string]resolveMapItem) + +func init() { + t := resolveTable + t[int('+')] = 'S' // Sign + t[int('-')] = 'S' + for _, c := range "0123456789" { + t[int(c)] = 'D' // Digit + } + for _, c := range "yYnNtTfFoO~" { + t[int(c)] = 'M' // In map + } + t[int('.')] = '.' 
// Float (potentially in map) + + var resolveMapList = []struct { + v interface{} + tag string + l []string + }{ + {true, yaml_BOOL_TAG, []string{"y", "Y", "yes", "Yes", "YES"}}, + {true, yaml_BOOL_TAG, []string{"true", "True", "TRUE"}}, + {true, yaml_BOOL_TAG, []string{"on", "On", "ON"}}, + {false, yaml_BOOL_TAG, []string{"n", "N", "no", "No", "NO"}}, + {false, yaml_BOOL_TAG, []string{"false", "False", "FALSE"}}, + {false, yaml_BOOL_TAG, []string{"off", "Off", "OFF"}}, + {nil, yaml_NULL_TAG, []string{"", "~", "null", "Null", "NULL"}}, + {math.NaN(), yaml_FLOAT_TAG, []string{".nan", ".NaN", ".NAN"}}, + {math.Inf(+1), yaml_FLOAT_TAG, []string{".inf", ".Inf", ".INF"}}, + {math.Inf(+1), yaml_FLOAT_TAG, []string{"+.inf", "+.Inf", "+.INF"}}, + {math.Inf(-1), yaml_FLOAT_TAG, []string{"-.inf", "-.Inf", "-.INF"}}, + {"<<", yaml_MERGE_TAG, []string{"<<"}}, + } + + m := resolveMap + for _, item := range resolveMapList { + for _, s := range item.l { + m[s] = resolveMapItem{item.v, item.tag} + } + } +} + +const longTagPrefix = "tag:yaml.org,2002:" + +func shortTag(tag string) string { + // TODO This can easily be made faster and produce less garbage. + if strings.HasPrefix(tag, longTagPrefix) { + return "!!" + tag[len(longTagPrefix):] + } + return tag +} + +func longTag(tag string) string { + if strings.HasPrefix(tag, "!!") { + return longTagPrefix + tag[2:] + } + return tag +} + +func resolvableTag(tag string) bool { + switch tag { + case "", yaml_STR_TAG, yaml_BOOL_TAG, yaml_INT_TAG, yaml_FLOAT_TAG, yaml_NULL_TAG: + return true + } + return false +} + +func resolve(tag string, in string) (rtag string, out interface{}) { + if !resolvableTag(tag) { + return tag, in + } + + defer func() { + switch tag { + case "", rtag, yaml_STR_TAG, yaml_BINARY_TAG: + return + } + failf("cannot decode %s `%s` as a %s", shortTag(rtag), in, shortTag(tag)) + }() + + // Any data is accepted as a !!str or !!binary. + // Otherwise, the prefix is enough of a hint about what it might be. 
+ hint := byte('N') + if in != "" { + hint = resolveTable[in[0]] + } + if hint != 0 && tag != yaml_STR_TAG && tag != yaml_BINARY_TAG { + // Handle things we can lookup in a map. + if item, ok := resolveMap[in]; ok { + return item.tag, item.value + } + + // Base 60 floats are a bad idea, were dropped in YAML 1.2, and + // are purposefully unsupported here. They're still quoted on + // the way out for compatibility with other parser, though. + + switch hint { + case 'M': + // We've already checked the map above. + + case '.': + // Not in the map, so maybe a normal float. + floatv, err := strconv.ParseFloat(in, 64) + if err == nil { + return yaml_FLOAT_TAG, floatv + } + + case 'D', 'S': + // Int, float, or timestamp. + plain := strings.Replace(in, "_", "", -1) + intv, err := strconv.ParseInt(plain, 0, 64) + if err == nil { + if intv == int64(int(intv)) { + return yaml_INT_TAG, int(intv) + } else { + return yaml_INT_TAG, intv + } + } + uintv, err := strconv.ParseUint(plain, 0, 64) + if err == nil { + return yaml_INT_TAG, uintv + } + floatv, err := strconv.ParseFloat(plain, 64) + if err == nil { + return yaml_FLOAT_TAG, floatv + } + if strings.HasPrefix(plain, "0b") { + intv, err := strconv.ParseInt(plain[2:], 2, 64) + if err == nil { + if intv == int64(int(intv)) { + return yaml_INT_TAG, int(intv) + } else { + return yaml_INT_TAG, intv + } + } + uintv, err := strconv.ParseUint(plain[2:], 2, 64) + if err == nil { + return yaml_INT_TAG, uintv + } + } else if strings.HasPrefix(plain, "-0b") { + intv, err := strconv.ParseInt(plain[3:], 2, 64) + if err == nil { + if intv == int64(int(intv)) { + return yaml_INT_TAG, -int(intv) + } else { + return yaml_INT_TAG, -intv + } + } + } + // XXX Handle timestamps here. 
+ + default: + panic("resolveTable item not yet handled: " + string(rune(hint)) + " (with " + in + ")") + } + } + if tag == yaml_BINARY_TAG { + return yaml_BINARY_TAG, in + } + if utf8.ValidString(in) { + return yaml_STR_TAG, in + } + return yaml_BINARY_TAG, encodeBase64(in) +} + +// encodeBase64 encodes s as base64 that is broken up into multiple lines +// as appropriate for the resulting length. +func encodeBase64(s string) string { + const lineLen = 70 + encLen := base64.StdEncoding.EncodedLen(len(s)) + lines := encLen/lineLen + 1 + buf := make([]byte, encLen*2+lines) + in := buf[0:encLen] + out := buf[encLen:] + base64.StdEncoding.Encode(in, []byte(s)) + k := 0 + for i := 0; i < len(in); i += lineLen { + j := i + lineLen + if j > len(in) { + j = len(in) + } + k += copy(out[k:], in[i:j]) + if lines > 1 { + out[k] = '\n' + k++ + } + } + return string(out[:k]) +} diff --git a/vendor/gopkg.in/yaml.v2/scannerc.go b/vendor/gopkg.in/yaml.v2/scannerc.go new file mode 100644 index 000000000..25808000f --- /dev/null +++ b/vendor/gopkg.in/yaml.v2/scannerc.go @@ -0,0 +1,2710 @@ +package yaml + +import ( + "bytes" + "fmt" +) + +// Introduction +// ************ +// +// The following notes assume that you are familiar with the YAML specification +// (http://yaml.org/spec/cvs/current.html). We mostly follow it, although in +// some cases we are less restrictive that it requires. +// +// The process of transforming a YAML stream into a sequence of events is +// divided on two steps: Scanning and Parsing. +// +// The Scanner transforms the input stream into a sequence of tokens, while the +// parser transform the sequence of tokens produced by the Scanner into a +// sequence of parsing events. +// +// The Scanner is rather clever and complicated. The Parser, on the contrary, +// is a straightforward implementation of a recursive-descendant parser (or, +// LL(1) parser, as it is usually called). 
+// +// Actually there are two issues of Scanning that might be called "clever", the +// rest is quite straightforward. The issues are "block collection start" and +// "simple keys". Both issues are explained below in details. +// +// Here the Scanning step is explained and implemented. We start with the list +// of all the tokens produced by the Scanner together with short descriptions. +// +// Now, tokens: +// +// STREAM-START(encoding) # The stream start. +// STREAM-END # The stream end. +// VERSION-DIRECTIVE(major,minor) # The '%YAML' directive. +// TAG-DIRECTIVE(handle,prefix) # The '%TAG' directive. +// DOCUMENT-START # '---' +// DOCUMENT-END # '...' +// BLOCK-SEQUENCE-START # Indentation increase denoting a block +// BLOCK-MAPPING-START # sequence or a block mapping. +// BLOCK-END # Indentation decrease. +// FLOW-SEQUENCE-START # '[' +// FLOW-SEQUENCE-END # ']' +// BLOCK-SEQUENCE-START # '{' +// BLOCK-SEQUENCE-END # '}' +// BLOCK-ENTRY # '-' +// FLOW-ENTRY # ',' +// KEY # '?' or nothing (simple keys). +// VALUE # ':' +// ALIAS(anchor) # '*anchor' +// ANCHOR(anchor) # '&anchor' +// TAG(handle,suffix) # '!handle!suffix' +// SCALAR(value,style) # A scalar. +// +// The following two tokens are "virtual" tokens denoting the beginning and the +// end of the stream: +// +// STREAM-START(encoding) +// STREAM-END +// +// We pass the information about the input stream encoding with the +// STREAM-START token. +// +// The next two tokens are responsible for tags: +// +// VERSION-DIRECTIVE(major,minor) +// TAG-DIRECTIVE(handle,prefix) +// +// Example: +// +// %YAML 1.1 +// %TAG ! !foo +// %TAG !yaml! tag:yaml.org,2002: +// --- +// +// The correspoding sequence of tokens: +// +// STREAM-START(utf-8) +// VERSION-DIRECTIVE(1,1) +// TAG-DIRECTIVE("!","!foo") +// TAG-DIRECTIVE("!yaml","tag:yaml.org,2002:") +// DOCUMENT-START +// STREAM-END +// +// Note that the VERSION-DIRECTIVE and TAG-DIRECTIVE tokens occupy a whole +// line. 
+// +// The document start and end indicators are represented by: +// +// DOCUMENT-START +// DOCUMENT-END +// +// Note that if a YAML stream contains an implicit document (without '---' +// and '...' indicators), no DOCUMENT-START and DOCUMENT-END tokens will be +// produced. +// +// In the following examples, we present whole documents together with the +// produced tokens. +// +// 1. An implicit document: +// +// 'a scalar' +// +// Tokens: +// +// STREAM-START(utf-8) +// SCALAR("a scalar",single-quoted) +// STREAM-END +// +// 2. An explicit document: +// +// --- +// 'a scalar' +// ... +// +// Tokens: +// +// STREAM-START(utf-8) +// DOCUMENT-START +// SCALAR("a scalar",single-quoted) +// DOCUMENT-END +// STREAM-END +// +// 3. Several documents in a stream: +// +// 'a scalar' +// --- +// 'another scalar' +// --- +// 'yet another scalar' +// +// Tokens: +// +// STREAM-START(utf-8) +// SCALAR("a scalar",single-quoted) +// DOCUMENT-START +// SCALAR("another scalar",single-quoted) +// DOCUMENT-START +// SCALAR("yet another scalar",single-quoted) +// STREAM-END +// +// We have already introduced the SCALAR token above. The following tokens are +// used to describe aliases, anchors, tag, and scalars: +// +// ALIAS(anchor) +// ANCHOR(anchor) +// TAG(handle,suffix) +// SCALAR(value,style) +// +// The following series of examples illustrate the usage of these tokens: +// +// 1. A recursive sequence: +// +// &A [ *A ] +// +// Tokens: +// +// STREAM-START(utf-8) +// ANCHOR("A") +// FLOW-SEQUENCE-START +// ALIAS("A") +// FLOW-SEQUENCE-END +// STREAM-END +// +// 2. A tagged scalar: +// +// !!float "3.14" # A good approximation. +// +// Tokens: +// +// STREAM-START(utf-8) +// TAG("!!","float") +// SCALAR("3.14",double-quoted) +// STREAM-END +// +// 3. Various scalar styles: +// +// --- # Implicit empty plain scalars do not produce tokens. 
+// --- a plain scalar +// --- 'a single-quoted scalar' +// --- "a double-quoted scalar" +// --- |- +// a literal scalar +// --- >- +// a folded +// scalar +// +// Tokens: +// +// STREAM-START(utf-8) +// DOCUMENT-START +// DOCUMENT-START +// SCALAR("a plain scalar",plain) +// DOCUMENT-START +// SCALAR("a single-quoted scalar",single-quoted) +// DOCUMENT-START +// SCALAR("a double-quoted scalar",double-quoted) +// DOCUMENT-START +// SCALAR("a literal scalar",literal) +// DOCUMENT-START +// SCALAR("a folded scalar",folded) +// STREAM-END +// +// Now it's time to review collection-related tokens. We will start with +// flow collections: +// +// FLOW-SEQUENCE-START +// FLOW-SEQUENCE-END +// FLOW-MAPPING-START +// FLOW-MAPPING-END +// FLOW-ENTRY +// KEY +// VALUE +// +// The tokens FLOW-SEQUENCE-START, FLOW-SEQUENCE-END, FLOW-MAPPING-START, and +// FLOW-MAPPING-END represent the indicators '[', ']', '{', and '}' +// correspondingly. FLOW-ENTRY represent the ',' indicator. Finally the +// indicators '?' and ':', which are used for denoting mapping keys and values, +// are represented by the KEY and VALUE tokens. +// +// The following examples show flow collections: +// +// 1. A flow sequence: +// +// [item 1, item 2, item 3] +// +// Tokens: +// +// STREAM-START(utf-8) +// FLOW-SEQUENCE-START +// SCALAR("item 1",plain) +// FLOW-ENTRY +// SCALAR("item 2",plain) +// FLOW-ENTRY +// SCALAR("item 3",plain) +// FLOW-SEQUENCE-END +// STREAM-END +// +// 2. A flow mapping: +// +// { +// a simple key: a value, # Note that the KEY token is produced. +// ? a complex key: another value, +// } +// +// Tokens: +// +// STREAM-START(utf-8) +// FLOW-MAPPING-START +// KEY +// SCALAR("a simple key",plain) +// VALUE +// SCALAR("a value",plain) +// FLOW-ENTRY +// KEY +// SCALAR("a complex key",plain) +// VALUE +// SCALAR("another value",plain) +// FLOW-ENTRY +// FLOW-MAPPING-END +// STREAM-END +// +// A simple key is a key which is not denoted by the '?' indicator. 
Note that +// the Scanner still produce the KEY token whenever it encounters a simple key. +// +// For scanning block collections, the following tokens are used (note that we +// repeat KEY and VALUE here): +// +// BLOCK-SEQUENCE-START +// BLOCK-MAPPING-START +// BLOCK-END +// BLOCK-ENTRY +// KEY +// VALUE +// +// The tokens BLOCK-SEQUENCE-START and BLOCK-MAPPING-START denote indentation +// increase that precedes a block collection (cf. the INDENT token in Python). +// The token BLOCK-END denote indentation decrease that ends a block collection +// (cf. the DEDENT token in Python). However YAML has some syntax pecularities +// that makes detections of these tokens more complex. +// +// The tokens BLOCK-ENTRY, KEY, and VALUE are used to represent the indicators +// '-', '?', and ':' correspondingly. +// +// The following examples show how the tokens BLOCK-SEQUENCE-START, +// BLOCK-MAPPING-START, and BLOCK-END are emitted by the Scanner: +// +// 1. Block sequences: +// +// - item 1 +// - item 2 +// - +// - item 3.1 +// - item 3.2 +// - +// key 1: value 1 +// key 2: value 2 +// +// Tokens: +// +// STREAM-START(utf-8) +// BLOCK-SEQUENCE-START +// BLOCK-ENTRY +// SCALAR("item 1",plain) +// BLOCK-ENTRY +// SCALAR("item 2",plain) +// BLOCK-ENTRY +// BLOCK-SEQUENCE-START +// BLOCK-ENTRY +// SCALAR("item 3.1",plain) +// BLOCK-ENTRY +// SCALAR("item 3.2",plain) +// BLOCK-END +// BLOCK-ENTRY +// BLOCK-MAPPING-START +// KEY +// SCALAR("key 1",plain) +// VALUE +// SCALAR("value 1",plain) +// KEY +// SCALAR("key 2",plain) +// VALUE +// SCALAR("value 2",plain) +// BLOCK-END +// BLOCK-END +// STREAM-END +// +// 2. Block mappings: +// +// a simple key: a value # The KEY token is produced here. +// ? 
a complex key +// : another value +// a mapping: +// key 1: value 1 +// key 2: value 2 +// a sequence: +// - item 1 +// - item 2 +// +// Tokens: +// +// STREAM-START(utf-8) +// BLOCK-MAPPING-START +// KEY +// SCALAR("a simple key",plain) +// VALUE +// SCALAR("a value",plain) +// KEY +// SCALAR("a complex key",plain) +// VALUE +// SCALAR("another value",plain) +// KEY +// SCALAR("a mapping",plain) +// BLOCK-MAPPING-START +// KEY +// SCALAR("key 1",plain) +// VALUE +// SCALAR("value 1",plain) +// KEY +// SCALAR("key 2",plain) +// VALUE +// SCALAR("value 2",plain) +// BLOCK-END +// KEY +// SCALAR("a sequence",plain) +// VALUE +// BLOCK-SEQUENCE-START +// BLOCK-ENTRY +// SCALAR("item 1",plain) +// BLOCK-ENTRY +// SCALAR("item 2",plain) +// BLOCK-END +// BLOCK-END +// STREAM-END +// +// YAML does not always require to start a new block collection from a new +// line. If the current line contains only '-', '?', and ':' indicators, a new +// block collection may start at the current line. The following examples +// illustrate this case: +// +// 1. Collections in a sequence: +// +// - - item 1 +// - item 2 +// - key 1: value 1 +// key 2: value 2 +// - ? complex key +// : complex value +// +// Tokens: +// +// STREAM-START(utf-8) +// BLOCK-SEQUENCE-START +// BLOCK-ENTRY +// BLOCK-SEQUENCE-START +// BLOCK-ENTRY +// SCALAR("item 1",plain) +// BLOCK-ENTRY +// SCALAR("item 2",plain) +// BLOCK-END +// BLOCK-ENTRY +// BLOCK-MAPPING-START +// KEY +// SCALAR("key 1",plain) +// VALUE +// SCALAR("value 1",plain) +// KEY +// SCALAR("key 2",plain) +// VALUE +// SCALAR("value 2",plain) +// BLOCK-END +// BLOCK-ENTRY +// BLOCK-MAPPING-START +// KEY +// SCALAR("complex key") +// VALUE +// SCALAR("complex value") +// BLOCK-END +// BLOCK-END +// STREAM-END +// +// 2. Collections in a mapping: +// +// ? a sequence +// : - item 1 +// - item 2 +// ? 
a mapping +// : key 1: value 1 +// key 2: value 2 +// +// Tokens: +// +// STREAM-START(utf-8) +// BLOCK-MAPPING-START +// KEY +// SCALAR("a sequence",plain) +// VALUE +// BLOCK-SEQUENCE-START +// BLOCK-ENTRY +// SCALAR("item 1",plain) +// BLOCK-ENTRY +// SCALAR("item 2",plain) +// BLOCK-END +// KEY +// SCALAR("a mapping",plain) +// VALUE +// BLOCK-MAPPING-START +// KEY +// SCALAR("key 1",plain) +// VALUE +// SCALAR("value 1",plain) +// KEY +// SCALAR("key 2",plain) +// VALUE +// SCALAR("value 2",plain) +// BLOCK-END +// BLOCK-END +// STREAM-END +// +// YAML also permits non-indented sequences if they are included into a block +// mapping. In this case, the token BLOCK-SEQUENCE-START is not produced: +// +// key: +// - item 1 # BLOCK-SEQUENCE-START is NOT produced here. +// - item 2 +// +// Tokens: +// +// STREAM-START(utf-8) +// BLOCK-MAPPING-START +// KEY +// SCALAR("key",plain) +// VALUE +// BLOCK-ENTRY +// SCALAR("item 1",plain) +// BLOCK-ENTRY +// SCALAR("item 2",plain) +// BLOCK-END +// + +// Ensure that the buffer contains the required number of characters. +// Return true on success, false on failure (reader error or memory error). +func cache(parser *yaml_parser_t, length int) bool { + // [Go] This was inlined: !cache(A, B) -> unread < B && !update(A, B) + return parser.unread >= length || yaml_parser_update_buffer(parser, length) +} + +// Advance the buffer pointer. 
+func skip(parser *yaml_parser_t) { + parser.mark.index++ + parser.mark.column++ + parser.unread-- + parser.buffer_pos += width(parser.buffer[parser.buffer_pos]) +} + +func skip_line(parser *yaml_parser_t) { + if is_crlf(parser.buffer, parser.buffer_pos) { + parser.mark.index += 2 + parser.mark.column = 0 + parser.mark.line++ + parser.unread -= 2 + parser.buffer_pos += 2 + } else if is_break(parser.buffer, parser.buffer_pos) { + parser.mark.index++ + parser.mark.column = 0 + parser.mark.line++ + parser.unread-- + parser.buffer_pos += width(parser.buffer[parser.buffer_pos]) + } +} + +// Copy a character to a string buffer and advance pointers. +func read(parser *yaml_parser_t, s []byte) []byte { + w := width(parser.buffer[parser.buffer_pos]) + if w == 0 { + panic("invalid character sequence") + } + if len(s) == 0 { + s = make([]byte, 0, 32) + } + if w == 1 && len(s)+w <= cap(s) { + s = s[:len(s)+1] + s[len(s)-1] = parser.buffer[parser.buffer_pos] + parser.buffer_pos++ + } else { + s = append(s, parser.buffer[parser.buffer_pos:parser.buffer_pos+w]...) + parser.buffer_pos += w + } + parser.mark.index++ + parser.mark.column++ + parser.unread-- + return s +} + +// Copy a line break character to a string buffer and advance pointers. +func read_line(parser *yaml_parser_t, s []byte) []byte { + buf := parser.buffer + pos := parser.buffer_pos + switch { + case buf[pos] == '\r' && buf[pos+1] == '\n': + // CR LF . LF + s = append(s, '\n') + parser.buffer_pos += 2 + parser.mark.index++ + parser.unread-- + case buf[pos] == '\r' || buf[pos] == '\n': + // CR|LF . LF + s = append(s, '\n') + parser.buffer_pos += 1 + case buf[pos] == '\xC2' && buf[pos+1] == '\x85': + // NEL . LF + s = append(s, '\n') + parser.buffer_pos += 2 + case buf[pos] == '\xE2' && buf[pos+1] == '\x80' && (buf[pos+2] == '\xA8' || buf[pos+2] == '\xA9'): + // LS|PS . LS|PS + s = append(s, buf[parser.buffer_pos:pos+3]...) 
+ parser.buffer_pos += 3 + default: + return s + } + parser.mark.index++ + parser.mark.column = 0 + parser.mark.line++ + parser.unread-- + return s +} + +// Get the next token. +func yaml_parser_scan(parser *yaml_parser_t, token *yaml_token_t) bool { + // Erase the token object. + *token = yaml_token_t{} // [Go] Is this necessary? + + // No tokens after STREAM-END or error. + if parser.stream_end_produced || parser.error != yaml_NO_ERROR { + return true + } + + // Ensure that the tokens queue contains enough tokens. + if !parser.token_available { + if !yaml_parser_fetch_more_tokens(parser) { + return false + } + } + + // Fetch the next token from the queue. + *token = parser.tokens[parser.tokens_head] + parser.tokens_head++ + parser.tokens_parsed++ + parser.token_available = false + + if token.typ == yaml_STREAM_END_TOKEN { + parser.stream_end_produced = true + } + return true +} + +// Set the scanner error and return false. +func yaml_parser_set_scanner_error(parser *yaml_parser_t, context string, context_mark yaml_mark_t, problem string) bool { + parser.error = yaml_SCANNER_ERROR + parser.context = context + parser.context_mark = context_mark + parser.problem = problem + parser.problem_mark = parser.mark + return false +} + +func yaml_parser_set_scanner_tag_error(parser *yaml_parser_t, directive bool, context_mark yaml_mark_t, problem string) bool { + context := "while parsing a tag" + if directive { + context = "while parsing a %TAG directive" + } + return yaml_parser_set_scanner_error(parser, context, context_mark, "did not find URI escaped octet") +} + +func trace(args ...interface{}) func() { + pargs := append([]interface{}{"+++"}, args...) + fmt.Println(pargs...) + pargs = append([]interface{}{"---"}, args...) + return func() { fmt.Println(pargs...) } +} + +// Ensure that the tokens queue contains at least one token which can be +// returned to the Parser. 
+func yaml_parser_fetch_more_tokens(parser *yaml_parser_t) bool { + // While we need more tokens to fetch, do it. + for { + // Check if we really need to fetch more tokens. + need_more_tokens := false + + if parser.tokens_head == len(parser.tokens) { + // Queue is empty. + need_more_tokens = true + } else { + // Check if any potential simple key may occupy the head position. + if !yaml_parser_stale_simple_keys(parser) { + return false + } + + for i := range parser.simple_keys { + simple_key := &parser.simple_keys[i] + if simple_key.possible && simple_key.token_number == parser.tokens_parsed { + need_more_tokens = true + break + } + } + } + + // We are finished. + if !need_more_tokens { + break + } + // Fetch the next token. + if !yaml_parser_fetch_next_token(parser) { + return false + } + } + + parser.token_available = true + return true +} + +// The dispatcher for token fetchers. +func yaml_parser_fetch_next_token(parser *yaml_parser_t) bool { + // Ensure that the buffer is initialized. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + // Check if we just started scanning. Fetch STREAM-START then. + if !parser.stream_start_produced { + return yaml_parser_fetch_stream_start(parser) + } + + // Eat whitespaces and comments until we reach the next token. + if !yaml_parser_scan_to_next_token(parser) { + return false + } + + // Remove obsolete potential simple keys. + if !yaml_parser_stale_simple_keys(parser) { + return false + } + + // Check the indentation level against the current column. + if !yaml_parser_unroll_indent(parser, parser.mark.column) { + return false + } + + // Ensure that the buffer contains at least 4 characters. 4 is the length + // of the longest indicators ('--- ' and '... '). + if parser.unread < 4 && !yaml_parser_update_buffer(parser, 4) { + return false + } + + // Is it the end of the stream? 
+ if is_z(parser.buffer, parser.buffer_pos) { + return yaml_parser_fetch_stream_end(parser) + } + + // Is it a directive? + if parser.mark.column == 0 && parser.buffer[parser.buffer_pos] == '%' { + return yaml_parser_fetch_directive(parser) + } + + buf := parser.buffer + pos := parser.buffer_pos + + // Is it the document start indicator? + if parser.mark.column == 0 && buf[pos] == '-' && buf[pos+1] == '-' && buf[pos+2] == '-' && is_blankz(buf, pos+3) { + return yaml_parser_fetch_document_indicator(parser, yaml_DOCUMENT_START_TOKEN) + } + + // Is it the document end indicator? + if parser.mark.column == 0 && buf[pos] == '.' && buf[pos+1] == '.' && buf[pos+2] == '.' && is_blankz(buf, pos+3) { + return yaml_parser_fetch_document_indicator(parser, yaml_DOCUMENT_END_TOKEN) + } + + // Is it the flow sequence start indicator? + if buf[pos] == '[' { + return yaml_parser_fetch_flow_collection_start(parser, yaml_FLOW_SEQUENCE_START_TOKEN) + } + + // Is it the flow mapping start indicator? + if parser.buffer[parser.buffer_pos] == '{' { + return yaml_parser_fetch_flow_collection_start(parser, yaml_FLOW_MAPPING_START_TOKEN) + } + + // Is it the flow sequence end indicator? + if parser.buffer[parser.buffer_pos] == ']' { + return yaml_parser_fetch_flow_collection_end(parser, + yaml_FLOW_SEQUENCE_END_TOKEN) + } + + // Is it the flow mapping end indicator? + if parser.buffer[parser.buffer_pos] == '}' { + return yaml_parser_fetch_flow_collection_end(parser, + yaml_FLOW_MAPPING_END_TOKEN) + } + + // Is it the flow entry indicator? + if parser.buffer[parser.buffer_pos] == ',' { + return yaml_parser_fetch_flow_entry(parser) + } + + // Is it the block entry indicator? + if parser.buffer[parser.buffer_pos] == '-' && is_blankz(parser.buffer, parser.buffer_pos+1) { + return yaml_parser_fetch_block_entry(parser) + } + + // Is it the key indicator? + if parser.buffer[parser.buffer_pos] == '?' 
&& (parser.flow_level > 0 || is_blankz(parser.buffer, parser.buffer_pos+1)) { + return yaml_parser_fetch_key(parser) + } + + // Is it the value indicator? + if parser.buffer[parser.buffer_pos] == ':' && (parser.flow_level > 0 || is_blankz(parser.buffer, parser.buffer_pos+1)) { + return yaml_parser_fetch_value(parser) + } + + // Is it an alias? + if parser.buffer[parser.buffer_pos] == '*' { + return yaml_parser_fetch_anchor(parser, yaml_ALIAS_TOKEN) + } + + // Is it an anchor? + if parser.buffer[parser.buffer_pos] == '&' { + return yaml_parser_fetch_anchor(parser, yaml_ANCHOR_TOKEN) + } + + // Is it a tag? + if parser.buffer[parser.buffer_pos] == '!' { + return yaml_parser_fetch_tag(parser) + } + + // Is it a literal scalar? + if parser.buffer[parser.buffer_pos] == '|' && parser.flow_level == 0 { + return yaml_parser_fetch_block_scalar(parser, true) + } + + // Is it a folded scalar? + if parser.buffer[parser.buffer_pos] == '>' && parser.flow_level == 0 { + return yaml_parser_fetch_block_scalar(parser, false) + } + + // Is it a single-quoted scalar? + if parser.buffer[parser.buffer_pos] == '\'' { + return yaml_parser_fetch_flow_scalar(parser, true) + } + + // Is it a double-quoted scalar? + if parser.buffer[parser.buffer_pos] == '"' { + return yaml_parser_fetch_flow_scalar(parser, false) + } + + // Is it a plain scalar? + // + // A plain scalar may start with any non-blank characters except + // + // '-', '?', ':', ',', '[', ']', '{', '}', + // '#', '&', '*', '!', '|', '>', '\'', '\"', + // '%', '@', '`'. + // + // In the block context (and, for the '-' indicator, in the flow context + // too), it may also start with the characters + // + // '-', '?', ':' + // + // if it is followed by a non-space character. + // + // The last rule is more restrictive than the specification requires. + // [Go] Make this logic more reasonable. 
+ //switch parser.buffer[parser.buffer_pos] { + //case '-', '?', ':', ',', '?', '-', ',', ':', ']', '[', '}', '{', '&', '#', '!', '*', '>', '|', '"', '\'', '@', '%', '-', '`': + //} + if !(is_blankz(parser.buffer, parser.buffer_pos) || parser.buffer[parser.buffer_pos] == '-' || + parser.buffer[parser.buffer_pos] == '?' || parser.buffer[parser.buffer_pos] == ':' || + parser.buffer[parser.buffer_pos] == ',' || parser.buffer[parser.buffer_pos] == '[' || + parser.buffer[parser.buffer_pos] == ']' || parser.buffer[parser.buffer_pos] == '{' || + parser.buffer[parser.buffer_pos] == '}' || parser.buffer[parser.buffer_pos] == '#' || + parser.buffer[parser.buffer_pos] == '&' || parser.buffer[parser.buffer_pos] == '*' || + parser.buffer[parser.buffer_pos] == '!' || parser.buffer[parser.buffer_pos] == '|' || + parser.buffer[parser.buffer_pos] == '>' || parser.buffer[parser.buffer_pos] == '\'' || + parser.buffer[parser.buffer_pos] == '"' || parser.buffer[parser.buffer_pos] == '%' || + parser.buffer[parser.buffer_pos] == '@' || parser.buffer[parser.buffer_pos] == '`') || + (parser.buffer[parser.buffer_pos] == '-' && !is_blank(parser.buffer, parser.buffer_pos+1)) || + (parser.flow_level == 0 && + (parser.buffer[parser.buffer_pos] == '?' || parser.buffer[parser.buffer_pos] == ':') && + !is_blankz(parser.buffer, parser.buffer_pos+1)) { + return yaml_parser_fetch_plain_scalar(parser) + } + + // If we don't determine the token type so far, it is an error. + return yaml_parser_set_scanner_error(parser, + "while scanning for the next token", parser.mark, + "found character that cannot start any token") +} + +// Check the list of potential simple keys and remove the positions that +// cannot contain simple keys anymore. +func yaml_parser_stale_simple_keys(parser *yaml_parser_t) bool { + // Check for a potential simple key for each flow level. 
+ for i := range parser.simple_keys { + simple_key := &parser.simple_keys[i] + + // The specification requires that a simple key + // + // - is limited to a single line, + // - is shorter than 1024 characters. + if simple_key.possible && (simple_key.mark.line < parser.mark.line || simple_key.mark.index+1024 < parser.mark.index) { + + // Check if the potential simple key to be removed is required. + if simple_key.required { + return yaml_parser_set_scanner_error(parser, + "while scanning a simple key", simple_key.mark, + "could not find expected ':'") + } + simple_key.possible = false + } + } + return true +} + +// Check if a simple key may start at the current position and add it if +// needed. +func yaml_parser_save_simple_key(parser *yaml_parser_t) bool { + // A simple key is required at the current position if the scanner is in + // the block context and the current column coincides with the indentation + // level. + + required := parser.flow_level == 0 && parser.indent == parser.mark.column + + // A simple key is required only when it is the first token in the current + // line. Therefore it is always allowed. But we add a check anyway. + if required && !parser.simple_key_allowed { + panic("should not happen") + } + + // + // If the current position may start a simple key, save it. + // + if parser.simple_key_allowed { + simple_key := yaml_simple_key_t{ + possible: true, + required: required, + token_number: parser.tokens_parsed + (len(parser.tokens) - parser.tokens_head), + } + simple_key.mark = parser.mark + + if !yaml_parser_remove_simple_key(parser) { + return false + } + parser.simple_keys[len(parser.simple_keys)-1] = simple_key + } + return true +} + +// Remove a potential simple key at the current flow level. +func yaml_parser_remove_simple_key(parser *yaml_parser_t) bool { + i := len(parser.simple_keys) - 1 + if parser.simple_keys[i].possible { + // If the key is required, it is an error. 
+ if parser.simple_keys[i].required { + return yaml_parser_set_scanner_error(parser, + "while scanning a simple key", parser.simple_keys[i].mark, + "could not find expected ':'") + } + } + // Remove the key from the stack. + parser.simple_keys[i].possible = false + return true +} + +// Increase the flow level and resize the simple key list if needed. +func yaml_parser_increase_flow_level(parser *yaml_parser_t) bool { + // Reset the simple key on the next level. + parser.simple_keys = append(parser.simple_keys, yaml_simple_key_t{}) + + // Increase the flow level. + parser.flow_level++ + return true +} + +// Decrease the flow level. +func yaml_parser_decrease_flow_level(parser *yaml_parser_t) bool { + if parser.flow_level > 0 { + parser.flow_level-- + parser.simple_keys = parser.simple_keys[:len(parser.simple_keys)-1] + } + return true +} + +// Push the current indentation level to the stack and set the new level +// the current column is greater than the indentation level. In this case, +// append or insert the specified token into the token queue. +func yaml_parser_roll_indent(parser *yaml_parser_t, column, number int, typ yaml_token_type_t, mark yaml_mark_t) bool { + // In the flow context, do nothing. + if parser.flow_level > 0 { + return true + } + + if parser.indent < column { + // Push the current indentation level to the stack and set the new + // indentation level. + parser.indents = append(parser.indents, parser.indent) + parser.indent = column + + // Create a token and insert it into the queue. + token := yaml_token_t{ + typ: typ, + start_mark: mark, + end_mark: mark, + } + if number > -1 { + number -= parser.tokens_parsed + } + yaml_insert_token(parser, number, &token) + } + return true +} + +// Pop indentation levels from the indents stack until the current level +// becomes less or equal to the column. For each indentation level, append +// the BLOCK-END token. 
+func yaml_parser_unroll_indent(parser *yaml_parser_t, column int) bool { + // In the flow context, do nothing. + if parser.flow_level > 0 { + return true + } + + // Loop through the indentation levels in the stack. + for parser.indent > column { + // Create a token and append it to the queue. + token := yaml_token_t{ + typ: yaml_BLOCK_END_TOKEN, + start_mark: parser.mark, + end_mark: parser.mark, + } + yaml_insert_token(parser, -1, &token) + + // Pop the indentation level. + parser.indent = parser.indents[len(parser.indents)-1] + parser.indents = parser.indents[:len(parser.indents)-1] + } + return true +} + +// Initialize the scanner and produce the STREAM-START token. +func yaml_parser_fetch_stream_start(parser *yaml_parser_t) bool { + + // Set the initial indentation. + parser.indent = -1 + + // Initialize the simple key stack. + parser.simple_keys = append(parser.simple_keys, yaml_simple_key_t{}) + + // A simple key is allowed at the beginning of the stream. + parser.simple_key_allowed = true + + // We have started. + parser.stream_start_produced = true + + // Create the STREAM-START token and append it to the queue. + token := yaml_token_t{ + typ: yaml_STREAM_START_TOKEN, + start_mark: parser.mark, + end_mark: parser.mark, + encoding: parser.encoding, + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the STREAM-END token and shut down the scanner. +func yaml_parser_fetch_stream_end(parser *yaml_parser_t) bool { + + // Force new line. + if parser.mark.column != 0 { + parser.mark.column = 0 + parser.mark.line++ + } + + // Reset the indentation level. + if !yaml_parser_unroll_indent(parser, -1) { + return false + } + + // Reset simple keys. + if !yaml_parser_remove_simple_key(parser) { + return false + } + + parser.simple_key_allowed = false + + // Create the STREAM-END token and append it to the queue. 
+ token := yaml_token_t{ + typ: yaml_STREAM_END_TOKEN, + start_mark: parser.mark, + end_mark: parser.mark, + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce a VERSION-DIRECTIVE or TAG-DIRECTIVE token. +func yaml_parser_fetch_directive(parser *yaml_parser_t) bool { + // Reset the indentation level. + if !yaml_parser_unroll_indent(parser, -1) { + return false + } + + // Reset simple keys. + if !yaml_parser_remove_simple_key(parser) { + return false + } + + parser.simple_key_allowed = false + + // Create the YAML-DIRECTIVE or TAG-DIRECTIVE token. + token := yaml_token_t{} + if !yaml_parser_scan_directive(parser, &token) { + return false + } + // Append the token to the queue. + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the DOCUMENT-START or DOCUMENT-END token. +func yaml_parser_fetch_document_indicator(parser *yaml_parser_t, typ yaml_token_type_t) bool { + // Reset the indentation level. + if !yaml_parser_unroll_indent(parser, -1) { + return false + } + + // Reset simple keys. + if !yaml_parser_remove_simple_key(parser) { + return false + } + + parser.simple_key_allowed = false + + // Consume the token. + start_mark := parser.mark + + skip(parser) + skip(parser) + skip(parser) + + end_mark := parser.mark + + // Create the DOCUMENT-START or DOCUMENT-END token. + token := yaml_token_t{ + typ: typ, + start_mark: start_mark, + end_mark: end_mark, + } + // Append the token to the queue. + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the FLOW-SEQUENCE-START or FLOW-MAPPING-START token. +func yaml_parser_fetch_flow_collection_start(parser *yaml_parser_t, typ yaml_token_type_t) bool { + // The indicators '[' and '{' may start a simple key. + if !yaml_parser_save_simple_key(parser) { + return false + } + + // Increase the flow level. + if !yaml_parser_increase_flow_level(parser) { + return false + } + + // A simple key may follow the indicators '[' and '{'. 
+ parser.simple_key_allowed = true + + // Consume the token. + start_mark := parser.mark + skip(parser) + end_mark := parser.mark + + // Create the FLOW-SEQUENCE-START of FLOW-MAPPING-START token. + token := yaml_token_t{ + typ: typ, + start_mark: start_mark, + end_mark: end_mark, + } + // Append the token to the queue. + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the FLOW-SEQUENCE-END or FLOW-MAPPING-END token. +func yaml_parser_fetch_flow_collection_end(parser *yaml_parser_t, typ yaml_token_type_t) bool { + // Reset any potential simple key on the current flow level. + if !yaml_parser_remove_simple_key(parser) { + return false + } + + // Decrease the flow level. + if !yaml_parser_decrease_flow_level(parser) { + return false + } + + // No simple keys after the indicators ']' and '}'. + parser.simple_key_allowed = false + + // Consume the token. + + start_mark := parser.mark + skip(parser) + end_mark := parser.mark + + // Create the FLOW-SEQUENCE-END of FLOW-MAPPING-END token. + token := yaml_token_t{ + typ: typ, + start_mark: start_mark, + end_mark: end_mark, + } + // Append the token to the queue. + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the FLOW-ENTRY token. +func yaml_parser_fetch_flow_entry(parser *yaml_parser_t) bool { + // Reset any potential simple keys on the current flow level. + if !yaml_parser_remove_simple_key(parser) { + return false + } + + // Simple keys are allowed after ','. + parser.simple_key_allowed = true + + // Consume the token. + start_mark := parser.mark + skip(parser) + end_mark := parser.mark + + // Create the FLOW-ENTRY token and append it to the queue. + token := yaml_token_t{ + typ: yaml_FLOW_ENTRY_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the BLOCK-ENTRY token. +func yaml_parser_fetch_block_entry(parser *yaml_parser_t) bool { + // Check if the scanner is in the block context. 
+ if parser.flow_level == 0 { + // Check if we are allowed to start a new entry. + if !parser.simple_key_allowed { + return yaml_parser_set_scanner_error(parser, "", parser.mark, + "block sequence entries are not allowed in this context") + } + // Add the BLOCK-SEQUENCE-START token if needed. + if !yaml_parser_roll_indent(parser, parser.mark.column, -1, yaml_BLOCK_SEQUENCE_START_TOKEN, parser.mark) { + return false + } + } else { + // It is an error for the '-' indicator to occur in the flow context, + // but we let the Parser detect and report about it because the Parser + // is able to point to the context. + } + + // Reset any potential simple keys on the current flow level. + if !yaml_parser_remove_simple_key(parser) { + return false + } + + // Simple keys are allowed after '-'. + parser.simple_key_allowed = true + + // Consume the token. + start_mark := parser.mark + skip(parser) + end_mark := parser.mark + + // Create the BLOCK-ENTRY token and append it to the queue. + token := yaml_token_t{ + typ: yaml_BLOCK_ENTRY_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the KEY token. +func yaml_parser_fetch_key(parser *yaml_parser_t) bool { + + // In the block context, additional checks are required. + if parser.flow_level == 0 { + // Check if we are allowed to start a new key (not nessesary simple). + if !parser.simple_key_allowed { + return yaml_parser_set_scanner_error(parser, "", parser.mark, + "mapping keys are not allowed in this context") + } + // Add the BLOCK-MAPPING-START token if needed. + if !yaml_parser_roll_indent(parser, parser.mark.column, -1, yaml_BLOCK_MAPPING_START_TOKEN, parser.mark) { + return false + } + } + + // Reset any potential simple keys on the current flow level. + if !yaml_parser_remove_simple_key(parser) { + return false + } + + // Simple keys are allowed after '?' in the block context. 
+ parser.simple_key_allowed = parser.flow_level == 0 + + // Consume the token. + start_mark := parser.mark + skip(parser) + end_mark := parser.mark + + // Create the KEY token and append it to the queue. + token := yaml_token_t{ + typ: yaml_KEY_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the VALUE token. +func yaml_parser_fetch_value(parser *yaml_parser_t) bool { + + simple_key := &parser.simple_keys[len(parser.simple_keys)-1] + + // Have we found a simple key? + if simple_key.possible { + // Create the KEY token and insert it into the queue. + token := yaml_token_t{ + typ: yaml_KEY_TOKEN, + start_mark: simple_key.mark, + end_mark: simple_key.mark, + } + yaml_insert_token(parser, simple_key.token_number-parser.tokens_parsed, &token) + + // In the block context, we may need to add the BLOCK-MAPPING-START token. + if !yaml_parser_roll_indent(parser, simple_key.mark.column, + simple_key.token_number, + yaml_BLOCK_MAPPING_START_TOKEN, simple_key.mark) { + return false + } + + // Remove the simple key. + simple_key.possible = false + + // A simple key cannot follow another simple key. + parser.simple_key_allowed = false + + } else { + // The ':' indicator follows a complex key. + + // In the block context, extra checks are required. + if parser.flow_level == 0 { + + // Check if we are allowed to start a complex value. + if !parser.simple_key_allowed { + return yaml_parser_set_scanner_error(parser, "", parser.mark, + "mapping values are not allowed in this context") + } + + // Add the BLOCK-MAPPING-START token if needed. + if !yaml_parser_roll_indent(parser, parser.mark.column, -1, yaml_BLOCK_MAPPING_START_TOKEN, parser.mark) { + return false + } + } + + // Simple keys after ':' are allowed in the block context. + parser.simple_key_allowed = parser.flow_level == 0 + } + + // Consume the token. 
+ start_mark := parser.mark + skip(parser) + end_mark := parser.mark + + // Create the VALUE token and append it to the queue. + token := yaml_token_t{ + typ: yaml_VALUE_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the ALIAS or ANCHOR token. +func yaml_parser_fetch_anchor(parser *yaml_parser_t, typ yaml_token_type_t) bool { + // An anchor or an alias could be a simple key. + if !yaml_parser_save_simple_key(parser) { + return false + } + + // A simple key cannot follow an anchor or an alias. + parser.simple_key_allowed = false + + // Create the ALIAS or ANCHOR token and append it to the queue. + var token yaml_token_t + if !yaml_parser_scan_anchor(parser, &token, typ) { + return false + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the TAG token. +func yaml_parser_fetch_tag(parser *yaml_parser_t) bool { + // A tag could be a simple key. + if !yaml_parser_save_simple_key(parser) { + return false + } + + // A simple key cannot follow a tag. + parser.simple_key_allowed = false + + // Create the TAG token and append it to the queue. + var token yaml_token_t + if !yaml_parser_scan_tag(parser, &token) { + return false + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the SCALAR(...,literal) or SCALAR(...,folded) tokens. +func yaml_parser_fetch_block_scalar(parser *yaml_parser_t, literal bool) bool { + // Remove any potential simple keys. + if !yaml_parser_remove_simple_key(parser) { + return false + } + + // A simple key may follow a block scalar. + parser.simple_key_allowed = true + + // Create the SCALAR token and append it to the queue. + var token yaml_token_t + if !yaml_parser_scan_block_scalar(parser, &token, literal) { + return false + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the SCALAR(...,single-quoted) or SCALAR(...,double-quoted) tokens. 
+func yaml_parser_fetch_flow_scalar(parser *yaml_parser_t, single bool) bool { + // A plain scalar could be a simple key. + if !yaml_parser_save_simple_key(parser) { + return false + } + + // A simple key cannot follow a flow scalar. + parser.simple_key_allowed = false + + // Create the SCALAR token and append it to the queue. + var token yaml_token_t + if !yaml_parser_scan_flow_scalar(parser, &token, single) { + return false + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the SCALAR(...,plain) token. +func yaml_parser_fetch_plain_scalar(parser *yaml_parser_t) bool { + // A plain scalar could be a simple key. + if !yaml_parser_save_simple_key(parser) { + return false + } + + // A simple key cannot follow a flow scalar. + parser.simple_key_allowed = false + + // Create the SCALAR token and append it to the queue. + var token yaml_token_t + if !yaml_parser_scan_plain_scalar(parser, &token) { + return false + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Eat whitespaces and comments until the next token is found. +func yaml_parser_scan_to_next_token(parser *yaml_parser_t) bool { + + // Until the next token is not found. + for { + // Allow the BOM mark to start a line. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + if parser.mark.column == 0 && is_bom(parser.buffer, parser.buffer_pos) { + skip(parser) + } + + // Eat whitespaces. + // Tabs are allowed: + // - in the flow context + // - in the block context, but not at the beginning of the line or + // after '-', '?', or ':' (complex value). + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + for parser.buffer[parser.buffer_pos] == ' ' || ((parser.flow_level > 0 || !parser.simple_key_allowed) && parser.buffer[parser.buffer_pos] == '\t') { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Eat a comment until a line break. 
+ if parser.buffer[parser.buffer_pos] == '#' { + for !is_breakz(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + } + + // If it is a line break, eat it. + if is_break(parser.buffer, parser.buffer_pos) { + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + skip_line(parser) + + // In the block context, a new line may start a simple key. + if parser.flow_level == 0 { + parser.simple_key_allowed = true + } + } else { + break // We have found a token. + } + } + + return true +} + +// Scan a YAML-DIRECTIVE or TAG-DIRECTIVE token. +// +// Scope: +// %YAML 1.1 # a comment \n +// ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +// %TAG !yaml! tag:yaml.org,2002: \n +// ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +// +func yaml_parser_scan_directive(parser *yaml_parser_t, token *yaml_token_t) bool { + // Eat '%'. + start_mark := parser.mark + skip(parser) + + // Scan the directive name. + var name []byte + if !yaml_parser_scan_directive_name(parser, start_mark, &name) { + return false + } + + // Is it a YAML directive? + if bytes.Equal(name, []byte("YAML")) { + // Scan the VERSION directive value. + var major, minor int8 + if !yaml_parser_scan_version_directive_value(parser, start_mark, &major, &minor) { + return false + } + end_mark := parser.mark + + // Create a VERSION-DIRECTIVE token. + *token = yaml_token_t{ + typ: yaml_VERSION_DIRECTIVE_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + major: major, + minor: minor, + } + + // Is it a TAG directive? + } else if bytes.Equal(name, []byte("TAG")) { + // Scan the TAG directive value. + var handle, prefix []byte + if !yaml_parser_scan_tag_directive_value(parser, start_mark, &handle, &prefix) { + return false + } + end_mark := parser.mark + + // Create a TAG-DIRECTIVE token. 
+ *token = yaml_token_t{ + typ: yaml_TAG_DIRECTIVE_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + value: handle, + prefix: prefix, + } + + // Unknown directive. + } else { + yaml_parser_set_scanner_error(parser, "while scanning a directive", + start_mark, "found unknown directive name") + return false + } + + // Eat the rest of the line including any comments. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + for is_blank(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + if parser.buffer[parser.buffer_pos] == '#' { + for !is_breakz(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + } + + // Check if we are at the end of the line. + if !is_breakz(parser.buffer, parser.buffer_pos) { + yaml_parser_set_scanner_error(parser, "while scanning a directive", + start_mark, "did not find expected comment or line break") + return false + } + + // Eat a line break. + if is_break(parser.buffer, parser.buffer_pos) { + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + skip_line(parser) + } + + return true +} + +// Scan the directive name. +// +// Scope: +// %YAML 1.1 # a comment \n +// ^^^^ +// %TAG !yaml! tag:yaml.org,2002: \n +// ^^^ +// +func yaml_parser_scan_directive_name(parser *yaml_parser_t, start_mark yaml_mark_t, name *[]byte) bool { + // Consume the directive name. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + var s []byte + for is_alpha(parser.buffer, parser.buffer_pos) { + s = read(parser, s) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Check if the name is empty. 
+ if len(s) == 0 { + yaml_parser_set_scanner_error(parser, "while scanning a directive", + start_mark, "could not find expected directive name") + return false + } + + // Check for an blank character after the name. + if !is_blankz(parser.buffer, parser.buffer_pos) { + yaml_parser_set_scanner_error(parser, "while scanning a directive", + start_mark, "found unexpected non-alphabetical character") + return false + } + *name = s + return true +} + +// Scan the value of VERSION-DIRECTIVE. +// +// Scope: +// %YAML 1.1 # a comment \n +// ^^^^^^ +func yaml_parser_scan_version_directive_value(parser *yaml_parser_t, start_mark yaml_mark_t, major, minor *int8) bool { + // Eat whitespaces. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + for is_blank(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Consume the major version number. + if !yaml_parser_scan_version_directive_number(parser, start_mark, major) { + return false + } + + // Eat '.'. + if parser.buffer[parser.buffer_pos] != '.' { + return yaml_parser_set_scanner_error(parser, "while scanning a %YAML directive", + start_mark, "did not find expected digit or '.' character") + } + + skip(parser) + + // Consume the minor version number. + if !yaml_parser_scan_version_directive_number(parser, start_mark, minor) { + return false + } + return true +} + +const max_number_length = 2 + +// Scan the version number of VERSION-DIRECTIVE. +// +// Scope: +// %YAML 1.1 # a comment \n +// ^ +// %YAML 1.1 # a comment \n +// ^ +func yaml_parser_scan_version_directive_number(parser *yaml_parser_t, start_mark yaml_mark_t, number *int8) bool { + + // Repeat while the next character is digit. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + var value, length int8 + for is_digit(parser.buffer, parser.buffer_pos) { + // Check if the number is too long. 
+ length++ + if length > max_number_length { + return yaml_parser_set_scanner_error(parser, "while scanning a %YAML directive", + start_mark, "found extremely long version number") + } + value = value*10 + int8(as_digit(parser.buffer, parser.buffer_pos)) + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Check if the number was present. + if length == 0 { + return yaml_parser_set_scanner_error(parser, "while scanning a %YAML directive", + start_mark, "did not find expected version number") + } + *number = value + return true +} + +// Scan the value of a TAG-DIRECTIVE token. +// +// Scope: +// %TAG !yaml! tag:yaml.org,2002: \n +// ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +// +func yaml_parser_scan_tag_directive_value(parser *yaml_parser_t, start_mark yaml_mark_t, handle, prefix *[]byte) bool { + var handle_value, prefix_value []byte + + // Eat whitespaces. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + for is_blank(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Scan a handle. + if !yaml_parser_scan_tag_handle(parser, true, start_mark, &handle_value) { + return false + } + + // Expect a whitespace. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + if !is_blank(parser.buffer, parser.buffer_pos) { + yaml_parser_set_scanner_error(parser, "while scanning a %TAG directive", + start_mark, "did not find expected whitespace") + return false + } + + // Eat whitespaces. + for is_blank(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Scan a prefix. + if !yaml_parser_scan_tag_uri(parser, true, nil, start_mark, &prefix_value) { + return false + } + + // Expect a whitespace or line break. 
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + if !is_blankz(parser.buffer, parser.buffer_pos) { + yaml_parser_set_scanner_error(parser, "while scanning a %TAG directive", + start_mark, "did not find expected whitespace or line break") + return false + } + + *handle = handle_value + *prefix = prefix_value + return true +} + +func yaml_parser_scan_anchor(parser *yaml_parser_t, token *yaml_token_t, typ yaml_token_type_t) bool { + var s []byte + + // Eat the indicator character. + start_mark := parser.mark + skip(parser) + + // Consume the value. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + for is_alpha(parser.buffer, parser.buffer_pos) { + s = read(parser, s) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + end_mark := parser.mark + + /* + * Check if length of the anchor is greater than 0 and it is followed by + * a whitespace character or one of the indicators: + * + * '?', ':', ',', ']', '}', '%', '@', '`'. + */ + + if len(s) == 0 || + !(is_blankz(parser.buffer, parser.buffer_pos) || parser.buffer[parser.buffer_pos] == '?' || + parser.buffer[parser.buffer_pos] == ':' || parser.buffer[parser.buffer_pos] == ',' || + parser.buffer[parser.buffer_pos] == ']' || parser.buffer[parser.buffer_pos] == '}' || + parser.buffer[parser.buffer_pos] == '%' || parser.buffer[parser.buffer_pos] == '@' || + parser.buffer[parser.buffer_pos] == '`') { + context := "while scanning an alias" + if typ == yaml_ANCHOR_TOKEN { + context = "while scanning an anchor" + } + yaml_parser_set_scanner_error(parser, context, start_mark, + "did not find expected alphabetic or numeric character") + return false + } + + // Create a token. + *token = yaml_token_t{ + typ: typ, + start_mark: start_mark, + end_mark: end_mark, + value: s, + } + + return true +} + +/* + * Scan a TAG token. 
+ */ + +func yaml_parser_scan_tag(parser *yaml_parser_t, token *yaml_token_t) bool { + var handle, suffix []byte + + start_mark := parser.mark + + // Check if the tag is in the canonical form. + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + + if parser.buffer[parser.buffer_pos+1] == '<' { + // Keep the handle as '' + + // Eat '!<' + skip(parser) + skip(parser) + + // Consume the tag value. + if !yaml_parser_scan_tag_uri(parser, false, nil, start_mark, &suffix) { + return false + } + + // Check for '>' and eat it. + if parser.buffer[parser.buffer_pos] != '>' { + yaml_parser_set_scanner_error(parser, "while scanning a tag", + start_mark, "did not find the expected '>'") + return false + } + + skip(parser) + } else { + // The tag has either the '!suffix' or the '!handle!suffix' form. + + // First, try to scan a handle. + if !yaml_parser_scan_tag_handle(parser, false, start_mark, &handle) { + return false + } + + // Check if it is, indeed, handle. + if handle[0] == '!' && len(handle) > 1 && handle[len(handle)-1] == '!' { + // Scan the suffix now. + if !yaml_parser_scan_tag_uri(parser, false, nil, start_mark, &suffix) { + return false + } + } else { + // It wasn't a handle after all. Scan the rest of the tag. + if !yaml_parser_scan_tag_uri(parser, false, handle, start_mark, &suffix) { + return false + } + + // Set the handle to '!'. + handle = []byte{'!'} + + // A special case: the '!' tag. Set the handle to '' and the + // suffix to '!'. + if len(suffix) == 0 { + handle, suffix = suffix, handle + } + } + } + + // Check the character which ends the tag. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + if !is_blankz(parser.buffer, parser.buffer_pos) { + yaml_parser_set_scanner_error(parser, "while scanning a tag", + start_mark, "did not find expected whitespace or line break") + return false + } + + end_mark := parser.mark + + // Create a token. 
+ *token = yaml_token_t{ + typ: yaml_TAG_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + value: handle, + suffix: suffix, + } + return true +} + +// Scan a tag handle. +func yaml_parser_scan_tag_handle(parser *yaml_parser_t, directive bool, start_mark yaml_mark_t, handle *[]byte) bool { + // Check the initial '!' character. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + if parser.buffer[parser.buffer_pos] != '!' { + yaml_parser_set_scanner_tag_error(parser, directive, + start_mark, "did not find expected '!'") + return false + } + + var s []byte + + // Copy the '!' character. + s = read(parser, s) + + // Copy all subsequent alphabetical and numerical characters. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + for is_alpha(parser.buffer, parser.buffer_pos) { + s = read(parser, s) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Check if the trailing character is '!' and copy it. + if parser.buffer[parser.buffer_pos] == '!' { + s = read(parser, s) + } else { + // It's either the '!' tag or not really a tag handle. If it's a %TAG + // directive, it's an error. If it's a tag token, it must be a part of URI. + if directive && !(s[0] == '!' && s[1] == 0) { + yaml_parser_set_scanner_tag_error(parser, directive, + start_mark, "did not find expected '!'") + return false + } + } + + *handle = s + return true +} + +// Scan a tag. +func yaml_parser_scan_tag_uri(parser *yaml_parser_t, directive bool, head []byte, start_mark yaml_mark_t, uri *[]byte) bool { + //size_t length = head ? strlen((char *)head) : 0 + var s []byte + + // Copy the head if needed. + // + // Note that we don't copy the leading '!' character. + if len(head) > 1 { + s = append(s, head[1:]...) + } + + // Scan the tag. 
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + // The set of characters that may appear in URI is as follows: + // + // '0'-'9', 'A'-'Z', 'a'-'z', '_', '-', ';', '/', '?', ':', '@', '&', + // '=', '+', '$', ',', '.', '!', '~', '*', '\'', '(', ')', '[', ']', + // '%'. + // [Go] Convert this into more reasonable logic. + for is_alpha(parser.buffer, parser.buffer_pos) || parser.buffer[parser.buffer_pos] == ';' || + parser.buffer[parser.buffer_pos] == '/' || parser.buffer[parser.buffer_pos] == '?' || + parser.buffer[parser.buffer_pos] == ':' || parser.buffer[parser.buffer_pos] == '@' || + parser.buffer[parser.buffer_pos] == '&' || parser.buffer[parser.buffer_pos] == '=' || + parser.buffer[parser.buffer_pos] == '+' || parser.buffer[parser.buffer_pos] == '$' || + parser.buffer[parser.buffer_pos] == ',' || parser.buffer[parser.buffer_pos] == '.' || + parser.buffer[parser.buffer_pos] == '!' || parser.buffer[parser.buffer_pos] == '~' || + parser.buffer[parser.buffer_pos] == '*' || parser.buffer[parser.buffer_pos] == '\'' || + parser.buffer[parser.buffer_pos] == '(' || parser.buffer[parser.buffer_pos] == ')' || + parser.buffer[parser.buffer_pos] == '[' || parser.buffer[parser.buffer_pos] == ']' || + parser.buffer[parser.buffer_pos] == '%' { + // Check if it is a URI-escape sequence. + if parser.buffer[parser.buffer_pos] == '%' { + if !yaml_parser_scan_uri_escapes(parser, directive, start_mark, &s) { + return false + } + } else { + s = read(parser, s) + } + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Check if the tag is non-empty. + if len(s) == 0 { + yaml_parser_set_scanner_tag_error(parser, directive, + start_mark, "did not find expected tag URI") + return false + } + *uri = s + return true +} + +// Decode an URI-escape sequence corresponding to a single UTF-8 character. 
+func yaml_parser_scan_uri_escapes(parser *yaml_parser_t, directive bool, start_mark yaml_mark_t, s *[]byte) bool { + + // Decode the required number of characters. + w := 1024 + for w > 0 { + // Check for a URI-escaped octet. + if parser.unread < 3 && !yaml_parser_update_buffer(parser, 3) { + return false + } + + if !(parser.buffer[parser.buffer_pos] == '%' && + is_hex(parser.buffer, parser.buffer_pos+1) && + is_hex(parser.buffer, parser.buffer_pos+2)) { + return yaml_parser_set_scanner_tag_error(parser, directive, + start_mark, "did not find URI escaped octet") + } + + // Get the octet. + octet := byte((as_hex(parser.buffer, parser.buffer_pos+1) << 4) + as_hex(parser.buffer, parser.buffer_pos+2)) + + // If it is the leading octet, determine the length of the UTF-8 sequence. + if w == 1024 { + w = width(octet) + if w == 0 { + return yaml_parser_set_scanner_tag_error(parser, directive, + start_mark, "found an incorrect leading UTF-8 octet") + } + } else { + // Check if the trailing octet is correct. + if octet&0xC0 != 0x80 { + return yaml_parser_set_scanner_tag_error(parser, directive, + start_mark, "found an incorrect trailing UTF-8 octet") + } + } + + // Copy the octet and move the pointers. + *s = append(*s, octet) + skip(parser) + skip(parser) + skip(parser) + w-- + } + return true +} + +// Scan a block scalar. +func yaml_parser_scan_block_scalar(parser *yaml_parser_t, token *yaml_token_t, literal bool) bool { + // Eat the indicator '|' or '>'. + start_mark := parser.mark + skip(parser) + + // Scan the additional block scalar indicators. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + // Check for a chomping indicator. + var chomping, increment int + if parser.buffer[parser.buffer_pos] == '+' || parser.buffer[parser.buffer_pos] == '-' { + // Set the chomping method and eat the indicator. 
+ if parser.buffer[parser.buffer_pos] == '+' { + chomping = +1 + } else { + chomping = -1 + } + skip(parser) + + // Check for an indentation indicator. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + if is_digit(parser.buffer, parser.buffer_pos) { + // Check that the indentation is greater than 0. + if parser.buffer[parser.buffer_pos] == '0' { + yaml_parser_set_scanner_error(parser, "while scanning a block scalar", + start_mark, "found an indentation indicator equal to 0") + return false + } + + // Get the indentation level and eat the indicator. + increment = as_digit(parser.buffer, parser.buffer_pos) + skip(parser) + } + + } else if is_digit(parser.buffer, parser.buffer_pos) { + // Do the same as above, but in the opposite order. + + if parser.buffer[parser.buffer_pos] == '0' { + yaml_parser_set_scanner_error(parser, "while scanning a block scalar", + start_mark, "found an indentation indicator equal to 0") + return false + } + increment = as_digit(parser.buffer, parser.buffer_pos) + skip(parser) + + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + if parser.buffer[parser.buffer_pos] == '+' || parser.buffer[parser.buffer_pos] == '-' { + if parser.buffer[parser.buffer_pos] == '+' { + chomping = +1 + } else { + chomping = -1 + } + skip(parser) + } + } + + // Eat whitespaces and comments to the end of the line. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + for is_blank(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + if parser.buffer[parser.buffer_pos] == '#' { + for !is_breakz(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + } + + // Check if we are at the end of the line. 
+ if !is_breakz(parser.buffer, parser.buffer_pos) { + yaml_parser_set_scanner_error(parser, "while scanning a block scalar", + start_mark, "did not find expected comment or line break") + return false + } + + // Eat a line break. + if is_break(parser.buffer, parser.buffer_pos) { + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + skip_line(parser) + } + + end_mark := parser.mark + + // Set the indentation level if it was specified. + var indent int + if increment > 0 { + if parser.indent >= 0 { + indent = parser.indent + increment + } else { + indent = increment + } + } + + // Scan the leading line breaks and determine the indentation level if needed. + var s, leading_break, trailing_breaks []byte + if !yaml_parser_scan_block_scalar_breaks(parser, &indent, &trailing_breaks, start_mark, &end_mark) { + return false + } + + // Scan the block scalar content. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + var leading_blank, trailing_blank bool + for parser.mark.column == indent && !is_z(parser.buffer, parser.buffer_pos) { + // We are at the beginning of a non-empty line. + + // Is it a trailing whitespace? + trailing_blank = is_blank(parser.buffer, parser.buffer_pos) + + // Check if we need to fold the leading line break. + if !literal && !leading_blank && !trailing_blank && len(leading_break) > 0 && leading_break[0] == '\n' { + // Do we need to join the lines by space? + if len(trailing_breaks) == 0 { + s = append(s, ' ') + } + } else { + s = append(s, leading_break...) + } + leading_break = leading_break[:0] + + // Append the remaining line breaks. + s = append(s, trailing_breaks...) + trailing_breaks = trailing_breaks[:0] + + // Is it a leading whitespace? + leading_blank = is_blank(parser.buffer, parser.buffer_pos) + + // Consume the current line. 
+ for !is_breakz(parser.buffer, parser.buffer_pos) { + s = read(parser, s) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Consume the line break. + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + + leading_break = read_line(parser, leading_break) + + // Eat the following indentation spaces and line breaks. + if !yaml_parser_scan_block_scalar_breaks(parser, &indent, &trailing_breaks, start_mark, &end_mark) { + return false + } + } + + // Chomp the tail. + if chomping != -1 { + s = append(s, leading_break...) + } + if chomping == 1 { + s = append(s, trailing_breaks...) + } + + // Create a token. + *token = yaml_token_t{ + typ: yaml_SCALAR_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + value: s, + style: yaml_LITERAL_SCALAR_STYLE, + } + if !literal { + token.style = yaml_FOLDED_SCALAR_STYLE + } + return true +} + +// Scan indentation spaces and line breaks for a block scalar. Determine the +// indentation level if needed. +func yaml_parser_scan_block_scalar_breaks(parser *yaml_parser_t, indent *int, breaks *[]byte, start_mark yaml_mark_t, end_mark *yaml_mark_t) bool { + *end_mark = parser.mark + + // Eat the indentation spaces and line breaks. + max_indent := 0 + for { + // Eat the indentation spaces. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + for (*indent == 0 || parser.mark.column < *indent) && is_space(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + if parser.mark.column > max_indent { + max_indent = parser.mark.column + } + + // Check for a tab character messing the indentation. 
+ if (*indent == 0 || parser.mark.column < *indent) && is_tab(parser.buffer, parser.buffer_pos) { + return yaml_parser_set_scanner_error(parser, "while scanning a block scalar", + start_mark, "found a tab character where an indentation space is expected") + } + + // Have we found a non-empty line? + if !is_break(parser.buffer, parser.buffer_pos) { + break + } + + // Consume the line break. + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + // [Go] Should really be returning breaks instead. + *breaks = read_line(parser, *breaks) + *end_mark = parser.mark + } + + // Determine the indentation level if needed. + if *indent == 0 { + *indent = max_indent + if *indent < parser.indent+1 { + *indent = parser.indent + 1 + } + if *indent < 1 { + *indent = 1 + } + } + return true +} + +// Scan a quoted scalar. +func yaml_parser_scan_flow_scalar(parser *yaml_parser_t, token *yaml_token_t, single bool) bool { + // Eat the left quote. + start_mark := parser.mark + skip(parser) + + // Consume the content of the quoted scalar. + var s, leading_break, trailing_breaks, whitespaces []byte + for { + // Check that there are no document indicators at the beginning of the line. + if parser.unread < 4 && !yaml_parser_update_buffer(parser, 4) { + return false + } + + if parser.mark.column == 0 && + ((parser.buffer[parser.buffer_pos+0] == '-' && + parser.buffer[parser.buffer_pos+1] == '-' && + parser.buffer[parser.buffer_pos+2] == '-') || + (parser.buffer[parser.buffer_pos+0] == '.' && + parser.buffer[parser.buffer_pos+1] == '.' && + parser.buffer[parser.buffer_pos+2] == '.')) && + is_blankz(parser.buffer, parser.buffer_pos+3) { + yaml_parser_set_scanner_error(parser, "while scanning a quoted scalar", + start_mark, "found unexpected document indicator") + return false + } + + // Check for EOF. 
+ if is_z(parser.buffer, parser.buffer_pos) { + yaml_parser_set_scanner_error(parser, "while scanning a quoted scalar", + start_mark, "found unexpected end of stream") + return false + } + + // Consume non-blank characters. + leading_blanks := false + for !is_blankz(parser.buffer, parser.buffer_pos) { + if single && parser.buffer[parser.buffer_pos] == '\'' && parser.buffer[parser.buffer_pos+1] == '\'' { + // Is is an escaped single quote. + s = append(s, '\'') + skip(parser) + skip(parser) + + } else if single && parser.buffer[parser.buffer_pos] == '\'' { + // It is a right single quote. + break + } else if !single && parser.buffer[parser.buffer_pos] == '"' { + // It is a right double quote. + break + + } else if !single && parser.buffer[parser.buffer_pos] == '\\' && is_break(parser.buffer, parser.buffer_pos+1) { + // It is an escaped line break. + if parser.unread < 3 && !yaml_parser_update_buffer(parser, 3) { + return false + } + skip(parser) + skip_line(parser) + leading_blanks = true + break + + } else if !single && parser.buffer[parser.buffer_pos] == '\\' { + // It is an escape sequence. + code_length := 0 + + // Check the escape character. 
+ switch parser.buffer[parser.buffer_pos+1] { + case '0': + s = append(s, 0) + case 'a': + s = append(s, '\x07') + case 'b': + s = append(s, '\x08') + case 't', '\t': + s = append(s, '\x09') + case 'n': + s = append(s, '\x0A') + case 'v': + s = append(s, '\x0B') + case 'f': + s = append(s, '\x0C') + case 'r': + s = append(s, '\x0D') + case 'e': + s = append(s, '\x1B') + case ' ': + s = append(s, '\x20') + case '"': + s = append(s, '"') + case '\'': + s = append(s, '\'') + case '\\': + s = append(s, '\\') + case 'N': // NEL (#x85) + s = append(s, '\xC2') + s = append(s, '\x85') + case '_': // #xA0 + s = append(s, '\xC2') + s = append(s, '\xA0') + case 'L': // LS (#x2028) + s = append(s, '\xE2') + s = append(s, '\x80') + s = append(s, '\xA8') + case 'P': // PS (#x2029) + s = append(s, '\xE2') + s = append(s, '\x80') + s = append(s, '\xA9') + case 'x': + code_length = 2 + case 'u': + code_length = 4 + case 'U': + code_length = 8 + default: + yaml_parser_set_scanner_error(parser, "while parsing a quoted scalar", + start_mark, "found unknown escape character") + return false + } + + skip(parser) + skip(parser) + + // Consume an arbitrary escape code. + if code_length > 0 { + var value int + + // Scan the character value. + if parser.unread < code_length && !yaml_parser_update_buffer(parser, code_length) { + return false + } + for k := 0; k < code_length; k++ { + if !is_hex(parser.buffer, parser.buffer_pos+k) { + yaml_parser_set_scanner_error(parser, "while parsing a quoted scalar", + start_mark, "did not find expected hexdecimal number") + return false + } + value = (value << 4) + as_hex(parser.buffer, parser.buffer_pos+k) + } + + // Check the value and write the character. 
+ if (value >= 0xD800 && value <= 0xDFFF) || value > 0x10FFFF { + yaml_parser_set_scanner_error(parser, "while parsing a quoted scalar", + start_mark, "found invalid Unicode character escape code") + return false + } + if value <= 0x7F { + s = append(s, byte(value)) + } else if value <= 0x7FF { + s = append(s, byte(0xC0+(value>>6))) + s = append(s, byte(0x80+(value&0x3F))) + } else if value <= 0xFFFF { + s = append(s, byte(0xE0+(value>>12))) + s = append(s, byte(0x80+((value>>6)&0x3F))) + s = append(s, byte(0x80+(value&0x3F))) + } else { + s = append(s, byte(0xF0+(value>>18))) + s = append(s, byte(0x80+((value>>12)&0x3F))) + s = append(s, byte(0x80+((value>>6)&0x3F))) + s = append(s, byte(0x80+(value&0x3F))) + } + + // Advance the pointer. + for k := 0; k < code_length; k++ { + skip(parser) + } + } + } else { + // It is a non-escaped non-blank character. + s = read(parser, s) + } + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + } + + // Check if we are at the end of the scalar. + if single { + if parser.buffer[parser.buffer_pos] == '\'' { + break + } + } else { + if parser.buffer[parser.buffer_pos] == '"' { + break + } + } + + // Consume blank characters. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + for is_blank(parser.buffer, parser.buffer_pos) || is_break(parser.buffer, parser.buffer_pos) { + if is_blank(parser.buffer, parser.buffer_pos) { + // Consume a space or a tab character. + if !leading_blanks { + whitespaces = read(parser, whitespaces) + } else { + skip(parser) + } + } else { + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + + // Check if it is a first line break. 
+ if !leading_blanks { + whitespaces = whitespaces[:0] + leading_break = read_line(parser, leading_break) + leading_blanks = true + } else { + trailing_breaks = read_line(parser, trailing_breaks) + } + } + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Join the whitespaces or fold line breaks. + if leading_blanks { + // Do we need to fold line breaks? + if len(leading_break) > 0 && leading_break[0] == '\n' { + if len(trailing_breaks) == 0 { + s = append(s, ' ') + } else { + s = append(s, trailing_breaks...) + } + } else { + s = append(s, leading_break...) + s = append(s, trailing_breaks...) + } + trailing_breaks = trailing_breaks[:0] + leading_break = leading_break[:0] + } else { + s = append(s, whitespaces...) + whitespaces = whitespaces[:0] + } + } + + // Eat the right quote. + skip(parser) + end_mark := parser.mark + + // Create a token. + *token = yaml_token_t{ + typ: yaml_SCALAR_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + value: s, + style: yaml_SINGLE_QUOTED_SCALAR_STYLE, + } + if !single { + token.style = yaml_DOUBLE_QUOTED_SCALAR_STYLE + } + return true +} + +// Scan a plain scalar. +func yaml_parser_scan_plain_scalar(parser *yaml_parser_t, token *yaml_token_t) bool { + + var s, leading_break, trailing_breaks, whitespaces []byte + var leading_blanks bool + var indent = parser.indent + 1 + + start_mark := parser.mark + end_mark := parser.mark + + // Consume the content of the plain scalar. + for { + // Check for a document indicator. + if parser.unread < 4 && !yaml_parser_update_buffer(parser, 4) { + return false + } + if parser.mark.column == 0 && + ((parser.buffer[parser.buffer_pos+0] == '-' && + parser.buffer[parser.buffer_pos+1] == '-' && + parser.buffer[parser.buffer_pos+2] == '-') || + (parser.buffer[parser.buffer_pos+0] == '.' && + parser.buffer[parser.buffer_pos+1] == '.' 
&& + parser.buffer[parser.buffer_pos+2] == '.')) && + is_blankz(parser.buffer, parser.buffer_pos+3) { + break + } + + // Check for a comment. + if parser.buffer[parser.buffer_pos] == '#' { + break + } + + // Consume non-blank characters. + for !is_blankz(parser.buffer, parser.buffer_pos) { + + // Check for 'x:x' in the flow context. TODO: Fix the test "spec-08-13". + if parser.flow_level > 0 && + parser.buffer[parser.buffer_pos] == ':' && + !is_blankz(parser.buffer, parser.buffer_pos+1) { + yaml_parser_set_scanner_error(parser, "while scanning a plain scalar", + start_mark, "found unexpected ':'") + return false + } + + // Check for indicators that may end a plain scalar. + if (parser.buffer[parser.buffer_pos] == ':' && is_blankz(parser.buffer, parser.buffer_pos+1)) || + (parser.flow_level > 0 && + (parser.buffer[parser.buffer_pos] == ',' || parser.buffer[parser.buffer_pos] == ':' || + parser.buffer[parser.buffer_pos] == '?' || parser.buffer[parser.buffer_pos] == '[' || + parser.buffer[parser.buffer_pos] == ']' || parser.buffer[parser.buffer_pos] == '{' || + parser.buffer[parser.buffer_pos] == '}')) { + break + } + + // Check if we need to join whitespaces and breaks. + if leading_blanks || len(whitespaces) > 0 { + if leading_blanks { + // Do we need to fold line breaks? + if leading_break[0] == '\n' { + if len(trailing_breaks) == 0 { + s = append(s, ' ') + } else { + s = append(s, trailing_breaks...) + } + } else { + s = append(s, leading_break...) + s = append(s, trailing_breaks...) + } + trailing_breaks = trailing_breaks[:0] + leading_break = leading_break[:0] + leading_blanks = false + } else { + s = append(s, whitespaces...) + whitespaces = whitespaces[:0] + } + } + + // Copy the character. + s = read(parser, s) + + end_mark = parser.mark + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + } + + // Is it the end? 
+ if !(is_blank(parser.buffer, parser.buffer_pos) || is_break(parser.buffer, parser.buffer_pos)) { + break + } + + // Consume blank characters. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + for is_blank(parser.buffer, parser.buffer_pos) || is_break(parser.buffer, parser.buffer_pos) { + if is_blank(parser.buffer, parser.buffer_pos) { + + // Check for tab character that abuse indentation. + if leading_blanks && parser.mark.column < indent && is_tab(parser.buffer, parser.buffer_pos) { + yaml_parser_set_scanner_error(parser, "while scanning a plain scalar", + start_mark, "found a tab character that violate indentation") + return false + } + + // Consume a space or a tab character. + if !leading_blanks { + whitespaces = read(parser, whitespaces) + } else { + skip(parser) + } + } else { + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + + // Check if it is a first line break. + if !leading_blanks { + whitespaces = whitespaces[:0] + leading_break = read_line(parser, leading_break) + leading_blanks = true + } else { + trailing_breaks = read_line(parser, trailing_breaks) + } + } + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Check indentation level. + if parser.flow_level == 0 && parser.mark.column < indent { + break + } + } + + // Create a token. + *token = yaml_token_t{ + typ: yaml_SCALAR_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + value: s, + style: yaml_PLAIN_SCALAR_STYLE, + } + + // Note that we change the 'simple_key_allowed' flag. 
+ if leading_blanks { + parser.simple_key_allowed = true + } + return true +} diff --git a/vendor/gopkg.in/yaml.v2/sorter.go b/vendor/gopkg.in/yaml.v2/sorter.go new file mode 100644 index 000000000..5958822f9 --- /dev/null +++ b/vendor/gopkg.in/yaml.v2/sorter.go @@ -0,0 +1,104 @@ +package yaml + +import ( + "reflect" + "unicode" +) + +type keyList []reflect.Value + +func (l keyList) Len() int { return len(l) } +func (l keyList) Swap(i, j int) { l[i], l[j] = l[j], l[i] } +func (l keyList) Less(i, j int) bool { + a := l[i] + b := l[j] + ak := a.Kind() + bk := b.Kind() + for (ak == reflect.Interface || ak == reflect.Ptr) && !a.IsNil() { + a = a.Elem() + ak = a.Kind() + } + for (bk == reflect.Interface || bk == reflect.Ptr) && !b.IsNil() { + b = b.Elem() + bk = b.Kind() + } + af, aok := keyFloat(a) + bf, bok := keyFloat(b) + if aok && bok { + if af != bf { + return af < bf + } + if ak != bk { + return ak < bk + } + return numLess(a, b) + } + if ak != reflect.String || bk != reflect.String { + return ak < bk + } + ar, br := []rune(a.String()), []rune(b.String()) + for i := 0; i < len(ar) && i < len(br); i++ { + if ar[i] == br[i] { + continue + } + al := unicode.IsLetter(ar[i]) + bl := unicode.IsLetter(br[i]) + if al && bl { + return ar[i] < br[i] + } + if al || bl { + return bl + } + var ai, bi int + var an, bn int64 + for ai = i; ai < len(ar) && unicode.IsDigit(ar[ai]); ai++ { + an = an*10 + int64(ar[ai]-'0') + } + for bi = i; bi < len(br) && unicode.IsDigit(br[bi]); bi++ { + bn = bn*10 + int64(br[bi]-'0') + } + if an != bn { + return an < bn + } + if ai != bi { + return ai < bi + } + return ar[i] < br[i] + } + return len(ar) < len(br) +} + +// keyFloat returns a float value for v if it is a number/bool +// and whether it is a number/bool or not. 
+func keyFloat(v reflect.Value) (f float64, ok bool) { + switch v.Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return float64(v.Int()), true + case reflect.Float32, reflect.Float64: + return v.Float(), true + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return float64(v.Uint()), true + case reflect.Bool: + if v.Bool() { + return 1, true + } + return 0, true + } + return 0, false +} + +// numLess returns whether a < b. +// a and b must necessarily have the same kind. +func numLess(a, b reflect.Value) bool { + switch a.Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return a.Int() < b.Int() + case reflect.Float32, reflect.Float64: + return a.Float() < b.Float() + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return a.Uint() < b.Uint() + case reflect.Bool: + return !a.Bool() && b.Bool() + } + panic("not a number") +} diff --git a/vendor/gopkg.in/yaml.v2/suite_test.go b/vendor/gopkg.in/yaml.v2/suite_test.go new file mode 100644 index 000000000..c5cf1ed4f --- /dev/null +++ b/vendor/gopkg.in/yaml.v2/suite_test.go @@ -0,0 +1,12 @@ +package yaml_test + +import ( + . "gopkg.in/check.v1" + "testing" +) + +func Test(t *testing.T) { TestingT(t) } + +type S struct{} + +var _ = Suite(&S{}) diff --git a/vendor/gopkg.in/yaml.v2/writerc.go b/vendor/gopkg.in/yaml.v2/writerc.go new file mode 100644 index 000000000..190362f25 --- /dev/null +++ b/vendor/gopkg.in/yaml.v2/writerc.go @@ -0,0 +1,89 @@ +package yaml + +// Set the writer error and return false. +func yaml_emitter_set_writer_error(emitter *yaml_emitter_t, problem string) bool { + emitter.error = yaml_WRITER_ERROR + emitter.problem = problem + return false +} + +// Flush the output buffer. 
+func yaml_emitter_flush(emitter *yaml_emitter_t) bool { + if emitter.write_handler == nil { + panic("write handler not set") + } + + // Check if the buffer is empty. + if emitter.buffer_pos == 0 { + return true + } + + // If the output encoding is UTF-8, we don't need to recode the buffer. + if emitter.encoding == yaml_UTF8_ENCODING { + if err := emitter.write_handler(emitter, emitter.buffer[:emitter.buffer_pos]); err != nil { + return yaml_emitter_set_writer_error(emitter, "write error: "+err.Error()) + } + emitter.buffer_pos = 0 + return true + } + + // Recode the buffer into the raw buffer. + var low, high int + if emitter.encoding == yaml_UTF16LE_ENCODING { + low, high = 0, 1 + } else { + high, low = 1, 0 + } + + pos := 0 + for pos < emitter.buffer_pos { + // See the "reader.c" code for more details on UTF-8 encoding. Note + // that we assume that the buffer contains a valid UTF-8 sequence. + + // Read the next UTF-8 character. + octet := emitter.buffer[pos] + + var w int + var value rune + switch { + case octet&0x80 == 0x00: + w, value = 1, rune(octet&0x7F) + case octet&0xE0 == 0xC0: + w, value = 2, rune(octet&0x1F) + case octet&0xF0 == 0xE0: + w, value = 3, rune(octet&0x0F) + case octet&0xF8 == 0xF0: + w, value = 4, rune(octet&0x07) + } + for k := 1; k < w; k++ { + octet = emitter.buffer[pos+k] + value = (value << 6) + (rune(octet) & 0x3F) + } + pos += w + + // Write the character. + if value < 0x10000 { + var b [2]byte + b[high] = byte(value >> 8) + b[low] = byte(value & 0xFF) + emitter.raw_buffer = append(emitter.raw_buffer, b[0], b[1]) + } else { + // Write the character using a surrogate pair (check "reader.c"). + var b [4]byte + value -= 0x10000 + b[high] = byte(0xD8 + (value >> 18)) + b[low] = byte((value >> 10) & 0xFF) + b[high+2] = byte(0xDC + ((value >> 8) & 0xFF)) + b[low+2] = byte(value & 0xFF) + emitter.raw_buffer = append(emitter.raw_buffer, b[0], b[1], b[2], b[3]) + } + } + + // Write the raw buffer. 
+ if err := emitter.write_handler(emitter, emitter.raw_buffer); err != nil { + return yaml_emitter_set_writer_error(emitter, "write error: "+err.Error()) + } + emitter.buffer_pos = 0 + emitter.raw_buffer = emitter.raw_buffer[:0] + return true +} diff --git a/vendor/gopkg.in/yaml.v2/yaml.go b/vendor/gopkg.in/yaml.v2/yaml.go new file mode 100644 index 000000000..36d6b883a --- /dev/null +++ b/vendor/gopkg.in/yaml.v2/yaml.go @@ -0,0 +1,346 @@ +// Package yaml implements YAML support for the Go language. +// +// Source code and other details for the project are available at GitHub: +// +// https://github.com/go-yaml/yaml +// +package yaml + +import ( + "errors" + "fmt" + "reflect" + "strings" + "sync" +) + +// MapSlice encodes and decodes as a YAML map. +// The order of keys is preserved when encoding and decoding. +type MapSlice []MapItem + +// MapItem is an item in a MapSlice. +type MapItem struct { + Key, Value interface{} +} + +// The Unmarshaler interface may be implemented by types to customize their +// behavior when being unmarshaled from a YAML document. The UnmarshalYAML +// method receives a function that may be called to unmarshal the original +// YAML value into a field or variable. It is safe to call the unmarshal +// function parameter more than once if necessary. +type Unmarshaler interface { + UnmarshalYAML(unmarshal func(interface{}) error) error +} + +// The Marshaler interface may be implemented by types to customize their +// behavior when being marshaled into a YAML document. The returned value +// is marshaled in place of the original value implementing Marshaler. +// +// If an error is returned by MarshalYAML, the marshaling procedure stops +// and returns with the provided error. +type Marshaler interface { + MarshalYAML() (interface{}, error) +} + +// Unmarshal decodes the first document found within the in byte slice +// and assigns decoded values into the out value. 
+// +// Maps and pointers (to a struct, string, int, etc) are accepted as out +// values. If an internal pointer within a struct is not initialized, +// the yaml package will initialize it if necessary for unmarshalling +// the provided data. The out parameter must not be nil. +// +// The type of the decoded values should be compatible with the respective +// values in out. If one or more values cannot be decoded due to a type +// mismatches, decoding continues partially until the end of the YAML +// content, and a *yaml.TypeError is returned with details for all +// missed values. +// +// Struct fields are only unmarshalled if they are exported (have an +// upper case first letter), and are unmarshalled using the field name +// lowercased as the default key. Custom keys may be defined via the +// "yaml" name in the field tag: the content preceding the first comma +// is used as the key, and the following comma-separated options are +// used to tweak the marshalling process (see Marshal). +// Conflicting names result in a runtime error. +// +// For example: +// +// type T struct { +// F int `yaml:"a,omitempty"` +// B int +// } +// var t T +// yaml.Unmarshal([]byte("a: 1\nb: 2"), &t) +// +// See the documentation of Marshal for the format of tags and a list of +// supported tag options. +// +func Unmarshal(in []byte, out interface{}) (err error) { + defer handleErr(&err) + d := newDecoder() + p := newParser(in) + defer p.destroy() + node := p.parse() + if node != nil { + v := reflect.ValueOf(out) + if v.Kind() == reflect.Ptr && !v.IsNil() { + v = v.Elem() + } + d.unmarshal(node, v) + } + if len(d.terrors) > 0 { + return &TypeError{d.terrors} + } + return nil +} + +// Marshal serializes the value provided into a YAML document. The structure +// of the generated document will reflect the structure of the value itself. +// Maps and pointers (to struct, string, int, etc) are accepted as the in value. 
+// +// Struct fields are only unmarshalled if they are exported (have an upper case +// first letter), and are unmarshalled using the field name lowercased as the +// default key. Custom keys may be defined via the "yaml" name in the field +// tag: the content preceding the first comma is used as the key, and the +// following comma-separated options are used to tweak the marshalling process. +// Conflicting names result in a runtime error. +// +// The field tag format accepted is: +// +// `(...) yaml:"[][,[,]]" (...)` +// +// The following flags are currently supported: +// +// omitempty Only include the field if it's not set to the zero +// value for the type or to empty slices or maps. +// Does not apply to zero valued structs. +// +// flow Marshal using a flow style (useful for structs, +// sequences and maps). +// +// inline Inline the field, which must be a struct or a map, +// causing all of its fields or keys to be processed as if +// they were part of the outer struct. For maps, keys must +// not conflict with the yaml keys of other struct fields. +// +// In addition, if the key is "-", the field is ignored. 
+// +// For example: +// +// type T struct { +// F int "a,omitempty" +// B int +// } +// yaml.Marshal(&T{B: 2}) // Returns "b: 2\n" +// yaml.Marshal(&T{F: 1}} // Returns "a: 1\nb: 0\n" +// +func Marshal(in interface{}) (out []byte, err error) { + defer handleErr(&err) + e := newEncoder() + defer e.destroy() + e.marshal("", reflect.ValueOf(in)) + e.finish() + out = e.out + return +} + +func handleErr(err *error) { + if v := recover(); v != nil { + if e, ok := v.(yamlError); ok { + *err = e.err + } else { + panic(v) + } + } +} + +type yamlError struct { + err error +} + +func fail(err error) { + panic(yamlError{err}) +} + +func failf(format string, args ...interface{}) { + panic(yamlError{fmt.Errorf("yaml: "+format, args...)}) +} + +// A TypeError is returned by Unmarshal when one or more fields in +// the YAML document cannot be properly decoded into the requested +// types. When this error is returned, the value is still +// unmarshaled partially. +type TypeError struct { + Errors []string +} + +func (e *TypeError) Error() string { + return fmt.Sprintf("yaml: unmarshal errors:\n %s", strings.Join(e.Errors, "\n ")) +} + +// -------------------------------------------------------------------------- +// Maintain a mapping of keys to structure field indexes + +// The code in this section was copied from mgo/bson. + +// structInfo holds details for the serialization of fields of +// a given struct. +type structInfo struct { + FieldsMap map[string]fieldInfo + FieldsList []fieldInfo + + // InlineMap is the number of the field in the struct that + // contains an ,inline map, or -1 if there's none. + InlineMap int +} + +type fieldInfo struct { + Key string + Num int + OmitEmpty bool + Flow bool + + // Inline holds the field index if the field is part of an inlined struct. 
+ Inline []int +} + +var structMap = make(map[reflect.Type]*structInfo) +var fieldMapMutex sync.RWMutex + +func getStructInfo(st reflect.Type) (*structInfo, error) { + fieldMapMutex.RLock() + sinfo, found := structMap[st] + fieldMapMutex.RUnlock() + if found { + return sinfo, nil + } + + n := st.NumField() + fieldsMap := make(map[string]fieldInfo) + fieldsList := make([]fieldInfo, 0, n) + inlineMap := -1 + for i := 0; i != n; i++ { + field := st.Field(i) + if field.PkgPath != "" && !field.Anonymous { + continue // Private field + } + + info := fieldInfo{Num: i} + + tag := field.Tag.Get("yaml") + if tag == "" && strings.Index(string(field.Tag), ":") < 0 { + tag = string(field.Tag) + } + if tag == "-" { + continue + } + + inline := false + fields := strings.Split(tag, ",") + if len(fields) > 1 { + for _, flag := range fields[1:] { + switch flag { + case "omitempty": + info.OmitEmpty = true + case "flow": + info.Flow = true + case "inline": + inline = true + default: + return nil, errors.New(fmt.Sprintf("Unsupported flag %q in tag %q of type %s", flag, tag, st)) + } + } + tag = fields[0] + } + + if inline { + switch field.Type.Kind() { + case reflect.Map: + if inlineMap >= 0 { + return nil, errors.New("Multiple ,inline maps in struct " + st.String()) + } + if field.Type.Key() != reflect.TypeOf("") { + return nil, errors.New("Option ,inline needs a map with string keys in struct " + st.String()) + } + inlineMap = info.Num + case reflect.Struct: + sinfo, err := getStructInfo(field.Type) + if err != nil { + return nil, err + } + for _, finfo := range sinfo.FieldsList { + if _, found := fieldsMap[finfo.Key]; found { + msg := "Duplicated key '" + finfo.Key + "' in struct " + st.String() + return nil, errors.New(msg) + } + if finfo.Inline == nil { + finfo.Inline = []int{i, finfo.Num} + } else { + finfo.Inline = append([]int{i}, finfo.Inline...) 
+ } + fieldsMap[finfo.Key] = finfo + fieldsList = append(fieldsList, finfo) + } + default: + //return nil, errors.New("Option ,inline needs a struct value or map field") + return nil, errors.New("Option ,inline needs a struct value field") + } + continue + } + + if tag != "" { + info.Key = tag + } else { + info.Key = strings.ToLower(field.Name) + } + + if _, found = fieldsMap[info.Key]; found { + msg := "Duplicated key '" + info.Key + "' in struct " + st.String() + return nil, errors.New(msg) + } + + fieldsList = append(fieldsList, info) + fieldsMap[info.Key] = info + } + + sinfo = &structInfo{fieldsMap, fieldsList, inlineMap} + + fieldMapMutex.Lock() + structMap[st] = sinfo + fieldMapMutex.Unlock() + return sinfo, nil +} + +func isZero(v reflect.Value) bool { + switch v.Kind() { + case reflect.String: + return len(v.String()) == 0 + case reflect.Interface, reflect.Ptr: + return v.IsNil() + case reflect.Slice: + return v.Len() == 0 + case reflect.Map: + return v.Len() == 0 + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return v.Int() == 0 + case reflect.Float32, reflect.Float64: + return v.Float() == 0 + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return v.Uint() == 0 + case reflect.Bool: + return !v.Bool() + case reflect.Struct: + vt := v.Type() + for i := v.NumField() - 1; i >= 0; i-- { + if vt.Field(i).PkgPath != "" { + continue // Private field + } + if !isZero(v.Field(i)) { + return false + } + } + return true + } + return false +} diff --git a/vendor/gopkg.in/yaml.v2/yamlh.go b/vendor/gopkg.in/yaml.v2/yamlh.go new file mode 100644 index 000000000..d60a6b6b0 --- /dev/null +++ b/vendor/gopkg.in/yaml.v2/yamlh.go @@ -0,0 +1,716 @@ +package yaml + +import ( + "io" +) + +// The version directive data. +type yaml_version_directive_t struct { + major int8 // The major version number. + minor int8 // The minor version number. +} + +// The tag directive data. 
+type yaml_tag_directive_t struct { + handle []byte // The tag handle. + prefix []byte // The tag prefix. +} + +type yaml_encoding_t int + +// The stream encoding. +const ( + // Let the parser choose the encoding. + yaml_ANY_ENCODING yaml_encoding_t = iota + + yaml_UTF8_ENCODING // The default UTF-8 encoding. + yaml_UTF16LE_ENCODING // The UTF-16-LE encoding with BOM. + yaml_UTF16BE_ENCODING // The UTF-16-BE encoding with BOM. +) + +type yaml_break_t int + +// Line break types. +const ( + // Let the parser choose the break type. + yaml_ANY_BREAK yaml_break_t = iota + + yaml_CR_BREAK // Use CR for line breaks (Mac style). + yaml_LN_BREAK // Use LN for line breaks (Unix style). + yaml_CRLN_BREAK // Use CR LN for line breaks (DOS style). +) + +type yaml_error_type_t int + +// Many bad things could happen with the parser and emitter. +const ( + // No error is produced. + yaml_NO_ERROR yaml_error_type_t = iota + + yaml_MEMORY_ERROR // Cannot allocate or reallocate a block of memory. + yaml_READER_ERROR // Cannot read or decode the input stream. + yaml_SCANNER_ERROR // Cannot scan the input stream. + yaml_PARSER_ERROR // Cannot parse the input stream. + yaml_COMPOSER_ERROR // Cannot compose a YAML document. + yaml_WRITER_ERROR // Cannot write to the output stream. + yaml_EMITTER_ERROR // Cannot emit a YAML stream. +) + +// The pointer position. +type yaml_mark_t struct { + index int // The position index. + line int // The position line. + column int // The position column. +} + +// Node Styles + +type yaml_style_t int8 + +type yaml_scalar_style_t yaml_style_t + +// Scalar styles. +const ( + // Let the emitter choose the style. + yaml_ANY_SCALAR_STYLE yaml_scalar_style_t = iota + + yaml_PLAIN_SCALAR_STYLE // The plain scalar style. + yaml_SINGLE_QUOTED_SCALAR_STYLE // The single-quoted scalar style. + yaml_DOUBLE_QUOTED_SCALAR_STYLE // The double-quoted scalar style. + yaml_LITERAL_SCALAR_STYLE // The literal scalar style. 
+ yaml_FOLDED_SCALAR_STYLE // The folded scalar style. +) + +type yaml_sequence_style_t yaml_style_t + +// Sequence styles. +const ( + // Let the emitter choose the style. + yaml_ANY_SEQUENCE_STYLE yaml_sequence_style_t = iota + + yaml_BLOCK_SEQUENCE_STYLE // The block sequence style. + yaml_FLOW_SEQUENCE_STYLE // The flow sequence style. +) + +type yaml_mapping_style_t yaml_style_t + +// Mapping styles. +const ( + // Let the emitter choose the style. + yaml_ANY_MAPPING_STYLE yaml_mapping_style_t = iota + + yaml_BLOCK_MAPPING_STYLE // The block mapping style. + yaml_FLOW_MAPPING_STYLE // The flow mapping style. +) + +// Tokens + +type yaml_token_type_t int + +// Token types. +const ( + // An empty token. + yaml_NO_TOKEN yaml_token_type_t = iota + + yaml_STREAM_START_TOKEN // A STREAM-START token. + yaml_STREAM_END_TOKEN // A STREAM-END token. + + yaml_VERSION_DIRECTIVE_TOKEN // A VERSION-DIRECTIVE token. + yaml_TAG_DIRECTIVE_TOKEN // A TAG-DIRECTIVE token. + yaml_DOCUMENT_START_TOKEN // A DOCUMENT-START token. + yaml_DOCUMENT_END_TOKEN // A DOCUMENT-END token. + + yaml_BLOCK_SEQUENCE_START_TOKEN // A BLOCK-SEQUENCE-START token. + yaml_BLOCK_MAPPING_START_TOKEN // A BLOCK-SEQUENCE-END token. + yaml_BLOCK_END_TOKEN // A BLOCK-END token. + + yaml_FLOW_SEQUENCE_START_TOKEN // A FLOW-SEQUENCE-START token. + yaml_FLOW_SEQUENCE_END_TOKEN // A FLOW-SEQUENCE-END token. + yaml_FLOW_MAPPING_START_TOKEN // A FLOW-MAPPING-START token. + yaml_FLOW_MAPPING_END_TOKEN // A FLOW-MAPPING-END token. + + yaml_BLOCK_ENTRY_TOKEN // A BLOCK-ENTRY token. + yaml_FLOW_ENTRY_TOKEN // A FLOW-ENTRY token. + yaml_KEY_TOKEN // A KEY token. + yaml_VALUE_TOKEN // A VALUE token. + + yaml_ALIAS_TOKEN // An ALIAS token. + yaml_ANCHOR_TOKEN // An ANCHOR token. + yaml_TAG_TOKEN // A TAG token. + yaml_SCALAR_TOKEN // A SCALAR token. 
+) + +func (tt yaml_token_type_t) String() string { + switch tt { + case yaml_NO_TOKEN: + return "yaml_NO_TOKEN" + case yaml_STREAM_START_TOKEN: + return "yaml_STREAM_START_TOKEN" + case yaml_STREAM_END_TOKEN: + return "yaml_STREAM_END_TOKEN" + case yaml_VERSION_DIRECTIVE_TOKEN: + return "yaml_VERSION_DIRECTIVE_TOKEN" + case yaml_TAG_DIRECTIVE_TOKEN: + return "yaml_TAG_DIRECTIVE_TOKEN" + case yaml_DOCUMENT_START_TOKEN: + return "yaml_DOCUMENT_START_TOKEN" + case yaml_DOCUMENT_END_TOKEN: + return "yaml_DOCUMENT_END_TOKEN" + case yaml_BLOCK_SEQUENCE_START_TOKEN: + return "yaml_BLOCK_SEQUENCE_START_TOKEN" + case yaml_BLOCK_MAPPING_START_TOKEN: + return "yaml_BLOCK_MAPPING_START_TOKEN" + case yaml_BLOCK_END_TOKEN: + return "yaml_BLOCK_END_TOKEN" + case yaml_FLOW_SEQUENCE_START_TOKEN: + return "yaml_FLOW_SEQUENCE_START_TOKEN" + case yaml_FLOW_SEQUENCE_END_TOKEN: + return "yaml_FLOW_SEQUENCE_END_TOKEN" + case yaml_FLOW_MAPPING_START_TOKEN: + return "yaml_FLOW_MAPPING_START_TOKEN" + case yaml_FLOW_MAPPING_END_TOKEN: + return "yaml_FLOW_MAPPING_END_TOKEN" + case yaml_BLOCK_ENTRY_TOKEN: + return "yaml_BLOCK_ENTRY_TOKEN" + case yaml_FLOW_ENTRY_TOKEN: + return "yaml_FLOW_ENTRY_TOKEN" + case yaml_KEY_TOKEN: + return "yaml_KEY_TOKEN" + case yaml_VALUE_TOKEN: + return "yaml_VALUE_TOKEN" + case yaml_ALIAS_TOKEN: + return "yaml_ALIAS_TOKEN" + case yaml_ANCHOR_TOKEN: + return "yaml_ANCHOR_TOKEN" + case yaml_TAG_TOKEN: + return "yaml_TAG_TOKEN" + case yaml_SCALAR_TOKEN: + return "yaml_SCALAR_TOKEN" + } + return "" +} + +// The token structure. +type yaml_token_t struct { + // The token type. + typ yaml_token_type_t + + // The start/end of the token. + start_mark, end_mark yaml_mark_t + + // The stream encoding (for yaml_STREAM_START_TOKEN). + encoding yaml_encoding_t + + // The alias/anchor/scalar value or tag/tag directive handle + // (for yaml_ALIAS_TOKEN, yaml_ANCHOR_TOKEN, yaml_SCALAR_TOKEN, yaml_TAG_TOKEN, yaml_TAG_DIRECTIVE_TOKEN). 
+ value []byte + + // The tag suffix (for yaml_TAG_TOKEN). + suffix []byte + + // The tag directive prefix (for yaml_TAG_DIRECTIVE_TOKEN). + prefix []byte + + // The scalar style (for yaml_SCALAR_TOKEN). + style yaml_scalar_style_t + + // The version directive major/minor (for yaml_VERSION_DIRECTIVE_TOKEN). + major, minor int8 +} + +// Events + +type yaml_event_type_t int8 + +// Event types. +const ( + // An empty event. + yaml_NO_EVENT yaml_event_type_t = iota + + yaml_STREAM_START_EVENT // A STREAM-START event. + yaml_STREAM_END_EVENT // A STREAM-END event. + yaml_DOCUMENT_START_EVENT // A DOCUMENT-START event. + yaml_DOCUMENT_END_EVENT // A DOCUMENT-END event. + yaml_ALIAS_EVENT // An ALIAS event. + yaml_SCALAR_EVENT // A SCALAR event. + yaml_SEQUENCE_START_EVENT // A SEQUENCE-START event. + yaml_SEQUENCE_END_EVENT // A SEQUENCE-END event. + yaml_MAPPING_START_EVENT // A MAPPING-START event. + yaml_MAPPING_END_EVENT // A MAPPING-END event. +) + +// The event structure. +type yaml_event_t struct { + + // The event type. + typ yaml_event_type_t + + // The start and end of the event. + start_mark, end_mark yaml_mark_t + + // The document encoding (for yaml_STREAM_START_EVENT). + encoding yaml_encoding_t + + // The version directive (for yaml_DOCUMENT_START_EVENT). + version_directive *yaml_version_directive_t + + // The list of tag directives (for yaml_DOCUMENT_START_EVENT). + tag_directives []yaml_tag_directive_t + + // The anchor (for yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT, yaml_ALIAS_EVENT). + anchor []byte + + // The tag (for yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT). + tag []byte + + // The scalar value (for yaml_SCALAR_EVENT). + value []byte + + // Is the document start/end indicator implicit, or the tag optional? + // (for yaml_DOCUMENT_START_EVENT, yaml_DOCUMENT_END_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT, yaml_SCALAR_EVENT). 
+ implicit bool + + // Is the tag optional for any non-plain style? (for yaml_SCALAR_EVENT). + quoted_implicit bool + + // The style (for yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT). + style yaml_style_t +} + +func (e *yaml_event_t) scalar_style() yaml_scalar_style_t { return yaml_scalar_style_t(e.style) } +func (e *yaml_event_t) sequence_style() yaml_sequence_style_t { return yaml_sequence_style_t(e.style) } +func (e *yaml_event_t) mapping_style() yaml_mapping_style_t { return yaml_mapping_style_t(e.style) } + +// Nodes + +const ( + yaml_NULL_TAG = "tag:yaml.org,2002:null" // The tag !!null with the only possible value: null. + yaml_BOOL_TAG = "tag:yaml.org,2002:bool" // The tag !!bool with the values: true and false. + yaml_STR_TAG = "tag:yaml.org,2002:str" // The tag !!str for string values. + yaml_INT_TAG = "tag:yaml.org,2002:int" // The tag !!int for integer values. + yaml_FLOAT_TAG = "tag:yaml.org,2002:float" // The tag !!float for float values. + yaml_TIMESTAMP_TAG = "tag:yaml.org,2002:timestamp" // The tag !!timestamp for date and time values. + + yaml_SEQ_TAG = "tag:yaml.org,2002:seq" // The tag !!seq is used to denote sequences. + yaml_MAP_TAG = "tag:yaml.org,2002:map" // The tag !!map is used to denote mapping. + + // Not in original libyaml. + yaml_BINARY_TAG = "tag:yaml.org,2002:binary" + yaml_MERGE_TAG = "tag:yaml.org,2002:merge" + + yaml_DEFAULT_SCALAR_TAG = yaml_STR_TAG // The default scalar tag is !!str. + yaml_DEFAULT_SEQUENCE_TAG = yaml_SEQ_TAG // The default sequence tag is !!seq. + yaml_DEFAULT_MAPPING_TAG = yaml_MAP_TAG // The default mapping tag is !!map. +) + +type yaml_node_type_t int + +// Node types. +const ( + // An empty node. + yaml_NO_NODE yaml_node_type_t = iota + + yaml_SCALAR_NODE // A scalar node. + yaml_SEQUENCE_NODE // A sequence node. + yaml_MAPPING_NODE // A mapping node. +) + +// An element of a sequence node. +type yaml_node_item_t int + +// An element of a mapping node. 
+type yaml_node_pair_t struct { + key int // The key of the element. + value int // The value of the element. +} + +// The node structure. +type yaml_node_t struct { + typ yaml_node_type_t // The node type. + tag []byte // The node tag. + + // The node data. + + // The scalar parameters (for yaml_SCALAR_NODE). + scalar struct { + value []byte // The scalar value. + length int // The length of the scalar value. + style yaml_scalar_style_t // The scalar style. + } + + // The sequence parameters (for YAML_SEQUENCE_NODE). + sequence struct { + items_data []yaml_node_item_t // The stack of sequence items. + style yaml_sequence_style_t // The sequence style. + } + + // The mapping parameters (for yaml_MAPPING_NODE). + mapping struct { + pairs_data []yaml_node_pair_t // The stack of mapping pairs (key, value). + pairs_start *yaml_node_pair_t // The beginning of the stack. + pairs_end *yaml_node_pair_t // The end of the stack. + pairs_top *yaml_node_pair_t // The top of the stack. + style yaml_mapping_style_t // The mapping style. + } + + start_mark yaml_mark_t // The beginning of the node. + end_mark yaml_mark_t // The end of the node. + +} + +// The document structure. +type yaml_document_t struct { + + // The document nodes. + nodes []yaml_node_t + + // The version directive. + version_directive *yaml_version_directive_t + + // The list of tag directives. + tag_directives_data []yaml_tag_directive_t + tag_directives_start int // The beginning of the tag directives list. + tag_directives_end int // The end of the tag directives list. + + start_implicit int // Is the document start indicator implicit? + end_implicit int // Is the document end indicator implicit? + + // The start/end of the document. + start_mark, end_mark yaml_mark_t +} + +// The prototype of a read handler. +// +// The read handler is called when the parser needs to read more bytes from the +// source. The handler should write not more than size bytes to the buffer. 
+// The number of written bytes should be set to the size_read variable. +// +// [in,out] data A pointer to an application data specified by +// yaml_parser_set_input(). +// [out] buffer The buffer to write the data from the source. +// [in] size The size of the buffer. +// [out] size_read The actual number of bytes read from the source. +// +// On success, the handler should return 1. If the handler failed, +// the returned value should be 0. On EOF, the handler should set the +// size_read to 0 and return 1. +type yaml_read_handler_t func(parser *yaml_parser_t, buffer []byte) (n int, err error) + +// This structure holds information about a potential simple key. +type yaml_simple_key_t struct { + possible bool // Is a simple key possible? + required bool // Is a simple key required? + token_number int // The number of the token. + mark yaml_mark_t // The position mark. +} + +// The states of the parser. +type yaml_parser_state_t int + +const ( + yaml_PARSE_STREAM_START_STATE yaml_parser_state_t = iota + + yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE // Expect the beginning of an implicit document. + yaml_PARSE_DOCUMENT_START_STATE // Expect DOCUMENT-START. + yaml_PARSE_DOCUMENT_CONTENT_STATE // Expect the content of a document. + yaml_PARSE_DOCUMENT_END_STATE // Expect DOCUMENT-END. + yaml_PARSE_BLOCK_NODE_STATE // Expect a block node. + yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE // Expect a block node or indentless sequence. + yaml_PARSE_FLOW_NODE_STATE // Expect a flow node. + yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE // Expect the first entry of a block sequence. + yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE // Expect an entry of a block sequence. + yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE // Expect an entry of an indentless sequence. + yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE // Expect the first key of a block mapping. + yaml_PARSE_BLOCK_MAPPING_KEY_STATE // Expect a block mapping key. + yaml_PARSE_BLOCK_MAPPING_VALUE_STATE // Expect a block mapping value. 
+ yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE // Expect the first entry of a flow sequence. + yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE // Expect an entry of a flow sequence. + yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE // Expect a key of an ordered mapping. + yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE // Expect a value of an ordered mapping. + yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE // Expect the and of an ordered mapping entry. + yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE // Expect the first key of a flow mapping. + yaml_PARSE_FLOW_MAPPING_KEY_STATE // Expect a key of a flow mapping. + yaml_PARSE_FLOW_MAPPING_VALUE_STATE // Expect a value of a flow mapping. + yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE // Expect an empty value of a flow mapping. + yaml_PARSE_END_STATE // Expect nothing. +) + +func (ps yaml_parser_state_t) String() string { + switch ps { + case yaml_PARSE_STREAM_START_STATE: + return "yaml_PARSE_STREAM_START_STATE" + case yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE: + return "yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE" + case yaml_PARSE_DOCUMENT_START_STATE: + return "yaml_PARSE_DOCUMENT_START_STATE" + case yaml_PARSE_DOCUMENT_CONTENT_STATE: + return "yaml_PARSE_DOCUMENT_CONTENT_STATE" + case yaml_PARSE_DOCUMENT_END_STATE: + return "yaml_PARSE_DOCUMENT_END_STATE" + case yaml_PARSE_BLOCK_NODE_STATE: + return "yaml_PARSE_BLOCK_NODE_STATE" + case yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE: + return "yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE" + case yaml_PARSE_FLOW_NODE_STATE: + return "yaml_PARSE_FLOW_NODE_STATE" + case yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE: + return "yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE" + case yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE: + return "yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE" + case yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE: + return "yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE" + case yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE: + return "yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE" + case 
yaml_PARSE_BLOCK_MAPPING_KEY_STATE: + return "yaml_PARSE_BLOCK_MAPPING_KEY_STATE" + case yaml_PARSE_BLOCK_MAPPING_VALUE_STATE: + return "yaml_PARSE_BLOCK_MAPPING_VALUE_STATE" + case yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE: + return "yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE" + case yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE: + return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE" + case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE: + return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE" + case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE: + return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE" + case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE: + return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE" + case yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE: + return "yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE" + case yaml_PARSE_FLOW_MAPPING_KEY_STATE: + return "yaml_PARSE_FLOW_MAPPING_KEY_STATE" + case yaml_PARSE_FLOW_MAPPING_VALUE_STATE: + return "yaml_PARSE_FLOW_MAPPING_VALUE_STATE" + case yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE: + return "yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE" + case yaml_PARSE_END_STATE: + return "yaml_PARSE_END_STATE" + } + return "" +} + +// This structure holds aliases data. +type yaml_alias_data_t struct { + anchor []byte // The anchor. + index int // The node id. + mark yaml_mark_t // The anchor mark. +} + +// The parser structure. +// +// All members are internal. Manage the structure using the +// yaml_parser_ family of functions. +type yaml_parser_t struct { + + // Error handling + + error yaml_error_type_t // Error type. + + problem string // Error description. + + // The byte about which the problem occured. + problem_offset int + problem_value int + problem_mark yaml_mark_t + + // The error context. + context string + context_mark yaml_mark_t + + // Reader stuff + + read_handler yaml_read_handler_t // Read handler. + + input_file io.Reader // File input data. + input []byte // String input data. 
+ input_pos int + + eof bool // EOF flag + + buffer []byte // The working buffer. + buffer_pos int // The current position of the buffer. + + unread int // The number of unread characters in the buffer. + + raw_buffer []byte // The raw buffer. + raw_buffer_pos int // The current position of the buffer. + + encoding yaml_encoding_t // The input encoding. + + offset int // The offset of the current position (in bytes). + mark yaml_mark_t // The mark of the current position. + + // Scanner stuff + + stream_start_produced bool // Have we started to scan the input stream? + stream_end_produced bool // Have we reached the end of the input stream? + + flow_level int // The number of unclosed '[' and '{' indicators. + + tokens []yaml_token_t // The tokens queue. + tokens_head int // The head of the tokens queue. + tokens_parsed int // The number of tokens fetched from the queue. + token_available bool // Does the tokens queue contain a token ready for dequeueing. + + indent int // The current indentation level. + indents []int // The indentation levels stack. + + simple_key_allowed bool // May a simple key occur at the current position? + simple_keys []yaml_simple_key_t // The stack of simple keys. + + // Parser stuff + + state yaml_parser_state_t // The current parser state. + states []yaml_parser_state_t // The parser states stack. + marks []yaml_mark_t // The stack of marks. + tag_directives []yaml_tag_directive_t // The list of TAG directives. + + // Dumper stuff + + aliases []yaml_alias_data_t // The alias data. + + document *yaml_document_t // The currently parsed document. +} + +// Emitter Definitions + +// The prototype of a write handler. +// +// The write handler is called when the emitter needs to flush the accumulated +// characters to the output. The handler should write @a size bytes of the +// @a buffer to the output. +// +// @param[in,out] data A pointer to an application data specified by +// yaml_emitter_set_output(). 
+// @param[in] buffer The buffer with bytes to be written. +// @param[in] size The size of the buffer. +// +// @returns On success, the handler should return @c 1. If the handler failed, +// the returned value should be @c 0. +// +type yaml_write_handler_t func(emitter *yaml_emitter_t, buffer []byte) error + +type yaml_emitter_state_t int + +// The emitter states. +const ( + // Expect STREAM-START. + yaml_EMIT_STREAM_START_STATE yaml_emitter_state_t = iota + + yaml_EMIT_FIRST_DOCUMENT_START_STATE // Expect the first DOCUMENT-START or STREAM-END. + yaml_EMIT_DOCUMENT_START_STATE // Expect DOCUMENT-START or STREAM-END. + yaml_EMIT_DOCUMENT_CONTENT_STATE // Expect the content of a document. + yaml_EMIT_DOCUMENT_END_STATE // Expect DOCUMENT-END. + yaml_EMIT_FLOW_SEQUENCE_FIRST_ITEM_STATE // Expect the first item of a flow sequence. + yaml_EMIT_FLOW_SEQUENCE_ITEM_STATE // Expect an item of a flow sequence. + yaml_EMIT_FLOW_MAPPING_FIRST_KEY_STATE // Expect the first key of a flow mapping. + yaml_EMIT_FLOW_MAPPING_KEY_STATE // Expect a key of a flow mapping. + yaml_EMIT_FLOW_MAPPING_SIMPLE_VALUE_STATE // Expect a value for a simple key of a flow mapping. + yaml_EMIT_FLOW_MAPPING_VALUE_STATE // Expect a value of a flow mapping. + yaml_EMIT_BLOCK_SEQUENCE_FIRST_ITEM_STATE // Expect the first item of a block sequence. + yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE // Expect an item of a block sequence. + yaml_EMIT_BLOCK_MAPPING_FIRST_KEY_STATE // Expect the first key of a block mapping. + yaml_EMIT_BLOCK_MAPPING_KEY_STATE // Expect the key of a block mapping. + yaml_EMIT_BLOCK_MAPPING_SIMPLE_VALUE_STATE // Expect a value for a simple key of a block mapping. + yaml_EMIT_BLOCK_MAPPING_VALUE_STATE // Expect a value of a block mapping. + yaml_EMIT_END_STATE // Expect nothing. +) + +// The emitter structure. +// +// All members are internal. Manage the structure using the @c yaml_emitter_ +// family of functions. 
+type yaml_emitter_t struct { + + // Error handling + + error yaml_error_type_t // Error type. + problem string // Error description. + + // Writer stuff + + write_handler yaml_write_handler_t // Write handler. + + output_buffer *[]byte // String output data. + output_file io.Writer // File output data. + + buffer []byte // The working buffer. + buffer_pos int // The current position of the buffer. + + raw_buffer []byte // The raw buffer. + raw_buffer_pos int // The current position of the buffer. + + encoding yaml_encoding_t // The stream encoding. + + // Emitter stuff + + canonical bool // If the output is in the canonical style? + best_indent int // The number of indentation spaces. + best_width int // The preferred width of the output lines. + unicode bool // Allow unescaped non-ASCII characters? + line_break yaml_break_t // The preferred line break. + + state yaml_emitter_state_t // The current emitter state. + states []yaml_emitter_state_t // The stack of states. + + events []yaml_event_t // The event queue. + events_head int // The head of the event queue. + + indents []int // The stack of indentation levels. + + tag_directives []yaml_tag_directive_t // The list of tag directives. + + indent int // The current indentation level. + + flow_level int // The current flow level. + + root_context bool // Is it the document root context? + sequence_context bool // Is it a sequence context? + mapping_context bool // Is it a mapping context? + simple_key_context bool // Is it a simple mapping key context? + + line int // The current line. + column int // The current column. + whitespace bool // If the last character was a whitespace? + indention bool // If the last character was an indentation character (' ', '-', '?', ':')? + open_ended bool // If an explicit document end is required? + + // Anchor analysis. + anchor_data struct { + anchor []byte // The anchor value. + alias bool // Is it an alias? + } + + // Tag analysis. 
+ tag_data struct { + handle []byte // The tag handle. + suffix []byte // The tag suffix. + } + + // Scalar analysis. + scalar_data struct { + value []byte // The scalar value. + multiline bool // Does the scalar contain line breaks? + flow_plain_allowed bool // Can the scalar be expessed in the flow plain style? + block_plain_allowed bool // Can the scalar be expressed in the block plain style? + single_quoted_allowed bool // Can the scalar be expressed in the single quoted style? + block_allowed bool // Can the scalar be expressed in the literal or folded styles? + style yaml_scalar_style_t // The output style. + } + + // Dumper stuff + + opened bool // If the stream was already opened? + closed bool // If the stream was already closed? + + // The information associated with the document nodes. + anchors *struct { + references int // The number of references. + anchor int // The anchor id. + serialized bool // If the node has been emitted? + } + + last_anchor_id int // The last assigned anchor id. + + document *yaml_document_t // The currently emitted document. +} diff --git a/vendor/gopkg.in/yaml.v2/yamlprivateh.go b/vendor/gopkg.in/yaml.v2/yamlprivateh.go new file mode 100644 index 000000000..8110ce3c3 --- /dev/null +++ b/vendor/gopkg.in/yaml.v2/yamlprivateh.go @@ -0,0 +1,173 @@ +package yaml + +const ( + // The size of the input raw buffer. + input_raw_buffer_size = 512 + + // The size of the input buffer. + // It should be possible to decode the whole raw buffer. + input_buffer_size = input_raw_buffer_size * 3 + + // The size of the output buffer. + output_buffer_size = 128 + + // The size of the output raw buffer. + // It should be possible to encode the whole output buffer. + output_raw_buffer_size = (output_buffer_size*2 + 2) + + // The size of other stacks and queues. 
+ initial_stack_size = 16 + initial_queue_size = 16 + initial_string_size = 16 +) + +// Check if the character at the specified position is an alphabetical +// character, a digit, '_', or '-'. +func is_alpha(b []byte, i int) bool { + return b[i] >= '0' && b[i] <= '9' || b[i] >= 'A' && b[i] <= 'Z' || b[i] >= 'a' && b[i] <= 'z' || b[i] == '_' || b[i] == '-' +} + +// Check if the character at the specified position is a digit. +func is_digit(b []byte, i int) bool { + return b[i] >= '0' && b[i] <= '9' +} + +// Get the value of a digit. +func as_digit(b []byte, i int) int { + return int(b[i]) - '0' +} + +// Check if the character at the specified position is a hex-digit. +func is_hex(b []byte, i int) bool { + return b[i] >= '0' && b[i] <= '9' || b[i] >= 'A' && b[i] <= 'F' || b[i] >= 'a' && b[i] <= 'f' +} + +// Get the value of a hex-digit. +func as_hex(b []byte, i int) int { + bi := b[i] + if bi >= 'A' && bi <= 'F' { + return int(bi) - 'A' + 10 + } + if bi >= 'a' && bi <= 'f' { + return int(bi) - 'a' + 10 + } + return int(bi) - '0' +} + +// Check if the character is ASCII. +func is_ascii(b []byte, i int) bool { + return b[i] <= 0x7F +} + +// Check if the character at the start of the buffer can be printed unescaped. +func is_printable(b []byte, i int) bool { + return ((b[i] == 0x0A) || // . == #x0A + (b[i] >= 0x20 && b[i] <= 0x7E) || // #x20 <= . <= #x7E + (b[i] == 0xC2 && b[i+1] >= 0xA0) || // #0xA0 <= . <= #xD7FF + (b[i] > 0xC2 && b[i] < 0xED) || + (b[i] == 0xED && b[i+1] < 0xA0) || + (b[i] == 0xEE) || + (b[i] == 0xEF && // #xE000 <= . <= #xFFFD + !(b[i+1] == 0xBB && b[i+2] == 0xBF) && // && . != #xFEFF + !(b[i+1] == 0xBF && (b[i+2] == 0xBE || b[i+2] == 0xBF)))) +} + +// Check if the character at the specified position is NUL. +func is_z(b []byte, i int) bool { + return b[i] == 0x00 +} + +// Check if the beginning of the buffer is a BOM. 
+func is_bom(b []byte, i int) bool { + return b[0] == 0xEF && b[1] == 0xBB && b[2] == 0xBF +} + +// Check if the character at the specified position is space. +func is_space(b []byte, i int) bool { + return b[i] == ' ' +} + +// Check if the character at the specified position is tab. +func is_tab(b []byte, i int) bool { + return b[i] == '\t' +} + +// Check if the character at the specified position is blank (space or tab). +func is_blank(b []byte, i int) bool { + //return is_space(b, i) || is_tab(b, i) + return b[i] == ' ' || b[i] == '\t' +} + +// Check if the character at the specified position is a line break. +func is_break(b []byte, i int) bool { + return (b[i] == '\r' || // CR (#xD) + b[i] == '\n' || // LF (#xA) + b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85) + b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028) + b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9) // PS (#x2029) +} + +func is_crlf(b []byte, i int) bool { + return b[i] == '\r' && b[i+1] == '\n' +} + +// Check if the character is a line break or NUL. +func is_breakz(b []byte, i int) bool { + //return is_break(b, i) || is_z(b, i) + return ( // is_break: + b[i] == '\r' || // CR (#xD) + b[i] == '\n' || // LF (#xA) + b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85) + b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028) + b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9 || // PS (#x2029) + // is_z: + b[i] == 0) +} + +// Check if the character is a line break, space, or NUL. +func is_spacez(b []byte, i int) bool { + //return is_space(b, i) || is_breakz(b, i) + return ( // is_space: + b[i] == ' ' || + // is_breakz: + b[i] == '\r' || // CR (#xD) + b[i] == '\n' || // LF (#xA) + b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85) + b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028) + b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9 || // PS (#x2029) + b[i] == 0) +} + +// Check if the character is a line break, space, tab, or NUL. 
+func is_blankz(b []byte, i int) bool { + //return is_blank(b, i) || is_breakz(b, i) + return ( // is_blank: + b[i] == ' ' || b[i] == '\t' || + // is_breakz: + b[i] == '\r' || // CR (#xD) + b[i] == '\n' || // LF (#xA) + b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85) + b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028) + b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9 || // PS (#x2029) + b[i] == 0) +} + +// Determine the width of the character. +func width(b byte) int { + // Don't replace these by a switch without first + // confirming that it is being inlined. + if b&0x80 == 0x00 { + return 1 + } + if b&0xE0 == 0xC0 { + return 2 + } + if b&0xF0 == 0xE0 { + return 3 + } + if b&0xF8 == 0xF0 { + return 4 + } + return 0 + +}