Merge pull request #156 from influxdata/nc-docs-fixes
clean up docs
Nathaniel Cook committed Jan 20, 2016
2 parents af8e166 + 70471d7 commit 88cd92e
Showing 5 changed files with 80 additions and 27 deletions.
13 changes: 7 additions & 6 deletions pipeline/alert.go
@@ -50,7 +50,7 @@ const defaultMessageTmpl = "{{ .ID }} is {{ .Level }}"
// Using the AlertNode.StateChangesOnly property events will only be sent to handlers
// if the alert changed state.
//
// It is valid to configure multiple alert handlers of the same type and of different types.
// It is valid to configure multiple alert handlers, even with the same type.
//
// Example:
// stream
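For illustration, a minimal TICKscript sketch of the behavior described in the comment above: one alert with several handlers attached, two of them of the same type, emitting events only on state changes. The field name, threshold, URLs, and the lambda expression syntax are assumptions, not taken from this commit.

    stream
        .from()...
        .alert()
            .crit(lambda: "usage_idle" < 10)
            // Only send events when the alert changes state.
            .stateChangesOnly()
            // Multiple handlers are allowed, even two of the same type.
            .post('http://example.com/alerts')
            .post('http://backup.example.com/alerts')
            .slack()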
@@ -357,7 +357,7 @@ type ExecHandler struct {
}

// Log JSON alert data to file. One event per line.
// Must specify the absolute path the the log file.
// Must specify the absolute path to the log file.
// It will be created if it does not exist.
// tick:property
func (a *AlertNode) Log(filepath string) *LogHandler {
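A short hedged sketch of the log handler documented above; the threshold and file path are hypothetical, and the lambda expression syntax is assumed for this Kapacitor version. The path must be absolute.

    stream
        .from()...
        .alert()
            .warn(lambda: "value" > 10)
            // Absolute path; the file is created if it does not exist.
            .log('/var/log/kapacitor/alerts.log')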
@@ -579,7 +579,9 @@ type HipChatHandler struct {
//
// In order to not post a message every alert interval
// use AlertNode.StateChangesOnly so that only events
// where the alert changed state are posted to the room.
// where the alert changed state are sent to Alerta.
//
// Send alerts to Alerta. The resource and event properties are required.
//
// Example:
// stream...
@@ -588,7 +590,7 @@ type HipChatHandler struct {
// .resource('Hostname or service')
// .event('Something went wrong')
//
// Send alerts to Alerta. Alerta requires a resource and event description.
// Alerta also accepts optional alert information.
//
// Example:
// stream...
@@ -597,10 +599,9 @@ type HipChatHandler struct {
// .resource('Hostname or service')
// .event('Something went wrong')
// .environment('Development')
// .status('Open')
// .group('Dev. Servers')
//
// Send alerts to Alerta. Alerta accepts detailed alert information.
// NOTE: Alerta cannot be configured globally because of its required properties.
// tick:property
func (a *AlertNode) Alerta() *AlertaHandler {
alerta := &AlertaHandler{
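Because Alerta cannot be configured globally, each task sets the required properties itself. A hedged sketch combining the StateChangesOnly advice with the required and optional Alerta properties; the threshold, environment, and group values are illustrative only, and the lambda syntax is assumed for this release:

    stream
        .from()...
        .alert()
            .crit(lambda: "value" > 10)
            // Avoid posting to Alerta on every evaluation interval.
            .stateChangesOnly()
            .alerta()
                .resource('Hostname or service')
                .event('Something went wrong')
                .environment('Production')
                .group('Web Servers')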
5 changes: 0 additions & 5 deletions pipeline/node.go
@@ -180,11 +180,6 @@ func (n *node) dot(buf *bytes.Buffer) {
// Create a new stream of data that contains the internal statistics of the node.
// The interval represents how often to emit the statistics based on real time.
// This means the interval time is independent of the times of the data points the source node is receiving.
//
// Each node has these internal statistics:
//
// * collected -- the number of points or batches this node has received.
//
func (n *node) Stats(interval time.Duration) *StatsNode {
stats := newStatsNode(n, interval)
n.pipeline().addSource(stats)
23 changes: 23 additions & 0 deletions pipeline/stats.go
@@ -2,6 +2,29 @@ package pipeline

import "time"

// A StatsNode emits internal statistics about another node at a given interval.
//
// The interval represents how often to emit the statistics based on real time.
// This means the interval time is independent of the times of the data points the other node is receiving.
// As a result the StatsNode is a root node in the task pipeline.
//
//
// The currently available internal statistics:
//
// * collected -- the number of points or batches this node has received.
//
// Each stat is available as a field in the emitted data stream.
//
// Example:
// var data = stream.from()...
// // Emit statistics every 1 minute and cache them via the HTTP API.
// data.stats(1m).httpOut('stats')
// // Continue normal processing of the data stream
// data....
//
// WARNING: It is not recommended to join the stats stream with the original data stream.
// Since they operate on different clocks you could potentially create a deadlock.
// This is a limitation of the current implementation and may be removed in the future.
type StatsNode struct {
chainnode
// tick:ignore
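As a usage note, the stats stream can drive its own alert, for example to detect that the watched node has stopped receiving data. This is a sketch under assumptions: the derivative node's unit and nonNegative properties and the lambda expression syntax are presumed to behave as in this Kapacitor release, and the paths are hypothetical.

    var data = stream.from()...
    // Emit the collected count once per minute.
    data
        .stats(1m)
        // Convert the cumulative count into points received per minute.
        .derivative('collected')
            .unit(1m)
            .nonNegative()
        .alert()
            // No new points were collected in the last interval.
            .crit(lambda: "collected" == 0)
            .log('/tmp/dead_stream_alerts.log')

Note that the stats stream is alerted on directly and never joined back into the original data stream, consistent with the warning above.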
43 changes: 41 additions & 2 deletions pipeline/udf.go
@@ -9,9 +9,42 @@ import (
"github.com/influxdata/kapacitor/udf"
)

// A UDFNode is a User Defined Function.
// UDFs can be defined in the configuration file. in the [udf] section.
// A UDFNode is a node that can run a User Defined Function (UDF) in a separate process.
//
// A UDF is a custom script or binary that can communicate via Kapacitor's UDF RPC protocol.
// The path and arguments to the UDF program are specified in Kapacitor's configuration.
// Using TICKscripts you can invoke and configure your UDF for each task.
//
// See the [README.md](https://github.com/influxdata/kapacitor/tree/master/udf/agent/)
// for details on how to write your own UDF.
//
// UDFs are configured via Kapacitor's main configuration file.
//
// Example:
// [udf]
// [udf.functions]
// # Example moving average UDF.
// [udf.functions.movingAverage]
// prog = "/path/to/executable/moving_avg"
// args = []
// timeout = "10s"
//
// UDFs are first class objects in TICKscripts and are referenced via their configuration name.
//
// Example:
// // Given you have a UDF that computes a moving average
// // The UDF can define what its options are and then can be
// // invoked via a TICKscript like so:
// stream
// .from()...
// .movingAverage()
// .field('value')
// .size(100)
// .as('mavg')
// .httpOut('movingaverage')
//
// NOTE: The UDF process runs as the same user as the Kapacitor daemon.
// As a result, make sure the user is properly secured, as well as the configuration file.
type UDFNode struct {
chainnode

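Building on the example above, the UDF's output behaves like any other stream and can feed further nodes. A hedged sketch: the movingAverage UDF is the one configured earlier, while the threshold, log path, and lambda syntax are illustrative assumptions only.

    stream
        .from()...
        // Invoke the UDF configured as [udf.functions.movingAverage].
        .movingAverage()
            .field('value')
            .size(100)
            .as('mavg')
        // The UDF output is a normal stream; any node can be chained after it.
        .alert()
            .crit(lambda: "mavg" > 1000)
            .log('/tmp/mavg_alerts.log')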
@@ -51,10 +84,12 @@ func NewUDF(
return udf
}

// tick:ignore
func (u *UDFNode) Desc() string {
return u.desc
}

// tick:ignore
func (u *UDFNode) HasMethod(name string) bool {
_, ok := u.options[name]
if ok {
@@ -63,6 +98,7 @@ func (u *UDFNode) HasMethod(name string) bool {
return u.describer.HasMethod(name)
}

// tick:ignore
func (u *UDFNode) CallMethod(name string, args ...interface{}) (interface{}, error) {
opt, ok := u.options[name]
if ok {
@@ -102,14 +138,17 @@ func (u *UDFNode) CallMethod(name string, args ...interface{}) (interface{}, err
return u.describer.CallMethod(name, args...)
}

// tick:ignore
func (u *UDFNode) HasProperty(name string) bool {
return u.describer.HasProperty(name)
}

// tick:ignore
func (u *UDFNode) Property(name string) interface{} {
return u.describer.Property(name)
}

// tick:ignore
func (u *UDFNode) SetProperty(name string, value interface{}) error {
return u.describer.SetProperty(name, value)
}
23 changes: 9 additions & 14 deletions tick/cmd/tickdoc/main.go
@@ -95,7 +95,7 @@ func main() {

ordered := make([]string, 0, len(nodes))
for name, node := range nodes {
if name == "" || !ast.IsExported(name) {
if name == "" || !ast.IsExported(name) || node.Name == "" {
continue
}
if node.Embedded {
@@ -439,7 +439,7 @@ title: %s
note: Auto generated by tickdoc
menu:
kapacitor_02:
kapacitor_010:
name: %s
identifier: %s
weight: %d
@@ -449,7 +449,7 @@
n.Name,
strings.Replace(n.Name, "Node", "", 1),
snaker.CamelToSnake(n.Name),
(index+1)*indexWidth,
(index+3)*indexWidth,
)

buf.Write([]byte(header))
@@ -463,7 +463,7 @@
buf.Write([]byte("Property methods modify state on the calling node. They do not add another node to the pipeline, and always return a reference to the calling node."))
return true
})
renderProperties(buf, r, n.Properties, nodes, 3, "node")
renderProperties(buf, r, n.Properties, nodes, 3, "node", "")
}

// Methods
@@ -489,7 +489,7 @@
return nil
}

func renderProperties(buf *bytes.Buffer, r Renderer, properties map[string]*Property, nodes map[string]*Node, header int, node string) {
func renderProperties(buf *bytes.Buffer, r Renderer, properties map[string]*Property, nodes map[string]*Node, header int, node, namePrefix string) {
props := make([]string, len(properties))
i := 0
for name, _ := range properties {
@@ -498,7 +498,7 @@ func renderProperties(buf *bytes.Buffer, r Renderer, properties map[string]*Prop
}
sort.Strings(props)
for _, name := range props {
properties[name].Render(buf, r, nodes, header, node)
properties[name].Render(buf, r, nodes, header, node, namePrefix)
buf.Write([]byte("\n"))
}
}
Expand All @@ -510,8 +510,8 @@ type Property struct {
EmbeddedProperties map[string]*Property
}

func (p *Property) Render(buf *bytes.Buffer, r Renderer, nodes map[string]*Node, header int, node string) error {
r.Header(buf, func() bool { buf.Write([]byte(p.Name)); return true }, header, "")
func (p *Property) Render(buf *bytes.Buffer, r Renderer, nodes map[string]*Node, header int, node, namePrefix string) error {
r.Header(buf, func() bool { buf.Write([]byte(namePrefix + p.Name)); return true }, header, "")

renderDoc(buf, nodes, r, p.Doc)

@@ -531,12 +531,7 @@ func (p *Property) Render(buf *bytes.Buffer, r Renderer, nodes map[string]*Node,
r.BlockCode(buf, code.Bytes(), tickLang)

if len(p.EmbeddedProperties) > 0 {
r.Paragraph(buf, func() bool {
buf.Write([]byte("Properties of "))
buf.Write([]byte(p.Name))
return true
})
renderProperties(buf, r, p.EmbeddedProperties, nodes, header+1, code.String()+" ")
renderProperties(buf, r, p.EmbeddedProperties, nodes, header+1, code.String()+" ", p.Name+" ")
}

return nil
