Skip runtime and some zap frames in stacktraces (#311)

Remove `runtime.goexit`, `runtime.main`, and some zap frames from our stacktraces. Since we're now formatting our own traces, we also get a *big* performance win: nearly 5x faster and half the number of bytes allocated!

Current master:

```
BenchmarkStackField-4             200000              9587 ns/op             960 B/op          2 allocs/op
```

After this PR:

```
BenchmarkStackField-4             500000              2149 ns/op             448 B/op          2 allocs/op
```

Fixes #308.
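
For context on how the win happens: instead of asking the runtime to render the whole goroutine stack as text (`runtime.Stack`) and then trimming it, we capture raw program counters with `runtime.Callers` and format only the frames we want. A rough, self-contained sketch of the two approaches (illustrative code, not the code in this PR; `formattedByRuntime` and `formattedByHand` are made-up names):

```
package main

import (
	"fmt"
	"runtime"
	"strings"
)

// formattedByRuntime mirrors the old approach: let the runtime format the
// whole goroutine stack into a byte slice.
func formattedByRuntime() string {
	buf := make([]byte, 1024)
	n := runtime.Stack(buf, false)
	return string(buf[:n])
}

// formattedByHand mirrors the new approach: capture program counters, then
// resolve and format only the frames we care about, skipping runtime frames
// such as runtime.main and runtime.goexit.
func formattedByHand() string {
	pcs := make([]uintptr, 64)
	n := runtime.Callers(2, pcs) // skip runtime.Callers and formattedByHand
	var b strings.Builder
	for _, pc := range pcs[:n] {
		f := runtime.FuncForPC(pc)
		if f == nil || strings.HasPrefix(f.Name(), "runtime.") {
			continue
		}
		// pc is a return address; pc-1 points back into the call itself.
		file, line := f.FileLine(pc - 1)
		fmt.Fprintf(&b, "%s\n\t%s:%d\n", f.Name(), file, line)
	}
	return b.String()
}

func main() {
	fmt.Print(formattedByHand())
	fmt.Print(formattedByRuntime())
}
```

The actual implementation in this commit (stacktrace.go below) additionally pools its scratch buffers via sync.Pool, which keeps the steady-state allocation count down.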
bufdev authored and akshayjshah committed Mar 7, 2017
1 parent 05dc1c4 commit e3f45be
Showing 6 changed files with 91 additions and 40 deletions.

config_test.go (2 changes: 1 addition & 1 deletion)

```
@@ -50,7 +50,7 @@ func TestConfig(t *testing.T) {
 			expectRe: "DEBUG\t.*go.uber.org/zap/config_test.go:" + `\d+` + "\tdebug\t" + `{"k": "v", "z": "zz"}` + "\n" +
 				"INFO\t.*go.uber.org/zap/config_test.go:" + `\d+` + "\tinfo\t" + `{"k": "v", "z": "zz"}` + "\n" +
 				"WARN\t.*go.uber.org/zap/config_test.go:" + `\d+` + "\twarn\t" + `{"k": "v", "z": "zz"}` + "\n" +
-				`goroutine \d+ \[running\]:`,
+				`go.uber.org/zap.Stack`,
 		},
 	}
```

field.go (12 changes: 3 additions & 9 deletions)

```
@@ -25,7 +25,6 @@ import (
 	"math"
 	"time"
 
-	"go.uber.org/zap/internal/bufferpool"
 	"go.uber.org/zap/zapcore"
 )
 
@@ -192,19 +191,14 @@ func NamedError(key string, err error) zapcore.Field {
 
 // Stack constructs a field that stores a stacktrace of the current goroutine
 // under provided key. Keep in mind that taking a stacktrace is eager and
-// extremely expensive (relatively speaking); this function both makes an
-// allocation and takes ~10 microseconds.
+// expensive (relatively speaking); this function both makes an allocation and
+// takes about two microseconds.
 func Stack(key string) zapcore.Field {
-	// Try to avoid allocating a buffer.
-	buf := bufferpool.Get()
-	bs := buf.Bytes()
 	// Returning the stacktrace as a string costs an allocation, but saves us
 	// from expanding the zapcore.Field union struct to include a byte slice. Since
 	// taking a stacktrace is already so expensive (~10us), the extra allocation
 	// is okay.
-	field := String(key, takeStacktrace(bs[:cap(bs)], false))
-	bufferpool.Put(buf)
-	return field
+	return String(key, takeStacktrace())
 }
 
 // Duration constructs a field with the given key and value. The encoder
```
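
As a usage note rather than part of the diff: the public shape of `zap.Stack` is unchanged, so callers keep attaching it like any other field. A minimal sketch, with illustrative message and field names:

```
package main

import "go.uber.org/zap"

func main() {
	// Hypothetical setup; any *zap.Logger works the same way.
	logger, _ := zap.NewDevelopment()
	defer logger.Sync()

	// Attach a stacktrace to a single entry. The trace is captured eagerly,
	// at the call site, exactly as the doc comment above warns.
	logger.Error("unexpected state",
		zap.String("component", "scheduler"), // illustrative field
		zap.Stack("stacktrace"),
	)
}
```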

logger_test.go (1 change: 0 additions & 1 deletion)

```
@@ -363,7 +363,6 @@ func TestLoggerAddStacktrace(t *testing.T) {
 	assertHasStack := func(t testing.TB, obs observer.LoggedEntry) {
 		assert.Contains(t, obs.Entry.Stack, "zap.TestLoggerAddStacktrace", "Expected to find test function in stacktrace.")
 	}
-
 	withLogger(t, DebugLevel, opts(AddStacktrace(InfoLevel)), func(logger *Logger, logs *observer.ObservedLogs) {
 		logger.Debug("")
 		assert.Empty(
```

options.go (3 changes: 1 addition & 2 deletions)

```
@@ -92,8 +92,7 @@ func AddCallerSkip(skip int) Option {
 }
 
 // AddStacktrace configures the Logger to record a stack trace for all messages at
-// or above a given level. Keep in mind that taking a stacktrace takes several
-// microseconds; relative to the cost of logging, this is quite slow.
+// or above a given level.
 func AddStacktrace(lvl zapcore.LevelEnabler) Option {
 	return optionFunc(func(log *Logger) {
 		log.addStack = lvl
```
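
Again not part of the diff, but for orientation: the option is passed when constructing the logger, and after this change its doc comment no longer needs to warn about cost. A minimal sketch, assuming the `zap` and `zapcore` imports:

```
package main

import (
	"go.uber.org/zap"
	"go.uber.org/zap/zapcore"
)

func main() {
	// Record stacktraces only for entries at Warn level or above.
	logger, err := zap.NewDevelopment(zap.AddStacktrace(zapcore.WarnLevel))
	if err != nil {
		panic(err)
	}
	defer logger.Sync()

	logger.Warn("this entry carries a stacktrace")
	logger.Info("this one does not")
}
```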

stacktrace.go (92 changes: 75 additions & 17 deletions)

```
@@ -20,23 +20,81 @@
 
 package zap
 
-import "runtime"
-
-// takeStacktrace attempts to use the provided byte slice to take a stacktrace.
-// If the provided slice isn't large enough, takeStacktrace will allocate
-// successively larger slices until it can capture the whole stack.
-func takeStacktrace(buf []byte, includeAllGoroutines bool) string {
-	if len(buf) == 0 {
-		// We may have been passed a nil byte slice.
-		buf = make([]byte, 1024)
+import (
+	"runtime"
+	"strings"
+	"sync"
+
+	"go.uber.org/zap/internal/bufferpool"
+)
+
+var (
+	_stacktraceIgnorePrefixes = []string{
+		"runtime.goexit",
+		"runtime.main",
+	}
+	_stacktracePool = sync.Pool{
+		New: func() interface{} {
+			return newProgramCounters(64)
+		},
+	}
+)
+
+func takeStacktrace() string {
+	buffer := bufferpool.Get()
+	defer bufferpool.Put(buffer)
+	programCounters := _stacktracePool.Get().(*programCounters)
+	defer _stacktracePool.Put(programCounters)
+
+	for {
+		// Skip the call to runtime.Callers and takeStacktrace so that the
+		// program counters start at the caller of takeStacktrace.
+		n := runtime.Callers(2, programCounters.pcs)
+		if n < cap(programCounters.pcs) {
+			programCounters.pcs = programCounters.pcs[:n]
+			break
+		}
+		// Don't put the too-short counter slice back into the pool; this lets
+		// the pool adjust if we consistently take deep stacktraces.
+		programCounters = newProgramCounters(len(programCounters.pcs) * 2)
 	}
-	n := runtime.Stack(buf, includeAllGoroutines)
-	for n >= len(buf) {
-		// Buffer wasn't large enough, allocate a larger one. No need to copy
-		// previous buffer's contents.
-		size := 2 * n
-		buf = make([]byte, size)
-		n = runtime.Stack(buf, includeAllGoroutines)
+
+	i := 0
+	for _, programCounter := range programCounters.pcs {
+		f := runtime.FuncForPC(programCounter)
+		name := f.Name()
+		if shouldIgnoreStacktraceName(name) {
+			continue
+		}
+		if i != 0 {
+			buffer.AppendByte('\n')
+		}
+		i++
+		file, line := f.FileLine(programCounter - 1)
+		buffer.AppendString(name)
+		buffer.AppendByte('\n')
+		buffer.AppendByte('\t')
+		buffer.AppendString(file)
+		buffer.AppendByte(':')
+		buffer.AppendInt(int64(line))
 	}
-	return string(buf[:n])
+
+	return buffer.String()
 }
+
+func shouldIgnoreStacktraceName(name string) bool {
+	for _, prefix := range _stacktraceIgnorePrefixes {
+		if strings.HasPrefix(name, prefix) {
+			return true
+		}
+	}
+	return false
+}
+
+type programCounters struct {
+	pcs []uintptr
+}
+
+func newProgramCounters(size int) *programCounters {
+	return &programCounters{make([]uintptr, size)}
+}
```
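
One design choice above deserves a note: when the pooled slice of program counters turns out to be too small, the code allocates a larger one and puts that back in the pool, so the pool gradually adapts to the real stack depths of the program. A stripped-down sketch of the same pattern, with made-up names (`pcPool`, `callerPCs`), not the code from this diff:

```
package main

import (
	"runtime"
	"sync"
)

// pcPool hands out scratch slices for runtime.Callers.
var pcPool = sync.Pool{
	New: func() interface{} { return make([]uintptr, 64) },
}

// callerPCs captures the caller's program counters, doubling the scratch
// slice whenever the stack is deeper than the pooled slice can hold.
func callerPCs() []uintptr {
	pcs := pcPool.Get().([]uintptr)
	for {
		n := runtime.Callers(2, pcs)
		if n < len(pcs) {
			out := make([]uintptr, n)
			copy(out, pcs[:n])
			pcPool.Put(pcs) // return the (possibly grown) slice for next time
			return out
		}
		// Too small: retry with double the size. The short slice is
		// deliberately not returned to the pool.
		pcs = make([]uintptr, 2*len(pcs))
	}
}

func main() {
	_ = callerPCs()
}
```

The diff wraps the slice in a small `programCounters` struct rather than pooling the slice directly, presumably so that moving it in and out of the `interface{}`-typed pool does not itself allocate a copy of the slice header.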

stacktrace_test.go (21 changes: 11 additions & 10 deletions)

```
@@ -21,20 +21,21 @@
 package zap
 
 import (
+	"strings"
 	"testing"
 
 	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
 )
 
 func TestTakeStacktrace(t *testing.T) {
-	// Even if we pass a tiny buffer, takeStacktrace should allocate until it
-	// can capture the whole stacktrace.
-	traceNil := takeStacktrace(nil, false)
-	traceTiny := takeStacktrace(make([]byte, 1), false)
-	for _, trace := range []string{traceNil, traceTiny} {
-		// The top frame should be takeStacktrace.
-		assert.Contains(t, trace, "zap.takeStacktrace", "Stacktrace should contain the takeStacktrace function.")
-		// The stacktrace should also capture its immediate caller.
-		assert.Contains(t, trace, "TestTakeStacktrace", "Stacktrace should contain the test function.")
-	}
+	trace := takeStacktrace()
+	lines := strings.Split(trace, "\n")
+	require.True(t, len(lines) > 0, "Expected stacktrace to have at least one frame.")
+	assert.Contains(
+		t,
+		lines[0],
+		"TestTakeStacktrace",
+		"Expected stacktrace to start with this test function, but top frame is %s.", lines[0],
+	)
 }
```
