diff --git a/distsql/distsql.go b/distsql/distsql.go index 45f8665fac2a7..1dc51d8667e43 100644 --- a/distsql/distsql.go +++ b/distsql/distsql.go @@ -19,6 +19,7 @@ import ( "github.com/pingcap/tidb/sessionctx" "github.com/pingcap/tidb/statistics" "github.com/pingcap/tidb/types" + "github.com/pingcap/tidb/util/tracing" "golang.org/x/net/context" ) @@ -35,9 +36,13 @@ func Select(ctx context.Context, sctx sessionctx.Context, kvReq *kv.Request, fie hook.(func(*kv.Request))(kvReq) } + child := tracing.ChildSpanFromContxt(ctx, "distsql_select") + defer child.Finish() + if !sctx.GetSessionVars().EnableStreaming { kvReq.Streaming = false } + resp := sctx.GetClient().Send(ctx, kvReq, sctx.GetSessionVars().KVVars) if resp == nil { err := errors.New("client returns nil response") @@ -54,6 +59,7 @@ func Select(ctx context.Context, sctx sessionctx.Context, kvReq *kv.Request, fie }, nil } + child.LogKV("event", "finished sending rpc calls") return &selectResult{ label: "dag", resp: resp, diff --git a/executor/aggregate.go b/executor/aggregate.go index 5a1613a31768e..572587edcdd26 100644 --- a/executor/aggregate.go +++ b/executor/aggregate.go @@ -16,7 +16,10 @@ package executor import ( "sync" + "fmt" + "github.com/juju/errors" + "github.com/opentracing/opentracing-go" "github.com/pingcap/tidb/executor/aggfuncs" "github.com/pingcap/tidb/expression" "github.com/pingcap/tidb/expression/aggregation" @@ -27,6 +30,7 @@ import ( "github.com/pingcap/tidb/util/chunk" "github.com/pingcap/tidb/util/codec" "github.com/pingcap/tidb/util/mvmap" + "github.com/pingcap/tidb/util/tracing" "github.com/spaolacci/murmur3" "golang.org/x/net/context" ) @@ -64,6 +68,8 @@ type HashAggPartialWorker struct { // chk stores the input data from child, // and is reused by childExec and partial worker. chk *chunk.Chunk + + trace opentracing.Span } // HashAggFinalWorker indicates the final workers of parallel hash agg execution, @@ -79,6 +85,8 @@ type HashAggFinalWorker struct { inputCh chan *HashAggIntermData outputCh chan *AfFinalResult finalResultHolderCh chan *chunk.Chunk + + trace opentracing.Span } // AfFinalResult indicates aggregation functions final result. @@ -351,12 +359,14 @@ func (w *HashAggPartialWorker) run(ctx sessionctx.Context, waitGroup *sync.WaitG if needShuffle { w.shuffleIntermData(sc, finalConcurrency) } + w.trace.Finish() waitGroup.Done() }() for { if !w.getChildInput() { return } + w.trace.LogKV("event", "update partial result") if err := w.updatePartialResult(ctx, sc, w.chk, len(w.aggCtxsMap)); err != nil { w.globalOutputCh <- &AfFinalResult{err: errors.Trace(err)} return @@ -369,6 +379,7 @@ func (w *HashAggPartialWorker) run(ctx sessionctx.Context, waitGroup *sync.WaitG func (w *HashAggPartialWorker) updatePartialResult(ctx sessionctx.Context, sc *stmtctx.StatementContext, chk *chunk.Chunk, finalConcurrency int) (err error) { inputIter := chunk.NewIterator4Chunk(chk) + w.trace.LogKV("event", "iterating chunk's data") for row := inputIter.Begin(); row != inputIter.End(); row = inputIter.Next() { groupKey, err := w.getGroupKey(sc, row) if err != nil { @@ -388,6 +399,7 @@ func (w *HashAggPartialWorker) updatePartialResult(ctx sessionctx.Context, sc *s // We only support parallel execution for single-machine, so process of encode and decode can be skipped. 
func (w *HashAggPartialWorker) shuffleIntermData(sc *stmtctx.StatementContext, finalConcurrency int) { groupKeysSlice := make([][][]byte, finalConcurrency) + w.trace.LogKV("event", "shuffling interm data") for groupKey := range w.aggCtxsMap { groupKeyBytes := []byte(groupKey) finalWorkerIdx := int(murmur3.Sum32(groupKeyBytes)) % finalConcurrency @@ -457,6 +469,7 @@ func (w *HashAggFinalWorker) consumeIntermData(sc *stmtctx.StatementContext) (er ok bool intermDataRowsBuffer [][]types.Datum ) + w.trace.LogKV("event", "consuming interm data") for { if input, ok = w.getPartialInput(); !ok { return nil @@ -527,6 +540,7 @@ func (w *HashAggFinalWorker) receiveFinalResultHolder() (*chunk.Chunk, bool) { func (w *HashAggFinalWorker) run(ctx sessionctx.Context, waitGroup *sync.WaitGroup) { defer func() { + w.trace.Finish() waitGroup.Done() }() sc := ctx.GetSessionVars().StmtCtx @@ -539,10 +553,14 @@ func (w *HashAggFinalWorker) run(ctx sessionctx.Context, waitGroup *sync.WaitGro // Next implements the Executor Next interface. func (e *HashAggExec) Next(ctx context.Context, chk *chunk.Chunk) error { chk.Reset() + child := tracing.ChildSpanFromContxt(ctx, "hash_agg_exec") + defer child.Finish() + if e.isUnparallelExec { return errors.Trace(e.unparallelExec(ctx, chk)) } - return errors.Trace(e.parallelExec(ctx, chk)) + err := e.parallelExec(ctx, chk) + return errors.Trace(err) } func (e *HashAggExec) fetchChildData(ctx context.Context) { @@ -596,7 +614,9 @@ func (e *HashAggExec) prepare4ParallelExec(ctx context.Context) { partialWorkerWaitGroup := &sync.WaitGroup{} partialWorkerWaitGroup.Add(len(e.partialWorkers)) + for i := range e.partialWorkers { + e.partialWorkers[i].trace = tracing.ChildSpanFromContxt(ctx, fmt.Sprintf("hash_agg_exec_partial_worker_%d", i)) go e.partialWorkers[i].run(e.ctx, partialWorkerWaitGroup, len(e.finalWorkers)) } go e.waitPartialWorkerAndCloseOutputChs(partialWorkerWaitGroup) @@ -604,6 +624,7 @@ func (e *HashAggExec) prepare4ParallelExec(ctx context.Context) { finalWorkerWaitGroup := &sync.WaitGroup{} finalWorkerWaitGroup.Add(len(e.finalWorkers)) for i := range e.finalWorkers { + e.finalWorkers[i].trace = tracing.ChildSpanFromContxt(ctx, fmt.Sprintf("hash_agg_exec_final_worker_%d", i)) go e.finalWorkers[i].run(e.ctx, finalWorkerWaitGroup) } go e.waitFinalWorkerAndCloseFinalOutput(finalWorkerWaitGroup) @@ -793,6 +814,8 @@ func (e *StreamAggExec) Close() error { func (e *StreamAggExec) Next(ctx context.Context, chk *chunk.Chunk) error { chk.Reset() + child := tracing.ChildSpanFromContxt(ctx, "stream_agg_exec") + defer child.Finish() for !e.executed && chk.NumRows() < e.maxChunkSize { err := e.consumeOneGroup(ctx, chk) if err != nil { @@ -800,6 +823,7 @@ func (e *StreamAggExec) Next(ctx context.Context, chk *chunk.Chunk) error { return errors.Trace(err) } } + child.LogKV("event", "StreamAggExec is finished") return nil } diff --git a/executor/builder.go b/executor/builder.go index 7122e5f7a33a1..eadc782b1c1ba 100644 --- a/executor/builder.go +++ b/executor/builder.go @@ -89,6 +89,8 @@ func (b *executorBuilder) build(p plan.Plan) Executor { return b.buildDelete(v) case *plan.Execute: return b.buildExecute(v) + case *plan.Trace: + return b.buildTrace(v) case *plan.Explain: return b.buildExplain(v) case *plan.PointGetPlan: diff --git a/executor/index_lookup_join.go b/executor/index_lookup_join.go index 92378d80a1e07..1470843d03743 100644 --- a/executor/index_lookup_join.go +++ b/executor/index_lookup_join.go @@ -32,6 +32,7 @@ import ( "github.com/pingcap/tidb/util/memory" 
"github.com/pingcap/tidb/util/mvmap" "github.com/pingcap/tidb/util/ranger" + "github.com/pingcap/tidb/util/tracing" log "github.com/sirupsen/logrus" "golang.org/x/net/context" ) @@ -189,6 +190,11 @@ func (e *IndexLookUpJoin) newInnerWorker(taskCh chan *lookUpJoinTask) *innerWork // Next implements the Executor interface. func (e *IndexLookUpJoin) Next(ctx context.Context, chk *chunk.Chunk) error { + sp := tracing.ChildSpanFromContxt(ctx, "index_lookup_join") + defer func() { + sp.LogKV("event", "index lookup join is finished.") + sp.Finish() + }() chk.Reset() e.joinResult.Reset() for { diff --git a/executor/join.go b/executor/join.go index 8eb1b953c5691..475bdc00713f0 100644 --- a/executor/join.go +++ b/executor/join.go @@ -28,6 +28,7 @@ import ( "github.com/pingcap/tidb/util/codec" "github.com/pingcap/tidb/util/memory" "github.com/pingcap/tidb/util/mvmap" + "github.com/pingcap/tidb/util/tracing" log "github.com/sirupsen/logrus" "golang.org/x/net/context" ) @@ -184,7 +185,10 @@ func (e *HashJoinExec) getJoinKeyFromChkRow(isOuterKey bool, row chunk.Row, keyB // fetchOuterChunks get chunks from fetches chunks from the big table in a background goroutine // and sends the chunks to multiple channels which will be read by multiple join workers. func (e *HashJoinExec) fetchOuterChunks(ctx context.Context) { + sp := tracing.SpanFromContext(ctx) defer func() { + sp.LogKV("event", "finishing fetchOuterChunks") + sp.Finish() for i := range e.outerResultChs { close(e.outerResultChs[i]) } @@ -471,6 +475,8 @@ func (e *HashJoinExec) join2Chunk(workerID uint, outerChk *chunk.Chunk, joinResu // step 1. fetch data from inner child and build a hash table; // step 2. fetch data from outer child in a background goroutine and probe the hash table in multiple join workers. func (e *HashJoinExec) Next(ctx context.Context, chk *chunk.Chunk) (err error) { + sp := tracing.ChildSpanFromContxt(ctx, "hash_join_exec") + defer sp.Finish() if !e.prepared { e.innerFinished = make(chan error, 1) go e.fetchInnerAndBuildHashTable(ctx) @@ -491,6 +497,7 @@ func (e *HashJoinExec) Next(ctx context.Context, chk *chunk.Chunk) (err error) { } chk.SwapColumns(result.chk) result.src <- result.chk + sp.LogKV("event", "hash join exec is finished") return nil } diff --git a/executor/projection.go b/executor/projection.go index 8df62681283a3..4aca4f6875828 100644 --- a/executor/projection.go +++ b/executor/projection.go @@ -18,6 +18,7 @@ import ( "github.com/pingcap/tidb/expression" "github.com/pingcap/tidb/sessionctx" "github.com/pingcap/tidb/util/chunk" + "github.com/pingcap/tidb/util/tracing" "golang.org/x/net/context" ) @@ -61,6 +62,7 @@ type ProjectionExec struct { // Open implements the Executor Open interface. 
 func (e *ProjectionExec) Open(ctx context.Context) error {
+	_ = tracing.ChildSpanFromContxt(ctx, "projection_exec")
 	if err := e.baseExecutor.Open(ctx); err != nil {
 		return errors.Trace(err)
 	}
@@ -139,6 +141,11 @@ func (e *ProjectionExec) Open(ctx context.Context) error {
 // +------------------------------+    +----------------------+
 //
 func (e *ProjectionExec) Next(ctx context.Context, chk *chunk.Chunk) error {
+	sp := tracing.ChildSpanFromContxt(ctx, "projection_exec")
+	defer func() {
+		sp.LogKV("event", "projection_exec is finished")
+		sp.Finish()
+	}()
 	chk.Reset()
 	if e.isUnparallelExec() {
 		return errors.Trace(e.unParallelExecute(ctx, chk))
diff --git a/executor/table_reader.go b/executor/table_reader.go
index a04b5897213c8..2278d739740df 100644
--- a/executor/table_reader.go
+++ b/executor/table_reader.go
@@ -22,6 +22,7 @@ import (
 	"github.com/pingcap/tidb/table"
 	"github.com/pingcap/tidb/util/chunk"
 	"github.com/pingcap/tidb/util/ranger"
+	"github.com/pingcap/tidb/util/tracing"
 	tipb "github.com/pingcap/tipb/go-tipb"
 	"golang.org/x/net/context"
 )
@@ -57,8 +58,8 @@ type TableReaderExecutor struct {
 
 // Open initialzes necessary variables for using this executor.
 func (e *TableReaderExecutor) Open(ctx context.Context) error {
-	span, ctx := startSpanFollowsContext(ctx, "executor.TableReader.Open")
-	defer span.Finish()
+	child := tracing.ChildSpanFromContxt(ctx, "table_reader_exec")
+	defer child.Finish()
 
 	var err error
 	if e.corColInFilter {
diff --git a/executor/trace.go b/executor/trace.go
new file mode 100644
index 0000000000000..52051b07613c6
--- /dev/null
+++ b/executor/trace.go
@@ -0,0 +1,123 @@
+// Copyright 2018 PingCAP, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package executor
+
+import (
+	"fmt"
+	"time"
+
+	"github.com/juju/errors"
+	"github.com/opentracing/basictracer-go"
+	opentracing "github.com/opentracing/opentracing-go"
+	"github.com/pingcap/tidb/plan"
+	"github.com/pingcap/tidb/types"
+	"github.com/pingcap/tidb/util/chunk"
+	"github.com/pingcap/tidb/util/tracing"
+	"golang.org/x/net/context"
+)
+
+var traceColumns = append([]*types.FieldType{})
+
+// TraceExec represents the root executor of a trace query.
+type TraceExec struct {
+	baseExecutor
+	// CollectedSpans collects all spans during execution. Spans are appended via
+	// the callback passed into the tracer implementation.
+	CollectedSpans []basictracer.RawSpan
+	// exhausted being true means there are no more results.
+	exhausted bool
+	// plan is the real query plan, used for building the real query's executor.
+	plan plan.Plan
+	// rootTrace represents the root span, which is the parent of all other spans.
+	rootTrace opentracing.Span
+
+	childrenResults []*chunk.Chunk
+}
+
+// buildTrace builds a TraceExec for later execution. It is called from
+// build().
+func (b *executorBuilder) buildTrace(v *plan.Trace) Executor {
+	e := &TraceExec{
+		baseExecutor: newBaseExecutor(b.ctx, v.Schema(), v.ExplainID()),
+	}
+
+	pp, _ := v.StmtPlan.(plan.PhysicalPlan)
+	e.children = make([]Executor, 0, len(pp.Children()))
+	for _, child := range pp.Children() {
+		switch p := child.(type) {
+		case *plan.PhysicalTableReader, *plan.PhysicalIndexReader, *plan.PhysicalIndexLookUpReader, *plan.PhysicalHashAgg, *plan.PhysicalProjection, *plan.PhysicalStreamAgg:
+			e.children = append(e.children, b.build(p))
+		default:
+			panic(fmt.Sprintf("%v is not supported", child))
+		}
+	}
+
+	return e
+}
+
+// Open opens a trace executor and creates a root trace span, which becomes the
+// parent of all spans started later via a `ChildOf` or `FollowsFrom` relationship.
+// For more details, refer to http://opentracing.io.
+func (e *TraceExec) Open(ctx context.Context) error {
+	e.rootTrace = tracing.NewRecordedTrace("trace_exec", func(sp basictracer.RawSpan) {
+		e.CollectedSpans = append(e.CollectedSpans, sp)
+	})
+	ctx = opentracing.ContextWithSpan(ctx, e.rootTrace)
+	for _, child := range e.children {
+		err := child.Open(ctx)
+		if err != nil {
+			return errors.Trace(err)
+		}
+	}
+	e.childrenResults = make([]*chunk.Chunk, 0, len(e.children))
+	for _, child := range e.children {
+		e.childrenResults = append(e.childrenResults, child.newChunk())
+	}
+
+	return nil
+}
+
+// Next executes the real query and collects the recorded spans afterwards.
+func (e *TraceExec) Next(ctx context.Context, chk *chunk.Chunk) error {
+	chk.Reset()
+	if e.exhausted {
+		return nil
+	}
+
+	ctx = opentracing.ContextWithSpan(ctx, e.rootTrace)
+	if len(e.children) > 0 {
+		if err := e.children[0].Next(ctx, e.childrenResults[0]); err != nil {
+			return errors.Trace(err)
+		}
+	}
+
+	e.rootTrace.LogKV("event", "tracing completed")
+	e.rootTrace.Finish()
+	var timeVal string
+	for i, sp := range e.CollectedSpans {
+		spanStartTime := sp.Start
+		for _, entry := range sp.Logs {
+			chk.AppendString(0, entry.Timestamp.Format(time.RFC3339))
+			timeVal = entry.Timestamp.Sub(spanStartTime).String()
+			chk.AppendString(1, timeVal)
+			chk.AppendString(2, sp.Operation)
+			chk.AppendInt64(3, int64(i))
+			chk.AppendString(4, entry.Fields[0].String())
+		}
+	}
+	e.exhausted = true
+
+	return nil
+}
diff --git a/plan/planbuilder.go b/plan/planbuilder.go
index a24fcfe6bedb4..9fa2d20a70462 100644
--- a/plan/planbuilder.go
+++ b/plan/planbuilder.go
@@ -141,6 +141,8 @@ func (b *planBuilder) build(node ast.Node) (Plan, error) {
 		return b.buildExecute(x)
 	case *ast.ExplainStmt:
 		return b.buildExplain(x)
+	case *ast.TraceStmt:
+		return b.buildTrace(x)
 	case *ast.InsertStmt:
 		return b.buildInsert(x)
 	case *ast.LoadDataStmt:
diff --git a/plan/trace.go b/plan/trace.go
new file mode 100644
index 0000000000000..9774dd1f31ec4
--- /dev/null
+++ b/plan/trace.go
@@ -0,0 +1,39 @@
+package plan
+
+import (
+	"github.com/juju/errors"
+	"github.com/pingcap/tidb/ast"
+	"github.com/pingcap/tidb/expression"
+	"github.com/pingcap/tidb/mysql"
+)
+
+// Trace represents a trace plan.
+type Trace struct {
+	baseSchemaProducer
+
+	StmtPlan Plan
+}
+
+// buildTrace builds a trace plan. It first optimizes the underlying query and
+// then constructs a schema, which is used to build the result rows.
+func (b *planBuilder) buildTrace(trace *ast.TraceStmt) (Plan, error) {
+	if _, ok := trace.Stmt.(*ast.SelectStmt); !ok {
+		return nil, errors.New("trace only supports select query")
+	}
+	optimizedP, err := Optimize(b.ctx, trace.Stmt, b.is)
+	if err != nil {
+		return nil, errors.New("fail to optimize during build trace")
+	}
+	p := &Trace{StmtPlan: optimizedP}
+
+	retFields := []string{"timestamp", "duration", "operation", "pos", "log"}
+	schema := expression.NewSchema(make([]*expression.Column, 0, len(retFields))...)
+	schema.Append(buildColumn("", "timestamp", mysql.TypeString, mysql.MaxBlobWidth))
+	schema.Append(buildColumn("", "duration", mysql.TypeString, mysql.MaxBlobWidth))
+	schema.Append(buildColumn("", "operation", mysql.TypeString, mysql.MaxBlobWidth))
+	schema.Append(buildColumn("", "pos", mysql.TypeInt24, mysql.MaxBlobWidth))
+	schema.Append(buildColumn("", "log", mysql.TypeString, mysql.MaxBlobWidth))
+	p.SetSchema(schema)
+	return p, nil
+}
diff --git a/session/session.go b/session/session.go
index d76313e581abd..b0e6b918d760b 100644
--- a/session/session.go
+++ b/session/session.go
@@ -375,11 +375,6 @@ func (s *session) doCommitWithRetry(ctx context.Context) error {
 }
 
 func (s *session) CommitTxn(ctx context.Context) error {
-	if span := opentracing.SpanFromContext(ctx); span != nil {
-		span = opentracing.StartSpan("session.CommitTxn", opentracing.ChildOf(span.Context()))
-		defer span.Finish()
-	}
-
 	err := s.doCommitWithRetry(ctx)
 	label := metrics.LblOK
 	if err != nil {
@@ -390,11 +385,6 @@ func (s *session) CommitTxn(ctx context.Context) error {
 }
 
 func (s *session) RollbackTxn(ctx context.Context) error {
-	if span := opentracing.SpanFromContext(ctx); span != nil {
-		span = opentracing.StartSpan("session.RollbackTxn", opentracing.ChildOf(span.Context()))
-		defer span.Finish()
-	}
-
 	var err error
 	if s.txn.Valid() {
 		terror.Log(s.txn.Rollback())
diff --git a/store/tikv/backoff.go b/store/tikv/backoff.go
index 84a4478fa0837..62e2f25abdc15 100644
--- a/store/tikv/backoff.go
+++ b/store/tikv/backoff.go
@@ -25,6 +25,7 @@ import (
 	"github.com/pingcap/tidb/metrics"
 	"github.com/pingcap/tidb/mysql"
 	"github.com/pingcap/tidb/terror"
+	"github.com/pingcap/tidb/util/tracing"
 	log "github.com/sirupsen/logrus"
 	"golang.org/x/net/context"
 )
@@ -211,6 +212,8 @@ func (b *Backoffer) Backoff(typ backoffType, err error) error {
 	default:
 	}
 
+	child := tracing.ChildSpanFromContxt(b.ctx, "backoffer")
+	defer child.Finish()
 	metrics.TiKVBackoffCounter.WithLabelValues(typ.String()).Inc()
 	// Lazy initialize.
 	if b.fn == nil {
@@ -225,7 +228,9 @@ func (b *Backoffer) Backoff(typ backoffType, err error) error {
 	b.totalSleep += f(b.ctx)
 	b.types = append(b.types, typ)
 
-	log.Debugf("%v, retry later(totalSleep %dms, maxSleep %dms)", err, b.totalSleep, b.maxSleep)
+	log.Debugf("%v, retry later(totalsleep %dms, maxsleep %dms)", err, b.totalSleep, b.maxSleep)
+	child.LogKV("error", err.Error())
+	child.LogKV("retry", fmt.Sprintf("retry later(totalsleep %dms, maxsleep %dms)", b.totalSleep, b.maxSleep))
 
 	b.errors = append(b.errors, errors.Errorf("%s at %s", err.Error(), time.Now().Format(time.RFC3339Nano)))
 	if b.maxSleep > 0 && b.totalSleep >= b.maxSleep {
diff --git a/util/tracing/tracer.go b/util/tracing/tracer.go
new file mode 100644
index 0000000000000..abab0cc599f6c
--- /dev/null
+++ b/util/tracing/tracer.go
@@ -0,0 +1,67 @@
+// Copyright 2018 PingCAP, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package tracing
+
+import (
+	"github.com/opentracing/basictracer-go"
+	"github.com/opentracing/opentracing-go"
+	"golang.org/x/net/context"
+)
+
+// TiDBTrace is set as Baggage on traces which are used for tidb tracing.
+const TiDBTrace = "tr"
+
+// A CallbackRecorder immediately invokes itself on received trace spans.
+type CallbackRecorder func(sp basictracer.RawSpan)
+
+// RecordSpan implements basictracer.SpanRecorder.
+func (cr CallbackRecorder) RecordSpan(sp basictracer.RawSpan) {
+	cr(sp)
+}
+
+// NewRecordedTrace returns a Span which records directly via the specified
+// callback.
+func NewRecordedTrace(opName string, callback func(sp basictracer.RawSpan)) opentracing.Span {
+	tr := basictracer.New(CallbackRecorder(callback))
+	opentracing.SetGlobalTracer(tr)
+	sp := tr.StartSpan(opName)
+	sp.SetBaggageItem(TiDBTrace, "1")
+	return sp
+}
+
+// noopSpan returns a Span which discards all operations.
+func noopSpan() opentracing.Span {
+	return (opentracing.NoopTracer{}).StartSpan("DefaultSpan")
+}
+
+// SpanFromContext returns the span obtained from the context or, if none is found, a no-op span.
+func SpanFromContext(ctx context.Context) (sp opentracing.Span) {
+	if sp = opentracing.SpanFromContext(ctx); sp == nil {
+		return noopSpan()
+	}
+	return sp
+}
+
+// ChildSpanFromContxt returns a non-nil span. If a span can be obtained from ctx, the returned
+// span is a child of that span; otherwise a no-op span is returned.
+func ChildSpanFromContxt(ctx context.Context, opName string) (sp opentracing.Span) {
+	if sp := opentracing.SpanFromContext(ctx); sp != nil {
+		if _, ok := sp.Tracer().(opentracing.NoopTracer); !ok {
+			child := opentracing.StartSpan(opName, opentracing.ChildOf(sp.Context()))
+			opentracing.ContextWithSpan(ctx, child)
+			return child
+		}
+	}
+	return noopSpan()
+}
diff --git a/util/tracing/tracer_test.go b/util/tracing/tracer_test.go
new file mode 100644
index 0000000000000..150aab02ba61b
--- /dev/null
+++ b/util/tracing/tracer_test.go
@@ -0,0 +1,78 @@
+// Copyright 2018 PingCAP, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package tracing_test
+
+import (
+	"testing"
+
+	. "github.com/pingcap/check"
+
+	basictracer "github.com/opentracing/basictracer-go"
+	"github.com/opentracing/opentracing-go"
+	"github.com/pingcap/tidb/util/tracing"
+	"golang.org/x/net/context"
+)
+
+var _ = Suite(&testTraceSuite{})
+
+func TestT(t *testing.T) {
+	TestingT(t)
+}
+
+type testTraceSuite struct {
+}
+
+func (s *testTraceSuite) TestSpanFromContext(c *C) {
+	ctx := context.Background()
+	noopSp := tracing.SpanFromContext(ctx)
+	// test noop span
+	_, ok := noopSp.Tracer().(opentracing.NoopTracer)
+	c.Assert(ok, IsTrue)
+
+	// test tidb tracing
+	collectedSpan := make([]basictracer.RawSpan, 1)
+	sp := tracing.NewRecordedTrace("test", func(sp basictracer.RawSpan) {
+		collectedSpan = append(collectedSpan, sp)
+	})
+	sp.Finish()
+	opentracing.ContextWithSpan(ctx, sp)
+	child := tracing.SpanFromContext(ctx)
+	child.Finish()
+
+	// verify second span's operation is not nil, this way we can ensure
+	// callback logic works.
+	c.Assert(collectedSpan[0].Operation, NotNil)
+}
+
+func (s *testTraceSuite) TestChildSpanFromContext(c *C) {
+	ctx := context.Background()
+	noopSp := tracing.ChildSpanFromContxt(ctx, "")
+	_, ok := noopSp.Tracer().(opentracing.NoopTracer)
+	c.Assert(ok, IsTrue)
+
+	// test tidb tracing
+	collectedSpan := make([]basictracer.RawSpan, 1)
+	sp := tracing.NewRecordedTrace("test", func(sp basictracer.RawSpan) {
+		collectedSpan = append(collectedSpan, sp)
+	})
+	sp.Finish()
+	opentracing.ContextWithSpan(ctx, sp)
+	child := tracing.ChildSpanFromContxt(ctx, "test_child")
+	child.Finish()
+
+	// verify second span's operation is not nil, this way we can ensure
+	// callback logic works.
+	c.Assert(collectedSpan[1].Operation, NotNil)
+}