From 764e2d43e59bd2f4df0d823e01e43707f5b62697 Mon Sep 17 00:00:00 2001 From: Jiaxing Liang Date: Sat, 29 Jul 2017 19:16:34 -0700 Subject: [PATCH 01/10] Update CONTRIBUTING.md (#3945) I am seeing no_push when I followed the steps $ git remote -v origin https://github.com/liangjiaxing/tidb.git (fetch) origin https://github.com/liangjiaxing/tidb.git (push) upstream https://github.com/pingcap/tidb.git (fetch) upstream no_push (push) --- CONTRIBUTING.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index f4f9fb57a0f10..cd122e6aef2cd 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -108,7 +108,7 @@ git remote set-url --push upstream no_push # origin git@github.com:$(user)/tidb.git (fetch) # origin git@github.com:$(user)/tidb.git (push) # upstream https://github.com/pingcap/tidb (fetch) -# upstream https://github.com/pingcap/tidb (push) +# upstream no_push (push) git remote -v ``` From 9f66dd19319b6e6c96cb94a053b08a0db3d3b73a Mon Sep 17 00:00:00 2001 From: Changjian Zhang Date: Mon, 31 Jul 2017 10:10:38 +0800 Subject: [PATCH 02/10] expression, executor, plan: rewrite builtin function ltrim and rtrim. (#3939) --- executor/executor_test.go | 6 +++ expression/builtin_string.go | 75 ++++++++++++++++++------------- expression/builtin_string_test.go | 70 +++++++++++++++++++++++++++++ plan/typeinfer_test.go | 4 ++ 4 files changed, 123 insertions(+), 32 deletions(-) diff --git a/executor/executor_test.go b/executor/executor_test.go index 9484d3d83b629..b277b81348d9a 100644 --- a/executor/executor_test.go +++ b/executor/executor_test.go @@ -974,6 +974,12 @@ func (s *testSuite) TestStringBuiltin(c *C) { result.Check(testkit.Rows("MySQL 123 123 ")) result = tk.MustQuery(`select unhex('string'), unhex('你好'), unhex(123.4), unhex(null)`) result.Check(testkit.Rows(" ")) + + // for ltrim and rtrim + result = tk.MustQuery(`select ltrim(' bar '), ltrim('bar'), ltrim(''), ltrim(null)`) + result.Check(testutil.RowsWithSep(",", "bar ,bar,,")) + result = tk.MustQuery(`select rtrim(' bar '), rtrim('bar'), rtrim(''), rtrim(null)`) + result.Check(testutil.RowsWithSep(",", " bar,bar,,")) } func (s *testSuite) TestEncryptionBuiltin(c *C) { diff --git a/expression/builtin_string.go b/expression/builtin_string.go index 0a057f2fa49da..41d46880ee7b5 100644 --- a/expression/builtin_string.go +++ b/expression/builtin_string.go @@ -1194,20 +1194,34 @@ type lTrimFunctionClass struct { } func (c *lTrimFunctionClass) getFunction(args []Expression, ctx context.Context) (builtinFunc, error) { - sig := &builtinLTrimSig{newBaseBuiltinFunc(args, ctx)} - return sig.setSelf(sig), errors.Trace(c.verifyArgs(args)) + if err := c.verifyArgs(args); err != nil { + return nil, errors.Trace(err) + } + bf, err := newBaseBuiltinFuncWithTp(args, ctx, tpString, tpString) + if err != nil { + return nil, errors.Trace(err) + } + argType := args[0].GetType() + bf.tp.Flen = argType.Flen + if mysql.HasBinaryFlag(argType.Flag) { + types.SetBinChsClnFlag(bf.tp) + } + sig := &builtinLTrimSig{baseStringBuiltinFunc{bf}} + return sig.setSelf(sig), nil } type builtinLTrimSig struct { - baseBuiltinFunc + baseStringBuiltinFunc } -func (b *builtinLTrimSig) eval(row []types.Datum) (types.Datum, error) { - args, err := b.evalArgs(row) - if err != nil { - return types.Datum{}, errors.Trace(err) +// evalString evals a builtinLTrimSig +// See https://dev.mysql.com/doc/refman/5.7/en/string-functions.html#function_ltrim +func (b *builtinLTrimSig) evalString(row []types.Datum) (d string, isNull bool, err error) { + 
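// Evaluate the single string argument first; a NULL value or an error short-circuits before any trimming happens.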
d, isNull, err = b.args[0].EvalString(row, b.ctx.GetSessionVars().StmtCtx) + if isNull || err != nil { + return d, isNull, errors.Trace(err) } - return trimFn(strings.TrimLeft, spaceChars)(args, b.ctx) + return strings.TrimLeft(d, spaceChars), false, nil } type rTrimFunctionClass struct { @@ -1215,37 +1229,34 @@ type rTrimFunctionClass struct { } func (c *rTrimFunctionClass) getFunction(args []Expression, ctx context.Context) (builtinFunc, error) { - sig := &builtinRTrimSig{newBaseBuiltinFunc(args, ctx)} - return sig.setSelf(sig), errors.Trace(c.verifyArgs(args)) + if err := c.verifyArgs(args); err != nil { + return nil, errors.Trace(err) + } + bf, err := newBaseBuiltinFuncWithTp(args, ctx, tpString, tpString) + if err != nil { + return nil, errors.Trace(err) + } + argType := args[0].GetType() + bf.tp.Flen = argType.Flen + if mysql.HasBinaryFlag(argType.Flag) { + types.SetBinChsClnFlag(bf.tp) + } + sig := &builtinRTrimSig{baseStringBuiltinFunc{bf}} + return sig.setSelf(sig), nil } type builtinRTrimSig struct { - baseBuiltinFunc -} - -func (b *builtinRTrimSig) eval(row []types.Datum) (types.Datum, error) { - args, err := b.evalArgs(row) - if err != nil { - return types.Datum{}, errors.Trace(err) - } - return trimFn(strings.TrimRight, spaceChars)(args, b.ctx) + baseStringBuiltinFunc } -// trimFn returns a BuildFunc for ltrim and rtrim. -// See https://dev.mysql.com/doc/refman/5.7/en/string-functions.html#function_ltrim +// evalString evals a builtinRTrimSig // See https://dev.mysql.com/doc/refman/5.7/en/string-functions.html#function_rtrim -func trimFn(fn func(string, string) string, cutset string) BuiltinFunc { - return func(args []types.Datum, ctx context.Context) (d types.Datum, err error) { - if args[0].IsNull() { - return d, nil - } - str, err := args[0].ToString() - if err != nil { - return d, errors.Trace(err) - } - d.SetString(fn(str, cutset)) - return d, nil +func (b *builtinRTrimSig) evalString(row []types.Datum) (d string, isNull bool, err error) { + d, isNull, err = b.args[0].EvalString(row, b.ctx.GetSessionVars().StmtCtx) + if isNull || err != nil { + return d, isNull, errors.Trace(err) } + return strings.TrimRight(d, spaceChars), false, nil } func trimLeft(str, remstr string) string { diff --git a/expression/builtin_string_test.go b/expression/builtin_string_test.go index dd3f66888b0c3..66563420ef08f 100644 --- a/expression/builtin_string_test.go +++ b/expression/builtin_string_test.go @@ -914,6 +914,76 @@ func (s *testEvaluatorSuite) TestTrim(c *C) { } } +func (s *testEvaluatorSuite) TestLTrim(c *C) { + defer testleak.AfterTest(c)() + cases := []struct { + arg interface{} + isNil bool + getErr bool + res string + }{ + {" bar ", false, false, "bar "}, + {"bar", false, false, "bar"}, + {"", false, false, ""}, + {nil, true, false, ""}, + {errors.New("must error"), false, true, ""}, + } + for _, t := range cases { + f, err := newFunctionForTest(s.ctx, ast.LTrim, primitiveValsToConstants([]interface{}{t.arg})...) 
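+ // Each case evaluates the built LTRIM expression against a nil row; the NULL and error cases are asserted below.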
+ c.Assert(err, IsNil)
+ d, err := f.Eval(nil)
+ if t.getErr {
+ c.Assert(err, NotNil)
+ } else {
+ c.Assert(err, IsNil)
+ if t.isNil {
+ c.Assert(d.Kind(), Equals, types.KindNull)
+ } else {
+ c.Assert(d.GetString(), Equals, t.res)
+ }
+ }
+ }
+
+ f, err := funcs[ast.LTrim].getFunction([]Expression{Zero}, s.ctx)
+ c.Assert(err, IsNil)
+ c.Assert(f.isDeterministic(), IsTrue)
+}
+
+func (s *testEvaluatorSuite) TestRTrim(c *C) {
+ defer testleak.AfterTest(c)()
+ cases := []struct {
+ arg interface{}
+ isNil bool
+ getErr bool
+ res string
+ }{
+ {" bar ", false, false, " bar"},
+ {"bar", false, false, "bar"},
+ {"", false, false, ""},
+ {nil, true, false, ""},
+ {errors.New("must error"), false, true, ""},
+ }
+ for _, t := range cases {
+ f, err := newFunctionForTest(s.ctx, ast.RTrim, primitiveValsToConstants([]interface{}{t.arg})...)
+ c.Assert(err, IsNil)
+ d, err := f.Eval(nil)
+ if t.getErr {
+ c.Assert(err, NotNil)
+ } else {
+ c.Assert(err, IsNil)
+ if t.isNil {
+ c.Assert(d.Kind(), Equals, types.KindNull)
+ } else {
+ c.Assert(d.GetString(), Equals, t.res)
+ }
+ }
+ }
+
+ f, err := funcs[ast.RTrim].getFunction([]Expression{Zero}, s.ctx)
+ c.Assert(err, IsNil)
+ c.Assert(f.isDeterministic(), IsTrue)
+}
+
 func (s *testEvaluatorSuite) TestHexFunc(c *C) {
 defer testleak.AfterTest(c)()
 cases := []struct {
diff --git a/plan/typeinfer_test.go b/plan/typeinfer_test.go
index 5de2c5a7983b5..7f20e35eb5103 100644
--- a/plan/typeinfer_test.go
+++ b/plan/typeinfer_test.go
@@ -112,6 +112,10 @@ func (s *testPlanSuite) TestInferType(c *C) {
 {"atan(c_double,c_double)", mysql.TypeDouble, charset.CharsetBin, mysql.BinaryFlag, mysql.MaxRealWidth, types.UnspecifiedLength},
 {"asin(c_double)", mysql.TypeDouble, charset.CharsetBin, mysql.BinaryFlag, mysql.MaxRealWidth, types.UnspecifiedLength},
 {"acos(c_double)", mysql.TypeDouble, charset.CharsetBin, mysql.BinaryFlag, mysql.MaxRealWidth, types.UnspecifiedLength},
+ {"ltrim(c_char)", mysql.TypeVarString, charset.CharsetUTF8, 0, 20, types.UnspecifiedLength},
+ {"ltrim(c_binary)", mysql.TypeVarString, charset.CharsetBin, mysql.BinaryFlag, 20, types.UnspecifiedLength},
+ {"rtrim(c_char)", mysql.TypeVarString, charset.CharsetUTF8, 0, 20, types.UnspecifiedLength},
+ {"rtrim(c_binary)", mysql.TypeVarString, charset.CharsetBin, mysql.BinaryFlag, 20, types.UnspecifiedLength},
 {"cot(c_int)", mysql.TypeDouble, charset.CharsetBin, mysql.BinaryFlag, mysql.MaxRealWidth, types.UnspecifiedLength},
 {"cot(c_float)", mysql.TypeDouble, charset.CharsetBin, mysql.BinaryFlag, mysql.MaxRealWidth, types.UnspecifiedLength},

From c9292044fa4ee66e2fa9c2a45587ab203faa54b3 Mon Sep 17 00:00:00 2001
From: tiancaiamao
Date: Mon, 31 Jul 2017 10:24:47 +0800
Subject: [PATCH 03/10] config: set default values for SlowThreshold and QueryLogMaxlen (#3947)

In the test code, those two config values are not set explicitly,
so the defaults should be set here.
---
 config/config.go | 5 ++++-
 1 file changed, 4 insertions(+), 1 deletion(-)

diff --git a/config/config.go b/config/config.go
index 082e5028620f0..51da6ddc05809 100644
--- a/config/config.go
+++ b/config/config.go
@@ -37,7 +37,10 @@ var once sync.Once

 // Other parts of the system can read the global configuration through this function.
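 // sync.Once below ensures the default-filled Config is initialized exactly once, even with concurrent callers.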
func GetGlobalConfig() *Config { once.Do(func() { - cfg = &Config{} + cfg = &Config{ + SlowThreshold: 300, + QueryLogMaxlen: 2048, + } }) return cfg } From c95bc0c96724a8f50c4d6388eaae4a8a0cfde8d5 Mon Sep 17 00:00:00 2001 From: Jian Zhang Date: Mon, 31 Jul 2017 10:59:52 +0800 Subject: [PATCH 04/10] plan: implement ExplainInfo() interface for join operators (#3915) --- executor/explain_test.go | 630 ++++----------------------------------- expression/explain.go | 24 ++ plan/explain.go | 156 ++++++++-- plan/logical_plans.go | 16 + plan/physical_plans.go | 1 + 5 files changed, 237 insertions(+), 590 deletions(-) diff --git a/executor/explain_test.go b/executor/explain_test.go index 58a4d5addc225..5d8d750061136 100644 --- a/executor/explain_test.go +++ b/executor/explain_test.go @@ -20,7 +20,6 @@ import ( ) func (s *testSuite) TestExplain(c *C) { - c.Skip("new planner use different explain, reopen this test in the future.") tk := testkit.NewTestKit(c, s.store) defer func() { s.cleanEnv(c) @@ -34,655 +33,154 @@ func (s *testSuite) TestExplain(c *C) { tk.MustExec("insert into t2 values(1, 0), (2, 1)") tests := []struct { - sql string - ids []string - parentIds []string - result []string + sql string + expect []string }{ { "select * from t1", []string{ - "TableScan_3", - }, - []string{ - "", - }, - []string{ - `{ - "db": "test", - "table": "t1", - "desc": false, - "keep order": false, - "push down info": { - "limit": 0, - "access conditions": null, - "index filter conditions": null, - "table filter conditions": null - } -}`, + "TableScan_3 cop ", + "TableReader_4 root ", }, }, { "select * from t1 order by c2", []string{ - "IndexScan_5", - }, - []string{ - "", - }, - []string{ - `{ - "db": "test", - "table": "t1", - "index": "c2", - "ranges": "[[\u003cnil\u003e,+inf]]", - "desc": false, - "out of order": false, - "double read": true, - "push down info": { - "limit": 0, - "access conditions": null, - "index filter conditions": null, - "table filter conditions": null - } -}`, + "IndexScan_13 cop ", + "TableScan_14 cop ", + "IndexLookUp_15 root ", }, }, { "select * from t2 order by c2", []string{ - "TableScan_6", "Sort_3", - }, - []string{ - "Sort_3", "", - }, - []string{ - `{ - "db": "test", - "table": "t2", - "desc": false, - "keep order": false, - "push down info": { - "limit": 0, - "access conditions": null, - "index filter conditions": null, - "table filter conditions": null - } -}`, - `{ - "exprs": [ - { - "Expr": "t2.c2", - "Desc": false - } - ], - "limit": null, - "child": "TableScan_6" -}`, + "TableScan_4 cop ", + "TableReader_5 Sort_3 root ", + "Sort_3 root t2.c2:asc", }, }, { "select * from t1 where t1.c1 > 0", []string{ - "TableScan_4", - }, - []string{ - "", - }, - []string{`{ - "db": "test", - "table": "t1", - "desc": false, - "keep order": false, - "push down info": { - "limit": 0, - "access conditions": [ - "gt(test.t1.c1, 0)" - ], - "index filter conditions": null, - "table filter conditions": null - } -}`, + "TableScan_4 cop ", + "TableReader_5 root ", }, }, { "select t1.c1, t1.c2 from t1 where t1.c2 = 1", []string{ - "IndexScan_5", - }, - []string{ - "", - }, - []string{`{ - "db": "test", - "table": "t1", - "index": "c2", - "ranges": "[[1,1]]", - "desc": false, - "out of order": true, - "double read": false, - "push down info": { - "limit": 0, - "access conditions": [ - "eq(test.t1.c2, 1)" - ], - "index filter conditions": null, - "table filter conditions": null - } -}`, + "IndexScan_7 cop ", + "IndexReader_8 root ", }, }, { "select * from t1 left join t2 on t1.c2 = t2.c1 where 
t1.c1 > 1", []string{ - "TableScan_7", "TableScan_10", "HashLeftJoin_9", - }, - []string{ - "HashLeftJoin_9", "HashLeftJoin_9", "", - }, - []string{ - `{ - "db": "test", - "table": "t1", - "desc": false, - "keep order": false, - "push down info": { - "limit": 0, - "access conditions": [ - "gt(test.t1.c1, 1)" - ], - "index filter conditions": null, - "table filter conditions": null - } -}`, - `{ - "db": "test", - "table": "t2", - "desc": false, - "keep order": false, - "push down info": { - "limit": 0, - "access conditions": null, - "index filter conditions": null, - "table filter conditions": null - } -}`, - `{ - "eqCond": [ - "eq(test.t1.c2, test.t2.c1)" - ], - "leftCond": null, - "rightCond": null, - "otherCond": null, - "leftPlan": "TableScan_7", - "rightPlan": "TableScan_10" -}`, + "TableScan_22 cop ", + "TableReader_23 IndexJoin_7 root ", + "IndexScan_33 cop ", + "TableScan_34 cop ", + "IndexLookUp_35 IndexJoin_7 root ", + "IndexJoin_7 root outer:TableReader_23, outer key:test.t1.c2, inner key:test.t2.c1", }, }, { "update t1 set t1.c2 = 2 where t1.c1 = 1", []string{ - "TableScan_4", "Update_3", - }, - []string{ - "Update_3", "", - }, - []string{`{ - "db": "test", - "table": "t1", - "desc": false, - "keep order": false, - "push down info": { - "limit": 0, - "access conditions": [ - "eq(test.t1.c1, 1)" - ], - "index filter conditions": null, - "table filter conditions": null - } -}`, - `{ - "children": [ - "TableScan_4" - ] -}`, + "TableScan_4 cop ", + "TableReader_5 Update_3 root ", + "Update_3 root ", }, }, { "delete from t1 where t1.c2 = 1", []string{ - "IndexScan_5", "Delete_3", - }, - []string{ - "Delete_3", "", - }, - []string{`{ - "db": "test", - "table": "t1", - "index": "c2", - "ranges": "[[1,1]]", - "desc": false, - "out of order": true, - "double read": true, - "push down info": { - "limit": 0, - "access conditions": [ - "eq(test.t1.c2, 1)" - ], - "index filter conditions": null, - "table filter conditions": null - } -}`, - `{ - "children": [ - "IndexScan_5" - ] -}`, + "IndexScan_7 cop ", + "TableScan_8 cop ", + "IndexLookUp_9 Delete_3 root ", + "Delete_3 root ", }, }, { "select count(b.c2) from t1 a, t2 b where a.c1 = b.c2 group by a.c1", []string{ - "TableScan_16", "TableScan_10", "HashAgg_11", "HashLeftJoin_15", "Projection_9", - }, - []string{ - "HashLeftJoin_15", "HashAgg_11", "HashLeftJoin_15", "Projection_9", "", - }, - []string{`{ - "db": "test", - "table": "t1", - "desc": false, - "keep order": false, - "push down info": { - "limit": 0, - "access conditions": null, - "index filter conditions": null, - "table filter conditions": null - } -}`, - `{ - "db": "test", - "table": "t2", - "desc": false, - "keep order": false, - "push down info": { - "limit": 0, - "aggregated push down": true, - "gby items": [ - "b.c2" - ], - "agg funcs": [ - "count(b.c2)", - "firstrow(b.c2)" - ], - "access conditions": null, - "index filter conditions": null, - "table filter conditions": null - } -}`, - `{ - "AggFuncs": [ - "count([b.c2])", - "firstrow([b.c2])" - ], - "GroupByItems": [ - "[b.c2]" - ], - "child": "TableScan_10" -}`, - `{ - "eqCond": [ - "eq(a.c1, b.c2)" - ], - "leftCond": null, - "rightCond": null, - "otherCond": null, - "leftPlan": "TableScan_16", - "rightPlan": "HashAgg_11" -}`, - `{ - "exprs": [ - "cast(join_agg_0)" - ], - "child": "HashLeftJoin_15" -}`, + "TableScan_17 HashAgg_16 cop ", + "HashAgg_16 cop type:complete, group by:b.c2, funcs:count(b.c2), firstrow(b.c2)", + "TableReader_21 HashAgg_20 root ", + "HashAgg_20 IndexJoin_9 root type:final, group by:, 
funcs:count(col_0), firstrow(col_1)", + "TableScan_12 cop ", + "TableReader_31 IndexJoin_9 root ", + "IndexJoin_9 Projection_8 root outer:TableReader_31, outer key:b.c2, inner key:a.c1", + "Projection_8 root cast(join_agg_0)", }, }, { "select * from t2 order by t2.c2 limit 0, 1", []string{ - "TableScan_5", "Sort_6", - }, - []string{ - "Sort_6", "", - }, - []string{ - `{ - "db": "test", - "table": "t2", - "desc": false, - "keep order": true, - "push down info": { - "limit": 1, - "sort items": [ - { - "Expr": "test.t2.c2", - "Desc": false - } - ], - "access conditions": null, - "index filter conditions": null, - "table filter conditions": null - } -}`, - `{ - "exprs": [ - { - "Expr": "test.t2.c2", - "Desc": false - } - ], - "limit": 1, - "child": "TableScan_5" -}`, + "TableScan_7 TopN_5 cop ", + "TopN_5 cop ", + "TableReader_10 TopN_5 root ", + "TopN_5 root ", }, }, { "select * from t1 where c1 > 1 and c2 = 1 and c3 < 1", []string{ - "IndexScan_5", - }, - []string{ - "", - }, - []string{ - `{ - "db": "test", - "table": "t1", - "index": "c2", - "ranges": "[[1,1]]", - "desc": false, - "out of order": true, - "double read": true, - "push down info": { - "limit": 0, - "access conditions": [ - "eq(test.t1.c2, 1)" - ], - "index filter conditions": [ - "gt(test.t1.c1, 1)" - ], - "table filter conditions": [ - "lt(test.t1.c3, 1)" - ] - } -}`, + "IndexScan_7 Selection_9 cop ", + "Selection_9 cop gt(test.t1.c1, 1)", + "TableScan_8 Selection_10 cop ", + "Selection_10 cop lt(test.t1.c3, 1)", + "IndexLookUp_11 root ", }, }, { "select * from t1 where c1 =1 and c2 > 1", []string{ - "TableScan_4", - }, - []string{ - "", - }, - []string{ - `{ - "db": "test", - "table": "t1", - "desc": false, - "keep order": false, - "push down info": { - "limit": 0, - "access conditions": [ - "eq(test.t1.c1, 1)" - ], - "index filter conditions": null, - "table filter conditions": [ - "gt(test.t1.c2, 1)" - ] - } -}`, + "TableScan_4 Selection_5 cop ", + "Selection_5 cop gt(test.t1.c2, 1)", + "TableReader_6 root ", }, }, { "select sum(t1.c1 in (select c1 from t2)) from t1", - []string{"TableScan_7", "HashAgg_8"}, - []string{"HashAgg_8", ""}, []string{ - `{ - "db": "test", - "table": "t1", - "desc": false, - "keep order": false, - "push down info": { - "limit": 0, - "aggregated push down": true, - "gby items": null, - "agg funcs": [ - "sum(in(test.t1.c1, 1, 2))" - ], - "access conditions": null, - "index filter conditions": null, - "table filter conditions": null - } -}`, - `{ - "AggFuncs": [ - "sum([in(test.t1.c1, 1, 2)])" - ], - "GroupByItems": [ - "[]" - ], - "child": "TableScan_7" -}`, - }, - }, - { - "select sum(t1.c1 in (select c1 from t2 where false)) from t1", - []string{"TableScan_8", "HashAgg_9"}, - []string{"HashAgg_9", ""}, - []string{ - `{ - "db": "test", - "table": "t1", - "desc": false, - "keep order": false, - "push down info": { - "limit": 0, - "aggregated push down": true, - "gby items": null, - "agg funcs": [ - "sum(0)" - ], - "access conditions": null, - "index filter conditions": null, - "table filter conditions": null - } -}`, - `{ - "AggFuncs": [ - "sum([0])" - ], - "GroupByItems": [ - "[]" - ], - "child": "TableScan_8" -}`, + "TableScan_11 HashAgg_10 cop ", + "HashAgg_10 cop type:complete, funcs:sum(in(test.t1.c1, 1, 2))", + "TableReader_14 HashAgg_13 root ", + "HashAgg_13 root type:final, funcs:sum(col_0)", }, }, { "select c1 from t1 where c1 in (select c2 from t2)", - []string{"TableScan_7"}, - []string{""}, []string{ - `{ - "db": "test", - "table": "t1", - "desc": false, - "keep order": false, - "push 
down info": { - "limit": 0, - "access conditions": [ - "in(test.t1.c1, 0, 1)" - ], - "index filter conditions": null, - "table filter conditions": null - } -}`, + "TableScan_11 cop ", + "TableReader_12 root ", }, }, { "select (select count(1) k from t1 s where s.c1 = t1.c1 having k != 0) from t1", []string{ - "TableScan_12", "TableScan_13", "Selection_4", "StreamAgg_15", "Selection_10", "Apply_16", "Projection_2", - }, - []string{ - "Apply_16", "Selection_4", "StreamAgg_15", "Selection_10", "Apply_16", "Projection_2", "", - }, - []string{ - `{ - "db": "test", - "table": "t1", - "desc": false, - "keep order": false, - "push down info": { - "limit": 0, - "access conditions": null, - "index filter conditions": null, - "table filter conditions": null - } -}`, - `{ - "db": "test", - "table": "t1", - "desc": false, - "keep order": false, - "push down info": { - "limit": 0, - "access conditions": null, - "index filter conditions": null, - "table filter conditions": null - } -}`, - `{ - "condition": [ - "eq(s.c1, test.t1.c1)" - ], - "scanController": true, - "child": "TableScan_13" -}`, - `{ - "AggFuncs": [ - "count(1)" - ], - "GroupByItems": null, - "child": "Selection_4" -}`, - `{ - "condition": [ - "ne(aggregation_5_col_0, 0)" - ], - "scanController": false, - "child": "StreamAgg_15" -}`, - `{ - "innerPlan": "Selection_10", - "outerPlan": "TableScan_12", - "join": { - "eqCond": null, - "leftCond": null, - "rightCond": null, - "otherCond": null, - "leftPlan": "TableScan_12", - "rightPlan": "Selection_10" - } -}`, - `{ - "exprs": [ - "k" - ], - "child": "Apply_16" -}`, + "TableScan_13 cop ", + "TableReader_14 Apply_12 root ", + "TableScan_18 cop ", + "TableReader_19 Selection_4 root ", + "Selection_4 HashAgg_17 root eq(s.c1, test.t1.c1)", + "HashAgg_17 Selection_10 root type:complete, funcs:count(1)", + "Selection_10 Apply_12 root ne(k, 0)", + "Apply_12 Projection_2 root left outer join, small:Selection_10, right:Selection_10", + "Projection_2 root k", }, }, { "select * from information_schema.columns", - []string{"MemTableScan_3"}, - []string{""}, - []string{ - `{ - "db": "information_schema", - "table": "COLUMNS" -}`, - }, - }, - { - "select s.c1 from t2 s left outer join t2 t on s.c2 = t.c2 limit 10", - []string{"TableScan_6", "Limit_7", "TableScan_10", "HashLeftJoin_9", "Limit_11", "Projection_4"}, - []string{"Limit_7", "HashLeftJoin_9", "HashLeftJoin_9", "Limit_11", "Projection_4", ""}, []string{ - `{ - "db": "test", - "table": "t2", - "desc": false, - "keep order": false, - "push down info": { - "limit": 10, - "access conditions": null, - "index filter conditions": null, - "table filter conditions": null - } -}`, - `{ - "limit": 10, - "offset": 0, - "child": "TableScan_6" -}`, - `{ - "db": "test", - "table": "t2", - "desc": false, - "keep order": false, - "push down info": { - "limit": 0, - "access conditions": null, - "index filter conditions": null, - "table filter conditions": null - } -}`, - `{ - "eqCond": [ - "eq(s.c2, t.c2)" - ], - "leftCond": null, - "rightCond": null, - "otherCond": null, - "leftPlan": "Limit_7", - "rightPlan": "TableScan_10" -}`, - `{ - "limit": 10, - "offset": 0, - "child": "HashLeftJoin_9" -}`, - `{ - "exprs": [ - "s.c1" - ], - "child": "Limit_11" -}`, + "MemTableScan_3 root ", }, }, } tk.MustExec("set @@session.tidb_opt_insubquery_unfold = 1") for _, tt := range tests { result := tk.MustQuery("explain " + tt.sql) - var resultList []string - for i := range tt.ids { - resultList = append(resultList, tt.ids[i]+" "+tt.result[i]+" "+tt.parentIds[i]) - } - 
result.Check(testkit.Rows(resultList...)) + result.Check(testkit.Rows(tt.expect...)) } } diff --git a/expression/explain.go b/expression/explain.go index 4ea7c35e2b9e2..764670ca95b9e 100644 --- a/expression/explain.go +++ b/expression/explain.go @@ -60,3 +60,27 @@ func ExplainAggFunc(agg AggregationFunction) string { buffer.WriteString(")") return buffer.String() } + +// ExplainExpressionList generates explain information for a list of expressions. +func ExplainExpressionList(exprs []Expression) []byte { + buffer := bytes.NewBufferString("") + for i, expr := range exprs { + buffer.WriteString(expr.ExplainInfo()) + if i+1 < len(exprs) { + buffer.WriteString(", ") + } + } + return buffer.Bytes() +} + +// ExplainColumnList generates explain information for a list of columns. +func ExplainColumnList(cols []*Column) []byte { + buffer := bytes.NewBufferString("") + for i, col := range cols { + buffer.WriteString(col.ExplainInfo()) + if i+1 < len(cols) { + buffer.WriteString(", ") + } + } + return buffer.Bytes() +} diff --git a/plan/explain.go b/plan/explain.go index f237e11d6d1ec..7fc1b1777140d 100644 --- a/plan/explain.go +++ b/plan/explain.go @@ -58,26 +58,12 @@ func setParents4FinalPlan(plan PhysicalPlan) { // ExplainInfo implements PhysicalPlan interface. func (p *Selection) ExplainInfo() string { - buffer := bytes.NewBufferString("") - for i, cond := range p.Conditions { - buffer.WriteString(cond.ExplainInfo()) - if i+1 < len(p.Conditions) { - buffer.WriteString(", ") - } - } - return buffer.String() + return string(expression.ExplainExpressionList(p.Conditions)) } // ExplainInfo implements PhysicalPlan interface. func (p *Projection) ExplainInfo() string { - buffer := bytes.NewBufferString("") - for i, expr := range p.Exprs { - buffer.WriteString(expr.ExplainInfo()) - if i+1 < len(p.Exprs) { - buffer.WriteString(", ") - } - } - return buffer.String() + return string(expression.ExplainExpressionList(p.Exprs)) } // ExplainInfo implements PhysicalPlan interface. @@ -109,14 +95,9 @@ func (p *Limit) ExplainInfo() string { // ExplainInfo implements PhysicalPlan interface. func (p *PhysicalAggregation) ExplainInfo() string { buffer := bytes.NewBufferString(fmt.Sprintf("type:%s", p.AggType)) - if p.HasGby { - buffer.WriteString(", group by:") - for i, gby := range p.GroupByItems { - buffer.WriteString(gby.ExplainInfo()) - if i+1 < len(p.GroupByItems) { - buffer.WriteString(", ") - } - } + if p.HasGby && len(p.GroupByItems) > 0 { + buffer.WriteString(fmt.Sprintf(", group by:%s", + expression.ExplainExpressionList(p.GroupByItems))) } buffer.WriteString(", funcs:") for i, agg := range p.AggFuncs { @@ -127,3 +108,130 @@ func (p *PhysicalAggregation) ExplainInfo() string { } return buffer.String() } + +// ExplainInfo implements PhysicalPlan interface. +func (p *PhysicalApply) ExplainInfo() string { + buffer := bytes.NewBufferString(p.PhysicalJoin.ExplainInfo()) + buffer.WriteString(fmt.Sprintf(", right:%s", p.Children()[p.rightChOffset].ID())) + return buffer.String() +} + +// ExplainInfo implements PhysicalPlan interface. 
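+// For index join it reports the outer child plus the join keys and any residual join conditions.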
+func (p *PhysicalIndexJoin) ExplainInfo() string { + buffer := bytes.NewBufferString(fmt.Sprintf("outer:%s", + p.Children()[p.outerIndex].ID())) + if len(p.OuterJoinKeys) > 0 { + buffer.WriteString(fmt.Sprintf(", outer key:%s", + expression.ExplainColumnList(p.OuterJoinKeys))) + } + if len(p.InnerJoinKeys) > 0 { + buffer.WriteString(fmt.Sprintf(", inner key:%s", + expression.ExplainColumnList(p.InnerJoinKeys))) + } + if len(p.LeftConditions) > 0 { + buffer.WriteString(fmt.Sprintf(", left cond:%s", + expression.ExplainExpressionList(p.LeftConditions))) + } + if len(p.RightConditions) > 0 { + buffer.WriteString(fmt.Sprintf(", right cond:%s", + expression.ExplainExpressionList(p.RightConditions))) + } + if len(p.OtherConditions) > 0 { + buffer.WriteString(fmt.Sprintf(", other cond:%s", + expression.ExplainExpressionList(p.OtherConditions))) + } + return buffer.String() +} + +// ExplainInfo implements PhysicalPlan interface. +func (p *PhysicalHashJoin) ExplainInfo() string { + buffer := bytes.NewBufferString(p.JoinType.String()) + buffer.WriteString(fmt.Sprintf(", small:%s", p.Children()[p.SmallTable].ID())) + if len(p.EqualConditions) > 0 { + buffer.WriteString(fmt.Sprintf(", equal:%s", p.EqualConditions)) + } + if len(p.LeftConditions) > 0 { + buffer.WriteString(fmt.Sprintf(", left cond:%s", p.LeftConditions)) + } + if len(p.RightConditions) > 0 { + buffer.WriteString(fmt.Sprintf(", right cond:%s", + expression.ExplainExpressionList(p.RightConditions))) + } + if len(p.OtherConditions) > 0 { + buffer.WriteString(fmt.Sprintf(", other cond:%s", + expression.ExplainExpressionList(p.OtherConditions))) + } + return buffer.String() +} + +// ExplainInfo implements PhysicalPlan interface. +func (p *PhysicalHashSemiJoin) ExplainInfo() string { + buffer := bytes.NewBufferString(fmt.Sprintf("right:%s", p.Children()[p.rightChOffset].ID())) + if p.WithAux { + buffer.WriteString(", aux") + } + if p.Anti { + buffer.WriteString(", anti") + } + if len(p.EqualConditions) > 0 { + buffer.WriteString(fmt.Sprintf(", equal:%s", p.EqualConditions)) + } + if len(p.LeftConditions) > 0 { + buffer.WriteString(fmt.Sprintf(", left cond:%s", p.LeftConditions)) + } + if len(p.RightConditions) > 0 { + buffer.WriteString(fmt.Sprintf(", right cond:%s", + expression.ExplainExpressionList(p.RightConditions))) + } + if len(p.OtherConditions) > 0 { + buffer.WriteString(fmt.Sprintf(", other cond:%s", + expression.ExplainExpressionList(p.OtherConditions))) + } + return buffer.String() +} + +// ExplainInfo implements PhysicalPlan interface. 
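+// For merge join it reports the join type, conditions, default values, sort direction, and merge keys.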
+func (p *PhysicalMergeJoin) ExplainInfo() string {
+ buffer := bytes.NewBufferString(p.JoinType.String())
+ if len(p.EqualConditions) > 0 {
+ buffer.WriteString(fmt.Sprintf(", equal:%s", p.EqualConditions))
+ }
+ if len(p.LeftConditions) > 0 {
+ buffer.WriteString(fmt.Sprintf(", left cond:%s", p.LeftConditions))
+ }
+ if len(p.RightConditions) > 0 {
+ buffer.WriteString(fmt.Sprintf(", right cond:%s",
+ expression.ExplainExpressionList(p.RightConditions)))
+ }
+ if len(p.OtherConditions) > 0 {
+ buffer.WriteString(fmt.Sprintf(", other cond:%s",
+ expression.ExplainExpressionList(p.OtherConditions)))
+ }
+ if len(p.DefaultValues) > 0 {
+ buffer.WriteString(", default vals:")
+ for i, val := range p.DefaultValues {
+ str, err := val.ToString()
+ if err != nil {
+ str = err.Error()
+ }
+ buffer.WriteString(str)
+ if i+1 < len(p.DefaultValues) {
+ buffer.WriteString(", ")
+ }
+ }
+ }
+ if p.Desc {
+ buffer.WriteString(", desc")
+ } else {
+ buffer.WriteString(", asc")
+ }
+ if len(p.leftKeys) > 0 {
+ buffer.WriteString(fmt.Sprintf(", left key:%s",
+ expression.ExplainColumnList(p.leftKeys)))
+ }
+ if len(p.rightKeys) > 0 {
+ buffer.WriteString(fmt.Sprintf(", right key:%s",
+ expression.ExplainColumnList(p.rightKeys)))
+ }
+ return buffer.String()
+}
diff --git a/plan/logical_plans.go b/plan/logical_plans.go
index 7bf7d6f4e9e56..ab90c95a89844 100644
--- a/plan/logical_plans.go
+++ b/plan/logical_plans.go
@@ -59,6 +59,22 @@ const (
 LeftOuterSemiJoin
 )

+func (tp JoinType) String() string {
+ switch tp {
+ case InnerJoin:
+ return "inner join"
+ case LeftOuterJoin:
+ return "left outer join"
+ case RightOuterJoin:
+ return "right outer join"
+ case SemiJoin:
+ return "semi join"
+ case LeftOuterSemiJoin:
+ return "left outer semi join"
+ }
+ return "unsupported join type"
+}
+
 const (
 preferLeftAsOuter = 1 << iota
 preferRightAsOuter
diff --git a/plan/physical_plans.go b/plan/physical_plans.go
index a6e6985ff7cee..5ab906ffdb527 100644
--- a/plan/physical_plans.go
+++ b/plan/physical_plans.go
@@ -60,6 +60,7 @@ var (
 _ PhysicalPlan = &PhysicalTableScan{}
 _ PhysicalPlan = &PhysicalAggregation{}
 _ PhysicalPlan = &PhysicalApply{}
+ _ PhysicalPlan = &PhysicalIndexJoin{}
 _ PhysicalPlan = &PhysicalHashJoin{}
 _ PhysicalPlan = &PhysicalHashSemiJoin{}
 _ PhysicalPlan = &PhysicalMergeJoin{}

From 0aa27c3d3d2ecdea78c01b2e174be3a08a639199 Mon Sep 17 00:00:00 2001
From: Cholerae Hu
Date: Sun, 30 Jul 2017 22:35:36 -0500
Subject: [PATCH 05/10] lexer: fix panic when input "'\\" (#3948)

* lexer: fix panic when input "'\\"
---
 parser/lexer.go | 9 +++++++--
 parser/lexer_test.go | 13 ++++++++++++-
 2 files changed, 19 insertions(+), 3 deletions(-)

diff --git a/parser/lexer.go b/parser/lexer.go
index aeb317aaffa99..a9eb9aec49638 100644
--- a/parser/lexer.go
+++ b/parser/lexer.go
@@ -452,6 +452,9 @@ func scanQuotedIdent(s *Scanner) (tok int, pos Pos, lit string) {

 func startString(s *Scanner) (tok int, pos Pos, lit string) {
 tok, pos, lit = s.scanString()
+ if tok == unicode.ReplacementChar {
+ return
+ }

 // Quoted strings placed next to each other are concatenated to a single string.
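 // For example, 'hello' ' ' 'world' is read as the single literal 'hello world'.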
// See http://dev.mysql.com/doc/refman/5.7/en/string-literals.html @@ -527,8 +530,10 @@ func (s *Scanner) scanString() (tok int, pos Pos, lit string) { ch0 = handleEscape(s) } mb.writeRune(ch0, s.r.w) - s.r.inc() - ch0 = s.r.peek() + if !s.r.eof() { + s.r.inc() + ch0 = s.r.peek() + } } tok = unicode.ReplacementChar diff --git a/parser/lexer_test.go b/parser/lexer_test.go index 72abf4560b3c2..d7917a6506b60 100644 --- a/parser/lexer_test.go +++ b/parser/lexer_test.go @@ -115,7 +115,6 @@ func (s *testLexerSuite) TestLiteral(c *C) { {"0x3c26", hexLit}, {"x'13181C76734725455A'", hexLit}, {"0b01", bitLit}, - {fmt.Sprintf("%c", 0), invalid}, {fmt.Sprintf("t1%c", 0), identifier}, {".*", int('.')}, {".1_t_1_x", int('.')}, @@ -315,3 +314,15 @@ func (s *testLexerSuite) TestSQLModeANSIQuotes(c *C) { c.Assert(v.ident, Equals, t.ident) } } + +func (s *testLexerSuite) TestIllegal(c *C) { + defer testleak.AfterTest(c)() + table := []testCaseItem{ + {"'", 0}, + {"'fu", 0}, + {"'\\n", 0}, + {"'\\", 0}, + {fmt.Sprintf("%c", 0), invalid}, + } + runTest(c, table) +} From dff73cc6e44f7acf7b95b8204b0f04caaefc04ea Mon Sep 17 00:00:00 2001 From: Jack Yu Date: Sun, 30 Jul 2017 23:02:52 -0500 Subject: [PATCH 06/10] *: Modify the Makefile for go vet (#3900) --- Makefile | 10 ++-- bootstrap.go | 7 ++- cmd/benchfilesort/main.go | 6 +- ddl/column.go | 6 +- ddl/ddl_api.go | 8 +-- ddl/ddl_db_test.go | 2 +- ddl/foreign_key_test.go | 6 +- ddl/index.go | 3 +- ddl/owner_manager.go | 4 +- ddl/schema.go | 3 +- distsql/xeval/eval_control_funcs.go | 3 +- distsql/xeval/eval_data_type.go | 3 +- domain/domain.go | 3 +- executor/analyze.go | 2 +- executor/builder.go | 2 +- executor/executor.go | 6 +- executor/executor_test.go | 5 +- executor/merge_join.go | 3 +- executor/write.go | 12 ++-- expression/builtin_cast.go | 6 +- expression/builtin_compare.go | 3 +- expression/builtin_json_test.go | 24 +++++--- expression/builtin_string.go | 3 +- expression/helper.go | 2 +- infoschema/builder.go | 20 +++---- kv/fault_injection_test.go | 2 +- perfschema/perfschema_test.go | 3 +- plan/expression_rewriter.go | 5 +- plan/new_physical_plan_builder.go | 76 ++++++++++++------------- plan/optimizer.go | 4 +- plan/physical_plan_builder.go | 8 ++- plan/task.go | 58 +++++++++---------- server/region_handler_test.go | 18 +++--- sessionctx/varsutil/varsutil_test.go | 2 +- statistics/ddl.go | 7 ++- statistics/histogram.go | 9 ++- store/tikv/2pc.go | 2 +- store/tikv/mock-tikv/cop_handler_dag.go | 9 ++- table/tables/tables.go | 6 +- util/filesort/filesort.go | 4 +- util/types/datum.go | 6 +- util/types/datum_eval.go | 3 +- util/types/datum_test.go | 6 +- util/types/json/functions_test.go | 3 +- util/types/json/normalize.go | 5 +- 45 files changed, 219 insertions(+), 169 deletions(-) diff --git a/Makefile b/Makefile index ca0e3e6b6a059..15c4f81ddb793 100644 --- a/Makefile +++ b/Makefile @@ -20,8 +20,9 @@ GOVERALLS := goveralls ARCH := "`uname -s`" LINUX := "Linux" MAC := "Darwin" -PACKAGES := $$(go list ./...| grep -vE 'vendor') -FILES := $$(find . -name '*.go' | grep -vE 'vendor') +PACKAGES := $$(go list ./...| grep -vE "vendor") +FILES := $$(find . 
-name "*.go" | grep -vE "vendor") +TOPDIRS := $$(ls -d */ | grep -vE "vendor") LDFLAGS += -X "github.com/pingcap/tidb/util/printer.TiDBBuildTS=$(shell date -u '+%Y-%m-%d %I:%M:%S')" LDFLAGS += -X "github.com/pingcap/tidb/util/printer.TiDBGitHash=$(shell git rev-parse HEAD)" @@ -74,9 +75,8 @@ check: go get github.com/golang/lint/golint @echo "vet" - @ go tool vet $(FILES) 2>&1 | awk '{print} END{if(NR>0) {exit 1}}' - @echo "vet --shadow" - @ go tool vet --shadow $(FILES) 2>&1 | awk '{print} END{if(NR>0) {exit 1}}' + @ go tool vet -all -shadow $(TOPDIRS) 2>&1 | awk '{print} END{if(NR>0) {exit 1}}' + @ go tool vet -all -shadow *.go 2>&1 | awk '{print} END{if(NR>0) {exit 1}}' @echo "golint" @ golint ./... 2>&1 | grep -vE 'context\.Context|LastInsertId|NewLexer|\.pb\.go' | awk '{print} END{if(NR>0) {exit 1}}' @echo "gofmt (simplify)" diff --git a/bootstrap.go b/bootstrap.go index 3796f7880df5b..248795c37ad7d 100644 --- a/bootstrap.go +++ b/bootstrap.go @@ -451,13 +451,14 @@ func upgradeToVer12(s Session) { user := row.Data[0].GetString() host := row.Data[1].GetString() pass := row.Data[2].GetString() - newpass, err := oldPasswordUpgrade(pass) + var newPass string + newPass, err = oldPasswordUpgrade(pass) if err != nil { log.Fatal(err) return } - sql := fmt.Sprintf(`UPDATE mysql.user set password = "%s" where user="%s" and host="%s"`, newpass, user, host) - sqls = append(sqls, sql) + updateSQL := fmt.Sprintf(`UPDATE mysql.user set password = "%s" where user="%s" and host="%s"`, newPass, user, host) + sqls = append(sqls, updateSQL) row, err = r.Next() } diff --git a/cmd/benchfilesort/main.go b/cmd/benchfilesort/main.go index 9d5dfa9e21bf7..0e63e87c07551 100644 --- a/cmd/benchfilesort/main.go +++ b/cmd/benchfilesort/main.go @@ -417,9 +417,9 @@ func main() { flag.Parse() if len(os.Args) == 1 { - fmt.Println("Usage:\n") - fmt.Println("\tbenchfilesort command [arguments]\n") - fmt.Println("The commands are:\n") + fmt.Printf("Usage:\n\n") + fmt.Printf("\tbenchfilesort command [arguments]\n\n") + fmt.Printf("The commands are:\n\n") fmt.Println("\tgen\t", "generate rows") fmt.Println("\trun\t", "run tests") fmt.Println("") diff --git a/ddl/column.go b/ddl/column.go index 94d145ac49fe5..9daa84cdc3a51 100644 --- a/ddl/column.go +++ b/ddl/column.go @@ -150,7 +150,8 @@ func (d *ddl) onAddColumn(t *meta.Meta, job *model.Job) (ver int64, _ error) { case model.StateWriteReorganization: // reorganization -> public // Get the current version for reorganization if we don't have it. - reorgInfo, err := d.getReorgInfo(t, job) + var reorgInfo *reorgInfo + reorgInfo, err = d.getReorgInfo(t, job) if err != nil || reorgInfo.first { // If we run reorg firstly, we should update the job snapshot version // and then run the reorg next time. @@ -233,7 +234,8 @@ func (d *ddl) onDropColumn(t *meta.Meta, job *model.Job) (ver int64, _ error) { ver, err = updateTableInfo(t, job, tblInfo, originalState) case model.StateDeleteReorganization: // reorganization -> absent - reorgInfo, err := d.getReorgInfo(t, job) + var reorgInfo *reorgInfo + reorgInfo, err = d.getReorgInfo(t, job) if err != nil || reorgInfo.first { // If we run reorg firstly, we should update the job snapshot version // and then run the reorg next time. 
diff --git a/ddl/ddl_api.go b/ddl/ddl_api.go index 631488139944a..aaaee8f13cbe1 100644 --- a/ddl/ddl_api.go +++ b/ddl/ddl_api.go @@ -889,7 +889,7 @@ func (d *ddl) AddColumn(ctx context.Context, ti ast.Ident, spec *ast.AlterTableS referableColNames[col.Name.L] = struct{}{} } _, dependColNames := findDependedColumnNames(spec.NewColumn) - if err := columnNamesCover(referableColNames, dependColNames); err != nil { + if err = columnNamesCover(referableColNames, dependColNames); err != nil { return errors.Trace(err) } } @@ -1158,7 +1158,7 @@ func (d *ddl) getModifiableColumnJob(ctx context.Context, ident ast.Ident, origi if err != nil { return nil, errors.Trace(err) } - if err := setDefaultAndComment(ctx, newCol, spec.NewColumn.Options); err != nil { + if err = setDefaultAndComment(ctx, newCol, spec.NewColumn.Options); err != nil { return nil, errors.Trace(err) } @@ -1261,12 +1261,12 @@ func (d *ddl) AlterColumn(ctx context.Context, ident ast.Ident, spec *ast.AlterT } // Clean the NoDefaultValueFlag value. - col.Flag &= (^uint(mysql.NoDefaultValueFlag)) + col.Flag &= ^uint(mysql.NoDefaultValueFlag) if len(spec.NewColumn.Options) == 0 { col.DefaultValue = nil setNoDefaultValueFlag(col, false) } else { - err := setDefaultValue(ctx, col, spec.NewColumn.Options[0]) + err = setDefaultValue(ctx, col, spec.NewColumn.Options[0]) if err != nil { return errors.Trace(err) } diff --git a/ddl/ddl_db_test.go b/ddl/ddl_db_test.go index 043c0c55e3d19..619de9543848c 100644 --- a/ddl/ddl_db_test.go +++ b/ddl/ddl_db_test.go @@ -698,7 +698,7 @@ LOOP: matchRows(c, rows, [][]interface{}{{count - int64(step)}}) for i := num; i < num+step; i++ { - rows := s.mustQuery(c, "select c4 from t2 where c4 = ?", i) + rows = s.mustQuery(c, "select c4 from t2 where c4 = ?", i) matchRows(c, rows, [][]interface{}{{i}}) } diff --git a/ddl/foreign_key_test.go b/ddl/foreign_key_test.go index d44c142e64fe3..19e837dc50073 100644 --- a/ddl/foreign_key_test.go +++ b/ddl/foreign_key_test.go @@ -139,7 +139,8 @@ func (s *testForeighKeySuite) TestForeignKey(c *C) { } mu.Lock() defer mu.Unlock() - t, err := testGetTableWithError(d, s.dbInfo.ID, tblInfo.ID) + var t table.Table + t, err = testGetTableWithError(d, s.dbInfo.ID, tblInfo.ID) if err != nil { hookErr = errors.Trace(err) return @@ -178,7 +179,8 @@ func (s *testForeighKeySuite) TestForeignKey(c *C) { } mu.Lock() defer mu.Unlock() - t, err := testGetTableWithError(d, s.dbInfo.ID, tblInfo.ID) + var t table.Table + t, err = testGetTableWithError(d, s.dbInfo.ID, tblInfo.ID) if err != nil { hookErr = errors.Trace(err) return diff --git a/ddl/index.go b/ddl/index.go index 80280c8566053..0868772da6a80 100644 --- a/ddl/index.go +++ b/ddl/index.go @@ -253,7 +253,8 @@ func (d *ddl) onCreateIndex(t *meta.Meta, job *model.Job) (ver int64, err error) ver, err = updateTableInfo(t, job, tblInfo, originalState) case model.StateWriteReorganization: // reorganization -> public - reorgInfo, err := d.getReorgInfo(t, job) + var reorgInfo *reorgInfo + reorgInfo, err = d.getReorgInfo(t, job) if err != nil || reorgInfo.first { // If we run reorg firstly, we should update the job snapshot version // and then run the reorg next time. diff --git a/ddl/owner_manager.go b/ddl/owner_manager.go index 3ecd772116d68..d15d1208b4d77 100644 --- a/ddl/owner_manager.go +++ b/ddl/owner_manager.go @@ -184,9 +184,9 @@ func (m *ownerManager) campaignLoop(ctx goctx.Context, etcdSession *concurrency. case <-ctx.Done(): // Revoke the session lease. // If revoke takes longer than the ttl, lease is expired anyway. 
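 // A dedicated cancelCtx below keeps the revoke timeout from shadowing the loop's ctx parameter.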
- ctx, cancel := goctx.WithTimeout(goctx.Background(), + cancelCtx, cancel := goctx.WithTimeout(goctx.Background(), time.Duration(ManagerSessionTTL)*time.Second) - _, err = m.etcdCli.Revoke(ctx, etcdSession.Lease()) + _, err = m.etcdCli.Revoke(cancelCtx, etcdSession.Lease()) cancel() log.Infof("[ddl] %s break campaign loop err %v", idInfo, err) return diff --git a/ddl/schema.go b/ddl/schema.go index 094e317c3c708..4948f6e371f09 100644 --- a/ddl/schema.go +++ b/ddl/schema.go @@ -102,7 +102,8 @@ func (d *ddl) onDropSchema(t *meta.Meta, job *model.Job) (ver int64, _ error) { err = t.UpdateDatabase(dbInfo) case model.StateDeleteOnly: dbInfo.State = model.StateNone - tables, err := t.ListTables(job.SchemaID) + var tables []*model.TableInfo + tables, err = t.ListTables(job.SchemaID) if err != nil { return ver, errors.Trace(err) } diff --git a/distsql/xeval/eval_control_funcs.go b/distsql/xeval/eval_control_funcs.go index 770cb4c9a920f..5e647d827ed88 100644 --- a/distsql/xeval/eval_control_funcs.go +++ b/distsql/xeval/eval_control_funcs.go @@ -76,7 +76,8 @@ func (e *Evaluator) evalIf(expr *tipb.Expr) (d types.Datum, err error) { return d, errors.Trace(err) } if !child1.IsNull() { - x, err := child1.ToBool(e.StatementCtx) + var x int64 + x, err = child1.ToBool(e.StatementCtx) if err != nil { return d, errors.Trace(err) } diff --git a/distsql/xeval/eval_data_type.go b/distsql/xeval/eval_data_type.go index e26e08e118322..806de4961cd86 100644 --- a/distsql/xeval/eval_data_type.go +++ b/distsql/xeval/eval_data_type.go @@ -58,7 +58,8 @@ func (e *Evaluator) evalColumnRef(val []byte) (types.Datum, error) { // TODO: Remove this case. if e.ColVals == nil { - d, ok := e.Row[i] + var ok bool + d, ok = e.Row[i] if !ok { return d, ErrInvalid.Gen("column % x not found", val) } diff --git a/domain/domain.go b/domain/domain.go index 698d21467a47e..504da8dcce89a 100644 --- a/domain/domain.go +++ b/domain/domain.go @@ -399,7 +399,8 @@ func NewDomain(store kv.Storage, ddlLease time.Duration, statsLease time.Duratio if ebd, ok := store.(etcdBackend); ok { if addrs := ebd.EtcdAddrs(); addrs != nil { - cli, err := clientv3.New(clientv3.Config{ + var cli *clientv3.Client + cli, err = clientv3.New(clientv3.Config{ Endpoints: addrs, DialTimeout: 5 * time.Second, }) diff --git a/executor/analyze.go b/executor/analyze.go index 9018d854761ac..d417870fe5558 100644 --- a/executor/analyze.go +++ b/executor/analyze.go @@ -240,7 +240,7 @@ func CollectSamplesAndEstimateNDVs(ctx context.Context, e ast.RecordSet, numCols return collectors, pkBuilder, nil } if pkInfo != nil { - err := pkBuilder.Iterate(row.Data) + err = pkBuilder.Iterate(row.Data) if err != nil { return nil, nil, errors.Trace(err) } diff --git a/executor/builder.go b/executor/builder.go index 4fe462e18077b..38ad04ffabc49 100644 --- a/executor/builder.go +++ b/executor/builder.go @@ -839,7 +839,7 @@ func (b *executorBuilder) buildTableScanForAnalyze(tblInfo *model.TableInfo, pk cols = append([]*model.ColumnInfo{pk}, cols...) } schema := expression.NewSchema(expression.ColumnInfos2Columns(tblInfo.Name, cols)...) 
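 // The composite literal below gains explicit field names so go vet's composites check passes.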
- ranges := []types.IntColumnRange{{math.MinInt64, math.MaxInt64}} + ranges := []types.IntColumnRange{{LowVal: math.MinInt64, HighVal: math.MaxInt64}} if b.ctx.GetClient().IsRequestTypeSupported(kv.ReqTypeDAG, kv.ReqSubTypeBasic) { e := &TableReaderExecutor{ table: table, diff --git a/executor/executor.go b/executor/executor.go index 29153489b597d..9137e7d38b636 100644 --- a/executor/executor.go +++ b/executor/executor.go @@ -443,7 +443,11 @@ func (e *SelectionExec) initController() error { } x.ranges = ranges case *XSelectIndexExec: - accessCondition, newConds, _, accessInAndEqCount := ranger.DetachIndexScanConditions(newConds, x.index) + var ( + accessCondition []expression.Expression + accessInAndEqCount int + ) + accessCondition, newConds, _, accessInAndEqCount = ranger.DetachIndexScanConditions(newConds, x.index) idxConds, tblConds := ranger.DetachIndexFilterConditions(newConds, x.index.Columns, x.tableInfo) x.indexConditionPBExpr, _, _ = expression.ExpressionsToPB(sc, idxConds, client) tableConditionPBExpr, _, _ := expression.ExpressionsToPB(sc, tblConds, client) diff --git a/executor/executor_test.go b/executor/executor_test.go index b277b81348d9a..5da8656dd08fa 100644 --- a/executor/executor_test.go +++ b/executor/executor_test.go @@ -404,7 +404,8 @@ func (s *testSuite) TestIssue2612(c *C) { c.Assert(err, IsNil) row, err := rs.Next() c.Assert(err, IsNil) - row.Data[0].GetMysqlDuration().String() + str := row.Data[0].GetMysqlDuration().String() + c.Assert(str, Equals, "-46:09:02") } // TestIssue345 is related with https://github.com/pingcap/tidb/issues/345 @@ -1236,7 +1237,7 @@ func (s *testSuite) TestBuiltin(c *C) { tk.MustExec("create table t (a varchar(255), b int)") for i, d := range data { tk.MustExec(fmt.Sprintf("insert into t values('%s', %d)", d.val, i)) - result := tk.MustQuery(fmt.Sprintf("select * from t where a %s '%s'", queryOp, d.pattern)) + result = tk.MustQuery(fmt.Sprintf("select * from t where a %s '%s'", queryOp, d.pattern)) if d.result == 1 { rowStr := fmt.Sprintf("%s %d", d.val, i) result.Check(testkit.Rows(rowStr)) diff --git a/executor/merge_join.go b/executor/merge_join.go index de857524aa183..7b0a3df6033f1 100644 --- a/executor/merge_join.go +++ b/executor/merge_join.go @@ -371,7 +371,8 @@ func (e *MergeJoinExec) computeCrossProduct() error { for _, lRow := range e.leftRows { // make up for outer join since we ignored single table conditions previously if e.leftFilter != nil { - matched, err := expression.EvalBool(e.leftFilter, lRow.Data, e.ctx) + var matched bool + matched, err = expression.EvalBool(e.leftFilter, lRow.Data, e.ctx) if err != nil { return errors.Trace(err) } diff --git a/executor/write.go b/executor/write.go index d81cb9dbf50a8..d856c62a12ce7 100644 --- a/executor/write.go +++ b/executor/write.go @@ -64,9 +64,9 @@ func updateRecord(ctx context.Context, h int64, oldData, newData []types.Datum, if newData[i].IsNull() { return false, errors.Errorf("Column '%v' cannot be null", col.Name.O) } - val, err := newData[i].ToInt64(sc) - if err != nil { - return false, errors.Trace(err) + val, errTI := newData[i].ToInt64(sc) + if errTI != nil { + return false, errors.Trace(errTI) } t.RebaseAutoID(val, true) } @@ -106,9 +106,9 @@ func updateRecord(ctx context.Context, h int64, oldData, newData []types.Datum, // Fill values into on-update-now fields, only if they are really changed. 
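 // That is, a column with ON UPDATE CURRENT_TIMESTAMP is refreshed only when this statement neither modified it nor assigned it explicitly.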
for i, col := range t.Cols() { if mysql.HasOnUpdateNowFlag(col.Flag) && !modified[i] && !onUpdateSpecified[i] { - v, err := expression.GetTimeValue(ctx, expression.CurrentTimestamp, col.Tp, col.Decimal) - if err != nil { - return false, errors.Trace(err) + v, errGT := expression.GetTimeValue(ctx, expression.CurrentTimestamp, col.Tp, col.Decimal) + if errGT != nil { + return false, errors.Trace(errGT) } newData[i] = v } diff --git a/expression/builtin_cast.go b/expression/builtin_cast.go index ec857584e6f5c..2393b446c2e94 100644 --- a/expression/builtin_cast.go +++ b/expression/builtin_cast.go @@ -367,7 +367,8 @@ func (b *builtinCastIntAsDecimalSig) evalDecimal(row []types.Datum) (res *types. if !mysql.HasUnsignedFlag(b.args[0].GetType().Flag) { res = types.NewDecFromInt(val) } else { - uVal, err := types.ConvertIntToUint(val, types.UnsignedUpperBound[mysql.TypeLonglong], mysql.TypeLonglong) + var uVal uint64 + uVal, err = types.ConvertIntToUint(val, types.UnsignedUpperBound[mysql.TypeLonglong], mysql.TypeLonglong) if err != nil { return res, false, errors.Trace(err) } @@ -390,7 +391,8 @@ func (b *builtinCastIntAsStringSig) evalString(row []types.Datum) (res string, i if !mysql.HasUnsignedFlag(b.args[0].GetType().Flag) { res = strconv.FormatInt(val, 10) } else { - uVal, err := types.ConvertIntToUint(val, types.UnsignedUpperBound[mysql.TypeLonglong], mysql.TypeLonglong) + var uVal uint64 + uVal, err = types.ConvertIntToUint(val, types.UnsignedUpperBound[mysql.TypeLonglong], mysql.TypeLonglong) if err != nil { return res, false, errors.Trace(err) } diff --git a/expression/builtin_compare.go b/expression/builtin_compare.go index 03aa9f3119354..8b8f2cfc4ae17 100644 --- a/expression/builtin_compare.go +++ b/expression/builtin_compare.go @@ -962,7 +962,8 @@ func (s *builtinCompareSig) eval(row []types.Datum) (d types.Datum, err error) { } if s.op != opcode.NullEQ { - if aa, bb, err := types.CoerceDatum(sc, a, b); err == nil { + var aa, bb types.Datum + if aa, bb, err = types.CoerceDatum(sc, a, b); err == nil { a = aa b = bb } diff --git a/expression/builtin_json_test.go b/expression/builtin_json_test.go index 2d22548ddb89b..2362ca551896e 100644 --- a/expression/builtin_json_test.go +++ b/expression/builtin_json_test.go @@ -94,10 +94,12 @@ func (s *testEvaluatorSuite) TestJSONExtract(c *C) { c.Assert(err, IsNil) switch x := t.Expected.(type) { case string: - j1, err := json.ParseFromString(x) + var j1 json.JSON + j1, err = json.ParseFromString(x) c.Assert(err, IsNil) j2 := d.GetMysqlJSON() - cmp, err := json.CompareJSON(j1, j2) + var cmp int + cmp, err = json.CompareJSON(j1, j2) c.Assert(err, IsNil) c.Assert(cmp, Equals, 0) } @@ -134,10 +136,12 @@ func (s *testEvaluatorSuite) TestJSONSetInsertReplace(c *C) { c.Assert(err, IsNil) switch x := t.Expected.(type) { case string: - j1, err := json.ParseFromString(x) + var j1 json.JSON + j1, err = json.ParseFromString(x) c.Assert(err, IsNil) j2 := d.GetMysqlJSON() - cmp, err := json.CompareJSON(j1, j2) + var cmp int + cmp, err = json.CompareJSON(j1, j2) c.Assert(err, IsNil) c.Assert(cmp, Equals, 0) } @@ -228,10 +232,12 @@ func (s *testEvaluatorSuite) TestJSONObject(c *C) { c.Assert(err, IsNil) switch x := t.Expected.(type) { case string: - j1, err := json.ParseFromString(x) + var j1 json.JSON + j1, err = json.ParseFromString(x) c.Assert(err, IsNil) j2 := d.GetMysqlJSON() - cmp, err := json.CompareJSON(j1, j2) + var cmp int + cmp, err = json.CompareJSON(j1, j2) c.Assert(err, IsNil) c.Assert(cmp, Equals, 0) } @@ -277,10 +283,12 @@ func (s 
*testEvaluatorSuite) TestJSONORemove(c *C) { c.Assert(err, IsNil) switch x := t.Expected.(type) { case string: - j1, err := json.ParseFromString(x) + var j1 json.JSON + j1, err = json.ParseFromString(x) c.Assert(err, IsNil) j2 := d.GetMysqlJSON() - cmp, err := json.CompareJSON(j1, j2) + var cmp int + cmp, err = json.CompareJSON(j1, j2) c.Assert(err, IsNil) c.Assert(cmp, Equals, 0) } diff --git a/expression/builtin_string.go b/expression/builtin_string.go index 41d46880ee7b5..6f75331ebc32f 100644 --- a/expression/builtin_string.go +++ b/expression/builtin_string.go @@ -1947,7 +1947,8 @@ func (b *builtinExportSetSig) eval(row []types.Datum) (d types.Datum, err error) ) switch len(args) { case 5: - arg, err := args[4].ToInt64(b.ctx.GetSessionVars().StmtCtx) + var arg int64 + arg, err = args[4].ToInt64(b.ctx.GetSessionVars().StmtCtx) if err != nil { return d, errors.Trace(err) } diff --git a/expression/helper.go b/expression/helper.go index 5003c2b3ff9b6..5cd635ca0ecf2 100644 --- a/expression/helper.go +++ b/expression/helper.go @@ -71,7 +71,7 @@ func getTimeValue(ctx context.Context, v interface{}, tp byte, fsp int) (d types if upperX == CurrentTimestamp { value.Time = types.FromGoTime(defaultTime) if tp == mysql.TypeTimestamp { - err := value.ConvertTimeZone(time.Local, ctx.GetSessionVars().GetTimeZone()) + err = value.ConvertTimeZone(time.Local, ctx.GetSessionVars().GetTimeZone()) if err != nil { return d, errors.Trace(err) } diff --git a/infoschema/builder.go b/infoschema/builder.go index acc75a64d0ff0..7bfee388d3446 100644 --- a/infoschema/builder.go +++ b/infoschema/builder.go @@ -153,10 +153,10 @@ func (b *Builder) applyCreateTable(m *meta.Meta, roDBInfo *model.DBInfo, tableID tableNames := b.is.schemaMap[roDBInfo.Name.L] tableNames.tables[tblInfo.Name.L] = tbl bucketIdx := tableBucketIdx(tableID) - sortedTables := b.is.sortedTablesBuckets[bucketIdx] - sortedTables = append(sortedTables, tbl) - sort.Sort(sortedTables) - b.is.sortedTablesBuckets[bucketIdx] = sortedTables + sortedTbls := b.is.sortedTablesBuckets[bucketIdx] + sortedTbls = append(sortedTbls, tbl) + sort.Sort(sortedTbls) + b.is.sortedTablesBuckets[bucketIdx] = sortedTbls newTbl, ok := b.is.TableByID(tableID) if ok { @@ -167,16 +167,16 @@ func (b *Builder) applyCreateTable(m *meta.Meta, roDBInfo *model.DBInfo, tableID func (b *Builder) applyDropTable(roDBInfo *model.DBInfo, tableID int64) { bucketIdx := tableBucketIdx(tableID) - sortedTables := b.is.sortedTablesBuckets[bucketIdx] - idx := sortedTables.searchTable(tableID) + sortedTbls := b.is.sortedTablesBuckets[bucketIdx] + idx := sortedTbls.searchTable(tableID) if idx == -1 { return } if tableNames, ok := b.is.schemaMap[roDBInfo.Name.L]; ok { - delete(tableNames.tables, sortedTables[idx].Meta().Name.L) + delete(tableNames.tables, sortedTbls[idx].Meta().Name.L) } // Remove the table in sorted table slice. - b.is.sortedTablesBuckets[bucketIdx] = append(sortedTables[0:idx], sortedTables[idx+1:]...) + b.is.sortedTablesBuckets[bucketIdx] = append(sortedTbls[0:idx], sortedTbls[idx+1:]...) // The old DBInfo still holds a reference to old table info, we need to remove it. 
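 // Dropping the entry from roDBInfo.Tables keeps a rebuilt schema from resurrecting the dropped table.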
for i, tblInfo := range roDBInfo.Tables { @@ -256,8 +256,8 @@ func (b *Builder) createSchemaTablesForDB(di *model.DBInfo) error { return errors.Trace(err) } schTbls.tables[t.Name.L] = tbl - sortedTables := b.is.sortedTablesBuckets[tableBucketIdx(t.ID)] - b.is.sortedTablesBuckets[tableBucketIdx(t.ID)] = append(sortedTables, tbl) + sortedTbls := b.is.sortedTablesBuckets[tableBucketIdx(t.ID)] + b.is.sortedTablesBuckets[tableBucketIdx(t.ID)] = append(sortedTbls, tbl) } return nil } diff --git a/kv/fault_injection_test.go b/kv/fault_injection_test.go index babac20b3a8f9..2bae33a47eed7 100644 --- a/kv/fault_injection_test.go +++ b/kv/fault_injection_test.go @@ -33,7 +33,7 @@ func (s testFaultInjectionSuite) TestFaultInjectionBasic(c *C) { c.Assert(err, IsNil) _, err = storage.BeginWithStartTS(0) c.Assert(err, IsNil) - ver := kv.Version{1} + ver := kv.Version{Ver: 1} snap, err := storage.GetSnapshot(ver) c.Assert(err, IsNil) b, err := txn.Get([]byte{'a'}) diff --git a/perfschema/perfschema_test.go b/perfschema/perfschema_test.go index 1a9ba0979fab1..9ef06de08d6cb 100644 --- a/perfschema/perfschema_test.go +++ b/perfschema/perfschema_test.go @@ -193,7 +193,8 @@ func mustQuery(c *C, se tidb.Session, s string) int { _, err := r.Fields() c.Assert(err, IsNil) cnt := 0 - for row, err := r.Next(); row != nil && err == nil; row, err = r.Next() { + var row *ast.Row + for row, err = r.Next(); row != nil && err == nil; row, err = r.Next() { cnt++ } c.Assert(err, IsNil) diff --git a/plan/expression_rewriter.go b/plan/expression_rewriter.go index 6baca080bc25f..ecd992bad770c 100644 --- a/plan/expression_rewriter.go +++ b/plan/expression_rewriter.go @@ -190,11 +190,12 @@ func (er *expressionRewriter) constructBinaryOpFunction(l expression.Expression, expr1, _ = expression.NewFunction(er.ctx, ast.NE, types.NewFieldType(mysql.TypeTiny), larg0, rarg0) expr2, _ = expression.NewFunction(er.ctx, op, types.NewFieldType(mysql.TypeTiny), larg0, rarg0) } - l, err := popRowArg(er.ctx, l) + var err error + l, err = popRowArg(er.ctx, l) if err != nil { return nil, errors.Trace(err) } - r, err := popRowArg(er.ctx, r) + r, err = popRowArg(er.ctx, r) if err != nil { return nil, errors.Trace(err) } diff --git a/plan/new_physical_plan_builder.go b/plan/new_physical_plan_builder.go index 33cc175889934..466bbb2b7ff36 100644 --- a/plan/new_physical_plan_builder.go +++ b/plan/new_physical_plan_builder.go @@ -447,12 +447,12 @@ func (p *TopN) generatePhysicalPlans() []PhysicalPlan { // If this sort is a topN plan, we will try to push the sort down and leave the limit. // TODO: If this is a sort plan and the coming prop is not nil, this plan is redundant and can be removed. func (p *Sort) convert2NewPhysicalPlan(prop *requiredProp) (task, error) { - task, err := p.getTask(prop) + t, err := p.getTask(prop) if err != nil { return nil, errors.Trace(err) } - if task != nil { - return task, nil + if t != nil { + return t, nil } if prop.taskTp != rootTaskType { // TODO: This is a trick here, because an operator that can be pushed to Coprocessor can never be pushed across sort. 
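The task-to-t renames in this file are not cosmetic: task is also the name of the work-unit type returned by these functions, and a variable named task shadows that type for the rest of the scope. A compilable sketch, assuming a simplified one-method interface in place of the real plan.task:

package plan

// task is a simplified stand-in for the optimizer's work-unit interface.
type task interface {
	cost() float64
}

type rootTask struct{ c float64 }

func (t *rootTask) cost() float64 { return t.c }

// cheaper picks the lower-cost task. Naming the variable `t` keeps the
// type name usable; `task := a` would shadow the type, and a later
// `var other task` in the same scope would no longer compile.
func cheaper(a, b task) task {
	t := a
	if b.cost() < t.cost() {
		t = b
	}
	return t
}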
@@ -462,11 +462,11 @@ func (p *Sort) convert2NewPhysicalPlan(prop *requiredProp) (task, error) { return invalidTask, p.storeTask(prop, invalidTask) } // enforce branch - task, err = p.children[0].(LogicalPlan).convert2NewPhysicalPlan(&requiredProp{taskTp: rootTaskType, expectedCnt: math.MaxFloat64}) + t, err = p.children[0].(LogicalPlan).convert2NewPhysicalPlan(&requiredProp{taskTp: rootTaskType, expectedCnt: math.MaxFloat64}) if err != nil { return nil, errors.Trace(err) } - task = p.attach2Task(task) + t = p.attach2Task(t) newProp, canPassProp := getPropByOrderByItems(p.ByItems) if canPassProp { newProp.expectedCnt = prop.expectedCnt @@ -474,52 +474,52 @@ func (p *Sort) convert2NewPhysicalPlan(prop *requiredProp) (task, error) { if err != nil { return nil, errors.Trace(err) } - if orderedTask.cost() < task.cost() { - task = orderedTask + if orderedTask.cost() < t.cost() { + t = orderedTask } } - task = prop.enforceProperty(task, p.ctx, p.allocator) - return task, p.storeTask(prop, task) + t = prop.enforceProperty(t, p.ctx, p.allocator) + return t, p.storeTask(prop, t) } // convert2NewPhysicalPlan implements LogicalPlan interface. func (p *baseLogicalPlan) convert2NewPhysicalPlan(prop *requiredProp) (task, error) { // look up the task map - task, err := p.getTask(prop) + t, err := p.getTask(prop) if err != nil { return nil, errors.Trace(err) } - if task != nil { - return task, nil + if t != nil { + return t, nil } - task = invalidTask + t = invalidTask if prop.taskTp != rootTaskType { // Currently all plan cannot totally push down. - return task, p.storeTask(prop, task) + return t, p.storeTask(prop, t) } // Now we only consider rootTask. if len(p.basePlan.children) == 0 { // When the children length is 0, we process it specially. - task = &rootTask{p: p.basePlan.self.(PhysicalPlan)} - task = prop.enforceProperty(task, p.basePlan.ctx, p.basePlan.allocator) - return task, p.storeTask(prop, task) + t = &rootTask{p: p.basePlan.self.(PhysicalPlan)} + t = prop.enforceProperty(t, p.basePlan.ctx, p.basePlan.allocator) + return t, p.storeTask(prop, t) } // Else we suppose it only has one child. for _, pp := range p.basePlan.self.(LogicalPlan).generatePhysicalPlans() { // We consider to add enforcer firstly. - task, err = p.getBestTask(task, prop, pp, true) + t, err = p.getBestTask(t, prop, pp, true) if err != nil { return nil, errors.Trace(err) } if prop.isEmpty() { continue } - task, err = p.getBestTask(task, prop, pp, false) + t, err = p.getBestTask(t, prop, pp, false) if err != nil { return nil, errors.Trace(err) } } - return task, p.storeTask(prop, task) + return t, p.storeTask(prop, t) } func (p *baseLogicalPlan) getBestTask(bestTask task, prop *requiredProp, pp PhysicalPlan, enforced bool) (task, error) { @@ -553,13 +553,13 @@ func tryToAddUnionScan(cop *copTask, conds []expression.Expression, ctx context. if ctx.Txn() == nil || ctx.Txn().IsReadOnly() { return cop } - task := finishCopTask(cop, ctx, allocator) + t := finishCopTask(cop, ctx, allocator) us := PhysicalUnionScan{ Conditions: conds, }.init(allocator, ctx) - us.SetSchema(task.plan().Schema()) - us.profile = task.plan().statsProfile() - return us.attach2Task(task) + us.SetSchema(t.plan().Schema()) + us.profile = t.plan().statsProfile() + return us.attach2Task(t) } // tryToGetMemTask will check if this table is a mem table. If it is, it will produce a task and store it. @@ -618,32 +618,32 @@ func (p *DataSource) tryToGetDualTask() (task, error) { // convert2NewPhysicalPlan implements the PhysicalPlan interface. 
// It will enumerate all the available indices and choose a plan with least cost. func (p *DataSource) convert2NewPhysicalPlan(prop *requiredProp) (task, error) { - task, err := p.getTask(prop) + t, err := p.getTask(prop) if err != nil { return nil, errors.Trace(err) } - if task != nil { - return task, nil + if t != nil { + return t, nil } - task, err = p.tryToGetDualTask() + t, err = p.tryToGetDualTask() if err != nil { return nil, errors.Trace(err) } - if task != nil { - return task, p.storeTask(prop, task) + if t != nil { + return t, p.storeTask(prop, t) } - task, err = p.tryToGetMemTask(prop) + t, err = p.tryToGetMemTask(prop) if err != nil { return nil, errors.Trace(err) } - if task != nil { - return task, p.storeTask(prop, task) + if t != nil { + return t, p.storeTask(prop, t) } // TODO: We have not checked if this table has a predicate. If not, we can only consider table scan. indices, includeTableScan := availableIndices(p.indexHints, p.tableInfo) - task = invalidTask + t = invalidTask if includeTableScan { - task, err = p.convertToTableScan(prop) + t, err = p.convertToTableScan(prop) if err != nil { return nil, errors.Trace(err) } @@ -653,11 +653,11 @@ func (p *DataSource) convert2NewPhysicalPlan(prop *requiredProp) (task, error) { if err != nil { return nil, errors.Trace(err) } - if idxTask.cost() < task.cost() { - task = idxTask + if idxTask.cost() < t.cost() { + t = idxTask } } - return task, p.storeTask(prop, task) + return t, p.storeTask(prop, t) } // convertToIndexScan converts the DataSource to index scan with idx. diff --git a/plan/optimizer.go b/plan/optimizer.go index 89a42ed8a77cb..06fa5fda22379 100644 --- a/plan/optimizer.go +++ b/plan/optimizer.go @@ -156,11 +156,11 @@ func logicalOptimize(flag uint64, logic LogicalPlan, ctx context.Context, alloc func dagPhysicalOptimize(logic LogicalPlan) (PhysicalPlan, error) { logic.preparePossibleProperties() logic.prepareStatsProfile() - task, err := logic.convert2NewPhysicalPlan(&requiredProp{taskTp: rootTaskType, expectedCnt: math.MaxFloat64}) + t, err := logic.convert2NewPhysicalPlan(&requiredProp{taskTp: rootTaskType, expectedCnt: math.MaxFloat64}) if err != nil { return nil, errors.Trace(err) } - p := task.plan() + p := t.plan() p.ResolveIndices() return p, nil } diff --git a/plan/physical_plan_builder.go b/plan/physical_plan_builder.go index 70ab8638a6c78..adf5f198d0bcc 100644 --- a/plan/physical_plan_builder.go +++ b/plan/physical_plan_builder.go @@ -65,7 +65,7 @@ func (p *DataSource) convert2TableScan(prop *requiredProperty) (*physicalPlanInf resultPlan = ts table := p.tableInfo sc := p.ctx.GetSessionVars().StmtCtx - ts.Ranges = []types.IntColumnRange{{math.MinInt64, math.MaxInt64}} + ts.Ranges = []types.IntColumnRange{{LowVal: math.MinInt64, HighVal: math.MaxInt64}} if len(p.parents) > 0 { if sel, ok := p.parents[0].(*Selection); ok { newSel := sel.Copy().(*Selection) @@ -976,7 +976,8 @@ func (p *LogicalJoin) convert2PhysicalPlan(prop *requiredProperty) (*physicalPla } } - nljInfo, err := p.convert2IndexNestedLoopJoinLeft(prop, false) + var nljInfo *physicalPlanInfo + nljInfo, err = p.convert2IndexNestedLoopJoinLeft(prop, false) if err != nil { return nil, errors.Trace(err) } @@ -999,7 +1000,8 @@ func (p *LogicalJoin) convert2PhysicalPlan(prop *requiredProperty) (*physicalPla break } } - nljInfo, err := p.convert2IndexNestedLoopJoinRight(prop, false) + var nljInfo *physicalPlanInfo + nljInfo, err = p.convert2IndexNestedLoopJoinRight(prop, false) if err != nil { return nil, errors.Trace(err) } diff --git a/plan/task.go 
b/plan/task.go index 1e90230908c87..c4906482fa3cc 100644 --- a/plan/task.go +++ b/plan/task.go @@ -110,8 +110,8 @@ func (t *copTask) finishIndexPlan() { } func (p *basePhysicalPlan) attach2Task(tasks ...task) task { - task := finishCopTask(tasks[0].copy(), p.basePlan.ctx, p.basePlan.allocator) - return attachPlan2Task(p.basePlan.self.(PhysicalPlan).Copy(), task) + t := finishCopTask(tasks[0].copy(), p.basePlan.ctx, p.basePlan.allocator) + return attachPlan2Task(p.basePlan.self.(PhysicalPlan).Copy(), t) } func (p *PhysicalApply) attach2Task(tasks ...task) task { @@ -270,8 +270,8 @@ func (p *Limit) attach2Task(tasks ...task) task { if tasks[0].plan() == nil { return tasks[0] } - task := tasks[0].copy() - if cop, ok := task.(*copTask); ok { + t := tasks[0].copy() + if cop, ok := t.(*copTask); ok { // If the task is copTask, the Limit can always be pushed down. // When limit be pushed down, it should remove its offset. pushedDownLimit := Limit{Count: p.Offset + p.Count}.init(p.allocator, p.ctx) @@ -282,12 +282,12 @@ func (p *Limit) attach2Task(tasks ...task) task { pushedDownLimit.SetSchema(cop.indexPlan.Schema()) } cop = attachPlan2Task(pushedDownLimit, cop).(*copTask) - task = finishCopTask(cop, p.ctx, p.allocator) + t = finishCopTask(cop, p.ctx, p.allocator) } if !p.partial { - task = attachPlan2Task(p.Copy(), task) + t = attachPlan2Task(p.Copy(), t) } - return task + return t } func (p *Sort) getCost(count float64) float64 { @@ -317,11 +317,11 @@ func (p *TopN) allColsFromSchema(schema *expression.Schema) bool { } func (p *Sort) attach2Task(tasks ...task) task { - task := tasks[0].copy() - task = finishCopTask(task, p.ctx, p.allocator) - task = attachPlan2Task(p.Copy(), task) - task.addCost(p.getCost(task.count())) - return task + t := tasks[0].copy() + t = finishCopTask(t, p.ctx, p.allocator) + t = attachPlan2Task(p.Copy(), t) + t.addCost(p.getCost(t.count())) + return t } func (p *TopN) attach2Task(tasks ...task) task { @@ -329,9 +329,9 @@ func (p *TopN) attach2Task(tasks ...task) task { if tasks[0].plan() == nil { return tasks[0] } - task := tasks[0].copy() + t := tasks[0].copy() // This is a topN plan. - if copTask, ok := task.(*copTask); ok && p.canPushDown() { + if copTask, ok := t.(*copTask); ok && p.canPushDown() { pushedDownTopN := p.Copy().(*TopN) // When topN is pushed down, it should remove its offset. pushedDownTopN.Count, pushedDownTopN.Offset = p.Count+p.Offset, 0 @@ -349,27 +349,27 @@ func (p *TopN) attach2Task(tasks ...task) task { copTask.tablePlan = pushedDownTopN pushedDownTopN.SetSchema(copTask.tablePlan.Schema()) } - copTask.addCost(pushedDownTopN.getCost(task.count())) + copTask.addCost(pushedDownTopN.getCost(t.count())) } - task = finishCopTask(task, p.ctx, p.allocator) + t = finishCopTask(t, p.ctx, p.allocator) if !p.partial { - task = attachPlan2Task(p.Copy(), task) - task.addCost(p.getCost(task.count())) + t = attachPlan2Task(p.Copy(), t) + t.addCost(p.getCost(t.count())) } - return task + return t } func (p *Projection) attach2Task(tasks ...task) task { - task := tasks[0].copy() + t := tasks[0].copy() np := p.Copy() - switch t := task.(type) { + switch tp := t.(type) { case *copTask: // TODO: Support projection push down. 
- task := finishCopTask(task, p.ctx, p.allocator) - task = attachPlan2Task(np, task) - return task + t = finishCopTask(t, p.ctx, p.allocator) + t = attachPlan2Task(np, t) + return t case *rootTask: - return attachPlan2Task(np, t) + return attachPlan2Task(np, tp) } return nil } @@ -388,10 +388,10 @@ func (p *Union) attach2Task(tasks ...task) task { } func (sel *Selection) attach2Task(tasks ...task) task { - task := finishCopTask(tasks[0].copy(), sel.ctx, sel.allocator) - task.addCost(task.count() * cpuFactor) - task = attachPlan2Task(sel.Copy(), task) - return task + t := finishCopTask(tasks[0].copy(), sel.ctx, sel.allocator) + t.addCost(t.count() * cpuFactor) + t = attachPlan2Task(sel.Copy(), t) + return t } func (p *PhysicalAggregation) newPartialAggregate() (partialAgg, finalAgg *PhysicalAggregation) { diff --git a/server/region_handler_test.go b/server/region_handler_test.go index 582b57a9da88c..724525ae56e96 100644 --- a/server/region_handler_test.go +++ b/server/region_handler_test.go @@ -45,9 +45,9 @@ func (ts *TidbRegionHandlerTestSuite) TestRegionIndexRange(c *C) { startKey := codec.EncodeBytes(nil, tablecodec.EncodeTableIndexPrefix(sTableID, sIndex)) endKey := codec.EncodeBytes(nil, tablecodec.GenTableRecordPrefix(eTableID)) region := &tikv.KeyLocation{ - tikv.RegionVerID{}, - startKey, - endKey, + Region: tikv.RegionVerID{}, + StartKey: startKey, + EndKey: endKey, } indexRange, err := NewRegionFrameRange(region) c.Assert(err, IsNil) @@ -70,9 +70,9 @@ func (ts *TidbRegionHandlerTestSuite) TestRegionIndexRangeWithEndNoLimit(c *C) { startKey := codec.EncodeBytes(nil, tablecodec.GenTableRecordPrefix(sTableID)) endKey := codec.EncodeBytes(nil, []byte("z_aaaaafdfd")) region := &tikv.KeyLocation{ - tikv.RegionVerID{}, - startKey, - endKey, + Region: tikv.RegionVerID{}, + StartKey: startKey, + EndKey: endKey, } indexRange, err := NewRegionFrameRange(region) c.Assert(err, IsNil) @@ -95,9 +95,9 @@ func (ts *TidbRegionHandlerTestSuite) TestRegionIndexRangeWithStartNoLimit(c *C) startKey := codec.EncodeBytes(nil, []byte("m_aaaaafdfd")) endKey := codec.EncodeBytes(nil, tablecodec.GenTableRecordPrefix(eTableID)) region := &tikv.KeyLocation{ - tikv.RegionVerID{}, - startKey, - endKey, + Region: tikv.RegionVerID{}, + StartKey: startKey, + EndKey: endKey, } indexRange, err := NewRegionFrameRange(region) c.Assert(err, IsNil) diff --git a/sessionctx/varsutil/varsutil_test.go b/sessionctx/varsutil/varsutil_test.go index 59d8eac1b95c8..c873e9f3360e4 100644 --- a/sessionctx/varsutil/varsutil_test.go +++ b/sessionctx/varsutil/varsutil_test.go @@ -118,7 +118,7 @@ func (s *testVarsutilSuite) TestVarsutil(c *C) { {"-6:00", "UTC", true, 6 * time.Hour}, } for _, tt := range tests { - err := SetSessionSystemVar(v, variable.TimeZone, types.NewStringDatum(tt.input)) + err = SetSessionSystemVar(v, variable.TimeZone, types.NewStringDatum(tt.input)) c.Assert(err, IsNil) c.Assert(v.TimeZone.String(), Equals, tt.expect) if tt.compareValue { diff --git a/statistics/ddl.go b/statistics/ddl.go index 16686b1c6c0db..856d1c2185af2 100644 --- a/statistics/ddl.go +++ b/statistics/ddl.go @@ -18,6 +18,7 @@ import ( "github.com/juju/errors" "github.com/ngaut/log" + "github.com/pingcap/tidb/ast" "github.com/pingcap/tidb/ddl" "github.com/pingcap/tidb/model" "github.com/pingcap/tidb/mysql" @@ -118,11 +119,13 @@ func (h *Handle) insertColStats2KV(tableID int64, colInfo *model.ColumnInfo) err if h.ctx.GetSessionVars().StmtCtx.AffectedRows() > 0 { exec := h.ctx.(sqlexec.SQLExecutor) // By this step we can get the count of this 
table, then we can sure the count and repeats of bucket. - rs, err := exec.Execute(fmt.Sprintf("select count from mysql.stats_meta where table_id = %d", tableID)) + var rs []ast.RecordSet + rs, err = exec.Execute(fmt.Sprintf("select count from mysql.stats_meta where table_id = %d", tableID)) if err != nil { return errors.Trace(err) } - row, err := rs[0].Next() + var row *ast.Row + row, err = rs[0].Next() if err != nil { return errors.Trace(err) } diff --git a/statistics/histogram.go b/statistics/histogram.go index 7020c4d59c6d1..9b47122ab8816 100644 --- a/statistics/histogram.go +++ b/statistics/histogram.go @@ -87,11 +87,13 @@ func (hg *Histogram) SaveToStorage(ctx context.Context, tableID int64, count int } else { count = bucket.Count - hg.Buckets[i-1].Count } - upperBound, err := bucket.UpperBound.ConvertTo(ctx.GetSessionVars().StmtCtx, types.NewFieldType(mysql.TypeBlob)) + var upperBound types.Datum + upperBound, err = bucket.UpperBound.ConvertTo(ctx.GetSessionVars().StmtCtx, types.NewFieldType(mysql.TypeBlob)) if err != nil { return errors.Trace(err) } - lowerBound, err := bucket.LowerBound.ConvertTo(ctx.GetSessionVars().StmtCtx, types.NewFieldType(mysql.TypeBlob)) + var lowerBound types.Datum + lowerBound, err = bucket.LowerBound.ConvertTo(ctx.GetSessionVars().StmtCtx, types.NewFieldType(mysql.TypeBlob)) if err != nil { return errors.Trace(err) } @@ -389,7 +391,8 @@ func (c *Column) getColumnRowCount(sc *variable.StatementContext, ranges []*type if cmp == 0 { // the point case. if !rg.LowExcl && !rg.HighExcl { - cnt, err := c.equalRowCount(sc, rg.Low) + var cnt float64 + cnt, err = c.equalRowCount(sc, rg.Low) if err != nil { return 0, errors.Trace(err) } diff --git a/store/tikv/2pc.go b/store/tikv/2pc.go index f404ab81e685c..865cc8071e9c0 100644 --- a/store/tikv/2pc.go +++ b/store/tikv/2pc.go @@ -580,7 +580,7 @@ func (c *twoPhaseCommitter) execute() error { return errors.Trace(err) } c.commitTS = commitTS - if err := c.checkSchemaValid(); err != nil { + if err = c.checkSchemaValid(); err != nil { return errors.Trace(err) } diff --git a/store/tikv/mock-tikv/cop_handler_dag.go b/store/tikv/mock-tikv/cop_handler_dag.go index a014ea9dd6b7b..1044846534755 100644 --- a/store/tikv/mock-tikv/cop_handler_dag.go +++ b/store/tikv/mock-tikv/cop_handler_dag.go @@ -65,7 +65,11 @@ func (h *rpcHandler) handleCopDAGRequest(req *coprocessor.Request) (*coprocessor } var chunks []tipb.Chunk for { - handle, row, err := e.Next() + var ( + handle int64 + row [][]byte + ) + handle, row, err = e.Next() if err != nil { return nil, errors.Trace(err) } @@ -185,7 +189,8 @@ func (h *rpcHandler) buildAggregation(ctx *dagContext, executor *tipb.Executor) var err error var relatedColOffsets []int for _, expr := range executor.Aggregation.AggFunc { - aggExpr, err := expression.NewDistAggFunc(expr, ctx.evalCtx.fieldTps, ctx.evalCtx.sc) + var aggExpr expression.AggregationFunction + aggExpr, err = expression.NewDistAggFunc(expr, ctx.evalCtx.fieldTps, ctx.evalCtx.sc) if err != nil { return nil, errors.Trace(err) } diff --git a/table/tables/tables.go b/table/tables/tables.go index cc3be4c473d28..e66f9881cf63f 100644 --- a/table/tables/tables.go +++ b/table/tables/tables.go @@ -239,9 +239,9 @@ func (t *Table) UpdateRecord(ctx context.Context, h int64, oldData, newData []ty if err != nil { return errors.Trace(err) } - cmp, err := oldData[col.Offset].CompareDatum(ctx.GetSessionVars().StmtCtx, value) - if err != nil { - return errors.Trace(err) + cmp, errCmp := oldData[col.Offset].CompareDatum(ctx.GetSessionVars().StmtCtx, 
value) + if errCmp != nil { + return errors.Trace(errCmp) } if cmp != 0 { value = oldData[col.Offset] diff --git a/util/filesort/filesort.go b/util/filesort/filesort.go index e31684f323a30..2875ce084e303 100644 --- a/util/filesort/filesort.go +++ b/util/filesort/filesort.go @@ -384,12 +384,12 @@ func (fs *FileSorter) externalSort() (*comparableRow, error) { return nil, errors.Trace(err) } if row != nil { - im := &item{ + nextIm := &item{ index: im.index, value: row, } - heap.Push(fs.rowHeap, im) + heap.Push(fs.rowHeap, nextIm) if fs.rowHeap.err != nil { return nil, errors.Trace(fs.rowHeap.err) } diff --git a/util/types/datum.go b/util/types/datum.go index b1ec6e2c615fb..96b6ec4fe2f88 100644 --- a/util/types/datum.go +++ b/util/types/datum.go @@ -1609,7 +1609,8 @@ func CoerceDatum(sc *variable.StatementContext, a, b Datum) (x, y Datum, err err case KindMysqlSet: x.SetFloat64(x.GetMysqlSet().ToNumber()) case KindMysqlDecimal: - fval, err := x.ToFloat64(sc) + var fval float64 + fval, err = x.ToFloat64(sc) if err != nil { return x, y, errors.Trace(err) } @@ -1629,7 +1630,8 @@ func CoerceDatum(sc *variable.StatementContext, a, b Datum) (x, y Datum, err err case KindMysqlSet: y.SetFloat64(y.GetMysqlSet().ToNumber()) case KindMysqlDecimal: - fval, err := y.ToFloat64(sc) + var fval float64 + fval, err = y.ToFloat64(sc) if err != nil { return x, y, errors.Trace(err) } diff --git a/util/types/datum_eval.go b/util/types/datum_eval.go index 3a77890f43650..e5fffa13f461b 100644 --- a/util/types/datum_eval.go +++ b/util/types/datum_eval.go @@ -405,7 +405,8 @@ func decimal2RoundUint(x *MyDecimal) (uint64, error) { err error ) if roundX.IsNegative() { - intX, err := roundX.ToInt() + var intX int64 + intX, err = roundX.ToInt() if err != nil && err != ErrTruncated { return 0, errors.Trace(err) } diff --git a/util/types/datum_test.go b/util/types/datum_test.go index 88cf8b3fd5d79..79ab6d68d263d 100644 --- a/util/types/datum_test.go +++ b/util/types/datum_test.go @@ -221,10 +221,12 @@ func (ts *testDatumSuite) TestToJSON(c *C) { c.Assert(err, IsNil) sd := NewStringDatum(tt.expected) - expected, err := sd.ConvertTo(sc, ft) + var expected Datum + expected, err = sd.ConvertTo(sc, ft) c.Assert(err, IsNil) - cmp, err := obtain.CompareDatum(sc, expected) + var cmp int + cmp, err = obtain.CompareDatum(sc, expected) c.Assert(err, IsNil) c.Assert(cmp, Equals, 0) } else { diff --git a/util/types/json/functions_test.go b/util/types/json/functions_test.go index 520c395885c1d..545cd7b842913 100644 --- a/util/types/json/functions_test.go +++ b/util/types/json/functions_test.go @@ -215,7 +215,8 @@ func (s *testJSONSuite) TestJSONModify(c *C) { obtain, err := base.Modify([]PathExpression{pathExpr}, []JSON{value}, tt.mt) if tt.success { c.Assert(err, IsNil) - cmp, err := CompareJSON(obtain, expected) + var cmp int + cmp, err = CompareJSON(obtain, expected) c.Assert(err, IsNil) c.Assert(cmp, Equals, 0) } else { diff --git a/util/types/json/normalize.go b/util/types/json/normalize.go index c39794e4806e0..8e90cad60cb68 100644 --- a/util/types/json/normalize.go +++ b/util/types/json/normalize.go @@ -44,7 +44,7 @@ func normalize(in interface{}) (j JSON, err error) { j.typeCode = typeCodeFloat64 *(*float64)(unsafe.Pointer(&j.i64)) = t case json.Number: - if i64, err := t.Int64(); err == nil { + if i64, errTp := t.Int64(); errTp == nil { j.typeCode = typeCodeInt64 j.i64 = i64 } else { @@ -75,7 +75,8 @@ func normalize(in interface{}) (j JSON, err error) { j.typeCode = typeCodeArray j.array = make([]JSON, 0, len(t)) for _, elem := 
range t { - elem1, err := normalize(elem) + var elem1 JSON + elem1, err = normalize(elem) if err != nil { return j, err } From 2c4a46e2f0b1b7000519d83778444553db2fd5c2 Mon Sep 17 00:00:00 2001 From: Jian Zhang Date: Mon, 31 Jul 2017 14:28:04 +0800 Subject: [PATCH 07/10] statistics: return 1 when statistics is not ready and row count of table is 0 (#3952) --- plan/cbo_test.go | 4 ++++ statistics/selectivity.go | 2 +- 2 files changed, 5 insertions(+), 1 deletion(-) diff --git a/plan/cbo_test.go b/plan/cbo_test.go index 44f690d84a0ea..32e40a9df039a 100644 --- a/plan/cbo_test.go +++ b/plan/cbo_test.go @@ -147,6 +147,10 @@ func (s *testAnalyzeSuite) TestEmptyTable(c *C) { sql: "select * from t, t1 where t.c1 = t1.c1", best: "LeftHashJoin{TableReader(Table(t))->TableReader(Table(t1))}(test.t.c1,test.t1.c1)", }, + { + sql: "select * from t limit 0", + best: "TableReader(Table(t)->Limit)->Limit", + }, } for _, tt := range tests { ctx := testKit.Se.(context.Context) diff --git a/statistics/selectivity.go b/statistics/selectivity.go index 70f66a468720a..abd0f1eafc608 100644 --- a/statistics/selectivity.go +++ b/statistics/selectivity.go @@ -84,7 +84,7 @@ func pseudoSelectivity(exprs []expression.Expression) float64 { // Currently the time complexity is o(n^2). func (t *Table) Selectivity(ctx context.Context, exprs []expression.Expression) (float64, error) { if t.Count == 0 { - return 0, nil + return 1, nil } // TODO: If len(exprs) is bigger than 63, we could use bitset structure to replace the int64. // This will simplify some code and speed up if we use this rather than a boolean slice. From f283948ae904fc5bd690e63aa4e4ddd436e97cd9 Mon Sep 17 00:00:00 2001 From: Han Fei Date: Mon, 31 Jul 2017 15:49:14 +0800 Subject: [PATCH 08/10] plan: fix the order of schema columns (#3946) --- executor/join_test.go | 1 + plan/new_physical_plan_builder.go | 1 + plan/optimizer.go | 1 + plan/physical_plans.go | 42 +++++++++++++++++++++++++++++++ 4 files changed, 45 insertions(+) diff --git a/executor/join_test.go b/executor/join_test.go index 9e809de99ddd6..03937228ed130 100644 --- a/executor/join_test.go +++ b/executor/join_test.go @@ -226,6 +226,7 @@ func (s *testSuite) TestJoin(c *C) { tk.MustExec("insert into t values(1, 3), (2, 2), (3, 1)") tk.MustExec("insert into t1 values(1, 2), (1, 3), (3, 4)") tk.MustQuery("select /*+ TIDB_INLJ(t) */ * from t join t1 on t.a=t1.a order by t.b").Check(testkit.Rows("3 1 3 4", "1 3 1 2", "1 3 1 3")) + tk.MustQuery("select /*+ TIDB_INLJ(t1) */ t.a, t.b from t join t1 on t.a=t1.a where t1.b = 4 limit 1").Check(testkit.Rows("3 1")) } diff --git a/plan/new_physical_plan_builder.go b/plan/new_physical_plan_builder.go index 466bbb2b7ff36..2bdf1003eba84 100644 --- a/plan/new_physical_plan_builder.go +++ b/plan/new_physical_plan_builder.go @@ -124,6 +124,7 @@ func (p *LogicalJoin) constructIndexJoin(innerJoinKeys, outerJoinKeys []*express DefaultValues: p.DefaultValues, outerSchema: p.children[outerIdx].Schema(), }.init(p.allocator, p.ctx, p.children[outerIdx], p.children[1-outerIdx]) + join.SetSchema(expression.MergeSchema(p.children[outerIdx].Schema(), p.children[1-outerIdx].Schema())) join.profile = p.profile orderJoin := join.Copy().(*PhysicalIndexJoin) orderJoin.KeepOrder = true diff --git a/plan/optimizer.go b/plan/optimizer.go index 06fa5fda22379..d608096bb9b5f 100644 --- a/plan/optimizer.go +++ b/plan/optimizer.go @@ -161,6 +161,7 @@ func dagPhysicalOptimize(logic LogicalPlan) (PhysicalPlan, error) { return nil, errors.Trace(err) } p := t.plan() + rebuildSchema(p) 
p.ResolveIndices() return p, nil } diff --git a/plan/physical_plans.go b/plan/physical_plans.go index 5ab906ffdb527..f30139baecbcb 100644 --- a/plan/physical_plans.go +++ b/plan/physical_plans.go @@ -1134,3 +1134,45 @@ func (p *Cache) Copy() PhysicalPlan { np.basePhysicalPlan = newBasePhysicalPlan(np.basePlan) return &np } + +func buildSchema(p PhysicalPlan) { + switch x := p.(type) { + case *Limit, *TopN, *Sort, *Selection, *MaxOneRow, *SelectLock: + p.SetSchema(p.Children()[0].Schema()) + case *PhysicalHashJoin, *PhysicalMergeJoin, *PhysicalIndexJoin: + p.SetSchema(expression.MergeSchema(p.Children()[0].Schema(), p.Children()[1].Schema())) + case *PhysicalApply: + buildSchema(x.PhysicalJoin) + x.schema = x.PhysicalJoin.Schema() + case *PhysicalHashSemiJoin: + if x.WithAux { + auxCol := x.schema.Columns[x.Schema().Len()-1] + x.SetSchema(x.children[0].Schema().Clone()) + x.schema.Append(auxCol) + } else { + x.SetSchema(x.children[0].Schema().Clone()) + } + case *Union: + panic("Union shouldn't rebuild schema") + } +} + +// rebuildSchema rebuilds the schema for physical plans, because the new planner may change the index join's schema. +func rebuildSchema(p PhysicalPlan) bool { + need2Rebuild := false + for _, ch := range p.Children() { + need2Rebuild = need2Rebuild || rebuildSchema(ch.(PhysicalPlan)) + } + if need2Rebuild { + buildSchema(p) + } + switch x := p.(type) { + case *PhysicalIndexJoin: + if x.outerIndex == 1 { + need2Rebuild = true + } + case *Projection, *PhysicalAggregation: + need2Rebuild = false + } + return need2Rebuild +} From c8d31054b77476e80425ba0a644f85580a150f02 Mon Sep 17 00:00:00 2001 From: Han Fei Date: Mon, 31 Jul 2017 19:39:28 +0800 Subject: [PATCH 09/10] explain: fix a panic for explain. (#3955) --- plan/explain.go | 15 +-------------- 1 file changed, 1 insertion(+), 14 deletions(-) diff --git a/plan/explain.go b/plan/explain.go index 7fc1b1777140d..8baa31b9b6d17 100644 --- a/plan/explain.go +++ b/plan/explain.go @@ -166,7 +166,7 @@ func (p *PhysicalHashJoin) ExplainInfo() string { // ExplainInfo implements PhysicalPlan interface. func (p *PhysicalHashSemiJoin) ExplainInfo() string { - buffer := bytes.NewBufferString(fmt.Sprintf("right:%s", p.Children()[p.rightChOffset].ID())) + buffer := bytes.NewBufferString(fmt.Sprintf("right:%s", p.Children()[1].ID())) if p.WithAux { buffer.WriteString(", aux") } @@ -207,19 +207,6 @@ func (p *PhysicalMergeJoin) ExplainInfo() string { buffer.WriteString(fmt.Sprintf(", other cond:%s", expression.ExplainExpressionList(p.OtherConditions))) } - if len(p.DefaultValues) > 0 { - buffer.WriteString("default vals:") - for i, val := range p.DefaultValues { - str, err := val.ToString() - if err != nil { - str = err.Error() - } - buffer.WriteString(str) - if i+1 < len(p.DefaultValues) { - buffer.WriteString(", ") - } - } - } if p.Desc { buffer.WriteString("desc") } else { From 58dca67d6e576a50d45ce0dbd348ae485128e015 Mon Sep 17 00:00:00 2001 From: Han Fei Date: Mon, 31 Jul 2017 21:13:42 +0800 Subject: [PATCH 10/10] *: fix close problem for index look up executor. (#3957) --- executor/new_distsql.go | 4 ++++ util/testkit/testkit.go | 2 ++ 2 files changed, 6 insertions(+) diff --git a/executor/new_distsql.go b/executor/new_distsql.go index f07619303f2a1..16a889f64984d 100644 --- a/executor/new_distsql.go +++ b/executor/new_distsql.go @@ -447,6 +447,10 @@ func (e *IndexLookUpExecutor) Schema() *expression.Schema { // Close implements Exec Close interface.
func (e *IndexLookUpExecutor) Close() error { + // If this executor is closed once, we should not close it a second time. + if e.taskChan == nil { + return nil + } // TODO: It's better to notify fetchHandles to close instead of fetching all index handle. // Consume the task channel in case channel is full. for range e.taskChan { diff --git a/util/testkit/testkit.go b/util/testkit/testkit.go index 4ccb01e15d97b..bda632da145bc 100644 --- a/util/testkit/testkit.go +++ b/util/testkit/testkit.go @@ -138,6 +138,8 @@ func (tk *TestKit) MustQuery(sql string, args ...interface{}) *Result { tk.c.Assert(rs, check.NotNil, comment) rows, err := tidb.GetRows(rs) tk.c.Assert(errors.ErrorStack(err), check.Equals, "", comment) + err = rs.Close() + tk.c.Assert(errors.ErrorStack(err), check.Equals, "", comment) sRows := make([][]string, len(rows)) for i := range rows { row := rows[i]