diff --git a/ddl/column.go b/ddl/column.go index bc44ed8529ae7..d8e70a42d50c8 100644 --- a/ddl/column.go +++ b/ddl/column.go @@ -1065,7 +1065,10 @@ func (w *worker) doModifyColumnTypeWithData( func() { addIndexErr = errCancelledDDLJob.GenWithStack("modify table `%v` column `%v` panic", tblInfo.Name, oldCol.Name) }, false) - return w.updateColumnAndIndexes(tbl, oldCol, changingCol, changingIdxs, reorgInfo) + // Use old column name to generate less confusing error messages. + changingColCpy := changingCol.Clone() + changingColCpy.Name = oldCol.Name + return w.updateColumnAndIndexes(tbl, oldCol, changingColCpy, changingIdxs, reorgInfo) }) if err != nil { if errWaitReorgTimeout.Equal(err) { diff --git a/ddl/column_type_change_test.go b/ddl/column_type_change_test.go index 57a4e3d167a21..bf205872ee166 100644 --- a/ddl/column_type_change_test.go +++ b/ddl/column_type_change_test.go @@ -2192,3 +2192,15 @@ func (s *testColumnTypeChangeSuite) TestChangeFromUnsignedIntToTime(c *C) { tk.MustQuery("select a from t;").Check(testkit.Rows("18:08:57")) tk.MustExec("drop table if exists t;") } + +// See https://github.com/pingcap/tidb/issues/25287. +func (s *testColumnTypeChangeSuite) TestChangeFromBitToStringInvalidUtf8ErrMsg(c *C) { + tk := testkit.NewTestKit(c, s.store) + tk.MustExec("use test;") + + tk.MustExec("drop table if exists t;") + tk.MustExec("create table t (a bit(45));") + tk.MustExec("insert into t values (1174717);") + errMsg := "[table:1366]Incorrect string value '\\xEC\\xBD' for column 'a'" + tk.MustGetErrMsg("alter table t modify column a varchar(31) collate utf8mb4_general_ci;", errMsg) +} diff --git a/expression/aggregation/base_func.go b/expression/aggregation/base_func.go index eef2c144361ea..731bc265736c7 100644 --- a/expression/aggregation/base_func.go +++ b/expression/aggregation/base_func.go @@ -434,7 +434,7 @@ func (a *baseFuncDesc) WrapCastForAggArgs(ctx sessionctx.Context) { if col, ok := a.Args[i].(*expression.Column); ok { col.RetType = types.NewFieldType(col.RetType.Tp) } - // originTp is used when the the `Tp` of column is TypeFloat32 while + // originTp is used when the `Tp` of column is TypeFloat32 while // the type of the aggregation function is TypeFloat64. originTp := a.Args[i].GetType().Tp *(a.Args[i].GetType()) = *(a.RetTp) diff --git a/expression/constant_propagation.go b/expression/constant_propagation.go index 19906023b07be..1c691b6800538 100644 --- a/expression/constant_propagation.go +++ b/expression/constant_propagation.go @@ -147,7 +147,7 @@ func tryToReplaceCond(ctx sessionctx.Context, src *Column, tgt *Column, cond Exp sf.FuncName.L == ast.If || sf.FuncName.L == ast.Case || sf.FuncName.L == ast.NullEQ) { - return false, false, cond + return false, true, cond } for idx, expr := range sf.GetArgs() { if src.Equal(nil, expr) { diff --git a/expression/integration_test.go b/expression/integration_test.go index 6ebf5e35d1b52..0e04425697122 100644 --- a/expression/integration_test.go +++ b/expression/integration_test.go @@ -10173,3 +10173,21 @@ func (s *testIntegrationSuite) TestIssue26958(c *C) { tk.MustQuery("select \n(select count(distinct c_int) from t2 where c_int >= t1.c_int) c1, \n(select count(distinct c_int) from t2 where c_int >= t1.c_int) c2\nfrom t1 group by c_int;\n"). 
Check(testkit.Rows("3 3", "2 2", "1 1"))
 }
+
+func (s *testIntegrationSuite) TestConstPropNullFunctions(c *C) {
+	tk := testkit.NewTestKit(c, s.store)
+	tk.MustExec("use test")
+	tk.MustExec("drop table if exists t1, t2")
+	tk.MustExec("create table t1 (a integer)")
+	tk.MustExec("insert into t1 values (0), (1), (2), (3)")
+	tk.MustExec("create table t2 (a integer, b integer)")
+	tk.MustExec("insert into t2 values (0,1), (1,1), (2,1), (3,1)")
+	tk.MustQuery("select t1.* from t1 left join t2 on t2.a = t1.a where t1.a = ifnull(t2.b, 0)").Check(testkit.Rows("1"))
+
+	tk.MustExec("drop table if exists t1, t2")
+	tk.MustExec("create table t1 (i1 integer, c1 char)")
+	tk.MustExec("insert into t1 values (2, 'a'), (1, 'b'), (3, 'c'), (0, null);")
+	tk.MustExec("create table t2 (i2 integer, c2 char, f2 float)")
+	tk.MustExec("insert into t2 values (0, 'c', null), (1, null, 0.1), (3, 'b', 0.01), (2, 'q', 0.12), (null, 'a', -0.1), (null, null, null)")
+	tk.MustQuery("select * from t2 where t2.i2=((select count(1) from t1 where t1.i1=t2.i2))").Check(testkit.Rows("1 <nil> 0.1"))
+}
diff --git a/meta/main_test.go b/meta/main_test.go
new file mode 100644
index 0000000000000..e6f34f4df31da
--- /dev/null
+++ b/meta/main_test.go
@@ -0,0 +1,30 @@
+// Copyright 2021 PingCAP, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package meta
+
+import (
+	"testing"
+
+	"github.com/pingcap/tidb/util/testbridge"
+	"go.uber.org/goleak"
+)
+
+func TestMain(m *testing.M) {
+	testbridge.WorkaroundGoCheckFlags()
+	opts := []goleak.Option{
+		goleak.IgnoreTopFunction("go.opencensus.io/stats/view.(*worker).start"),
+		goleak.IgnoreTopFunction("go.etcd.io/etcd/pkg/logutil.(*MergeLogger).outputLoop"),
+	}
+	goleak.VerifyTestMain(m, opts...)
+}
diff --git a/meta/meta_test.go b/meta/meta_test.go
index 4ba54f1935a3a..341b15981188e 100644
--- a/meta/meta_test.go
+++ b/meta/meta_test.go
@@ -15,172 +15,163 @@ package meta_test
 import (
 	"context"
+	"fmt"
 	"math"
 	"strconv"
 	"sync"
 	"testing"
 	"time"

-	. "github.com/pingcap/check"
 	"github.com/pingcap/errors"
 	"github.com/pingcap/parser/model"
 	"github.com/pingcap/tidb/kv"
 	"github.com/pingcap/tidb/meta"
 	"github.com/pingcap/tidb/store/mockstore"
-	"github.com/pingcap/tidb/util/testleak"
-	.
"github.com/pingcap/tidb/util/testutil" + "github.com/pingcap/tidb/testkit" + "github.com/stretchr/testify/require" ) -func TestT(t *testing.T) { - CustomVerboseFlag = true - TestingT(t) -} - -var _ = Suite(&testSuite{}) +func TestMeta(t *testing.T) { + t.Parallel() -type testSuite struct { - CommonHandleSuite -} - -func (s *testSuite) TestMeta(c *C) { - defer testleak.AfterTest(c)() store, err := mockstore.NewMockStore() - c.Assert(err, IsNil) + require.NoError(t, err) + defer func() { err := store.Close() - c.Assert(err, IsNil) + require.NoError(t, err) }() txn, err := store.Begin() - c.Assert(err, IsNil) + require.NoError(t, err) - t := meta.NewMeta(txn) + m := meta.NewMeta(txn) - n, err := t.GenGlobalID() - c.Assert(err, IsNil) - c.Assert(n, Equals, int64(1)) + n, err := m.GenGlobalID() + require.NoError(t, err) + require.Equal(t, int64(1), n) - n, err = t.GetGlobalID() - c.Assert(err, IsNil) - c.Assert(n, Equals, int64(1)) + n, err = m.GetGlobalID() + require.NoError(t, err) + require.Equal(t, int64(1), n) var wg sync.WaitGroup wg.Add(1) go func() { defer wg.Done() - ids, err := t.GenGlobalIDs(3) - c.Assert(err, IsNil) - anyMatch(c, ids, []int64{2, 3, 4}, []int64{6, 7, 8}) + ids, err := m.GenGlobalIDs(3) + require.NoError(t, err) + anyMatch(t, ids, []int64{2, 3, 4}, []int64{6, 7, 8}) }() wg.Add(1) go func() { defer wg.Done() - ids, err := t.GenGlobalIDs(4) - c.Assert(err, IsNil) - anyMatch(c, ids, []int64{5, 6, 7, 8}, []int64{2, 3, 4, 5}) + ids, err := m.GenGlobalIDs(4) + require.NoError(t, err) + anyMatch(t, ids, []int64{5, 6, 7, 8}, []int64{2, 3, 4, 5}) }() wg.Wait() - n, err = t.GetSchemaVersion() - c.Assert(err, IsNil) - c.Assert(n, Equals, int64(0)) + n, err = m.GetSchemaVersion() + require.NoError(t, err) + require.Equal(t, int64(0), n) - n, err = t.GenSchemaVersion() - c.Assert(err, IsNil) - c.Assert(n, Equals, int64(1)) + n, err = m.GenSchemaVersion() + require.NoError(t, err) + require.Equal(t, int64(1), n) - n, err = t.GetSchemaVersion() - c.Assert(err, IsNil) - c.Assert(n, Equals, int64(1)) + n, err = m.GetSchemaVersion() + require.NoError(t, err) + require.Equal(t, int64(1), n) dbInfo := &model.DBInfo{ ID: 1, Name: model.NewCIStr("a"), } - err = t.CreateDatabase(dbInfo) - c.Assert(err, IsNil) + err = m.CreateDatabase(dbInfo) + require.NoError(t, err) - err = t.CreateDatabase(dbInfo) - c.Assert(err, NotNil) - c.Assert(meta.ErrDBExists.Equal(err), IsTrue) + err = m.CreateDatabase(dbInfo) + require.NotNil(t, err) + require.True(t, meta.ErrDBExists.Equal(err)) - v, err := t.GetDatabase(1) - c.Assert(err, IsNil) - c.Assert(v, DeepEquals, dbInfo) + v, err := m.GetDatabase(1) + require.NoError(t, err) + require.Equal(t, dbInfo, v) dbInfo.Name = model.NewCIStr("aa") - err = t.UpdateDatabase(dbInfo) - c.Assert(err, IsNil) + err = m.UpdateDatabase(dbInfo) + require.NoError(t, err) - v, err = t.GetDatabase(1) - c.Assert(err, IsNil) - c.Assert(v, DeepEquals, dbInfo) + v, err = m.GetDatabase(1) + require.NoError(t, err) + require.Equal(t, dbInfo, v) - dbs, err := t.ListDatabases() - c.Assert(err, IsNil) - c.Assert(dbs, DeepEquals, []*model.DBInfo{dbInfo}) + dbs, err := m.ListDatabases() + require.NoError(t, err) + require.Equal(t, []*model.DBInfo{dbInfo}, dbs) tbInfo := &model.TableInfo{ ID: 1, Name: model.NewCIStr("t"), } - err = t.CreateTableOrView(1, tbInfo) - c.Assert(err, IsNil) + err = m.CreateTableOrView(1, tbInfo) + require.NoError(t, err) - n, err = t.GenAutoTableID(1, 1, 10) - c.Assert(err, IsNil) - c.Assert(n, Equals, int64(10)) + n, err = m.GenAutoTableID(1, 1, 10) + 
require.NoError(t, err) + require.Equal(t, int64(10), n) - n, err = t.GetAutoTableID(1, 1) - c.Assert(err, IsNil) - c.Assert(n, Equals, int64(10)) + n, err = m.GetAutoTableID(1, 1) + require.NoError(t, err) + require.Equal(t, int64(10), n) - err = t.CreateTableOrView(1, tbInfo) - c.Assert(err, NotNil) - c.Assert(meta.ErrTableExists.Equal(err), IsTrue) + err = m.CreateTableOrView(1, tbInfo) + require.NotNil(t, err) + require.True(t, meta.ErrTableExists.Equal(err)) tbInfo.Name = model.NewCIStr("tt") - err = t.UpdateTable(1, tbInfo) - c.Assert(err, IsNil) + err = m.UpdateTable(1, tbInfo) + require.NoError(t, err) - table, err := t.GetTable(1, 1) - c.Assert(err, IsNil) - c.Assert(table, DeepEquals, tbInfo) + table, err := m.GetTable(1, 1) + require.NoError(t, err) + require.Equal(t, tbInfo, table) - table, err = t.GetTable(1, 2) - c.Assert(err, IsNil) - c.Assert(table, IsNil) + table, err = m.GetTable(1, 2) + require.NoError(t, err) + require.Nil(t, table) tbInfo2 := &model.TableInfo{ ID: 2, Name: model.NewCIStr("bb"), } - err = t.CreateTableOrView(1, tbInfo2) - c.Assert(err, IsNil) + err = m.CreateTableOrView(1, tbInfo2) + require.NoError(t, err) - tables, err := t.ListTables(1) - c.Assert(err, IsNil) - c.Assert(tables, DeepEquals, []*model.TableInfo{tbInfo, tbInfo2}) + tables, err := m.ListTables(1) + require.NoError(t, err) + require.Equal(t, []*model.TableInfo{tbInfo, tbInfo2}, tables) // Generate an auto id. - n, err = t.GenAutoTableID(1, 2, 10) - c.Assert(err, IsNil) - c.Assert(n, Equals, int64(10)) + n, err = m.GenAutoTableID(1, 2, 10) + require.NoError(t, err) + require.Equal(t, int64(10), n) // Make sure the auto id key-value entry is there. - n, err = t.GetAutoTableID(1, 2) - c.Assert(err, IsNil) - c.Assert(n, Equals, int64(10)) + n, err = m.GetAutoTableID(1, 2) + require.NoError(t, err) + require.Equal(t, int64(10), n) - err = t.DropTableOrView(1, tbInfo2.ID, true) - c.Assert(err, IsNil) + err = m.DropTableOrView(1, tbInfo2.ID, true) + require.NoError(t, err) // Make sure auto id key-value entry is gone. - n, err = t.GetAutoTableID(1, 2) - c.Assert(err, IsNil) - c.Assert(n, Equals, int64(0)) + n, err = m.GetAutoTableID(1, 2) + require.NoError(t, err) + require.Equal(t, int64(0), n) - tables, err = t.ListTables(1) - c.Assert(err, IsNil) - c.Assert(tables, DeepEquals, []*model.TableInfo{tbInfo}) + tables, err = m.ListTables(1) + require.NoError(t, err) + require.Equal(t, []*model.TableInfo{tbInfo}, tables) // Test case for drop a table without delete auto id key-value entry. tid := int64(100) @@ -189,66 +180,67 @@ func (s *testSuite) TestMeta(c *C) { Name: model.NewCIStr("t_rename"), } // Create table. - err = t.CreateTableOrView(1, tbInfo100) - c.Assert(err, IsNil) + err = m.CreateTableOrView(1, tbInfo100) + require.NoError(t, err) // Update auto ID. currentDBID := int64(1) - n, err = t.GenAutoTableID(currentDBID, tid, 10) - c.Assert(err, IsNil) - c.Assert(n, Equals, int64(10)) + n, err = m.GenAutoTableID(currentDBID, tid, 10) + require.NoError(t, err) + require.Equal(t, int64(10), n) // Fail to update auto ID. // The table ID doesn't exist. nonExistentID := int64(1234) - _, err = t.GenAutoTableID(currentDBID, nonExistentID, 10) - c.Assert(err, NotNil) - c.Assert(meta.ErrTableNotExists.Equal(err), IsTrue) + _, err = m.GenAutoTableID(currentDBID, nonExistentID, 10) + require.NotNil(t, err) + require.True(t, meta.ErrTableNotExists.Equal(err)) // Fail to update auto ID. // The current database ID doesn't exist. 
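+	// Reuse nonExistentID as the current database ID so the GenAutoTableID call below fails with ErrDBNotExists.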
currentDBID = nonExistentID - _, err = t.GenAutoTableID(currentDBID, tid, 10) - c.Assert(err, NotNil) - c.Assert(meta.ErrDBNotExists.Equal(err), IsTrue) + _, err = m.GenAutoTableID(currentDBID, tid, 10) + require.NotNil(t, err) + require.True(t, meta.ErrDBNotExists.Equal(err)) // Test case for CreateTableAndSetAutoID. tbInfo3 := &model.TableInfo{ ID: 3, Name: model.NewCIStr("tbl3"), } - err = t.CreateTableAndSetAutoID(1, tbInfo3, 123, 0) - c.Assert(err, IsNil) - id, err := t.GetAutoTableID(1, tbInfo3.ID) - c.Assert(err, IsNil) - c.Assert(id, Equals, int64(123)) + err = m.CreateTableAndSetAutoID(1, tbInfo3, 123, 0) + require.NoError(t, err) + id, err := m.GetAutoTableID(1, tbInfo3.ID) + require.NoError(t, err) + require.Equal(t, int64(123), id) // Test case for GenAutoTableIDKeyValue. - key, val := t.GenAutoTableIDKeyValue(1, tbInfo3.ID, 1234) - c.Assert(val, DeepEquals, []byte(strconv.FormatInt(1234, 10))) - c.Assert(key, DeepEquals, []byte{0x6d, 0x44, 0x42, 0x3a, 0x31, 0x0, 0x0, 0x0, 0x0, 0xfb, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x68, 0x54, 0x49, 0x44, 0x3a, 0x33, 0x0, 0x0, 0x0, 0xfc}) + key, val := m.GenAutoTableIDKeyValue(1, tbInfo3.ID, 1234) + require.Equal(t, []byte(strconv.FormatInt(1234, 10)), val) + require.Equal(t, []byte{0x6d, 0x44, 0x42, 0x3a, 0x31, 0x0, 0x0, 0x0, 0x0, 0xfb, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x68, 0x54, 0x49, 0x44, 0x3a, 0x33, 0x0, 0x0, 0x0, 0xfc}, key) - err = t.DropDatabase(1) - c.Assert(err, IsNil) - err = t.DropDatabase(currentDBID) - c.Assert(err, IsNil) + err = m.DropDatabase(1) + require.NoError(t, err) + err = m.DropDatabase(currentDBID) + require.NoError(t, err) - dbs, err = t.ListDatabases() - c.Assert(err, IsNil) - c.Assert(dbs, HasLen, 0) + dbs, err = m.ListDatabases() + require.NoError(t, err) + require.Len(t, dbs, 0) - bootstrapVer, err := t.GetBootstrapVersion() - c.Assert(err, IsNil) - c.Assert(bootstrapVer, Equals, int64(0)) + bootstrapVer, err := m.GetBootstrapVersion() + require.NoError(t, err) + require.Equal(t, int64(0), bootstrapVer) - err = t.FinishBootstrap(int64(1)) - c.Assert(err, IsNil) + err = m.FinishBootstrap(int64(1)) + require.NoError(t, err) - bootstrapVer, err = t.GetBootstrapVersion() - c.Assert(err, IsNil) - c.Assert(bootstrapVer, Equals, int64(1)) + bootstrapVer, err = m.GetBootstrapVersion() + require.NoError(t, err) + require.Equal(t, int64(1), bootstrapVer) // Test case for meta.FinishBootstrap with a version. - err = t.FinishBootstrap(int64(10)) - c.Assert(err, IsNil) - bootstrapVer, err = t.GetBootstrapVersion() - c.Assert(err, IsNil) - c.Assert(bootstrapVer, Equals, int64(10)) + err = m.FinishBootstrap(int64(10)) + require.NoError(t, err) + bootstrapVer, err = m.GetBootstrapVersion() + require.NoError(t, err) + + require.Equal(t, int64(10), bootstrapVer) // Test case for SchemaDiff. schemaDiff := &model.SchemaDiff{ @@ -258,68 +250,69 @@ func (s *testSuite) TestMeta(c *C) { TableID: 2, OldTableID: 3, } - err = t.SetSchemaDiff(schemaDiff) - c.Assert(err, IsNil) - readDiff, err := t.GetSchemaDiff(schemaDiff.Version) - c.Assert(err, IsNil) - c.Assert(readDiff, DeepEquals, schemaDiff) + err = m.SetSchemaDiff(schemaDiff) + require.NoError(t, err) + readDiff, err := m.GetSchemaDiff(schemaDiff.Version) + require.NoError(t, err) + require.Equal(t, schemaDiff, readDiff) err = txn.Commit(context.Background()) - c.Assert(err, IsNil) + require.NoError(t, err) // Test for DDLJobHistoryKey. 
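+	// The expected byte slice below pins down the meta key encoding for history DDL job ID 888.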
- key = meta.DDLJobHistoryKey(t, 888) - c.Assert(key, DeepEquals, []byte{0x6d, 0x44, 0x44, 0x4c, 0x4a, 0x6f, 0x62, 0x48, 0x69, 0xff, 0x73, 0x74, 0x6f, 0x72, 0x79, 0x0, 0x0, 0x0, 0xfc, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x68, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x3, 0x78, 0xff, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf7}) + key = meta.DDLJobHistoryKey(m, 888) + require.Equal(t, []byte{0x6d, 0x44, 0x44, 0x4c, 0x4a, 0x6f, 0x62, 0x48, 0x69, 0xff, 0x73, 0x74, 0x6f, 0x72, 0x79, 0x0, 0x0, 0x0, 0xfc, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x68, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x3, 0x78, 0xff, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf7}, key) } -func (s *testSuite) TestSnapshot(c *C) { - defer testleak.AfterTest(c)() +func TestSnapshot(t *testing.T) { + t.Parallel() store, err := mockstore.NewMockStore() - c.Assert(err, IsNil) + require.NoError(t, err) defer func() { err := store.Close() - c.Assert(err, IsNil) + require.NoError(t, err) }() txn, _ := store.Begin() m := meta.NewMeta(txn) _, err = m.GenGlobalID() - c.Assert(err, IsNil) + require.NoError(t, err) n, _ := m.GetGlobalID() - c.Assert(n, Equals, int64(1)) + require.Equal(t, int64(1), n) err = txn.Commit(context.Background()) - c.Assert(err, IsNil) + require.NoError(t, err) ver1, _ := store.CurrentVersion(kv.GlobalTxnScope) time.Sleep(time.Millisecond) txn, _ = store.Begin() m = meta.NewMeta(txn) _, err = m.GenGlobalID() - c.Assert(err, IsNil) + require.NoError(t, err) n, _ = m.GetGlobalID() - c.Assert(n, Equals, int64(2)) + require.Equal(t, int64(2), n) err = txn.Commit(context.Background()) - c.Assert(err, IsNil) + require.NoError(t, err) snapshot := store.GetSnapshot(ver1) snapMeta := meta.NewSnapshotMeta(snapshot) n, _ = snapMeta.GetGlobalID() - c.Assert(n, Equals, int64(1)) + require.Equal(t, int64(1), n) _, err = snapMeta.GenGlobalID() - c.Assert(err, NotNil) - c.Assert(err.Error(), Equals, "[structure:8220]write on snapshot") + require.NotNil(t, err) + require.Equal(t, "[structure:8220]write on snapshot", err.Error()) } -func (s *testSuite) TestElement(c *C) { +func TestElement(t *testing.T) { + t.Parallel() checkElement := func(key []byte, resErr error) { e := &meta.Element{ID: 123, TypeKey: key} eBytes := e.EncodeElement() resE, err := meta.DecodeElement(eBytes) if resErr == nil { - c.Assert(err, Equals, resErr) - c.Assert(e, DeepEquals, resE) + require.NoError(t, err) + require.Equal(t, resE, e) } else { - c.Assert(err.Error(), Equals, resErr.Error()) + require.EqualError(t, err, resErr.Error()) } } key := []byte("_col") @@ -330,248 +323,285 @@ func (s *testSuite) TestElement(c *C) { checkElement(key, errors.Errorf("invalid encoded element key prefix %q", key[:5])) _, err := meta.DecodeElement([]byte("_col")) - c.Assert(err.Error(), Equals, `invalid encoded element "_col" length 4`) + require.EqualError(t, err, `invalid encoded element "_col" length 4`) _, err = meta.DecodeElement(meta.ColumnElementKey) - c.Assert(err.Error(), Equals, `invalid encoded element "_col_" length 5`) + require.EqualError(t, err, `invalid encoded element "_col_" length 5`) } -func (s *testSuite) TestDDL(c *C) { - defer testleak.AfterTest(c)() - store, err := mockstore.NewMockStore() - c.Assert(err, IsNil) - defer func() { - err := store.Close() - c.Assert(err, IsNil) - }() - - txn, err := store.Begin() - c.Assert(err, IsNil) +func TestDDL(t *testing.T) { + t.Parallel() + + testCases := []struct { + desc string + startHandle kv.Handle + endHandle kv.Handle + }{ + { + "kv.IntHandle", + kv.IntHandle(1), + kv.IntHandle(2), + }, + { + "kv.CommonHandle", + 
+			testkit.MustNewCommonHandle(t, "abc", 1222, "string"),
+			testkit.MustNewCommonHandle(t, "dddd", 1222, "string"),
+		},
+	}
-	t := meta.NewMeta(txn)
+	for _, tc := range testCases {
+		t.Run(tc.desc, func(t *testing.T) {
+			t.Parallel()
+			store, err := mockstore.NewMockStore()
+			require.NoError(t, err)
+			defer func() {
+				err := store.Close()
+				require.NoError(t, err)
+			}()
+
+			txn, err := store.Begin()
+			require.NoError(t, err)
+
+			m := meta.NewMeta(txn)
+
+			job := &model.Job{ID: 1}
+			err = m.EnQueueDDLJob(job)
+			require.NoError(t, err)
+			n, err := m.DDLJobQueueLen()
+			require.NoError(t, err)
+			require.Equal(t, int64(1), n)
+
+			v, err := m.GetDDLJobByIdx(0)
+			require.NoError(t, err)
+			require.Equal(t, job, v)
+			v, err = m.GetDDLJobByIdx(1)
+			require.NoError(t, err)
+			require.Nil(t, v)
+
+			job.ID = 2
+			err = m.UpdateDDLJob(0, job, true)
+			require.NoError(t, err)
+
+			element := &meta.Element{ID: 123, TypeKey: meta.IndexElementKey}
+			// There are 3 meta keys related to index reorganization:
+			// start_handle, end_handle and physical_table_id.
+			// Only start_handle is initialized.
+			err = m.UpdateDDLReorgStartHandle(job, element, kv.IntHandle(1).Encoded())
+			require.NoError(t, err)
+
+			// Since physical_table_id is uninitialized, we simulate an older TiDB version that doesn't store it.
+			// In this case, GetDDLReorgHandle always returns maxInt64 as end_handle.
+			e, i, j, k, err := m.GetDDLReorgHandle(job)
+			require.NoError(t, err)
+			require.Equal(t, element, e)
+			require.Equal(t, kv.Key(kv.IntHandle(1).Encoded()), i)
+			require.Equal(t, kv.Key(kv.IntHandle(math.MaxInt64).Encoded()), j)
+			require.Equal(t, int64(0), k)
+
+			element = &meta.Element{ID: 222, TypeKey: meta.ColumnElementKey}
+			err = m.UpdateDDLReorgHandle(job, tc.startHandle.Encoded(), tc.endHandle.Encoded(), 3, element)
+			require.NoError(t, err)
+			element1 := &meta.Element{ID: 223, TypeKey: meta.IndexElementKey}
+			err = m.UpdateDDLReorgHandle(job, tc.startHandle.Encoded(), tc.endHandle.Encoded(), 3, element1)
+			require.NoError(t, err)
+
+			e, i, j, k, err = m.GetDDLReorgHandle(job)
+			require.NoError(t, err)
+			require.Equal(t, element1, e)
+			require.Equal(t, kv.Key(tc.startHandle.Encoded()), i)
+			require.Equal(t, kv.Key(tc.endHandle.Encoded()), j)
+			require.Equal(t, int64(3), k)
+
+			err = m.RemoveDDLReorgHandle(job, []*meta.Element{element, element1})
+			require.NoError(t, err)
+			e, i, j, k, err = m.GetDDLReorgHandle(job)
+			require.True(t, meta.ErrDDLReorgElementNotExist.Equal(err))
+			require.Nil(t, e)
+			require.Nil(t, i)
+			require.Nil(t, j)
+			require.Equal(t, int64(0), k)
+
+			// Simulate a new TiDB binary running on old TiDB DDL reorg data.
+			e, i, j, k, err = m.GetDDLReorgHandle(job)
+			require.True(t, meta.ErrDDLReorgElementNotExist.Equal(err))
+			require.Nil(t, e)
+			require.Nil(t, i)
+			require.Nil(t, j)
+			require.Equal(t, int64(0), k)
+
+			// Test that GetDDLReorgHandle fails once the reorg handle is gone.
+			_, _, _, _, err = m.GetDDLReorgHandle(job)
+			require.True(t, meta.ErrDDLReorgElementNotExist.Equal(err))
+
+			v, err = m.DeQueueDDLJob()
+			require.NoError(t, err)
+			require.Equal(t, job, v)
+
+			err = m.AddHistoryDDLJob(job, true)
+			require.NoError(t, err)
+			v, err = m.GetHistoryDDLJob(2)
+			require.NoError(t, err)
+			require.Equal(t, job, v)
+
+			// Add multiple history jobs.
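+			// The jobs are added with out-of-order IDs (1234, then 123); the loop below also verifies that GetAllHistoryDDLJobs returns them in ascending ID order.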
+ arg := "test arg" + historyJob1 := &model.Job{ID: 1234} + historyJob1.Args = append(job.Args, arg) + err = m.AddHistoryDDLJob(historyJob1, true) + require.NoError(t, err) + historyJob2 := &model.Job{ID: 123} + historyJob2.Args = append(job.Args, arg) + err = m.AddHistoryDDLJob(historyJob2, false) + require.NoError(t, err) + all, err := m.GetAllHistoryDDLJobs() + require.NoError(t, err) + var lastID int64 + for _, job := range all { + require.Greater(t, job.ID, lastID) + lastID = job.ID + arg1 := "" + err := job.DecodeArgs(&arg1) + require.NoError(t, err) + if job.ID == historyJob1.ID { + require.Equal(t, historyJob1.Args[0], *(job.Args[0].(*string))) + } else { + require.Len(t, job.Args, 0) + } + } - job := &model.Job{ID: 1} - err = t.EnQueueDDLJob(job) - c.Assert(err, IsNil) - n, err := t.DDLJobQueueLen() - c.Assert(err, IsNil) - c.Assert(n, Equals, int64(1)) - - v, err := t.GetDDLJobByIdx(0) - c.Assert(err, IsNil) - c.Assert(v, DeepEquals, job) - v, err = t.GetDDLJobByIdx(1) - c.Assert(err, IsNil) - c.Assert(v, IsNil) - job.ID = 2 - err = t.UpdateDDLJob(0, job, true) - c.Assert(err, IsNil) - - element := &meta.Element{ID: 123, TypeKey: meta.IndexElementKey} - // There are 3 meta key relate to index reorganization: - // start_handle, end_handle and physical_table_id. - // Only start_handle is initialized. - err = t.UpdateDDLReorgStartHandle(job, element, kv.IntHandle(1).Encoded()) - c.Assert(err, IsNil) - - // Since physical_table_id is uninitialized, we simulate older TiDB version that doesn't store them. - // In this case GetDDLReorgHandle always return maxInt64 as end_handle. - e, i, j, k, err := t.GetDDLReorgHandle(job) - c.Assert(err, IsNil) - c.Assert(e, DeepEquals, element) - c.Assert(i, DeepEquals, kv.Key(kv.IntHandle(1).Encoded())) - c.Assert(j, DeepEquals, kv.Key(kv.IntHandle(math.MaxInt64).Encoded())) - c.Assert(k, Equals, int64(0)) - - startHandle := s.NewHandle().Int(1).Common("abc", 1222, "string") - endHandle := s.NewHandle().Int(2).Common("dddd", 1222, "string") - element = &meta.Element{ID: 222, TypeKey: meta.ColumnElementKey} - err = t.UpdateDDLReorgHandle(job, startHandle.Encoded(), endHandle.Encoded(), 3, element) - c.Assert(err, IsNil) - element1 := &meta.Element{ID: 223, TypeKey: meta.IndexElementKey} - err = t.UpdateDDLReorgHandle(job, startHandle.Encoded(), endHandle.Encoded(), 3, element1) - c.Assert(err, IsNil) - - e, i, j, k, err = t.GetDDLReorgHandle(job) - c.Assert(err, IsNil) - c.Assert(e, DeepEquals, element1) - c.Assert(i, DeepEquals, kv.Key(startHandle.Encoded())) - c.Assert(j, DeepEquals, kv.Key(endHandle.Encoded())) - c.Assert(k, Equals, int64(3)) - - err = t.RemoveDDLReorgHandle(job, []*meta.Element{element, element1}) - c.Assert(err, IsNil) - e, i, j, k, err = t.GetDDLReorgHandle(job) - c.Assert(meta.ErrDDLReorgElementNotExist.Equal(err), IsTrue) - c.Assert(e, IsNil) - c.Assert(i, IsNil) - c.Assert(j, IsNil) - c.Assert(k, Equals, int64(0)) - - // new TiDB binary running on old TiDB DDL reorg data. - e, i, j, k, err = t.GetDDLReorgHandle(job) - c.Assert(meta.ErrDDLReorgElementNotExist.Equal(err), IsTrue) - c.Assert(e, IsNil) - c.Assert(i, IsNil) - c.Assert(j, IsNil) - c.Assert(k, Equals, int64(0)) - - // Test GetDDLReorgHandle failed. 
- _, _, _, _, err = t.GetDDLReorgHandle(job) - c.Assert(meta.ErrDDLReorgElementNotExist.Equal(err), IsTrue) - - v, err = t.DeQueueDDLJob() - c.Assert(err, IsNil) - c.Assert(v, DeepEquals, job) - - err = t.AddHistoryDDLJob(job, true) - c.Assert(err, IsNil) - v, err = t.GetHistoryDDLJob(2) - c.Assert(err, IsNil) - c.Assert(v, DeepEquals, job) - - // Add multiple history jobs. - arg := "test arg" - historyJob1 := &model.Job{ID: 1234} - historyJob1.Args = append(job.Args, arg) - err = t.AddHistoryDDLJob(historyJob1, true) - c.Assert(err, IsNil) - historyJob2 := &model.Job{ID: 123} - historyJob2.Args = append(job.Args, arg) - err = t.AddHistoryDDLJob(historyJob2, false) - c.Assert(err, IsNil) - all, err := t.GetAllHistoryDDLJobs() - c.Assert(err, IsNil) - var lastID int64 - for _, job := range all { - c.Assert(job.ID, Greater, lastID) - lastID = job.ID - arg1 := "" - err := job.DecodeArgs(&arg1) - c.Assert(err, IsNil) - if job.ID == historyJob1.ID { - c.Assert(*(job.Args[0].(*string)), Equals, historyJob1.Args[0]) - } else { - c.Assert(job.Args, HasLen, 0) - } + // Test for get last N history ddl jobs. + historyJobs, err := m.GetLastNHistoryDDLJobs(2) + require.NoError(t, err) + require.Len(t, historyJobs, 2) + require.Equal(t, int64(1234), historyJobs[0].ID) + require.Equal(t, int64(123), historyJobs[1].ID) + + // Test GetAllDDLJobsInQueue. + err = m.EnQueueDDLJob(job) + require.NoError(t, err) + job1 := &model.Job{ID: 2} + err = m.EnQueueDDLJob(job1) + require.NoError(t, err) + jobs, err := m.GetAllDDLJobsInQueue() + require.NoError(t, err) + expectJobs := []*model.Job{job, job1} + require.Equal(t, expectJobs, jobs) + + err = txn.Commit(context.Background()) + require.NoError(t, err) + }) } +} - // Test for get last N history ddl jobs. - historyJobs, err := t.GetLastNHistoryDDLJobs(2) - c.Assert(err, IsNil) - c.Assert(len(historyJobs), Equals, 2) - c.Assert(historyJobs[0].ID == 1234, IsTrue) - c.Assert(historyJobs[1].ID == 123, IsTrue) - - // Test GetAllDDLJobsInQueue. - err = t.EnQueueDDLJob(job) - c.Assert(err, IsNil) - job1 := &model.Job{ID: 2} - err = t.EnQueueDDLJob(job1) - c.Assert(err, IsNil) - jobs, err := t.GetAllDDLJobsInQueue() - c.Assert(err, IsNil) - expectJobs := []*model.Job{job, job1} - c.Assert(jobs, DeepEquals, expectJobs) +func TestAddIndexJob(t *testing.T) { + t.Parallel() - err = txn.Commit(context.Background()) - c.Assert(err, IsNil) + store, err := mockstore.NewMockStore() + require.NoError(t, err) + defer func() { + err := store.Close() + require.NoError(t, err) + }() - // Test for add index job. 
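+	// Add-index jobs are tracked in a separate queue; the meta handle below is created with meta.AddIndexJobListKey.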
txn1, err := store.Begin() - c.Assert(err, IsNil) + require.NoError(t, err) m := meta.NewMeta(txn1, meta.AddIndexJobListKey) + job := &model.Job{ID: 1} err = m.EnQueueDDLJob(job) - c.Assert(err, IsNil) + require.NoError(t, err) job.ID = 123 err = m.UpdateDDLJob(0, job, true, meta.AddIndexJobListKey) - c.Assert(err, IsNil) - v, err = m.GetDDLJobByIdx(0, meta.AddIndexJobListKey) - c.Assert(err, IsNil) - c.Assert(v, DeepEquals, job) + require.NoError(t, err) + v, err := m.GetDDLJobByIdx(0, meta.AddIndexJobListKey) + require.NoError(t, err) + require.Equal(t, job, v) l, err := m.DDLJobQueueLen(meta.AddIndexJobListKey) - c.Assert(err, IsNil) - c.Assert(l, Equals, int64(1)) - jobs, err = m.GetAllDDLJobsInQueue(meta.AddIndexJobListKey) - c.Assert(err, IsNil) - expectJobs = []*model.Job{job} - c.Assert(jobs, DeepEquals, expectJobs) + require.NoError(t, err) + require.Equal(t, int64(1), l) + jobs, err := m.GetAllDDLJobsInQueue(meta.AddIndexJobListKey) + require.NoError(t, err) + expectJobs := []*model.Job{job} + require.Equal(t, expectJobs, jobs) err = txn1.Commit(context.Background()) - c.Assert(err, IsNil) - - s.RerunWithCommonHandleEnabled(c, s.TestDDL) + require.NoError(t, err) } -func (s *testSuite) BenchmarkGenGlobalIDs(c *C) { - defer testleak.AfterTest(c)() +func BenchmarkGenGlobalIDs(b *testing.B) { store, err := mockstore.NewMockStore() - c.Assert(err, IsNil) + require.NoError(b, err) defer func() { err := store.Close() - c.Assert(err, IsNil) + require.NoError(b, err) }() txn, err := store.Begin() - c.Assert(err, IsNil) + require.NoError(b, err) defer func() { err := txn.Rollback() - c.Assert(err, IsNil) + require.NoError(b, err) }() - t := meta.NewMeta(txn) + m := meta.NewMeta(txn) - c.ResetTimer() + b.ResetTimer() var ids []int64 - for i := 0; i < c.N; i++ { - ids, _ = t.GenGlobalIDs(10) + for i := 0; i < b.N; i++ { + ids, _ = m.GenGlobalIDs(10) } - c.Assert(ids, HasLen, 10) - c.Assert(ids[9], Equals, int64(c.N)*10) + require.Len(b, ids, 10) + require.Equal(b, int64(b.N)*10, ids[9]) } -func (s *testSuite) BenchmarkGenGlobalIDOneByOne(c *C) { - defer testleak.AfterTest(c)() +func BenchmarkGenGlobalIDOneByOne(b *testing.B) { store, err := mockstore.NewMockStore() - c.Assert(err, IsNil) + require.NoError(b, err) defer func() { err := store.Close() - c.Assert(err, IsNil) + require.NoError(b, err) }() txn, err := store.Begin() - c.Assert(err, IsNil) + require.NoError(b, err) defer func() { err := txn.Rollback() - c.Assert(err, IsNil) + require.NoError(b, err) }() - t := meta.NewMeta(txn) + m := meta.NewMeta(txn) - c.ResetTimer() + b.ResetTimer() var id int64 - for i := 0; i < c.N; i++ { + for i := 0; i < b.N; i++ { for j := 0; j < 10; j++ { - id, _ = t.GenGlobalID() + id, _ = m.GenGlobalID() } } - c.Assert(id, Equals, int64(c.N)*10) + require.Equal(b, int64(b.N)*10, id) } -func anyMatch(c *C, ids []int64, candidates ...[]int64) { - var match bool -OUTER: - for _, cand := range candidates { - if len(ids) != len(cand) { - continue +func anyMatch(t *testing.T, ids []int64, candidates ...[]int64) { + comment := fmt.Sprintf("ids %v cannot match any of %v", ids, candidates) + + for _, candidate := range candidates { + if match(ids, candidate) { + return } - for i, v := range cand { - if ids[i] != v { - continue OUTER - } + } + + require.FailNow(t, comment) +} + +func match(ids, candidate []int64) bool { + if len(ids) != len(candidate) { + return false + } + + for i, v := range candidate { + if ids[i] != v { + return false } - match = true - break } - c.Assert(match, IsTrue) + + return true } diff 
--git a/planner/core/logical_plan_builder.go b/planner/core/logical_plan_builder.go index 7ce827d86ac79..a9a3978329c2b 100644 --- a/planner/core/logical_plan_builder.go +++ b/planner/core/logical_plan_builder.go @@ -5189,7 +5189,7 @@ func (b *PlanBuilder) buildProjectionForWindow(ctx context.Context, p LogicalPla p = np switch newArg.(type) { case *expression.Column, *expression.Constant: - newArgList = append(newArgList, newArg) + newArgList = append(newArgList, newArg.Clone()) continue } proj.Exprs = append(proj.Exprs, newArg) @@ -5221,7 +5221,7 @@ func (b *PlanBuilder) buildArgs4WindowFunc(ctx context.Context, p LogicalPlan, a p = np switch newArg.(type) { case *expression.Column, *expression.Constant: - newArgList = append(newArgList, newArg) + newArgList = append(newArgList, newArg.Clone()) continue } col := &expression.Column{ diff --git a/planner/core/testdata/integration_partition_suite_out.json b/planner/core/testdata/integration_partition_suite_out.json index 5237f3bfc3e14..c8eb3874fe516 100644 --- a/planner/core/testdata/integration_partition_suite_out.json +++ b/planner/core/testdata/integration_partition_suite_out.json @@ -1021,11 +1021,11 @@ ] }, { - "SQL": "create table tto_seconds (a date, b datetime) partition by list (TO_SECONDS(a)) (partition p0 values in (0, 1, 2, 3, 63745012800), partition p1 values in (4, 5, 6, 7, 63744969600))", + "SQL": "create table tto_seconds (a date, b datetime) partition by list (TO_SECONDS(a)) (partition p0 values in (0, 1, 2, 3, 63740649600), partition p1 values in (4, 5, 6, 7, 63744969600))", "Results": null }, { - "SQL": "insert into tto_seconds values ('2019-12-31 12:00:00', '2019-12-31 23:59:59'), ('2019-12-31 23:06:59', '2019-12-31 12:00:00')", + "SQL": "insert into tto_seconds values ('2019-12-31 12:00:00', '2019-12-31 23:59:59'), ('2019-11-11 23:06:59', '2019-12-31 12:00:00')", "Results": null }, { diff --git a/planner/core/testdata/integration_serial_suite_out.json b/planner/core/testdata/integration_serial_suite_out.json index f808157f2d229..65a0ad7865771 100644 --- a/planner/core/testdata/integration_serial_suite_out.json +++ b/planner/core/testdata/integration_serial_suite_out.json @@ -2399,14 +2399,13 @@ " └─HashAgg 6400.00 batchCop[tiflash] group by:test.t.id, funcs:sum(test.t.value)->Column#8", " └─Selection 8000.00 batchCop[tiflash] gt(plus(test.t.id, 1), ifnull(Column#7, 0))", " └─HashJoin 10000.00 batchCop[tiflash] left outer join, equal:[eq(test.t.id, test.t.id) eq(test.t.value, test.t.value)]", - " ├─Selection(Build) 6387.21 batchCop[tiflash] gt(plus(test.t.id, 1), ifnull(Column#7, 0))", - " │ └─Projection 7984.01 batchCop[tiflash] Column#7, test.t.id, test.t.value", - " │ └─HashAgg 7984.01 batchCop[tiflash] group by:test.t.id, test.t.value, funcs:sum(Column#24)->Column#7, funcs:firstrow(test.t.id)->test.t.id, funcs:firstrow(test.t.value)->test.t.value", - " │ └─ExchangeReceiver 7984.01 batchCop[tiflash] ", - " │ └─ExchangeSender 7984.01 batchCop[tiflash] ExchangeType: HashPartition, Hash Cols: [name: test.t.id, collate: N/A]", - " │ └─HashAgg 7984.01 batchCop[tiflash] group by:test.t.id, test.t.value, funcs:count(1)->Column#24", - " │ └─Selection 9980.01 batchCop[tiflash] not(isnull(test.t.id)), not(isnull(test.t.value))", - " │ └─TableFullScan 10000.00 batchCop[tiflash] table:t keep order:false, stats:pseudo", + " ├─Projection(Build) 7984.01 batchCop[tiflash] Column#7, test.t.id, test.t.value", + " │ └─HashAgg 7984.01 batchCop[tiflash] group by:test.t.id, test.t.value, funcs:sum(Column#24)->Column#7, 
funcs:firstrow(test.t.id)->test.t.id, funcs:firstrow(test.t.value)->test.t.value",
+ " │ └─ExchangeReceiver 7984.01 batchCop[tiflash] ",
+ " │ └─ExchangeSender 7984.01 batchCop[tiflash] ExchangeType: HashPartition, Hash Cols: [name: test.t.id, collate: N/A]",
+ " │ └─HashAgg 7984.01 batchCop[tiflash] group by:test.t.id, test.t.value, funcs:count(1)->Column#24",
+ " │ └─Selection 9980.01 batchCop[tiflash] not(isnull(test.t.id)), not(isnull(test.t.value))",
+ " │ └─TableFullScan 10000.00 batchCop[tiflash] table:t keep order:false, stats:pseudo",
 " └─ExchangeReceiver(Probe) 10000.00 batchCop[tiflash] ",
 " └─ExchangeSender 10000.00 batchCop[tiflash] ExchangeType: HashPartition, Hash Cols: [name: test.t.id, collate: N/A]",
 " └─TableFullScan 10000.00 batchCop[tiflash] table:B keep order:false, stats:pseudo"
diff --git a/sessionctx/variable/sysvar.go b/sessionctx/variable/sysvar.go
index b3627bd10a435..ddfff2133a71f 100644
--- a/sessionctx/variable/sysvar.go
+++ b/sessionctx/variable/sysvar.go
@@ -881,7 +881,7 @@ var defaultSysVars = []*SysVar{
 	}},
 	{Scope: ScopeGlobal | ScopeSession, Name: TiDBOptBCJ, Value: BoolToOnOff(DefOptBCJ), Type: TypeBool, Validation: func(vars *SessionVars, normalizedValue string, originalValue string, scope ScopeFlag) (string, error) {
 		if TiDBOptOn(normalizedValue) && vars.AllowBatchCop == 0 {
-			return normalizedValue, ErrWrongValueForVar.GenWithStackByArgs("Can't set Broadcast Join to 1 but tidb_allow_batch_cop is 0, please active batch cop at first.")
+			return normalizedValue, ErrWrongValueForVar.GenWithStackByArgs(TiDBOptBCJ, "'true' while tidb_allow_batch_cop is 0, please activate batch cop first.")
 		}
 		return normalizedValue, nil
 	}, SetSession: func(s *SessionVars, val string) error {
@@ -1051,7 +1051,7 @@ var defaultSysVars = []*SysVar{
 	}},
 	{Scope: ScopeGlobal | ScopeSession, Name: TiDBAllowBatchCop, Value: strconv.Itoa(DefTiDBAllowBatchCop), Type: TypeInt, MinValue: 0, MaxValue: 2, Validation: func(vars *SessionVars, normalizedValue string, originalValue string, scope ScopeFlag) (string, error) {
 		if normalizedValue == "0" && vars.AllowBCJ {
-			return normalizedValue, ErrWrongValueForVar.GenWithStackByArgs("Can't set batch cop 0 but tidb_opt_broadcast_join is 1, please set tidb_opt_broadcast_join 0 at first")
+			return normalizedValue, ErrWrongValueForVar.GenWithStackByArgs(TiDBAllowBatchCop, "'0' while tidb_opt_broadcast_join is true, please set tidb_opt_broadcast_join to false first")
 		}
 		return normalizedValue, nil
 	}, SetSession: func(s *SessionVars, val string) error {