diff --git a/ddl/db_integration_test.go b/ddl/db_integration_test.go
index 9f258dbe165bb..72793151ab32f 100644
--- a/ddl/db_integration_test.go
+++ b/ddl/db_integration_test.go
@@ -16,6 +16,7 @@ package ddl_test
 import (
 	"context"
 	"fmt"
+	"strconv"
 	"strings"
 	"sync/atomic"
 	"time"
@@ -1935,3 +1936,107 @@ func (s *testIntegrationSuite3) TestForeignKeyOnUpdateOnDelete(c *C) {
 	tk.MustExec("create table t5 (a int, b int, foreign key (b) references t (a) on update restrict)")
 	tk.MustExec("create table t6 (a int, b int, foreign key (b) references t (a) on update restrict on delete restrict)")
 }
+
+// TestCreateTableWithAutoIdCache tests the auto_id_cache table option.
+// `auto_id_cache` takes effect on the handle too when `PKIsHandle` is false,
+// or even when there is no auto_increment column at all.
+func (s *testIntegrationSuite3) TestCreateTableWithAutoIdCache(c *C) {
+	tk := testkit.NewTestKit(c, s.store)
+	tk.MustExec("USE test;")
+	tk.MustExec("drop table if exists t;")
+	tk.MustExec("drop table if exists t1;")
+
+	// Test primary key is handle.
+	tk.MustExec("create table t(a int auto_increment key) auto_id_cache 100")
+	tblInfo, err := s.dom.InfoSchema().TableByName(model.NewCIStr("test"), model.NewCIStr("t"))
+	c.Assert(err, IsNil)
+	c.Assert(tblInfo.Meta().AutoIdCache, Equals, int64(100))
+	tk.MustExec("insert into t values()")
+	tk.MustQuery("select * from t").Check(testkit.Rows("1"))
+	tk.MustExec("delete from t")
+
+	// Invalidate the allocator cache; the next insert will trigger a new cache.
+	tk.MustExec("rename table t to t1;")
+	tk.MustExec("insert into t1 values()")
+	tk.MustQuery("select * from t1").Check(testkit.Rows("101"))
+
+	// Test primary key is not handle.
+	tk.MustExec("drop table if exists t;")
+	tk.MustExec("drop table if exists t1;")
+	tk.MustExec("create table t(a int) auto_id_cache 100")
+	tblInfo, err = s.dom.InfoSchema().TableByName(model.NewCIStr("test"), model.NewCIStr("t"))
+	c.Assert(err, IsNil)
+
+	tk.MustExec("insert into t values()")
+	tk.MustQuery("select _tidb_rowid from t").Check(testkit.Rows("1"))
+	tk.MustExec("delete from t")
+
+	// Invalidate the allocator cache; the next insert will trigger a new cache.
+	tk.MustExec("rename table t to t1;")
+	tk.MustExec("insert into t1 values()")
+	tk.MustQuery("select _tidb_rowid from t1").Check(testkit.Rows("101"))
+
+	// Test both auto_increment and rowid exist.
+	tk.MustExec("drop table if exists t;")
+	tk.MustExec("drop table if exists t1;")
+	tk.MustExec("create table t(a int null, b int auto_increment unique) auto_id_cache 100")
+	tblInfo, err = s.dom.InfoSchema().TableByName(model.NewCIStr("test"), model.NewCIStr("t"))
+	c.Assert(err, IsNil)
+
+	tk.MustExec("insert into t(b) values(NULL)")
+	tk.MustQuery("select b, _tidb_rowid from t").Check(testkit.Rows("1 2"))
+	tk.MustExec("delete from t")
+
+	// Invalidate the allocator cache; the next insert will trigger a new cache.
+	tk.MustExec("rename table t to t1;")
+	tk.MustExec("insert into t1(b) values(NULL)")
+	tk.MustQuery("select b, _tidb_rowid from t1").Check(testkit.Rows("101 102"))
+	tk.MustExec("delete from t1")
+
+	// Test alter auto_id_cache.
+	tk.MustExec("alter table t1 auto_id_cache 200")
+	tblInfo, err = s.dom.InfoSchema().TableByName(model.NewCIStr("test"), model.NewCIStr("t1"))
+	c.Assert(err, IsNil)
+	c.Assert(tblInfo.Meta().AutoIdCache, Equals, int64(200))
+
+	tk.MustExec("insert into t1(b) values(NULL)")
+	tk.MustQuery("select b, _tidb_rowid from t1").Check(testkit.Rows("201 202"))
+	tk.MustExec("delete from t1")
+
+	// Invalidate the allocator cache; the next insert will trigger a new cache.
+ tk.MustExec("rename table t1 to t;") + tk.MustExec("insert into t(b) values(NULL)") + tk.MustQuery("select b, _tidb_rowid from t").Check(testkit.Rows("401 402")) + tk.MustExec("delete from t") + + tk.MustExec("drop table if exists t;") + tk.MustExec("drop table if exists t1;") + tk.MustExec("create table t(a int auto_increment key) auto_id_cache 3") + tblInfo, err = s.dom.InfoSchema().TableByName(model.NewCIStr("test"), model.NewCIStr("t")) + c.Assert(err, IsNil) + c.Assert(tblInfo.Meta().AutoIdCache, Equals, int64(3)) + + // Test insert batch size(4 here) greater than the customized autoid step(3 here). + tk.MustExec("insert into t(a) values(NULL),(NULL),(NULL),(NULL)") + tk.MustQuery("select a from t").Check(testkit.Rows("1", "2", "3", "4")) + tk.MustExec("delete from t") + + // Invalid the allocator cache, insert will trigger a new cache. + tk.MustExec("rename table t to t1;") + tk.MustExec("insert into t1(a) values(NULL)") + next := tk.MustQuery("select a from t1").Rows()[0][0].(string) + nextInt, err := strconv.Atoi(next) + c.Assert(err, IsNil) + c.Assert(nextInt, Greater, 5) + + // Test auto_id_cache overflows int64. + tk.MustExec("drop table if exists t;") + _, err = tk.Exec("create table t(a int) auto_id_cache = 9223372036854775808") + c.Assert(err, NotNil) + c.Assert(err.Error(), Equals, "table option auto_id_cache overflows int64") + + tk.MustExec("create table t(a int) auto_id_cache = 9223372036854775807") + _, err = tk.Exec("alter table t auto_id_cache = 9223372036854775808") + c.Assert(err, NotNil) + c.Assert(err.Error(), Equals, "table option auto_id_cache overflows int64") +} diff --git a/ddl/ddl_api.go b/ddl/ddl_api.go index 45b6cf4c37c06..195aa2fc98ed4 100644 --- a/ddl/ddl_api.go +++ b/ddl/ddl_api.go @@ -21,6 +21,7 @@ import ( "bytes" "context" "fmt" + "math" "strconv" "strings" "sync/atomic" @@ -1747,7 +1748,12 @@ func checkCharsetAndCollation(cs string, co string) error { // handleAutoIncID handles auto_increment option in DDL. It creates a ID counter for the table and initiates the counter to a proper value. // For example if the option sets auto_increment to 10. The counter will be set to 9. So the next allocated ID will be 10. func (d *ddl) handleAutoIncID(tbInfo *model.TableInfo, schemaID int64) error { - alloc := autoid.NewAllocator(d.store, tbInfo.GetDBID(schemaID), tbInfo.IsAutoIncColUnsigned()) + var alloc autoid.Allocator + if tbInfo.AutoIdCache > 0 { + alloc = autoid.NewAllocator(d.store, tbInfo.GetDBID(schemaID), tbInfo.IsAutoIncColUnsigned(), autoid.CustomAutoIncCacheOption(tbInfo.AutoIdCache)) + } else { + alloc = autoid.NewAllocator(d.store, tbInfo.GetDBID(schemaID), tbInfo.IsAutoIncColUnsigned()) + } tbInfo.State = model.StatePublic tb, err := table.TableFromMeta(alloc, tbInfo) if err != nil { @@ -1783,6 +1789,12 @@ func handleTableOptions(options []*ast.TableOption, tbInfo *model.TableInfo) err switch op.Tp { case ast.TableOptionAutoIncrement: tbInfo.AutoIncID = int64(op.UintValue) + case ast.TableOptionAutoIdCache: + if op.UintValue > uint64(math.MaxInt64) { + // TODO: Refine this error. 
+				return errors.New("table option auto_id_cache overflows int64")
+			}
+			tbInfo.AutoIdCache = int64(op.UintValue)
 		case ast.TableOptionComment:
 			tbInfo.Comment = op.StrValue
 		case ast.TableOptionCompression:
@@ -1967,6 +1979,12 @@ func (d *ddl) AlterTable(ctx sessionctx.Context, ident ast.Ident, specs []*ast.A
 				err = d.ShardRowID(ctx, ident, opt.UintValue)
 			case ast.TableOptionAutoIncrement:
 				err = d.RebaseAutoID(ctx, ident, int64(opt.UintValue))
+			case ast.TableOptionAutoIdCache:
+				if opt.UintValue > uint64(math.MaxInt64) {
+					// TODO: Refine this error.
+					return errors.New("table option auto_id_cache overflows int64")
+				}
+				err = d.AlterTableAutoIDCache(ctx, ident, int64(opt.UintValue))
 			case ast.TableOptionComment:
 				spec.Comment = opt.StrValue
 				err = d.AlterTableComment(ctx, ident, spec)
@@ -2879,6 +2897,27 @@ func (d *ddl) AlterTableComment(ctx sessionctx.Context, ident ast.Ident, spec *a
 	return errors.Trace(err)
 }
+
+// AlterTableAutoIDCache updates the auto_id_cache option of the table.
+func (d *ddl) AlterTableAutoIDCache(ctx sessionctx.Context, ident ast.Ident, newCache int64) error {
+	schema, tb, err := d.getSchemaAndTableByIdent(ctx, ident)
+	if err != nil {
+		return errors.Trace(err)
+	}
+
+	job := &model.Job{
+		SchemaID:   schema.ID,
+		TableID:    tb.Meta().ID,
+		SchemaName: schema.Name.L,
+		Type:       model.ActionModifyTableAutoIdCache,
+		BinlogInfo: &model.HistoryInfo{},
+		Args:       []interface{}{newCache},
+	}
+
+	err = d.doDDLJob(ctx, job)
+	err = d.callHookOnChanged(err)
+	return errors.Trace(err)
+}
+
 // AlterTableCharset changes the table charset and collate.
 func (d *ddl) AlterTableCharsetAndCollate(ctx sessionctx.Context, ident ast.Ident, toCharset, toCollate string) error {
 	// use the last one.
diff --git a/ddl/ddl_worker.go b/ddl/ddl_worker.go
index 8cbd9439fbbbe..9b6b6d64703c9 100644
--- a/ddl/ddl_worker.go
+++ b/ddl/ddl_worker.go
@@ -597,6 +597,8 @@ func (w *worker) runDDLJob(d *ddlCtx, t *meta.Meta, job *model.Job) (ver int64,
 		ver, err = w.onShardRowID(d, t, job)
 	case model.ActionModifyTableComment:
 		ver, err = onModifyTableComment(t, job)
+	case model.ActionModifyTableAutoIdCache:
+		ver, err = onModifyTableAutoIDCache(t, job)
 	case model.ActionAddTablePartition:
 		ver, err = onAddTablePartition(d, t, job)
 	case model.ActionModifyTableCharsetAndCollate:
diff --git a/ddl/rollingback.go b/ddl/rollingback.go
index 21040b72b3c44..0515ecb0eca4f 100644
--- a/ddl/rollingback.go
+++ b/ddl/rollingback.go
@@ -295,7 +295,7 @@ func convertJob2RollbackJob(w *worker, d *ddlCtx, t *meta.Meta, job *model.Job)
 		model.ActionModifyColumn, model.ActionAddForeignKey,
 		model.ActionDropForeignKey, model.ActionRenameTable,
 		model.ActionModifyTableCharsetAndCollate, model.ActionTruncateTablePartition,
-		model.ActionModifySchemaCharsetAndCollate:
+		model.ActionModifySchemaCharsetAndCollate, model.ActionModifyTableAutoIdCache:
 		ver, err = cancelOnlyNotHandledJob(job)
 	default:
 		job.State = model.JobStateCancelled
diff --git a/ddl/table.go b/ddl/table.go
index a44368195fb9e..7bf898ae24633 100644
--- a/ddl/table.go
+++ b/ddl/table.go
@@ -339,7 +339,12 @@ func checkSafePoint(w *worker, snapshotTS uint64) error {
 }
 
 func getTable(store kv.Storage, schemaID int64, tblInfo *model.TableInfo) (table.Table, error) {
-	alloc := autoid.NewAllocator(store, tblInfo.GetDBID(schemaID), tblInfo.IsAutoIncColUnsigned())
+	var alloc autoid.Allocator
+	if tblInfo.AutoIdCache > 0 {
+		alloc = autoid.NewAllocator(store, tblInfo.GetDBID(schemaID), tblInfo.IsAutoIncColUnsigned(), autoid.CustomAutoIncCacheOption(tblInfo.AutoIdCache))
+	} else {
+		alloc = autoid.NewAllocator(store, tblInfo.GetDBID(schemaID), tblInfo.IsAutoIncColUnsigned())
+	}
 	tbl, err := table.TableFromMeta(alloc, tblInfo)
 	return tbl, errors.Trace(err)
 }
@@ -487,6 +492,27 @@ func onRebaseAutoID(store kv.Storage, t *meta.Meta, job *model.Job) (ver int64,
 	return ver, nil
 }
+
+func onModifyTableAutoIDCache(t *meta.Meta, job *model.Job) (int64, error) {
+	var cache int64
+	if err := job.DecodeArgs(&cache); err != nil {
+		job.State = model.JobStateCancelled
+		return 0, errors.Trace(err)
+	}
+
+	tblInfo, err := getTableInfoAndCancelFaultJob(t, job, job.SchemaID)
+	if err != nil {
+		return 0, errors.Trace(err)
+	}
+
+	tblInfo.AutoIdCache = cache
+	ver, err := updateVersionAndTableInfo(t, job, tblInfo, true)
+	if err != nil {
+		return ver, errors.Trace(err)
+	}
+	job.FinishTableJob(model.JobStateDone, model.StatePublic, ver, tblInfo)
+	return ver, nil
+}
+
 func (w *worker) onShardRowID(d *ddlCtx, t *meta.Meta, job *model.Job) (ver int64, _ error) {
 	var shardRowIDBits uint64
 	err := job.DecodeArgs(&shardRowIDBits)
diff --git a/executor/show.go b/executor/show.go
index f0ee081d2914c..c5e30b92d903a 100644
--- a/executor/show.go
+++ b/executor/show.go
@@ -883,6 +883,10 @@ func (e *ShowExec) fetchShowCreateTable() error {
 		}
 	}
+
+	if tb.Meta().AutoIdCache != 0 {
+		fmt.Fprintf(&buf, " /*T![auto_id_cache] AUTO_ID_CACHE=%d */", tb.Meta().AutoIdCache)
+	}
+
 	if tb.Meta().ShardRowIDBits > 0 {
 		fmt.Fprintf(&buf, "/*!90000 SHARD_ROW_ID_BITS=%d ", tb.Meta().ShardRowIDBits)
 		if tb.Meta().PreSplitRegions > 0 {
diff --git a/executor/show_test.go b/executor/show_test.go
index 0cf7eb2d517f1..122b0fa8d82ea 100644
--- a/executor/show_test.go
+++ b/executor/show_test.go
@@ -631,6 +631,43 @@ func (s *testSuite2) TestShowCreateTable(c *C) {
 	))
 }
+
+// TestAutoIdCache checks the SHOW CREATE TABLE output of tables created with the auto_id_cache option.
+func (s *testSuite2) TestAutoIdCache(c *C) {
+	tk := testkit.NewTestKit(c, s.store)
+	tk.MustExec("use test")
+
+	tk.MustExec("drop table if exists t")
+	tk.MustExec("create table t(a int auto_increment key) auto_id_cache = 10")
+	tk.MustQuery("show create table t").Check(testutil.RowsWithSep("|",
+		""+
+			"t CREATE TABLE `t` (\n"+
+			"  `a` int(11) NOT NULL AUTO_INCREMENT,\n"+
+			"  PRIMARY KEY (`a`)\n"+
+			") ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin /*T![auto_id_cache] AUTO_ID_CACHE=10 */",
+	))
+	tk.MustExec("drop table if exists t")
+	tk.MustExec("create table t(a int auto_increment unique, b int key) auto_id_cache 100")
+	tk.MustQuery("show create table t").Check(testutil.RowsWithSep("|",
+		""+
+			"t CREATE TABLE `t` (\n"+
+			"  `a` int(11) NOT NULL AUTO_INCREMENT,\n"+
+			"  `b` int(11) NOT NULL,\n"+
+			"  PRIMARY KEY (`b`),\n"+
+			"  UNIQUE KEY `a` (`a`)\n"+
+			") ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin /*T![auto_id_cache] AUTO_ID_CACHE=100 */",
+	))
+	tk.MustExec("drop table if exists t")
+	tk.MustExec("create table t(a int key) auto_id_cache 5")
+	tk.MustQuery("show create table t").Check(testutil.RowsWithSep("|",
+		""+
+			"t CREATE TABLE `t` (\n"+
+			"  `a` int(11) NOT NULL,\n"+
+			"  PRIMARY KEY (`a`)\n"+
+			") ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin /*T![auto_id_cache] AUTO_ID_CACHE=5 */",
+	))
+	tk.MustExec("drop table if exists t")
+}
+
 func (s *testSuite2) TestShowEscape(c *C) {
 	tk := testkit.NewTestKit(c, s.store)
diff --git a/go.mod b/go.mod
index 31b8ed8265ab0..bf15dee371cd2 100644
--- a/go.mod
+++ b/go.mod
@@ -38,7 +38,7 @@ require (
 	github.com/pingcap/goleveldb v0.0.0-20171020122428-b9ff6c35079e
 	github.com/pingcap/kvproto v0.0.0-20200311073257-e53d835099b0
 	github.com/pingcap/log v0.0.0-20190715063458-479153f07ebd
-	github.com/pingcap/parser v3.0.13-0.20200428065913-b93e38b9c865+incompatible
+	github.com/pingcap/parser v3.0.13-0.20200428073321-7e525711e35d+incompatible
 	github.com/pingcap/pd v1.1.0-beta.0.20191223090411-ea2b748f6ee2
 	github.com/pingcap/tidb-tools v3.0.6-0.20191119150227-ff0a3c6e5763+incompatible
 	github.com/pingcap/tipb v0.0.0-20200401051346-bec3080a5428
diff --git a/go.sum b/go.sum
index f40260c0b6105..d08f490bf1c27 100644
--- a/go.sum
+++ b/go.sum
@@ -157,8 +157,8 @@ github.com/pingcap/kvproto v0.0.0-20200311073257-e53d835099b0 h1:dXXNHvDwAEN1YNg
 github.com/pingcap/kvproto v0.0.0-20200311073257-e53d835099b0/go.mod h1:QMdbTAXCHzzygQzqcG9uVUgU2fKeSN1GmfMiykdSzzY=
 github.com/pingcap/log v0.0.0-20190715063458-479153f07ebd h1:hWDol43WY5PGhsh3+8794bFHY1bPrmu6bTalpssCrGg=
 github.com/pingcap/log v0.0.0-20190715063458-479153f07ebd/go.mod h1:WpHUKhNZ18v116SvGrmjkA9CBhYmuUTKL+p8JC9ANEw=
-github.com/pingcap/parser v3.0.13-0.20200428065913-b93e38b9c865+incompatible h1:rW5mMhtEXf8A/Kx+1l48x3N6RWJ4zXmqoE925YH3Vqg=
-github.com/pingcap/parser v3.0.13-0.20200428065913-b93e38b9c865+incompatible/go.mod h1:1FNvfp9+J0wvc4kl8eGNh7Rqrxveg15jJoWo/a0uHwA=
+github.com/pingcap/parser v3.0.13-0.20200428073321-7e525711e35d+incompatible h1:t/vYVkoW99IdMdDKCL1SbvHRvsbcg5fwTatehu2iDoU=
+github.com/pingcap/parser v3.0.13-0.20200428073321-7e525711e35d+incompatible/go.mod h1:1FNvfp9+J0wvc4kl8eGNh7Rqrxveg15jJoWo/a0uHwA=
 github.com/pingcap/pd v1.1.0-beta.0.20191223090411-ea2b748f6ee2 h1:NL23b8tsg6M1QpSQedK14/Jx++QeyKL2rGiBvXAQVfA=
 github.com/pingcap/pd v1.1.0-beta.0.20191223090411-ea2b748f6ee2/go.mod h1:b4gaAPSxaVVtaB+EHamV4Nsv8JmTdjlw0cTKmp4+dRQ=
 github.com/pingcap/tidb-tools v3.0.6-0.20191119150227-ff0a3c6e5763+incompatible h1:I8HirWsu1MZp6t9G/g8yKCEjJJxtHooKakEgccvdJ4M=
diff --git a/infoschema/builder.go b/infoschema/builder.go
index 61120075ab862..2d88648c98862 100644
--- a/infoschema/builder.go
+++ b/infoschema/builder.go
@@ -73,7 +73,7 @@ func (b *Builder) ApplyDiff(m *meta.Meta, diff *model.SchemaDiff) ([]int64, erro
 	// We try to reuse the old allocator, so the cached auto ID can be reused.
 	var alloc autoid.Allocator
 	if tableIDIsValid(oldTableID) {
-		if oldTableID == newTableID && diff.Type != model.ActionRenameTable && diff.Type != model.ActionRebaseAutoID {
+		if oldTableID == newTableID && diff.Type != model.ActionRenameTable && diff.Type != model.ActionRebaseAutoID && diff.Type != model.ActionModifyTableAutoIdCache {
 			alloc, _ = b.is.AllocByID(oldTableID)
 		}
@@ -213,7 +213,11 @@ func (b *Builder) applyCreateTable(m *meta.Meta, dbInfo *model.DBInfo, tableID i
 	if alloc == nil {
 		schemaID := dbInfo.ID
-		alloc = autoid.NewAllocator(b.handle.store, tblInfo.GetDBID(schemaID), tblInfo.IsAutoIncColUnsigned())
+		if tblInfo.AutoIdCache > 0 {
+			alloc = autoid.NewAllocator(b.handle.store, tblInfo.GetDBID(schemaID), tblInfo.IsAutoIncColUnsigned(), autoid.CustomAutoIncCacheOption(tblInfo.AutoIdCache))
+		} else {
+			alloc = autoid.NewAllocator(b.handle.store, tblInfo.GetDBID(schemaID), tblInfo.IsAutoIncColUnsigned())
+		}
 	}
 	tbl, err := tables.TableFromMeta(alloc, tblInfo)
 	if err != nil {
@@ -360,7 +364,12 @@ func (b *Builder) createSchemaTablesForDB(di *model.DBInfo, tableFromMeta tableF
 	b.is.schemaMap[di.Name.L] = schTbls
 	for _, t := range di.Tables {
 		schemaID := di.ID
-		alloc := autoid.NewAllocator(b.handle.store, t.GetDBID(schemaID), t.IsAutoIncColUnsigned())
+		var alloc autoid.Allocator
+		if t.AutoIdCache > 0 {
+			alloc = autoid.NewAllocator(b.handle.store, t.GetDBID(schemaID), t.IsAutoIncColUnsigned(), autoid.CustomAutoIncCacheOption(t.AutoIdCache))
+		} else {
+			alloc = autoid.NewAllocator(b.handle.store, t.GetDBID(schemaID), t.IsAutoIncColUnsigned())
+		}
 		var tbl table.Table
 		tbl, err := tableFromMeta(alloc, t)
 		if err != nil {
diff --git a/meta/autoid/autoid.go b/meta/autoid/autoid.go
index 03cb0014557ad..f5ad8c78f1fc3 100755
--- a/meta/autoid/autoid.go
+++ b/meta/autoid/autoid.go
@@ -44,6 +44,20 @@ var step = int64(30000)
 var errInvalidTableID = terror.ClassAutoid.New(codeInvalidTableID, "invalid TableID")
+
+// CustomAutoIncCacheOption is an AllocOption that customizes the allocator step length.
+type CustomAutoIncCacheOption int64
+
+// ApplyOn implements the AllocOption interface.
+func (step CustomAutoIncCacheOption) ApplyOn(alloc *allocator) {
+	alloc.step = int64(step)
+	alloc.customStep = true
+}
+
+// AllocOption is an interface for defining custom allocator options; more may come in the future.
+type AllocOption interface {
+	ApplyOn(*allocator)
+}
+
 // Allocator is an auto increment id generator.
 // Just keep id unique actually.
 type Allocator interface {
@@ -78,6 +92,7 @@ type allocator struct {
 	isUnsigned    bool
 	lastAllocTime time.Time
 	step          int64
+	customStep    bool
 }
 
 // GetStep is only used by tests
@@ -247,14 +262,18 @@ func NextStep(curStep int64, consumeDur time.Duration) int64 {
 }
 
 // NewAllocator returns a new auto increment id generator on the store.
-func NewAllocator(store kv.Storage, dbID int64, isUnsigned bool) Allocator {
-	return &allocator{
+func NewAllocator(store kv.Storage, dbID int64, isUnsigned bool, opts ...AllocOption) Allocator {
+	alloc := &allocator{
 		store:         store,
 		dbID:          dbID,
 		isUnsigned:    isUnsigned,
 		step:          step,
 		lastAllocTime: time.Now(),
 	}
+	for _, fn := range opts {
+		fn.ApplyOn(alloc)
+	}
+	return alloc
 }
 
 //codeInvalidTableID is the code of autoid error.
@@ -362,13 +381,18 @@ func (alloc *allocator) alloc4Signed(tableID int64, n uint64, increment, offset
 	if alloc.base+n1 > alloc.end {
 		var newBase, newEnd int64
 		startTime := time.Now()
-		// Although it may skip a segment here, we still think it is consumed.
-		consumeDur := startTime.Sub(alloc.lastAllocTime)
-		nextStep := NextStep(alloc.step, consumeDur)
-		// Make sure nextStep is big enough.
+		nextStep := alloc.step
+		if !alloc.customStep {
+			// Although it may skip a segment here, we still think it is consumed.
+			consumeDur := startTime.Sub(alloc.lastAllocTime)
+			nextStep = NextStep(alloc.step, consumeDur)
+		}
+		// Even if the step is customized by the user, we still need to make sure nextStep is big enough for the insert batch.
 		if nextStep <= n1 {
-			alloc.step = mathutil.MinInt64(n1*2, maxStep)
-		} else {
+			nextStep = mathutil.MinInt64(n1*2, maxStep)
+		}
+		// Store the step for the non-customized-step allocator to calculate the next dynamic step.
+		if !alloc.customStep {
 			alloc.step = nextStep
 		}
 		err := kv.RunInNewTxn(alloc.store, true, func(txn kv.Transaction) error {
@@ -378,7 +402,7 @@ func (alloc *allocator) alloc4Signed(tableID int64, n uint64, increment, offset
 			if err1 != nil {
 				return err1
 			}
-			tmpStep := mathutil.MinInt64(math.MaxInt64-newBase, alloc.step)
+			tmpStep := mathutil.MinInt64(math.MaxInt64-newBase, nextStep)
 			// The global rest is not enough for alloc.
 			if tmpStep < n1 {
 				return ErrAutoincReadFailed
@@ -424,13 +448,18 @@ func (alloc *allocator) alloc4Unsigned(tableID int64, n uint64, increment, offse
 	if uint64(alloc.base)+uint64(n1) > uint64(alloc.end) {
 		var newBase, newEnd int64
 		startTime := time.Now()
-		// Although it may skip a segment here, we still treat it as consumed.
-		consumeDur := startTime.Sub(alloc.lastAllocTime)
-		nextStep := NextStep(alloc.step, consumeDur)
-		// Make sure nextStep is big enough.
+		nextStep := alloc.step
+		if !alloc.customStep {
+			// Although it may skip a segment here, we still treat it as consumed.
+			consumeDur := startTime.Sub(alloc.lastAllocTime)
+			nextStep = NextStep(alloc.step, consumeDur)
+		}
+		// Even if the step is customized by the user, we still need to make sure nextStep is big enough for the insert batch.
 		if nextStep <= n1 {
-			alloc.step = mathutil.MinInt64(n1*2, maxStep)
-		} else {
+			nextStep = mathutil.MinInt64(n1*2, maxStep)
+		}
+		// Store the step for the non-customized-step allocator to calculate the next dynamic step.
+		if !alloc.customStep {
 			alloc.step = nextStep
 		}
 		err := kv.RunInNewTxn(alloc.store, true, func(txn kv.Transaction) error {
@@ -440,7 +469,7 @@ func (alloc *allocator) alloc4Unsigned(tableID int64, n uint64, increment, offse
 			if err1 != nil {
 				return err1
 			}
-			tmpStep := int64(mathutil.MinUint64(math.MaxUint64-uint64(newBase), uint64(alloc.step)))
+			tmpStep := int64(mathutil.MinUint64(math.MaxUint64-uint64(newBase), uint64(nextStep)))
 			// The global rest is not enough for alloc.
 			if tmpStep < n1 {
 				return ErrAutoincReadFailed
diff --git a/sessionctx/binloginfo/binloginfo.go b/sessionctx/binloginfo/binloginfo.go
index 638da8dd42b73..3a7f6efeb0b73 100644
--- a/sessionctx/binloginfo/binloginfo.go
+++ b/sessionctx/binloginfo/binloginfo.go
@@ -30,6 +30,7 @@ import (
 	"github.com/pingcap/tidb/kv"
 	"github.com/pingcap/tidb/metrics"
 	"github.com/pingcap/tidb/sessionctx"
+	driver "github.com/pingcap/tidb/types/parser_driver"
 	"github.com/pingcap/tidb/util/logutil"
 	"github.com/pingcap/tipb/go-binlog"
 	"go.uber.org/zap"
@@ -302,14 +303,18 @@ const specialPrefix = `/*!90000 `
 // AddSpecialComment uses to add comment for table option in DDL query.
 // Export for testing.
 func AddSpecialComment(ddlQuery string) string {
-	if strings.Contains(ddlQuery, specialPrefix) {
+	if strings.Contains(ddlQuery, specialPrefix) || strings.Contains(ddlQuery, driver.SpecialCommentVersionPrefix) {
 		return ddlQuery
 	}
-	return addSpecialCommentByRegexps(ddlQuery, shardPat, preSplitPat)
+	ddlQuery = addSpecialCommentByRegexps(ddlQuery, specialPrefix, shardPat, preSplitPat)
+	for featureID, pattern := range driver.FeatureIDPatterns {
+		ddlQuery = addSpecialCommentByRegexps(ddlQuery, driver.BuildSpecialCommentPrefix(featureID), pattern)
+	}
+	return ddlQuery
 }
 
 // addSpecialCommentByRegexps uses to add special comment for the worlds in the ddlQuery with match the regexps.
-func addSpecialCommentByRegexps(ddlQuery string, regs ...*regexp.Regexp) string {
+func addSpecialCommentByRegexps(ddlQuery string, prefix string, regs ...*regexp.Regexp) string {
 	upperQuery := strings.ToUpper(ddlQuery)
 	var specialComments []string
 	minIdx := math.MaxInt64
@@ -328,7 +333,7 @@ func addSpecialCommentByRegexps(ddlQuery string, regs ...*regexp.Regexp) string
 		upperQuery = upperQuery[:loc[0]] + upperQuery[loc[1]:]
 	}
 	if minIdx != math.MaxInt64 {
-		query := ddlQuery[:minIdx] + specialPrefix
+		query := ddlQuery[:minIdx] + prefix
 		for _, comment := range specialComments {
 			if query[len(query)-1] != ' ' {
 				query += " "
diff --git a/sessionctx/binloginfo/binloginfo_test.go b/sessionctx/binloginfo/binloginfo_test.go
index dead446276f9a..dda3b6e79fc48 100644
--- a/sessionctx/binloginfo/binloginfo_test.go
+++ b/sessionctx/binloginfo/binloginfo_test.go
@@ -467,6 +467,26 @@ func (s *testBinlogSuite) TestAddSpecialComment(c *C) {
 			"alter table t shard_row_id_bits=2 ",
 			"alter table t /*!90000 shard_row_id_bits=2 */",
 		},
+		{
+			"create table t1 (id int auto_increment key) auto_id_cache 100;",
+			"create table t1 (id int auto_increment key) /*T![auto_id_cache] auto_id_cache 100 */ ;",
+		},
+		{
+			"create table t1 (id int auto_increment unique) auto_id_cache 10;",
+			"create table t1 (id int auto_increment unique) /*T![auto_id_cache] auto_id_cache 10 */ ;",
+		},
+		{
+			"create table t1 (id int) auto_id_cache = 5;",
+			"create table t1 (id int) /*T![auto_id_cache] auto_id_cache = 5 */ ;",
+		},
+		{
+			"create table t1 (id int) auto_id_cache=5;",
+			"create table t1 (id int) /*T![auto_id_cache] auto_id_cache=5 */ ;",
+		},
+		{
+			"create table t1 (id int) /*T![auto_id_cache] auto_id_cache=5 */ ;",
+			"create table t1 (id int) /*T![auto_id_cache] auto_id_cache=5 */ ;",
+		},
 	}
 	for _, ca := range testCase {
 		re := binloginfo.AddSpecialComment(ca.input)
diff --git a/types/parser_driver/special_cmt_ctrl.go b/types/parser_driver/special_cmt_ctrl.go
new file mode 100644
index 0000000000000..c7d7398c0e4b5
--- /dev/null
+++ b/types/parser_driver/special_cmt_ctrl.go
@@ -0,0 +1,60 @@
+// Copyright 2020 PingCAP, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package driver
+
+import (
+	"fmt"
+	"regexp"
+
+	"github.com/pingcap/parser"
+)
+
+// To add new features that need to be downgrade-compatible,
+// 1. Define a featureID below and make sure it is unique.
+//    For example, `const FeatureIDMyFea = "my_fea"`.
+// 2. Register the new featureID in init().
+//    Only a parser that has registered `my_fea` can parse the comment annotated with it.
+//    Now, the parser treats `/*T![my_fea] what_ever */` and `what_ever` as equivalent.
+//    In other words, the parser in an old-version TiDB will ignore these comments.
+// 3. [optional] Add a pattern into FeatureIDPatterns.
+//    This is only required if the new feature is contained in DDL,
+//    and we want to comment out this part of the SQL in the binlog.
+func init() {
+	parser.SpecialCommentsController.Register(string(FeatureIDAutoIDCache))
+}
+
+// SpecialCommentVersionPrefix is the prefix of TiDB executable comments.
+const SpecialCommentVersionPrefix = `/*T!`
+
+// BuildSpecialCommentPrefix returns the prefix of the `featureID` special comment.
+// For some special features in TiDB, we refine the DDL query with a special comment,
+// which may be useful when:
+// A: the downstream is a MySQL instance (treat it as a comment for compatibility).
+// B: the downstream is a lower-version TiDB (ignore the unknown feature comment).
+// C: the downstream is a same- or higher-version TiDB (parse the feature syntax out).
+func BuildSpecialCommentPrefix(featureID featureID) string {
+	return fmt.Sprintf("%s[%s]", SpecialCommentVersionPrefix, featureID)
+}
+
+type featureID string
+
+const (
+	// FeatureIDAutoIDCache is the `auto_id_cache` feature.
+	FeatureIDAutoIDCache featureID = "auto_id_cache"
+)
+
+// FeatureIDPatterns is used to record special comment patterns.
+var FeatureIDPatterns = map[featureID]*regexp.Regexp{
+	FeatureIDAutoIDCache: regexp.MustCompile(`(?i)AUTO_ID_CACHE\s*=?\s*\d+\s*`),
+}
diff --git a/util/admin/admin.go b/util/admin/admin.go
index 3e471360bbca6..d8d8c579d9cb3 100644
--- a/util/admin/admin.go
+++ b/util/admin/admin.go
@@ -107,7 +107,7 @@ func IsJobRollbackable(job *model.Job) bool {
 		model.ActionTruncateTable, model.ActionAddForeignKey,
 		model.ActionDropForeignKey, model.ActionRenameTable,
 		model.ActionModifyTableCharsetAndCollate, model.ActionTruncateTablePartition,
-		model.ActionModifySchemaCharsetAndCollate:
+		model.ActionModifySchemaCharsetAndCollate, model.ActionModifyTableAutoIdCache:
 		return job.SchemaState == model.StateNone
 	}
 	return true
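
For illustration, the AllocOption mechanism added in meta/autoid/autoid.go is a small functional-option pattern: the option value itself carries the user's AUTO_ID_CACHE size, and applying it pins the allocator step and switches off the NextStep-based dynamic step growth in alloc4Signed/alloc4Unsigned. A minimal standalone sketch of that pattern follows; the allocator here is a simplified stand-in with only the relevant fields, not TiDB's real one:

package main

import "fmt"

// allocator is a cut-down stand-in for autoid.allocator, keeping only
// the fields the option interacts with.
type allocator struct {
	step       int64 // how many IDs are cached per storage round-trip
	customStep bool  // set when the user pinned the step via AUTO_ID_CACHE
}

// AllocOption mirrors the interface in the patch.
type AllocOption interface {
	ApplyOn(*allocator)
}

// CustomAutoIncCacheOption mirrors the patch: the option's value is the
// user-defined cache size (step length).
type CustomAutoIncCacheOption int64

// ApplyOn pins the step and marks it as customized, which disables the
// dynamic step adjustment.
func (step CustomAutoIncCacheOption) ApplyOn(alloc *allocator) {
	alloc.step = int64(step)
	alloc.customStep = true
}

// defaultStep matches the package-level `var step = int64(30000)` in autoid.go.
const defaultStep = 30000

// NewAllocator has the same shape as the patched autoid.NewAllocator:
// defaults first, then any options applied in order.
func NewAllocator(opts ...AllocOption) *allocator {
	alloc := &allocator{step: defaultStep}
	for _, fn := range opts {
		fn.ApplyOn(alloc)
	}
	return alloc
}

func main() {
	fmt.Println(NewAllocator().step)                              // 30000
	fmt.Println(NewAllocator(CustomAutoIncCacheOption(100)).step) // 100
}

This is also why the call sites in ddl/ddl_api.go, ddl/table.go, and infoschema/builder.go pass the option only when tblInfo.AutoIdCache > 0: a zero value means "not set", and the allocator keeps its default dynamic step.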