From 9dd6a7eec8305d5eed9f590506e869bdb4493c9f Mon Sep 17 00:00:00 2001 From: lance6716 Date: Tue, 20 Dec 2022 11:34:28 +0800 Subject: [PATCH 01/26] loader(dm): remove loader --- dm/_utils/terror_gen/errors_release.txt | 1 + dm/config/subtask.go | 19 +- dm/config/task.go | 47 +- dm/errors.toml | 6 + dm/loader/checkpoint.go | 404 ----- dm/loader/checkpoint_test.go | 237 --- dm/loader/convert_data.go | 329 ---- dm/loader/convert_data_test.go | 464 ----- dm/loader/lightning.go | 9 +- dm/loader/loader.go | 1553 ----------------- dm/loader/loader_test.go | 82 - dm/loader/metrics.go | 13 - dm/loader/status.go | 75 - dm/loader/status_test.go | 56 - dm/pkg/checker/conn_checker.go | 21 +- dm/pkg/terror/error_list.go | 2 + dm/tests/all_mode/run.sh | 69 - dm/tests/dmctl_command/run.sh | 50 +- .../conf/diff_config.toml | 27 - .../import_goroutine_leak/conf/dm-master.toml | 4 - .../import_goroutine_leak/conf/dm-task.yaml | 42 - .../conf/dm-worker1.toml | 2 - .../import_goroutine_leak/conf/source1.yaml | 11 - dm/tests/import_goroutine_leak/run.sh | 149 -- dm/tests/load_interrupt/conf/diff_config.toml | 29 - dm/tests/load_interrupt/conf/dm-master.toml | 4 - dm/tests/load_interrupt/conf/dm-task.yaml | 42 - dm/tests/load_interrupt/conf/dm-worker1.toml | 2 - dm/tests/load_interrupt/conf/source1.yaml | 11 - dm/tests/load_interrupt/run.sh | 133 -- dm/tests/load_task/conf/diff_config1.toml | 35 - dm/tests/load_task/conf/diff_config2.toml | 35 - dm/tests/load_task/conf/diff_config3.toml | 29 - dm/tests/load_task/conf/diff_config4.toml | 29 - dm/tests/load_task/conf/dm-master.toml | 6 - .../load_task/conf/dm-task-standalone.yaml | 41 - dm/tests/load_task/conf/dm-task.yaml | 47 - .../load_task/conf/dm-task2-standalone.yaml | 41 - dm/tests/load_task/conf/dm-task2.yaml | 47 - dm/tests/load_task/conf/dm-task3.yaml | 41 - dm/tests/load_task/conf/dm-task4.yaml | 41 - dm/tests/load_task/conf/dm-worker1.toml | 2 - dm/tests/load_task/conf/dm-worker2.toml | 2 - dm/tests/load_task/conf/dm-worker3.toml | 2 - dm/tests/load_task/conf/source1.yaml | 13 - dm/tests/load_task/conf/source2.yaml | 9 - dm/tests/load_task/data/db1.increment.sql | 11 - dm/tests/load_task/data/db1.prepare.sql | 17 - dm/tests/load_task/data/db2.increment.sql | 8 - dm/tests/load_task/data/db2.prepare.sql | 17 - dm/tests/load_task/run.sh | 322 ---- dm/worker/subtask.go | 19 +- 52 files changed, 59 insertions(+), 4648 deletions(-) delete mode 100644 dm/loader/convert_data.go delete mode 100644 dm/loader/convert_data_test.go delete mode 100644 dm/loader/loader.go delete mode 100644 dm/loader/loader_test.go delete mode 100644 dm/loader/status.go delete mode 100644 dm/loader/status_test.go delete mode 100644 dm/tests/import_goroutine_leak/conf/diff_config.toml delete mode 100644 dm/tests/import_goroutine_leak/conf/dm-master.toml delete mode 100644 dm/tests/import_goroutine_leak/conf/dm-task.yaml delete mode 100644 dm/tests/import_goroutine_leak/conf/dm-worker1.toml delete mode 100644 dm/tests/import_goroutine_leak/conf/source1.yaml delete mode 100644 dm/tests/import_goroutine_leak/run.sh delete mode 100644 dm/tests/load_interrupt/conf/diff_config.toml delete mode 100644 dm/tests/load_interrupt/conf/dm-master.toml delete mode 100644 dm/tests/load_interrupt/conf/dm-task.yaml delete mode 100644 dm/tests/load_interrupt/conf/dm-worker1.toml delete mode 100644 dm/tests/load_interrupt/conf/source1.yaml delete mode 100755 dm/tests/load_interrupt/run.sh delete mode 100644 dm/tests/load_task/conf/diff_config1.toml delete mode 100644 
dm/tests/load_task/conf/diff_config2.toml delete mode 100644 dm/tests/load_task/conf/diff_config3.toml delete mode 100644 dm/tests/load_task/conf/diff_config4.toml delete mode 100644 dm/tests/load_task/conf/dm-master.toml delete mode 100644 dm/tests/load_task/conf/dm-task-standalone.yaml delete mode 100644 dm/tests/load_task/conf/dm-task.yaml delete mode 100644 dm/tests/load_task/conf/dm-task2-standalone.yaml delete mode 100644 dm/tests/load_task/conf/dm-task2.yaml delete mode 100644 dm/tests/load_task/conf/dm-task3.yaml delete mode 100644 dm/tests/load_task/conf/dm-task4.yaml delete mode 100644 dm/tests/load_task/conf/dm-worker1.toml delete mode 100644 dm/tests/load_task/conf/dm-worker2.toml delete mode 100644 dm/tests/load_task/conf/dm-worker3.toml delete mode 100644 dm/tests/load_task/conf/source1.yaml delete mode 100644 dm/tests/load_task/conf/source2.yaml delete mode 100644 dm/tests/load_task/data/db1.increment.sql delete mode 100644 dm/tests/load_task/data/db1.prepare.sql delete mode 100644 dm/tests/load_task/data/db2.increment.sql delete mode 100644 dm/tests/load_task/data/db2.prepare.sql delete mode 100755 dm/tests/load_task/run.sh diff --git a/dm/_utils/terror_gen/errors_release.txt b/dm/_utils/terror_gen/errors_release.txt index c6c296c1f38..230f7334c9a 100644 --- a/dm/_utils/terror_gen/errors_release.txt +++ b/dm/_utils/terror_gen/errors_release.txt @@ -195,6 +195,7 @@ ErrConfigLoaderS3NotSupport,[code=20059:class=config:scope=internal:level=high], ErrConfigInvalidSafeModeDuration,[code=20060:class=config:scope=internal:level=medium], "Message: safe-mode-duration '%s' parsed failed: %v, Workaround: Please check the `safe-mode-duration` is correct." ErrConfigConfictSafeModeDurationAndSafeMode,[code=20061:class=config:scope=internal:level=low], "Message: safe-mode(true) conflicts with safe-mode-duration(0s), Workaround: Please set safe-mode to false or safe-mode-duration to non-zero." ErrConfigInvalidPhysicalDuplicateResolution,[code=20062:class=config:scope=internal:level=medium], "Message: invalid load on-duplicate-physical option '%s', Workaround: Please choose a valid value in ['none', 'manual'] or leave it empty." +ErrConfigColumnMappingDeprecated,[code=20063:class=config:scope=internal:level=high], "Message: column-mapping is not supported since v6.6.0, Workaround: Please use extract-table/extract-schema/extract-source to handle data conflict when merge tables. 
See https://docs.pingcap.com/tidb/v6.4/task-configuration-file-full#task-configuration-file-template-advanced" ErrBinlogExtractPosition,[code=22001:class=binlog-op:scope=internal:level=high] ErrBinlogInvalidFilename,[code=22002:class=binlog-op:scope=internal:level=high], "Message: invalid binlog filename" ErrBinlogParsePosFromStr,[code=22003:class=binlog-op:scope=internal:level=high] diff --git a/dm/config/subtask.go b/dm/config/subtask.go index c83417ee5d4..1c6a23361b8 100644 --- a/dm/config/subtask.go +++ b/dm/config/subtask.go @@ -129,10 +129,11 @@ type SubTaskConfig struct { From dbconfig.DBConfig `toml:"from" json:"from"` To dbconfig.DBConfig `toml:"to" json:"to"` - RouteRules []*router.TableRule `toml:"route-rules" json:"route-rules"` - FilterRules []*bf.BinlogEventRule `toml:"filter-rules" json:"filter-rules"` - ColumnMappingRules []*column.Rule `toml:"mapping-rule" json:"mapping-rule"` - ExprFilter []*ExpressionFilter `yaml:"expression-filter" toml:"expression-filter" json:"expression-filter"` + RouteRules []*router.TableRule `toml:"route-rules" json:"route-rules"` + FilterRules []*bf.BinlogEventRule `toml:"filter-rules" json:"filter-rules"` + // deprecated + ColumnMappingRules []*column.Rule `toml:"mapping-rule" json:"mapping-rule"` + ExprFilter []*ExpressionFilter `yaml:"expression-filter" toml:"expression-filter" json:"expression-filter"` // black-white-list is deprecated, use block-allow-list instead BWList *filter.Rules `toml:"black-white-list" json:"black-white-list"` @@ -282,6 +283,10 @@ func (c *SubTaskConfig) Adjust(verifyDecryptPassword bool) error { c.ShardMode = ShardPessimistic // use the pessimistic mode as default for back compatible. } + if len(c.ColumnMappingRules) > 0 { + return terror.ErrConfigColumnMappingDeprecated.Generate() + } + if c.OnlineDDLScheme != "" && c.OnlineDDLScheme != PT && c.OnlineDDLScheme != GHOST { return terror.ErrConfigOnlineSchemeNotSupport.Generate(c.OnlineDDLScheme) } else if c.OnlineDDLScheme == PT || c.OnlineDDLScheme == GHOST { @@ -468,9 +473,3 @@ func (c *SubTaskConfig) Clone() (*SubTaskConfig, error) { return clone, nil } - -// NeedUseLightning returns whether need to use lightning loader. -func (c *SubTaskConfig) NeedUseLightning() bool { - // TODO: return true after remove loader - return (c.Mode == ModeAll || c.Mode == ModeFull) && c.ImportMode != LoadModeLoader -} diff --git a/dm/config/task.go b/dm/config/task.go index f0b48221709..2b81f7da507 100644 --- a/dm/config/task.go +++ b/dm/config/task.go @@ -137,9 +137,10 @@ func (m *Meta) Verify() error { // MySQLInstance represents a sync config of a MySQL instance. type MySQLInstance struct { // it represents a MySQL/MariaDB instance or a replica group - SourceID string `yaml:"source-id"` - Meta *Meta `yaml:"meta"` - FilterRules []string `yaml:"filter-rules"` + SourceID string `yaml:"source-id"` + Meta *Meta `yaml:"meta"` + FilterRules []string `yaml:"filter-rules"` + // deprecated ColumnMappingRules []string `yaml:"column-mapping-rules"` RouteRules []string `yaml:"route-rules"` ExpressionFilters []string `yaml:"expression-filters"` @@ -243,13 +244,13 @@ const ( // LoadModeSQL means write data by sql statements, uses tidb-lightning tidb backend to load data. // deprecated, use LoadModeLogical instead. LoadModeSQL LoadMode = "sql" - // LoadModeLoader is the legacy sql mode, use loader to load data. this should be replaced by sql mode in new version. - // deprecated, loader will be removed in future. 
- LoadModeLoader = "loader" + // LoadModeLoader is the legacy sql mode, use loader to load data. this should be replaced by LoadModeLogical mode. + // deprecated, use LoadModeLogical instead. + LoadModeLoader LoadMode = "loader" // LoadModeLogical means use tidb backend of lightning to load data, which uses SQL to load data. - LoadModeLogical = "logical" + LoadModeLogical LoadMode = "logical" // LoadModePhysical means use local backend of lightning to load data, which ingest SST files to load data. - LoadModePhysical = "physical" + LoadModePhysical LoadMode = "physical" ) // LogicalDuplicateResolveType defines the duplication resolution when meet duplicate rows for logical import. @@ -315,7 +316,8 @@ func (m *LoaderConfig) adjust() error { if m.ImportMode == "" { m.ImportMode = LoadModeLogical } - if strings.EqualFold(string(m.ImportMode), string(LoadModeSQL)) { + if strings.EqualFold(string(m.ImportMode), string(LoadModeSQL)) || + strings.EqualFold(string(m.ImportMode), string(LoadModeLoader)) { m.ImportMode = LoadModeLogical } m.ImportMode = LoadMode(strings.ToLower(string(m.ImportMode))) @@ -508,10 +510,11 @@ type TaskConfig struct { // deprecated OnlineDDLScheme string `yaml:"online-ddl-scheme" toml:"online-ddl-scheme" json:"online-ddl-scheme"` - Routes map[string]*router.TableRule `yaml:"routes" toml:"routes" json:"routes"` - Filters map[string]*bf.BinlogEventRule `yaml:"filters" toml:"filters" json:"filters"` - ColumnMappings map[string]*column.Rule `yaml:"column-mappings" toml:"column-mappings" json:"column-mappings"` - ExprFilter map[string]*ExpressionFilter `yaml:"expression-filter" toml:"expression-filter" json:"expression-filter"` + Routes map[string]*router.TableRule `yaml:"routes" toml:"routes" json:"routes"` + Filters map[string]*bf.BinlogEventRule `yaml:"filters" toml:"filters" json:"filters"` + // deprecated + ColumnMappings map[string]*column.Rule `yaml:"column-mappings" toml:"column-mappings" json:"column-mappings"` + ExprFilter map[string]*ExpressionFilter `yaml:"expression-filter" toml:"expression-filter" json:"expression-filter"` // black-white-list is deprecated, use block-allow-list instead BWList map[string]*filter.Rules `yaml:"black-white-list" toml:"black-white-list" json:"black-white-list"` @@ -613,12 +616,11 @@ func (c *TaskConfig) RawDecode(data string) error { } // find unused items in config. -var configRefPrefixes = []string{"RouteRules", "FilterRules", "ColumnMappingRules", "Mydumper", "Loader", "Syncer", "ExprFilter", "Validator"} +var configRefPrefixes = []string{"RouteRules", "FilterRules", "Mydumper", "Loader", "Syncer", "ExprFilter", "Validator"} const ( routeRulesIdx = iota filterRulesIdx - columnMappingIdx mydumperIdx loaderIdx syncerIdx @@ -651,6 +653,10 @@ func (c *TaskConfig) adjust() error { c.ShardMode = ShardPessimistic // use the pessimistic mode as default for back compatible. 
} + if len(c.ColumnMappings) > 0 { + return terror.ErrConfigColumnMappingDeprecated.Generate() + } + if c.CollationCompatible != "" && c.CollationCompatible != LooseCollationCompatible && c.CollationCompatible != StrictCollationCompatible { return terror.ErrConfigCollationCompatibleNotSupport.Generate(c.CollationCompatible) } else if c.CollationCompatible == "" { @@ -762,12 +768,6 @@ func (c *TaskConfig) adjust() error { } globalConfigReferCount[configRefPrefixes[filterRulesIdx]+name]++ } - for _, name := range inst.ColumnMappingRules { - if _, ok := c.ColumnMappings[name]; !ok { - return terror.ErrConfigColumnMappingNotFound.Generate(i, name) - } - globalConfigReferCount[configRefPrefixes[columnMappingIdx]+name]++ - } // only when BAList is empty use BWList if len(c.BAList) == 0 && len(c.BWList) != 0 { @@ -920,11 +920,6 @@ func (c *TaskConfig) adjust() error { unusedConfigs = append(unusedConfigs, filter) } } - for columnMapping := range c.ColumnMappings { - if globalConfigReferCount[configRefPrefixes[columnMappingIdx]+columnMapping] == 0 { - unusedConfigs = append(unusedConfigs, columnMapping) - } - } for mydumper := range c.Mydumpers { if globalConfigReferCount[configRefPrefixes[mydumperIdx]+mydumper] == 0 { unusedConfigs = append(unusedConfigs, mydumper) diff --git a/dm/errors.toml b/dm/errors.toml index 9084a07ec39..a6f60a23f31 100644 --- a/dm/errors.toml +++ b/dm/errors.toml @@ -1186,6 +1186,12 @@ description = "" workaround = "Please choose a valid value in ['none', 'manual'] or leave it empty." tags = ["internal", "medium"] +[error.DM-config-20063] +message = "column-mapping is not supported since v6.6.0" +description = "" +workaround = "Please use extract-table/extract-schema/extract-source to handle data conflict when merge tables. See https://docs.pingcap.com/tidb/v6.4/task-configuration-file-full#task-configuration-file-template-advanced" +tags = ["internal", "high"] + [error.DM-binlog-op-22001] message = "" description = "" diff --git a/dm/loader/checkpoint.go b/dm/loader/checkpoint.go index 0a974605459..b7502995326 100644 --- a/dm/loader/checkpoint.go +++ b/dm/loader/checkpoint.go @@ -15,18 +15,12 @@ package loader import ( "context" - "encoding/json" "fmt" - "strings" - "sync" - "time" "github.com/pingcap/tidb/util/dbutil" - "github.com/pingcap/tiflow/dm/config" "github.com/pingcap/tiflow/dm/pkg/conn" tcontext "github.com/pingcap/tiflow/dm/pkg/context" "github.com/pingcap/tiflow/dm/pkg/cputil" - fr "github.com/pingcap/tiflow/dm/pkg/func-rollback" "github.com/pingcap/tiflow/dm/pkg/log" "github.com/pingcap/tiflow/dm/pkg/terror" "go.uber.org/zap" @@ -80,404 +74,6 @@ type CheckPoint interface { AllFinished() bool } -// RemoteCheckPoint implements CheckPoint by saving status in remote database system, mostly in TiDB. -// it's not thread-safe. -type RemoteCheckPoint struct { - // used to protect database operation with `conn`. - // if more operations need to be protected, add another mutex or rename this one. 
- connMutex sync.Mutex - - db *conn.BaseDB - conn *DBConn - id string - schema string - tableName string // tableName contains schema name - restoringFiles struct { - sync.RWMutex - pos map[string]map[string]FilePosSet // schema -> table -> FilePosSet(filename -> [cur, end]) - } - finishedTables map[string]struct{} - logger log.Logger -} - -func newRemoteCheckPoint(tctx *tcontext.Context, cfg *config.SubTaskConfig, id string) (CheckPoint, error) { - var err error - var db *conn.BaseDB - var dbConns []*DBConn - - rollbackHolder := fr.NewRollbackHolder("loader") - defer func() { - if err != nil { - rollbackHolder.RollbackReverseOrder() - } - }() - - db, dbConns, err = createConns(tctx, cfg, cfg.Name, cfg.SourceID, 1) - if err != nil { - return nil, err - } - - cp := &RemoteCheckPoint{ - db: db, - conn: dbConns[0], - id: id, - finishedTables: make(map[string]struct{}), - schema: dbutil.ColumnName(cfg.MetaSchema), - tableName: dbutil.TableName(cfg.MetaSchema, cputil.LoaderCheckpoint(cfg.Name)), - logger: tctx.L().WithFields(zap.String("component", "remote checkpoint")), - } - cp.restoringFiles.pos = make(map[string]map[string]FilePosSet) - rollbackHolder.Add(fr.FuncRollback{Name: "CloseRemoteCheckPoint", Fn: cp.Close}) - - err = cp.prepare(tctx) - if err != nil { - return nil, err - } - - return cp, nil -} - -func (cp *RemoteCheckPoint) prepare(tctx *tcontext.Context) error { - // create schema - if err := cp.createSchema(tctx); err != nil { - return err - } - // create table - return cp.createTable(tctx) -} - -func (cp *RemoteCheckPoint) createSchema(tctx *tcontext.Context) error { - sql2 := fmt.Sprintf("CREATE SCHEMA IF NOT EXISTS %s", cp.schema) - cp.connMutex.Lock() - err := cp.conn.executeSQL(tctx, []string{sql2}) - cp.connMutex.Unlock() - return terror.WithScope(err, terror.ScopeDownstream) -} - -func (cp *RemoteCheckPoint) createTable(tctx *tcontext.Context) error { - createTable := `CREATE TABLE IF NOT EXISTS %s ( - id char(32) NOT NULL, - filename varchar(255) NOT NULL, - cp_schema varchar(128) NOT NULL, - cp_table varchar(128) NOT NULL, - offset bigint NOT NULL, - end_pos bigint NOT NULL, - create_time timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP, - update_time timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP, - UNIQUE KEY uk_id_f (id,filename) - ); -` - sql2 := fmt.Sprintf(createTable, cp.tableName) - cp.connMutex.Lock() - err := cp.conn.executeSQL(tctx, []string{sql2}) - cp.connMutex.Unlock() - return terror.WithScope(err, terror.ScopeDownstream) -} - -// Load implements CheckPoint.Load. 
-func (cp *RemoteCheckPoint) Load(tctx *tcontext.Context) error { - begin := time.Now() - defer func() { - cp.logger.Info("load checkpoint", zap.Duration("cost time", time.Since(begin))) - }() - - query := fmt.Sprintf("SELECT `filename`,`cp_schema`,`cp_table`,`offset`,`end_pos` from %s where `id`=?", cp.tableName) - cp.connMutex.Lock() - rows, err := cp.conn.querySQL(tctx, query, cp.id) - cp.connMutex.Unlock() - if err != nil { - return terror.WithScope(err, terror.ScopeDownstream) - } - defer rows.Close() - - var ( - filename string - schema string - table string - offset int64 - endPos int64 - ) - - cp.restoringFiles.Lock() - defer cp.restoringFiles.Unlock() - cp.restoringFiles.pos = make(map[string]map[string]FilePosSet) // reset to empty - for rows.Next() { - err := rows.Scan(&filename, &schema, &table, &offset, &endPos) - if err != nil { - return terror.DBErrorAdapt(err, cp.conn.Scope(), terror.ErrDBDriverError) - } - - if _, ok := cp.restoringFiles.pos[schema]; !ok { - cp.restoringFiles.pos[schema] = make(map[string]FilePosSet) - } - tables := cp.restoringFiles.pos[schema] - if _, ok := tables[table]; !ok { - tables[table] = make(map[string][]int64) - } - restoringFiles := tables[table] - restoringFiles[filename] = []int64{offset, endPos} - } - - return terror.DBErrorAdapt(rows.Err(), cp.conn.Scope(), terror.ErrDBDriverError) -} - -// GetRestoringFileInfo implements CheckPoint.GetRestoringFileInfo. -func (cp *RemoteCheckPoint) GetRestoringFileInfo(db, table string) map[string][]int64 { - cp.restoringFiles.RLock() - defer cp.restoringFiles.RUnlock() - results := make(map[string][]int64) - if tables, ok := cp.restoringFiles.pos[db]; ok { - if restoringFiles, ok := tables[table]; ok { - // make a copy of restoringFiles, and its slice value - for k, v := range restoringFiles { - results[k] = make([]int64, len(v)) - copy(results[k], v) - } - return results - } - } - return results -} - -// GetAllRestoringFileInfo implements CheckPoint.GetAllRestoringFileInfo. -func (cp *RemoteCheckPoint) GetAllRestoringFileInfo() map[string][]int64 { - cp.restoringFiles.RLock() - defer cp.restoringFiles.RUnlock() - results := make(map[string][]int64) - for _, tables := range cp.restoringFiles.pos { - for _, files := range tables { - for file, pos := range files { - results[file] = make([]int64, len(pos)) - copy(results[file], pos) - } - } - } - return results -} - -// IsTableCreated implements CheckPoint.IsTableCreated. -func (cp *RemoteCheckPoint) IsTableCreated(db, table string) bool { - cp.restoringFiles.RLock() - defer cp.restoringFiles.RUnlock() - tables, ok := cp.restoringFiles.pos[db] - if !ok { - return false - } - if table == "" { - return true - } - _, ok = tables[table] - return ok -} - -// IsTableFinished implements CheckPoint.IsTableFinished. -func (cp *RemoteCheckPoint) IsTableFinished(db, table string) bool { - key := strings.Join([]string{db, table}, ".") - if _, ok := cp.finishedTables[key]; ok { - return true - } - return false -} - -// CalcProgress implements CheckPoint.CalcProgress. 
-func (cp *RemoteCheckPoint) CalcProgress(allFiles map[string]Tables2DataFiles) error { - cp.restoringFiles.RLock() - defer cp.restoringFiles.RUnlock() - cp.finishedTables = make(map[string]struct{}) // reset to empty - for db, tables := range cp.restoringFiles.pos { - dbTables, ok := allFiles[db] - if !ok { - return terror.ErrCheckpointDBNotExistInFile.Generate(db) - } - - for table, restoringFiles := range tables { - files, ok := dbTables[table] - if !ok { - return terror.ErrCheckpointTableNotExistInFile.Generate(table, db) - } - - restoringCount := len(restoringFiles) - totalCount := len(files) - - t := strings.Join([]string{db, table}, ".") - if restoringCount == totalCount { - // compare offset. - if cp.allFilesFinished(restoringFiles) { - cp.finishedTables[t] = struct{}{} - } - } else if restoringCount > totalCount { - return terror.ErrCheckpointRestoreCountGreater.Generate(table) - } - } - } - - cp.logger.Info("calculate checkpoint finished.", zap.Any("finished tables", cp.finishedTables)) - return nil -} - -func (cp *RemoteCheckPoint) allFilesFinished(files map[string][]int64) bool { - for file, pos := range files { - if len(pos) != 2 { - cp.logger.Error("unexpected checkpoint record", zap.String("data file", file), zap.Int64s("position", pos)) - return false - } - if pos[0] != pos[1] { - return false - } - } - return true -} - -// AllFinished implements CheckPoint.AllFinished. -func (cp *RemoteCheckPoint) AllFinished() bool { - cp.restoringFiles.RLock() - defer cp.restoringFiles.RUnlock() - for _, tables := range cp.restoringFiles.pos { - for _, restoringFiles := range tables { - if !cp.allFilesFinished(restoringFiles) { - return false - } - } - } - return true -} - -// Init implements CheckPoint.Init. -func (cp *RemoteCheckPoint) Init(tctx *tcontext.Context, filename string, endPos int64) error { - // fields[0] -> db name, fields[1] -> table name - schema, table, err := getDBAndTableFromFilename(filename) - if err != nil { - return terror.ErrCheckpointInvalidTableFile.Generate(filename) - } - sql2 := fmt.Sprintf("INSERT INTO %s (`id`, `filename`, `cp_schema`, `cp_table`, `offset`, `end_pos`) VALUES(?,?,?,?,?,?)", cp.tableName) - cp.logger.Info("initial checkpoint record", - zap.String("sql", sql2), - zap.String("id", cp.id), - zap.String("filename", filename), - zap.String("schema", schema), - zap.String("table", table), - zap.Int64("offset", 0), - zap.Int64("end position", endPos)) - args := []interface{}{cp.id, filename, schema, table, 0, endPos} - cp.connMutex.Lock() - err = cp.conn.executeSQL(tctx, []string{sql2}, args) - cp.connMutex.Unlock() - if err != nil { - if isErrDupEntry(err) { - cp.logger.Warn("checkpoint record already exists, skip it.", zap.String("id", cp.id), zap.String("filename", filename)) - return nil - } - return terror.WithScope(terror.Annotate(err, "initialize checkpoint"), terror.ScopeDownstream) - } - // checkpoint not exists and no error, cache endPos in memory - cp.restoringFiles.Lock() - defer cp.restoringFiles.Unlock() - if _, ok := cp.restoringFiles.pos[schema]; !ok { - cp.restoringFiles.pos[schema] = make(map[string]FilePosSet) - } - tables := cp.restoringFiles.pos[schema] - if _, ok := tables[table]; !ok { - tables[table] = make(map[string][]int64) - } - restoringFiles := tables[table] - if _, ok := restoringFiles[filename]; !ok { - restoringFiles[filename] = []int64{0, endPos} - } - return nil -} - -// ResetConn implements CheckPoint.ResetConn. 
-func (cp *RemoteCheckPoint) ResetConn(tctx *tcontext.Context) error { - cp.connMutex.Lock() - defer cp.connMutex.Unlock() - return cp.conn.resetConn(tctx) -} - -// Close implements CheckPoint.Close. -func (cp *RemoteCheckPoint) Close() { - if err := cp.db.Close(); err != nil { - cp.logger.Error("close checkpoint db", log.ShortError(err)) - } -} - -// GenSQL implements CheckPoint.GenSQL. -func (cp *RemoteCheckPoint) GenSQL(filename string, offset int64) string { - sql := fmt.Sprintf("UPDATE %s SET `offset`=%d WHERE `id` ='%s' AND `filename`='%s';", - cp.tableName, offset, cp.id, filename) - return sql -} - -// UpdateOffset implements CheckPoint.UpdateOffset. -func (cp *RemoteCheckPoint) UpdateOffset(filename string, offset int64) error { - cp.restoringFiles.Lock() - defer cp.restoringFiles.Unlock() - db, table, err := getDBAndTableFromFilename(filename) - if err != nil { - return terror.Annotatef(terror.ErrLoadTaskCheckPointNotMatch.Generate(err), "wrong filename=%s", filename) - } - - if _, ok := cp.restoringFiles.pos[db]; ok { - if _, ok := cp.restoringFiles.pos[db][table]; ok { - if _, ok := cp.restoringFiles.pos[db][table][filename]; ok { - cp.restoringFiles.pos[db][table][filename][0] = offset - return nil - } - } - } - return terror.ErrLoadTaskCheckPointNotMatch.Generatef("db=%s table=%s not in checkpoint", db, filename) -} - -// Clear implements CheckPoint.Clear. -func (cp *RemoteCheckPoint) Clear(tctx *tcontext.Context) error { - sql2 := fmt.Sprintf("DELETE FROM %s WHERE `id` = '%s'", cp.tableName, cp.id) - cp.connMutex.Lock() - err := cp.conn.executeSQL(tctx, []string{sql2}) - cp.connMutex.Unlock() - return terror.WithScope(err, terror.ScopeDownstream) -} - -// Count implements CheckPoint.Count. -func (cp *RemoteCheckPoint) Count(tctx *tcontext.Context) (int, error) { - query := fmt.Sprintf("SELECT COUNT(id) FROM %s WHERE `id` = ?", cp.tableName) - cp.connMutex.Lock() - rows, err := cp.conn.querySQL(tctx, query, cp.id) - cp.connMutex.Unlock() - if err != nil { - return 0, terror.WithScope(err, terror.ScopeDownstream) - } - defer rows.Close() - count := 0 - for rows.Next() { - err = rows.Scan(&count) - if err != nil { - return 0, terror.DBErrorAdapt(err, cp.conn.Scope(), terror.ErrDBDriverError) - } - } - if rows.Err() != nil { - return 0, terror.DBErrorAdapt(rows.Err(), cp.conn.Scope(), terror.ErrDBDriverError) - } - cp.logger.Debug("checkpoint record", zap.Int("count", count)) - return count, nil -} - -func (cp *RemoteCheckPoint) String() string { - cp.restoringFiles.RLock() - defer cp.restoringFiles.RUnlock() - result := make(map[string][]int64) - for _, tables := range cp.restoringFiles.pos { - for _, files := range tables { - for file, set := range files { - result[file] = set - } - } - } - bytes, err := json.Marshal(result) - if err != nil { - return err.Error() - } - return string(bytes) -} - type lightingLoadStatus int const ( diff --git a/dm/loader/checkpoint_test.go b/dm/loader/checkpoint_test.go index 0cc4fbdcc90..f9c367986ee 100644 --- a/dm/loader/checkpoint_test.go +++ b/dm/loader/checkpoint_test.go @@ -17,255 +17,18 @@ import ( "context" "database/sql" "fmt" - "os" - "strconv" "github.com/DATA-DOG/go-sqlmock" . 
"github.com/pingcap/check" - "github.com/pingcap/tiflow/dm/config" "github.com/pingcap/tiflow/dm/config/dbconfig" "github.com/pingcap/tiflow/dm/pkg/conn" - tcontext "github.com/pingcap/tiflow/dm/pkg/context" - "github.com/pingcap/tiflow/dm/pkg/cputil" "github.com/pingcap/tiflow/dm/pkg/log" ) var ( - _ = Suite(&testCheckPointSuite{}) _ = Suite(&lightningCpListSuite{}) ) -var ( - schemaCreateSQL = "" - tableCreateSQL = "" - clearCheckPointSQL = "" - loadCheckPointSQL = "" - countCheckPointSQL = "" - flushCheckPointSQL = "" - deleteCheckPointSQL = "" -) - -type testCheckPointSuite struct { - cfg *config.SubTaskConfig -} - -func (t *testCheckPointSuite) SetUpSuite(c *C) { - host := os.Getenv("MYSQL_HOST") - if host == "" { - host = "127.0.0.1" - } - port, _ := strconv.Atoi(os.Getenv("MYSQL_PORT")) - if port == 0 { - port = 3306 - } - user := os.Getenv("MYSQL_USER") - if user == "" { - user = "root" - } - pswd := os.Getenv("MYSQL_PSWD") - - t.cfg = &config.SubTaskConfig{ - To: dbconfig.DBConfig{ - Host: host, - User: user, - Password: pswd, - Port: port, - }, - MetaSchema: "test", - } - t.cfg.To.Adjust() - - schemaCreateSQL = fmt.Sprintf("CREATE SCHEMA IF NOT EXISTS `%s`", t.cfg.MetaSchema) - tableCreateSQL = fmt.Sprintf("CREATE TABLE IF NOT EXISTS `%s`.`%s` .*", t.cfg.MetaSchema, cputil.LoaderCheckpoint(t.cfg.Name)) - clearCheckPointSQL = fmt.Sprintf("DELETE FROM `%s`.`%s` WHERE `id` = .*", t.cfg.MetaSchema, cputil.LoaderCheckpoint(t.cfg.Name)) - loadCheckPointSQL = fmt.Sprintf("SELECT `filename`,`cp_schema`,`cp_table`,`offset`,`end_pos` from `%s`.`%s` where `id`.*", t.cfg.MetaSchema, cputil.LoaderCheckpoint(t.cfg.Name)) - countCheckPointSQL = fmt.Sprintf("SELECT COUNT.* FROM `%s`.`%s` WHERE `id` = ?", t.cfg.MetaSchema, cputil.LoaderCheckpoint(t.cfg.Name)) - flushCheckPointSQL = fmt.Sprintf("INSERT INTO `%s`.`%s` .* VALUES.*", t.cfg.MetaSchema, cputil.LoaderCheckpoint(t.cfg.Name)) - deleteCheckPointSQL = fmt.Sprintf("DELETE FROM `%s`.`%s` WHERE `id` = .*", t.cfg.MetaSchema, cputil.LoaderCheckpoint(t.cfg.Name)) -} - -func (t *testCheckPointSuite) TearDownSuite(c *C) { -} - -// test checkpoint's db operation. 
-func (t *testCheckPointSuite) TestForDB(c *C) { - cases := []struct { - filename string - endPos int64 - }{ - {"db1.tbl1.sql", 123}, - {"db1.tbl2.sql", 456}, - {"db1.tbl3.sql", 789}, - } - - allFiles := map[string]Tables2DataFiles{ - "db1": { - "tbl1": {cases[0].filename}, - "tbl2": {cases[1].filename}, - "tbl3": {cases[2].filename}, - }, - } - - mock := conn.InitMockDB(c) - // mock for cp prepare - mock.ExpectBegin() - mock.ExpectExec(schemaCreateSQL).WillReturnResult(sqlmock.NewResult(0, 1)) - mock.ExpectCommit() - mock.ExpectBegin() - mock.ExpectExec(tableCreateSQL).WillReturnResult(sqlmock.NewResult(0, 1)) - mock.ExpectCommit() - - id := "test_for_db" - tctx := tcontext.Background() - cp, err := newRemoteCheckPoint(tctx, t.cfg, id) - c.Assert(err, IsNil) - defer cp.Close() - - // mock cp clear - mock.ExpectBegin() - mock.ExpectExec(clearCheckPointSQL).WillReturnResult(sqlmock.NewResult(0, 1)) - mock.ExpectCommit() - c.Assert(cp.Clear(tctx), IsNil) - - // mock cp load - mock.ExpectQuery(loadCheckPointSQL).WillReturnRows(sqlmock.NewRows(nil)) - // no checkpoint exist - err = cp.Load(tctx) - c.Assert(err, IsNil) - - infos := cp.GetAllRestoringFileInfo() - c.Assert(len(infos), Equals, 0) - - // mock cp count - mock.ExpectQuery(countCheckPointSQL).WillReturnRows(sqlmock.NewRows([]string{"COUNT(id)"}).AddRow(0)) - count, err := cp.Count(tctx) - c.Assert(err, IsNil) - c.Assert(count, Equals, 0) - - c.Assert(cp.IsTableCreated("db1", ""), IsFalse) - c.Assert(cp.IsTableCreated("db1", "tbl1"), IsFalse) - c.Assert(cp.CalcProgress(allFiles), IsNil) - c.Assert(cp.IsTableFinished("db1", "tbl1"), IsFalse) - - // insert default checkpoints - for _, cs := range cases { - // mock init - mock.ExpectBegin() - mock.ExpectExec(flushCheckPointSQL).WillReturnResult(sqlmock.NewResult(0, 1)) - mock.ExpectCommit() - err = cp.Init(tctx, cs.filename, cs.endPos) - c.Assert(err, IsNil) - } - - c.Assert(cp.IsTableCreated("db1", ""), IsTrue) - c.Assert(cp.IsTableCreated("db1", "tbl1"), IsTrue) - c.Assert(cp.CalcProgress(allFiles), IsNil) - c.Assert(cp.IsTableFinished("db1", "tbl1"), IsFalse) - - info := cp.GetRestoringFileInfo("db1", "tbl1") - c.Assert(info, HasLen, 1) - c.Assert(info[cases[0].filename], DeepEquals, []int64{0, cases[0].endPos}) - - // mock cp load - rows := sqlmock.NewRows([]string{"filename", "cp_schema", "cp_table", "offset", "end_pos"}) - for i, cs := range cases { - rows = rows.AddRow(cs.filename, "db1", fmt.Sprintf("tbl%d", i+1), 0, cs.endPos) - } - mock.ExpectQuery(loadCheckPointSQL).WillReturnRows(rows) - err = cp.Load(tctx) - c.Assert(err, IsNil) - infos = cp.GetAllRestoringFileInfo() - c.Assert(len(infos), Equals, len(cases)) - for _, cs := range cases { - info, ok := infos[cs.filename] - c.Assert(ok, IsTrue) - c.Assert(len(info), Equals, 2) - c.Assert(info[0], Equals, int64(0)) - c.Assert(info[1], Equals, cs.endPos) - } - - mock.ExpectQuery(countCheckPointSQL).WillReturnRows(sqlmock.NewRows([]string{"COUNT(id)"}).AddRow(3)) - count, err = cp.Count(tctx) - c.Assert(err, IsNil) - c.Assert(count, Equals, len(cases)) - - // update checkpoint to finished - rows = sqlmock.NewRows([]string{"filename", "cp_schema", "cp_table", "offset", "end_pos"}) - for i, cs := range cases { - rows = rows.AddRow(cs.filename, "db1", fmt.Sprintf("tbl%d", i+1), cs.endPos, cs.endPos) - } - mock.ExpectQuery(loadCheckPointSQL).WillReturnRows(rows) - err = cp.Load(tctx) - c.Assert(err, IsNil) - c.Assert(cp.IsTableCreated("db1", ""), IsTrue) - c.Assert(cp.IsTableCreated("db1", "tbl1"), IsTrue) - 
c.Assert(cp.CalcProgress(allFiles), IsNil) - c.Assert(cp.IsTableFinished("db1", "tbl1"), IsTrue) - - info = cp.GetRestoringFileInfo("db1", "tbl1") - c.Assert(info, HasLen, 1) - c.Assert(info[cases[0].filename], DeepEquals, []int64{cases[0].endPos, cases[0].endPos}) - - infos = cp.GetAllRestoringFileInfo() - c.Assert(len(infos), Equals, len(cases)) - for _, cs := range cases { - info, ok := infos[cs.filename] - c.Assert(ok, IsTrue) - c.Assert(len(info), Equals, 2) - c.Assert(info[0], Equals, cs.endPos) - c.Assert(info[1], Equals, cs.endPos) - } - - mock.ExpectQuery(countCheckPointSQL).WillReturnRows(sqlmock.NewRows([]string{"COUNT(id)"}).AddRow(3)) - count, err = cp.Count(tctx) - c.Assert(err, IsNil) - c.Assert(count, Equals, len(cases)) - - // clear all - mock.ExpectBegin() - mock.ExpectExec(deleteCheckPointSQL).WillReturnResult(sqlmock.NewResult(0, 3)) - mock.ExpectCommit() - c.Assert(cp.Clear(tctx), IsNil) - - // no checkpoint exist - mock.ExpectQuery(loadCheckPointSQL).WillReturnRows(sqlmock.NewRows(nil)) - err = cp.Load(tctx) - c.Assert(err, IsNil) - - c.Assert(cp.IsTableCreated("db1", ""), IsFalse) - c.Assert(cp.IsTableCreated("db1", "tbl1"), IsFalse) - c.Assert(cp.CalcProgress(allFiles), IsNil) - c.Assert(cp.IsTableFinished("db1", "tbl1"), IsFalse) - - infos = cp.GetAllRestoringFileInfo() - c.Assert(len(infos), Equals, 0) - - // obtain count again - mock.ExpectQuery(countCheckPointSQL).WillReturnRows(sqlmock.NewRows([]string{"COUNT(id)"}).AddRow(0)) - count, err = cp.Count(tctx) - c.Assert(err, IsNil) - c.Assert(count, Equals, 0) - c.Assert(mock.ExpectationsWereMet(), IsNil) -} - -func (t *testCheckPointSuite) TestDeepCopy(c *C) { - cp := RemoteCheckPoint{} - cp.restoringFiles.pos = make(map[string]map[string]FilePosSet) - cp.restoringFiles.pos["db"] = make(map[string]FilePosSet) - cp.restoringFiles.pos["db"]["table"] = make(map[string][]int64) - cp.restoringFiles.pos["db"]["table"]["file"] = []int64{0, 100} - - ret := cp.GetRestoringFileInfo("db", "table") - cp.restoringFiles.pos["db"]["table"]["file"][0] = 10 - cp.restoringFiles.pos["db"]["table"]["file2"] = []int64{0, 100} - c.Assert(ret, DeepEquals, map[string][]int64{"file": {0, 100}}) - - ret = cp.GetAllRestoringFileInfo() - cp.restoringFiles.pos["db"]["table"]["file"][0] = 20 - cp.restoringFiles.pos["db"]["table"]["file3"] = []int64{0, 100} - c.Assert(ret, DeepEquals, map[string][]int64{"file": {10, 100}, "file2": {0, 100}}) -} - type lightningCpListSuite struct { mock sqlmock.Sqlmock cpList *LightningCheckpointList diff --git a/dm/loader/convert_data.go b/dm/loader/convert_data.go deleted file mode 100644 index f7e7954c3c5..00000000000 --- a/dm/loader/convert_data.go +++ /dev/null @@ -1,329 +0,0 @@ -// Copyright 2019 PingCAP, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package loader - -import ( - "bufio" - "bytes" - "fmt" - "io" - "os" - "strings" - "unsafe" - - "github.com/pingcap/errors" - cm "github.com/pingcap/tidb-tools/pkg/column-mapping" - "github.com/pingcap/tidb/parser/ast" - regexprrouter "github.com/pingcap/tidb/util/regexpr-router" - "github.com/pingcap/tiflow/dm/pkg/conn" - tcontext "github.com/pingcap/tiflow/dm/pkg/context" - parserpkg "github.com/pingcap/tiflow/dm/pkg/parser" - "github.com/pingcap/tiflow/dm/pkg/terror" -) - -func bytes2str(bs []byte) string { - return *(*string)(unsafe.Pointer(&bs)) -} - -// poor man's parse insert stmtement code -// learn from tidb-lightning and refactor it as format of mydumper file -// https://github.com/maxbube/mydumper/blob/master/mydumper.c#L2853 -// later let it a package. -func parseInsertStmt(sql []byte, table *tableInfo, columnMapping *cm.Mapping) ([][]string, error) { - var s, e, size int - rows := make([][]string, 0, 1024) - VALUES := []byte("VALUES") - - // If table has generated column, the dumped SQL file has a different `INSERT INTO` line, - // which provides column names except generated column. such as following: - // INSERT INTO `t1` (`id`,`uid`,`name`,`info`) VALUES - // (1,10001,"Gabriel García Márquez",NULL), - // (2,10002,"Cien años de soledad",NULL); - // otherwise dumped SQL file has content like following: - // INSERT INTO `t1` VALUES - // (1,"hello"), - // (2,"world"); - - for { - sql = sql[s:] - size = len(sql) - - // seek start "(" - s = bytes.IndexByte(sql, '(') - if s < 0 { - break - } - - // seek one line, it's one row - for e = s + 3; e < size; e++ { - if sql[e] == '\n' && (sql[e-1] == ',' || sql[e-1] == ';') && sql[e-2] == ')' { - break - } - if sql[e] == '\n' && e-6 > s && bytes.Equal(sql[e-6:e], VALUES) { - s = e + 1 - continue - } - } - if e == size { - return nil, terror.ErrLoadUnitInvalidFileEnding.Generate() - } - - rp := e - 2 - // extract columns' values - row, err := parseRowValues(sql[s+1:rp], table, columnMapping) - if err != nil { - return nil, err - } - rows = append(rows, row) - - s = e + 1 - if s >= size { - break - } - } - - return rows, nil -} - -func parseRowValues(str []byte, table *tableInfo, columnMapping *cm.Mapping) ([]string, error) { - // values are separated by comma, but we can not split using comma directly - // string is enclosed by single quote - - // a poor implementation, may be more robust later. - values := make([]interface{}, 0, len(table.columnNameList)) - row := make([]string, 0, len(table.columnNameList)) - isChars := make([]byte, 0, len(table.columnNameList)) - size := len(str) - var ch byte - for i := 0; i < size; { - ch = str[i] - if ch == ' ' || ch == '\n' { - i++ - continue - } - - if ch != '\'' && ch != '"' { - // no string, read until comma - j := i + 1 - for ; j < size && str[j] != ','; j++ { - } - - val := bytes.TrimSpace(str[i:j]) - - values = append(values, bytes2str(val)) // ?? no need to trim ?? 
- isChars = append(isChars, 0x0) - // skip , - i = j + 1 - } else { - // read string until another single quote - j := i + 1 - - sch := ch - for j < size { - // nolint:gocritic - if str[j] == '\\' { - // skip escaped character - j += 2 - continue - } else if str[j] == sch { - // matchup ending - break - } else { - j++ - } - } - - if j >= size { - return nil, terror.ErrLoadUnitParseQuoteValues.Generate() - } - - val := str[i+1 : j] - values = append(values, bytes2str(val)) - isChars = append(isChars, sch) - - i = j + 2 // skip ' and , - } - } - - if columnMapping != nil { - cmValues, _, err := columnMapping.HandleRowValue(table.sourceSchema, table.sourceTable, table.columnNameList, values) - if err != nil { - return nil, terror.ErrLoadUnitDoColumnMapping.Delegate(err, values, table) - } - values = cmValues - } - - for i := range values { - val, ok := values[i].(string) - if !ok { - panic(fmt.Sprintf("%v is not string", values[i])) - } - if isChars[i] != 0x0 { - columnVal := make([]byte, 0, len(val)+2) - columnVal = append(columnVal, isChars[i]) - columnVal = append(columnVal, val...) - columnVal = append(columnVal, isChars[i]) - row = append(row, string(columnVal)) - } else { - row = append(row, val) - } - } - if len(table.extendCol) > 0 { - for _, v := range table.extendVal { - row = append(row, "'"+v+"'") - } - } - return row, nil -} - -// exportStatement returns schema structure in sqlFile. -func exportStatement(sqlFile string) ([]byte, error) { - fd, err := os.Open(sqlFile) - if err != nil { - return nil, terror.ErrLoadUnitReadSchemaFile.Delegate(err, sqlFile) - } - defer fd.Close() - - br := bufio.NewReader(fd) - f, err := os.Stat(sqlFile) - if err != nil { - return nil, terror.ErrLoadUnitReadSchemaFile.Delegate(err, sqlFile) - } - - data := make([]byte, 0, f.Size()+1) - buffer := make([]byte, 0, f.Size()+1) - for { - line, err := br.ReadString('\n') - if errors.Cause(err) == io.EOF { - break - } - - line = strings.TrimSpace(line[:len(line)-1]) - if len(line) == 0 { - continue - } - - buffer = append(buffer, []byte(line)...) - if buffer[len(buffer)-1] == ';' { - statement := string(buffer) - if !(strings.HasPrefix(statement, "/*") && strings.HasSuffix(statement, "*/;")) { - data = append(data, buffer...) 
- } - buffer = buffer[:0] - } else { - buffer = append(buffer, '\n') - } - } - - return data, nil -} - -func tableName(schema, table string) string { - return fmt.Sprintf("`%s`.`%s`", schema, table) -} - -func parseTable(ctx *tcontext.Context, r *regexprrouter.RouteTable, schema, table, file, sqlMode, sourceID string) (*tableInfo, error) { - statement, err := exportStatement(file) - if err != nil { - return nil, err - } - - parser2, err := conn.GetParserFromSQLModeStr(sqlMode) - if err != nil { - return nil, err - } - - stmts, err := parserpkg.Parse(parser2, string(statement), "", "") - if err != nil { - return nil, terror.ErrLoadUnitParseStatement.Delegate(err, statement) - } - - var ( - ct *ast.CreateTableStmt - hasCreateTableStmt bool - ) - for _, stmt := range stmts { - ct, hasCreateTableStmt = stmt.(*ast.CreateTableStmt) - if hasCreateTableStmt { - break - } - } - if !hasCreateTableStmt { - return nil, terror.ErrLoadUnitNotCreateTable.Generate(statement, schema, table) - } - - var ( - columns = make([]string, 0, len(ct.Cols)) - hasGeneragedCols = false - columnNameFields = "" - ) - for _, col := range ct.Cols { - skip := false - for _, opt := range col.Options { - if opt.Tp == ast.ColumnOptionGenerated { - hasGeneragedCols = true - skip = true - break - } - } - if !skip { - columns = append(columns, col.Name.Name.O) - } - } - extendCol, extendVal := r.FetchExtendColumn(schema, table, sourceID) - if len(extendCol) > 0 { - columns = append(columns, extendCol...) - } - if hasGeneragedCols { - var escapeColumns []string - for _, column := range columns { - escapeColumns = append(escapeColumns, fmt.Sprintf("`%s`", column)) - } - columnNameFields = "(" + strings.Join(escapeColumns, ",") + ") " - } - - dstSchema, dstTable := fetchMatchedLiteral(ctx, r, schema, table) - return &tableInfo{ - sourceSchema: schema, - sourceTable: table, - targetSchema: dstSchema, - targetTable: dstTable, - columnNameList: columns, - insertHeadStmt: fmt.Sprintf("INSERT INTO `%s` %sVALUES", dstTable, columnNameFields), - extendCol: extendCol, - extendVal: extendVal, - }, nil -} - -// refine it later. -func reassemble(data []byte, table *tableInfo, columnMapping *cm.Mapping) (string, error) { - rows, err := parseInsertStmt(data, table, columnMapping) - if err != nil { - return "", err - } - - query := bytes.NewBuffer(make([]byte, 0, len(data))) - fmt.Fprint(query, table.insertHeadStmt) - seq := "," - - for i, row := range rows { - if i == len(rows)-1 { - seq = ";" - } - - fmt.Fprintf(query, "(%s)%s", strings.Join(row, ","), seq) - } - - return query.String(), nil -} diff --git a/dm/loader/convert_data_test.go b/dm/loader/convert_data_test.go deleted file mode 100644 index d09f5b30726..00000000000 --- a/dm/loader/convert_data_test.go +++ /dev/null @@ -1,464 +0,0 @@ -// Copyright 2019 PingCAP, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// See the License for the specific language governing permissions and -// limitations under the License. - -package loader - -import ( - . 
"github.com/pingcap/check" - cm "github.com/pingcap/tidb-tools/pkg/column-mapping" - regexprrouter "github.com/pingcap/tidb/util/regexpr-router" - router "github.com/pingcap/tidb/util/table-router" - tcontext "github.com/pingcap/tiflow/dm/pkg/context" -) - -var _ = Suite(&testConvertDataSuite{}) - -type testConvertDataSuite struct{} - -func (t *testConvertDataSuite) TestReassemble(c *C) { - table := &tableInfo{ - sourceSchema: "test2", - sourceTable: "t3", - targetSchema: "test", - targetTable: "t", - columnNameList: []string{ - "id", - "t_boolean", - "t_bigint", - "t_double", - "t_decimal", - "t_bit", - "t_date", - "t_datetime", - "t_timestamp", - "t_time", - "t_year", - "t_char", - "t_varchar", - "t_blob", - "t_text", - "t_enum", - "t_set", - }, - insertHeadStmt: "INSERT INTO t VALUES", - } - - // nolint:stylecheck - sql := `INSERT INTO t1 VALUES -(10,1,9223372036854775807,123.123,123456789012.1234567890120000000,"\0\0\0\0\0\0\0A","1000-01-01","9999-12-31 23:59:59","1973-12-30 15:30:00","23:59:59",1970,"x","x\"x","blob","text","enum2","a,b"), -(9,1,9223372036854775807,123.123,123456789012.1234567890120000000,"\0\0\0\0\0\0\0A","1000-01-01","9999-12-31 23:59:59","1973-12-30 15:30:00","23:59:59",1970,"x","x\",\nx","blob","text","enum2","a,b"); -(8,1,9223372036854775807,123.123,123456789012.1234567890120000000,"\0\0\0\0\0\0\0A","1000-01-01","9999-12-31 23:59:59","1973-12-30 15:30:00","23:59:59",1970,"x","x\",\nx","blob"," text\n","enum2", " a,b "); -` - - expected := []string{ - // nolint:stylecheck - `INSERT INTO t VALUES(585520728116297738,1,9223372036854775807,123.123,123456789012.1234567890120000000,"\0\0\0\0\0\0\0A","1000-01-01","9999-12-31 23:59:59","1973-12-30 15:30:00","23:59:59",1970,"x","x\"x","blob","text","enum2","a,b"),(585520728116297737,1,9223372036854775807,123.123,123456789012.1234567890120000000,"\0\0\0\0\0\0\0A","1000-01-01","9999-12-31 23:59:59","1973-12-30 15:30:00","23:59:59",1970,"x","x\",\nx","blob","text","enum2","a,b"),(585520728116297736,1,9223372036854775807,123.123,123456789012.1234567890120000000,"\0\0\0\0\0\0\0A","1000-01-01","9999-12-31 23:59:59","1973-12-30 15:30:00","23:59:59",1970,"x","x\",\nx","blob"," text\n","enum2"," a,b ");`, - // nolint:stylecheck - `INSERT INTO t VALUES(10,1,9223372036854775807,123.123,123456789012.1234567890120000000,"\0\0\0\0\0\0\0A","1000-01-01","9999-12-31 23:59:59","1973-12-30 15:30:00","23:59:59",1970,"test:x","x\"x","blob","text","enum2","a,b"),(9,1,9223372036854775807,123.123,123456789012.1234567890120000000,"\0\0\0\0\0\0\0A","1000-01-01","9999-12-31 23:59:59","1973-12-30 15:30:00","23:59:59",1970,"test:x","x\",\nx","blob","text","enum2","a,b"),(8,1,9223372036854775807,123.123,123456789012.1234567890120000000,"\0\0\0\0\0\0\0A","1000-01-01","9999-12-31 23:59:59","1973-12-30 15:30:00","23:59:59",1970,"test:x","x\",\nx","blob"," text\n","enum2"," a,b ");`, - } - - rules := []*cm.Rule{ - { - PatternSchema: "test*", - PatternTable: "t*", - TargetColumn: "id", - Expression: cm.PartitionID, - Arguments: []string{"1", "test", "t"}, - }, { - PatternSchema: "test*", - PatternTable: "t*", - TargetColumn: "t_char", - Expression: cm.AddPrefix, - Arguments: []string{"test:"}, - }, - } - - for i, r := range rules { - columnMapping, err := cm.NewMapping(false, []*cm.Rule{r}) - c.Assert(err, IsNil) - - query, err := reassemble([]byte(sql), table, columnMapping) - c.Assert(err, IsNil) - c.Assert(expected[i], Equals, query) - } -} - -func (t *testConvertDataSuite) TestReassembleWithGeneratedColumn(c *C) { - table := &tableInfo{ - sourceSchema: 
"test2", - sourceTable: "t3", - targetSchema: "test", - targetTable: "t", - columnNameList: []string{ - "id", - "t_json", - }, - insertHeadStmt: "INSERT INTO t (`id`,`t_json`) VALUES", - } - sql := `INSERT INTO t1 (id,t_json) VALUES -(10,'{}'), -(9,NULL); -(8,'{"a":123}'); -` - expected := "INSERT INTO t (`id`,`t_json`) VALUES(585520728116297738,'{}'),(585520728116297737,NULL),(585520728116297736,'{\"a\":123}');" - rules := []*cm.Rule{ - { - PatternSchema: "test*", - PatternTable: "t*", - TargetColumn: "id", - Expression: cm.PartitionID, - Arguments: []string{"1", "test", "t"}, - }, - } - - columnMapping, err := cm.NewMapping(false, rules) - c.Assert(err, IsNil) - query, err := reassemble([]byte(sql), table, columnMapping) - c.Assert(err, IsNil) - c.Assert(query, Equals, expected) -} - -func (t *testConvertDataSuite) TestParseTable(c *C) { - rules := []*router.TableRule{{ - SchemaPattern: "test*", - TablePattern: "t*", - TargetSchema: "test", - TargetTable: "t", - }} - - expectedTableInfo := &tableInfo{ - sourceSchema: "test1", - sourceTable: "t2", - targetSchema: "test", - targetTable: "t", - columnNameList: []string{ - "id", - "t_boolean", - "t_bigint", - "t_double", - "t_decimal", - "t_bit", - "t_date", - "t_datetime", - "t_timestamp", - "t_time", - "t_year", - "t_char", - "t_varchar", - "t_blob", - "t_text", - "t_enum", - "t_set", - "t_json", - }, - insertHeadStmt: "INSERT INTO `t` VALUES", - } - - r, err := regexprrouter.NewRegExprRouter(false, rules) - c.Assert(err, IsNil) - - tableInfo, err := parseTable(tcontext.Background(), r, "test1", "t2", "./dumpfile/test1.t2-schema.sql", "ANSI_QUOTES", "source-mysql-01") - c.Assert(err, IsNil) - c.Assert(tableInfo, DeepEquals, expectedTableInfo) -} - -func (t *testConvertDataSuite) TestParseTableWithGeneratedColumn(c *C) { - rules := []*router.TableRule{{ - SchemaPattern: "test*", - TablePattern: "t*", - TargetSchema: "test", - TargetTable: "t", - }} - - expectedTableInfo := &tableInfo{ - sourceSchema: "test1", - sourceTable: "t3", - targetSchema: "test", - targetTable: "t", - columnNameList: []string{ - "id", - "t_json", - }, - insertHeadStmt: "INSERT INTO `t` (`id`,`t_json`) VALUES", - } - - r, err := regexprrouter.NewRegExprRouter(false, rules) - c.Assert(err, IsNil) - - tableInfo, err := parseTable(tcontext.Background(), r, "test1", "t3", "./dumpfile/test1.t3-schema.sql", "", "source-mysql-01") - c.Assert(err, IsNil) - c.Assert(tableInfo, DeepEquals, expectedTableInfo) -} - -func (t *testConvertDataSuite) TestParseRowValues(c *C) { - var ( - data = []byte("585520728116297738") - ti = &tableInfo{ - sourceSchema: "test_parse_rows_values", - sourceTable: "tbl_1", - targetSchema: "test_parse_rows_values", - targetTable: "tbl_1", - columnNameList: []string{"c1"}, - } - rules = []*cm.Rule{ - { - PatternSchema: "test_parse_rows_values", - PatternTable: "tbl_1", - TargetColumn: "c1", - Expression: cm.PartitionID, - Arguments: []string{"1", "", ""}, - }, - } - ) - - columnMapping, err := cm.NewMapping(false, rules) - c.Assert(err, IsNil) - - values, err := parseRowValues(data, ti, columnMapping) - c.Assert(err, ErrorMatches, ".*mapping row data \\[585520728116297738\\] for table.*") - c.Assert(values, IsNil) -} - -func (t *testConvertDataSuite) TestReassembleExtractor(c *C) { - table := &tableInfo{ - sourceSchema: "test2", - sourceTable: "t3", - targetSchema: "test", - targetTable: "t", - columnNameList: []string{ - "id", - "t_boolean", - "t_bigint", - "t_double", - "t_decimal", - "t_bit", - "t_date", - "t_datetime", - "t_timestamp", - "t_time", - 
"t_year", - "t_char", - "t_varchar", - "t_blob", - "t_text", - "t_enum", - "t_set", - "table_name", - "schema_name", - "source_name", - }, - insertHeadStmt: "INSERT INTO t VALUES", - extendCol: []string{"table_name", "schema_name", "source_name"}, - extendVal: []string{"table1", "schema1", "source1"}, - } - - // nolint:stylecheck - sql := `INSERT INTO t1 VALUES -(10,1,9223372036854775807,123.123,123456789012.1234567890120000000,"\0\0\0\0\0\0\0A","1000-01-01","9999-12-31 23:59:59","1973-12-30 15:30:00","23:59:59",1970,"x","x\"x","blob","text","enum2","a,b"), -(9,1,9223372036854775807,123.123,123456789012.1234567890120000000,"\0\0\0\0\0\0\0A","1000-01-01","9999-12-31 23:59:59","1973-12-30 15:30:00","23:59:59",1970,"x","x\",\nx","blob","text","enum2","a,b"); -(8,1,9223372036854775807,123.123,123456789012.1234567890120000000,"\0\0\0\0\0\0\0A","1000-01-01","9999-12-31 23:59:59","1973-12-30 15:30:00","23:59:59",1970,"x","x\",\nx","blob"," text\n","enum2", " a,b "); -` - - expected := []string{ - // nolint:stylecheck - `INSERT INTO t VALUES(585520728116297738,1,9223372036854775807,123.123,123456789012.1234567890120000000,"\0\0\0\0\0\0\0A","1000-01-01","9999-12-31 23:59:59","1973-12-30 15:30:00","23:59:59",1970,"x","x\"x","blob","text","enum2","a,b",'table1','schema1','source1'),(585520728116297737,1,9223372036854775807,123.123,123456789012.1234567890120000000,"\0\0\0\0\0\0\0A","1000-01-01","9999-12-31 23:59:59","1973-12-30 15:30:00","23:59:59",1970,"x","x\",\nx","blob","text","enum2","a,b",'table1','schema1','source1'),(585520728116297736,1,9223372036854775807,123.123,123456789012.1234567890120000000,"\0\0\0\0\0\0\0A","1000-01-01","9999-12-31 23:59:59","1973-12-30 15:30:00","23:59:59",1970,"x","x\",\nx","blob"," text\n","enum2"," a,b ",'table1','schema1','source1');`, - // nolint:stylecheck - `INSERT INTO t VALUES(10,1,9223372036854775807,123.123,123456789012.1234567890120000000,"\0\0\0\0\0\0\0A","1000-01-01","9999-12-31 23:59:59","1973-12-30 15:30:00","23:59:59",1970,"test:x","x\"x","blob","text","enum2","a,b",'table1','schema1','source1'),(9,1,9223372036854775807,123.123,123456789012.1234567890120000000,"\0\0\0\0\0\0\0A","1000-01-01","9999-12-31 23:59:59","1973-12-30 15:30:00","23:59:59",1970,"test:x","x\",\nx","blob","text","enum2","a,b",'table1','schema1','source1'),(8,1,9223372036854775807,123.123,123456789012.1234567890120000000,"\0\0\0\0\0\0\0A","1000-01-01","9999-12-31 23:59:59","1973-12-30 15:30:00","23:59:59",1970,"test:x","x\",\nx","blob"," text\n","enum2"," a,b ",'table1','schema1','source1');`, - } - - rules := []*cm.Rule{ - { - PatternSchema: "test*", - PatternTable: "t*", - TargetColumn: "id", - Expression: cm.PartitionID, - Arguments: []string{"1", "test", "t"}, - }, { - PatternSchema: "test*", - PatternTable: "t*", - TargetColumn: "t_char", - Expression: cm.AddPrefix, - Arguments: []string{"test:"}, - }, - } - - for i, r := range rules { - columnMapping, err := cm.NewMapping(false, []*cm.Rule{r}) - c.Assert(err, IsNil) - // extract column with column mapping - query, err := reassemble([]byte(sql), table, columnMapping) - c.Assert(err, IsNil) - c.Assert(expected[i], Equals, query) - } - - expected = []string{ - // nolint:stylecheck - `INSERT INTO t VALUES(10,1,9223372036854775807,123.123,123456789012.1234567890120000000,"\0\0\0\0\0\0\0A","1000-01-01","9999-12-31 23:59:59","1973-12-30 
15:30:00","23:59:59",1970,"x","x\"x","blob","text","enum2","a,b",'table1','schema1','source1'),(9,1,9223372036854775807,123.123,123456789012.1234567890120000000,"\0\0\0\0\0\0\0A","1000-01-01","9999-12-31 23:59:59","1973-12-30 15:30:00","23:59:59",1970,"x","x\",\nx","blob","text","enum2","a,b",'table1','schema1','source1'),(8,1,9223372036854775807,123.123,123456789012.1234567890120000000,"\0\0\0\0\0\0\0A","1000-01-01","9999-12-31 23:59:59","1973-12-30 15:30:00","23:59:59",1970,"x","x\",\nx","blob"," text\n","enum2"," a,b ",'table1','schema1','source1');`, - } - // only extract column - query, err := reassemble([]byte(sql), table, nil) - c.Assert(err, IsNil) - c.Assert(expected[0], Equals, query) -} - -func (t *testConvertDataSuite) TestReassembleWithGeneratedColumnExtractor(c *C) { - table := &tableInfo{ - sourceSchema: "test2", - sourceTable: "t3", - targetSchema: "test", - targetTable: "t", - columnNameList: []string{ - "id", - "t_json", - "table_name", - "schema_name", - "source_name", - }, - insertHeadStmt: "INSERT INTO t (`id`,`t_json`,`table_name`,`schema_name`,`source_name`) VALUES", - extendCol: []string{"table_name", "schema_name", "source_name"}, - extendVal: []string{"table1", "schema1", "source1"}, - } - sql := `INSERT INTO t1 (id,t_json) VALUES -(10,'{}'), -(9,NULL); -(8,'{"a":123}'); -` - expected := "INSERT INTO t (`id`,`t_json`,`table_name`,`schema_name`,`source_name`) VALUES(585520728116297738,'{}','table1','schema1','source1'),(585520728116297737,NULL,'table1','schema1','source1'),(585520728116297736,'{\"a\":123}','table1','schema1','source1');" - rules := []*cm.Rule{ - { - PatternSchema: "test*", - PatternTable: "t*", - TargetColumn: "id", - Expression: cm.PartitionID, - Arguments: []string{"1", "test", "t"}, - }, - } - - columnMapping, err := cm.NewMapping(false, rules) - c.Assert(err, IsNil) - query, err := reassemble([]byte(sql), table, columnMapping) - c.Assert(err, IsNil) - c.Assert(query, Equals, expected) - - // only extract - expected2 := "INSERT INTO t (`id`,`t_json`,`table_name`,`schema_name`,`source_name`) VALUES(10,'{}','table1','schema1','source1'),(9,NULL,'table1','schema1','source1'),(8,'{\"a\":123}','table1','schema1','source1');" - query2, err := reassemble([]byte(sql), table, nil) - c.Assert(err, IsNil) - c.Assert(query2, Equals, expected2) -} - -func (t *testConvertDataSuite) TestParseTableWithExtendColumn(c *C) { - rules := []*router.TableRule{{ - SchemaPattern: "test*", - TablePattern: "t*", - TargetSchema: "test", - TargetTable: "t", - TableExtractor: &router.TableExtractor{ - TargetColumn: "table_name", - TableRegexp: "(.*)", - }, - SchemaExtractor: &router.SchemaExtractor{ - TargetColumn: "schema_name", - SchemaRegexp: "(.*)", - }, - SourceExtractor: &router.SourceExtractor{ - TargetColumn: "source_name", - SourceRegexp: "(.*)", - }, - }} - - expectedTableInfo := &tableInfo{ - sourceSchema: "test1", - sourceTable: "t2", - targetSchema: "test", - targetTable: "t", - columnNameList: []string{ - "id", - "t_boolean", - "t_bigint", - "t_double", - "t_decimal", - "t_bit", - "t_date", - "t_datetime", - "t_timestamp", - "t_time", - "t_year", - "t_char", - "t_varchar", - "t_blob", - "t_text", - "t_enum", - "t_set", - "t_json", - "table_name", - "schema_name", - "source_name", - }, - insertHeadStmt: "INSERT INTO `t` VALUES", - extendCol: []string{"table_name", "schema_name", "source_name"}, - extendVal: []string{"t2", "test1", "source1"}, - } - - r, err := regexprrouter.NewRegExprRouter(false, rules) - c.Assert(err, IsNil) - - tableInfo, err := 
parseTable(tcontext.Background(), r, "test1", "t2", "./dumpfile/test1.t2-schema.sql", "ANSI_QUOTES", "source1") - c.Assert(err, IsNil) - c.Assert(tableInfo, DeepEquals, expectedTableInfo) -} - -func (t *testConvertDataSuite) TestParseTableWithGeneratedColumnExtendColumn(c *C) { - rules := []*router.TableRule{{ - SchemaPattern: "test*", - TablePattern: "t*", - TargetSchema: "test", - TargetTable: "t", - TableExtractor: &router.TableExtractor{ - TargetColumn: "table_name", - TableRegexp: "(.*)", - }, - SchemaExtractor: &router.SchemaExtractor{ - TargetColumn: "schema_name", - SchemaRegexp: "(.*)", - }, - SourceExtractor: &router.SourceExtractor{ - TargetColumn: "source_name", - SourceRegexp: "(.*)", - }, - }} - - expectedTableInfo := &tableInfo{ - sourceSchema: "test1", - sourceTable: "t3", - targetSchema: "test", - targetTable: "t", - columnNameList: []string{ - "id", - "t_json", - "table_name", - "schema_name", - "source_name", - }, - insertHeadStmt: "INSERT INTO `t` (`id`,`t_json`,`table_name`,`schema_name`,`source_name`) VALUES", - extendCol: []string{"table_name", "schema_name", "source_name"}, - extendVal: []string{"t3", "test1", "source1"}, - } - - r, err := regexprrouter.NewRegExprRouter(false, rules) - c.Assert(err, IsNil) - - tableInfo, err := parseTable(tcontext.Background(), r, "test1", "t3", "./dumpfile/test1.t3-schema.sql", "", "source1") - c.Assert(err, IsNil) - c.Assert(tableInfo, DeepEquals, expectedTableInfo) -} diff --git a/dm/loader/lightning.go b/dm/loader/lightning.go index 99c88aa2053..01d969f87f1 100644 --- a/dm/loader/lightning.go +++ b/dm/loader/lightning.go @@ -19,6 +19,7 @@ import ( "path/filepath" "strings" "sync" + "time" "github.com/pingcap/errors" "github.com/pingcap/failpoint" @@ -49,7 +50,6 @@ const ( // checkpoint file name for lightning loader // this file is used to store the real checkpoint data for lightning. lightningCheckpointFileName = "tidb_lightning_checkpoint.pb" - TmpTLSConfigPath = "lightning_tls" ) // LightningLoader can load your mydumper data into TiDB database. @@ -436,6 +436,13 @@ func (l *LightningLoader) Process(ctx context.Context, pr chan pb.ProcessResult) l.metaBinlogGTID.Store(gtid) } + failpoint.Inject("longLoadProcess", func(val failpoint.Value) { + if sec, ok := val.(int); ok { + l.logger.Info("long loader unit", zap.Int("second", sec)) + time.Sleep(time.Duration(sec) * time.Second) + } + }) + if err := l.restore(ctx); err != nil && !utils.IsContextCanceledError(err) { l.logger.Error("process error", zap.Error(err)) processError := unit.NewProcessError(err) diff --git a/dm/loader/loader.go b/dm/loader/loader.go deleted file mode 100644 index 8b15308ab82..00000000000 --- a/dm/loader/loader.go +++ /dev/null @@ -1,1553 +0,0 @@ -// Copyright 2019 PingCAP, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package loader - -import ( - "bufio" - "bytes" - "context" - "encoding/hex" - "fmt" - "io" - "os" - "path/filepath" - "strings" - "sync" - "time" - - "github.com/pingcap/errors" - "github.com/pingcap/failpoint" - cm "github.com/pingcap/tidb-tools/pkg/column-mapping" - "github.com/pingcap/tidb/dumpling/export" - "github.com/pingcap/tidb/util/filter" - regexprrouter "github.com/pingcap/tidb/util/regexpr-router" - router "github.com/pingcap/tidb/util/table-router" - "github.com/pingcap/tiflow/dm/config" - "github.com/pingcap/tiflow/dm/config/dbconfig" - "github.com/pingcap/tiflow/dm/pb" - "github.com/pingcap/tiflow/dm/pkg/conn" - tcontext "github.com/pingcap/tiflow/dm/pkg/context" - fr "github.com/pingcap/tiflow/dm/pkg/func-rollback" - "github.com/pingcap/tiflow/dm/pkg/log" - "github.com/pingcap/tiflow/dm/pkg/terror" - "github.com/pingcap/tiflow/dm/pkg/utils" - "github.com/pingcap/tiflow/dm/unit" - clientv3 "go.etcd.io/etcd/client/v3" - "go.uber.org/atomic" - "go.uber.org/zap" - "golang.org/x/sync/errgroup" -) - -const ( - jobCount = 1000 - uninitializedOffset = -1 -) - -// FilePosSet represents a set in mathematics. -type FilePosSet map[string][]int64 - -// DataFiles represent all data files for a single table. -type DataFiles []string - -// Tables2DataFiles represent all data files of a table collection as a map. -type Tables2DataFiles map[string]DataFiles - -type dataJob struct { - sql string - schema string - table string - sourceTable string - sourceSchema string - file string - absPath string - offset int64 - lastOffset int64 -} - -type fileJob struct { - schema string - table string - dataFile string - offset int64 - info *tableInfo -} - -// Worker represents a worker. -type Worker struct { - id int - cfg *config.SubTaskConfig - checkPoint CheckPoint - conn *DBConn - wg sync.WaitGroup - jobQueue chan *dataJob - loader *Loader - - logger log.Logger - - closed atomic.Bool -} - -// NewWorker returns a Worker. -func NewWorker(loader *Loader, id int) *Worker { - w := &Worker{ - id: id, - cfg: loader.cfg, - checkPoint: loader.checkPoint, - conn: loader.toDBConns[id], - jobQueue: make(chan *dataJob, jobCount), - loader: loader, - logger: loader.logger.WithFields(zap.Int("worker ID", id)), - } - - failpoint.Inject("workerChanSize", func(val failpoint.Value) { - size := val.(int) - w.logger.Info("", zap.String("failpoint", "workerChanSize"), zap.Int("size", size)) - w.jobQueue = make(chan *dataJob, size) - }) - - return w -} - -// Close closes worker. 
-func (w *Worker) Close() { - // simulate the case that doesn't wait all doJob goroutine exit - failpoint.Inject("workerCantClose", func(_ failpoint.Value) { - w.logger.Info("", zap.String("failpoint", "workerCantClose")) - failpoint.Return() - }) - - if !w.closed.CAS(false, true) { - w.wg.Wait() - w.logger.Info("already closed...") - return - } - - w.logger.Info("start to close...") - close(w.jobQueue) - w.wg.Wait() - w.logger.Info("closed !!!") -} - -func (w *Worker) run(ctx context.Context, fileJobQueue chan *fileJob, runFatalChan chan *pb.ProcessError) { - w.closed.Store(false) - - newCtx, cancel := context.WithCancel(ctx) - defer func() { - cancel() - // make sure all doJob goroutines exit - w.Close() - }() - - ctctx := tcontext.NewContext(newCtx, w.logger) - - doJob := func() { - hasError := false - for { - job, ok := <-w.jobQueue - if !ok { - w.logger.Info("job queue was closed, execution goroutine exits") - return - } - if job == nil { - w.logger.Info("jobs are finished, execution goroutine exits") - return - } - if hasError { - continue // continue to read so than the sender will not be blocked - } - - sqls := make([]string, 0, 3) - sqls = append(sqls, "USE `"+unescapePercent(job.schema, w.logger)+"`;") - sqls = append(sqls, job.sql) - - offsetSQL := w.checkPoint.GenSQL(job.file, job.offset) - sqls = append(sqls, offsetSQL) - - failpoint.Inject("LoadExceedOffsetExit", func(val failpoint.Value) { - threshold, _ := val.(int) - if job.offset >= int64(threshold) { - w.logger.Warn("load offset execeeds threshold, it will exit", zap.Int64("load offset", job.offset), zap.Int("value", threshold), zap.String("failpoint", "LoadExceedOffsetExit")) - utils.OsExit(1) - } - }) - - failpoint.Inject("LoadDataSlowDown", nil) - - failpoint.Inject("LoadDataSlowDownByTask", func(val failpoint.Value) { - tasks := val.(string) - taskNames := strings.Split(tasks, ",") - for _, taskName := range taskNames { - if w.cfg.Name == taskName { - w.logger.Info("inject failpoint LoadDataSlowDownByTask", zap.String("task", taskName)) - <-newCtx.Done() - } - } - }) - - startTime := time.Now() - err := w.conn.executeSQL(ctctx, sqls) - failpoint.Inject("executeSQLError", func(_ failpoint.Value) { - w.logger.Info("", zap.String("failpoint", "executeSQLError")) - err = errors.New("inject failpoint executeSQLError") - }) - if err != nil { - // expect pause rather than exit - err = terror.WithScope(terror.Annotatef(err, "file %s", job.file), terror.ScopeDownstream) - if !utils.IsContextCanceledError(err) { - runFatalChan <- unit.NewProcessError(err) - } - hasError = true - failpoint.Inject("returnDoJobError", func(_ failpoint.Value) { - w.logger.Info("", zap.String("failpoint", "returnDoJobError")) - failpoint.Return() - }) - continue - } - txnHistogram.WithLabelValues(w.cfg.Name, w.cfg.WorkerName, w.cfg.SourceID, job.schema, job.table).Observe(time.Since(startTime).Seconds()) - failpoint.Inject("loaderCPUpdateOffsetError", func(_ failpoint.Value) { - job.file = "notafile" + job.file - }) - if err := w.loader.checkPoint.UpdateOffset(job.file, job.offset); err != nil { - runFatalChan <- unit.NewProcessError(err) - hasError = true - continue - } - // update finished offset after checkpoint updated - w.loader.finishedDataSize.Add(job.offset - job.lastOffset) - if _, ok := w.loader.dbTableDataFinishedSize[job.sourceSchema]; ok { - if _, ok := w.loader.dbTableDataFinishedSize[job.sourceSchema][job.sourceTable]; ok { - w.loader.dbTableDataFinishedSize[job.sourceSchema][job.sourceTable].Store(job.offset) - } - } - } - } - - // 
worker main routine - for { - select { - case <-newCtx.Done(): - w.logger.Info("context canceled, main goroutine exits") - return - case job, ok := <-fileJobQueue: - if !ok { - w.logger.Info("file queue was closed, main routine exit.") - return - } - - w.wg.Add(1) - go func() { - defer w.wg.Done() - doJob() - }() - - // restore a table - if err := w.restoreDataFile(ctx, filepath.Join(w.cfg.Dir, job.dataFile), job.offset, job.info); err != nil { - // expect pause rather than exit - err = terror.Annotatef(err, "restore data file (%v) failed", job.dataFile) - if !utils.IsContextCanceledError(err) { - runFatalChan <- unit.NewProcessError(err) - } - return - } - } - } -} - -func (w *Worker) restoreDataFile(ctx context.Context, filePath string, offset int64, table *tableInfo) error { - w.logger.Info("start to restore dump sql file", zap.String("data file", filePath)) - err := w.dispatchSQL(ctx, filePath, offset, table) - if err != nil { - return err - } - - failpoint.Inject("dispatchError", func(_ failpoint.Value) { - w.logger.Info("", zap.String("failpoint", "dispatchError")) - failpoint.Return(errors.New("inject failpoint dispatchError")) - }) - - // dispatchSQL completed, send nil to make sure all dmls are applied to target database - // we don't want to close and re-make chan frequently - // but if we need to re-call w.run, we need re-make jobQueue chan - w.jobQueue <- nil - w.wg.Wait() - - w.logger.Info("finish to restore dump sql file", zap.String("data file", filePath)) - return nil -} - -func (w *Worker) dispatchSQL(ctx context.Context, file string, offset int64, table *tableInfo) error { - var ( - f *os.File - err error - cur int64 - ) - - baseFile := filepath.Base(file) - - f, err = os.Open(file) - if err != nil { - return terror.ErrLoadUnitDispatchSQLFromFile.Delegate(err) - } - defer f.Close() - - // file was not found in checkpoint - if offset == uninitializedOffset { - offset = 0 - - finfo, err2 := f.Stat() - if err2 != nil { - return terror.ErrLoadUnitDispatchSQLFromFile.Delegate(err2) - } - - tctx := tcontext.NewContext(ctx, w.logger) - err2 = w.checkPoint.Init(tctx, baseFile, finfo.Size()) - failpoint.Inject("WaitLoaderStopAfterInitCheckpoint", func(v failpoint.Value) { - t := v.(int) - w.logger.Info("wait loader stop after init checkpoint") - w.wg.Add(1) - time.Sleep(time.Duration(t) * time.Second) - w.wg.Done() - }) - - if err2 != nil { - w.logger.Error("fail to initialize checkpoint", zap.String("data file", file), zap.Int64("offset", offset), log.ShortError(err2)) - return err2 - } - } - - cur, err = f.Seek(offset, io.SeekStart) - if err != nil { - return terror.ErrLoadUnitDispatchSQLFromFile.Delegate(err) - } - w.logger.Debug("read file", zap.String("data file", file), zap.Int64("offset", offset)) - - lastOffset := cur - - data := make([]byte, 0, 1024*1024) - br := bufio.NewReader(f) - for { - select { - case <-ctx.Done(): - w.logger.Info("sql dispatcher is ready to quit.", zap.String("data file", file), zap.Int64("offset", offset)) - return nil - default: - // do nothing - } - line, err := br.ReadString('\n') - if err == io.EOF { - w.logger.Info("data are scanned finished.", zap.String("data file", file), zap.Int64("offset", offset)) - break - } - cur += int64(len(line)) - - realLine := strings.TrimSpace(line[:len(line)-1]) - if len(realLine) == 0 { - continue - } - - data = append(data, []byte(line)...) 
- if realLine[len(realLine)-1] == ';' { - query := strings.TrimSpace(string(data)) - if strings.HasPrefix(query, "/*") && strings.HasSuffix(query, "*/;") { - data = data[0:0] - continue - } - - // extend column also need use reassemble to write SQL and the table name has been renamed - if w.loader.columnMapping != nil || len(table.extendCol) > 0 { - // column mapping and route table - query, err = reassemble(data, table, w.loader.columnMapping) - if err != nil { - return terror.Annotatef(err, "file %s", file) - } - } else if table.sourceTable != table.targetTable { - // dumped data files always use backquote as quotes - query = renameShardingTable(query, table.sourceTable, table.targetTable, false) - } - - idx := strings.Index(query, "INSERT INTO") - if idx < 0 { - return terror.ErrLoadUnitInvalidInsertSQL.Generate(query) - } - - data = data[0:0] - - j := &dataJob{ - sql: query, - schema: table.targetSchema, - table: table.targetTable, - sourceSchema: table.sourceSchema, - sourceTable: table.sourceTable, - file: baseFile, - absPath: file, - offset: cur, - lastOffset: lastOffset, - } - lastOffset = cur - - w.jobQueue <- j - } - } - - return nil -} - -type tableInfo struct { - sourceSchema string - sourceTable string - targetSchema string - targetTable string - columnNameList []string - insertHeadStmt string - extendCol []string - extendVal []string -} - -// Loader can load your mydumper data into TiDB database. -type Loader struct { - sync.RWMutex - - cfg *config.SubTaskConfig - cli *clientv3.Client - workerName string - checkPoint CheckPoint - - logger log.Logger - - // db -> tables - // table -> data files - db2Tables map[string]Tables2DataFiles - tableInfos map[string]*tableInfo - - fileJobQueue chan *fileJob - - tableRouter *regexprrouter.RouteTable - baList *filter.Filter - columnMapping *cm.Mapping - - toDB *conn.BaseDB - toDBConns []*DBConn - - totalFileCount atomic.Int64 // schema + table + data - totalDataSize atomic.Int64 - finishedDataSize atomic.Int64 - - // to calculate remainingTimeGauge metric, map will be init in `l.prepare.prepareDataFiles` - dbTableDataTotalSize map[string]map[string]*atomic.Int64 - dbTableDataFinishedSize map[string]map[string]*atomic.Int64 - dbTableDataLastFinishedSize map[string]map[string]*atomic.Int64 - dbTableDataLastUpdatedTime atomic.Time - speedRecorder *export.SpeedRecorder - - metaBinlog atomic.String - metaBinlogGTID atomic.String - - // record process error rather than log.Fatal - runFatalChan chan *pb.ProcessError - - // for every worker goroutine, not for every data file - workerWg *sync.WaitGroup - // for other goroutines - wg sync.WaitGroup - - fileJobQueueClosed atomic.Bool - finish atomic.Bool - closed atomic.Bool -} - -// NewLoader creates a new Loader. -func NewLoader(cfg *config.SubTaskConfig, cli *clientv3.Client, workerName string) *Loader { - loader := &Loader{ - cfg: cfg, - cli: cli, - db2Tables: make(map[string]Tables2DataFiles), - tableInfos: make(map[string]*tableInfo), - workerWg: new(sync.WaitGroup), - logger: log.With(zap.String("task", cfg.Name), zap.String("unit", "load")), - workerName: workerName, - speedRecorder: export.NewSpeedRecorder(), - } - loader.fileJobQueueClosed.Store(true) // not open yet - return loader -} - -// Type implements Unit.Type. -func (l *Loader) Type() pb.UnitType { - return pb.UnitType_Load -} - -// Init initializes loader for a load task, but not start Process. -// if fail, it should not call l.Close. 
-func (l *Loader) Init(ctx context.Context) (err error) { - rollbackHolder := fr.NewRollbackHolder("loader") - defer func() { - if err != nil { - rollbackHolder.RollbackReverseOrder() - } - }() - - tctx := tcontext.NewContext(ctx, l.logger) - - checkpoint, err := newRemoteCheckPoint(tctx, l.cfg, l.checkpointID()) - failpoint.Inject("ignoreLoadCheckpointErr", func(_ failpoint.Value) { - l.logger.Info("", zap.String("failpoint", "ignoreLoadCheckpointErr")) - err = nil - }) - if err != nil { - return err - } - l.checkPoint = checkpoint - rollbackHolder.Add(fr.FuncRollback{Name: "close-checkpoint", Fn: l.checkPoint.Close}) - - l.baList, err = filter.New(l.cfg.CaseSensitive, l.cfg.BAList) - if err != nil { - return terror.ErrLoadUnitGenBAList.Delegate(err) - } - - err = l.genRouter(l.cfg.RouteRules) - if err != nil { - return err - } - - if len(l.cfg.ColumnMappingRules) > 0 { - l.columnMapping, err = cm.NewMapping(l.cfg.CaseSensitive, l.cfg.ColumnMappingRules) - if err != nil { - return terror.ErrLoadUnitGenColumnMapping.Delegate(err) - } - } - - dbCfg := l.cfg.To - dbCfg.RawDBCfg = dbconfig.DefaultRawDBConfig(). - SetMaxIdleConns(l.cfg.PoolSize) - - // used to change loader's specified DB settings, currently SQL Mode - lcfg, err := l.cfg.Clone() - if err != nil { - return err - } - // fix nil map after clone, which we will use below - // TODO: we may develop `SafeClone` in future - if lcfg.To.Session == nil { - lcfg.To.Session = make(map[string]string) - } - timeZone := l.cfg.Timezone - if len(timeZone) == 0 { - baseDB, err2 := conn.GetDownstreamDB(&l.cfg.To) - if err2 != nil { - return err2 - } - defer baseDB.Close() - var err1 error - timeZone, err1 = config.FetchTimeZoneSetting(ctx, baseDB.DB) - if err1 != nil { - return err1 - } - } - lcfg.To.Session["time_zone"] = timeZone - - hasSQLMode := false - for k := range l.cfg.To.Session { - if strings.ToLower(k) == "sql_mode" { - hasSQLMode = true - break - } - } - - if !hasSQLMode { - sqlModes, err3 := conn.AdjustSQLModeCompatible(l.cfg.LoaderConfig.SQLMode) - if err3 != nil { - l.logger.Warn("cannot adjust sql_mode compatible, the sql_mode will stay the same", log.ShortError(err3)) - } - lcfg.To.Session["sql_mode"] = sqlModes - } - - l.logger.Info("loader's sql_mode is", zap.String("sqlmode", lcfg.To.Session["sql_mode"])) - - l.toDB, l.toDBConns, err = createConns(tctx, lcfg, lcfg.Name, lcfg.SourceID, l.cfg.PoolSize) - if err != nil { - return err - } - - return nil -} - -func (l *Loader) handleExitErrMetric(err *pb.ProcessError) { - resumable := fmt.Sprintf("%t", unit.IsResumableError(err)) - loaderExitWithErrorCounter.WithLabelValues(l.cfg.Name, l.cfg.SourceID, resumable).Inc() -} - -// Process implements Unit.Process. 
-func (l *Loader) Process(ctx context.Context, pr chan pb.ProcessResult) { - loaderExitWithErrorCounter.WithLabelValues(l.cfg.Name, l.cfg.SourceID, "true").Add(0) - loaderExitWithErrorCounter.WithLabelValues(l.cfg.Name, l.cfg.SourceID, "false").Add(0) - - newCtx, cancel := context.WithCancel(ctx) - defer cancel() - - l.newFileJobQueue() - binlog, gtid, err := getMydumpMetadata(ctx, l.cli, l.cfg, l.workerName) - if err != nil { - processError := unit.NewProcessError(err) - l.handleExitErrMetric(processError) - pr <- pb.ProcessResult{ - Errors: []*pb.ProcessError{processError}, - } - return - } - if binlog != "" { - l.metaBinlog.Store(binlog) - } - if gtid != "" { - l.metaBinlogGTID.Store(gtid) - } - - l.runFatalChan = make(chan *pb.ProcessError, 2*l.cfg.PoolSize) - errs := make([]*pb.ProcessError, 0, 2) - - var wg sync.WaitGroup - wg.Add(1) - go func() { - defer wg.Done() - for err := range l.runFatalChan { - cancel() // cancel l.Restore - errs = append(errs, err) - } - }() - failpoint.Inject("longLoadProcess", func(val failpoint.Value) { - if sec, ok := val.(int); ok { - l.logger.Info("long loader unit", zap.Int("second", sec)) - time.Sleep(time.Duration(sec) * time.Second) - } - }) - err = l.Restore(newCtx) - close(l.runFatalChan) // Restore returned, all potential fatal sent to l.runFatalChan - cancel() // cancel the goroutines created in `Restore`. - - failpoint.Inject("dontWaitWorkerExit", func(_ failpoint.Value) { - l.logger.Info("", zap.String("failpoint", "dontWaitWorkerExit")) - l.workerWg.Wait() - }) - - wg.Wait() // wait for receive all fatal from l.runFatalChan - - if err != nil { - if utils.IsContextCanceledError(err) { - l.logger.Info("filter out error caused by user cancel") - } else { - processError := unit.NewProcessError(err) - l.handleExitErrMetric(processError) - errs = append(errs, processError) - } - } - - isCanceled := false - select { - case <-ctx.Done(): - isCanceled = true - default: - } - - pr <- pb.ProcessResult{ - IsCanceled: isCanceled, - Errors: errs, - } -} - -func (l *Loader) newFileJobQueue() { - l.closeFileJobQueue() - l.fileJobQueue = make(chan *fileJob, jobCount) - l.fileJobQueueClosed.Store(false) -} - -func (l *Loader) closeFileJobQueue() { - if l.fileJobQueueClosed.Load() { - return - } - close(l.fileJobQueue) - l.fileJobQueueClosed.Store(true) -} - -// align with https://github.com/pingcap/dumpling/pull/140 -// if input is malformed, return original string and print log. -func unescapePercent(input string, logger log.Logger) string { - buf := bytes.Buffer{} - buf.Grow(len(input)) - i := 0 - for i < len(input) { - if input[i] != '%' { - buf.WriteByte(input[i]) - i++ - } else { - if i+2 >= len(input) { - logger.Error("malformed filename while unescapePercent", zap.String("filename", input)) - return input - } - ascii, err := hex.DecodeString(input[i+1 : i+3]) - if err != nil { - logger.Error("malformed filename while unescapePercent", zap.String("filename", input)) - return input - } - buf.Write(ascii) - i += 3 - } - } - return buf.String() -} - -func (l *Loader) skipSchemaAndTable(table *filter.Table) bool { - if filter.IsSystemSchema(table.Schema) { - return true - } - - table.Schema = unescapePercent(table.Schema, l.logger) - table.Name = unescapePercent(table.Name, l.logger) - - tbs := []*filter.Table{table} - tbs = l.baList.Apply(tbs) - return len(tbs) == 0 -} - -func (l *Loader) isClosed() bool { - return l.closed.Load() -} - -// IsFreshTask implements Unit.IsFreshTask. 
-func (l *Loader) IsFreshTask(ctx context.Context) (bool, error) { - count, err := l.checkPoint.Count(tcontext.NewContext(ctx, l.logger)) - return count == 0, err -} - -// Restore begins the restore process. -func (l *Loader) Restore(ctx context.Context) error { - if err := putLoadTask(l.cli, l.cfg, l.workerName); err != nil { - return err - } - - if err := l.prepare(); err != nil { - l.logger.Error("scan directory failed", zap.String("directory", l.cfg.Dir), log.ShortError(err)) - return err - } - - failpoint.Inject("WaitLoaderStopBeforeLoadCheckpoint", func(v failpoint.Value) { - t := v.(int) - l.logger.Info("wait loader stop before load checkpoint") - l.wg.Add(1) - time.Sleep(time.Duration(t) * time.Second) - l.wg.Done() - }) - - // not update checkpoint in memory when restoring, so when re-Restore, we need to load checkpoint from DB - err := l.checkPoint.Load(tcontext.NewContext(ctx, l.logger)) - if err != nil { - return err - } - err = l.checkPoint.CalcProgress(l.db2Tables) - if err != nil { - l.logger.Error("calc load process", log.ShortError(err)) - return err - } - l.loadFinishedSize() - if err2 := l.initAndStartWorkerPool(ctx); err2 != nil { - l.logger.Error("initial and start worker pools failed", log.ShortError(err)) - return err2 - } - - begin := time.Now() - err = l.restoreData(ctx) - - failpoint.Inject("dontWaitWorkerExit", func(_ failpoint.Value) { - l.logger.Info("", zap.String("failpoint", "dontWaitWorkerExit")) - failpoint.Return(nil) - }) - - // make sure all workers exit - l.closeFileJobQueue() // all data file dispatched, close it - l.workerWg.Wait() - - if err == nil { - l.finish.Store(true) - l.logger.Info("all data files have been finished", zap.Duration("cost time", time.Since(begin))) - if l.checkPoint.AllFinished() { - if l.cfg.Mode == config.ModeFull { - if err = delLoadTask(l.cli, l.cfg, l.workerName); err != nil { - return err - } - } - if l.cfg.CleanDumpFile { - cleanDumpFiles(ctx, l.cfg) - } - } - } else if errors.Cause(err) != context.Canceled { - return err - } - - return nil -} - -func (l *Loader) loadFinishedSize() { - results := l.checkPoint.GetAllRestoringFileInfo() - for file, pos := range results { - db, table, err := getDBAndTableFromFilename(file) - if err != nil { - l.logger.Warn("invalid db table sql file", zap.String("file", file), zap.Error(err)) - continue - } - l.finishedDataSize.Add(pos[0]) - l.dbTableDataFinishedSize[db][table].Add(pos[0]) - } -} - -// Close does graceful shutdown. -func (l *Loader) Close() { - l.Lock() - defer l.Unlock() - if l.isClosed() { - return - } - - l.stopLoad() - - if err := l.toDB.Close(); err != nil { - l.logger.Error("close downstream DB error", log.ShortError(err)) - } - l.checkPoint.Close() - l.removeLabelValuesWithTaskInMetrics(l.cfg.Name) - l.closed.Store(true) -} - -// Kill kill the loader without graceful. -func (l *Loader) Kill() { - // TODO: implement kill - l.Close() -} - -// stopLoad stops loading, now it used by Close and Pause -// maybe we can refine the workflow more clear. -func (l *Loader) stopLoad() { - // before re-write workflow, simply close all job queue and job workers - // when resuming, re-create them - l.logger.Info("stop importing data process") - - l.closeFileJobQueue() - l.workerWg.Wait() - l.logger.Debug("all workers have been closed") - - l.wg.Wait() - l.logger.Debug("all loader's go-routines have been closed") -} - -// Pause implements Unit.Pause. 
-func (l *Loader) Pause() { - if l.isClosed() { - l.logger.Warn("try to pause, but already closed") - return - } - - l.stopLoad() -} - -// Resume resumes the paused process. -func (l *Loader) Resume(ctx context.Context, pr chan pb.ProcessResult) { - if l.isClosed() { - l.logger.Warn("try to resume, but already closed") - return - } - - if err := l.resetDBs(ctx); err != nil { - pr <- pb.ProcessResult{ - IsCanceled: false, - Errors: []*pb.ProcessError{ - unit.NewProcessError(err), - }, - } - return - } - // continue the processing - l.Process(ctx, pr) -} - -func (l *Loader) resetDBs(ctx context.Context) error { - var err error - tctx := tcontext.NewContext(ctx, l.logger) - - for i := 0; i < len(l.toDBConns); i++ { - err = l.toDBConns[i].resetConn(tctx) - if err != nil { - return terror.WithScope(err, terror.ScopeDownstream) - } - } - - err = l.checkPoint.ResetConn(tctx) - if err != nil { - return terror.WithScope(err, terror.ScopeDownstream) - } - - return nil -} - -// Update implements Unit.Update -// now, only support to update config for routes, filters, column-mappings, block-allow-list -// now no config diff implemented, so simply re-init use new config -// no binlog filter for loader need to update. -func (l *Loader) Update(ctx context.Context, cfg *config.SubTaskConfig) error { - var ( - err error - oldBaList *filter.Filter - oldTableRouter *regexprrouter.RouteTable - oldColumnMapping *cm.Mapping - ) - - defer func() { - if err == nil { - return - } - if oldBaList != nil { - l.baList = oldBaList - } - if oldTableRouter != nil { - l.tableRouter = oldTableRouter - } - if oldColumnMapping != nil { - l.columnMapping = oldColumnMapping - } - }() - - // update block-allow-list - oldBaList = l.baList - l.baList, err = filter.New(cfg.CaseSensitive, cfg.BAList) - if err != nil { - return terror.ErrLoadUnitGenBAList.Delegate(err) - } - - // update route, for loader, this almost useless, because schemas often have been restored - oldTableRouter = l.tableRouter - l.tableRouter, err = regexprrouter.NewRegExprRouter(cfg.CaseSensitive, cfg.RouteRules) - if err != nil { - return terror.ErrLoadUnitGenTableRouter.Delegate(err) - } - - // update column-mappings - oldColumnMapping = l.columnMapping - l.columnMapping, err = cm.NewMapping(cfg.CaseSensitive, cfg.ColumnMappingRules) - if err != nil { - return terror.ErrLoadUnitGenColumnMapping.Delegate(err) - } - - // update l.cfg - l.cfg.BAList = cfg.BAList - l.cfg.RouteRules = cfg.RouteRules - l.cfg.ColumnMappingRules = cfg.ColumnMappingRules - return nil -} - -func (l *Loader) genRouter(rules []*router.TableRule) error { - l.tableRouter, _ = regexprrouter.NewRegExprRouter(l.cfg.CaseSensitive, []*router.TableRule{}) - for _, rule := range rules { - err := l.tableRouter.AddRule(rule) - if err != nil { - return terror.ErrLoadUnitGenTableRouter.Delegate(err) - } - } - schemaRules, tableRules := l.tableRouter.AllRules() - l.logger.Debug("all route rules", zap.Reflect("schema route rules", schemaRules), zap.Reflect("table route rules", tableRules)) - return nil -} - -func (l *Loader) initAndStartWorkerPool(ctx context.Context) error { - for i := 0; i < l.cfg.PoolSize; i++ { - worker := NewWorker(l, i) - l.workerWg.Add(1) // for every worker goroutine, Add(1) - go func() { - defer l.workerWg.Done() - worker.run(ctx, l.fileJobQueue, l.runFatalChan) - }() - } - return nil -} - -func (l *Loader) prepareDBFiles(files map[string]struct{}) error { - // reset some variables - l.db2Tables = make(map[string]Tables2DataFiles) - l.totalFileCount.Store(0) // reset - 
schemaFileCount := 0 - for file := range files { - db, ok := utils.GetDBFromDumpFilename(file) - if !ok { - continue - } - schemaFileCount++ - if l.skipSchemaAndTable(&filter.Table{Schema: db}) { - l.logger.Warn("ignore schema file", zap.String("schema file", file)) - continue - } - - l.db2Tables[db] = make(Tables2DataFiles) - l.totalFileCount.Add(1) // for schema - } - - if schemaFileCount == 0 { - l.logger.Warn("invalid mydumper files for there are no `-schema-create.sql` files found, and will generate later") - } - if len(l.db2Tables) == 0 { - l.logger.Warn("no available `-schema-create.sql` files, check mydumper parameter matches block-allow-list in task config, will generate later") - } - - return nil -} - -func (l *Loader) prepareTableFiles(files map[string]struct{}) error { - var tablesNumber float64 - for file := range files { - db, table, ok := utils.GetTableFromDumpFilename(file) - if !ok { - continue - } - if l.skipSchemaAndTable(&filter.Table{Schema: db, Name: table}) { - l.logger.Warn("ignore table file", zap.String("table file", file)) - continue - } - tables, ok := l.db2Tables[db] - if !ok { - l.logger.Warn("can't find schema create file, will generate one", zap.String("schema", db)) - if err := generateSchemaCreateFile(l.cfg.Dir, db); err != nil { - return err - } - l.db2Tables[db] = make(Tables2DataFiles) - tables = l.db2Tables[db] - l.totalFileCount.Add(1) - } - - if _, ok := tables[table]; ok { - return terror.ErrLoadUnitDuplicateTableFile.Generate(file) - } - tablesNumber++ - tables[table] = make(DataFiles, 0, 16) - l.totalFileCount.Add(1) // for table - } - - tableGauge.WithLabelValues(l.cfg.Name, l.cfg.SourceID).Set(tablesNumber) - return nil -} - -func (l *Loader) prepareDataFiles(files map[string]struct{}) error { - var dataFilesNumber float64 - - for file := range files { - if !strings.HasSuffix(file, ".sql") || strings.Contains(file, "-schema.sql") || - strings.Contains(file, "-schema-create.sql") { - continue - } - - // ignore view / triggers - if strings.Contains(file, "-schema-view.sql") || strings.Contains(file, "-schema-triggers.sql") || - strings.Contains(file, "-schema-post.sql") { - l.logger.Warn("ignore unsupport view/trigger file", zap.String("file", file)) - continue - } - - db, table, err := getDBAndTableFromFilename(file) - if err != nil { - l.logger.Warn("invalid db table sql file", zap.String("file", file), zap.Error(err)) - continue - } - if l.skipSchemaAndTable(&filter.Table{Schema: db, Name: table}) { - l.logger.Warn("ignore data file", zap.String("data file", file)) - continue - } - tables, ok := l.db2Tables[db] - if !ok { - return terror.ErrLoadUnitNoDBFile.Generate(file) - } - - dataFiles, ok := tables[table] - if !ok { - return terror.ErrLoadUnitNoTableFile.Generate(file) - } - - size, err := utils.GetFileSize(filepath.Join(l.cfg.Dir, file)) - if err != nil { - return err - } - l.totalDataSize.Add(size) - l.totalFileCount.Add(1) // for data - if _, ok := l.dbTableDataTotalSize[db]; !ok { - l.dbTableDataTotalSize[db] = make(map[string]*atomic.Int64) - l.dbTableDataFinishedSize[db] = make(map[string]*atomic.Int64) - l.dbTableDataLastFinishedSize[db] = make(map[string]*atomic.Int64) - } - if _, ok := l.dbTableDataTotalSize[db][table]; !ok { - l.dbTableDataTotalSize[db][table] = atomic.NewInt64(0) - l.dbTableDataFinishedSize[db][table] = atomic.NewInt64(0) - l.dbTableDataLastFinishedSize[db][table] = atomic.NewInt64(0) - } - l.dbTableDataTotalSize[db][table].Add(size) - - dataFiles = append(dataFiles, file) - dataFilesNumber++ - tables[table] = 
dataFiles - } - - dataFileGauge.WithLabelValues(l.cfg.Name, l.cfg.SourceID).Set(dataFilesNumber) - dataSizeGauge.WithLabelValues(l.cfg.Name, l.cfg.SourceID).Set(float64(l.totalDataSize.Load())) - return nil -} - -func (l *Loader) prepare() error { - begin := time.Now() - defer func() { - l.logger.Info("prepare loading", zap.Duration("cost time", time.Since(begin))) - }() - - // reset some counter used to calculate progress - l.totalDataSize.Store(0) - l.finishedDataSize.Store(0) // reset before load from checkpoint - l.dbTableDataTotalSize = make(map[string]map[string]*atomic.Int64) - l.dbTableDataFinishedSize = make(map[string]map[string]*atomic.Int64) - l.dbTableDataLastFinishedSize = make(map[string]map[string]*atomic.Int64) - - // check if mydumper dir data exists. - if !utils.IsDirExists(l.cfg.Dir) { - // compatibility with no `.name` suffix - dirSuffix := "." + l.cfg.Name - var trimmed bool - if strings.HasSuffix(l.cfg.Dir, dirSuffix) { - dirPrefix := strings.TrimSuffix(l.cfg.Dir, dirSuffix) - if utils.IsDirExists(dirPrefix) { - l.logger.Warn("directory doesn't exist, try to load data from old fashion directory", zap.String("directory", l.cfg.Dir), zap.String("old fashion directory", dirPrefix)) - l.cfg.Dir = dirPrefix - trimmed = true - } - } - if !trimmed { - return terror.ErrLoadUnitDumpDirNotFound.Generate(l.cfg.Dir) - } - } - - // collect dir files. - files, err := utils.CollectDirFiles(l.cfg.Dir) - if err != nil { - return err - } - - l.logger.Debug("collected files", zap.Reflect("files", files)) - - /* Mydumper file names format - * db {db}-schema-create.sql - * table {db}.{table}-schema.sql - * sql {db}.{table}.{part}.sql or {db}.{table}.sql - */ - - // Sql file for create db - if err := l.prepareDBFiles(files); err != nil { - return err - } - - // Sql file for create table - if err := l.prepareTableFiles(files); err != nil { - return err - } - - // Sql file for restore data - return l.prepareDataFiles(files) -} - -// restoreSchema creates schema. -func (l *Loader) restoreSchema(ctx context.Context, conn *DBConn, sqlFile, schema string) error { - if l.checkPoint.IsTableCreated(schema, "") { - l.logger.Info("database already exists in checkpoint, skip creating it", zap.String("schema", schema), zap.String("db schema file", sqlFile)) - return nil - } - err := l.restoreStructure(ctx, conn, sqlFile, schema, "") - if err != nil { - if isErrDBExists(err) { - l.logger.Info("database already exists, skip it", zap.String("db schema file", sqlFile)) - } else { - return terror.Annotatef(err, "run db schema failed - dbfile %s", sqlFile) - } - } - return nil -} - -// restoreTable creates table. -func (l *Loader) restoreTable(ctx context.Context, conn *DBConn, sqlFile, schema, table string) error { - if l.checkPoint.IsTableCreated(schema, table) { - l.logger.Info("table already exists in checkpoint, skip creating it", zap.String("schema", schema), zap.String("table", table), zap.String("db schema file", sqlFile)) - return nil - } - err := l.restoreStructure(ctx, conn, sqlFile, schema, table) - if err != nil { - if isErrTableExists(err) { - l.logger.Info("table already exists, skip it", zap.String("table schema file", sqlFile)) - } else { - return terror.Annotatef(err, "run table schema failed - dbfile %s", sqlFile) - } - } - return nil -} - -// restoreStruture creates schema or table. 
-func (l *Loader) restoreStructure(ctx context.Context, conn *DBConn, sqlFile string, schema string, table string) error { - f, err := os.Open(sqlFile) - if err != nil { - return terror.ErrLoadUnitReadSchemaFile.Delegate(err) - } - defer f.Close() - - tctx := tcontext.NewContext(ctx, l.logger) - ansiquote := strings.Contains(l.cfg.SQLMode, "ANSI_QUOTES") - - data := make([]byte, 0, 1024*1024) - br := bufio.NewReader(f) - for { - line, err := br.ReadString('\n') - if err == io.EOF { - break - } - - realLine := strings.TrimSpace(line[:len(line)-1]) - if len(realLine) == 0 { - continue - } - - data = append(data, []byte(realLine)...) - if data[len(data)-1] == ';' { - query := string(data) - data = data[0:0] - if strings.HasPrefix(query, "/*") && strings.HasSuffix(query, "*/;") { - continue - } - - var sqls []string - dstSchema, dstTable := fetchMatchedLiteral(tctx, l.tableRouter, schema, table) - // for table - if table != "" { - sqls = append(sqls, "USE `"+unescapePercent(dstSchema, l.logger)+"`;") - query = renameShardingTable(query, table, dstTable, ansiquote) - } else { - query = renameShardingSchema(query, schema, dstSchema, ansiquote) - } - - l.logger.Debug("schema create statement", zap.String("sql", query)) - - sqls = append(sqls, query) - err = conn.executeSQL(tctx, sqls) - if err != nil { - return terror.WithScope(err, terror.ScopeDownstream) - } - } - } - - return nil -} - -// renameShardingTable replaces srcTable with dstTable in query. -func renameShardingTable(query, srcTable, dstTable string, ansiquote bool) string { - return SQLReplace(query, srcTable, dstTable, ansiquote) -} - -// renameShardingSchema replaces srcSchema with dstSchema in query. -func renameShardingSchema(query, srcSchema, dstSchema string, ansiquote bool) string { - return SQLReplace(query, srcSchema, dstSchema, ansiquote) -} - -func fetchMatchedLiteral(ctx *tcontext.Context, router *regexprrouter.RouteTable, schema, table string) (targetSchema string, targetTable string) { - if schema == "" { - // nothing change - return schema, table - } - - targetSchema, targetTable, err := router.Route(schema, table) - if err != nil { - ctx.L().Error("fail to route table", zap.Error(err)) // log the error, but still continue - } - if targetSchema == "" { - // nothing change - return schema, table - } - if targetTable == "" { - // table still same; - targetTable = table - } - - return targetSchema, targetTable -} - -// `restore Schema Job` present a data structure of schema restoring job. -type restoreSchemaJob struct { - loader *Loader - session *DBConn - database string // database name - table string // table name, empty if it's a schema of database - filepath string // file path of dumpped schema file -} - -// `jobQueue` of schema restoring which (only) support consumptions concurrently. -type jobQueue struct { - ctx context.Context - msgq chan *restoreSchemaJob // job message queue channel - consumerCount int // count of consumers - eg *errgroup.Group // err wait group of consumer's go-routines -} - -// `newJobQueue` consturct a jobQueue. -func newJobQueue(ctx context.Context, consumerCount, length int) *jobQueue { - eg, selfCtx := errgroup.WithContext(ctx) - return &jobQueue{ - ctx: selfCtx, - msgq: make(chan *restoreSchemaJob, length), - consumerCount: consumerCount, - eg: eg, - } -} - -// `push` will append a job to the queue. 
-func (q *jobQueue) push(job *restoreSchemaJob) error { - var err error - select { - case <-q.ctx.Done(): - err = q.ctx.Err() - case q.msgq <- job: - } - return terror.WithScope(err, terror.ScopeInternal) -} - -// `close` wait jobs done and close queue forever. -func (q *jobQueue) close() error { - // queue is closing - close(q.msgq) - // wait until go-routines of consumption was exited - return q.eg.Wait() -} - -// `startConsumers` run multiple go-routines of job consumption with user defined handler. -func (q *jobQueue) startConsumers(handler func(ctx context.Context, job *restoreSchemaJob) error) { - for i := 0; i < q.consumerCount; i++ { - q.eg.Go(func() error { - var session *DBConn - consumeLoop: - for { - select { - case <-q.ctx.Done(): - err := q.ctx.Err() - return err - case job, active := <-q.msgq: - if !active { - break consumeLoop - } - // test condition for `job.session` means db session still could be controlled outside, - // it's used in unit test for now. - if session == nil && job.session == nil { - baseConn, err2 := job.loader.toDB.GetBaseConn(q.ctx) - if err2 != nil { - return err2 - } - defer func(baseConn *conn.BaseConn) { - err2 := job.loader.toDB.ForceCloseConn(baseConn) - if err2 != nil { - job.loader.logger.Warn("fail to close connection", zap.Error(err2)) - } - }(baseConn) - session = &DBConn{ - name: job.loader.cfg.Name, - sourceID: job.loader.cfg.SourceID, - baseConn: baseConn, - resetBaseConnFn: func(*tcontext.Context, *conn.BaseConn) (*conn.BaseConn, error) { - return nil, terror.WithScope(terror.ErrDBBadConn.Generate("bad connection error restoreData"), terror.ScopeDownstream) - }, - } - } - if job.session == nil { - job.session = session - } - err := handler(q.ctx, job) - if err != nil { - return err - } - } - } - return nil - }) - } -} - -func (l *Loader) restoreData(ctx context.Context) error { - begin := time.Now() - dispatchMap := make(map[string]*fileJob) - concurrency := l.cfg.PoolSize - // `for v := range map` would present random order - // `dbs` array keep same order for restore schema job generating - var err error - dbs := make([]string, 0, len(l.db2Tables)) - for db := range l.db2Tables { - dbs = append(dbs, db) - } - tctx := tcontext.NewContext(ctx, l.logger) - - // run consumers of restore database schema queue - dbRestoreQueue := newJobQueue(ctx, concurrency, concurrency /** length of queue */) - dbRestoreQueue.startConsumers(func(ctx context.Context, job *restoreSchemaJob) error { - // restore database schema - job.loader.logger.Info("start to create schema", zap.String("schema file", job.filepath)) - err2 := job.loader.restoreSchema(ctx, job.session, job.filepath, job.database) - if err2 != nil { - return err2 - } - job.loader.logger.Info("finish to create schema", zap.String("schema file", job.filepath)) - return nil - }) - - // push database schema restoring jobs to the queue - for _, db := range dbs { - schemaFile := l.cfg.Dir + "/" + db + "-schema-create.sql" // cache friendly - err = dbRestoreQueue.push(&restoreSchemaJob{ - loader: l, - database: db, - table: "", - filepath: schemaFile, - }) - if err != nil { - break - } - } - - // check producing error - if err != nil { - runtimeErr := dbRestoreQueue.close() - if errors.ErrorEqual(err, context.Canceled) { - err = runtimeErr - } - return err - } - // wait whole task done & close queue - err = dbRestoreQueue.close() - if err != nil { - return err - } - - // run consumers of restore table schema queue - tblRestoreQueue := newJobQueue(ctx, concurrency, concurrency /** length of queue */) - 
tblRestoreQueue.startConsumers(func(ctx context.Context, job *restoreSchemaJob) error { - job.loader.logger.Info("start to create table", zap.String("table file", job.filepath)) - err2 := job.loader.restoreTable(ctx, job.session, job.filepath, job.database, job.table) - if err2 != nil { - return err2 - } - job.loader.logger.Info("finish to create table", zap.String("table file", job.filepath)) - return nil - }) - - // push table schema restoring jobs to the queue -tblSchemaLoop: - for _, db := range dbs { - for table := range l.db2Tables[db] { - schemaFile := l.cfg.Dir + "/" + db + "." + table + "-schema.sql" // cache friendly - if _, ok := l.tableInfos[tableName(db, table)]; !ok { - l.tableInfos[tableName(db, table)], err = parseTable(tctx, l.tableRouter, db, table, schemaFile, l.cfg.LoaderConfig.SQLMode, l.cfg.SourceID) - if err != nil { - err = terror.Annotatef(err, "parse table %s/%s", db, table) - break tblSchemaLoop - } - } - if l.checkPoint.IsTableFinished(db, table) { - l.logger.Info("table has finished, skip it.", zap.String("schema", db), zap.String("table", table)) - continue - } - err = tblRestoreQueue.push(&restoreSchemaJob{ - loader: l, - database: db, - table: table, - filepath: schemaFile, - }) - if err != nil { - break tblSchemaLoop - } - } - } - - // check producing error - if err != nil { - runtimeErr := tblRestoreQueue.close() - if errors.ErrorEqual(err, context.Canceled) { - err = runtimeErr - } - return err - } - // wait whole task done & close queue - err = tblRestoreQueue.close() - if err != nil { - return err - } - - // all schemas was restored - l.logger.Info("finish to create tables", zap.Duration("cost time", time.Since(begin))) - - // generate restore table data file job - for _, db := range dbs { - table2DataFileMap := l.db2Tables[db] - for table := range table2DataFileMap { - restoringFiles := l.checkPoint.GetRestoringFileInfo(db, table) - l.logger.Debug("restoring table data", zap.String("schema", db), zap.String("table", table), zap.Reflect("data files", restoringFiles)) - - for _, file := range table2DataFileMap[table] { - select { - case <-ctx.Done(): - l.logger.Warn("stop generate data file job", log.ShortError(ctx.Err())) - return ctx.Err() - default: - // do nothing - } - l.logger.Debug("dispatch data file", zap.String("schema", db), zap.String("table", table), zap.String("data file", file)) - - offset := int64(uninitializedOffset) - posSet, ok := restoringFiles[file] - if ok { - offset = posSet[0] - } - dispatchMap[db+"_"+table+"_"+file] = &fileJob{ - schema: db, - table: table, - dataFile: file, - offset: offset, - info: l.tableInfos[tableName(db, table)], - } - } - } - } - - // a simple and naive approach to dispatch files randomly based on the feature of golang map(range by random) - for _, j := range dispatchMap { - select { - case <-ctx.Done(): - l.logger.Warn("stop dispatch data file job", log.ShortError(ctx.Err())) - return ctx.Err() - case l.fileJobQueue <- j: - } - } - - l.logger.Info("all data files have been dispatched, waiting for them finished") - return nil -} - -// checkpointID returns ID which used for checkpoint table. 
-func (l *Loader) checkpointID() string { - if len(l.cfg.SourceID) > 0 { - return l.cfg.SourceID - } - dir, err := filepath.Abs(l.cfg.Dir) - if err != nil { - l.logger.Warn("get abs dir", zap.String("directory", l.cfg.Dir), log.ShortError(err)) - return l.cfg.Dir - } - return shortSha1(dir) -} diff --git a/dm/loader/loader_test.go b/dm/loader/loader_test.go deleted file mode 100644 index e124a30398a..00000000000 --- a/dm/loader/loader_test.go +++ /dev/null @@ -1,82 +0,0 @@ -// Copyright 2021 PingCAP, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// See the License for the specific language governing permissions and -// limitations under the License. - -package loader - -import ( - "context" - - . "github.com/pingcap/check" - "github.com/pingcap/errors" -) - -var _ = Suite(&testLoaderSuite{}) - -type testLoaderSuite struct{} - -func (*testLoaderSuite) TestJobQueue(c *C) { - procedure := func(ctx context.Context, jobsCount int, handler func(ctx context.Context, job *restoreSchemaJob) error) error { - jobQueue := newJobQueue(ctx, 16, 16) - jobQueue.startConsumers(handler) - for i := 0; i < jobsCount; i++ { - job := &restoreSchemaJob{ - session: &DBConn{}, // just for testing - } - if i == jobsCount/2 { - job.database = "error" - } - err := jobQueue.push(job) - if err != nil { - runtimeErr := jobQueue.close() - if errors.ErrorEqual(err, context.Canceled) { - err = runtimeErr - } - return err - } - } - return jobQueue.close() - } - - injectErr := errors.New("random injected error") - cases := []struct { - ctx context.Context - jobsCount int - handler func(ctx context.Context, job *restoreSchemaJob) error - exceptedErr error - }{ - { - ctx: context.Background(), - jobsCount: 128, - handler: func(ctx context.Context, job *restoreSchemaJob) error { - if job.database == "error" { - return injectErr - } - return nil - }, - exceptedErr: injectErr, - }, - { - ctx: context.Background(), - jobsCount: 128, - handler: func(ctx context.Context, job *restoreSchemaJob) error { - return nil - }, - exceptedErr: nil, - }, - } - - for _, testcase := range cases { - err := procedure(testcase.ctx, testcase.jobsCount, testcase.handler) - c.Assert(err, Equals, testcase.exceptedErr) - } -} diff --git a/dm/loader/metrics.go b/dm/loader/metrics.go index 2ee4d7339f5..99f6b8d08b9 100644 --- a/dm/loader/metrics.go +++ b/dm/loader/metrics.go @@ -119,16 +119,3 @@ func RegisterMetrics(registry *prometheus.Registry) { registry.MustRegister(loaderExitWithErrorCounter) registry.MustRegister(remainingTimeGauge) } - -func (l *Loader) removeLabelValuesWithTaskInMetrics(task string) { - tidbExecutionErrorCounter.DeletePartialMatch(prometheus.Labels{"task": task}) - txnHistogram.DeletePartialMatch(prometheus.Labels{"task": task}) - queryHistogram.DeletePartialMatch(prometheus.Labels{"task": task}) - stmtHistogram.DeletePartialMatch(prometheus.Labels{"task": task}) - dataFileGauge.DeletePartialMatch(prometheus.Labels{"task": task}) - tableGauge.DeletePartialMatch(prometheus.Labels{"task": task}) - dataSizeGauge.DeletePartialMatch(prometheus.Labels{"task": task}) - progressGauge.DeletePartialMatch(prometheus.Labels{"task": task}) - 
loaderExitWithErrorCounter.DeletePartialMatch(prometheus.Labels{"task": task}) - remainingTimeGauge.DeletePartialMatch(prometheus.Labels{"task": task}) -} diff --git a/dm/loader/status.go b/dm/loader/status.go deleted file mode 100644 index 0e370efcac5..00000000000 --- a/dm/loader/status.go +++ /dev/null @@ -1,75 +0,0 @@ -// Copyright 2019 PingCAP, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// See the License for the specific language governing permissions and -// limitations under the License. - -package loader - -import ( - "time" - - "github.com/pingcap/tiflow/dm/pb" - "github.com/pingcap/tiflow/dm/pkg/binlog" - "go.uber.org/zap" -) - -// Status implements Unit.Status. -func (l *Loader) Status(_ *binlog.SourceStatus) interface{} { - finishedSize := l.finishedDataSize.Load() - totalSize := l.totalDataSize.Load() - progress := percent(finishedSize, totalSize, l.finish.Load()) - currentSpeed := int64(l.speedRecorder.GetSpeed(float64(finishedSize))) - - s := &pb.LoadStatus{ - FinishedBytes: finishedSize, - TotalBytes: totalSize, - Progress: progress, - MetaBinlog: l.metaBinlog.Load(), - MetaBinlogGTID: l.metaBinlogGTID.Load(), - Bps: currentSpeed, - } - go l.printStatus() - return s -} - -// printStatus prints status like progress percentage. -func (l *Loader) printStatus() { - finishedSize := l.finishedDataSize.Load() - totalSize := l.totalDataSize.Load() - totalFileCount := l.totalFileCount.Load() - - interval := time.Since(l.dbTableDataLastUpdatedTime.Load()) - intervalSecond := interval.Seconds() - if intervalSecond == 0 { - return - } - - for db, tables := range l.dbTableDataFinishedSize { - for table, size := range tables { - curFinished := size.Load() - lastFinished := l.dbTableDataFinishedSize[db][table].Load() - speed := float64(curFinished-lastFinished) / intervalSecond - l.dbTableDataLastFinishedSize[db][table].Store(curFinished) - if speed > 0 { - remainingSeconds := float64(l.dbTableDataTotalSize[db][table].Load()-curFinished) / speed - remainingTimeGauge.WithLabelValues(l.cfg.Name, l.cfg.WorkerName, l.cfg.SourceID, db, table).Set(remainingSeconds) - } - } - } - l.dbTableDataLastUpdatedTime.Store(time.Now()) - - l.logger.Info("progress status of load", - zap.Int64("finished_bytes", finishedSize), - zap.Int64("total_bytes", totalSize), - zap.Int64("total_file_count", totalFileCount), - zap.String("progress", percent(finishedSize, totalSize, l.finish.Load()))) - progressGauge.WithLabelValues(l.cfg.Name, l.cfg.SourceID).Set(progress(finishedSize, totalSize, l.finish.Load())) -} diff --git a/dm/loader/status_test.go b/dm/loader/status_test.go deleted file mode 100644 index c0e54380014..00000000000 --- a/dm/loader/status_test.go +++ /dev/null @@ -1,56 +0,0 @@ -// Copyright 2021 PingCAP, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// See the License for the specific language governing permissions and -// limitations under the License. - -package loader - -import ( - "sync" - - . "github.com/pingcap/check" - "github.com/pingcap/tidb/dumpling/export" - "github.com/pingcap/tiflow/dm/config" - "github.com/pingcap/tiflow/dm/pkg/log" - "go.uber.org/atomic" -) - -func (*testLoaderSuite) TestConcurrentStatus(c *C) { - l := &Loader{speedRecorder: export.NewSpeedRecorder()} - l.cfg = &config.SubTaskConfig{} - l.logger = log.L() - l.finishedDataSize.Store(100) - l.totalDataSize.Store(200) - l.totalFileCount.Store(10) - l.dbTableDataFinishedSize = map[string]map[string]*atomic.Int64{ - "db1": { - "table1": atomic.NewInt64(10), - "table2": atomic.NewInt64(20), - }, - } - l.dbTableDataLastFinishedSize = map[string]map[string]*atomic.Int64{ - "db1": { - "table1": atomic.NewInt64(0), - "table2": atomic.NewInt64(0), - }, - } - - // test won't race or panic - wg := sync.WaitGroup{} - wg.Add(20) - for i := 0; i < 20; i++ { - go func() { - l.Status(nil) - wg.Done() - }() - } - wg.Wait() -} diff --git a/dm/pkg/checker/conn_checker.go b/dm/pkg/checker/conn_checker.go index 1c072a44367..7a91ef295ca 100644 --- a/dm/pkg/checker/conn_checker.go +++ b/dm/pkg/checker/conn_checker.go @@ -175,19 +175,14 @@ func (l *LoaderConnNumberChecker) Check(ctx context.Context) *Result { if !l.unlimitedConn && result.State == StateFailure { // if the max_connections is set as a specific number // and we failed because of the number connecions needed is smaller than max_connections - for _, stCfg := range l.stCfgs { - if stCfg.NeedUseLightning() { - // if we're using lightning, this error should be omitted - // because lightning doesn't need to keep connections while restoring. - result.Errors = append( - result.Errors, - NewWarn("task precheck cannot accurately check the number of connection needed for Lightning."), - ) - result.State = StateWarning - result.Instruction = "You need to set a larger connection for TiDB." - break - } - } + // if we're using lightning, this error should be omitted + // because lightning doesn't need to keep connections while restoring. + result.Errors = append( + result.Errors, + NewWarn("task precheck cannot accurately check the number of connection needed for Lightning."), + ) + result.State = StateWarning + result.Instruction = "You need to set a larger connection for TiDB." } return result } diff --git a/dm/pkg/terror/error_list.go b/dm/pkg/terror/error_list.go index 6f913724faa..597770608e3 100644 --- a/dm/pkg/terror/error_list.go +++ b/dm/pkg/terror/error_list.go @@ -267,6 +267,7 @@ const ( codeConfigInvalidSafeModeDuration codeConfigConfictSafeModeDurationAndSafeMode codeConfigInvalidLoadPhysicalDuplicateResolution + codeConfigColumnMappingDeprecated ) // Binlog operation error code list. 
@@ -974,6 +975,7 @@ var ( ErrConfigInvalidSafeModeDuration = New(codeConfigInvalidSafeModeDuration, ClassConfig, ScopeInternal, LevelMedium, "safe-mode-duration '%s' parsed failed: %v", "Please check the `safe-mode-duration` is correct.") ErrConfigConfictSafeModeDurationAndSafeMode = New(codeConfigConfictSafeModeDurationAndSafeMode, ClassConfig, ScopeInternal, LevelLow, "safe-mode(true) conflicts with safe-mode-duration(0s)", "Please set safe-mode to false or safe-mode-duration to non-zero.") ErrConfigInvalidPhysicalDuplicateResolution = New(codeConfigInvalidLoadPhysicalDuplicateResolution, ClassConfig, ScopeInternal, LevelMedium, "invalid load on-duplicate-physical option '%s'", "Please choose a valid value in ['none', 'manual'] or leave it empty.") + ErrConfigColumnMappingDeprecated = New(codeConfigColumnMappingDeprecated, ClassConfig, ScopeInternal, LevelHigh, "column-mapping is not supported since v6.6.0", "Please use extract-table/extract-schema/extract-source to handle data conflict when merge tables. See https://docs.pingcap.com/tidb/v6.4/task-configuration-file-full#task-configuration-file-template-advanced") // Binlog operation error. ErrBinlogExtractPosition = New(codeBinlogExtractPosition, ClassBinlogOp, ScopeInternal, LevelHigh, "", "") diff --git a/dm/tests/all_mode/run.sh b/dm/tests/all_mode/run.sh index b0bf3d09084..2bad386fab7 100755 --- a/dm/tests/all_mode/run.sh +++ b/dm/tests/all_mode/run.sh @@ -147,74 +147,6 @@ function test_query_timeout() { echo "[$(date)] <<<<<< finish test_query_timeout >>>>>>" } -function test_stop_task_before_checkpoint() { - echo "[$(date)] <<<<<< start test_stop_task_before_checkpoint >>>>>>" - run_sql_file $cur/data/db1.prepare.sql $MYSQL_HOST1 $MYSQL_PORT1 $MYSQL_PASSWORD1 - check_contains 'Query OK, 2 rows affected' - run_sql_file $cur/data/db2.prepare.sql $MYSQL_HOST2 $MYSQL_PORT2 $MYSQL_PASSWORD2 - check_contains 'Query OK, 3 rows affected' - - # start DM worker and master - run_dm_master $WORK_DIR/master $MASTER_PORT $cur/conf/dm-master.toml - check_rpc_alive $cur/../bin/check_master_online 127.0.0.1:$MASTER_PORT - check_metric $MASTER_PORT 'start_leader_counter' 3 0 2 - - export GO_FAILPOINTS='github.com/pingcap/tiflow/dm/loader/WaitLoaderStopAfterInitCheckpoint=return(5)' - run_dm_worker $WORK_DIR/worker1 $WORKER1_PORT $cur/conf/dm-worker1.toml - check_rpc_alive $cur/../bin/check_worker_online 127.0.0.1:$WORKER1_PORT - run_dm_worker $WORK_DIR/worker2 $WORKER2_PORT $cur/conf/dm-worker2.toml - check_rpc_alive $cur/../bin/check_worker_online 127.0.0.1:$WORKER2_PORT - - # operate mysql config to worker - cp $cur/conf/source1.yaml $WORK_DIR/source1.yaml - cp $cur/conf/source2.yaml $WORK_DIR/source2.yaml - sed -i "/relay-binlog-name/i\relay-dir: $WORK_DIR/worker1/relay_log" $WORK_DIR/source1.yaml - sed -i "/relay-binlog-name/i\relay-dir: $WORK_DIR/worker2/relay_log" $WORK_DIR/source2.yaml - dmctl_operate_source create $WORK_DIR/source1.yaml $SOURCE_ID1 - dmctl_operate_source create $WORK_DIR/source2.yaml $SOURCE_ID2 - - # generate uncomplete checkpoint - cp $cur/conf/dm-task.yaml $WORK_DIR/dm-task.yaml - sed -i "s/import-mode: sql/import-mode: loader/" $WORK_DIR/dm-task.yaml - dmctl_start_task "$WORK_DIR/dm-task.yaml" "--remove-meta" - check_log_contain_with_retry 'wait loader stop after init checkpoint' $WORK_DIR/worker1/log/dm-worker.log - check_log_contain_with_retry 'wait loader stop after init checkpoint' $WORK_DIR/worker2/log/dm-worker.log - run_dm_ctl $WORK_DIR "127.0.0.1:$MASTER_PORT" \ - "stop-task test" \ - "\"result\": true" 3 - - # 
restart dm-worker - pkill -9 dm-worker.test 2>/dev/null || true - check_port_offline $WORKER1_PORT 20 - check_port_offline $WORKER2_PORT 20 - - export GO_FAILPOINTS='github.com/pingcap/tiflow/dm/loader/WaitLoaderStopBeforeLoadCheckpoint=return(5)' - run_dm_worker $WORK_DIR/worker1 $WORKER1_PORT $cur/conf/dm-worker1.toml - run_dm_worker $WORK_DIR/worker2 $WORKER2_PORT $cur/conf/dm-worker2.toml - check_rpc_alive $cur/../bin/check_worker_online 127.0.0.1:$WORKER1_PORT - check_rpc_alive $cur/../bin/check_worker_online 127.0.0.1:$WORKER2_PORT - - # stop-task before load checkpoint - dmctl_start_task $WORK_DIR/dm-task.yaml - check_log_contain_with_retry 'wait loader stop before load checkpoint' $WORK_DIR/worker1/log/dm-worker.log - check_log_contain_with_retry 'wait loader stop before load checkpoint' $WORK_DIR/worker2/log/dm-worker.log - run_dm_ctl $WORK_DIR "127.0.0.1:$MASTER_PORT" \ - "stop-task test" \ - "\"result\": true" 3 - - dmctl_start_task $WORK_DIR/dm-task.yaml - check_sync_diff $WORK_DIR $cur/conf/diff_config.toml - run_dm_ctl $WORK_DIR "127.0.0.1:$MASTER_PORT" \ - "stop-task test" \ - "\"result\": true" 3 - - cleanup_process - cleanup_data all_mode - - export GO_FAILPOINTS='' - echo "[$(date)] <<<<<< finish test_stop_task_before_checkpoint >>>>>>" -} - function test_fail_job_between_event() { echo "[$(date)] <<<<<< start test_fail_job_between_event >>>>>>" run_sql_file $cur/data/db1.prepare.sql $MYSQL_HOST1 $MYSQL_PORT1 $MYSQL_PASSWORD1 @@ -416,7 +348,6 @@ function run() { test_fail_job_between_event test_session_config test_query_timeout - test_stop_task_before_checkpoint test_regexpr_router regexpr-task.yaml test_regexpr_router regexpr-task-lightning.yaml diff --git a/dm/tests/dmctl_command/run.sh b/dm/tests/dmctl_command/run.sh index 9a9dd3f9e2a..dfb2795c455 100644 --- a/dm/tests/dmctl_command/run.sh +++ b/dm/tests/dmctl_command/run.sh @@ -154,20 +154,7 @@ function run() { fi } -function checktask_full_mode_conn() { - # full mode - # dumpers: (2 + 2) for each - # loaders: 5 + 1 = 6 - run_sql_source1 "set @@GLOBAL.max_connections=3;" - check_task_not_pass $cur/conf/dm-task3.yaml # dumper threads too few - run_sql_source1 "set @@GLOBAL.max_connections=4;" - check_task_pass $cur/conf/dm-task3.yaml - - run_sql_tidb "set @@GLOBAL.max_connections=5;" # loader threads too few - check_task_not_pass $cur/conf/dm-task3.yaml - run_sql_tidb "set @@GLOBAL.max_connections=6;" - check_task_pass $cur/conf/dm-task3.yaml - +function check_privilege() { # test no enough privilege cp $cur/conf/dm-task3.yaml $WORK_DIR/temp.yaml sed -i "s/ user: \"root\"/ user: \"test1\"/g" $WORK_DIR/temp.yaml @@ -199,38 +186,6 @@ function check_task_lightning() { "task precheck cannot accurately check the number of connection needed for Lightning" 1 } -function check_full_mode_conn() { - # TODO: currently, pool-size are not efficacious for Lightning - # which simply determines the concurrency by hardware conditions. - # This should be solved in the future. 
- run_sql_tidb "set @@GLOBAL.max_connections=151;" - run_sql_source1 "set @@GLOBAL.max_connections=151;" - run_sql_source2 "set @@GLOBAL.max_connections=151;" - run_sql_tidb "drop database if exists dmctl_conn" - run_sql_both_source "drop database if exists dmctl_conn" - run_sql_both_source "create database dmctl_conn" - # ref: many_tables/run.sh - for ((i = 0; i <= 1000; ++i)); do - run_sql_source1 "create table dmctl_conn.test_$i(id int primary key)" - run_sql_source1 "insert into dmctl_conn.test_$i values (1),(2),(3),(4),(5)" - done - dmctl_start_task_standalone "$cur/conf/dm-task3.yaml" --remove-meta - run_sql_source1 'SHOW PROCESSLIST;' - check_rows_equal 5 # 4 + 1 for SHOWPROCESSLIST - - run_dm_ctl_with_retry $WORK_DIR "127.0.0.1:$MASTER_PORT" \ - "query-status test" \ - "Load" 1 - run_sql_tidb 'SHOW PROCESSLIST;' - check_rows_equal 7 # (5 + 1) + 1 for SHOW PROCESSLIST= 7 - - run_dm_ctl $WORK_DIR "127.0.0.1:$MASTER_PORT" \ - "stop-task test" \ - "\"result\": true" 2 - run_sql_tidb "drop database if exists dm_meta" # cleanup checkpoint - run_sql_tidb "drop database if exists dmctl_conn" -} - function run_validation_start_stop_cmd { cleanup_data dmctl_command cleanup_data_upstream dmctl_command @@ -763,8 +718,7 @@ function run_check_task() { run_sql_source1 "set @@GLOBAL.max_connections=151;" run_sql_source2 "set @@GLOBAL.max_connections=151;" check_task_lightning - check_full_mode_conn - checktask_full_mode_conn + check_privilege run_sql_source1 "set @@GLOBAL.max_connections=151;" run_sql_source2 "set @@GLOBAL.max_connections=151;" run_sql_tidb "set @@GLOBAL.max_connections=0;" # set default (unlimited), or other tests will fail diff --git a/dm/tests/import_goroutine_leak/conf/diff_config.toml b/dm/tests/import_goroutine_leak/conf/diff_config.toml deleted file mode 100644 index 7d91505b329..00000000000 --- a/dm/tests/import_goroutine_leak/conf/diff_config.toml +++ /dev/null @@ -1,27 +0,0 @@ -check-thread-count = 4 - -export-fix-sql = true - -check-struct-only = false - -[task] - output-dir = "/tmp/ticdc_dm_test/output" - - source-instances = ["mysql1"] - - target-instance = "tidb0" - - target-check-tables = ["import_goroutine_leak.t?*"] - -[data-sources] -[data-sources.mysql1] -host = "127.0.0.1" -port = 3306 -user = "root" -password = "123456" - -[data-sources.tidb0] -host = "127.0.0.1" -port = 4000 -user = "test" -password = "123456" diff --git a/dm/tests/import_goroutine_leak/conf/dm-master.toml b/dm/tests/import_goroutine_leak/conf/dm-master.toml deleted file mode 100644 index 7cecf59ad86..00000000000 --- a/dm/tests/import_goroutine_leak/conf/dm-master.toml +++ /dev/null @@ -1,4 +0,0 @@ -# Master Configuration. 
-master-addr = ":8261" -advertise-addr = "127.0.0.1:8261" -auto-compaction-retention = "3s" diff --git a/dm/tests/import_goroutine_leak/conf/dm-task.yaml b/dm/tests/import_goroutine_leak/conf/dm-task.yaml deleted file mode 100644 index d10c5591722..00000000000 --- a/dm/tests/import_goroutine_leak/conf/dm-task.yaml +++ /dev/null @@ -1,42 +0,0 @@ ---- -name: test -task-mode: full -is-sharding: false -meta-schema: "dm_meta" -# enable-heartbeat: true - -target-database: - host: "127.0.0.1" - port: 4000 - user: "root" - password: "" - -mysql-instances: - - source-id: "mysql-replica-01" - block-allow-list: "instance" - mydumper-config-name: "global" - loader-config-name: "global" - syncer-config-name: "global" - -block-allow-list: - instance: - do-dbs: ["import_goroutine_leak"] - -mydumpers: - global: - threads: 4 - chunk-filesize: 0 - skip-tz-utc: true - statement-size: 100 - extra-args: "" - -loaders: - global: - pool-size: 16 - dir: "./dumped_data" - import-mode: "loader" - -syncers: - global: - worker-count: 16 - batch: 100 diff --git a/dm/tests/import_goroutine_leak/conf/dm-worker1.toml b/dm/tests/import_goroutine_leak/conf/dm-worker1.toml deleted file mode 100644 index 7a72ea72bf8..00000000000 --- a/dm/tests/import_goroutine_leak/conf/dm-worker1.toml +++ /dev/null @@ -1,2 +0,0 @@ -name = "worker1" -join = "127.0.0.1:8261" diff --git a/dm/tests/import_goroutine_leak/conf/source1.yaml b/dm/tests/import_goroutine_leak/conf/source1.yaml deleted file mode 100644 index 6d0d3ba09c7..00000000000 --- a/dm/tests/import_goroutine_leak/conf/source1.yaml +++ /dev/null @@ -1,11 +0,0 @@ -source-id: mysql-replica-01 -flavor: '' -enable-gtid: false -enable-relay: false # in this case enable-relay will trigger a subtask reset, which fails the test -relay-binlog-name: '' -relay-binlog-gtid: '' -from: - host: 127.0.0.1 - user: root - password: /Q7B9DizNLLTTfiZHv9WoEAKamfpIUs= - port: 3306 diff --git a/dm/tests/import_goroutine_leak/run.sh b/dm/tests/import_goroutine_leak/run.sh deleted file mode 100644 index 03a60ec41d5..00000000000 --- a/dm/tests/import_goroutine_leak/run.sh +++ /dev/null @@ -1,149 +0,0 @@ -#!/bin/bash - -set -eu - -cur=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) -source $cur/../_utils/test_prepare -WORK_DIR=$TEST_DIR/$TEST_NAME - -# only use one DM-worker instance to avoid re-schedule after restart process. 
- -COUNT=200 -function prepare_datafile() { - data_file="$WORK_DIR/db1.prepare.sql" - echo 'DROP DATABASE if exists import_goroutine_leak;' >>$data_file - echo 'CREATE DATABASE import_goroutine_leak;' >>$data_file - echo 'USE import_goroutine_leak;' >>$data_file - echo "CREATE TABLE t1(i TINYINT, j INT UNIQUE KEY);" >>$data_file - for j in $(seq $COUNT); do - echo "INSERT INTO t1 VALUES (1,${j}0001),(1,${j}0011);" >>$data_file - done -} - -function run() { - prepare_datafile - - run_sql_file $WORK_DIR/db1.prepare.sql $MYSQL_HOST1 $MYSQL_PORT1 $MYSQL_PASSWORD1 - - echo "dm-worker panic, doJob of import unit workers don't exit" - # send to closed `runFatalChan` - inject_points=("github.com/pingcap/tiflow/dm/loader/LoadDataSlowDown=sleep(1000)" - "github.com/pingcap/tiflow/dm/loader/dispatchError=return(1)" - "github.com/pingcap/tiflow/dm/loader/executeSQLError=return(1)" - "github.com/pingcap/tiflow/dm/loader/returnDoJobError=return(1)" - "github.com/pingcap/tiflow/dm/loader/workerCantClose=return(1)" - ) - export GO_FAILPOINTS="$(join_string \; ${inject_points[@]})" - - run_dm_master $WORK_DIR/master $MASTER_PORT $cur/conf/dm-master.toml - check_rpc_alive $cur/../bin/check_master_online 127.0.0.1:$MASTER_PORT - run_dm_worker $WORK_DIR/worker1 $WORKER1_PORT $cur/conf/dm-worker1.toml - check_rpc_alive $cur/../bin/check_worker_online 127.0.0.1:$WORKER1_PORT - # operate mysql config to worker - cp $cur/conf/source1.yaml $WORK_DIR/source1.yaml - sed -i "/relay-binlog-name/i\relay-dir: $WORK_DIR/worker1/relay_log" $WORK_DIR/source1.yaml - dmctl_operate_source create $WORK_DIR/source1.yaml $SOURCE_ID1 - - run_dm_ctl $WORK_DIR "127.0.0.1:$MASTER_PORT" \ - "start-task $cur/conf/dm-task.yaml" \ - "\"source\": \"$SOURCE_ID1\"" 1 - - check_port_offline $WORKER1_PORT 20 - - # dm-worker1 panics - err_cnt=$(grep "panic" $WORK_DIR/worker1/log/stdout.log | wc -l) - if [ $err_cnt -ne 1 ]; then - echo "dm-worker1 doesn't panic, panic count ${err_cnt}" - exit 2 - fi - - echo "dm-worker panic again, workers of import unit don't exit" - # send to closed `runFatalChan` - inject_points=("github.com/pingcap/tiflow/dm/loader/LoadDataSlowDown=sleep(1000)" - "github.com/pingcap/tiflow/dm/loader/dispatchError=return(1)" - "github.com/pingcap/tiflow/dm/loader/executeSQLError=return(1)" - "github.com/pingcap/tiflow/dm/loader/returnDoJobError=return(1)" - "github.com/pingcap/tiflow/dm/loader/dontWaitWorkerExit=return(1)" - ) - export GO_FAILPOINTS="$(join_string \; ${inject_points[@]})" - run_dm_worker $WORK_DIR/worker1 $WORKER1_PORT $cur/conf/dm-worker1.toml - - check_port_offline $WORKER1_PORT 20 - sleep 2 - - # dm-worker1 panics - err_cnt=$(grep "panic" $WORK_DIR/worker1/log/stdout.log | wc -l) - # there may be more panic - if [ $err_cnt -lt 2 ]; then - echo "dm-worker1 doesn't panic again, panic count ${err_cnt}" - exit 2 - fi - - echo "restart dm-workers with errros to pause" - # paused with injected error - inject_points=("github.com/pingcap/tiflow/dm/loader/LoadDataSlowDown=sleep(1000)" - "github.com/pingcap/tiflow/dm/loader/dispatchError=return(1)" - "github.com/pingcap/tiflow/dm/loader/executeSQLError=return(1)" - "github.com/pingcap/tiflow/dm/loader/returnDoJobError=return(1)" - ) - export GO_FAILPOINTS="$(join_string \; ${inject_points[@]})" - run_dm_worker $WORK_DIR/worker1 $WORKER1_PORT $cur/conf/dm-worker1.toml - check_rpc_alive $cur/../bin/check_worker_online 127.0.0.1:$WORKER1_PORT - - run_dm_ctl_with_retry $WORK_DIR "127.0.0.1:$MASTER_PORT" \ - "query-status test" \ - "inject failpoint dispatchError" 1 - 
- echo "restart dm-workers block in sending to chan" - kill_process dm-worker - check_port_offline $WORKER1_PORT 20 - - # use a small job chan size to block the sender - inject_points=("github.com/pingcap/tiflow/dm/loader/LoadDataSlowDown=sleep(1000)" - "github.com/pingcap/tiflow/dm/loader/executeSQLError=return(1)" - "github.com/pingcap/tiflow/dm/loader/returnDoJobError=return(1)" - "github.com/pingcap/tiflow/dm/loader/workerChanSize=return(10)" - ) - export GO_FAILPOINTS="$(join_string \; ${inject_points[@]})" - run_dm_worker $WORK_DIR/worker1 $WORKER1_PORT $cur/conf/dm-worker1.toml - check_rpc_alive $cur/../bin/check_worker_online 127.0.0.1:$WORKER1_PORT - - # wait until the task running - run_dm_ctl_with_retry $WORK_DIR "127.0.0.1:$MASTER_PORT" \ - "query-status test" \ - '"stage": "Running"' 1 - sleep 2 # wait to be blocked - - # check to be blocked - curl -X POST 127.0.0.1:$WORKER1_PORT/debug/pprof/goroutine?debug=2 >$WORK_DIR/goroutine.worker1 - check_log_contains $WORK_DIR/goroutine.worker1 "chan send" - - ps aux | grep dm-worker | awk '{print $2}' | xargs kill || true - sleep 5 - worker_cnt=$(ps aux | grep dm-worker | grep -v "grep" | wc -l) - if [ $worker_cnt -lt 1 ]; then - echo "some dm-workers exit, remain count ${worker_cnt}" - exit 2 - fi - - echo "force to restart dm-workers without errors" - ps aux | grep dm-worker | grep -v "grep" | awk '{print $2}' | xargs kill -9 || true - - export GO_FAILPOINTS='' - run_dm_worker $WORK_DIR/worker1 $WORKER1_PORT $cur/conf/dm-worker1.toml - check_rpc_alive $cur/../bin/check_worker_online 127.0.0.1:$WORKER1_PORT - - run_dm_ctl_with_retry $WORK_DIR "127.0.0.1:$MASTER_PORT" \ - "query-status test" \ - '"stage": "Finished"' 1 - - check_sync_diff $WORK_DIR $cur/conf/diff_config.toml -} - -cleanup_data import_goroutine_leak -# also cleanup dm processes in case of last run failed -cleanup_process $* -run $* -cleanup_process $* - -echo "[$(date)] <<<<<< test case $TEST_NAME success! >>>>>>" diff --git a/dm/tests/load_interrupt/conf/diff_config.toml b/dm/tests/load_interrupt/conf/diff_config.toml deleted file mode 100644 index 5933b9634b6..00000000000 --- a/dm/tests/load_interrupt/conf/diff_config.toml +++ /dev/null @@ -1,29 +0,0 @@ -# diff Configuration. - -check-thread-count = 4 - -export-fix-sql = true - -check-struct-only = false - -[task] - output-dir = "/tmp/ticdc_dm_test/output" - - source-instances = ["mysql1"] - - target-instance = "tidb0" - - target-check-tables = ["load_interrupt.t?*"] - -[data-sources] -[data-sources.mysql1] -host = "127.0.0.1" -port = 3306 -user = "root" -password = "123456" - -[data-sources.tidb0] -host = "127.0.0.1" -port = 4000 -user = "test" -password = "123456" diff --git a/dm/tests/load_interrupt/conf/dm-master.toml b/dm/tests/load_interrupt/conf/dm-master.toml deleted file mode 100644 index 7cecf59ad86..00000000000 --- a/dm/tests/load_interrupt/conf/dm-master.toml +++ /dev/null @@ -1,4 +0,0 @@ -# Master Configuration. 
-master-addr = ":8261" -advertise-addr = "127.0.0.1:8261" -auto-compaction-retention = "3s" diff --git a/dm/tests/load_interrupt/conf/dm-task.yaml b/dm/tests/load_interrupt/conf/dm-task.yaml deleted file mode 100644 index 833ba15b113..00000000000 --- a/dm/tests/load_interrupt/conf/dm-task.yaml +++ /dev/null @@ -1,42 +0,0 @@ ---- -name: test -task-mode: full -is-sharding: false -meta-schema: "dm_meta" -# enable-heartbeat: true - -target-database: - host: "127.0.0.1" - port: 4000 - user: "root" - password: "" - -mysql-instances: - - source-id: "mysql-replica-01" - block-allow-list: "instance" - mydumper-config-name: "global" - loader-config-name: "global" - syncer-config-name: "global" - -block-allow-list: - instance: - do-dbs: ["load_interrupt"] - -mydumpers: - global: - threads: 4 - chunk-filesize: 0 - skip-tz-utc: true - statement-size: 100 - extra-args: "" - -loaders: - global: - pool-size: 16 - dir: "./dumped_data" - import-mode: "loader" - -syncers: - global: - worker-count: 16 - batch: 100 diff --git a/dm/tests/load_interrupt/conf/dm-worker1.toml b/dm/tests/load_interrupt/conf/dm-worker1.toml deleted file mode 100644 index 7a72ea72bf8..00000000000 --- a/dm/tests/load_interrupt/conf/dm-worker1.toml +++ /dev/null @@ -1,2 +0,0 @@ -name = "worker1" -join = "127.0.0.1:8261" diff --git a/dm/tests/load_interrupt/conf/source1.yaml b/dm/tests/load_interrupt/conf/source1.yaml deleted file mode 100644 index 7d67feb8cfd..00000000000 --- a/dm/tests/load_interrupt/conf/source1.yaml +++ /dev/null @@ -1,11 +0,0 @@ -source-id: mysql-replica-01 -flavor: '' -enable-gtid: false -enable-relay: true -relay-binlog-name: '' -relay-binlog-gtid: '' -from: - host: 127.0.0.1 - user: root - password: /Q7B9DizNLLTTfiZHv9WoEAKamfpIUs= - port: 3306 diff --git a/dm/tests/load_interrupt/run.sh b/dm/tests/load_interrupt/run.sh deleted file mode 100755 index e0e96aebb9c..00000000000 --- a/dm/tests/load_interrupt/run.sh +++ /dev/null @@ -1,133 +0,0 @@ -#!/bin/bash - -set -eu - -cur=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) -source $cur/../_utils/test_prepare -WORK_DIR=$TEST_DIR/$TEST_NAME - -COUNT=200 -function prepare_datafile() { - for i in $(seq 2); do - data_file="$WORK_DIR/db$i.prepare.sql" - echo 'DROP DATABASE if exists load_interrupt;' >>$data_file - echo 'CREATE DATABASE load_interrupt;' >>$data_file - echo 'USE load_interrupt;' >>$data_file - echo "CREATE TABLE t$i(i TINYINT, j INT UNIQUE KEY);" >>$data_file - for j in $(seq $COUNT); do - echo "INSERT INTO t$i VALUES ($i,${j}000$i),($i,${j}001$i);" >>$data_file - done - done -} - -function check_row_count() { - index=$1 - lines=$(($(wc -l $WORK_DIR/db$index.prepare.sql | awk '{print $1}') - 4)) - # each line has two insert values - lines=$((lines * 2)) - run_sql "SELECT FLOOR(offset / end_pos * $lines) as cnt from dm_meta.test_loader_checkpoint where cp_table = 't$index'" $TIDB_PORT $TIDB_PASSWORD - estimate=$(tail -n 1 "$TEST_DIR/sql_res.$TEST_NAME.txt") - run_sql "SELECT count(1) as cnt from $TEST_NAME.t$index" $TIDB_PORT $TIDB_PASSWORD - row_count=$(tail -n 1 "$TEST_DIR/sql_res.$TEST_NAME.txt") - echo "estimate row count: $estimate, real row count: $row_count" - [ "$estimate" == "$row_count" ] -} - -function test_save_checkpoint_failed() { - prepare_datafile - run_sql_file $WORK_DIR/db1.prepare.sql $MYSQL_HOST1 $MYSQL_PORT1 $MYSQL_PASSWORD1 - - export GO_FAILPOINTS="github.com/pingcap/tiflow/dm/loader/loaderCPUpdateOffsetError=return()" - - run_dm_master $WORK_DIR/master $MASTER_PORT $cur/conf/dm-master.toml - check_rpc_alive 
$cur/../bin/check_master_online 127.0.0.1:$MASTER_PORT - run_dm_worker $WORK_DIR/worker1 $WORKER1_PORT $cur/conf/dm-worker1.toml - check_rpc_alive $cur/../bin/check_worker_online 127.0.0.1:$WORKER1_PORT - # operate mysql config to worker - cp $cur/conf/source1.yaml $WORK_DIR/source1.yaml - sed -i "/relay-binlog-name/i\relay-dir: $WORK_DIR/worker1/relay_log" $WORK_DIR/source1.yaml - dmctl_operate_source create $WORK_DIR/source1.yaml $SOURCE_ID1 - - run_dm_ctl $WORK_DIR "127.0.0.1:$MASTER_PORT" \ - "start-task $cur/conf/dm-task.yaml" - - # load task should Paused because job file is not right - run_dm_ctl_with_retry $WORK_DIR "127.0.0.1:$MASTER_PORT" \ - "query-status test" \ - "Paused" 1 - - # check dump files are generated before worker down - ls $WORK_DIR/worker1/dumped_data.test - - echo "test_save_checkpoint_failed SUCCESS!" - cleanup_process $* - cleanup_data load_interrupt -} - -function run() { - test_save_checkpoint_failed - - prepare_datafile - run_sql_file $WORK_DIR/db1.prepare.sql $MYSQL_HOST1 $MYSQL_PORT1 $MYSQL_PASSWORD1 - THRESHOLD=1024 - export GO_FAILPOINTS="github.com/pingcap/tiflow/dm/loader/LoadExceedOffsetExit=return($THRESHOLD)" - - run_dm_master $WORK_DIR/master $MASTER_PORT $cur/conf/dm-master.toml - check_rpc_alive $cur/../bin/check_master_online 127.0.0.1:$MASTER_PORT - run_dm_worker $WORK_DIR/worker1 $WORKER1_PORT $cur/conf/dm-worker1.toml - check_rpc_alive $cur/../bin/check_worker_online 127.0.0.1:$WORKER1_PORT - - # operate mysql config to worker - cp $cur/conf/source1.yaml $WORK_DIR/source1.yaml - sed -i "/relay-binlog-name/i\relay-dir: $WORK_DIR/worker1/relay_log" $WORK_DIR/source1.yaml - dmctl_operate_source create $WORK_DIR/source1.yaml $SOURCE_ID1 - - # don't check result, because worker may meet failpoint `LoadExceedOffsetExit` before correct response. - # let following check (port offline, dump data) ensure task has been started - run_dm_ctl $WORK_DIR "127.0.0.1:$MASTER_PORT" \ - "start-task $cur/conf/dm-task.yaml" - - check_port_offline $WORKER1_PORT 20 - - # check dump files are generated before worker down - ls $WORK_DIR/worker1/dumped_data.test - - run_sql "SELECT count(*) from dm_meta.test_loader_checkpoint where cp_schema = '$TEST_NAME' and offset < $THRESHOLD" $TIDB_PORT $TIDB_PASSWORD - check_contains "count(*): 1" - # TODO: block for dumpling temporarily - # check_row_count 1 - - # only failed at the first two time, will retry later and success - export GO_FAILPOINTS='github.com/pingcap/tiflow/dm/loader/LoadExecCreateTableFailed=3*return("1213")' # ER_LOCK_DEADLOCK, retryable error code - run_dm_worker $WORK_DIR/worker1 $WORKER1_PORT $cur/conf/dm-worker1.toml - check_rpc_alive $cur/../bin/check_worker_online 127.0.0.1:$WORKER1_PORT - - sleep 8 - echo "check sync diff after restarted dm-worker" - check_sync_diff $WORK_DIR $cur/conf/diff_config.toml - - # LoadExecCreateTableFailed error return twice - sleep 8 - err_cnt=$(grep LoadExecCreateTableFailed $WORK_DIR/worker1/log/dm-worker.log | wc -l) - if [ $err_cnt -ne 2 ]; then - echo "error LoadExecCreateTableFailed's count is not 2" - exit 2 - fi - - # strange, TiDB (at least with mockTiKV) needs a long time to see the update of `test_loader_checkpoint`, - # and even later txn may see the older state than the earlier txn. 
- sleep 8 - run_sql "SELECT count(*) from dm_meta.test_loader_checkpoint where cp_schema = '$TEST_NAME' and offset = end_pos" $TIDB_PORT $TIDB_PASSWORD - check_contains "count(*): 1" - - export GO_FAILPOINTS='' - ls $WORK_DIR/worker1/dumped_data.test && exit 1 || echo "worker1 auto removed dump files" -} - -cleanup_data load_interrupt -# also cleanup dm processes in case of last run failed -cleanup_process $* -run $* -cleanup_process $* - -echo "[$(date)] <<<<<< test case $TEST_NAME success! >>>>>>" diff --git a/dm/tests/load_task/conf/diff_config1.toml b/dm/tests/load_task/conf/diff_config1.toml deleted file mode 100644 index c1b51cdbde0..00000000000 --- a/dm/tests/load_task/conf/diff_config1.toml +++ /dev/null @@ -1,35 +0,0 @@ -# diff Configuration. - -check-thread-count = 4 - -export-fix-sql = true - -check-struct-only = false - -[task] - output-dir = "/tmp/ticdc_dm_test/output" - - source-instances = ["mysql1", "mysql2"] - - target-instance = "tidb0" - - target-check-tables = ["load_task1.t?*"] - -[data-sources] -[data-sources.mysql1] -host = "127.0.0.1" -port = 3306 -user = "root" -password = "123456" - -[data-sources.mysql2] -host = "127.0.0.1" -port = 3307 -user = "root" -password = "123456" - -[data-sources.tidb0] -host = "127.0.0.1" -port = 4000 -user = "test" -password = "123456" diff --git a/dm/tests/load_task/conf/diff_config2.toml b/dm/tests/load_task/conf/diff_config2.toml deleted file mode 100644 index ec0038ccd61..00000000000 --- a/dm/tests/load_task/conf/diff_config2.toml +++ /dev/null @@ -1,35 +0,0 @@ -# diff Configuration. - -check-thread-count = 4 - -export-fix-sql = true - -check-struct-only = false - -[task] - output-dir = "/tmp/ticdc_dm_test/output" - - source-instances = ["mysql1", "mysql2"] - - target-instance = "tidb0" - - target-check-tables = ["load_task2.t?*"] - -[data-sources] -[data-sources.mysql1] -host = "127.0.0.1" -port = 3306 -user = "root" -password = "123456" - -[data-sources.mysql2] -host = "127.0.0.1" -port = 3307 -user = "root" -password = "123456" - -[data-sources.tidb0] -host = "127.0.0.1" -port = 4000 -user = "test" -password = "123456" diff --git a/dm/tests/load_task/conf/diff_config3.toml b/dm/tests/load_task/conf/diff_config3.toml deleted file mode 100644 index 3dc14b45ba5..00000000000 --- a/dm/tests/load_task/conf/diff_config3.toml +++ /dev/null @@ -1,29 +0,0 @@ -# diff Configuration. - -check-thread-count = 4 - -export-fix-sql = true - -check-struct-only = false - -[task] - output-dir = "/tmp/ticdc_dm_test/output" - - source-instances = ["mysql1"] - - target-instance = "tidb0" - - target-check-tables = ["load_task3.t?*"] - -[data-sources] -[data-sources.mysql1] -host = "127.0.0.1" -port = 3307 -user = "root" -password = "123456" - -[data-sources.tidb0] -host = "127.0.0.1" -port = 4000 -user = "test" -password = "123456" diff --git a/dm/tests/load_task/conf/diff_config4.toml b/dm/tests/load_task/conf/diff_config4.toml deleted file mode 100644 index 33f675f7e1c..00000000000 --- a/dm/tests/load_task/conf/diff_config4.toml +++ /dev/null @@ -1,29 +0,0 @@ -# diff Configuration. 
- -check-thread-count = 4 - -export-fix-sql = true - -check-struct-only = false - -[task] - output-dir = "/tmp/ticdc_dm_test/output" - - source-instances = ["mysql1"] - - target-instance = "tidb0" - - target-check-tables = ["load_task4.t?*"] - -[data-sources] -[data-sources.mysql1] -host = "127.0.0.1" -port = 3306 -user = "root" -password = "123456" - -[data-sources.tidb0] -host = "127.0.0.1" -port = 4000 -user = "test" -password = "123456" diff --git a/dm/tests/load_task/conf/dm-master.toml b/dm/tests/load_task/conf/dm-master.toml deleted file mode 100644 index 53a294e7d07..00000000000 --- a/dm/tests/load_task/conf/dm-master.toml +++ /dev/null @@ -1,6 +0,0 @@ -# Master Configuration. -master-addr = ":8261" -advertise-addr = "127.0.0.1:8261" - -rpc-timeout = "30s" -auto-compaction-retention = "3s" diff --git a/dm/tests/load_task/conf/dm-task-standalone.yaml b/dm/tests/load_task/conf/dm-task-standalone.yaml deleted file mode 100644 index 0d293423e43..00000000000 --- a/dm/tests/load_task/conf/dm-task-standalone.yaml +++ /dev/null @@ -1,41 +0,0 @@ ---- -name: load_task1 -task-mode: all -is-sharding: false -meta-schema: "dm_meta" -heartbeat-update-interval: 1 -heartbeat-report-interval: 1 - -target-database: - host: "127.0.0.1" - port: 4000 - user: "test" - password: "/Q7B9DizNLLTTfiZHv9WoEAKamfpIUs=" - -mysql-instances: - - source-id: "mysql-replica-01" - block-allow-list: "instance" - mydumper-config-name: "global" - loader-config-name: "global" - syncer-config-name: "global" - -block-allow-list: - instance: - do-dbs: ["load_task1"] - -mydumpers: - global: - threads: 4 - chunk-filesize: 64 - skip-tz-utc: true - extra-args: "" - -loaders: - global: - pool-size: 16 - dir: "./dumped_data" - -syncers: - global: - worker-count: 16 - batch: 100 diff --git a/dm/tests/load_task/conf/dm-task.yaml b/dm/tests/load_task/conf/dm-task.yaml deleted file mode 100644 index 32cacf0379a..00000000000 --- a/dm/tests/load_task/conf/dm-task.yaml +++ /dev/null @@ -1,47 +0,0 @@ ---- -name: load_task1 -task-mode: all -is-sharding: false -meta-schema: "dm_meta" -heartbeat-update-interval: 1 -heartbeat-report-interval: 1 - -target-database: - host: "127.0.0.1" - port: 4000 - user: "test" - password: "/Q7B9DizNLLTTfiZHv9WoEAKamfpIUs=" - -mysql-instances: - - source-id: "mysql-replica-01" - block-allow-list: "instance" - mydumper-config-name: "global" - loader-config-name: "global" - syncer-config-name: "global" - - - source-id: "mysql-replica-02" - block-allow-list: "instance" - mydumper-config-name: "global" - loader-config-name: "global" - syncer-config-name: "global" - -block-allow-list: - instance: - do-dbs: ["load_task1"] - -mydumpers: - global: - threads: 4 - chunk-filesize: 64 - skip-tz-utc: true - extra-args: "" - -loaders: - global: - pool-size: 16 - dir: "./dumped_data" - -syncers: - global: - worker-count: 16 - batch: 100 diff --git a/dm/tests/load_task/conf/dm-task2-standalone.yaml b/dm/tests/load_task/conf/dm-task2-standalone.yaml deleted file mode 100644 index bc98e4efac3..00000000000 --- a/dm/tests/load_task/conf/dm-task2-standalone.yaml +++ /dev/null @@ -1,41 +0,0 @@ ---- -name: load_task2 -task-mode: all -is-sharding: false -meta-schema: "dm_meta" -heartbeat-update-interval: 1 -heartbeat-report-interval: 1 - -target-database: - host: "127.0.0.1" - port: 4000 - user: "test" - password: "/Q7B9DizNLLTTfiZHv9WoEAKamfpIUs=" - -mysql-instances: - - source-id: "mysql-replica-01" - block-allow-list: "instance" - mydumper-config-name: "global" - loader-config-name: "global" - syncer-config-name: "global" - 
-block-allow-list: - instance: - do-dbs: ["load_task2"] - -mydumpers: - global: - threads: 4 - chunk-filesize: 64 - skip-tz-utc: true - extra-args: "" - -loaders: - global: - pool-size: 16 - dir: "./dumped_data" - -syncers: - global: - worker-count: 16 - batch: 100 diff --git a/dm/tests/load_task/conf/dm-task2.yaml b/dm/tests/load_task/conf/dm-task2.yaml deleted file mode 100644 index 14c8b07645d..00000000000 --- a/dm/tests/load_task/conf/dm-task2.yaml +++ /dev/null @@ -1,47 +0,0 @@ ---- -name: load_task2 -task-mode: all -is-sharding: false -meta-schema: "dm_meta" -heartbeat-update-interval: 1 -heartbeat-report-interval: 1 - -target-database: - host: "127.0.0.1" - port: 4000 - user: "test" - password: "/Q7B9DizNLLTTfiZHv9WoEAKamfpIUs=" - -mysql-instances: - - source-id: "mysql-replica-01" - block-allow-list: "instance" - mydumper-config-name: "global" - loader-config-name: "global" - syncer-config-name: "global" - - - source-id: "mysql-replica-02" - block-allow-list: "instance" - mydumper-config-name: "global" - loader-config-name: "global" - syncer-config-name: "global" - -block-allow-list: - instance: - do-dbs: ["load_task2"] - -mydumpers: - global: - threads: 4 - chunk-filesize: 64 - skip-tz-utc: true - extra-args: "" - -loaders: - global: - pool-size: 16 - dir: "./dumped_data" - -syncers: - global: - worker-count: 16 - batch: 100 diff --git a/dm/tests/load_task/conf/dm-task3.yaml b/dm/tests/load_task/conf/dm-task3.yaml deleted file mode 100644 index 0a06a4eabfb..00000000000 --- a/dm/tests/load_task/conf/dm-task3.yaml +++ /dev/null @@ -1,41 +0,0 @@ ---- -name: load_task3 -task-mode: all -is-sharding: false -meta-schema: "dm_meta" -heartbeat-update-interval: 1 -heartbeat-report-interval: 1 - -target-database: - host: "127.0.0.1" - port: 4000 - user: "test" - password: "/Q7B9DizNLLTTfiZHv9WoEAKamfpIUs=" - -mysql-instances: - - source-id: "mysql-replica-02" - block-allow-list: "instance" - mydumper-config-name: "global" - loader-config-name: "global" - syncer-config-name: "global" - -block-allow-list: - instance: - do-dbs: ["load_task3"] - -mydumpers: - global: - threads: 4 - chunk-filesize: 64 - skip-tz-utc: true - extra-args: "" - -loaders: - global: - pool-size: 16 - dir: "./dumped_data" - -syncers: - global: - worker-count: 16 - batch: 100 diff --git a/dm/tests/load_task/conf/dm-task4.yaml b/dm/tests/load_task/conf/dm-task4.yaml deleted file mode 100644 index 7e153eb4c49..00000000000 --- a/dm/tests/load_task/conf/dm-task4.yaml +++ /dev/null @@ -1,41 +0,0 @@ ---- -name: load_task4 -task-mode: all -is-sharding: false -meta-schema: "dm_meta" -heartbeat-update-interval: 1 -heartbeat-report-interval: 1 - -target-database: - host: "127.0.0.1" - port: 4000 - user: "test" - password: "/Q7B9DizNLLTTfiZHv9WoEAKamfpIUs=" - -mysql-instances: - - source-id: "mysql-replica-01" - block-allow-list: "instance" - mydumper-config-name: "global" - loader-config-name: "global" - syncer-config-name: "global" - -block-allow-list: - instance: - do-dbs: ["load_task4"] - -mydumpers: - global: - threads: 4 - chunk-filesize: 64 - skip-tz-utc: true - extra-args: "" - -loaders: - global: - pool-size: 16 - dir: "./dumped_data" - -syncers: - global: - worker-count: 16 - batch: 100 diff --git a/dm/tests/load_task/conf/dm-worker1.toml b/dm/tests/load_task/conf/dm-worker1.toml deleted file mode 100644 index 7a72ea72bf8..00000000000 --- a/dm/tests/load_task/conf/dm-worker1.toml +++ /dev/null @@ -1,2 +0,0 @@ -name = "worker1" -join = "127.0.0.1:8261" diff --git a/dm/tests/load_task/conf/dm-worker2.toml 
b/dm/tests/load_task/conf/dm-worker2.toml deleted file mode 100644 index 010e21c73eb..00000000000 --- a/dm/tests/load_task/conf/dm-worker2.toml +++ /dev/null @@ -1,2 +0,0 @@ -name = "worker2" -join = "127.0.0.1:8261" diff --git a/dm/tests/load_task/conf/dm-worker3.toml b/dm/tests/load_task/conf/dm-worker3.toml deleted file mode 100644 index ab7e1b9cb32..00000000000 --- a/dm/tests/load_task/conf/dm-worker3.toml +++ /dev/null @@ -1,2 +0,0 @@ -name = "worker3" -join = "127.0.0.1:8261" diff --git a/dm/tests/load_task/conf/source1.yaml b/dm/tests/load_task/conf/source1.yaml deleted file mode 100644 index c2b659d3fba..00000000000 --- a/dm/tests/load_task/conf/source1.yaml +++ /dev/null @@ -1,13 +0,0 @@ -source-id: mysql-replica-01 -flavor: '' -enable-gtid: true -enable-relay: false -from: - host: 127.0.0.1 - user: root - password: /Q7B9DizNLLTTfiZHv9WoEAKamfpIUs= - port: 3306 -checker: - check-enable: true - backoff-rollback: 5m - backoff-max: 5m diff --git a/dm/tests/load_task/conf/source2.yaml b/dm/tests/load_task/conf/source2.yaml deleted file mode 100644 index fb1985ca354..00000000000 --- a/dm/tests/load_task/conf/source2.yaml +++ /dev/null @@ -1,9 +0,0 @@ -source-id: mysql-replica-02 -flavor: '' -enable-gtid: false -enable-relay: false -from: - host: 127.0.0.1 - user: root - password: /Q7B9DizNLLTTfiZHv9WoEAKamfpIUs= - port: 3307 diff --git a/dm/tests/load_task/data/db1.increment.sql b/dm/tests/load_task/data/db1.increment.sql deleted file mode 100644 index 8db36ca7875..00000000000 --- a/dm/tests/load_task/data/db1.increment.sql +++ /dev/null @@ -1,11 +0,0 @@ -use load_task1; -insert into t1 (id, name) values (3, 'Eddard Stark'); -insert into t1 (id, name) values (4, 'haha'); - -use load_task2; -insert into t1 (id, name) values (3, 'Eddard Stark'); -insert into t1 (id, name) values (4, 'haha'); - -use load_task4; -insert into t1 (id, name) values (3, 'Eddard Stark'); -insert into t1 (id, name) values (4, 'haha'); \ No newline at end of file diff --git a/dm/tests/load_task/data/db1.prepare.sql b/dm/tests/load_task/data/db1.prepare.sql deleted file mode 100644 index 5a6eec15812..00000000000 --- a/dm/tests/load_task/data/db1.prepare.sql +++ /dev/null @@ -1,17 +0,0 @@ -drop database if exists `load_task1`; -create database `load_task1`; -use `load_task1`; -create table t1 (id int, name varchar(20), primary key(`id`)); -insert into t1 (id, name) values (1, 'arya'), (2, 'catelyn'); - -drop database if exists `load_task2`; -create database `load_task2`; -use `load_task2`; -create table t1 (id int, name varchar(20), primary key(`id`)); -insert into t1 (id, name) values (1, 'arya'), (2, 'catelyn'); - -drop database if exists `load_task4`; -create database `load_task4`; -use `load_task4`; -create table t1 (id int, name varchar(20), primary key(`id`)); -insert into t1 (id, name) values (1, 'arya'), (2, 'catelyn'); diff --git a/dm/tests/load_task/data/db2.increment.sql b/dm/tests/load_task/data/db2.increment.sql deleted file mode 100644 index 45a9dca6778..00000000000 --- a/dm/tests/load_task/data/db2.increment.sql +++ /dev/null @@ -1,8 +0,0 @@ -use load_task1; -delete from t2 where name = 'Sansa'; - -use load_task2; -delete from t2 where name = 'Sansa'; - -use load_task3; -delete from t2 where name = 'Sansa'; diff --git a/dm/tests/load_task/data/db2.prepare.sql b/dm/tests/load_task/data/db2.prepare.sql deleted file mode 100644 index d23bd3500f0..00000000000 --- a/dm/tests/load_task/data/db2.prepare.sql +++ /dev/null @@ -1,17 +0,0 @@ -drop database if exists `load_task1`; -create database `load_task1`; 
-use `load_task1`; -create table t2 (id int auto_increment, name varchar(20), primary key (`id`)); -insert into t2 (name) values ('Arya'), ('Bran'), ('Sansa'); - -drop database if exists `load_task2`; -create database `load_task2`; -use `load_task2`; -create table t2 (id int auto_increment, name varchar(20), primary key (`id`)); -insert into t2 (name) values ('Arya'), ('Bran'), ('Sansa'); - -drop database if exists `load_task3`; -create database `load_task3`; -use `load_task3`; -create table t2 (id int auto_increment, name varchar(20), primary key (`id`)); -insert into t2 (name) values ('Arya'), ('Bran'), ('Sansa'); diff --git a/dm/tests/load_task/run.sh b/dm/tests/load_task/run.sh deleted file mode 100755 index 18bfaf3bcf5..00000000000 --- a/dm/tests/load_task/run.sh +++ /dev/null @@ -1,322 +0,0 @@ -#!/bin/bash - -set -eu - -cur=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) -source $cur/../_utils/test_prepare -WORK_DIR=$TEST_DIR/$TEST_NAME -API_VERSION="v1alpha1" -WORKER1="worker1" -WORKER2="worker2" -WORKER3="worker3" - -function test_worker_restart() { - echo "test worker restart" - # worker1 offline - kill_process dm-worker1 - check_port_offline $WORKER1_PORT 20 - - # source1 bound to worker3 - run_dm_ctl_with_retry $WORK_DIR "127.0.0.1:$MASTER_PORT" \ - "list-member -w -n worker3" \ - "\"stage\": \"bound\"" 1 \ - "\"source\": \"mysql-replica-01\"" 1 - run_dm_ctl_with_retry $WORK_DIR "127.0.0.1:$MASTER_PORT" \ - "list-member -w -n worker1" \ - "\"stage\": \"offline\"" 1 - - run_dm_ctl_with_retry $WORK_DIR "127.0.0.1:$MASTER_PORT" \ - "query-status load_task1" \ - "different worker in load stage, previous worker: $WORKER1, current worker: $WORKER3" 1 \ - "Please check if the previous worker is online." 1 - - # worker1 online - export GO_FAILPOINTS="github.com/pingcap/tiflow/dm/loader/LoadDataSlowDownByTask=return(\"load_task1\")" - run_dm_worker $WORK_DIR/worker1 $WORKER1_PORT $cur/conf/dm-worker1.toml - check_rpc_alive $cur/../bin/check_worker_online 127.0.0.1:$WORKER1_PORT - - # transfer to worker1 - check_log_contain_with_retry 'transfer source and worker.*worker1.*worker3.*mysql-replica-01' $WORK_DIR/master/log/dm-master.log - run_dm_ctl_with_retry $WORK_DIR "127.0.0.1:$MASTER_PORT" \ - "list-member -w -n worker3" \ - "\"stage\": \"free\"" 1 - run_dm_ctl_with_retry $WORK_DIR "127.0.0.1:$MASTER_PORT" \ - "list-member -w -n worker1" \ - "\"stage\": \"bound\"" 1 \ - "\"source\": \"mysql-replica-01\"" 1 - - run_dm_ctl_with_retry $WORK_DIR "127.0.0.1:$MASTER_PORT" \ - "query-status load_task1" \ - "\"unit\": \"Load\"" 1 \ - "\"unit\": \"Sync\"" 1 - run_dm_ctl_with_retry $WORK_DIR "127.0.0.1:$MASTER_PORT" \ - "query-status load_task2" \ - "\"unit\": \"Load\"" 1 \ - "\"unit\": \"Sync\"" 1 -} - -# almost never happen since user hardly start a load task after another load task failed. 
-function test_transfer_two_sources() { - echo "test_transfer_two_sources" - # worker2 offline - kill_process dm-worker2 - check_port_offline $WORKER2_PORT 20 - - # source2 bound to worker3 - run_dm_ctl_with_retry $WORK_DIR "127.0.0.1:$MASTER_PORT" \ - "list-member -w -n worker3" \ - "\"stage\": \"bound\"" 1 \ - "\"source\": \"mysql-replica-02\"" 1 - run_dm_ctl_with_retry $WORK_DIR "127.0.0.1:$MASTER_PORT" \ - "query-status load_task2" \ - "different worker in load stage, previous worker: $WORKER2, current worker: $WORKER3" 1 - - # start load task for worker3 - run_dm_ctl_with_retry $WORK_DIR "127.0.0.1:$MASTER_PORT" \ - "start-task $cur/conf/dm-task3.yaml --remove-meta" \ - "\"result\": true" 2 - run_dm_ctl_with_retry $WORK_DIR "127.0.0.1:$MASTER_PORT" \ - "query-status load_task3" \ - "\"unit\": \"Load\"" 1 - - # worker2 online - export GO_FAILPOINTS="github.com/pingcap/tiflow/dm/loader/LoadDataSlowDown=sleep(15000)" - run_dm_worker $WORK_DIR/worker2 $WORKER2_PORT $cur/conf/dm-worker2.toml - check_rpc_alive $cur/../bin/check_worker_online 127.0.0.1:$WORKER2_PORT - - # worker2 free since (worker3, source2) has load task(load_task3) - run_dm_ctl_with_retry $WORK_DIR "127.0.0.1:$MASTER_PORT" \ - "list-member -w -n worker2" \ - "\"stage\": \"free\"" 1 - - # worker1 offline - kill_process dm-worker1 - check_port_offline $WORKER1_PORT 20 - - # source1 bound to worker2 - run_dm_ctl_with_retry $WORK_DIR "127.0.0.1:$MASTER_PORT" \ - "list-member -w -n worker2" \ - "\"stage\": \"bound\"" 1 \ - "\"source\": \"mysql-replica-01\"" 1 - - # start load_task4 on worker2 - run_dm_ctl_with_retry $WORK_DIR "127.0.0.1:$MASTER_PORT" \ - "start-task $cur/conf/dm-task4.yaml --remove-meta" \ - "\"result\": true" 2 - run_dm_ctl_with_retry $WORK_DIR "127.0.0.1:$MASTER_PORT" \ - "query-status load_task4" \ - "\"unit\": \"Load\"" 1 - - # worker1 online - export GO_FAILPOINTS="" - run_dm_worker $WORK_DIR/worker1 $WORKER1_PORT $cur/conf/dm-worker1.toml - check_rpc_alive $cur/../bin/check_worker_online 127.0.0.1:$WORKER1_PORT - - # worker1 free since (worker2, source1) has load task(load_task4) - run_dm_ctl_with_retry $WORK_DIR "127.0.0.1:$MASTER_PORT" \ - "list-member -w -n worker1" \ - "\"stage\": \"free\"" 1 - - # now, worker2 waiting worker3 finish load_task3, worker1 waiting worker2 finish load_task4 - # worker3 offline - kill_process dm-worker3 - check_port_offline $WORKER3_PORT 20 - - # source2 bound to worker1 - run_dm_ctl_with_retry $WORK_DIR "127.0.0.1:$MASTER_PORT" \ - "list-member -w -n worker1" \ - "\"stage\": \"bound\"" 1 \ - "\"source\": \"mysql-replica-02\"" 1 - - # (worker1, source2), (worker2, source1) - run_dm_ctl_with_retry $WORK_DIR "127.0.0.1:$MASTER_PORT" \ - "query-status load_task1" \ - "different worker in load stage, previous worker: $WORKER1, current worker: $WORKER2" 1 - run_dm_ctl_with_retry $WORK_DIR "127.0.0.1:$MASTER_PORT" \ - "query-status load_task2" \ - "different worker in load stage, previous worker: $WORKER2, current worker: $WORKER1" 1 - - # worker2 finish load_task4 - # master transfer (worker1, source2), (worker2, source1) to (worker1, source1), (worker2, source2) - run_dm_ctl_with_retry $WORK_DIR "127.0.0.1:$MASTER_PORT" \ - "list-member -w -n worker1" \ - "\"stage\": \"bound\"" 1 \ - "\"source\": \"mysql-replica-01\"" 1 - run_dm_ctl_with_retry $WORK_DIR "127.0.0.1:$MASTER_PORT" \ - "list-member -w -n worker2" \ - "\"stage\": \"bound\"" 1 \ - "\"source\": \"mysql-replica-02\"" 1 - - # task1, 2, 4 running, task3 fail - run_dm_ctl_with_retry $WORK_DIR "127.0.0.1:$MASTER_PORT" \ 
- "query-status" \ - "\"taskStatus\": \"Running\"" 3 \ - "taskStatus.*Error" 1 - - # worker3 online - export GO_FAILPOINTS="" - run_dm_worker $WORK_DIR/worker3 $WORKER3_PORT $cur/conf/dm-worker3.toml - check_rpc_alive $cur/../bin/check_worker_online 127.0.0.1:$WORKER3_PORT - - # source2 bound to worker3 since load_task3 - run_dm_ctl_with_retry $WORK_DIR "127.0.0.1:$MASTER_PORT" \ - "list-member -w -n worker2" \ - "\"stage\": \"bound\"" 1 \ - "\"source\": \"mysql-replica-02\"" 1 - - # all task running - run_dm_ctl_with_retry $WORK_DIR "127.0.0.1:$MASTER_PORT" \ - "query-status" \ - "\"taskStatus\": \"Running\"" 4 -} - -function stop_task_left_load() { - echo "start DM master, workers and sources" - run_dm_master $WORK_DIR/master $MASTER_PORT1 $cur/conf/dm-master.toml - check_rpc_alive $cur/../bin/check_master_online 127.0.0.1:$MASTER_PORT1 - - export GO_FAILPOINTS="github.com/pingcap/tiflow/dm/loader/LoadDataSlowDownByTask=return(\"load_task1\")" - run_dm_worker $WORK_DIR/worker1 $WORKER1_PORT $cur/conf/dm-worker1.toml - check_rpc_alive $cur/../bin/check_worker_online 127.0.0.1:$WORKER1_PORT - cp $cur/conf/source1.yaml $WORK_DIR/source1.yaml - sed -i "/relay-binlog-name/i\relay-dir: $WORK_DIR/worker1/relay_log" $WORK_DIR/source1.yaml - dmctl_operate_source create $WORK_DIR/source1.yaml $SOURCE_ID1 - - dmctl_start_task_standalone "$cur/conf/dm-task-standalone.yaml" "--remove-meta" - - export GO_FAILPOINTS="" - run_dm_worker $WORK_DIR/worker2 $WORKER2_PORT $cur/conf/dm-worker2.toml - check_rpc_alive $cur/../bin/check_worker_online 127.0.0.1:$WORKER2_PORT - - # kill worker1, load_task1 will be transferred to worker2, but lack local files - kill_process dm-worker1 - check_port_offline $WORKER1_PORT 20 - run_dm_ctl_with_retry $WORK_DIR "127.0.0.1:$MASTER_PORT" \ - "query-status load_task1" \ - "different worker in load stage, previous worker: worker1, current worker: worker2" 1 - - # now stop this task without clean meta (left a load_task KV in etcd) - run_dm_ctl $WORK_DIR "127.0.0.1:$MASTER_PORT" \ - "stop-task load_task1" \ - "\"result\": true" 2 - - dmctl_start_task_standalone "$cur/conf/dm-task2-standalone.yaml" "--remove-meta" - run_dm_ctl_with_retry $WORK_DIR "127.0.0.1:$MASTER_PORT" \ - "query-status load_task2" \ - "\"unit\": \"Sync\"" 1 - - # after worker1 goes online, although it has unfinished load_task1, but load_task1 is stopped so should not rebound - export GO_FAILPOINTS="github.com/pingcap/tiflow/dm/loader/LoadDataSlowDownByTask=return(\"load_task1\")" - run_dm_worker $WORK_DIR/worker1 $WORKER1_PORT $cur/conf/dm-worker1.toml - check_rpc_alive $cur/../bin/check_worker_online 127.0.0.1:$WORKER1_PORT - - run_dm_ctl_with_retry $WORK_DIR "127.0.0.1:$MASTER_PORT" \ - "list-member --name worker1" \ - "\"source\": \"\"" 1 - - # start-task again, expect the source is auto transferred back - run_dm_ctl $WORK_DIR "127.0.0.1:$MASTER_PORT" \ - "start-task $cur/conf/dm-task-standalone.yaml" - - run_dm_ctl_with_retry $WORK_DIR "127.0.0.1:$MASTER_PORT" \ - "list-member --name worker1" \ - "\"source\": \"mysql-replica-01\"" 1 - - # repeat again and check start-task --remove-meta will not cause transfer - run_dm_ctl $WORK_DIR "127.0.0.1:$MASTER_PORT" \ - "stop-task load_task1" \ - "\"result\": true" 2 - kill_process dm-worker1 - check_port_offline $WORKER1_PORT 20 - - run_dm_ctl_with_retry $WORK_DIR "127.0.0.1:$MASTER_PORT" \ - "query-status load_task2" \ - "\"unit\": \"Sync\"" 1 - - run_dm_worker $WORK_DIR/worker1 $WORKER1_PORT $cur/conf/dm-worker1.toml - check_rpc_alive 
$cur/../bin/check_worker_online 127.0.0.1:$WORKER1_PORT - - dmctl_start_task_standalone "$cur/conf/dm-task-standalone.yaml" "--remove-meta" - run_dm_ctl_with_retry $WORK_DIR "127.0.0.1:$MASTER_PORT" \ - "query-status load_task1" \ - "\"unit\": \"Sync\"" 1 - run_dm_ctl_with_retry $WORK_DIR "127.0.0.1:$MASTER_PORT" \ - "list-member --name worker1" \ - "\"source\": \"\"" 1 - - cleanup_process $* - cleanup_data load_task1 - cleanup_data load_task2 -} - -function run() { - echo "import prepare data" - run_sql_file $cur/data/db1.prepare.sql $MYSQL_HOST1 $MYSQL_PORT1 $MYSQL_PASSWORD1 - check_contains 'Query OK, 2 rows affected' - run_sql_file $cur/data/db2.prepare.sql $MYSQL_HOST2 $MYSQL_PORT2 $MYSQL_PASSWORD2 - check_contains 'Query OK, 3 rows affected' - - stop_task_left_load - - echo "start DM master, workers and sources" - run_dm_master $WORK_DIR/master $MASTER_PORT1 $cur/conf/dm-master.toml - check_rpc_alive $cur/../bin/check_master_online 127.0.0.1:$MASTER_PORT1 - - # worker1 loading load_task1 - export GO_FAILPOINTS="github.com/pingcap/tiflow/dm/loader/LoadDataSlowDownByTask=return(\"load_task1\")" - run_dm_worker $WORK_DIR/worker1 $WORKER1_PORT $cur/conf/dm-worker1.toml - check_rpc_alive $cur/../bin/check_worker_online 127.0.0.1:$WORKER1_PORT - cp $cur/conf/source1.yaml $WORK_DIR/source1.yaml - sed -i "/relay-binlog-name/i\relay-dir: $WORK_DIR/worker1/relay_log" $WORK_DIR/source1.yaml - dmctl_operate_source create $WORK_DIR/source1.yaml $SOURCE_ID1 - - # worker2 loading load_task2 - export GO_FAILPOINTS="github.com/pingcap/tiflow/dm/loader/LoadDataSlowDownByTask=return(\"load_task2\")" - run_dm_worker $WORK_DIR/worker2 $WORKER2_PORT $cur/conf/dm-worker2.toml - check_rpc_alive $cur/../bin/check_worker_online 127.0.0.1:$WORKER2_PORT - cp $cur/conf/source2.yaml $WORK_DIR/source2.yaml - sed -i "/relay-binlog-name/i\relay-dir: $WORK_DIR/worker2/relay_log" $WORK_DIR/source2.yaml - dmctl_operate_source create $WORK_DIR/source2.yaml $SOURCE_ID2 - - # worker3 loading load_task3 - export GO_FAILPOINTS="github.com/pingcap/tiflow/dm/loader/LoadDataSlowDownByTask=return(\"load_task3\")" - run_dm_worker $WORK_DIR/worker3 $WORKER3_PORT $cur/conf/dm-worker3.toml - check_rpc_alive $cur/../bin/check_worker_online 127.0.0.1:$WORKER3_PORT - - echo "start DM task" - dmctl_start_task "$cur/conf/dm-task.yaml" "--remove-meta" - dmctl_start_task "$cur/conf/dm-task2.yaml" "--remove-meta" - - check_log_contain_with_retry 'inject failpoint LoadDataSlowDownByTask' $WORK_DIR/worker1/log/dm-worker.log - check_log_contain_with_retry 'inject failpoint LoadDataSlowDownByTask' $WORK_DIR/worker2/log/dm-worker.log - run_dm_ctl_with_retry $WORK_DIR "127.0.0.1:$MASTER_PORT" \ - "query-status load_task1" \ - "\"unit\": \"Load\"" 1 \ - "\"unit\": \"Sync\"" 1 - run_dm_ctl_with_retry $WORK_DIR "127.0.0.1:$MASTER_PORT" \ - "query-status load_task2" \ - "\"unit\": \"Load\"" 1 \ - "\"unit\": \"Sync\"" 1 - - test_worker_restart - - test_transfer_two_sources - - run_sql_file $cur/data/db1.increment.sql $MYSQL_HOST1 $MYSQL_PORT1 $MYSQL_PASSWORD1 - run_sql_file $cur/data/db2.increment.sql $MYSQL_HOST2 $MYSQL_PORT2 $MYSQL_PASSWORD2 - check_sync_diff $WORK_DIR $cur/conf/diff_config1.toml - check_sync_diff $WORK_DIR $cur/conf/diff_config2.toml - check_sync_diff $WORK_DIR $cur/conf/diff_config3.toml - check_sync_diff $WORK_DIR $cur/conf/diff_config4.toml -} - -cleanup_data load_task1 -cleanup_data load_task2 -cleanup_data load_task3 -cleanup_data load_task4 -# also cleanup dm processes in case of last run failed -cleanup_process $* -run $* 
-cleanup_process $* - -echo "[$(date)] <<<<<< test case $TEST_NAME success! >>>>>>" diff --git a/dm/worker/subtask.go b/dm/worker/subtask.go index b0a178d6eda..9679f9a4170 100644 --- a/dm/worker/subtask.go +++ b/dm/worker/subtask.go @@ -59,12 +59,12 @@ func createRealUnits(cfg *config.SubTaskConfig, etcdClient *clientv3.Client, wor switch cfg.Mode { case config.ModeAll: us = append(us, dumpling.NewDumpling(cfg)) - us = append(us, newLoadUnit(cfg, etcdClient, workerName)) + us = append(us, loader.NewLightning(cfg, etcdClient, workerName)) us = append(us, syncer.NewSyncer(cfg, etcdClient, relay)) case config.ModeFull: // NOTE: maybe need another checker in the future? us = append(us, dumpling.NewDumpling(cfg)) - us = append(us, newLoadUnit(cfg, etcdClient, workerName)) + us = append(us, loader.NewLightning(cfg, etcdClient, workerName)) case config.ModeIncrement: us = append(us, syncer.NewSyncer(cfg, etcdClient, relay)) default: @@ -73,14 +73,6 @@ func createRealUnits(cfg *config.SubTaskConfig, etcdClient *clientv3.Client, wor return us } -func newLoadUnit(cfg *config.SubTaskConfig, etcdClient *clientv3.Client, workerName string) unit.Unit { - // tidb-lightning doesn't support column mapping currently - if !cfg.NeedUseLightning() || len(cfg.ColumnMappingRules) > 0 { - return loader.NewLoader(cfg, etcdClient, workerName) - } - return loader.NewLightning(cfg, etcdClient, workerName) -} - // SubTask represents a sub task of data migration. type SubTask struct { cfg *config.SubTaskConfig @@ -139,13 +131,6 @@ func NewSubTaskWithStage(cfg *config.SubTaskConfig, stage pb.Stage, etcdClient * // initUnits initializes the sub task processing units. func (st *SubTask) initUnits(relay relay.Process) error { - // NOTE: because lightning not support init tls with raw certs bytes, we write the certs data to a file. - if st.cfg.NeedUseLightning() && st.cfg.To.Security != nil { - // NOTE: LoaderConfig.Dir may be a s3 path, but Lightning just supports local tls files, we need to use a new local dir. - if err := st.cfg.To.Security.DumpTLSContent("./" + loader.TmpTLSConfigPath + "_" + st.cfg.Name); err != nil { - return terror.Annotatef(err, "fail to dump tls cert data for lightning, subtask %s ", st.cfg.Name) - } - } st.units = createUnits(st.cfg, st.etcdClient, st.workerName, relay) if len(st.units) < 1 { return terror.ErrWorkerNoAvailUnits.Generate(st.cfg.Name, st.cfg.Mode) From f6f5b7e9e28839d9f1838ced2e6d5a36307e6138 Mon Sep 17 00:00:00 2001 From: lance6716 Date: Tue, 20 Dec 2022 11:40:11 +0800 Subject: [PATCH 02/26] remove more Signed-off-by: lance6716 --- dm/loader/checkpoint.go | 48 ----------------------------------------- 1 file changed, 48 deletions(-) diff --git a/dm/loader/checkpoint.go b/dm/loader/checkpoint.go index b7502995326..4db11aac4a1 100644 --- a/dm/loader/checkpoint.go +++ b/dm/loader/checkpoint.go @@ -26,54 +26,6 @@ import ( "go.uber.org/zap" ) -// CheckPoint represents checkpoint status. -type CheckPoint interface { - // Load loads all checkpoints recorded before. - // because of no checkpoints updated in memory when error occurred - // when resuming, Load will be called again to load checkpoints - Load(tctx *tcontext.Context) error - - // GetRestoringFileInfo get restoring data files for table - GetRestoringFileInfo(db, table string) map[string][]int64 - - // GetAllRestoringFileInfo return all restoring files position - GetAllRestoringFileInfo() map[string][]int64 - - // IsTableCreated checks if db / table was created. 
set `table` to "" when check db - IsTableCreated(db, table string) bool - - // IsTableFinished query if table has finished - IsTableFinished(db, table string) bool - - // CalcProgress calculate which table has finished and which table partial restored - CalcProgress(allFiles map[string]Tables2DataFiles) error - - // Init initialize checkpoint data in tidb - Init(tctx *tcontext.Context, filename string, endpos int64) error - - // ResetConn resets database connections owned by the Checkpoint - ResetConn(tctx *tcontext.Context) error - - // Close closes the CheckPoint - Close() - - // Clear clears all recorded checkpoints - Clear(tctx *tcontext.Context) error - - // Count returns recorded checkpoints' count - Count(tctx *tcontext.Context) (int, error) - - // GenSQL generates sql to update checkpoint to DB - GenSQL(filename string, offset int64) string - - // UpdateOffset keeps `cp.restoringFiles` in memory same with checkpoint in DB, - // should be called after update checkpoint in DB - UpdateOffset(filename string, offset int64) error - - // AllFinished returns `true` when all restoring job are finished - AllFinished() bool -} - type lightingLoadStatus int const ( From 3eb55d444aed20bf0617fe2b592749b167bc06af Mon Sep 17 00:00:00 2001 From: lance6716 Date: Tue, 20 Dec 2022 14:09:35 +0800 Subject: [PATCH 03/26] fix some UT Signed-off-by: lance6716 --- dm/config/task_test.go | 38 ------------------- dm/master/server_test.go | 19 ---------- dm/syncer/syncer.go | 28 ++------------ dm/tests/dmctl_basic/conf/dm-task.yaml | 19 ---------- dm/tests/dmctl_basic/conf/dm-task5.yaml | 19 ---------- dm/tests/dmctl_basic/conf/dm-task6.yaml | 19 ---------- dm/tests/dmctl_basic/conf/dm-task7.yaml | 19 ---------- dm/tests/dmctl_basic/conf/get_task.yaml | 32 ++-------------- dm/tests/online_ddl/conf/dm-task.yaml | 37 +----------------- dm/tests/sequence_safe_mode/conf/dm-task.yaml | 19 ---------- dm/tests/sequence_sharding/conf/dm-task.yaml | 19 ---------- .../conf/dm-task.yaml | 19 ---------- dm/tests/sharding/conf/dm-task.yaml | 19 ---------- engine/jobmaster/dm/config/job_template.yaml | 29 -------------- engine/test/e2e/dm-job.yaml | 29 -------------- 15 files changed, 8 insertions(+), 356 deletions(-) diff --git a/dm/config/task_test.go b/dm/config/task_test.go index dadb1432db6..b12e41b3c87 100644 --- a/dm/config/task_test.go +++ b/dm/config/task_test.go @@ -64,22 +64,6 @@ filters: events: ["all dml"] action: Do -column-mappings: - column-mapping-rule-1: - schema-pattern: "test_*" - table-pattern: "t_*" - expression: "partition id" - source-column: "id" - target-column: "id" - arguments: ["1", "test", "t", "_"] - column-mapping-rule-2: - schema-pattern: "test_*" - table-pattern: "t_*" - expression: "partition id" - source-column: "id" - target-column: "id" - arguments: ["2", "test", "t", "_"] - mydumpers: global1: threads: 4 @@ -122,7 +106,6 @@ mysql-instances: - source-id: "mysql-replica-01" route-rules: ["route-rule-2"] filter-rules: ["filter-rule-2"] - column-mapping-rules: ["column-mapping-rule-2"] mydumper-config-name: "global1" loader-config-name: "global1" syncer-config-name: "global1" @@ -131,7 +114,6 @@ mysql-instances: - source-id: "mysql-replica-02" route-rules: ["route-rule-1"] filter-rules: ["filter-rule-1"] - column-mapping-rules: ["column-mapping-rule-1"] mydumper-config-name: "global2" loader-config-name: "global2" syncer-config-name: "global2" @@ -177,22 +159,6 @@ filters: events: ["all dml"] action: Do -column-mappings: - column-mapping-rule-1: - schema-pattern: "test_*" - table-pattern: 
"t_*" - expression: "partition id" - source-column: "id" - target-column: "id" - arguments: ["1", "test", "t", "_"] - column-mapping-rule-2: - schema-pattern: "test_*" - table-pattern: "t_*" - expression: "partition id" - source-column: "id" - target-column: "id" - arguments: ["2", "test", "t", "_"] - mydumpers: global1: threads: 4 @@ -235,7 +201,6 @@ mysql-instances: - source-id: "mysql-replica-01" route-rules: ["route-rule-1"] filter-rules: ["filter-rule-1"] - column-mapping-rules: ["column-mapping-rule-1"] mydumper-config-name: "global1" loader-config-name: "global1" syncer-config-name: "global1" @@ -243,7 +208,6 @@ mysql-instances: - source-id: "mysql-replica-02" route-rules: ["route-rule-1"] filter-rules: ["filter-rule-1"] - column-mapping-rules: ["column-mapping-rule-1"] mydumper-config-name: "global2" loader-config-name: "global2" syncer-config-name: "global2" @@ -276,7 +240,6 @@ mysql-instances: server-id: 101 block-allow-list: "instance" route-rules: ["sharding-route-rules-table", "sharding-route-rules-schema"] - column-mapping-rules: ["instance-1"] mydumper-config-name: "global" loader-config-name: "global" syncer-config-name: "global" @@ -301,7 +264,6 @@ mysql-instances: - source-id: "mysql-replica-01" block-allow-list: "instance" route-rules: ["sharding-route-rules-table", "sharding-route-rules-schema"] - column-mapping-rules: ["instance-1"] mydumper-config-name: "global" loader-config-name: "global" syncer-config-name: "global" diff --git a/dm/master/server_test.go b/dm/master/server_test.go index 4a4b7badc8b..3ecbffefb67 100644 --- a/dm/master/server_test.go +++ b/dm/master/server_test.go @@ -93,7 +93,6 @@ mysql-instances: - source-id: "mysql-replica-01" block-allow-list: "instance" route-rules: ["sharding-route-rules-table", "sharding-route-rules-schema"] - column-mapping-rules: ["instance-1"] mydumper-config-name: "global" loader-config-name: "global" syncer-config-name: "global" @@ -101,7 +100,6 @@ mysql-instances: - source-id: "mysql-replica-02" block-allow-list: "instance" route-rules: ["sharding-route-rules-table", "sharding-route-rules-schema"] - column-mapping-rules: ["instance-2"] mydumper-config-name: "global" loader-config-name: "global" syncer-config-name: "global" @@ -124,23 +122,6 @@ routes: schema-pattern: sharding* target-schema: db_target -column-mappings: - instance-1: - schema-pattern: "sharding*" - table-pattern: "t*" - expression: "partition id" - source-column: "id" - target-column: "id" - arguments: ["1", "sharding", "t"] - - instance-2: - schema-pattern: "sharding*" - table-pattern: "t*" - expression: "partition id" - source-column: "id" - target-column: "id" - arguments: ["2", "sharding", "t"] - mydumpers: global: threads: 4 diff --git a/dm/syncer/syncer.go b/dm/syncer/syncer.go index d206b856482..653c55864c4 100644 --- a/dm/syncer/syncer.go +++ b/dm/syncer/syncer.go @@ -32,7 +32,6 @@ import ( "github.com/pingcap/errors" "github.com/pingcap/failpoint" bf "github.com/pingcap/tidb-tools/pkg/binlog-filter" - cm "github.com/pingcap/tidb-tools/pkg/column-mapping" "github.com/pingcap/tidb/parser" "github.com/pingcap/tidb/parser/ast" "github.com/pingcap/tidb/parser/model" @@ -161,7 +160,6 @@ type Syncer struct { tableRouter *regexprrouter.RouteTable binlogFilter *bf.BinlogEvent - columnMapping *cm.Mapping baList *filter.Filter exprFilterGroup *ExprFilterGroup sessCtx sessionctx.Context @@ -411,13 +409,6 @@ func (s *Syncer) Init(ctx context.Context) (err error) { // create an empty Tracker and will be initialized in `Run` s.schemaTracker = schema.NewTracker() 
- if len(s.cfg.ColumnMappingRules) > 0 { - s.columnMapping, err = cm.NewMapping(s.cfg.CaseSensitive, s.cfg.ColumnMappingRules) - if err != nil { - return terror.ErrSyncerUnitGenColumnMapping.Delegate(err) - } - } - if s.cfg.OnlineDDL { s.onlineDDL, err = onlineddl.NewRealOnlinePlugin(tctx, s.cfg, s.metricsProxies) if err != nil { @@ -3257,11 +3248,10 @@ func (s *Syncer) Update(ctx context.Context, cfg *config.SubTaskConfig) error { } var ( - err error - oldBaList *filter.Filter - oldTableRouter *regexprrouter.RouteTable - oldBinlogFilter *bf.BinlogEvent - oldColumnMapping *cm.Mapping + err error + oldBaList *filter.Filter + oldTableRouter *regexprrouter.RouteTable + oldBinlogFilter *bf.BinlogEvent ) defer func() { @@ -3277,9 +3267,6 @@ func (s *Syncer) Update(ctx context.Context, cfg *config.SubTaskConfig) error { if oldBinlogFilter != nil { s.binlogFilter = oldBinlogFilter } - if oldColumnMapping != nil { - s.columnMapping = oldColumnMapping - } }() // update block-allow-list @@ -3303,13 +3290,6 @@ func (s *Syncer) Update(ctx context.Context, cfg *config.SubTaskConfig) error { return terror.ErrSyncerUnitGenBinlogEventFilter.Delegate(err) } - // update column-mappings - oldColumnMapping = s.columnMapping - s.columnMapping, err = cm.NewMapping(cfg.CaseSensitive, cfg.ColumnMappingRules) - if err != nil { - return terror.ErrSyncerUnitGenColumnMapping.Delegate(err) - } - switch s.cfg.ShardMode { case config.ShardPessimistic: // re-init sharding group diff --git a/dm/tests/dmctl_basic/conf/dm-task.yaml b/dm/tests/dmctl_basic/conf/dm-task.yaml index b9ea640e7f1..a5e3793d83e 100644 --- a/dm/tests/dmctl_basic/conf/dm-task.yaml +++ b/dm/tests/dmctl_basic/conf/dm-task.yaml @@ -15,7 +15,6 @@ mysql-instances: - source-id: "mysql-replica-01" block-allow-list: "instance" route-rules: ["sharding-route-rules-table", "sharding-route-rules-schema"] - column-mapping-rules: ["instance-1"] mydumper-config-name: "global" loader-config-name: "global" syncer-config-name: "global" @@ -26,7 +25,6 @@ mysql-instances: - source-id: "mysql-replica-02" block-allow-list: "instance" route-rules: ["sharding-route-rules-table", "sharding-route-rules-schema"] - column-mapping-rules: ["instance-2"] mydumper-config-name: "global" loader-config-name: "global" syncer-config-name: "global" @@ -53,23 +51,6 @@ routes: schema-pattern: dmctl target-schema: dmctl -column-mappings: - instance-1: - schema-pattern: "dmctl" - table-pattern: "t_*" - expression: "partition id" - source-column: "id" - target-column: "id" - arguments: ["1", "", "t_"] - - instance-2: - schema-pattern: "dmctl" - table-pattern: "t_*" - expression: "partition id" - source-column: "id" - target-column: "id" - arguments: ["2", "", "t_"] - mydumpers: global: threads: 4 diff --git a/dm/tests/dmctl_basic/conf/dm-task5.yaml b/dm/tests/dmctl_basic/conf/dm-task5.yaml index 07277194340..88a09c530f0 100644 --- a/dm/tests/dmctl_basic/conf/dm-task5.yaml +++ b/dm/tests/dmctl_basic/conf/dm-task5.yaml @@ -16,7 +16,6 @@ mysql-instances: - source-id: "mysql-replica-01" block-allow-list: "instance" route-rules: ["sharding-route-rules-table", "sharding-route-rules-schema"] - column-mapping-rules: ["instance-1"] mydumper-config-name: "global" loader-config-name: "global" syncer-config-name: "global" @@ -27,7 +26,6 @@ mysql-instances: - source-id: "mysql-replica-02" block-allow-list: "instance" route-rules: ["sharding-route-rules-table", "sharding-route-rules-schema"] - column-mapping-rules: ["instance-2"] mydumper-config-name: "global" loader-config-name: "global" 
syncer-config-name: "global" @@ -52,23 +50,6 @@ routes: schema-pattern: dmctl target-schema: dmctl -column-mappings: - instance-1: - schema-pattern: "dmctl" - table-pattern: "t_*" - expression: "partition id" - source-column: "id" - target-column: "id" - arguments: ["1", "", "t_"] - - instance-2: - schema-pattern: "dmctl" - table-pattern: "t_*" - expression: "partition id" - source-column: "id" - target-column: "id" - arguments: ["2", "", "t_"] - mydumpers: global: threads: 4 diff --git a/dm/tests/dmctl_basic/conf/dm-task6.yaml b/dm/tests/dmctl_basic/conf/dm-task6.yaml index edd6bd8a1d3..e45227a2927 100644 --- a/dm/tests/dmctl_basic/conf/dm-task6.yaml +++ b/dm/tests/dmctl_basic/conf/dm-task6.yaml @@ -16,7 +16,6 @@ mysql-instances: - source-id: "mysql-replica-01" block-allow-list: "instance" route-rules: ["sharding-route-rules-table", "sharding-route-rules-schema"] - column-mapping-rules: ["instance-1"] mydumper-config-name: "global" loader-config-name: "global" syncer-config-name: "global" @@ -27,7 +26,6 @@ mysql-instances: - source-id: "mysql-replica-02" block-allow-list: "instance" route-rules: ["sharding-route-rules-table", "sharding-route-rules-schema"] - column-mapping-rules: ["instance-2"] mydumper-config-name: "global" loader-config-name: "global" syncer-config-name: "global" @@ -52,23 +50,6 @@ routes: schema-pattern: dmctl target-schema: dmctl -column-mappings: - instance-1: - schema-pattern: "dmctl" - table-pattern: "t_*" - expression: "partition id" - source-column: "id" - target-column: "id" - arguments: ["1", "", "t_"] - - instance-2: - schema-pattern: "dmctl" - table-pattern: "t_*" - expression: "partition id" - source-column: "id" - target-column: "id" - arguments: ["2", "", "t_"] - mydumpers: global: threads: 4 diff --git a/dm/tests/dmctl_basic/conf/dm-task7.yaml b/dm/tests/dmctl_basic/conf/dm-task7.yaml index 611a0577420..36e628b2933 100644 --- a/dm/tests/dmctl_basic/conf/dm-task7.yaml +++ b/dm/tests/dmctl_basic/conf/dm-task7.yaml @@ -16,7 +16,6 @@ mysql-instances: - source-id: "mysql-replica-01" block-allow-list: "instance" route-rules: ["sharding-route-rules-table", "sharding-route-rules-schema"] - column-mapping-rules: ["instance-1"] mydumper-config-name: "global" loader-config-name: "global" syncer-config-name: "global" @@ -29,7 +28,6 @@ mysql-instances: - source-id: "mysql-replica-02" block-allow-list: "instance" route-rules: ["sharding-route-rules-table", "sharding-route-rules-schema"] - column-mapping-rules: ["instance-2"] mydumper-config-name: "global" loader-config-name: "global" syncer-config-name: "global" @@ -58,23 +56,6 @@ routes: schema-pattern: dmctl target-schema: dmctl -column-mappings: - instance-1: - schema-pattern: "dmctl" - table-pattern: "t_*" - expression: "partition id" - source-column: "id" - target-column: "id" - arguments: ["1", "", "t_"] - - instance-2: - schema-pattern: "dmctl" - table-pattern: "t_*" - expression: "partition id" - source-column: "id" - target-column: "id" - arguments: ["2", "", "t_"] - mydumpers: global: threads: 4 diff --git a/dm/tests/dmctl_basic/conf/get_task.yaml b/dm/tests/dmctl_basic/conf/get_task.yaml index afb2da67e59..fa03d9228ee 100644 --- a/dm/tests/dmctl_basic/conf/get_task.yaml +++ b/dm/tests/dmctl_basic/conf/get_task.yaml @@ -24,8 +24,7 @@ mysql-instances: meta: null filter-rules: - filter-01 - column-mapping-rules: - - cm-01 + column-mapping-rules: [] route-rules: - route-01 - route-02 @@ -46,8 +45,7 @@ mysql-instances: meta: null filter-rules: - filter-02 - column-mapping-rules: - - cm-02 + column-mapping-rules: 
[] route-rules: - route-01 - route-02 @@ -94,31 +92,7 @@ filters: sql-pattern: - alter table .* add column aaa int action: Ignore -column-mappings: - cm-01: - schema-pattern: dmctl - table-pattern: t_* - source-column: id - target-column: id - expression: partition id - arguments: - - "1" - - "" - - t_ - - "" - create-table-query: "" - cm-02: - schema-pattern: dmctl - table-pattern: t_* - source-column: id - target-column: id - expression: partition id - arguments: - - "2" - - "" - - t_ - - "" - create-table-query: "" +column-mappings: {} expression-filter: {} black-white-list: {} block-allow-list: diff --git a/dm/tests/online_ddl/conf/dm-task.yaml b/dm/tests/online_ddl/conf/dm-task.yaml index abd5ffc3aa3..7240e49f439 100644 --- a/dm/tests/online_ddl/conf/dm-task.yaml +++ b/dm/tests/online_ddl/conf/dm-task.yaml @@ -16,7 +16,6 @@ mysql-instances: - source-id: "mysql-replica-01" block-allow-list: "instance" route-rules: ["gho-sharding-route-rules-table", "pt-sharding-route-rules-table", "sharding-route-rules-schema"] - column-mapping-rules: ["gho-instance-1", "pt-instance-1"] mydumper-config-name: "global" loader-config-name: "global" syncer-config-name: "global" @@ -28,7 +27,6 @@ mysql-instances: binlog-pos: 4 block-allow-list: "instance" route-rules: ["gho-sharding-route-rules-table", "pt-sharding-route-rules-table", "sharding-route-rules-schema"] - column-mapping-rules: ["gho-instance-2", "pt-instance-2"] mydumper-config-name: "global" loader-config-name: "global" syncer-config-name: "global" @@ -58,39 +56,6 @@ routes: schema-pattern: online_ddl target-schema: online_ddl -column-mappings: - gho-instance-1: - schema-pattern: "online_ddl" - table-pattern: "gho_t*" - expression: "partition id" - source-column: "id" - target-column: "id" - arguments: ["1", "", "gho_t"] - - pt-instance-1: - schema-pattern: "online_ddl" - table-pattern: "pt_t*" - expression: "partition id" - source-column: "id" - target-column: "id" - arguments: ["1", "", "pt_t"] - - gho-instance-2: - schema-pattern: "online_ddl" - table-pattern: "gho_t*" - expression: "partition id" - source-column: "id" - target-column: "id" - arguments: ["2", "", "gho_t"] - - pt-instance-2: - schema-pattern: "online_ddl" - table-pattern: "pt_t*" - expression: "partition id" - source-column: "id" - target-column: "id" - arguments: ["2", "", "pt_t"] - filters: filter-rule-index: schema-pattern: "*" @@ -114,4 +79,4 @@ loaders: syncers: global: worker-count: 16 - batch: 100 \ No newline at end of file + batch: 100 diff --git a/dm/tests/sequence_safe_mode/conf/dm-task.yaml b/dm/tests/sequence_safe_mode/conf/dm-task.yaml index 7074d98535f..ec1038b502c 100644 --- a/dm/tests/sequence_safe_mode/conf/dm-task.yaml +++ b/dm/tests/sequence_safe_mode/conf/dm-task.yaml @@ -15,7 +15,6 @@ mysql-instances: - source-id: "mysql-replica-01" block-allow-list: "instance" route-rules: ["sharding-route-rules-table", "sharding-route-rules-schema"] - column-mapping-rules: ["instance-1"] mydumper-config-name: "global" loader-config-name: "global" syncer-config-name: "global" @@ -23,7 +22,6 @@ mysql-instances: - source-id: "mysql-replica-02" block-allow-list: "instance" route-rules: ["sharding-route-rules-table", "sharding-route-rules-schema"] - column-mapping-rules: ["instance-2"] mydumper-config-name: "global" loader-config-name: "global" syncer-config-name: "global" @@ -46,23 +44,6 @@ routes: schema-pattern: sequence_safe_mode_test target-schema: sequence_safe_mode_target -column-mappings: - instance-1: - schema-pattern: "sequence_safe_mode_test" - table-pattern: 
"t*" - expression: "partition id" - source-column: "id" - target-column: "id" - arguments: ["1", "", "t"] - - instance-2: - schema-pattern: "sequence_safe_mode_test" - table-pattern: "t*" - expression: "partition id" - source-column: "id" - target-column: "id" - arguments: ["2", "", "t"] - mydumpers: global: threads: 4 diff --git a/dm/tests/sequence_sharding/conf/dm-task.yaml b/dm/tests/sequence_sharding/conf/dm-task.yaml index 4dd936aac5f..9437ffdbe57 100644 --- a/dm/tests/sequence_sharding/conf/dm-task.yaml +++ b/dm/tests/sequence_sharding/conf/dm-task.yaml @@ -15,7 +15,6 @@ mysql-instances: - source-id: "mysql-replica-01" block-allow-list: "instance" route-rules: ["sharding-route-rules-table", "sharding-route-rules-schema"] - column-mapping-rules: ["instance-1"] mydumper-config-name: "global" loader-config-name: "global" syncer-config-name: "global" @@ -23,7 +22,6 @@ mysql-instances: - source-id: "mysql-replica-02" block-allow-list: "instance" route-rules: ["sharding-route-rules-table", "sharding-route-rules-schema"] - column-mapping-rules: ["instance-2"] mydumper-config-name: "global" loader-config-name: "global" syncer-config-name: "global" @@ -46,23 +44,6 @@ routes: schema-pattern: sharding_seq* target-schema: sharding_target2 -column-mappings: - instance-1: - schema-pattern: "sharding_seq*" - table-pattern: "t*" - expression: "partition id" - source-column: "id" - target-column: "id" - arguments: ["1", "", "t"] - - instance-2: - schema-pattern: "sharding_seq*" - table-pattern: "t*" - expression: "partition id" - source-column: "id" - target-column: "id" - arguments: ["2", "", "t"] - mydumpers: global: threads: 4 diff --git a/dm/tests/sequence_sharding_removemeta/conf/dm-task.yaml b/dm/tests/sequence_sharding_removemeta/conf/dm-task.yaml index 1f5395c2c2b..ad0f6d1d62c 100644 --- a/dm/tests/sequence_sharding_removemeta/conf/dm-task.yaml +++ b/dm/tests/sequence_sharding_removemeta/conf/dm-task.yaml @@ -15,7 +15,6 @@ mysql-instances: - source-id: "mysql-replica-01" block-allow-list: "instance" route-rules: ["sharding-route-rules-table", "sharding-route-rules-schema"] - column-mapping-rules: ["instance-1"] mydumper-config-name: "global" loader-config-name: "global" syncer-config-name: "global" @@ -23,7 +22,6 @@ mysql-instances: - source-id: "mysql-replica-02" block-allow-list: "instance" route-rules: ["sharding-route-rules-table", "sharding-route-rules-schema"] - column-mapping-rules: ["instance-2"] mydumper-config-name: "global" loader-config-name: "global" syncer-config-name: "global" @@ -46,23 +44,6 @@ routes: schema-pattern: sharding_seq* target-schema: sharding_target3 -column-mappings: - instance-1: - schema-pattern: "sharding_seq*" - table-pattern: "t*" - expression: "partition id" - source-column: "id" - target-column: "id" - arguments: ["1", "", "t"] - - instance-2: - schema-pattern: "sharding_seq*" - table-pattern: "t*" - expression: "partition id" - source-column: "id" - target-column: "id" - arguments: ["2", "", "t"] - mydumpers: global: threads: 4 diff --git a/dm/tests/sharding/conf/dm-task.yaml b/dm/tests/sharding/conf/dm-task.yaml index fa225c0785e..a1bbeda5d8b 100644 --- a/dm/tests/sharding/conf/dm-task.yaml +++ b/dm/tests/sharding/conf/dm-task.yaml @@ -15,7 +15,6 @@ mysql-instances: - source-id: "mysql-replica-01" block-allow-list: "instance" route-rules: ["sharding-route-rules-table", "sharding-route-rules-schema"] - column-mapping-rules: ["instance-1"] mydumper-config-name: "global" loader-config-name: "global" syncer-config-name: "global" @@ -23,7 +22,6 @@ 
mysql-instances: - source-id: "mysql-replica-02" block-allow-list: "instance" route-rules: ["sharding-route-rules-table", "sharding-route-rules-schema"] - column-mapping-rules: ["instance-2"] mydumper-config-name: "global" loader-config-name: "global" syncer-config-name: "global" @@ -46,23 +44,6 @@ routes: schema-pattern: sharding* target-schema: db_target -column-mappings: - instance-1: - schema-pattern: "sharding*" - table-pattern: "t*" - expression: "partition id" - source-column: "id" - target-column: "id" - arguments: ["1", "sharding", "t"] - - instance-2: - schema-pattern: "sharding*" - table-pattern: "t*" - expression: "partition id" - source-column: "id" - target-column: "id" - arguments: ["2", "sharding", "t"] - mydumpers: global: threads: 4 diff --git a/engine/jobmaster/dm/config/job_template.yaml b/engine/jobmaster/dm/config/job_template.yaml index 6d1c4091e5a..69610db3eb0 100644 --- a/engine/jobmaster/dm/config/job_template.yaml +++ b/engine/jobmaster/dm/config/job_template.yaml @@ -28,8 +28,6 @@ upstreams: meta: null filter-rules: - filter-01 - column-mapping-rules: - - cm-01 route-rules: - route-01 - route-02 @@ -58,8 +56,6 @@ upstreams: meta: null filter-rules: - filter-02 - column-mapping-rules: - - cm-02 route-rules: - route-01 - route-02 @@ -107,31 +103,6 @@ filters: sql-pattern: - alter table .* add column aaa int action: Ignore -column-mappings: - cm-01: - schema-pattern: dmctl - table-pattern: t_* - source-column: id - target-column: id - expression: partition id - arguments: - - "1" - - "" - - t_ - - "" - create-table-query: "" - cm-02: - schema-pattern: dmctl - table-pattern: t_* - source-column: id - target-column: id - expression: partition id - arguments: - - "2" - - "" - - t_ - - "" - create-table-query: "" expression-filter: c_null: schema: "expr_filter" diff --git a/engine/test/e2e/dm-job.yaml b/engine/test/e2e/dm-job.yaml index 1c645a1ddb9..3a94c4201cc 100644 --- a/engine/test/e2e/dm-job.yaml +++ b/engine/test/e2e/dm-job.yaml @@ -28,8 +28,6 @@ upstreams: source-id: mysql-replica-01 #filter-rules: #- filter-01 - #column-mapping-rules: - #- cm-01 #route-rules: #- route-01 #- route-02 @@ -56,8 +54,6 @@ upstreams: # meta: null # filter-rules: # - filter-02 -# column-mapping-rules: -# - cm-02 # route-rules: # - route-01 # - route-02 @@ -104,31 +100,6 @@ upstreams: # sql-pattern: # - alter table .* add column aaa int # action: Ignore -#column-mappings: -# cm-01: -# schema-pattern: dmctl -# table-pattern: t_* -# source-column: id -# target-column: id -# expression: partition id -# arguments: -# - "1" -# - "" -# - t_ -# - "" -# create-table-query: "" -# cm-02: -# schema-pattern: dmctl -# table-pattern: t_* -# source-column: id -# target-column: id -# expression: partition id -# arguments: -# - "2" -# - "" -# - t_ -# - "" -# create-table-query: "" #expression-filter: {} #black-white-list: {} block-allow-list: From 1ddc651666efc889a6499ee976ed9e8ec248b306 Mon Sep 17 00:00:00 2001 From: lance6716 Date: Tue, 20 Dec 2022 14:20:06 +0800 Subject: [PATCH 04/26] fix more Signed-off-by: lance6716 --- dm/config/security/security.go | 39 ---------------------------------- dm/config/security_test.go | 31 --------------------------- dm/syncer/dml.go | 23 -------------------- dm/syncer/syncer.go | 5 +---- 4 files changed, 1 insertion(+), 97 deletions(-) diff --git a/dm/config/security/security.go b/dm/config/security/security.go index adf3111d6ec..6854bb3cea0 100644 --- a/dm/config/security/security.go +++ b/dm/config/security/security.go @@ -17,9 +17,6 @@ import ( "encoding/base64" 
"fmt" "os" - "path" - - "github.com/pingcap/tiflow/dm/pkg/utils" ) // Security config. @@ -81,42 +78,6 @@ func (s *Security) LoadTLSContent() error { return firstErr } -// DumpTLSContent dump tls certs data to file. -// if user specified the path for certs but the cert doesn't exist or user didn't specify the path for certs -// dump certs to dm-worker folder and change the cert path. -// see more here https://github.com/pingcap/tiflow/pull/3260#discussion_r749052994 -func (s *Security) DumpTLSContent(baseDirPath string) error { - isSSLCANotExist := s.SSLCA == "" || !utils.IsFileExists(s.SSLCA) - isSSLCertNotExist := s.SSLCert == "" || !utils.IsFileExists(s.SSLCert) - isSSLKeyNotExist := s.SSLKey == "" || !utils.IsFileExists(s.SSLKey) - if isSSLCANotExist || isSSLCertNotExist || isSSLKeyNotExist { - if !utils.IsDirExists(baseDirPath) { - if err := os.MkdirAll(baseDirPath, 0o700); err != nil { - return err - } - } - } - if isSSLCANotExist { - s.SSLCA = path.Join(baseDirPath, "ca.pem") - if err := utils.WriteFileAtomic(s.SSLCA, s.SSLCABytes, 0o600); err != nil { - return err - } - } - if isSSLCertNotExist { - s.SSLCert = path.Join(baseDirPath, "cert.pem") - if err := utils.WriteFileAtomic(s.SSLCert, s.SSLCertBytes, 0o600); err != nil { - return err - } - } - if isSSLKeyNotExist { - s.SSLKey = path.Join(baseDirPath, "key.pem") - if err := utils.WriteFileAtomic(s.SSLKey, s.SSLKeyBytes, 0o600); err != nil { - return err - } - } - return nil -} - // ClearSSLBytesData clear all tls config bytes data. func (s *Security) ClearSSLBytesData() { s.SSLCABytes = s.SSLCABytes[:0] diff --git a/dm/config/security_test.go b/dm/config/security_test.go index e9b4c7b8a53..599caeb4e10 100644 --- a/dm/config/security_test.go +++ b/dm/config/security_test.go @@ -171,34 +171,3 @@ func (c *testTLSConfig) TestClone() { clone.CertAllowedCN[0] = "g" c.Require().NotEqual(s, clone) } - -func (c *testTLSConfig) TestLoadDumpTLSContent() { - s := &security.Security{ - SSLCA: caFilePath, - SSLCert: certFilePath, - SSLKey: keyFilePath, - } - err := s.LoadTLSContent() - c.Require().NoError(err) - c.Require().Greater(len(s.SSLCABytes), 0) - c.Require().Greater(len(s.SSLCertBytes), 0) - c.Require().Greater(len(s.SSLKeyBytes), 0) - - // cert file not exist - s.SSLCA += ".new" - s.SSLCert += ".new" - s.SSLKey += ".new" - c.Require().NoError(s.DumpTLSContent(c.T().TempDir())) - c.Require().FileExists(s.SSLCA) - c.Require().FileExists(s.SSLCert) - c.Require().FileExists(s.SSLKey) - - // user not specify cert file - s.SSLCA = "" - s.SSLCert = "" - s.SSLKey = "" - c.Require().NoError(s.DumpTLSContent(c.T().TempDir())) - c.Require().FileExists(s.SSLCA) - c.Require().FileExists(s.SSLCert) - c.Require().FileExists(s.SSLKey) -} diff --git a/dm/syncer/dml.go b/dm/syncer/dml.go index 614595db7e0..bc58a7d1a15 100644 --- a/dm/syncer/dml.go +++ b/dm/syncer/dml.go @@ -311,29 +311,6 @@ func castUnsigned(data interface{}, ft *types.FieldType) interface{} { return data } -func (s *Syncer) mappingDML(table *filter.Table, ti *model.TableInfo, data [][]interface{}) ([][]interface{}, error) { - if s.columnMapping == nil { - return data, nil - } - - columns := make([]string, 0, len(ti.Columns)) - for _, col := range ti.Columns { - columns = append(columns, col.Name.O) - } - - var ( - err error - rows = make([][]interface{}, len(data)) - ) - for i := range data { - rows[i], _, err = s.columnMapping.HandleRowValue(table.Schema, table.Name, columns, data[i]) - if err != nil { - return nil, terror.ErrSyncerUnitDoColumnMapping.Delegate(err, data[i], table) 
- } - } - return rows, nil -} - // checkLogColumns returns error when not all rows in skipped is empty, which means the binlog doesn't contain all // columns. // TODO: don't return error when all skipped columns is non-PK. diff --git a/dm/syncer/syncer.go b/dm/syncer/syncer.go index 653c55864c4..3101e2632e0 100644 --- a/dm/syncer/syncer.go +++ b/dm/syncer/syncer.go @@ -2516,10 +2516,7 @@ func (s *Syncer) handleRowsEvent(ev *replication.RowsEvent, ec eventContext) (*f if err != nil { return nil, terror.WithScope(err, terror.ScopeDownstream) } - originRows, err := s.mappingDML(sourceTable, tableInfo, ev.Rows) - if err != nil { - return nil, err - } + originRows := ev.Rows if err2 := checkLogColumns(ev.SkippedColumns); err2 != nil { return nil, err2 } From 65b3588f60b9114a71ceb9dc9203f6e17f824ccc Mon Sep 17 00:00:00 2001 From: lance6716 Date: Tue, 20 Dec 2022 14:53:07 +0800 Subject: [PATCH 05/26] fix more Signed-off-by: lance6716 --- dm/syncer/syncer.go | 1 - engine/jobmaster/dm/config/config.go | 7 +------ engine/jobmaster/dm/config/dm_subtask_3306.toml | 9 --------- engine/jobmaster/dm/config/dm_subtask_3307.toml | 9 --------- 4 files changed, 1 insertion(+), 25 deletions(-) diff --git a/dm/syncer/syncer.go b/dm/syncer/syncer.go index 3101e2632e0..6c14b26b71c 100644 --- a/dm/syncer/syncer.go +++ b/dm/syncer/syncer.go @@ -2511,7 +2511,6 @@ func (s *Syncer) handleRowsEvent(ev *replication.RowsEvent, ec eventContext) (*f return nil, nil } - // TODO(csuzhangxc): check performance of `getTable` from schema tracker. tableInfo, err := s.getTableInfo(ec.tctx, sourceTable, targetTable) if err != nil { return nil, terror.WithScope(err, terror.ScopeDownstream) diff --git a/engine/jobmaster/dm/config/config.go b/engine/jobmaster/dm/config/config.go index 3a32c0d7622..e52f422a46d 100644 --- a/engine/jobmaster/dm/config/config.go +++ b/engine/jobmaster/dm/config/config.go @@ -310,12 +310,7 @@ func (c *TaskCfg) ToDMSubTaskCfg(jobID string) *dmconfig.SubTaskConfig { for j, name := range c.Upstreams[0].FilterRules { cfg.FilterRules[j] = c.Filters[name] } - - cfg.ColumnMappingRules = make([]*column.Rule, len(c.Upstreams[0].ColumnMappingRules)) - for j, name := range c.Upstreams[0].ColumnMappingRules { - cfg.ColumnMappingRules[j] = c.ColumnMappings[name] - } - + cfg.ExprFilter = make([]*dmconfig.ExpressionFilter, len(c.Upstreams[0].ExpressionFilters)) for j, name := range c.Upstreams[0].ExpressionFilters { cfg.ExprFilter[j] = c.ExprFilter[name] diff --git a/engine/jobmaster/dm/config/dm_subtask_3306.toml b/engine/jobmaster/dm/config/dm_subtask_3306.toml index 872a3f19cf3..58d33e32f53 100644 --- a/engine/jobmaster/dm/config/dm_subtask_3306.toml +++ b/engine/jobmaster/dm/config/dm_subtask_3306.toml @@ -89,15 +89,6 @@ ansi-quotes = false sql-pattern = [] action = "Do" -[[mapping-rule]] - schema-pattern = "dmctl" - table-pattern = "t_*" - source-column = "id" - target-column = "id" - expression = "partition id" - arguments = ["1", "", "t_", ""] - create-table-query = "" - [[expression-filter]] schema = "expr_filter" table = "t1" diff --git a/engine/jobmaster/dm/config/dm_subtask_3307.toml b/engine/jobmaster/dm/config/dm_subtask_3307.toml index 9b0425cd826..13c4467a5e4 100644 --- a/engine/jobmaster/dm/config/dm_subtask_3307.toml +++ b/engine/jobmaster/dm/config/dm_subtask_3307.toml @@ -89,15 +89,6 @@ ansi-quotes = false sql-pattern = ["alter table .* add column aaa int"] action = "Ignore" -[[mapping-rule]] - schema-pattern = "dmctl" - table-pattern = "t_*" - source-column = "id" - target-column = "id" - expression 
= "partition id" - arguments = ["2", "", "t_", ""] - create-table-query = "" - [[expression-filter]] schema = "expr_filter" table = "t1" From 49f05c52948c691fdc271d4ec9204ccef3d0fd17 Mon Sep 17 00:00:00 2001 From: lance6716 Date: Tue, 20 Dec 2022 15:26:59 +0800 Subject: [PATCH 06/26] fix fmt Signed-off-by: lance6716 --- engine/jobmaster/dm/config/config.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/engine/jobmaster/dm/config/config.go b/engine/jobmaster/dm/config/config.go index e52f422a46d..daa765b7d5d 100644 --- a/engine/jobmaster/dm/config/config.go +++ b/engine/jobmaster/dm/config/config.go @@ -310,7 +310,7 @@ func (c *TaskCfg) ToDMSubTaskCfg(jobID string) *dmconfig.SubTaskConfig { for j, name := range c.Upstreams[0].FilterRules { cfg.FilterRules[j] = c.Filters[name] } - + cfg.ExprFilter = make([]*dmconfig.ExpressionFilter, len(c.Upstreams[0].ExpressionFilters)) for j, name := range c.Upstreams[0].ExpressionFilters { cfg.ExprFilter[j] = c.ExprFilter[name] From 1ce46a1fad5f2b146086b977276eb550b3efd50d Mon Sep 17 00:00:00 2001 From: lance6716 Date: Tue, 20 Dec 2022 15:54:35 +0800 Subject: [PATCH 07/26] fix fmt Signed-off-by: lance6716 --- dm/loader/checkpoint_test.go | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/dm/loader/checkpoint_test.go b/dm/loader/checkpoint_test.go index f9c367986ee..f3dbfca37af 100644 --- a/dm/loader/checkpoint_test.go +++ b/dm/loader/checkpoint_test.go @@ -25,9 +25,7 @@ import ( "github.com/pingcap/tiflow/dm/pkg/log" ) -var ( - _ = Suite(&lightningCpListSuite{}) -) +var _ = Suite(&lightningCpListSuite{}) type lightningCpListSuite struct { mock sqlmock.Sqlmock From cf9a4ee9b5502251ab5ff97e032fc1d74c05a037 Mon Sep 17 00:00:00 2001 From: lance6716 Date: Tue, 20 Dec 2022 16:14:35 +0800 Subject: [PATCH 08/26] fix again Signed-off-by: lance6716 --- dm/config/task_test.go | 2 +- dm/pkg/checker/conn_checker.go | 2 -- dm/pkg/checker/conn_checker_test.go | 16 ++++++++-------- 3 files changed, 9 insertions(+), 11 deletions(-) diff --git a/dm/config/task_test.go b/dm/config/task_test.go index b12e41b3c87..47db07e0a8d 100644 --- a/dm/config/task_test.go +++ b/dm/config/task_test.go @@ -214,7 +214,7 @@ mysql-instances: ` taskConfig = NewTaskConfig() err = taskConfig.Decode(errorTaskConfig) - require.ErrorContains(t, err, "The configurations as following [column-mapping-rule-2 expr-1 filter-rule-2 route-rule-2] are set in global configuration") + require.ErrorContains(t, err, "The configurations as following [expr-1 filter-rule-2 route-rule-2] are set in global configuration") } func TestName(t *testing.T) { diff --git a/dm/pkg/checker/conn_checker.go b/dm/pkg/checker/conn_checker.go index 7a91ef295ca..265bc7d33b8 100644 --- a/dm/pkg/checker/conn_checker.go +++ b/dm/pkg/checker/conn_checker.go @@ -173,8 +173,6 @@ func (l *LoaderConnNumberChecker) Check(ctx context.Context) *Result { mysql.SuperPriv: {needGlobal: true}, }) if !l.unlimitedConn && result.State == StateFailure { - // if the max_connections is set as a specific number - // and we failed because of the number connecions needed is smaller than max_connections // if we're using lightning, this error should be omitted // because lightning doesn't need to keep connections while restoring. 
result.Errors = append( diff --git a/dm/pkg/checker/conn_checker_test.go b/dm/pkg/checker/conn_checker_test.go index 742fb9e9cf7..5aa50e66f28 100644 --- a/dm/pkg/checker/conn_checker_test.go +++ b/dm/pkg/checker/conn_checker_test.go @@ -24,7 +24,6 @@ import ( ) func TestConnNumberChecker(t *testing.T) { - var err error db, dbMock, err := sqlmock.New() require.NoError(t, err) stCfgs := []*config.SubTaskConfig{ @@ -38,7 +37,7 @@ func TestConnNumberChecker(t *testing.T) { }, } baseDB := conn.NewBaseDBForTest(db, func() {}) - // test loader: fail + // test lightning: warning dbMock.ExpectQuery("SHOW GLOBAL VARIABLES LIKE 'max_connections'").WillReturnRows(sqlmock.NewRows([]string{"Variable_name", "Value"}). AddRow("max_connections", 16)) dbMock.ExpectQuery("SHOW GRANTS").WillReturnRows(sqlmock.NewRows([]string{"Grants for User"}). @@ -49,11 +48,12 @@ func TestConnNumberChecker(t *testing.T) { ) loaderChecker := NewLoaderConnNumberChecker(baseDB, stCfgs) result := loaderChecker.Check(context.Background()) - require.Equal(t, 1, len(result.Errors)) - require.Equal(t, StateFailure, result.State) - require.Regexp(t, "(.|\n)*is less than the number loader(.|\n)*", result.Errors[0].ShortErr) + require.Equal(t, StateWarning, result.State) + require.Equal(t, 2, len(result.Errors)) + require.Contains(t, result.Errors[0].ShortErr, "is less than the number loader") + require.Contains(t, result.Errors[1].ShortErr, "task precheck cannot accurately check the number of connection needed for Lightning") - // test loader: success + // test lightning: success db, dbMock, err = sqlmock.New() require.NoError(t, err) baseDB = conn.NewBaseDBForTest(db, func() {}) @@ -70,7 +70,7 @@ func TestConnNumberChecker(t *testing.T) { require.Equal(t, 0, len(result.Errors)) require.Equal(t, StateSuccess, result.State) - // test loader maxConn - usedConn < neededConn: warn + // test lightning maxConn - usedConn < neededConn: warn db, dbMock, err = sqlmock.New() require.NoError(t, err) baseDB = conn.NewBaseDBForTest(db, func() {}) @@ -89,7 +89,7 @@ func TestConnNumberChecker(t *testing.T) { require.Equal(t, StateWarning, result.State) require.Regexp(t, "(.|\n)*is less than loader needs(.|\n)*", result.Errors[0].ShortErr) - // test loader no enough privilege: warn + // test lightning no enough privilege: warn db, dbMock, err = sqlmock.New() require.NoError(t, err) baseDB = conn.NewBaseDBForTest(db, func() {}) From 3f827089757581d9b3201a1127dc8bcb3fdaf101 Mon Sep 17 00:00:00 2001 From: lance6716 Date: Tue, 20 Dec 2022 16:29:45 +0800 Subject: [PATCH 09/26] fix Signed-off-by: lance6716 --- dm/syncer/syncer_test.go | 15 +-------------- 1 file changed, 1 insertion(+), 14 deletions(-) diff --git a/dm/syncer/syncer_test.go b/dm/syncer/syncer_test.go index 7a5f962f3b3..7fd9afbf269 100644 --- a/dm/syncer/syncer_test.go +++ b/dm/syncer/syncer_test.go @@ -752,18 +752,7 @@ func (s *testSyncerSuite) TestRun(c *C) { {Schema: "test_1", Name: "t_2"}, }, } - - s.cfg.ColumnMappingRules = []*cm.Rule{ - { - PatternSchema: "test_*", - PatternTable: "t_*", - SourceColumn: "id", - TargetColumn: "id", - Expression: cm.PartitionID, - Arguments: []string{"1", "test_", "t_"}, - }, - } - + s.cfg.Batch = 1000 s.cfg.WorkerCount = 2 s.cfg.MaxRetry = 1 @@ -802,8 +791,6 @@ func (s *testSyncerSuite) TestRun(c *C) { syncer.exprFilterGroup = NewExprFilterGroup(tcontext.Background(), utils.NewSessionCtx(nil), nil) c.Assert(syncer.Type(), Equals, pb.UnitType_Sync) - syncer.columnMapping, err = cm.NewMapping(s.cfg.CaseSensitive, s.cfg.ColumnMappingRules) - 
c.Assert(err, IsNil) c.Assert(syncer.genRouter(), IsNil) syncer.metricsProxies = metrics.DefaultMetricsProxies.CacheForOneTask("task", "worker", "source") From 3235b15f078c788f6d211c6bf3fb70f6eff5eda5 Mon Sep 17 00:00:00 2001 From: lance6716 Date: Tue, 20 Dec 2022 16:37:46 +0800 Subject: [PATCH 10/26] fix make fmt Signed-off-by: lance6716 --- dm/syncer/syncer_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/dm/syncer/syncer_test.go b/dm/syncer/syncer_test.go index 7fd9afbf269..cc7e03ae3f8 100644 --- a/dm/syncer/syncer_test.go +++ b/dm/syncer/syncer_test.go @@ -752,7 +752,7 @@ func (s *testSyncerSuite) TestRun(c *C) { {Schema: "test_1", Name: "t_2"}, }, } - + s.cfg.Batch = 1000 s.cfg.WorkerCount = 2 s.cfg.MaxRetry = 1 From e340ab706f24586cff09836603a1dced5de52b7a Mon Sep 17 00:00:00 2001 From: lance6716 Date: Tue, 20 Dec 2022 16:58:51 +0800 Subject: [PATCH 11/26] fix again Signed-off-by: lance6716 --- dm/loader/db.go | 247 ----------------------------------------- dm/loader/lightning.go | 11 +- dm/loader/util.go | 11 -- 3 files changed, 2 insertions(+), 267 deletions(-) delete mode 100644 dm/loader/db.go diff --git a/dm/loader/db.go b/dm/loader/db.go deleted file mode 100644 index fea42b3b4c3..00000000000 --- a/dm/loader/db.go +++ /dev/null @@ -1,247 +0,0 @@ -// Copyright 2019 PingCAP, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// See the License for the specific language governing permissions and -// limitations under the License. - -package loader - -import ( - "database/sql" - "strconv" - "strings" - "time" - - "github.com/go-sql-driver/mysql" - "github.com/pingcap/failpoint" - tmysql "github.com/pingcap/tidb/parser/mysql" - "github.com/pingcap/tidb/util/dbutil" - "github.com/pingcap/tiflow/dm/config" - "github.com/pingcap/tiflow/dm/pkg/conn" - tcontext "github.com/pingcap/tiflow/dm/pkg/context" - "github.com/pingcap/tiflow/dm/pkg/log" - "github.com/pingcap/tiflow/dm/pkg/retry" - "github.com/pingcap/tiflow/dm/pkg/terror" - "github.com/pingcap/tiflow/dm/pkg/utils" - "go.uber.org/zap" -) - -// DBConn represents a live DB connection -// it's not thread-safe. -type DBConn struct { - name string - sourceID string - baseConn *conn.BaseConn - - // generate new BaseConn and close old one - resetBaseConnFn func(*tcontext.Context, *conn.BaseConn) (*conn.BaseConn, error) -} - -// Scope return connection scope. 
-func (conn *DBConn) Scope() terror.ErrScope { - if conn == nil || conn.baseConn == nil { - return terror.ScopeNotSet - } - return conn.baseConn.Scope -} - -func (conn *DBConn) querySQL(ctx *tcontext.Context, query string, args ...interface{}) (*sql.Rows, error) { - if conn == nil || conn.baseConn == nil { - return nil, terror.ErrDBUnExpect.Generate("database connection not valid") - } - - params := retry.Params{ - RetryCount: 10, - FirstRetryDuration: time.Second, - BackoffStrategy: retry.Stable, - IsRetryableFn: func(retryTime int, err error) bool { - if retry.IsConnectionError(err) { - err = conn.resetConn(ctx) - if err != nil { - ctx.L().Error("reset connection failed", zap.Int("retry", retryTime), - zap.String("query", utils.TruncateInterface(query, -1)), - zap.String("arguments", utils.TruncateInterface(args, -1)), - log.ShortError(err)) - return false - } - return true - } - if dbutil.IsRetryableError(err) { - ctx.L().Warn("query statement", zap.Int("retry", retryTime), - zap.String("query", utils.TruncateString(query, -1)), - zap.String("argument", utils.TruncateInterface(args, -1)), - log.ShortError(err)) - return true - } - return false - }, - } - - ret, _, err := conn.baseConn.ApplyRetryStrategy( - ctx, - params, - func(ctx *tcontext.Context) (interface{}, error) { - startTime := time.Now() - ret, err := conn.baseConn.QuerySQL(ctx, query, args...) - if err == nil { - if ret.Err() != nil { - return ret, ret.Err() - } - cost := time.Since(startTime) - // duration seconds - ds := cost.Seconds() - queryHistogram.WithLabelValues(conn.name, conn.sourceID).Observe(ds) - if ds > 1 { - ctx.L().Warn("query statement too slow", - zap.Duration("cost time", cost), - zap.String("query", utils.TruncateString(query, -1)), - zap.String("argument", utils.TruncateInterface(args, -1))) - } - } - return ret, err - }) - if err != nil { - ctx.L().ErrorFilterContextCanceled("query statement failed after retry", - zap.String("query", utils.TruncateString(query, -1)), - zap.String("argument", utils.TruncateInterface(args, -1)), - log.ShortError(err)) - return nil, err - } - return ret.(*sql.Rows), nil -} - -func (conn *DBConn) executeSQL(ctx *tcontext.Context, queries []string, args ...[]interface{}) error { - if len(queries) == 0 { - return nil - } - - if conn == nil || conn.baseConn == nil { - return terror.ErrDBUnExpect.Generate("database connection not valid") - } - - params := retry.Params{ - RetryCount: 10, - FirstRetryDuration: 2 * time.Second, - BackoffStrategy: retry.LinearIncrease, - IsRetryableFn: func(retryTime int, err error) bool { - tidbExecutionErrorCounter.WithLabelValues(conn.name, conn.sourceID).Inc() - if retry.IsConnectionError(err) { - err = conn.resetConn(ctx) - if err != nil { - ctx.L().Error("reset connection failed", zap.Int("retry", retryTime), - zap.String("queries", utils.TruncateInterface(queries, -1)), - zap.String("arguments", utils.TruncateInterface(args, -1)), - log.ShortError(err)) - return false - } - return true - } - if dbutil.IsRetryableError(err) { - ctx.L().Warn("execute statements", zap.Int("retry", retryTime), - zap.String("queries", utils.TruncateInterface(queries, -1)), - zap.String("arguments", utils.TruncateInterface(args, -1)), - log.ShortError(err)) - return true - } - return false - }, - } - - _, _, err := conn.baseConn.ApplyRetryStrategy( - ctx, - params, - func(ctx *tcontext.Context) (interface{}, error) { - startTime := time.Now() - _, err := conn.baseConn.ExecuteSQL(ctx, stmtHistogram, conn.name, queries, args...) 
- failpoint.Inject("LoadExecCreateTableFailed", func(val failpoint.Value) { - errCode, err1 := strconv.ParseUint(val.(string), 10, 16) - if err1 != nil { - ctx.L().Fatal("failpoint LoadExecCreateTableFailed's value is invalid", zap.String("val", val.(string))) - } - - if len(queries) == 1 && strings.Contains(queries[0], "CREATE TABLE") { - err = &mysql.MySQLError{Number: uint16(errCode), Message: ""} - ctx.L().Warn("executeSQL failed", zap.String("failpoint", "LoadExecCreateTableFailed"), zap.Error(err)) - } - }) - if err == nil { - cost := time.Since(startTime) - // duration seconds - ds := cost.Seconds() - if ds > 1 { - ctx.L().Warn("execute transaction too slow", - zap.Duration("cost time", cost), - zap.String("query", utils.TruncateInterface(queries, -1)), - zap.String("argument", utils.TruncateInterface(args, -1))) - } - } - return nil, err - }) - if err != nil { - ctx.L().ErrorFilterContextCanceled("execute statements failed after retry", - zap.String("queries", utils.TruncateInterface(queries, -1)), - zap.String("arguments", utils.TruncateInterface(args, -1)), - log.ShortError(err)) - } - - return err -} - -// resetConn reset one worker connection from specify *BaseDB. -func (conn *DBConn) resetConn(tctx *tcontext.Context) error { - baseConn, err := conn.resetBaseConnFn(tctx, conn.baseConn) - if err != nil { - return err - } - conn.baseConn = baseConn - return nil -} - -func createConns(tctx *tcontext.Context, cfg *config.SubTaskConfig, - name, sourceID string, - workerCount int, -) (*conn.BaseDB, []*DBConn, error) { - baseDB, err := conn.GetDownstreamDB(&cfg.To) - if err != nil { - return nil, nil, terror.WithScope(err, terror.ScopeDownstream) - } - conns := make([]*DBConn, 0, workerCount) - for i := 0; i < workerCount; i++ { - baseConn, err := baseDB.GetBaseConn(tctx.Context()) - if err != nil { - terr := baseDB.Close() - if terr != nil { - tctx.L().Error("failed to close baseDB", zap.Error(terr)) - } - return nil, nil, terror.WithScope(err, terror.ScopeDownstream) - } - resetBaseConnFn := func(tctx *tcontext.Context, baseConn *conn.BaseConn) (*conn.BaseConn, error) { - err := baseDB.ForceCloseConn(baseConn) - if err != nil { - tctx.L().Warn("failed to close baseConn in reset") - } - return baseDB.GetBaseConn(tctx.Context()) - } - conns = append(conns, &DBConn{baseConn: baseConn, name: name, sourceID: sourceID, resetBaseConnFn: resetBaseConnFn}) - } - return baseDB, conns, nil -} - -func isErrDBExists(err error) bool { - return conn.IsMySQLError(err, tmysql.ErrDBCreateExists) -} - -func isErrTableExists(err error) bool { - return conn.IsMySQLError(err, tmysql.ErrTableExists) -} - -func isErrDupEntry(err error) bool { - return conn.IsMySQLError(err, tmysql.ErrDupEntry) -} diff --git a/dm/loader/lightning.go b/dm/loader/lightning.go index 01d969f87f1..b68b393fee4 100644 --- a/dm/loader/lightning.go +++ b/dm/loader/lightning.go @@ -33,7 +33,6 @@ import ( "github.com/pingcap/tiflow/dm/pb" "github.com/pingcap/tiflow/dm/pkg/binlog" "github.com/pingcap/tiflow/dm/pkg/conn" - tcontext "github.com/pingcap/tiflow/dm/pkg/context" "github.com/pingcap/tiflow/dm/pkg/log" "github.com/pingcap/tiflow/dm/pkg/storage" "github.com/pingcap/tiflow/dm/pkg/terror" @@ -68,8 +67,7 @@ type LightningLoader struct { core *lightning.Lightning cancel context.CancelFunc // for per task context, which maybe different from lightning context - toDBConns []*DBConn - toDB *conn.BaseDB + toDB *conn.BaseDB workerName string finish atomic.Bool @@ -132,12 +130,7 @@ func (l *LightningLoader) Type() pb.UnitType { // Init 
initializes loader for a load task, but not start Process. // if fail, it should not call l.Close. func (l *LightningLoader) Init(ctx context.Context) (err error) { - tctx := tcontext.NewContext(ctx, l.logger) - toCfg, err := l.cfg.Clone() - if err != nil { - return err - } - l.toDB, l.toDBConns, err = createConns(tctx, l.cfg, toCfg.Name, toCfg.SourceID, 1) + l.toDB, err = conn.GetDownstreamDB(&l.cfg.To) if err != nil { return err } diff --git a/dm/loader/util.go b/dm/loader/util.go index 21ba0c30a13..95fac2530a5 100644 --- a/dm/loader/util.go +++ b/dm/loader/util.go @@ -75,17 +75,6 @@ func percent(a int64, b int64, finish bool) string { return fmt.Sprintf("%.2f %%", float64(a)/float64(b)*100) } -// progress calculates progress of a/b. -func progress(a int64, b int64, finish bool) float64 { - if b == 0 { - if finish { - return 1 - } - return 0 - } - return float64(a) / float64(b) -} - func generateSchemaCreateFile(dir string, schema string) error { file, err := os.Create(path.Join(dir, fmt.Sprintf("%s-schema-create.sql", schema))) if err != nil { From 095cab9732e3f5044b0d3fbd2fd4a4886f69e462 Mon Sep 17 00:00:00 2001 From: lance6716 Date: Tue, 20 Dec 2022 17:18:22 +0800 Subject: [PATCH 12/26] fix another UT Signed-off-by: lance6716 --- dm/syncer/syncer_test.go | 17 ++++++++--------- 1 file changed, 8 insertions(+), 9 deletions(-) diff --git a/dm/syncer/syncer_test.go b/dm/syncer/syncer_test.go index cc7e03ae3f8..c8cb0002147 100644 --- a/dm/syncer/syncer_test.go +++ b/dm/syncer/syncer_test.go @@ -727,11 +727,10 @@ func (s *testSyncerSuite) TestcheckpointID(c *C) { // TODO: add `TestSharding` later. func (s *testSyncerSuite) TestRun(c *C) { - // 1. run syncer with column mapping - // 2. execute some sqls which will trigger causality - // 3. check the generated jobs - // 4. update config, add route rules, and update syncer - // 5. execute some sqls and then check jobs generated + // 1. execute some sqls which will trigger causality + // 2. check the generated jobs + // 3. update config, add route rules, and update syncer + // 4. execute some sqls and then check jobs generated db, mock, err := sqlmock.New() c.Assert(err, IsNil) @@ -866,7 +865,7 @@ func (s *testSyncerSuite) TestRun(c *C) { }, { dml, []string{"REPLACE INTO `test_1`.`t_1` (`id`,`name`) VALUES (?,?)"}, - [][]interface{}{{int64(580981944116838401), "a"}}, + [][]interface{}{{int64(1), "a"}}, }, { flush, nil, @@ -878,16 +877,16 @@ func (s *testSyncerSuite) TestRun(c *C) { }, { dml, []string{"REPLACE INTO `test_1`.`t_1` (`id`,`name`) VALUES (?,?)"}, - [][]interface{}{{int64(580981944116838402), "b"}}, + [][]interface{}{{int64(2), "b"}}, }, { dml, []string{"DELETE FROM `test_1`.`t_1` WHERE `id` = ? LIMIT 1"}, - [][]interface{}{{int64(580981944116838401)}}, + [][]interface{}{{int64(1)}}, }, { // safe mode is true, will split update to delete + replace dml, []string{"DELETE FROM `test_1`.`t_1` WHERE `id` = ? 
LIMIT 1", "REPLACE INTO `test_1`.`t_1` (`id`,`name`) VALUES (?,?)"}, - [][]interface{}{{int64(580981944116838402)}, {int64(580981944116838401), "b"}}, + [][]interface{}{{int64(2)}, {int64(1), "b"}}, }, { flush, nil, From 568e16e2d4f80d711efb6bc214c577ae24458e59 Mon Sep 17 00:00:00 2001 From: lance6716 Date: Tue, 20 Dec 2022 17:47:36 +0800 Subject: [PATCH 13/26] fix go-generate Signed-off-by: lance6716 --- dm/pkg/terror/errcode_string.go | 774 ++++++++++++++++---------------- 1 file changed, 388 insertions(+), 386 deletions(-) diff --git a/dm/pkg/terror/errcode_string.go b/dm/pkg/terror/errcode_string.go index f949c70b7c5..8e6a4d6ad9c 100644 --- a/dm/pkg/terror/errcode_string.go +++ b/dm/pkg/terror/errcode_string.go @@ -206,6 +206,7 @@ func _() { _ = x[codeConfigInvalidSafeModeDuration-20060] _ = x[codeConfigConfictSafeModeDurationAndSafeMode-20061] _ = x[codeConfigInvalidLoadPhysicalDuplicateResolution-20062] + _ = x[codeConfigColumnMappingDeprecated-20063] _ = x[codeBinlogExtractPosition-22001] _ = x[codeBinlogInvalidFilename-22002] _ = x[codeBinlogParsePosFromStr-22003] @@ -593,7 +594,7 @@ func _() { _ = x[codeNotSet-50000] } -const _ErrCode_name = "DBDriverErrorDBBadConnDBInvalidConnDBUnExpectDBQueryFailedDBExecuteFailedParseMydumperMetaGetFileSizeDropMultipleTablesRenameMultipleTablesAlterMultipleTablesParseSQLUnknownTypeDDLRestoreASTNodeParseGTIDNotSupportedFlavorNotMySQLGTIDNotMariaDBGTIDNotUUIDStringMariaDBDomainIDInvalidServerIDGetSQLModeFromStrVerifySQLOperateArgsStatFileSizeReaderAlreadyRunningReaderAlreadyStartedReaderStateCannotCloseReaderShouldStartSyncEmptyRelayDirReadDirBaseFileNotFoundBinFileCmpCondNotSupportBinlogFileNotValidBinlogFilesNotFoundGetRelayLogStatAddWatchForRelayLogDirWatcherStartWatcherChanClosedWatcherChanRecvErrorRelayLogFileSizeSmallerBinlogFileNotSpecifiedNoRelayLogMatchPosFirstRelayLogNotMatchPosParserParseRelayLogNoSubdirToSwitchNeedSyncAgainSyncClosedSchemaTableNameNotValidGenTableRouterEncryptSecretKeyNotValidEncryptGenCipherEncryptGenIVCiphertextLenNotValidCiphertextContextNotValidInvalidBinlogPosStrEncCipherTextBase64DecodeBinlogWriteBinaryDataBinlogWriteDataToBufferBinlogHeaderLengthNotValidBinlogEventDecodeBinlogEmptyNextBinNameBinlogParseSIDBinlogEmptyGTIDBinlogGTIDSetNotValidBinlogGTIDMySQLNotValidBinlogGTIDMariaDBNotValidBinlogMariaDBServerIDMismatchBinlogOnlyOneGTIDSupportBinlogOnlyOneIntervalInUUIDBinlogIntervalValueNotValidBinlogEmptyQueryBinlogTableMapEvNotValidBinlogExpectFormatDescEvBinlogExpectTableMapEvBinlogExpectRowsEvBinlogUnexpectedEvBinlogParseSingleEvBinlogEventTypeNotValidBinlogEventNoRowsBinlogEventNoColumnsBinlogEventRowLengthNotEqBinlogColumnTypeNotSupportBinlogGoMySQLTypeNotSupportBinlogColumnTypeMisMatchBinlogDummyEvSizeTooSmallBinlogFlavorNotSupportBinlogDMLEmptyDataBinlogLatestGTIDNotInPrevBinlogReadFileByGTIDBinlogWriterNotStateNewBinlogWriterStateCannotCloseBinlogWriterNeedStartBinlogWriterOpenFileBinlogWriterGetFileStatBinlogWriterWriteDataLenBinlogWriterFileNotOpenedBinlogWriterFileSyncBinlogPrevGTIDEvNotValidBinlogDecodeMySQLGTIDSetBinlogNeedMariaDBGTIDSetBinlogParseMariaDBGTIDSetBinlogMariaDBAddGTIDSetTracingEventDataNotValidTracingUploadDataTracingEventTypeNotValidTracingGetTraceCodeTracingDataChecksumTracingGetTSOBackoffArgsNotValidInitLoggerFailGTIDTruncateInvalidRelayLogGivenPosTooBigElectionCampaignFailElectionGetLeaderIDFailBinlogInvalidFilenameWithUUIDSuffixDecodeEtcdKeyFailShardDDLOptimismTrySyncFailConnInvalidTLSConfigConnRegistryTLSConfigUpgradeVersionEtcdFailInvalidV1WorkerMetaPathFailUpdateV1DBSchemaBinlogS
tatusVarsParseVerifyHandleErrorArgsRewriteSQLNoUUIDDirMatchGTIDNoRelayPosMatchGTIDReaderReachEndOfFileMetadataNoBinlogLocPreviousGTIDNotExistNoMasterStatusBinlogNotLogColumnShardDDLOptimismNeedSkipAndRedirectShardDDLOptimismAddNotFullyDroppedColumnSyncerCancelledDDLIncorrectReturnColumnsNumConfigCheckItemNotSupportConfigTomlTransformConfigYamlTransformConfigTaskNameEmptyConfigEmptySourceIDConfigTooLongSourceIDConfigOnlineSchemeNotSupportConfigInvalidTimezoneConfigParseFlagSetConfigDecryptDBPasswordConfigMetaInvalidConfigMySQLInstNotFoundConfigMySQLInstsAtLeastOneConfigMySQLInstSameSourceIDConfigMydumperCfgConflictConfigLoaderCfgConflictConfigSyncerCfgConflictConfigReadCfgFromFileConfigNeedUniqueTaskNameConfigInvalidTaskModeConfigNeedTargetDBConfigMetadataNotSetConfigRouteRuleNotFoundConfigFilterRuleNotFoundConfigColumnMappingNotFoundConfigBAListNotFoundConfigMydumperCfgNotFoundConfigMydumperPathNotValidConfigLoaderCfgNotFoundConfigSyncerCfgNotFoundConfigSourceIDNotFoundConfigDuplicateCfgItemConfigShardModeNotSupportConfigMoreThanOneConfigEtcdParseConfigMissingForBoundConfigBinlogEventFilterConfigGlobalConfigsUnusedConfigExprFilterManyExprConfigExprFilterNotFoundConfigExprFilterWrongGrammarConfigExprFilterEmptyNameConfigCheckerMaxTooSmallConfigGenBAListConfigGenTableRouterConfigGenColumnMappingConfigInvalidChunkFileSizeConfigOnlineDDLInvalidRegexConfigOnlineDDLMistakeRegexConfigOpenAPITaskConfigExistConfigOpenAPITaskConfigNotExistCollationCompatibleNotSupportConfigInvalidLoadModeConfigInvalidLoadDuplicateResolutionConfigValidationModeContinuousValidatorCfgNotFoundConfigStartTimeTooLateConfigLoaderDirInvalidConfigLoaderS3NotSupportConfigInvalidSafeModeDurationConfigConfictSafeModeDurationAndSafeModeConfigInvalidLoadPhysicalDuplicateResolutionBinlogExtractPositionBinlogInvalidFilenameBinlogParsePosFromStrCheckpointInvalidTaskModeCheckpointSaveInvalidPosCheckpointInvalidTableFileCheckpointDBNotExistInFileCheckpointTableNotExistInFileCheckpointRestoreCountGreaterTaskCheckSameTableNameTaskCheckFailedOpenDBTaskCheckGenTableRouterTaskCheckGenColumnMappingTaskCheckSyncConfigErrorTaskCheckGenBAListSourceCheckGTIDRelayParseUUIDIndexRelayParseUUIDSuffixRelayUUIDWithSuffixNotFoundRelayGenFakeRotateEventRelayNoValidRelaySubDirRelayUUIDSuffixNotValidRelayUUIDSuffixLessThanPrevRelayLoadMetaDataRelayBinlogNameNotValidRelayNoCurrentUUIDRelayFlushLocalMetaRelayUpdateIndexFileRelayLogDirpathEmptyRelayReaderNotStateNewRelayReaderStateCannotCloseRelayReaderNeedStartRelayTCPReaderStartSyncRelayTCPReaderNilGTIDRelayTCPReaderStartSyncGTIDRelayTCPReaderGetEventRelayWriterNotStateNewRelayWriterStateCannotCloseRelayWriterNeedStartRelayWriterNotOpenedRelayWriterExpectRotateEvRelayWriterRotateEvWithNoWriterRelayWriterStatusNotValidRelayWriterGetFileStatRelayWriterLatestPosGTFileSizeRelayWriterFileOperateRelayCheckBinlogFileHeaderExistRelayCheckFormatDescEventExistRelayCheckFormatDescEventParseEvRelayCheckIsDuplicateEventRelayUpdateGTIDRelayNeedPrevGTIDEvBeforeGTIDEvRelayNeedMaGTIDListEvBeforeGTIDEvRelayMkdirRelaySwitchMasterNeedGTIDRelayThisStrategyIsPurgingRelayOtherStrategyIsPurgingRelayPurgeIsForbiddenRelayNoActiveRelayLogRelayPurgeRequestNotValidRelayTrimUUIDNotFoundRelayRemoveFileFailRelayPurgeArgsNotValidPreviousGTIDsNotValidRotateEventWithDifferentServerIDDumpUnitRuntimeDumpUnitGenTableRouterDumpUnitGenBAListDumpUnitGlobalLockLoadUnitCreateSchemaFileLoadUnitInvalidFileEndingLoadUnitParseQuoteValuesLoadUnitDoColumnMappingLoadUnitReadSchemaFileLoadUnitParseStatementLoadUnitNotCreateTableLoadUnitDispatchSQLFromFileL
oadUnitInvalidInsertSQLLoadUnitGenTableRouterLoadUnitGenColumnMappingLoadUnitNoDBFileLoadUnitNoTableFileLoadUnitDumpDirNotFoundLoadUnitDuplicateTableFileLoadUnitGenBAListLoadTaskWorkerNotMatchLoadCheckPointNotMatchLoadLightningRuntimeLoadLightningHasDupSyncerUnitPanicSyncUnitInvalidTableNameSyncUnitTableNameQuerySyncUnitNotSupportedDMLSyncUnitAddTableInShardingSyncUnitDropSchemaTableInShardingSyncUnitInvalidShardMetaSyncUnitDDLWrongSequenceSyncUnitDDLActiveIndexLargerSyncUnitDupTableGroupSyncUnitShardingGroupNotFoundSyncUnitSafeModeSetCountSyncUnitCausalityConflictSyncUnitDMLStatementFoundSyncerUnitBinlogEventFilterSyncerUnitInvalidReplicaEventSyncerUnitParseStmtSyncerUnitUUIDNotLatestSyncerUnitDDLExecChanCloseOrBusySyncerUnitDDLChanDoneSyncerUnitDDLChanCanceledSyncerUnitDDLOnMultipleTableSyncerUnitInjectDDLOnlySyncerUnitInjectDDLWithoutSchemaSyncerUnitNotSupportedOperateSyncerUnitNilOperatorReqSyncerUnitDMLColumnNotMatchSyncerUnitDMLOldNewValueMismatchSyncerUnitDMLPruneColumnMismatchSyncerUnitGenBinlogEventFilterSyncerUnitGenTableRouterSyncerUnitGenColumnMappingSyncerUnitDoColumnMappingSyncerUnitCacheKeyNotFoundSyncerUnitHeartbeatCheckConfigSyncerUnitHeartbeatRecordExistsSyncerUnitHeartbeatRecordNotFoundSyncerUnitHeartbeatRecordNotValidSyncerUnitOnlineDDLInvalidMetaSyncerUnitOnlineDDLSchemeNotSupportSyncerUnitOnlineDDLOnMultipleTableSyncerUnitGhostApplyEmptyTableSyncerUnitGhostRenameTableNotValidSyncerUnitGhostRenameToGhostTableSyncerUnitGhostRenameGhostTblToOtherSyncerUnitGhostOnlineDDLOnGhostTblSyncerUnitPTApplyEmptyTableSyncerUnitPTRenameTableNotValidSyncerUnitPTRenameToPTTableSyncerUnitPTRenamePTTblToOtherSyncerUnitPTOnlineDDLOnPTTblSyncerUnitRemoteSteamerWithGTIDSyncerUnitRemoteSteamerStartSyncSyncerUnitGetTableFromDBSyncerUnitFirstEndPosNotFoundSyncerUnitResolveCasualityFailSyncerUnitReopenStreamNotSupportSyncerUnitUpdateConfigInShardingSyncerUnitExecWithNoBlockingDDLSyncerUnitGenBAListSyncerUnitHandleDDLFailedSyncerShardDDLConflictSyncerFailpointSyncerEventSyncerOperatorNotExistSyncerEventNotExistSyncerParseDDLSyncerUnsupportedStmtSyncerGetEventSyncerDownstreamTableNotFoundSyncerReprocessWithSafeModeFailMasterSQLOpNilRequestMasterSQLOpNotSupportMasterSQLOpWithoutShardingMasterGRPCCreateConnMasterGRPCSendOnCloseConnMasterGRPCClientCloseMasterGRPCInvalidReqTypeMasterGRPCRequestErrorMasterDeployMapperVerifyMasterConfigParseFlagSetMasterConfigUnknownItemMasterConfigInvalidFlagMasterConfigTomlTransformMasterConfigTimeoutParseMasterConfigUpdateCfgFileMasterShardingDDLDiffMasterStartServiceMasterNoEmitTokenMasterLockNotFoundMasterLockIsResolvingMasterWorkerCliNotFoundMasterWorkerNotWaitLockMasterHandleSQLReqFailMasterOwnerExecDDLMasterPartWorkerExecDDLFailMasterWorkerExistDDLLockMasterGetWorkerCfgExtractorMasterTaskConfigExtractorMasterWorkerArgsExtractorMasterQueryWorkerConfigMasterOperNotFoundMasterOperRespNotSuccessMasterOperRequestTimeoutMasterHandleHTTPApisMasterHostPortNotValidMasterGetHostnameFailMasterGenEmbedEtcdConfigFailMasterStartEmbedEtcdFailMasterParseURLFailMasterJoinEmbedEtcdFailMasterInvalidOperateOpMasterAdvertiseAddrNotValidMasterRequestIsNotForwardToLeaderMasterIsNotAsyncRequestMasterFailToGetExpectResultMasterPessimistNotStartedMasterOptimistNotStartedMasterMasterNameNotExistMasterInvalidOfflineTypeMasterAdvertisePeerURLsNotValidMasterTLSConfigNotValidMasterBoundChangingMasterFailToImportFromV10xMasterInconsistentOptimistDDLsAndInfoMasterOptimisticTableInfobeforeNotExistMasterOptimisticDownstreamMetaNotFoundMasterInvalidClusterIDMasterStartTaskWorkerParseFlagSetWorkerI
nvalidFlagWorkerDecodeConfigFromFileWorkerUndecodedItemFromFileWorkerNeedSourceIDWorkerTooLongSourceIDWorkerRelayBinlogNameWorkerWriteConfigFileWorkerLogInvalidHandlerWorkerLogPointerInvalidWorkerLogFetchPointerWorkerLogUnmarshalPointerWorkerLogClearPointerWorkerLogTaskKeyNotValidWorkerLogUnmarshalTaskKeyWorkerLogFetchLogIterWorkerLogGetTaskLogWorkerLogUnmarshalBinaryWorkerLogForwardPointerWorkerLogMarshalTaskWorkerLogSaveTaskWorkerLogDeleteKVWorkerLogDeleteKVIterWorkerLogUnmarshalTaskMetaWorkerLogFetchTaskFromMetaWorkerLogVerifyTaskMetaWorkerLogSaveTaskMetaWorkerLogGetTaskMetaWorkerLogDeleteTaskMetaWorkerMetaTomlTransformWorkerMetaOldFileStatWorkerMetaOldReadFileWorkerMetaEncodeTaskWorkerMetaRemoveOldDirWorkerMetaTaskLogNotFoundWorkerMetaHandleTaskOrderWorkerMetaOpenTxnWorkerMetaCommitTxnWorkerRelayStageNotValidWorkerRelayOperNotSupportWorkerOpenKVDBFileWorkerUpgradeCheckKVDirWorkerMarshalVerBinaryWorkerUnmarshalVerBinaryWorkerGetVersionFromKVWorkerSaveVersionToKVWorkerVerAutoDowngradeWorkerStartServiceWorkerAlreadyClosedWorkerNotRunningStageWorkerNotPausedStageWorkerUpdateTaskStageWorkerMigrateStopRelayWorkerSubTaskNotFoundWorkerSubTaskExistsWorkerOperSyncUnitOnlyWorkerRelayUnitStageWorkerNoSyncerRunningWorkerCannotUpdateSourceIDWorkerNoAvailUnitsWorkerDDLLockInfoNotFoundWorkerDDLLockInfoExistsWorkerCacheDDLInfoExistsWorkerExecSkipDDLConflictWorkerExecDDLSyncerOnlyWorkerExecDDLTimeoutWorkerWaitRelayCatchupTimeoutWorkerRelayIsPurgingWorkerHostPortNotValidWorkerNoStartWorkerAlreadyStartedWorkerSourceNotMatchWorkerFailToGetSubtaskConfigFromEtcdWorkerFailToGetSourceConfigFromEtcdWorkerDDLLockOpNotFoundWorkerTLSConfigNotValidWorkerFailConnectMasterWorkerWaitRelayCatchupGTIDWorkerRelayConfigChangingWorkerRouteTableDupMatchWorkerUpdateSubTaskConfigWorkerValidatorNotPausedWorkerServerClosedTracerParseFlagSetTracerConfigTomlTransformTracerConfigInvalidFlagTracerTraceEventNotFoundTracerTraceIDNotProvidedTracerParamNotValidTracerPostMethodOnlyTracerEventAssertionFailTracerEventTypeNotValidTracerStartServiceHAFailTxnOperationHAInvalidItemHAFailWatchEtcdHAFailLeaseOperationHAFailKeepaliveValidatorLoadPersistedDataValidatorPersistDataValidatorGetEventValidatorProcessRowEventValidatorValidateChangeValidatorNotFoundValidatorPanicValidatorTooMuchPendingSchemaTrackerInvalidJSONSchemaTrackerCannotCreateSchemaSchemaTrackerCannotCreateTableSchemaTrackerCannotSerializeSchemaTrackerCannotGetTableSchemaTrackerCannotExecDDLSchemaTrackerCannotFetchDownstreamTableSchemaTrackerCannotParseDownstreamTableSchemaTrackerInvalidCreateTableStmtSchemaTrackerRestoreStmtFailSchemaTrackerCannotDropTableSchemaTrackerInitSchemaTrackerMarshalJSONSchemaTrackerUnMarshalJSONSchemaTrackerUnSchemaNotExistSchemaTrackerCannotSetDownstreamSQLModeSchemaTrackerCannotInitDownstreamParserSchemaTrackerCannotMockDownstreamTableSchemaTrackerCannotFetchDownstreamCreateTableStmtSchemaTrackerIsClosedSchedulerNotStartedSchedulerStartedSchedulerWorkerExistSchedulerWorkerNotExistSchedulerWorkerOnlineSchedulerWorkerInvalidTransSchedulerSourceCfgExistSchedulerSourceCfgNotExistSchedulerSourcesUnboundSchedulerSourceOpTaskExistSchedulerRelayStageInvalidUpdateSchedulerRelayStageSourceNotExistSchedulerMultiTaskSchedulerSubTaskExistSchedulerSubTaskStageInvalidUpdateSchedulerSubTaskOpTaskNotExistSchedulerSubTaskOpSourceNotExistSchedulerTaskNotExistSchedulerRequireRunningTaskInSyncUnitSchedulerRelayWorkersBusySchedulerRelayWorkersBoundSchedulerRelayWorkersWrongRelaySchedulerSourceOpRelayExistSchedulerLatchInUseSchedulerSourceCfgUpdateSchedulerWrongWorkerInputS
chedulerCantTransferToRelayWorkerSchedulerStartRelayOnSpecifiedSchedulerStopRelayOnSpecifiedSchedulerStartRelayOnBoundSchedulerStopRelayOnBoundSchedulerPauseTaskForTransferSourceSchedulerWorkerNotFreeSchedulerSubTaskNotExistSchedulerSubTaskCfgUpdateCtlGRPCCreateConnCtlInvalidTLSCfgCtlLoadTLSCfgOpenAPICommonOpenAPITaskSourceNotFoundNotSet" +const _ErrCode_name = "DBDriverErrorDBBadConnDBInvalidConnDBUnExpectDBQueryFailedDBExecuteFailedParseMydumperMetaGetFileSizeDropMultipleTablesRenameMultipleTablesAlterMultipleTablesParseSQLUnknownTypeDDLRestoreASTNodeParseGTIDNotSupportedFlavorNotMySQLGTIDNotMariaDBGTIDNotUUIDStringMariaDBDomainIDInvalidServerIDGetSQLModeFromStrVerifySQLOperateArgsStatFileSizeReaderAlreadyRunningReaderAlreadyStartedReaderStateCannotCloseReaderShouldStartSyncEmptyRelayDirReadDirBaseFileNotFoundBinFileCmpCondNotSupportBinlogFileNotValidBinlogFilesNotFoundGetRelayLogStatAddWatchForRelayLogDirWatcherStartWatcherChanClosedWatcherChanRecvErrorRelayLogFileSizeSmallerBinlogFileNotSpecifiedNoRelayLogMatchPosFirstRelayLogNotMatchPosParserParseRelayLogNoSubdirToSwitchNeedSyncAgainSyncClosedSchemaTableNameNotValidGenTableRouterEncryptSecretKeyNotValidEncryptGenCipherEncryptGenIVCiphertextLenNotValidCiphertextContextNotValidInvalidBinlogPosStrEncCipherTextBase64DecodeBinlogWriteBinaryDataBinlogWriteDataToBufferBinlogHeaderLengthNotValidBinlogEventDecodeBinlogEmptyNextBinNameBinlogParseSIDBinlogEmptyGTIDBinlogGTIDSetNotValidBinlogGTIDMySQLNotValidBinlogGTIDMariaDBNotValidBinlogMariaDBServerIDMismatchBinlogOnlyOneGTIDSupportBinlogOnlyOneIntervalInUUIDBinlogIntervalValueNotValidBinlogEmptyQueryBinlogTableMapEvNotValidBinlogExpectFormatDescEvBinlogExpectTableMapEvBinlogExpectRowsEvBinlogUnexpectedEvBinlogParseSingleEvBinlogEventTypeNotValidBinlogEventNoRowsBinlogEventNoColumnsBinlogEventRowLengthNotEqBinlogColumnTypeNotSupportBinlogGoMySQLTypeNotSupportBinlogColumnTypeMisMatchBinlogDummyEvSizeTooSmallBinlogFlavorNotSupportBinlogDMLEmptyDataBinlogLatestGTIDNotInPrevBinlogReadFileByGTIDBinlogWriterNotStateNewBinlogWriterStateCannotCloseBinlogWriterNeedStartBinlogWriterOpenFileBinlogWriterGetFileStatBinlogWriterWriteDataLenBinlogWriterFileNotOpenedBinlogWriterFileSyncBinlogPrevGTIDEvNotValidBinlogDecodeMySQLGTIDSetBinlogNeedMariaDBGTIDSetBinlogParseMariaDBGTIDSetBinlogMariaDBAddGTIDSetTracingEventDataNotValidTracingUploadDataTracingEventTypeNotValidTracingGetTraceCodeTracingDataChecksumTracingGetTSOBackoffArgsNotValidInitLoggerFailGTIDTruncateInvalidRelayLogGivenPosTooBigElectionCampaignFailElectionGetLeaderIDFailBinlogInvalidFilenameWithUUIDSuffixDecodeEtcdKeyFailShardDDLOptimismTrySyncFailConnInvalidTLSConfigConnRegistryTLSConfigUpgradeVersionEtcdFailInvalidV1WorkerMetaPathFailUpdateV1DBSchemaBinlogStatusVarsParseVerifyHandleErrorArgsRewriteSQLNoUUIDDirMatchGTIDNoRelayPosMatchGTIDReaderReachEndOfFileMetadataNoBinlogLocPreviousGTIDNotExistNoMasterStatusBinlogNotLogColumnShardDDLOptimismNeedSkipAndRedirectShardDDLOptimismAddNotFullyDroppedColumnSyncerCancelledDDLIncorrectReturnColumnsNumConfigCheckItemNotSupportConfigTomlTransformConfigYamlTransformConfigTaskNameEmptyConfigEmptySourceIDConfigTooLongSourceIDConfigOnlineSchemeNotSupportConfigInvalidTimezoneConfigParseFlagSetConfigDecryptDBPasswordConfigMetaInvalidConfigMySQLInstNotFoundConfigMySQLInstsAtLeastOneConfigMySQLInstSameSourceIDConfigMydumperCfgConflictConfigLoaderCfgConflictConfigSyncerCfgConflictConfigReadCfgFromFileConfigNeedUniqueTaskNameConfigInvalidTaskModeConfigNeedTargetDBConfigMetadataNotSetConfigRouteRuleNotFoundConfigFilte
rRuleNotFoundConfigColumnMappingNotFoundConfigBAListNotFoundConfigMydumperCfgNotFoundConfigMydumperPathNotValidConfigLoaderCfgNotFoundConfigSyncerCfgNotFoundConfigSourceIDNotFoundConfigDuplicateCfgItemConfigShardModeNotSupportConfigMoreThanOneConfigEtcdParseConfigMissingForBoundConfigBinlogEventFilterConfigGlobalConfigsUnusedConfigExprFilterManyExprConfigExprFilterNotFoundConfigExprFilterWrongGrammarConfigExprFilterEmptyNameConfigCheckerMaxTooSmallConfigGenBAListConfigGenTableRouterConfigGenColumnMappingConfigInvalidChunkFileSizeConfigOnlineDDLInvalidRegexConfigOnlineDDLMistakeRegexConfigOpenAPITaskConfigExistConfigOpenAPITaskConfigNotExistCollationCompatibleNotSupportConfigInvalidLoadModeConfigInvalidLoadDuplicateResolutionConfigValidationModeContinuousValidatorCfgNotFoundConfigStartTimeTooLateConfigLoaderDirInvalidConfigLoaderS3NotSupportConfigInvalidSafeModeDurationConfigConfictSafeModeDurationAndSafeModeConfigInvalidLoadPhysicalDuplicateResolutionConfigColumnMappingDeprecatedBinlogExtractPositionBinlogInvalidFilenameBinlogParsePosFromStrCheckpointInvalidTaskModeCheckpointSaveInvalidPosCheckpointInvalidTableFileCheckpointDBNotExistInFileCheckpointTableNotExistInFileCheckpointRestoreCountGreaterTaskCheckSameTableNameTaskCheckFailedOpenDBTaskCheckGenTableRouterTaskCheckGenColumnMappingTaskCheckSyncConfigErrorTaskCheckGenBAListSourceCheckGTIDRelayParseUUIDIndexRelayParseUUIDSuffixRelayUUIDWithSuffixNotFoundRelayGenFakeRotateEventRelayNoValidRelaySubDirRelayUUIDSuffixNotValidRelayUUIDSuffixLessThanPrevRelayLoadMetaDataRelayBinlogNameNotValidRelayNoCurrentUUIDRelayFlushLocalMetaRelayUpdateIndexFileRelayLogDirpathEmptyRelayReaderNotStateNewRelayReaderStateCannotCloseRelayReaderNeedStartRelayTCPReaderStartSyncRelayTCPReaderNilGTIDRelayTCPReaderStartSyncGTIDRelayTCPReaderGetEventRelayWriterNotStateNewRelayWriterStateCannotCloseRelayWriterNeedStartRelayWriterNotOpenedRelayWriterExpectRotateEvRelayWriterRotateEvWithNoWriterRelayWriterStatusNotValidRelayWriterGetFileStatRelayWriterLatestPosGTFileSizeRelayWriterFileOperateRelayCheckBinlogFileHeaderExistRelayCheckFormatDescEventExistRelayCheckFormatDescEventParseEvRelayCheckIsDuplicateEventRelayUpdateGTIDRelayNeedPrevGTIDEvBeforeGTIDEvRelayNeedMaGTIDListEvBeforeGTIDEvRelayMkdirRelaySwitchMasterNeedGTIDRelayThisStrategyIsPurgingRelayOtherStrategyIsPurgingRelayPurgeIsForbiddenRelayNoActiveRelayLogRelayPurgeRequestNotValidRelayTrimUUIDNotFoundRelayRemoveFileFailRelayPurgeArgsNotValidPreviousGTIDsNotValidRotateEventWithDifferentServerIDDumpUnitRuntimeDumpUnitGenTableRouterDumpUnitGenBAListDumpUnitGlobalLockLoadUnitCreateSchemaFileLoadUnitInvalidFileEndingLoadUnitParseQuoteValuesLoadUnitDoColumnMappingLoadUnitReadSchemaFileLoadUnitParseStatementLoadUnitNotCreateTableLoadUnitDispatchSQLFromFileLoadUnitInvalidInsertSQLLoadUnitGenTableRouterLoadUnitGenColumnMappingLoadUnitNoDBFileLoadUnitNoTableFileLoadUnitDumpDirNotFoundLoadUnitDuplicateTableFileLoadUnitGenBAListLoadTaskWorkerNotMatchLoadCheckPointNotMatchLoadLightningRuntimeLoadLightningHasDupSyncerUnitPanicSyncUnitInvalidTableNameSyncUnitTableNameQuerySyncUnitNotSupportedDMLSyncUnitAddTableInShardingSyncUnitDropSchemaTableInShardingSyncUnitInvalidShardMetaSyncUnitDDLWrongSequenceSyncUnitDDLActiveIndexLargerSyncUnitDupTableGroupSyncUnitShardingGroupNotFoundSyncUnitSafeModeSetCountSyncUnitCausalityConflictSyncUnitDMLStatementFoundSyncerUnitBinlogEventFilterSyncerUnitInvalidReplicaEventSyncerUnitParseStmtSyncerUnitUUIDNotLatestSyncerUnitDDLExecChanCloseOrBusySyncerUnitDDLChanDoneSyncerUnitDDLChanCanceledSync
erUnitDDLOnMultipleTableSyncerUnitInjectDDLOnlySyncerUnitInjectDDLWithoutSchemaSyncerUnitNotSupportedOperateSyncerUnitNilOperatorReqSyncerUnitDMLColumnNotMatchSyncerUnitDMLOldNewValueMismatchSyncerUnitDMLPruneColumnMismatchSyncerUnitGenBinlogEventFilterSyncerUnitGenTableRouterSyncerUnitGenColumnMappingSyncerUnitDoColumnMappingSyncerUnitCacheKeyNotFoundSyncerUnitHeartbeatCheckConfigSyncerUnitHeartbeatRecordExistsSyncerUnitHeartbeatRecordNotFoundSyncerUnitHeartbeatRecordNotValidSyncerUnitOnlineDDLInvalidMetaSyncerUnitOnlineDDLSchemeNotSupportSyncerUnitOnlineDDLOnMultipleTableSyncerUnitGhostApplyEmptyTableSyncerUnitGhostRenameTableNotValidSyncerUnitGhostRenameToGhostTableSyncerUnitGhostRenameGhostTblToOtherSyncerUnitGhostOnlineDDLOnGhostTblSyncerUnitPTApplyEmptyTableSyncerUnitPTRenameTableNotValidSyncerUnitPTRenameToPTTableSyncerUnitPTRenamePTTblToOtherSyncerUnitPTOnlineDDLOnPTTblSyncerUnitRemoteSteamerWithGTIDSyncerUnitRemoteSteamerStartSyncSyncerUnitGetTableFromDBSyncerUnitFirstEndPosNotFoundSyncerUnitResolveCasualityFailSyncerUnitReopenStreamNotSupportSyncerUnitUpdateConfigInShardingSyncerUnitExecWithNoBlockingDDLSyncerUnitGenBAListSyncerUnitHandleDDLFailedSyncerShardDDLConflictSyncerFailpointSyncerEventSyncerOperatorNotExistSyncerEventNotExistSyncerParseDDLSyncerUnsupportedStmtSyncerGetEventSyncerDownstreamTableNotFoundSyncerReprocessWithSafeModeFailMasterSQLOpNilRequestMasterSQLOpNotSupportMasterSQLOpWithoutShardingMasterGRPCCreateConnMasterGRPCSendOnCloseConnMasterGRPCClientCloseMasterGRPCInvalidReqTypeMasterGRPCRequestErrorMasterDeployMapperVerifyMasterConfigParseFlagSetMasterConfigUnknownItemMasterConfigInvalidFlagMasterConfigTomlTransformMasterConfigTimeoutParseMasterConfigUpdateCfgFileMasterShardingDDLDiffMasterStartServiceMasterNoEmitTokenMasterLockNotFoundMasterLockIsResolvingMasterWorkerCliNotFoundMasterWorkerNotWaitLockMasterHandleSQLReqFailMasterOwnerExecDDLMasterPartWorkerExecDDLFailMasterWorkerExistDDLLockMasterGetWorkerCfgExtractorMasterTaskConfigExtractorMasterWorkerArgsExtractorMasterQueryWorkerConfigMasterOperNotFoundMasterOperRespNotSuccessMasterOperRequestTimeoutMasterHandleHTTPApisMasterHostPortNotValidMasterGetHostnameFailMasterGenEmbedEtcdConfigFailMasterStartEmbedEtcdFailMasterParseURLFailMasterJoinEmbedEtcdFailMasterInvalidOperateOpMasterAdvertiseAddrNotValidMasterRequestIsNotForwardToLeaderMasterIsNotAsyncRequestMasterFailToGetExpectResultMasterPessimistNotStartedMasterOptimistNotStartedMasterMasterNameNotExistMasterInvalidOfflineTypeMasterAdvertisePeerURLsNotValidMasterTLSConfigNotValidMasterBoundChangingMasterFailToImportFromV10xMasterInconsistentOptimistDDLsAndInfoMasterOptimisticTableInfobeforeNotExistMasterOptimisticDownstreamMetaNotFoundMasterInvalidClusterIDMasterStartTaskWorkerParseFlagSetWorkerInvalidFlagWorkerDecodeConfigFromFileWorkerUndecodedItemFromFileWorkerNeedSourceIDWorkerTooLongSourceIDWorkerRelayBinlogNameWorkerWriteConfigFileWorkerLogInvalidHandlerWorkerLogPointerInvalidWorkerLogFetchPointerWorkerLogUnmarshalPointerWorkerLogClearPointerWorkerLogTaskKeyNotValidWorkerLogUnmarshalTaskKeyWorkerLogFetchLogIterWorkerLogGetTaskLogWorkerLogUnmarshalBinaryWorkerLogForwardPointerWorkerLogMarshalTaskWorkerLogSaveTaskWorkerLogDeleteKVWorkerLogDeleteKVIterWorkerLogUnmarshalTaskMetaWorkerLogFetchTaskFromMetaWorkerLogVerifyTaskMetaWorkerLogSaveTaskMetaWorkerLogGetTaskMetaWorkerLogDeleteTaskMetaWorkerMetaTomlTransformWorkerMetaOldFileStatWorkerMetaOldReadFileWorkerMetaEncodeTaskWorkerMetaRemoveOldDirWorkerMetaTaskLogNotFoundWorkerMetaHandleTaskOrderWorkerMetaOp
enTxnWorkerMetaCommitTxnWorkerRelayStageNotValidWorkerRelayOperNotSupportWorkerOpenKVDBFileWorkerUpgradeCheckKVDirWorkerMarshalVerBinaryWorkerUnmarshalVerBinaryWorkerGetVersionFromKVWorkerSaveVersionToKVWorkerVerAutoDowngradeWorkerStartServiceWorkerAlreadyClosedWorkerNotRunningStageWorkerNotPausedStageWorkerUpdateTaskStageWorkerMigrateStopRelayWorkerSubTaskNotFoundWorkerSubTaskExistsWorkerOperSyncUnitOnlyWorkerRelayUnitStageWorkerNoSyncerRunningWorkerCannotUpdateSourceIDWorkerNoAvailUnitsWorkerDDLLockInfoNotFoundWorkerDDLLockInfoExistsWorkerCacheDDLInfoExistsWorkerExecSkipDDLConflictWorkerExecDDLSyncerOnlyWorkerExecDDLTimeoutWorkerWaitRelayCatchupTimeoutWorkerRelayIsPurgingWorkerHostPortNotValidWorkerNoStartWorkerAlreadyStartedWorkerSourceNotMatchWorkerFailToGetSubtaskConfigFromEtcdWorkerFailToGetSourceConfigFromEtcdWorkerDDLLockOpNotFoundWorkerTLSConfigNotValidWorkerFailConnectMasterWorkerWaitRelayCatchupGTIDWorkerRelayConfigChangingWorkerRouteTableDupMatchWorkerUpdateSubTaskConfigWorkerValidatorNotPausedWorkerServerClosedTracerParseFlagSetTracerConfigTomlTransformTracerConfigInvalidFlagTracerTraceEventNotFoundTracerTraceIDNotProvidedTracerParamNotValidTracerPostMethodOnlyTracerEventAssertionFailTracerEventTypeNotValidTracerStartServiceHAFailTxnOperationHAInvalidItemHAFailWatchEtcdHAFailLeaseOperationHAFailKeepaliveValidatorLoadPersistedDataValidatorPersistDataValidatorGetEventValidatorProcessRowEventValidatorValidateChangeValidatorNotFoundValidatorPanicValidatorTooMuchPendingSchemaTrackerInvalidJSONSchemaTrackerCannotCreateSchemaSchemaTrackerCannotCreateTableSchemaTrackerCannotSerializeSchemaTrackerCannotGetTableSchemaTrackerCannotExecDDLSchemaTrackerCannotFetchDownstreamTableSchemaTrackerCannotParseDownstreamTableSchemaTrackerInvalidCreateTableStmtSchemaTrackerRestoreStmtFailSchemaTrackerCannotDropTableSchemaTrackerInitSchemaTrackerMarshalJSONSchemaTrackerUnMarshalJSONSchemaTrackerUnSchemaNotExistSchemaTrackerCannotSetDownstreamSQLModeSchemaTrackerCannotInitDownstreamParserSchemaTrackerCannotMockDownstreamTableSchemaTrackerCannotFetchDownstreamCreateTableStmtSchemaTrackerIsClosedSchedulerNotStartedSchedulerStartedSchedulerWorkerExistSchedulerWorkerNotExistSchedulerWorkerOnlineSchedulerWorkerInvalidTransSchedulerSourceCfgExistSchedulerSourceCfgNotExistSchedulerSourcesUnboundSchedulerSourceOpTaskExistSchedulerRelayStageInvalidUpdateSchedulerRelayStageSourceNotExistSchedulerMultiTaskSchedulerSubTaskExistSchedulerSubTaskStageInvalidUpdateSchedulerSubTaskOpTaskNotExistSchedulerSubTaskOpSourceNotExistSchedulerTaskNotExistSchedulerRequireRunningTaskInSyncUnitSchedulerRelayWorkersBusySchedulerRelayWorkersBoundSchedulerRelayWorkersWrongRelaySchedulerSourceOpRelayExistSchedulerLatchInUseSchedulerSourceCfgUpdateSchedulerWrongWorkerInputSchedulerCantTransferToRelayWorkerSchedulerStartRelayOnSpecifiedSchedulerStopRelayOnSpecifiedSchedulerStartRelayOnBoundSchedulerStopRelayOnBoundSchedulerPauseTaskForTransferSourceSchedulerWorkerNotFreeSchedulerSubTaskNotExistSchedulerSubTaskCfgUpdateCtlGRPCCreateConnCtlInvalidTLSCfgCtlLoadTLSCfgOpenAPICommonOpenAPITaskSourceNotFoundNotSet" var _ErrCode_map = map[ErrCode]string{ 10001: _ErrCode_name[0:13], @@ -794,391 +795,392 @@ var _ErrCode_map = map[ErrCode]string{ 20060: _ErrCode_name[4042:4071], 20061: _ErrCode_name[4071:4111], 20062: _ErrCode_name[4111:4155], - 22001: _ErrCode_name[4155:4176], - 22002: _ErrCode_name[4176:4197], - 22003: _ErrCode_name[4197:4218], - 24001: _ErrCode_name[4218:4243], - 24002: _ErrCode_name[4243:4267], - 24003: 
_ErrCode_name[4267:4293], - 24004: _ErrCode_name[4293:4319], - 24005: _ErrCode_name[4319:4348], - 24006: _ErrCode_name[4348:4377], - 26001: _ErrCode_name[4377:4399], - 26002: _ErrCode_name[4399:4420], - 26003: _ErrCode_name[4420:4443], - 26004: _ErrCode_name[4443:4468], - 26005: _ErrCode_name[4468:4492], - 26006: _ErrCode_name[4492:4510], - 26007: _ErrCode_name[4510:4525], - 28001: _ErrCode_name[4525:4544], - 28002: _ErrCode_name[4544:4564], - 28003: _ErrCode_name[4564:4591], - 28004: _ErrCode_name[4591:4614], - 28005: _ErrCode_name[4614:4637], - 30001: _ErrCode_name[4637:4660], - 30002: _ErrCode_name[4660:4687], - 30003: _ErrCode_name[4687:4704], - 30004: _ErrCode_name[4704:4727], - 30005: _ErrCode_name[4727:4745], - 30006: _ErrCode_name[4745:4764], - 30007: _ErrCode_name[4764:4784], - 30008: _ErrCode_name[4784:4804], - 30009: _ErrCode_name[4804:4826], - 30010: _ErrCode_name[4826:4853], - 30011: _ErrCode_name[4853:4873], - 30012: _ErrCode_name[4873:4896], - 30013: _ErrCode_name[4896:4917], - 30014: _ErrCode_name[4917:4944], - 30015: _ErrCode_name[4944:4966], - 30016: _ErrCode_name[4966:4988], - 30017: _ErrCode_name[4988:5015], - 30018: _ErrCode_name[5015:5035], - 30019: _ErrCode_name[5035:5055], - 30020: _ErrCode_name[5055:5080], - 30021: _ErrCode_name[5080:5111], - 30022: _ErrCode_name[5111:5136], - 30023: _ErrCode_name[5136:5158], - 30024: _ErrCode_name[5158:5188], - 30025: _ErrCode_name[5188:5210], - 30026: _ErrCode_name[5210:5241], - 30027: _ErrCode_name[5241:5271], - 30028: _ErrCode_name[5271:5303], - 30029: _ErrCode_name[5303:5329], - 30030: _ErrCode_name[5329:5344], - 30031: _ErrCode_name[5344:5375], - 30032: _ErrCode_name[5375:5408], - 30033: _ErrCode_name[5408:5418], - 30034: _ErrCode_name[5418:5443], - 30035: _ErrCode_name[5443:5469], - 30036: _ErrCode_name[5469:5496], - 30037: _ErrCode_name[5496:5517], - 30038: _ErrCode_name[5517:5538], - 30039: _ErrCode_name[5538:5563], - 30040: _ErrCode_name[5563:5584], - 30041: _ErrCode_name[5584:5603], - 30042: _ErrCode_name[5603:5625], - 30043: _ErrCode_name[5625:5646], - 30044: _ErrCode_name[5646:5678], - 32001: _ErrCode_name[5678:5693], - 32002: _ErrCode_name[5693:5715], - 32003: _ErrCode_name[5715:5732], - 32004: _ErrCode_name[5732:5750], - 34001: _ErrCode_name[5750:5774], - 34002: _ErrCode_name[5774:5799], - 34003: _ErrCode_name[5799:5823], - 34004: _ErrCode_name[5823:5846], - 34005: _ErrCode_name[5846:5868], - 34006: _ErrCode_name[5868:5890], - 34007: _ErrCode_name[5890:5912], - 34008: _ErrCode_name[5912:5939], - 34009: _ErrCode_name[5939:5963], - 34010: _ErrCode_name[5963:5985], - 34011: _ErrCode_name[5985:6009], - 34012: _ErrCode_name[6009:6025], - 34013: _ErrCode_name[6025:6044], - 34014: _ErrCode_name[6044:6067], - 34015: _ErrCode_name[6067:6093], - 34016: _ErrCode_name[6093:6110], - 34017: _ErrCode_name[6110:6132], - 34018: _ErrCode_name[6132:6154], - 34019: _ErrCode_name[6154:6174], - 34020: _ErrCode_name[6174:6193], - 36001: _ErrCode_name[6193:6208], - 36002: _ErrCode_name[6208:6232], - 36003: _ErrCode_name[6232:6254], - 36004: _ErrCode_name[6254:6277], - 36005: _ErrCode_name[6277:6303], - 36006: _ErrCode_name[6303:6336], - 36007: _ErrCode_name[6336:6360], - 36008: _ErrCode_name[6360:6384], - 36009: _ErrCode_name[6384:6412], - 36010: _ErrCode_name[6412:6433], - 36011: _ErrCode_name[6433:6462], - 36012: _ErrCode_name[6462:6486], - 36013: _ErrCode_name[6486:6511], - 36014: _ErrCode_name[6511:6536], - 36015: _ErrCode_name[6536:6563], - 36016: _ErrCode_name[6563:6592], - 36017: _ErrCode_name[6592:6611], - 36018: 
_ErrCode_name[6611:6634], - 36019: _ErrCode_name[6634:6666], - 36020: _ErrCode_name[6666:6687], - 36021: _ErrCode_name[6687:6712], - 36022: _ErrCode_name[6712:6740], - 36023: _ErrCode_name[6740:6763], - 36024: _ErrCode_name[6763:6795], - 36025: _ErrCode_name[6795:6824], - 36026: _ErrCode_name[6824:6848], - 36027: _ErrCode_name[6848:6875], - 36028: _ErrCode_name[6875:6907], - 36029: _ErrCode_name[6907:6939], - 36030: _ErrCode_name[6939:6969], - 36031: _ErrCode_name[6969:6993], - 36032: _ErrCode_name[6993:7019], - 36033: _ErrCode_name[7019:7044], - 36034: _ErrCode_name[7044:7070], - 36035: _ErrCode_name[7070:7100], - 36036: _ErrCode_name[7100:7131], - 36037: _ErrCode_name[7131:7164], - 36038: _ErrCode_name[7164:7197], - 36039: _ErrCode_name[7197:7227], - 36040: _ErrCode_name[7227:7262], - 36041: _ErrCode_name[7262:7296], - 36042: _ErrCode_name[7296:7326], - 36043: _ErrCode_name[7326:7360], - 36044: _ErrCode_name[7360:7393], - 36045: _ErrCode_name[7393:7429], - 36046: _ErrCode_name[7429:7463], - 36047: _ErrCode_name[7463:7490], - 36048: _ErrCode_name[7490:7521], - 36049: _ErrCode_name[7521:7548], - 36050: _ErrCode_name[7548:7578], - 36051: _ErrCode_name[7578:7606], - 36052: _ErrCode_name[7606:7637], - 36053: _ErrCode_name[7637:7669], - 36054: _ErrCode_name[7669:7693], - 36055: _ErrCode_name[7693:7722], - 36056: _ErrCode_name[7722:7752], - 36057: _ErrCode_name[7752:7784], - 36058: _ErrCode_name[7784:7816], - 36059: _ErrCode_name[7816:7847], - 36060: _ErrCode_name[7847:7866], - 36061: _ErrCode_name[7866:7891], - 36062: _ErrCode_name[7891:7913], - 36063: _ErrCode_name[7913:7928], - 36064: _ErrCode_name[7928:7939], - 36065: _ErrCode_name[7939:7961], - 36066: _ErrCode_name[7961:7980], - 36067: _ErrCode_name[7980:7994], - 36068: _ErrCode_name[7994:8015], - 36069: _ErrCode_name[8015:8029], - 36070: _ErrCode_name[8029:8058], - 36071: _ErrCode_name[8058:8089], - 38001: _ErrCode_name[8089:8110], - 38002: _ErrCode_name[8110:8131], - 38003: _ErrCode_name[8131:8157], - 38004: _ErrCode_name[8157:8177], - 38005: _ErrCode_name[8177:8202], - 38006: _ErrCode_name[8202:8223], - 38007: _ErrCode_name[8223:8247], - 38008: _ErrCode_name[8247:8269], - 38009: _ErrCode_name[8269:8293], - 38010: _ErrCode_name[8293:8317], - 38011: _ErrCode_name[8317:8340], - 38012: _ErrCode_name[8340:8363], - 38013: _ErrCode_name[8363:8388], - 38014: _ErrCode_name[8388:8412], - 38015: _ErrCode_name[8412:8437], - 38016: _ErrCode_name[8437:8458], - 38017: _ErrCode_name[8458:8476], - 38018: _ErrCode_name[8476:8493], - 38019: _ErrCode_name[8493:8511], - 38020: _ErrCode_name[8511:8532], - 38021: _ErrCode_name[8532:8555], - 38022: _ErrCode_name[8555:8578], - 38023: _ErrCode_name[8578:8600], - 38024: _ErrCode_name[8600:8618], - 38025: _ErrCode_name[8618:8645], - 38026: _ErrCode_name[8645:8669], - 38027: _ErrCode_name[8669:8696], - 38028: _ErrCode_name[8696:8721], - 38029: _ErrCode_name[8721:8746], - 38030: _ErrCode_name[8746:8769], - 38031: _ErrCode_name[8769:8787], - 38032: _ErrCode_name[8787:8811], - 38033: _ErrCode_name[8811:8835], - 38034: _ErrCode_name[8835:8855], - 38035: _ErrCode_name[8855:8877], - 38036: _ErrCode_name[8877:8898], - 38037: _ErrCode_name[8898:8926], - 38038: _ErrCode_name[8926:8950], - 38039: _ErrCode_name[8950:8968], - 38040: _ErrCode_name[8968:8991], - 38041: _ErrCode_name[8991:9013], - 38042: _ErrCode_name[9013:9040], - 38043: _ErrCode_name[9040:9073], - 38044: _ErrCode_name[9073:9096], - 38045: _ErrCode_name[9096:9123], - 38046: _ErrCode_name[9123:9148], - 38047: _ErrCode_name[9148:9172], - 38048: 
_ErrCode_name[9172:9196], - 38049: _ErrCode_name[9196:9220], - 38050: _ErrCode_name[9220:9251], - 38051: _ErrCode_name[9251:9274], - 38052: _ErrCode_name[9274:9293], - 38053: _ErrCode_name[9293:9319], - 38054: _ErrCode_name[9319:9356], - 38055: _ErrCode_name[9356:9395], - 38056: _ErrCode_name[9395:9433], - 38057: _ErrCode_name[9433:9455], - 38058: _ErrCode_name[9455:9470], - 40001: _ErrCode_name[9470:9488], - 40002: _ErrCode_name[9488:9505], - 40003: _ErrCode_name[9505:9531], - 40004: _ErrCode_name[9531:9558], - 40005: _ErrCode_name[9558:9576], - 40006: _ErrCode_name[9576:9597], - 40007: _ErrCode_name[9597:9618], - 40008: _ErrCode_name[9618:9639], - 40009: _ErrCode_name[9639:9662], - 40010: _ErrCode_name[9662:9685], - 40011: _ErrCode_name[9685:9706], - 40012: _ErrCode_name[9706:9731], - 40013: _ErrCode_name[9731:9752], - 40014: _ErrCode_name[9752:9776], - 40015: _ErrCode_name[9776:9801], - 40016: _ErrCode_name[9801:9822], - 40017: _ErrCode_name[9822:9841], - 40018: _ErrCode_name[9841:9865], - 40019: _ErrCode_name[9865:9888], - 40020: _ErrCode_name[9888:9908], - 40021: _ErrCode_name[9908:9925], - 40022: _ErrCode_name[9925:9942], - 40023: _ErrCode_name[9942:9963], - 40024: _ErrCode_name[9963:9989], - 40025: _ErrCode_name[9989:10015], - 40026: _ErrCode_name[10015:10038], - 40027: _ErrCode_name[10038:10059], - 40028: _ErrCode_name[10059:10079], - 40029: _ErrCode_name[10079:10102], - 40030: _ErrCode_name[10102:10125], - 40031: _ErrCode_name[10125:10146], - 40032: _ErrCode_name[10146:10167], - 40033: _ErrCode_name[10167:10187], - 40034: _ErrCode_name[10187:10209], - 40035: _ErrCode_name[10209:10234], - 40036: _ErrCode_name[10234:10259], - 40037: _ErrCode_name[10259:10276], - 40038: _ErrCode_name[10276:10295], - 40039: _ErrCode_name[10295:10319], - 40040: _ErrCode_name[10319:10344], - 40041: _ErrCode_name[10344:10362], - 40042: _ErrCode_name[10362:10385], - 40043: _ErrCode_name[10385:10407], - 40044: _ErrCode_name[10407:10431], - 40045: _ErrCode_name[10431:10453], - 40046: _ErrCode_name[10453:10474], - 40047: _ErrCode_name[10474:10496], - 40048: _ErrCode_name[10496:10514], - 40049: _ErrCode_name[10514:10533], - 40050: _ErrCode_name[10533:10554], - 40051: _ErrCode_name[10554:10574], - 40052: _ErrCode_name[10574:10595], - 40053: _ErrCode_name[10595:10617], - 40054: _ErrCode_name[10617:10638], - 40055: _ErrCode_name[10638:10657], - 40056: _ErrCode_name[10657:10679], - 40057: _ErrCode_name[10679:10699], - 40058: _ErrCode_name[10699:10720], - 40059: _ErrCode_name[10720:10746], - 40060: _ErrCode_name[10746:10764], - 40061: _ErrCode_name[10764:10789], - 40062: _ErrCode_name[10789:10812], - 40063: _ErrCode_name[10812:10836], - 40064: _ErrCode_name[10836:10861], - 40065: _ErrCode_name[10861:10884], - 40066: _ErrCode_name[10884:10904], - 40067: _ErrCode_name[10904:10933], - 40068: _ErrCode_name[10933:10953], - 40069: _ErrCode_name[10953:10975], - 40070: _ErrCode_name[10975:10988], - 40071: _ErrCode_name[10988:11008], - 40072: _ErrCode_name[11008:11028], - 40073: _ErrCode_name[11028:11064], - 40074: _ErrCode_name[11064:11099], - 40075: _ErrCode_name[11099:11122], - 40076: _ErrCode_name[11122:11145], - 40077: _ErrCode_name[11145:11168], - 40078: _ErrCode_name[11168:11194], - 40079: _ErrCode_name[11194:11219], - 40080: _ErrCode_name[11219:11243], - 40081: _ErrCode_name[11243:11268], - 40082: _ErrCode_name[11268:11292], - 40083: _ErrCode_name[11292:11310], - 42001: _ErrCode_name[11310:11328], - 42002: _ErrCode_name[11328:11353], - 42003: _ErrCode_name[11353:11376], - 42004: _ErrCode_name[11376:11400], - 42005: 
_ErrCode_name[11400:11424], - 42006: _ErrCode_name[11424:11443], - 42007: _ErrCode_name[11443:11463], - 42008: _ErrCode_name[11463:11487], - 42009: _ErrCode_name[11487:11510], - 42010: _ErrCode_name[11510:11528], - 42501: _ErrCode_name[11528:11546], - 42502: _ErrCode_name[11546:11559], - 42503: _ErrCode_name[11559:11574], - 42504: _ErrCode_name[11574:11594], - 42505: _ErrCode_name[11594:11609], - 43001: _ErrCode_name[11609:11635], - 43002: _ErrCode_name[11635:11655], - 43003: _ErrCode_name[11655:11672], - 43004: _ErrCode_name[11672:11696], - 43005: _ErrCode_name[11696:11719], - 43006: _ErrCode_name[11719:11736], - 43007: _ErrCode_name[11736:11750], - 43008: _ErrCode_name[11750:11773], - 44001: _ErrCode_name[11773:11797], - 44002: _ErrCode_name[11797:11828], - 44003: _ErrCode_name[11828:11858], - 44004: _ErrCode_name[11858:11886], - 44005: _ErrCode_name[11886:11913], - 44006: _ErrCode_name[11913:11939], - 44007: _ErrCode_name[11939:11978], - 44008: _ErrCode_name[11978:12017], - 44009: _ErrCode_name[12017:12052], - 44010: _ErrCode_name[12052:12080], - 44011: _ErrCode_name[12080:12108], - 44012: _ErrCode_name[12108:12125], - 44013: _ErrCode_name[12125:12149], - 44014: _ErrCode_name[12149:12175], - 44015: _ErrCode_name[12175:12204], - 44016: _ErrCode_name[12204:12243], - 44017: _ErrCode_name[12243:12282], - 44018: _ErrCode_name[12282:12320], - 44019: _ErrCode_name[12320:12369], - 44020: _ErrCode_name[12369:12390], - 46001: _ErrCode_name[12390:12409], - 46002: _ErrCode_name[12409:12425], - 46003: _ErrCode_name[12425:12445], - 46004: _ErrCode_name[12445:12468], - 46005: _ErrCode_name[12468:12489], - 46006: _ErrCode_name[12489:12516], - 46007: _ErrCode_name[12516:12539], - 46008: _ErrCode_name[12539:12565], - 46009: _ErrCode_name[12565:12588], - 46010: _ErrCode_name[12588:12614], - 46011: _ErrCode_name[12614:12646], - 46012: _ErrCode_name[12646:12679], - 46013: _ErrCode_name[12679:12697], - 46014: _ErrCode_name[12697:12718], - 46015: _ErrCode_name[12718:12752], - 46016: _ErrCode_name[12752:12782], - 46017: _ErrCode_name[12782:12814], - 46018: _ErrCode_name[12814:12835], - 46019: _ErrCode_name[12835:12872], - 46020: _ErrCode_name[12872:12897], - 46021: _ErrCode_name[12897:12923], - 46022: _ErrCode_name[12923:12954], - 46023: _ErrCode_name[12954:12981], - 46024: _ErrCode_name[12981:13000], - 46025: _ErrCode_name[13000:13024], - 46026: _ErrCode_name[13024:13049], - 46027: _ErrCode_name[13049:13083], - 46028: _ErrCode_name[13083:13113], - 46029: _ErrCode_name[13113:13142], - 46030: _ErrCode_name[13142:13168], - 46031: _ErrCode_name[13168:13193], - 46032: _ErrCode_name[13193:13228], - 46033: _ErrCode_name[13228:13250], - 46034: _ErrCode_name[13250:13274], - 46035: _ErrCode_name[13274:13299], - 48001: _ErrCode_name[13299:13316], - 48002: _ErrCode_name[13316:13332], - 48003: _ErrCode_name[13332:13345], - 49001: _ErrCode_name[13345:13358], - 49002: _ErrCode_name[13358:13383], - 50000: _ErrCode_name[13383:13389], + 20063: _ErrCode_name[4155:4184], + 22001: _ErrCode_name[4184:4205], + 22002: _ErrCode_name[4205:4226], + 22003: _ErrCode_name[4226:4247], + 24001: _ErrCode_name[4247:4272], + 24002: _ErrCode_name[4272:4296], + 24003: _ErrCode_name[4296:4322], + 24004: _ErrCode_name[4322:4348], + 24005: _ErrCode_name[4348:4377], + 24006: _ErrCode_name[4377:4406], + 26001: _ErrCode_name[4406:4428], + 26002: _ErrCode_name[4428:4449], + 26003: _ErrCode_name[4449:4472], + 26004: _ErrCode_name[4472:4497], + 26005: _ErrCode_name[4497:4521], + 26006: _ErrCode_name[4521:4539], + 26007: _ErrCode_name[4539:4554], + 28001: 
_ErrCode_name[4554:4573], + 28002: _ErrCode_name[4573:4593], + 28003: _ErrCode_name[4593:4620], + 28004: _ErrCode_name[4620:4643], + 28005: _ErrCode_name[4643:4666], + 30001: _ErrCode_name[4666:4689], + 30002: _ErrCode_name[4689:4716], + 30003: _ErrCode_name[4716:4733], + 30004: _ErrCode_name[4733:4756], + 30005: _ErrCode_name[4756:4774], + 30006: _ErrCode_name[4774:4793], + 30007: _ErrCode_name[4793:4813], + 30008: _ErrCode_name[4813:4833], + 30009: _ErrCode_name[4833:4855], + 30010: _ErrCode_name[4855:4882], + 30011: _ErrCode_name[4882:4902], + 30012: _ErrCode_name[4902:4925], + 30013: _ErrCode_name[4925:4946], + 30014: _ErrCode_name[4946:4973], + 30015: _ErrCode_name[4973:4995], + 30016: _ErrCode_name[4995:5017], + 30017: _ErrCode_name[5017:5044], + 30018: _ErrCode_name[5044:5064], + 30019: _ErrCode_name[5064:5084], + 30020: _ErrCode_name[5084:5109], + 30021: _ErrCode_name[5109:5140], + 30022: _ErrCode_name[5140:5165], + 30023: _ErrCode_name[5165:5187], + 30024: _ErrCode_name[5187:5217], + 30025: _ErrCode_name[5217:5239], + 30026: _ErrCode_name[5239:5270], + 30027: _ErrCode_name[5270:5300], + 30028: _ErrCode_name[5300:5332], + 30029: _ErrCode_name[5332:5358], + 30030: _ErrCode_name[5358:5373], + 30031: _ErrCode_name[5373:5404], + 30032: _ErrCode_name[5404:5437], + 30033: _ErrCode_name[5437:5447], + 30034: _ErrCode_name[5447:5472], + 30035: _ErrCode_name[5472:5498], + 30036: _ErrCode_name[5498:5525], + 30037: _ErrCode_name[5525:5546], + 30038: _ErrCode_name[5546:5567], + 30039: _ErrCode_name[5567:5592], + 30040: _ErrCode_name[5592:5613], + 30041: _ErrCode_name[5613:5632], + 30042: _ErrCode_name[5632:5654], + 30043: _ErrCode_name[5654:5675], + 30044: _ErrCode_name[5675:5707], + 32001: _ErrCode_name[5707:5722], + 32002: _ErrCode_name[5722:5744], + 32003: _ErrCode_name[5744:5761], + 32004: _ErrCode_name[5761:5779], + 34001: _ErrCode_name[5779:5803], + 34002: _ErrCode_name[5803:5828], + 34003: _ErrCode_name[5828:5852], + 34004: _ErrCode_name[5852:5875], + 34005: _ErrCode_name[5875:5897], + 34006: _ErrCode_name[5897:5919], + 34007: _ErrCode_name[5919:5941], + 34008: _ErrCode_name[5941:5968], + 34009: _ErrCode_name[5968:5992], + 34010: _ErrCode_name[5992:6014], + 34011: _ErrCode_name[6014:6038], + 34012: _ErrCode_name[6038:6054], + 34013: _ErrCode_name[6054:6073], + 34014: _ErrCode_name[6073:6096], + 34015: _ErrCode_name[6096:6122], + 34016: _ErrCode_name[6122:6139], + 34017: _ErrCode_name[6139:6161], + 34018: _ErrCode_name[6161:6183], + 34019: _ErrCode_name[6183:6203], + 34020: _ErrCode_name[6203:6222], + 36001: _ErrCode_name[6222:6237], + 36002: _ErrCode_name[6237:6261], + 36003: _ErrCode_name[6261:6283], + 36004: _ErrCode_name[6283:6306], + 36005: _ErrCode_name[6306:6332], + 36006: _ErrCode_name[6332:6365], + 36007: _ErrCode_name[6365:6389], + 36008: _ErrCode_name[6389:6413], + 36009: _ErrCode_name[6413:6441], + 36010: _ErrCode_name[6441:6462], + 36011: _ErrCode_name[6462:6491], + 36012: _ErrCode_name[6491:6515], + 36013: _ErrCode_name[6515:6540], + 36014: _ErrCode_name[6540:6565], + 36015: _ErrCode_name[6565:6592], + 36016: _ErrCode_name[6592:6621], + 36017: _ErrCode_name[6621:6640], + 36018: _ErrCode_name[6640:6663], + 36019: _ErrCode_name[6663:6695], + 36020: _ErrCode_name[6695:6716], + 36021: _ErrCode_name[6716:6741], + 36022: _ErrCode_name[6741:6769], + 36023: _ErrCode_name[6769:6792], + 36024: _ErrCode_name[6792:6824], + 36025: _ErrCode_name[6824:6853], + 36026: _ErrCode_name[6853:6877], + 36027: _ErrCode_name[6877:6904], + 36028: _ErrCode_name[6904:6936], + 36029: 
_ErrCode_name[6936:6968], + 36030: _ErrCode_name[6968:6998], + 36031: _ErrCode_name[6998:7022], + 36032: _ErrCode_name[7022:7048], + 36033: _ErrCode_name[7048:7073], + 36034: _ErrCode_name[7073:7099], + 36035: _ErrCode_name[7099:7129], + 36036: _ErrCode_name[7129:7160], + 36037: _ErrCode_name[7160:7193], + 36038: _ErrCode_name[7193:7226], + 36039: _ErrCode_name[7226:7256], + 36040: _ErrCode_name[7256:7291], + 36041: _ErrCode_name[7291:7325], + 36042: _ErrCode_name[7325:7355], + 36043: _ErrCode_name[7355:7389], + 36044: _ErrCode_name[7389:7422], + 36045: _ErrCode_name[7422:7458], + 36046: _ErrCode_name[7458:7492], + 36047: _ErrCode_name[7492:7519], + 36048: _ErrCode_name[7519:7550], + 36049: _ErrCode_name[7550:7577], + 36050: _ErrCode_name[7577:7607], + 36051: _ErrCode_name[7607:7635], + 36052: _ErrCode_name[7635:7666], + 36053: _ErrCode_name[7666:7698], + 36054: _ErrCode_name[7698:7722], + 36055: _ErrCode_name[7722:7751], + 36056: _ErrCode_name[7751:7781], + 36057: _ErrCode_name[7781:7813], + 36058: _ErrCode_name[7813:7845], + 36059: _ErrCode_name[7845:7876], + 36060: _ErrCode_name[7876:7895], + 36061: _ErrCode_name[7895:7920], + 36062: _ErrCode_name[7920:7942], + 36063: _ErrCode_name[7942:7957], + 36064: _ErrCode_name[7957:7968], + 36065: _ErrCode_name[7968:7990], + 36066: _ErrCode_name[7990:8009], + 36067: _ErrCode_name[8009:8023], + 36068: _ErrCode_name[8023:8044], + 36069: _ErrCode_name[8044:8058], + 36070: _ErrCode_name[8058:8087], + 36071: _ErrCode_name[8087:8118], + 38001: _ErrCode_name[8118:8139], + 38002: _ErrCode_name[8139:8160], + 38003: _ErrCode_name[8160:8186], + 38004: _ErrCode_name[8186:8206], + 38005: _ErrCode_name[8206:8231], + 38006: _ErrCode_name[8231:8252], + 38007: _ErrCode_name[8252:8276], + 38008: _ErrCode_name[8276:8298], + 38009: _ErrCode_name[8298:8322], + 38010: _ErrCode_name[8322:8346], + 38011: _ErrCode_name[8346:8369], + 38012: _ErrCode_name[8369:8392], + 38013: _ErrCode_name[8392:8417], + 38014: _ErrCode_name[8417:8441], + 38015: _ErrCode_name[8441:8466], + 38016: _ErrCode_name[8466:8487], + 38017: _ErrCode_name[8487:8505], + 38018: _ErrCode_name[8505:8522], + 38019: _ErrCode_name[8522:8540], + 38020: _ErrCode_name[8540:8561], + 38021: _ErrCode_name[8561:8584], + 38022: _ErrCode_name[8584:8607], + 38023: _ErrCode_name[8607:8629], + 38024: _ErrCode_name[8629:8647], + 38025: _ErrCode_name[8647:8674], + 38026: _ErrCode_name[8674:8698], + 38027: _ErrCode_name[8698:8725], + 38028: _ErrCode_name[8725:8750], + 38029: _ErrCode_name[8750:8775], + 38030: _ErrCode_name[8775:8798], + 38031: _ErrCode_name[8798:8816], + 38032: _ErrCode_name[8816:8840], + 38033: _ErrCode_name[8840:8864], + 38034: _ErrCode_name[8864:8884], + 38035: _ErrCode_name[8884:8906], + 38036: _ErrCode_name[8906:8927], + 38037: _ErrCode_name[8927:8955], + 38038: _ErrCode_name[8955:8979], + 38039: _ErrCode_name[8979:8997], + 38040: _ErrCode_name[8997:9020], + 38041: _ErrCode_name[9020:9042], + 38042: _ErrCode_name[9042:9069], + 38043: _ErrCode_name[9069:9102], + 38044: _ErrCode_name[9102:9125], + 38045: _ErrCode_name[9125:9152], + 38046: _ErrCode_name[9152:9177], + 38047: _ErrCode_name[9177:9201], + 38048: _ErrCode_name[9201:9225], + 38049: _ErrCode_name[9225:9249], + 38050: _ErrCode_name[9249:9280], + 38051: _ErrCode_name[9280:9303], + 38052: _ErrCode_name[9303:9322], + 38053: _ErrCode_name[9322:9348], + 38054: _ErrCode_name[9348:9385], + 38055: _ErrCode_name[9385:9424], + 38056: _ErrCode_name[9424:9462], + 38057: _ErrCode_name[9462:9484], + 38058: _ErrCode_name[9484:9499], + 40001: 
_ErrCode_name[9499:9517], + 40002: _ErrCode_name[9517:9534], + 40003: _ErrCode_name[9534:9560], + 40004: _ErrCode_name[9560:9587], + 40005: _ErrCode_name[9587:9605], + 40006: _ErrCode_name[9605:9626], + 40007: _ErrCode_name[9626:9647], + 40008: _ErrCode_name[9647:9668], + 40009: _ErrCode_name[9668:9691], + 40010: _ErrCode_name[9691:9714], + 40011: _ErrCode_name[9714:9735], + 40012: _ErrCode_name[9735:9760], + 40013: _ErrCode_name[9760:9781], + 40014: _ErrCode_name[9781:9805], + 40015: _ErrCode_name[9805:9830], + 40016: _ErrCode_name[9830:9851], + 40017: _ErrCode_name[9851:9870], + 40018: _ErrCode_name[9870:9894], + 40019: _ErrCode_name[9894:9917], + 40020: _ErrCode_name[9917:9937], + 40021: _ErrCode_name[9937:9954], + 40022: _ErrCode_name[9954:9971], + 40023: _ErrCode_name[9971:9992], + 40024: _ErrCode_name[9992:10018], + 40025: _ErrCode_name[10018:10044], + 40026: _ErrCode_name[10044:10067], + 40027: _ErrCode_name[10067:10088], + 40028: _ErrCode_name[10088:10108], + 40029: _ErrCode_name[10108:10131], + 40030: _ErrCode_name[10131:10154], + 40031: _ErrCode_name[10154:10175], + 40032: _ErrCode_name[10175:10196], + 40033: _ErrCode_name[10196:10216], + 40034: _ErrCode_name[10216:10238], + 40035: _ErrCode_name[10238:10263], + 40036: _ErrCode_name[10263:10288], + 40037: _ErrCode_name[10288:10305], + 40038: _ErrCode_name[10305:10324], + 40039: _ErrCode_name[10324:10348], + 40040: _ErrCode_name[10348:10373], + 40041: _ErrCode_name[10373:10391], + 40042: _ErrCode_name[10391:10414], + 40043: _ErrCode_name[10414:10436], + 40044: _ErrCode_name[10436:10460], + 40045: _ErrCode_name[10460:10482], + 40046: _ErrCode_name[10482:10503], + 40047: _ErrCode_name[10503:10525], + 40048: _ErrCode_name[10525:10543], + 40049: _ErrCode_name[10543:10562], + 40050: _ErrCode_name[10562:10583], + 40051: _ErrCode_name[10583:10603], + 40052: _ErrCode_name[10603:10624], + 40053: _ErrCode_name[10624:10646], + 40054: _ErrCode_name[10646:10667], + 40055: _ErrCode_name[10667:10686], + 40056: _ErrCode_name[10686:10708], + 40057: _ErrCode_name[10708:10728], + 40058: _ErrCode_name[10728:10749], + 40059: _ErrCode_name[10749:10775], + 40060: _ErrCode_name[10775:10793], + 40061: _ErrCode_name[10793:10818], + 40062: _ErrCode_name[10818:10841], + 40063: _ErrCode_name[10841:10865], + 40064: _ErrCode_name[10865:10890], + 40065: _ErrCode_name[10890:10913], + 40066: _ErrCode_name[10913:10933], + 40067: _ErrCode_name[10933:10962], + 40068: _ErrCode_name[10962:10982], + 40069: _ErrCode_name[10982:11004], + 40070: _ErrCode_name[11004:11017], + 40071: _ErrCode_name[11017:11037], + 40072: _ErrCode_name[11037:11057], + 40073: _ErrCode_name[11057:11093], + 40074: _ErrCode_name[11093:11128], + 40075: _ErrCode_name[11128:11151], + 40076: _ErrCode_name[11151:11174], + 40077: _ErrCode_name[11174:11197], + 40078: _ErrCode_name[11197:11223], + 40079: _ErrCode_name[11223:11248], + 40080: _ErrCode_name[11248:11272], + 40081: _ErrCode_name[11272:11297], + 40082: _ErrCode_name[11297:11321], + 40083: _ErrCode_name[11321:11339], + 42001: _ErrCode_name[11339:11357], + 42002: _ErrCode_name[11357:11382], + 42003: _ErrCode_name[11382:11405], + 42004: _ErrCode_name[11405:11429], + 42005: _ErrCode_name[11429:11453], + 42006: _ErrCode_name[11453:11472], + 42007: _ErrCode_name[11472:11492], + 42008: _ErrCode_name[11492:11516], + 42009: _ErrCode_name[11516:11539], + 42010: _ErrCode_name[11539:11557], + 42501: _ErrCode_name[11557:11575], + 42502: _ErrCode_name[11575:11588], + 42503: _ErrCode_name[11588:11603], + 42504: _ErrCode_name[11603:11623], + 42505: 
_ErrCode_name[11623:11638], + 43001: _ErrCode_name[11638:11664], + 43002: _ErrCode_name[11664:11684], + 43003: _ErrCode_name[11684:11701], + 43004: _ErrCode_name[11701:11725], + 43005: _ErrCode_name[11725:11748], + 43006: _ErrCode_name[11748:11765], + 43007: _ErrCode_name[11765:11779], + 43008: _ErrCode_name[11779:11802], + 44001: _ErrCode_name[11802:11826], + 44002: _ErrCode_name[11826:11857], + 44003: _ErrCode_name[11857:11887], + 44004: _ErrCode_name[11887:11915], + 44005: _ErrCode_name[11915:11942], + 44006: _ErrCode_name[11942:11968], + 44007: _ErrCode_name[11968:12007], + 44008: _ErrCode_name[12007:12046], + 44009: _ErrCode_name[12046:12081], + 44010: _ErrCode_name[12081:12109], + 44011: _ErrCode_name[12109:12137], + 44012: _ErrCode_name[12137:12154], + 44013: _ErrCode_name[12154:12178], + 44014: _ErrCode_name[12178:12204], + 44015: _ErrCode_name[12204:12233], + 44016: _ErrCode_name[12233:12272], + 44017: _ErrCode_name[12272:12311], + 44018: _ErrCode_name[12311:12349], + 44019: _ErrCode_name[12349:12398], + 44020: _ErrCode_name[12398:12419], + 46001: _ErrCode_name[12419:12438], + 46002: _ErrCode_name[12438:12454], + 46003: _ErrCode_name[12454:12474], + 46004: _ErrCode_name[12474:12497], + 46005: _ErrCode_name[12497:12518], + 46006: _ErrCode_name[12518:12545], + 46007: _ErrCode_name[12545:12568], + 46008: _ErrCode_name[12568:12594], + 46009: _ErrCode_name[12594:12617], + 46010: _ErrCode_name[12617:12643], + 46011: _ErrCode_name[12643:12675], + 46012: _ErrCode_name[12675:12708], + 46013: _ErrCode_name[12708:12726], + 46014: _ErrCode_name[12726:12747], + 46015: _ErrCode_name[12747:12781], + 46016: _ErrCode_name[12781:12811], + 46017: _ErrCode_name[12811:12843], + 46018: _ErrCode_name[12843:12864], + 46019: _ErrCode_name[12864:12901], + 46020: _ErrCode_name[12901:12926], + 46021: _ErrCode_name[12926:12952], + 46022: _ErrCode_name[12952:12983], + 46023: _ErrCode_name[12983:13010], + 46024: _ErrCode_name[13010:13029], + 46025: _ErrCode_name[13029:13053], + 46026: _ErrCode_name[13053:13078], + 46027: _ErrCode_name[13078:13112], + 46028: _ErrCode_name[13112:13142], + 46029: _ErrCode_name[13142:13171], + 46030: _ErrCode_name[13171:13197], + 46031: _ErrCode_name[13197:13222], + 46032: _ErrCode_name[13222:13257], + 46033: _ErrCode_name[13257:13279], + 46034: _ErrCode_name[13279:13303], + 46035: _ErrCode_name[13303:13328], + 48001: _ErrCode_name[13328:13345], + 48002: _ErrCode_name[13345:13361], + 48003: _ErrCode_name[13361:13374], + 49001: _ErrCode_name[13374:13387], + 49002: _ErrCode_name[13387:13412], + 50000: _ErrCode_name[13412:13418], } func (i ErrCode) String() string { From 2a8474ab84c30afb0cb8ddebd9ecaa6d44af89cb Mon Sep 17 00:00:00 2001 From: lance6716 Date: Wed, 21 Dec 2022 11:08:46 +0800 Subject: [PATCH 14/26] sync code Signed-off-by: lance6716 --- dm/tests/dmctl_basic/conf/diff_config.toml | 2 +- dm/tests/dmctl_basic/conf/dm-task.yaml | 6 ++++++ dm/tests/dmctl_basic/run.sh | 6 ++++-- 3 files changed, 11 insertions(+), 3 deletions(-) diff --git a/dm/tests/dmctl_basic/conf/diff_config.toml b/dm/tests/dmctl_basic/conf/diff_config.toml index 7f3079908c9..e38b01dfe87 100644 --- a/dm/tests/dmctl_basic/conf/diff_config.toml +++ b/dm/tests/dmctl_basic/conf/diff_config.toml @@ -20,7 +20,7 @@ check-struct-only = false [table-configs] [table-configs.config1] target-tables = ["dmctl.t_target"] -ignore-columns = ["id"] +ignore-columns = ["c_table", "c_source"] [routes.rule1] schema-pattern = "dmctl" diff --git a/dm/tests/dmctl_basic/conf/dm-task.yaml b/dm/tests/dmctl_basic/conf/dm-task.yaml 
index a5e3793d83e..e9651fcb64d 100644 --- a/dm/tests/dmctl_basic/conf/dm-task.yaml +++ b/dm/tests/dmctl_basic/conf/dm-task.yaml @@ -46,6 +46,12 @@ routes: table-pattern: t_* target-schema: dmctl target-table: t_target + extract-table: + table-regexp: "(.*)" + target-column: "c_table" + extract-source: + source-regexp: "(.*)" + target-column: "c_source" sharding-route-rules-schema: schema-pattern: dmctl diff --git a/dm/tests/dmctl_basic/run.sh b/dm/tests/dmctl_basic/run.sh index b81e2f49e5c..cf91b80d254 100755 --- a/dm/tests/dmctl_basic/run.sh +++ b/dm/tests/dmctl_basic/run.sh @@ -319,6 +319,8 @@ function run() { "stop-task test_incremental_no_source_meta" \ "\"result\": true" 3 run_sql_tidb "DROP DATABASE if exists dmctl;" + run_sql_tidb "CREATE DATABASE dmctl;" + run_sql_tidb "CREATE DATABASE dmctl.t_target(id bigint, b int, c varchar(20), d varchar(10), c_table varchar(255), c_source varchar(255), primary key id(id, c_table, c_source), unique key b(b));" # start task dmctl_start_task check_sync_diff $WORK_DIR $cur/conf/diff_config.toml @@ -412,8 +414,8 @@ function run() { md5_new_worker2=$(md5sum $dm_worker2_conf | awk '{print $1}') md5_old_worker1=$(md5sum $cur/conf/dm-worker1.toml | awk '{print $1}') md5_old_worker2=$(md5sum $cur/conf/dm-worker2.toml | awk '{print $1}') - [ "md5_new_worker1" != "md5_old_worker1" ] - [ "md5_new_worker2" != "md5_old_worker2" ] + [ "$md5_new_worker1" != "$md5_old_worker1" ] + [ "$md5_new_worker2" != "$md5_old_worker2" ] # update_master_config_success $dm_master_conf # cmp $dm_master_conf $cur/conf/dm-master.toml From 2f8fe9125fb73d92b5f6046ce43451546dd96820 Mon Sep 17 00:00:00 2001 From: lance6716 Date: Wed, 21 Dec 2022 12:35:58 +0800 Subject: [PATCH 15/26] sync code Signed-off-by: lance6716 --- dm/tests/dmctl_basic/run.sh | 2 ++ 1 file changed, 2 insertions(+) diff --git a/dm/tests/dmctl_basic/run.sh b/dm/tests/dmctl_basic/run.sh index cf91b80d254..7d515126ef8 100755 --- a/dm/tests/dmctl_basic/run.sh +++ b/dm/tests/dmctl_basic/run.sh @@ -278,6 +278,8 @@ function run() { query_status_with_no_tasks echo "dmctl_check_task" + run_sql_tidb "CREATE DATABASE dmctl;" + run_sql_tidb "CREATE DATABASE dmctl.t_target(id bigint, b int, c varchar(20), d varchar(10), c_table varchar(255), c_source varchar(255), primary key id(id, c_table, c_source), unique key b(b));" check_task_pass $TASK_CONF check_task_wrong_no_source_meta $cur/conf/dm-task7.yaml check_task_wrong_start_time_format $cur/conf/dm-task3.yaml From afd72ec6ac8de24ba6c85aeb00cee3a8421efc54 Mon Sep 17 00:00:00 2001 From: lance6716 Date: Wed, 21 Dec 2022 12:38:05 +0800 Subject: [PATCH 16/26] sync code Signed-off-by: lance6716 --- dm/tests/dmctl_basic/run.sh | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/dm/tests/dmctl_basic/run.sh b/dm/tests/dmctl_basic/run.sh index 7d515126ef8..f7e3db2dd9b 100755 --- a/dm/tests/dmctl_basic/run.sh +++ b/dm/tests/dmctl_basic/run.sh @@ -279,7 +279,7 @@ function run() { echo "dmctl_check_task" run_sql_tidb "CREATE DATABASE dmctl;" - run_sql_tidb "CREATE DATABASE dmctl.t_target(id bigint, b int, c varchar(20), d varchar(10), c_table varchar(255), c_source varchar(255), primary key id(id, c_table, c_source), unique key b(b));" + run_sql_tidb "CREATE TABLE dmctl.t_target(id bigint, b int, c varchar(20), d varchar(10), c_table varchar(255), c_source varchar(255), primary key id(id, c_table, c_source), unique key b(b));" check_task_pass $TASK_CONF check_task_wrong_no_source_meta $cur/conf/dm-task7.yaml check_task_wrong_start_time_format $cur/conf/dm-task3.yaml 
@@ -322,7 +322,7 @@ function run() { "\"result\": true" 3 run_sql_tidb "DROP DATABASE if exists dmctl;" run_sql_tidb "CREATE DATABASE dmctl;" - run_sql_tidb "CREATE DATABASE dmctl.t_target(id bigint, b int, c varchar(20), d varchar(10), c_table varchar(255), c_source varchar(255), primary key id(id, c_table, c_source), unique key b(b));" + run_sql_tidb "CREATE TABLE dmctl.t_target(id bigint, b int, c varchar(20), d varchar(10), c_table varchar(255), c_source varchar(255), primary key id(id, c_table, c_source), unique key b(b));" # start task dmctl_start_task check_sync_diff $WORK_DIR $cur/conf/diff_config.toml From de8c3449d3830ded9336232614e7e53370852007 Mon Sep 17 00:00:00 2001 From: lance6716 Date: Wed, 21 Dec 2022 13:11:02 +0800 Subject: [PATCH 17/26] sync code --- dm/tests/dmctl_basic/check_list/check_task.sh | 1 - dm/tests/dmctl_basic/conf/diff_config.toml | 1 - dm/tests/dmctl_basic/conf/dm-task.yaml | 6 --- dm/tests/dmctl_basic/data/db1.increment.sql | 36 +++++++------- dm/tests/dmctl_basic/data/db1.increment2.sql | 28 +++++------ dm/tests/dmctl_basic/data/db1.prepare.sql | 38 +++++++-------- dm/tests/dmctl_basic/data/db2.increment.sql | 48 +++++++++---------- dm/tests/dmctl_basic/data/db2.increment2.sql | 32 ++++++------- dm/tests/dmctl_basic/data/db2.prepare.sql | 36 +++++++------- dm/tests/dmctl_basic/run.sh | 4 -- 10 files changed, 109 insertions(+), 121 deletions(-) diff --git a/dm/tests/dmctl_basic/check_list/check_task.sh b/dm/tests/dmctl_basic/check_list/check_task.sh index 87d9a186997..c33e2b2ce96 100644 --- a/dm/tests/dmctl_basic/check_list/check_task.sh +++ b/dm/tests/dmctl_basic/check_list/check_task.sh @@ -17,7 +17,6 @@ function check_task_pass() { run_dm_ctl $WORK_DIR "127.0.0.1:$MASTER_PORT" \ "check-task $task_conf" \ "\"result\": true" 1 - # "\"passed\": true" 1 delete this because sometimes precheck will only return 'pre-check is passed. 
' } function check_task_not_pass() { diff --git a/dm/tests/dmctl_basic/conf/diff_config.toml b/dm/tests/dmctl_basic/conf/diff_config.toml index e38b01dfe87..10d148b8c5b 100644 --- a/dm/tests/dmctl_basic/conf/diff_config.toml +++ b/dm/tests/dmctl_basic/conf/diff_config.toml @@ -20,7 +20,6 @@ check-struct-only = false [table-configs] [table-configs.config1] target-tables = ["dmctl.t_target"] -ignore-columns = ["c_table", "c_source"] [routes.rule1] schema-pattern = "dmctl" diff --git a/dm/tests/dmctl_basic/conf/dm-task.yaml b/dm/tests/dmctl_basic/conf/dm-task.yaml index e9651fcb64d..a5e3793d83e 100644 --- a/dm/tests/dmctl_basic/conf/dm-task.yaml +++ b/dm/tests/dmctl_basic/conf/dm-task.yaml @@ -46,12 +46,6 @@ routes: table-pattern: t_* target-schema: dmctl target-table: t_target - extract-table: - table-regexp: "(.*)" - target-column: "c_table" - extract-source: - source-regexp: "(.*)" - target-column: "c_source" sharding-route-rules-schema: schema-pattern: dmctl diff --git a/dm/tests/dmctl_basic/data/db1.increment.sql b/dm/tests/dmctl_basic/data/db1.increment.sql index 1e2e6c30fe0..652b3d29329 100644 --- a/dm/tests/dmctl_basic/data/db1.increment.sql +++ b/dm/tests/dmctl_basic/data/db1.increment.sql @@ -1,18 +1,18 @@ -UPDATE `dmctl`.`t_2` SET `c` = 'lnVaSBHOTnAatvUmHZIZ' WHERE `id` = 6; -INSERT INTO `dmctl`.`t_1` (`b`,`c`,`d`,`id`) VALUES (1207302373,'slOcgVAMoRUmxBpRdUHe','h',10); -UPDATE `dmctl`.`t_1` SET `c` = 'uvSLJRmOHYLDDMM' WHERE `id` = 9; -DELETE FROM `dmctl`.`t_1` WHERE `id` = 2; -INSERT INTO `dmctl`.`t_1` (`b`,`c`,`d`,`id`) VALUES (617131993,'Klf','dZLD',11); -UPDATE `dmctl`.`t_2` SET `d` = 'k' WHERE `id` = 3; -UPDATE `dmctl`.`t_1` SET `d` = 'Nlh' WHERE `id` = 4; -INSERT INTO `dmctl`.`t_1` (`b`,`c`,`d`,`id`) VALUES (241968486,'LT','JRvYDYFX',12); -UPDATE `dmctl`.`t_1` SET `c` = 'BZbKSuZKitYG' WHERE `id` = 7; -DELETE FROM `dmctl`.`t_2` WHERE `id` = 3; -INSERT INTO `dmctl`.`t_2` (`b`,`c`,`d`,`id`) VALUES (207449096,'upYYtysTjztxkYGprAHD','drBN',11); -UPDATE `dmctl`.`t_2` SET `c` = 'AVKxHwiUZHXXKqODQoE' WHERE `id` = 8; -UPDATE `dmctl`.`t_2` SET `d` = 'eLmCZwCA' WHERE `id` = 9; -INSERT INTO `dmctl`.`t_2` (`b`,`c`,`d`,`id`) VALUES (683005299,'lsATr','mNOpyfu',12); -UPDATE `dmctl`.`t_2` SET `d` = 'uVNNR' WHERE `id` = 6; -DELETE FROM `dmctl`.`t_2` WHERE `id` = 5; -INSERT INTO `dmctl`.`t_1` (`b`,`c`,`d`,`id`) VALUES (373155634,'uoxJTwvCSEuaRbWGq','xzCteQqZwU',13); -UPDATE `dmctl`.`t_1` SET `d` = 'GzYx' WHERE `id` = 6; +UPDATE `dmctl`.`t_2` SET `c` = 'lnVaSBHOTnAatvUmHZIZ' WHERE `id` = 1206; +INSERT INTO `dmctl`.`t_1` (`b`,`c`,`d`,`id`) VALUES (1207302373,'slOcgVAMoRUmxBpRdUHe','h',1110); +UPDATE `dmctl`.`t_1` SET `c` = 'uvSLJRmOHYLDDMM' WHERE `id` = 1109; +DELETE FROM `dmctl`.`t_1` WHERE `id` = 1102; +INSERT INTO `dmctl`.`t_1` (`b`,`c`,`d`,`id`) VALUES (617131993,'Klf','dZLD',1111); +UPDATE `dmctl`.`t_2` SET `d` = 'k' WHERE `id` = 1203; +UPDATE `dmctl`.`t_1` SET `d` = 'Nlh' WHERE `id` = 1104; +INSERT INTO `dmctl`.`t_1` (`b`,`c`,`d`,`id`) VALUES (241968486,'LT','JRvYDYFX',1112); +UPDATE `dmctl`.`t_1` SET `c` = 'BZbKSuZKitYG' WHERE `id` = 1107; +DELETE FROM `dmctl`.`t_2` WHERE `id` = 1203; +INSERT INTO `dmctl`.`t_2` (`b`,`c`,`d`,`id`) VALUES (207449096,'upYYtysTjztxkYGprAHD','drBN',1211); +UPDATE `dmctl`.`t_2` SET `c` = 'AVKxHwiUZHXXKqODQoE' WHERE `id` = 1208; +UPDATE `dmctl`.`t_2` SET `d` = 'eLmCZwCA' WHERE `id` = 1209; +INSERT INTO `dmctl`.`t_2` (`b`,`c`,`d`,`id`) VALUES (683005299,'lsATr','mNOpyfu',1212); +UPDATE `dmctl`.`t_2` SET `d` = 'uVNNR' WHERE `id` = 1206; +DELETE FROM `dmctl`.`t_2` 
WHERE `id` = 1205; +INSERT INTO `dmctl`.`t_1` (`b`,`c`,`d`,`id`) VALUES (373155634,'uoxJTwvCSEuaRbWGq','xzCteQqZwU',113); +UPDATE `dmctl`.`t_1` SET `d` = 'GzYx' WHERE `id` = 1106; diff --git a/dm/tests/dmctl_basic/data/db1.increment2.sql b/dm/tests/dmctl_basic/data/db1.increment2.sql index a2fb356b83f..097e393a078 100644 --- a/dm/tests/dmctl_basic/data/db1.increment2.sql +++ b/dm/tests/dmctl_basic/data/db1.increment2.sql @@ -1,16 +1,16 @@ alter table `dmctl`.`t_1` drop column d; alter table `dmctl`.`t_2` drop column d; -UPDATE `dmctl`.`t_1` SET `c` = 'QFEcT' WHERE `id` = 10; -DELETE FROM `dmctl`.`t_1` WHERE `id` = 11; -UPDATE `dmctl`.`t_1` SET `c` = 'ZRmjDMtWnZPBZvhSro' WHERE `id` = 6; -INSERT INTO `dmctl`.`t_2` (`b`,`c`,`id`) VALUES (313784820,'ZkbuUB',13); -UPDATE `dmctl`.`t_2` SET `c` = 'svFHgFGGyNxhYgVbBQb' WHERE `id` = 11; -DELETE FROM `dmctl`.`t_1` WHERE `id` = 7; -UPDATE `dmctl`.`t_1` SET `c` = 'qgS' WHERE `id` = 6; -UPDATE `dmctl`.`t_2` SET `c` = 'tGkT' WHERE `id` = 6; -DELETE FROM `dmctl`.`t_1` WHERE `id` = 3; -UPDATE `dmctl`.`t_2` SET `c` = 'VbHXy' WHERE `id` = 11; -INSERT INTO `dmctl`.`t_1` (`b`,`c`,`id`) VALUES (1650592031,'zPoGCCQaFWKK',14); -UPDATE `dmctl`.`t_1` SET `c` = 'FXKXbvEYVFCuohnU' WHERE `id` = 6; -DELETE FROM `dmctl`.`t_2` WHERE `id` = 2; -UPDATE `dmctl`.`t_1` SET `c` = 'hZGaWSRlrcmd' WHERE `id` = 5; +UPDATE `dmctl`.`t_1` SET `c` = 'QFEcT' WHERE `id` = 1110; +DELETE FROM `dmctl`.`t_1` WHERE `id` = 1111; +UPDATE `dmctl`.`t_1` SET `c` = 'ZRmjDMtWnZPBZvhSro' WHERE `id` = 1106; +INSERT INTO `dmctl`.`t_2` (`b`,`c`,`id`) VALUES (313784820,'ZkbuUB',1213); +UPDATE `dmctl`.`t_2` SET `c` = 'svFHgFGGyNxhYgVbBQb' WHERE `id` = 1211; +DELETE FROM `dmctl`.`t_1` WHERE `id` = 1107; +UPDATE `dmctl`.`t_1` SET `c` = 'qgS' WHERE `id` = 1106; +UPDATE `dmctl`.`t_2` SET `c` = 'tGkT' WHERE `id` = 1206; +DELETE FROM `dmctl`.`t_1` WHERE `id` = 1103; +UPDATE `dmctl`.`t_2` SET `c` = 'VbHXy' WHERE `id` = 1211; +INSERT INTO `dmctl`.`t_1` (`b`,`c`,`id`) VALUES (1650592031,'zPoGCCQaFWKK',1114); +UPDATE `dmctl`.`t_1` SET `c` = 'FXKXbvEYVFCuohnU' WHERE `id` = 1106; +DELETE FROM `dmctl`.`t_2` WHERE `id` = 1202; +UPDATE `dmctl`.`t_1` SET `c` = 'hZGaWSRlrcmd' WHERE `id` = 1105; diff --git a/dm/tests/dmctl_basic/data/db1.prepare.sql b/dm/tests/dmctl_basic/data/db1.prepare.sql index c7b4094ebdb..485c881b15d 100644 --- a/dm/tests/dmctl_basic/data/db1.prepare.sql +++ b/dm/tests/dmctl_basic/data/db1.prepare.sql @@ -3,25 +3,25 @@ create database `dmctl`; use `dmctl`; create table t_1(id bigint auto_increment, b int, c varchar(20), d varchar(10), primary key id(id), unique key b(b)); create table t_2(id bigint auto_increment, b int, c varchar(20), d varchar(10), primary key id(id), unique key b(b)); -INSERT INTO `dmctl`.`t_2` (`b`,`c`,`d`,`id`) VALUES (1795844527,'mpNYtz','JugWqaHw',1); -INSERT INTO `dmctl`.`t_2` (`b`,`c`,`d`,`id`) VALUES (816772144,'jjoPwqhBWpJyUUvgGWkp','FgPbiUqrvS',2); -INSERT INTO `dmctl`.`t_2` (`b`,`c`,`d`,`id`) VALUES (1058572812,'dCmAIAuZrNUJxBl','wiaFgp',3); -INSERT INTO `dmctl`.`t_1` (`b`,`c`,`d`,`id`) VALUES (1825468799,'DWzgtMAwUcoqZvupwm','GsusfUlbB',1); -INSERT INTO `dmctl`.`t_1` (`b`,`c`,`d`,`id`) VALUES (265700472,'rEsjuTsIS','JPTd',2); -INSERT INTO `dmctl`.`t_2` (`b`,`c`,`d`,`id`) VALUES (763390433,'TE','jbO',4); -INSERT INTO `dmctl`.`t_1` (`b`,`c`,`d`,`id`) VALUES (1112494892,'XDbXXvYTtJFLaF','zByU',3); -INSERT INTO `dmctl`.`t_2` (`b`,`c`,`d`,`id`) VALUES (61186151,'gXhXNtk','Hi',5); -INSERT INTO `dmctl`.`t_2` (`b`,`c`,`d`,`id`) VALUES (1190671373,'WGP','jUXxu',6); -INSERT 
INTO `dmctl`.`t_2` (`b`,`c`,`d`,`id`) VALUES (1192770284,'SyMVcUeK','MIZNFu',7); -INSERT INTO `dmctl`.`t_1` (`b`,`c`,`d`,`id`) VALUES (1647531504,'yNvqWnrbtTxc','ogSwAofM',4); -INSERT INTO `dmctl`.`t_1` (`b`,`c`,`d`,`id`) VALUES (1041099481,'zrO','C',5); -INSERT INTO `dmctl`.`t_2` (`b`,`c`,`d`,`id`) VALUES (1635431660,'pum','MMtT',8); -INSERT INTO `dmctl`.`t_1` (`b`,`c`,`d`,`id`) VALUES (208389298,'ZvhKh','Zt',6); -INSERT INTO `dmctl`.`t_2` (`b`,`c`,`d`,`id`) VALUES (2128788808,'hgWB','poUlMgBSX',9); -INSERT INTO `dmctl`.`t_2` (`b`,`c`,`d`,`id`) VALUES (1758036092,'CxSfGQNebY','OY',10); -INSERT INTO `dmctl`.`t_1` (`b`,`c`,`d`,`id`) VALUES (1649664004,'eIXDUjODpLjRkXu','NWlGjQq',7); -INSERT INTO `dmctl`.`t_1` (`b`,`c`,`d`,`id`) VALUES (1402446429,'xQMCGsfckXpoe','R',8); -INSERT INTO `dmctl`.`t_1` (`b`,`c`,`d`,`id`) VALUES (800180420,'JuUIxUacksp','sX',9); +INSERT INTO `dmctl`.`t_2` (`b`,`c`,`d`,`id`) VALUES (1795844527,'mpNYtz','JugWqaHw',1201); +INSERT INTO `dmctl`.`t_2` (`b`,`c`,`d`,`id`) VALUES (816772144,'jjoPwqhBWpJyUUvgGWkp','FgPbiUqrvS',1202); +INSERT INTO `dmctl`.`t_2` (`b`,`c`,`d`,`id`) VALUES (1058572812,'dCmAIAuZrNUJxBl','wiaFgp',1203); +INSERT INTO `dmctl`.`t_1` (`b`,`c`,`d`,`id`) VALUES (1825468799,'DWzgtMAwUcoqZvupwm','GsusfUlbB',1101); +INSERT INTO `dmctl`.`t_1` (`b`,`c`,`d`,`id`) VALUES (265700472,'rEsjuTsIS','JPTd',1102); +INSERT INTO `dmctl`.`t_2` (`b`,`c`,`d`,`id`) VALUES (763390433,'TE','jbO',1204); +INSERT INTO `dmctl`.`t_1` (`b`,`c`,`d`,`id`) VALUES (1112494892,'XDbXXvYTtJFLaF','zByU',1103); +INSERT INTO `dmctl`.`t_2` (`b`,`c`,`d`,`id`) VALUES (61186151,'gXhXNtk','Hi',1205); +INSERT INTO `dmctl`.`t_2` (`b`,`c`,`d`,`id`) VALUES (1190671373,'WGP','jUXxu',1206); +INSERT INTO `dmctl`.`t_2` (`b`,`c`,`d`,`id`) VALUES (1192770284,'SyMVcUeK','MIZNFu',1207); +INSERT INTO `dmctl`.`t_1` (`b`,`c`,`d`,`id`) VALUES (1647531504,'yNvqWnrbtTxc','ogSwAofM',1104); +INSERT INTO `dmctl`.`t_1` (`b`,`c`,`d`,`id`) VALUES (1041099481,'zrO','C',1105); +INSERT INTO `dmctl`.`t_2` (`b`,`c`,`d`,`id`) VALUES (1635431660,'pum','MMtT',1208); +INSERT INTO `dmctl`.`t_1` (`b`,`c`,`d`,`id`) VALUES (208389298,'ZvhKh','Zt',1106); +INSERT INTO `dmctl`.`t_2` (`b`,`c`,`d`,`id`) VALUES (2128788808,'hgWB','poUlMgBSX',1209); +INSERT INTO `dmctl`.`t_2` (`b`,`c`,`d`,`id`) VALUES (1758036092,'CxSfGQNebY','OY',1210); +INSERT INTO `dmctl`.`t_1` (`b`,`c`,`d`,`id`) VALUES (1649664004,'eIXDUjODpLjRkXu','NWlGjQq',1107); +INSERT INTO `dmctl`.`t_1` (`b`,`c`,`d`,`id`) VALUES (1402446429,'xQMCGsfckXpoe','R',1108); +INSERT INTO `dmctl`.`t_1` (`b`,`c`,`d`,`id`) VALUES (800180420,'JuUIxUacksp','sX',1109); create table tb_1(a INT, b INT); create table tb_2(a INT, c INT); diff --git a/dm/tests/dmctl_basic/data/db2.increment.sql b/dm/tests/dmctl_basic/data/db2.increment.sql index 250cf448a45..00e76aca1db 100644 --- a/dm/tests/dmctl_basic/data/db2.increment.sql +++ b/dm/tests/dmctl_basic/data/db2.increment.sql @@ -1,24 +1,24 @@ -UPDATE `dmctl`.`t_2` SET `d` = 'S' WHERE `id` = 8; -DELETE FROM `dmctl`.`t_1` WHERE `id` = 7; -UPDATE `dmctl`.`t_2` SET `c` = 'pvHeDtXdNQBzc' WHERE `id` = 9; -INSERT INTO `dmctl`.`t_1` (`b`,`c`,`d`,`id`) VALUES (1533333567,'BEY','nQE',10); -UPDATE `dmctl`.`t_1` SET `c` = 'UcRjvtdgeAMmmTdd' WHERE `id` = 2; -DELETE FROM `dmctl`.`t_1` WHERE `id` = 7; -UPDATE `dmctl`.`t_1` SET `c` = 'oZTluknwkIgUYEkPdB' WHERE `id` = 7; -UPDATE `dmctl`.`t_1` SET `c` = 'ZcjYaJnh' WHERE `id` = 5; -DELETE FROM `dmctl`.`t_2` WHERE `id` = 6; -UPDATE `dmctl`.`t_1` SET `d` = 'QVAzYmqx' WHERE `id` = 4; -INSERT INTO `dmctl`.`t_1` 
(`b`,`c`,`d`,`id`) VALUES (815122675,'AkDfRfFpyuMPyU','nOZwdjK',11); -UPDATE `dmctl`.`t_1` SET `c` = 'SNceJrNMU' WHERE `id` = 6; -DELETE FROM `dmctl`.`t_2` WHERE `id` = 9; -UPDATE `dmctl`.`t_1` SET `d` = 'VACozkE' WHERE `id` = 11; -UPDATE `dmctl`.`t_2` SET `d` = 'Xczvw' WHERE `id` = 4; -DELETE FROM `dmctl`.`t_1` WHERE `id` = 2; -UPDATE `dmctl`.`t_2` SET `c` = 'WgFBYecVDGxyytTQNF' WHERE `id` = 7; -INSERT INTO `dmctl`.`t_1` (`b`,`c`,`d`,`id`) VALUES (1929704764,'hKXJjNu','PzUETJSZdU',12); -UPDATE `dmctl`.`t_1` SET `d` = 'ZMdgI' WHERE `id` = 4; -DELETE FROM `dmctl`.`t_2` WHERE `id` = 1; -UPDATE `dmctl`.`t_1` SET `c` = 'oP' WHERE `id` = 9; -UPDATE `dmctl`.`t_1` SET `c` = 'fNxtLysT' WHERE `id` = 10; -DELETE FROM `dmctl`.`t_1` WHERE `id` = 3; -UPDATE `dmctl`.`t_1` SET `d` = 'gQjjJwwJ' WHERE `id` = 10; +UPDATE `dmctl`.`t_2` SET `d` = 'S' WHERE `id` = 2208; +DELETE FROM `dmctl`.`t_1` WHERE `id` = 2107; +UPDATE `dmctl`.`t_2` SET `c` = 'pvHeDtXdNQBzc' WHERE `id` = 2209; +INSERT INTO `dmctl`.`t_1` (`b`,`c`,`d`,`id`) VALUES (1533333567,'BEY','nQE',2110); +UPDATE `dmctl`.`t_1` SET `c` = 'UcRjvtdgeAMmmTdd' WHERE `id` = 2102; +DELETE FROM `dmctl`.`t_1` WHERE `id` = 2107; +UPDATE `dmctl`.`t_1` SET `c` = 'oZTluknwkIgUYEkPdB' WHERE `id` = 2107; +UPDATE `dmctl`.`t_1` SET `c` = 'ZcjYaJnh' WHERE `id` = 2105; +DELETE FROM `dmctl`.`t_2` WHERE `id` = 2206; +UPDATE `dmctl`.`t_1` SET `d` = 'QVAzYmqx' WHERE `id` = 2104; +INSERT INTO `dmctl`.`t_1` (`b`,`c`,`d`,`id`) VALUES (815122675,'AkDfRfFpyuMPyU','nOZwdjK',2111); +UPDATE `dmctl`.`t_1` SET `c` = 'SNceJrNMU' WHERE `id` = 2106; +DELETE FROM `dmctl`.`t_2` WHERE `id` = 2209; +UPDATE `dmctl`.`t_1` SET `d` = 'VACozkE' WHERE `id` = 2111; +UPDATE `dmctl`.`t_2` SET `d` = 'Xczvw' WHERE `id` = 2204; +DELETE FROM `dmctl`.`t_1` WHERE `id` = 2102; +UPDATE `dmctl`.`t_2` SET `c` = 'WgFBYecVDGxyytTQNF' WHERE `id` = 2207; +INSERT INTO `dmctl`.`t_1` (`b`,`c`,`d`,`id`) VALUES (1929704764,'hKXJjNu','PzUETJSZdU',2112); +UPDATE `dmctl`.`t_1` SET `d` = 'ZMdgI' WHERE `id` = 2104; +DELETE FROM `dmctl`.`t_2` WHERE `id` = 2201; +UPDATE `dmctl`.`t_1` SET `c` = 'oP' WHERE `id` = 2109; +UPDATE `dmctl`.`t_1` SET `c` = 'fNxtLysT' WHERE `id` = 2110; +DELETE FROM `dmctl`.`t_1` WHERE `id` = 2103; +UPDATE `dmctl`.`t_1` SET `d` = 'gQjjJwwJ' WHERE `id` = 2110; diff --git a/dm/tests/dmctl_basic/data/db2.increment2.sql b/dm/tests/dmctl_basic/data/db2.increment2.sql index 01e90cc9aca..d5d882a4e76 100644 --- a/dm/tests/dmctl_basic/data/db2.increment2.sql +++ b/dm/tests/dmctl_basic/data/db2.increment2.sql @@ -1,18 +1,18 @@ alter table `dmctl`.`t_1` drop column d; alter table `dmctl`.`t_2` drop column d; -UPDATE `dmctl`.`t_1` SET `c` = 'ex' WHERE `id` = 12; -DELETE FROM `dmctl`.`t_1` WHERE `id` = 5; -UPDATE `dmctl`.`t_2` SET `c` = 'KOhDazkRuLagpqgq' WHERE `id` = 7; -INSERT INTO `dmctl`.`t_1` (`b`,`c`,`id`) VALUES (2046853891,'YiPcpg',13); -UPDATE `dmctl`.`t_1` SET `c` = 'TRBVSNLTT' WHERE `id` = 5; -DELETE FROM `dmctl`.`t_2` WHERE `id` = 3; -UPDATE `dmctl`.`t_2` SET `c` = 'pEUUHDOepmEqQ' WHERE `id` = 3; -UPDATE `dmctl`.`t_1` SET `c` = 'yYVYjZd' WHERE `id` = 1; -DELETE FROM `dmctl`.`t_2` WHERE `id` = 8; -UPDATE `dmctl`.`t_2` SET `c` = 'kQPauwPFQgoNkBK' WHERE `id` = 2; -INSERT INTO `dmctl`.`t_2` (`b`,`c`,`id`) VALUES (1328787895,'zGpfwmts',9); -UPDATE `dmctl`.`t_1` SET `c` = 'CDzG' WHERE `id` = 12; -DELETE FROM `dmctl`.`t_1` WHERE `id` = 1; -UPDATE `dmctl`.`t_2` SET `c` = 'IWAyZsgmEdrO' WHERE `id` = 2; -UPDATE `dmctl`.`t_1` SET `c` = 'OQyXDeydLVAEIwrRrTG' WHERE `id` = 11; -DELETE FROM `dmctl`.`t_2` WHERE `id` = 
2; +UPDATE `dmctl`.`t_1` SET `c` = 'ex' WHERE `id` = 2112; +DELETE FROM `dmctl`.`t_1` WHERE `id` = 2105; +UPDATE `dmctl`.`t_2` SET `c` = 'KOhDazkRuLagpqgq' WHERE `id` = 2207; +INSERT INTO `dmctl`.`t_1` (`b`,`c`,`id`) VALUES (2046853891,'YiPcpg',2113); +UPDATE `dmctl`.`t_1` SET `c` = 'TRBVSNLTT' WHERE `id` = 2105; +DELETE FROM `dmctl`.`t_2` WHERE `id` = 2203; +UPDATE `dmctl`.`t_2` SET `c` = 'pEUUHDOepmEqQ' WHERE `id` = 2203; +UPDATE `dmctl`.`t_1` SET `c` = 'yYVYjZd' WHERE `id` = 2101; +DELETE FROM `dmctl`.`t_2` WHERE `id` = 2208; +UPDATE `dmctl`.`t_2` SET `c` = 'kQPauwPFQgoNkBK' WHERE `id` = 2202; +INSERT INTO `dmctl`.`t_2` (`b`,`c`,`id`) VALUES (1328787895,'zGpfwmts',2209); +UPDATE `dmctl`.`t_1` SET `c` = 'CDzG' WHERE `id` = 2112; +DELETE FROM `dmctl`.`t_1` WHERE `id` = 2101; +UPDATE `dmctl`.`t_2` SET `c` = 'IWAyZsgmEdrO' WHERE `id` = 2202; +UPDATE `dmctl`.`t_1` SET `c` = 'OQyXDeydLVAEIwrRrTG' WHERE `id` = 2111; +DELETE FROM `dmctl`.`t_2` WHERE `id` = 2202; diff --git a/dm/tests/dmctl_basic/data/db2.prepare.sql b/dm/tests/dmctl_basic/data/db2.prepare.sql index bdc6c149fa5..72907fdbd45 100644 --- a/dm/tests/dmctl_basic/data/db2.prepare.sql +++ b/dm/tests/dmctl_basic/data/db2.prepare.sql @@ -3,21 +3,21 @@ create database `dmctl`; use `dmctl`; create table t_1(id bigint auto_increment, b int, c varchar(20), d varchar(10), primary key id(id), unique key b(b)); create table t_2(id bigint auto_increment, b int, c varchar(20), d varchar(10), primary key id(id), unique key b(b)); -INSERT INTO `dmctl`.`t_2` (`b`,`c`,`d`,`id`) VALUES (536247973,'FdkqoydEmwL','cnmrQHkMMD',1); -INSERT INTO `dmctl`.`t_1` (`b`,`c`,`d`,`id`) VALUES (625546463,'dgwvtEydzZa','rvuaa',1); -INSERT INTO `dmctl`.`t_1` (`b`,`c`,`d`,`id`) VALUES (1339059405,'DS','rRbbWfXU',2); -INSERT INTO `dmctl`.`t_2` (`b`,`c`,`d`,`id`) VALUES (1021801725,'izDZdmGeoeYpyn','YHX',2); -INSERT INTO `dmctl`.`t_2` (`b`,`c`,`d`,`id`) VALUES (1547191572,'qkdHwHe','OZI',3); -INSERT INTO `dmctl`.`t_1` (`b`,`c`,`d`,`id`) VALUES (804011834,'cDVHrSBD','ZGzWk',3); -INSERT INTO `dmctl`.`t_2` (`b`,`c`,`d`,`id`) VALUES (1258473138,'nnTUUdsTOnYsHD','Nz',4); -INSERT INTO `dmctl`.`t_1` (`b`,`c`,`d`,`id`) VALUES (161128134,'arowzAebwoHBc','Wnux',4); -INSERT INTO `dmctl`.`t_1` (`b`,`c`,`d`,`id`) VALUES (783636444,'OYlOy','BMkikT',5); -INSERT INTO `dmctl`.`t_1` (`b`,`c`,`d`,`id`) VALUES (1272018652,'DYpcqArfWolhOhiab','QyzbNqC',6); -INSERT INTO `dmctl`.`t_1` (`b`,`c`,`d`,`id`) VALUES (1674064434,'HcgEkwijLzTp','CKAuh',7); -INSERT INTO `dmctl`.`t_1` (`b`,`c`,`d`,`id`) VALUES (275890580,'iroFHGFVsqQyTOvILGTB','jyo',8); -INSERT INTO `dmctl`.`t_2` (`b`,`c`,`d`,`id`) VALUES (751446388,'MJcpP','aNMJ',5); -INSERT INTO `dmctl`.`t_1` (`b`,`c`,`d`,`id`) VALUES (844436321,'IxWEwsmadiKId','fw',9); -INSERT INTO `dmctl`.`t_2` (`b`,`c`,`d`,`id`) VALUES (1394310997,'DLRg','KzhFXaFV',6); -INSERT INTO `dmctl`.`t_2` (`b`,`c`,`d`,`id`) VALUES (399622170,'sE','DwDwBOEiJ',7); -INSERT INTO `dmctl`.`t_2` (`b`,`c`,`d`,`id`) VALUES (584262288,'caXN','SN',8); -INSERT INTO `dmctl`.`t_2` (`b`,`c`,`d`,`id`) VALUES (611154797,'zcsTrw','WRqA',9); +INSERT INTO `dmctl`.`t_2` (`b`,`c`,`d`,`id`) VALUES (536247973,'FdkqoydEmwL','cnmrQHkMMD',2201); +INSERT INTO `dmctl`.`t_1` (`b`,`c`,`d`,`id`) VALUES (625546463,'dgwvtEydzZa','rvuaa',2101); +INSERT INTO `dmctl`.`t_1` (`b`,`c`,`d`,`id`) VALUES (1339059405,'DS','rRbbWfXU',2102); +INSERT INTO `dmctl`.`t_2` (`b`,`c`,`d`,`id`) VALUES (1021801725,'izDZdmGeoeYpyn','YHX',2202); +INSERT INTO `dmctl`.`t_2` (`b`,`c`,`d`,`id`) VALUES 
(1547191572,'qkdHwHe','OZI',2203); +INSERT INTO `dmctl`.`t_1` (`b`,`c`,`d`,`id`) VALUES (804011834,'cDVHrSBD','ZGzWk',2103); +INSERT INTO `dmctl`.`t_2` (`b`,`c`,`d`,`id`) VALUES (1258473138,'nnTUUdsTOnYsHD','Nz',2204); +INSERT INTO `dmctl`.`t_1` (`b`,`c`,`d`,`id`) VALUES (161128134,'arowzAebwoHBc','Wnux',2104); +INSERT INTO `dmctl`.`t_1` (`b`,`c`,`d`,`id`) VALUES (783636444,'OYlOy','BMkikT',2105); +INSERT INTO `dmctl`.`t_1` (`b`,`c`,`d`,`id`) VALUES (1272018652,'DYpcqArfWolhOhiab','QyzbNqC',2106); +INSERT INTO `dmctl`.`t_1` (`b`,`c`,`d`,`id`) VALUES (1674064434,'HcgEkwijLzTp','CKAuh',2107); +INSERT INTO `dmctl`.`t_1` (`b`,`c`,`d`,`id`) VALUES (275890580,'iroFHGFVsqQyTOvILGTB','jyo',2108); +INSERT INTO `dmctl`.`t_2` (`b`,`c`,`d`,`id`) VALUES (751446388,'MJcpP','aNMJ',2205); +INSERT INTO `dmctl`.`t_1` (`b`,`c`,`d`,`id`) VALUES (844436321,'IxWEwsmadiKId','fw',2109); +INSERT INTO `dmctl`.`t_2` (`b`,`c`,`d`,`id`) VALUES (1394310997,'DLRg','KzhFXaFV',2206); +INSERT INTO `dmctl`.`t_2` (`b`,`c`,`d`,`id`) VALUES (399622170,'sE','DwDwBOEiJ',2207); +INSERT INTO `dmctl`.`t_2` (`b`,`c`,`d`,`id`) VALUES (584262288,'caXN','SN',2208); +INSERT INTO `dmctl`.`t_2` (`b`,`c`,`d`,`id`) VALUES (611154797,'zcsTrw','WRqA',2209); diff --git a/dm/tests/dmctl_basic/run.sh b/dm/tests/dmctl_basic/run.sh index f7e3db2dd9b..86b244a8096 100755 --- a/dm/tests/dmctl_basic/run.sh +++ b/dm/tests/dmctl_basic/run.sh @@ -278,8 +278,6 @@ function run() { query_status_with_no_tasks echo "dmctl_check_task" - run_sql_tidb "CREATE DATABASE dmctl;" - run_sql_tidb "CREATE TABLE dmctl.t_target(id bigint, b int, c varchar(20), d varchar(10), c_table varchar(255), c_source varchar(255), primary key id(id, c_table, c_source), unique key b(b));" check_task_pass $TASK_CONF check_task_wrong_no_source_meta $cur/conf/dm-task7.yaml check_task_wrong_start_time_format $cur/conf/dm-task3.yaml @@ -321,8 +319,6 @@ function run() { "stop-task test_incremental_no_source_meta" \ "\"result\": true" 3 run_sql_tidb "DROP DATABASE if exists dmctl;" - run_sql_tidb "CREATE DATABASE dmctl;" - run_sql_tidb "CREATE TABLE dmctl.t_target(id bigint, b int, c varchar(20), d varchar(10), c_table varchar(255), c_source varchar(255), primary key id(id, c_table, c_source), unique key b(b));" # start task dmctl_start_task check_sync_diff $WORK_DIR $cur/conf/diff_config.toml From a5bc678e27770dfb0c4069cde26283853ad64d98 Mon Sep 17 00:00:00 2001 From: lance6716 Date: Wed, 21 Dec 2022 14:27:07 +0800 Subject: [PATCH 18/26] sync code Signed-off-by: lance6716 --- dm/tests/online_ddl/conf/diff_config.toml | 4 ++-- dm/tests/online_ddl/conf/dm-task.yaml | 12 ++++++++++++ dm/tests/online_ddl/run.sh | 4 ++++ dm/tests/others_integration_1.txt | 1 - dm/tests/others_integration_2.txt | 2 +- dm/tests/sequence_safe_mode/conf/diff_config.toml | 4 ++-- dm/tests/sequence_safe_mode/conf/dm-task.yaml | 6 ++++++ dm/tests/sequence_safe_mode/run.sh | 2 ++ 8 files changed, 29 insertions(+), 6 deletions(-) diff --git a/dm/tests/online_ddl/conf/diff_config.toml b/dm/tests/online_ddl/conf/diff_config.toml index bce21d71a01..992d34075ef 100644 --- a/dm/tests/online_ddl/conf/diff_config.toml +++ b/dm/tests/online_ddl/conf/diff_config.toml @@ -4,7 +4,7 @@ check-thread-count = 4 export-fix-sql = true -check-struct-only = false +check-data-only = true [task] output-dir = "/tmp/ticdc_dm_test/output" @@ -32,7 +32,7 @@ target-table = "pt_t_target" [table-configs] [table-configs.config1] target-tables = ["online_ddl.*"] -ignore-columns = ["id"] +ignore-columns = ["c_table", "c_source"] [data-sources] 
[data-sources.mysql1] diff --git a/dm/tests/online_ddl/conf/dm-task.yaml b/dm/tests/online_ddl/conf/dm-task.yaml index 7240e49f439..9a8e5a851a7 100644 --- a/dm/tests/online_ddl/conf/dm-task.yaml +++ b/dm/tests/online_ddl/conf/dm-task.yaml @@ -45,12 +45,24 @@ routes: table-pattern: gho_t* target-schema: online_ddl target-table: gho_t_target + extract-table: + table-regexp: "(.*)" + target-column: "c_table" + extract-source: + source-regexp: "(.*)" + target-column: "c_source" pt-sharding-route-rules-table: schema-pattern: online_ddl table-pattern: pt_t* target-schema: online_ddl target-table: pt_t_target + extract-table: + table-regexp: "(.*)" + target-column: "c_table" + extract-source: + source-regexp: "(.*)" + target-column: "c_source" sharding-route-rules-schema: schema-pattern: online_ddl diff --git a/dm/tests/online_ddl/run.sh b/dm/tests/online_ddl/run.sh index 4991ccbb0a2..a2455759017 100755 --- a/dm/tests/online_ddl/run.sh +++ b/dm/tests/online_ddl/run.sh @@ -41,6 +41,10 @@ function run() { run_sql_file $cur/data/pt.db2.prepare.sql $MYSQL_HOST2 $MYSQL_PORT2 $MYSQL_PASSWORD2 check_contains 'Query OK, 3 rows affected' + run_sql_tidb "create database if not exists online_ddl;" + run_sql_tidb "create table online_ddl.pt_t_target (id bigint auto_increment, uid int, name varchar(80), info varchar(100), c_table varchar(255), c_source varchar(255), primary key (id, c_table, c_source), unique key(uid)) DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin;" + run_sql_tidb "create table online_ddl.gho_t_target (id bigint auto_increment, uid int, name varchar(80), info varchar(100), c_table varchar(255), c_source varchar(255), primary key (id, c_table, c_source), unique key(uid)) DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin;" + inject_points=( "github.com/pingcap/tiflow/dm/syncer/online-ddl-tools/ExitAfterSaveOnlineDDL=return()" "github.com/pingcap/tiflow/dm/syncer/ExitAfterSaveOnlineDDL=return()" diff --git a/dm/tests/others_integration_1.txt b/dm/tests/others_integration_1.txt index 011a45247f6..087ab335e08 100644 --- a/dm/tests/others_integration_1.txt +++ b/dm/tests/others_integration_1.txt @@ -8,4 +8,3 @@ full_mode sequence_sharding_optimistic sequence_sharding_removemeta gtid -only_dml diff --git a/dm/tests/others_integration_2.txt b/dm/tests/others_integration_2.txt index 23c02171201..fbd5f0ef558 100644 --- a/dm/tests/others_integration_2.txt +++ b/dm/tests/others_integration_2.txt @@ -1,5 +1,4 @@ foreign_key -load_task downstream_more_column expression_filter fake_rotate_event @@ -10,3 +9,4 @@ http_proxies openapi duplicate_event binlog_parse +only_dml diff --git a/dm/tests/sequence_safe_mode/conf/diff_config.toml b/dm/tests/sequence_safe_mode/conf/diff_config.toml index 5cdbd0c2ef9..3a6014fffe3 100644 --- a/dm/tests/sequence_safe_mode/conf/diff_config.toml +++ b/dm/tests/sequence_safe_mode/conf/diff_config.toml @@ -4,7 +4,7 @@ check-thread-count = 4 export-fix-sql = true -check-struct-only = false +check-data-only = true [task] output-dir = "/tmp/ticdc_dm_test/output" @@ -26,7 +26,7 @@ target-table = "t_target" [table-configs] [table-configs.config1] target-tables = ["sequence_safe_mode_target.t_target"] -ignore-columns = ["id"] +ignore-columns = ["c_table", "c_source"] [data-sources] [data-sources.mysql1] diff --git a/dm/tests/sequence_safe_mode/conf/dm-task.yaml b/dm/tests/sequence_safe_mode/conf/dm-task.yaml index ec1038b502c..156161d536b 100644 --- a/dm/tests/sequence_safe_mode/conf/dm-task.yaml +++ b/dm/tests/sequence_safe_mode/conf/dm-task.yaml @@ -39,6 +39,12 @@ routes: table-pattern: t* 
target-schema: sequence_safe_mode_target target-table: t_target + extract-table: + table-regexp: "(.*)" + target-column: "c_table" + extract-source: + source-regexp: "(.*)" + target-column: "c_source" sharding-route-rules-schema: schema-pattern: sequence_safe_mode_test diff --git a/dm/tests/sequence_safe_mode/run.sh b/dm/tests/sequence_safe_mode/run.sh index 81b7e0c37c7..c17df405406 100755 --- a/dm/tests/sequence_safe_mode/run.sh +++ b/dm/tests/sequence_safe_mode/run.sh @@ -27,6 +27,8 @@ function run() { dmctl_operate_source create $WORK_DIR/source1.yaml $SOURCE_ID1 dmctl_operate_source create $WORK_DIR/source2.yaml $SOURCE_ID2 + run_sql_tidb "create database if not exists sequence_safe_mode_target;" + run_sql_tidb "create table sequence_safe_mode_target.t_target (id bigint auto_increment, uid int, name varchar(80), info varchar(100), c_table varchar(255), c_source varchar(255), primary key (id, c_table, c_source), unique key(uid)) DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin;" dmctl_start_task "$cur/conf/dm-task.yaml" "--remove-meta" check_sync_diff $WORK_DIR $cur/conf/diff_config.toml From 50420e6267f74a3c70f393ecd0b9d3c345a83914 Mon Sep 17 00:00:00 2001 From: lance6716 Date: Wed, 21 Dec 2022 14:44:11 +0800 Subject: [PATCH 19/26] sync code Signed-off-by: lance6716 --- dm/tests/sequence_safe_mode/data/db1.increment.sql | 4 ++-- dm/tests/sequence_safe_mode/data/db1.increment2.sql | 4 ++-- dm/tests/sequence_safe_mode/data/db2.increment.sql | 4 ++-- dm/tests/sequence_safe_mode/data/db2.increment2.sql | 4 ++-- dm/tests/sequence_safe_mode/run.sh | 2 +- 5 files changed, 9 insertions(+), 9 deletions(-) diff --git a/dm/tests/sequence_safe_mode/data/db1.increment.sql b/dm/tests/sequence_safe_mode/data/db1.increment.sql index 7b523569a1d..ef370a22014 100644 --- a/dm/tests/sequence_safe_mode/data/db1.increment.sql +++ b/dm/tests/sequence_safe_mode/data/db1.increment.sql @@ -1,12 +1,12 @@ use sequence_safe_mode_test; insert into t1 (uid, name) values (10003, 'Buenos Aires'); -alter table t1 add column (age int, level int); +alter table t1 add column age int after name, add column level int after name; alter table t1 add index age(age); alter table t1 add index level(level); insert into t1 (uid, name, age) values (10005, 'Buenos Aires', 200); insert into t2 (uid, name) values (20005, 'Aureliano José'); insert into t1 (uid, name, age) values (10006, 'Buenos Aires', 200); -alter table t2 add column (age int, level int); +alter table t2 add column age int after name, add column level int after name; alter table t2 add index age(age); alter table t2 add index level(level); insert into t1 (uid, name, age) values (10007, 'Buenos Aires', 300); diff --git a/dm/tests/sequence_safe_mode/data/db1.increment2.sql b/dm/tests/sequence_safe_mode/data/db1.increment2.sql index 40fa9c8d9cb..f58d8028140 100644 --- a/dm/tests/sequence_safe_mode/data/db1.increment2.sql +++ b/dm/tests/sequence_safe_mode/data/db1.increment2.sql @@ -1,7 +1,7 @@ use sequence_safe_mode_test; insert into t1 (uid, name, age) values (10009, 'Buenos Aires', 100); insert into t2 (uid, name, age) values (20008, 'Colonel Aureliano Buendía', 402); -alter table t1 add column age2 int; +alter table t1 add column age2 int after age; alter table t1 add index age2(age2); insert into t1 (uid, name, age, age2) values (10010, 'Buenos Aires', 200, 404); insert into t2 (uid, name, age) values (20009, 'Colonel Aureliano Buendía', 100); @@ -10,7 +10,7 @@ update t2 set age = age + 2 where uid = 20007; update t2 set age = age + 1 where uid = 20009; update t1 set age 
= age + 1 where uid = 10008; update t1 set age = age + 1 where uid = 10009; -alter table t2 add column age2 int; +alter table t2 add column age2 int after age; alter table t2 add index age2(age2); update t1 set age = age + 1 where uid in (10010, 10011); update t2 set age = age + 1 where uid in (20009, 20010); diff --git a/dm/tests/sequence_safe_mode/data/db2.increment.sql b/dm/tests/sequence_safe_mode/data/db2.increment.sql index 2c8de84fa92..672ff181db8 100644 --- a/dm/tests/sequence_safe_mode/data/db2.increment.sql +++ b/dm/tests/sequence_safe_mode/data/db2.increment.sql @@ -1,11 +1,11 @@ use sequence_safe_mode_test; delete from t3 where name = 'Santa Sofía de la Piedad'; -alter table t2 add column (age int, level int); +alter table t2 add column age int after name, add column level int after name; alter table t2 add index age(age); alter table t2 add index level(level); insert into t2 (uid, name, age) values (40002, 'Remedios Moscote', 100), (40003, 'Amaranta', 103); insert into t3 (uid, name) values (30004, 'Aureliano José'), (30005, 'Santa Sofía de la Piedad'), (30006, '17 Aurelianos'); -alter table t3 add column (age int, level int); +alter table t3 add column age int after name, add column level int after name; alter table t3 add index age(age); alter table t3 add index level(level); update t3 set age = 1; diff --git a/dm/tests/sequence_safe_mode/data/db2.increment2.sql b/dm/tests/sequence_safe_mode/data/db2.increment2.sql index e911d71ade5..7b59ba505b9 100644 --- a/dm/tests/sequence_safe_mode/data/db2.increment2.sql +++ b/dm/tests/sequence_safe_mode/data/db2.increment2.sql @@ -1,10 +1,10 @@ use sequence_safe_mode_test; -alter table t2 add column age2 int; +alter table t2 add column age2 int after age; alter table t2 add index age2(age2); insert into t2 (uid, name, age, age2) values (40004, 'Remedios Moscote', 100, 300), (40005, 'Amaranta', 103, 301); insert into t3 (uid, name, age) values (30007, 'Aureliano José', 99), (30008, 'Santa Sofía de la Piedad', 999), (30009, '17 Aurelianos', 9999); update t2 set age = age + 33 where uid = 40004; update t3 set age = age + 44 where uid > 30006 and uid < 30010; -alter table t3 add column age2 int; +alter table t3 add column age2 int after age; alter table t3 add index age2(age2); update t3 set age2 = 100; diff --git a/dm/tests/sequence_safe_mode/run.sh b/dm/tests/sequence_safe_mode/run.sh index c17df405406..36ade3bee0b 100755 --- a/dm/tests/sequence_safe_mode/run.sh +++ b/dm/tests/sequence_safe_mode/run.sh @@ -28,7 +28,7 @@ function run() { dmctl_operate_source create $WORK_DIR/source2.yaml $SOURCE_ID2 run_sql_tidb "create database if not exists sequence_safe_mode_target;" - run_sql_tidb "create table sequence_safe_mode_target.t_target (id bigint auto_increment, uid int, name varchar(80), info varchar(100), c_table varchar(255), c_source varchar(255), primary key (id, c_table, c_source), unique key(uid)) DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin;" + run_sql_tidb "create table sequence_safe_mode_target.t_target (id bigint auto_increment, uid int, name varchar(80), c_table varchar(255), c_source varchar(255), primary key (id, c_table, c_source), unique key(uid)) DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin;" dmctl_start_task "$cur/conf/dm-task.yaml" "--remove-meta" check_sync_diff $WORK_DIR $cur/conf/diff_config.toml From 2b94ebbf1e9d0b9d8cbc40d80953a2324f84279b Mon Sep 17 00:00:00 2001 From: lance6716 Date: Wed, 21 Dec 2022 15:04:06 +0800 Subject: [PATCH 20/26] sync code Signed-off-by: lance6716 --- 
dm/tests/online_ddl/data/gho.db1.increment.sql | 8 ++++---- .../online_ddl/data/gho.db1.increment2.sql | 4 ++-- dm/tests/online_ddl/data/gho.db2.increment.sql | 8 ++++---- .../online_ddl/data/gho.db2.increment2.sql | 4 ++-- dm/tests/online_ddl/data/pt.db1.increment.sql | 8 ++++---- dm/tests/online_ddl/data/pt.db1.increment2.sql | 4 ++-- dm/tests/online_ddl/data/pt.db2.increment.sql | 8 ++++---- dm/tests/online_ddl/data/pt.db2.increment2.sql | 4 ++-- dm/tests/sequence_safe_mode/run.sh | 4 ++-- dm/tests/sharding/conf/diff_config.toml | 4 ++-- dm/tests/sharding/conf/dm-task.yaml | 6 ++++++ dm/tests/sharding/data/db1.increment.sql | 18 +++++++++--------- dm/tests/sharding/data/db2.increment.sql | 18 +++++++++--------- dm/tests/sharding/run.sh | 2 ++ 14 files changed, 54 insertions(+), 46 deletions(-) diff --git a/dm/tests/online_ddl/data/gho.db1.increment.sql b/dm/tests/online_ddl/data/gho.db1.increment.sql index 7b85ec73005..79f307c691e 100644 --- a/dm/tests/online_ddl/data/gho.db1.increment.sql +++ b/dm/tests/online_ddl/data/gho.db1.increment.sql @@ -2,15 +2,15 @@ use online_ddl; insert into gho_t1 (uid, name) values (10003, 'Buenos Aires'); update gho_t1 set name = 'Gabriel José de la Concordia García Márquez' where `uid` = 10001; update gho_t1 set name = 'One Hundred Years of Solitude' where name = 'Cien años de soledad'; -alter table gho_t1 add column age int; -alter table gho_t2 add column age int; +alter table gho_t1 add column age int after name; +alter table gho_t2 add column age int after name; alter table gho_t2 add key name (name); alter table gho_t1 add key name (name); insert into gho_t2 (uid, name, age, info) values (20004, 'Colonel Aureliano Buendía', 301, '{}'); -alter table gho_t2 add column info_json json GENERATED ALWAYS AS (`info`) VIRTUAL; +alter table gho_t2 add column info_json json GENERATED ALWAYS AS (`info`) VIRTUAL after name; insert into gho_t1 (uid, name, info) values (10004, 'Buenos Aires', '{"age": 10}'); insert into gho_t2 (uid, name, info) values (20005, 'Buenos Aires', '{"age": 100}'); insert into gho_t2 (uid, name, info) values (20006, 'Buenos Aires', '{"age": 1000}'); -alter table gho_t1 add column info_json json GENERATED ALWAYS AS (`info`) VIRTUAL; +alter table gho_t1 add column info_json json GENERATED ALWAYS AS (`info`) VIRTUAL after name; insert into gho_t1 (uid, name, info) values (10005, 'Buenos Aires', '{"age": 100}'); insert into gho_t2 (uid, name, info) values (20007, 'Buenos Aires', '{"age": 200}'); diff --git a/dm/tests/online_ddl/data/gho.db1.increment2.sql b/dm/tests/online_ddl/data/gho.db1.increment2.sql index ff3fec0cc91..e6d576473a4 100644 --- a/dm/tests/online_ddl/data/gho.db1.increment2.sql +++ b/dm/tests/online_ddl/data/gho.db1.increment2.sql @@ -1,8 +1,8 @@ use online_ddl; insert into gho_t1 (uid, name, info) values (10006, 'name of 10006', '{"age": 10006}'); insert into gho_t2 (uid, name, info) values (20008, 'name of 20008', '{"age": 20008}'); -alter table gho_t1 add column address varchar(255); -alter table gho_t2 add column address varchar(255); +alter table gho_t1 add column address varchar(255) after name; +alter table gho_t2 add column address varchar(255) after name; alter table gho_t1 add key address (address); alter table gho_t2 add key address (address); insert into gho_t2 (uid, name, info, address) values (20009, 'name of 20009', '{"age": 20009}', 'address of 20009'); diff --git a/dm/tests/online_ddl/data/gho.db2.increment.sql b/dm/tests/online_ddl/data/gho.db2.increment.sql index 087c51244e4..5f674778ce6 100644 --- 
a/dm/tests/online_ddl/data/gho.db2.increment.sql +++ b/dm/tests/online_ddl/data/gho.db2.increment.sql @@ -1,12 +1,12 @@ use online_ddl; delete from gho_t3 where name = 'Santa Sofía de la Piedad'; -alter table gho_t2 add column age int; +alter table gho_t2 add column age int after name; update gho_t2 set uid = uid + 10000; -alter table gho_t3 add column age int; +alter table gho_t3 add column age int after name; update gho_t3 set age = 1; alter table gho_t2 add key name (name); alter table gho_t3 add key name (name); -alter table gho_t2 add column info_json json GENERATED ALWAYS AS (`info`) VIRTUAL; +alter table gho_t2 add column info_json json GENERATED ALWAYS AS (`info`) VIRTUAL after name; update gho_t3 set age = age + 10; -alter table gho_t3 add column info_json json GENERATED ALWAYS AS (`info`) VIRTUAL; +alter table gho_t3 add column info_json json GENERATED ALWAYS AS (`info`) VIRTUAL after name; update gho_t2 set age = age + 10; diff --git a/dm/tests/online_ddl/data/gho.db2.increment2.sql b/dm/tests/online_ddl/data/gho.db2.increment2.sql index 717ad86be8b..2bfc9baff8f 100644 --- a/dm/tests/online_ddl/data/gho.db2.increment2.sql +++ b/dm/tests/online_ddl/data/gho.db2.increment2.sql @@ -1,8 +1,8 @@ use online_ddl; insert into gho_t3 (uid, name, info) values (30004, 'name of 30004', '{"age": 30004}'); insert into gho_t2 (uid, name, info) values (50002, 'name of 50002', '{"age": 50002}'); -alter table gho_t3 add column address varchar(255); -alter table gho_t2 add column address varchar(255); +alter table gho_t3 add column address varchar(255) after name; +alter table gho_t2 add column address varchar(255) after name; alter table gho_t2 add key address (address); alter table gho_t3 add key address (address); insert into gho_t2 (uid, name, info, address) values (50003, 'name of 50003', '{"age": 50003}', 'address of 50003'); diff --git a/dm/tests/online_ddl/data/pt.db1.increment.sql b/dm/tests/online_ddl/data/pt.db1.increment.sql index 6b2a0ce51e2..19a97705b38 100644 --- a/dm/tests/online_ddl/data/pt.db1.increment.sql +++ b/dm/tests/online_ddl/data/pt.db1.increment.sql @@ -2,15 +2,15 @@ use online_ddl; insert into pt_t1 (uid, name) values (10003, 'Buenos Aires'); update pt_t1 set name = 'Gabriel José de la Concordia García Márquez' where `uid` = 10001; update pt_t1 set name = 'One Hundred Years of Solitude' where name = 'Cien años de soledad'; -alter table pt_t1 add column age int; -alter table pt_t2 add column age int; +alter table pt_t1 add column age int after name; +alter table pt_t2 add column age int after name; alter table pt_t2 add key name (name); alter table pt_t1 add key name (name); insert into pt_t2 (uid, name, age, info) values (20004, 'Colonel Aureliano Buendía', 301, '{}'); -alter table pt_t2 add column info_json json GENERATED ALWAYS AS (`info`) VIRTUAL; +alter table pt_t2 add column info_json json GENERATED ALWAYS AS (`info`) VIRTUAL after name; insert into pt_t1 (uid, name, info) values (10004, 'Buenos Aires', '{"age": 10}'); insert into pt_t2 (uid, name, info) values (20005, 'Buenos Aires', '{"age": 100}'); insert into pt_t2 (uid, name, info) values (20006, 'Buenos Aires', '{"age": 1000}'); -alter table pt_t1 add column info_json json GENERATED ALWAYS AS (`info`) VIRTUAL; +alter table pt_t1 add column info_json json GENERATED ALWAYS AS (`info`) VIRTUAL after name; insert into pt_t1 (uid, name, info) values (10005, 'Buenos Aires', '{"age": 100}'); insert into pt_t2 (uid, name, info) values (20007, 'Buenos Aires', '{"age": 200}'); diff --git 
a/dm/tests/online_ddl/data/pt.db1.increment2.sql b/dm/tests/online_ddl/data/pt.db1.increment2.sql index 3197f478b66..73a96305f54 100644 --- a/dm/tests/online_ddl/data/pt.db1.increment2.sql +++ b/dm/tests/online_ddl/data/pt.db1.increment2.sql @@ -1,8 +1,8 @@ use online_ddl; insert into pt_t1 (uid, name, info) values (10006, 'name of 10006', '{"age": 10006}'); insert into pt_t2 (uid, name, info) values (20008, 'name of 20008', '{"age": 20008}'); -alter table pt_t1 add column address varchar(255); -alter table pt_t2 add column address varchar(255); +alter table pt_t1 add column address varchar(255) after name; +alter table pt_t2 add column address varchar(255) after name; alter table pt_t1 add key address (address); alter table pt_t2 add key address (address); insert into pt_t2 (uid, name, info, address) values (20009, 'name of 20009', '{"age": 20009}', 'address of 20009'); diff --git a/dm/tests/online_ddl/data/pt.db2.increment.sql b/dm/tests/online_ddl/data/pt.db2.increment.sql index cf4a1cc0752..04646923ee5 100644 --- a/dm/tests/online_ddl/data/pt.db2.increment.sql +++ b/dm/tests/online_ddl/data/pt.db2.increment.sql @@ -1,12 +1,12 @@ use online_ddl; delete from pt_t3 where name = 'Santa Sofía de la Piedad'; -alter table pt_t2 add column age int; +alter table pt_t2 add column age int after name; update pt_t2 set uid = uid + 10000; -alter table pt_t3 add column age int; +alter table pt_t3 add column age int after name; update pt_t3 set age = 1; alter table pt_t2 add key name (name); alter table pt_t3 add key name (name); -alter table pt_t2 add column info_json json GENERATED ALWAYS AS (`info`) VIRTUAL; +alter table pt_t2 add column info_json json GENERATED ALWAYS AS (`info`) VIRTUAL after name; update pt_t3 set age = age + 10; -alter table pt_t3 add column info_json json GENERATED ALWAYS AS (`info`) VIRTUAL; +alter table pt_t3 add column info_json json GENERATED ALWAYS AS (`info`) VIRTUAL after name; update pt_t2 set age = age + 10; diff --git a/dm/tests/online_ddl/data/pt.db2.increment2.sql b/dm/tests/online_ddl/data/pt.db2.increment2.sql index 2fea0930568..2a4adf345e2 100644 --- a/dm/tests/online_ddl/data/pt.db2.increment2.sql +++ b/dm/tests/online_ddl/data/pt.db2.increment2.sql @@ -1,8 +1,8 @@ use online_ddl; insert into pt_t3 (uid, name, info) values (30004, 'name of 30004', '{"age": 30004}'); insert into pt_t2 (uid, name, info) values (50002, 'name of 50002', '{"age": 50002}'); -alter table pt_t3 add column address varchar(255); -alter table pt_t2 add column address varchar(255); +alter table pt_t3 add column address varchar(255) after name; +alter table pt_t2 add column address varchar(255) after name; alter table pt_t2 add key address (address); alter table pt_t3 add key address (address); insert into pt_t2 (uid, name, info, address) values (50003, 'name of 50003', '{"age": 50003}', 'address of 50003'); diff --git a/dm/tests/sequence_safe_mode/run.sh b/dm/tests/sequence_safe_mode/run.sh index 36ade3bee0b..02625591930 100755 --- a/dm/tests/sequence_safe_mode/run.sh +++ b/dm/tests/sequence_safe_mode/run.sh @@ -27,8 +27,8 @@ function run() { dmctl_operate_source create $WORK_DIR/source1.yaml $SOURCE_ID1 dmctl_operate_source create $WORK_DIR/source2.yaml $SOURCE_ID2 - run_sql_tidb "create database if not exists sequence_safe_mode_target;" - run_sql_tidb "create table sequence_safe_mode_target.t_target (id bigint auto_increment, uid int, name varchar(80), c_table varchar(255), c_source varchar(255), primary key (id, c_table, c_source), unique key(uid)) DEFAULT CHARSET=utf8mb4 
COLLATE=utf8mb4_bin;" + run_sql_tidb "create database if not exists sequence_safe_mode_target;" + run_sql_tidb "create table sequence_safe_mode_target.t_target (id bigint auto_increment, uid int, name varchar(80), c_table varchar(255), c_source varchar(255), primary key (id, c_table, c_source), unique key(uid)) DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin;" dmctl_start_task "$cur/conf/dm-task.yaml" "--remove-meta" check_sync_diff $WORK_DIR $cur/conf/diff_config.toml diff --git a/dm/tests/sharding/conf/diff_config.toml b/dm/tests/sharding/conf/diff_config.toml index f69fc0306ff..049e291d8c0 100644 --- a/dm/tests/sharding/conf/diff_config.toml +++ b/dm/tests/sharding/conf/diff_config.toml @@ -4,7 +4,7 @@ check-thread-count = 4 export-fix-sql = true -check-struct-only = false +check-data-only = true [task] output-dir = "/tmp/ticdc_dm_test/output" @@ -34,7 +34,7 @@ target-table = "t_target" [table-configs.config1] target-tables = ["db_target.t_target"] # currently sync_diff does not support json fields well -ignore-columns = ["id", "info_json"] +ignore-columns = ["info_json", "c_table", "c_source"] index-fields = ["uid"] # range-placeholder diff --git a/dm/tests/sharding/conf/dm-task.yaml b/dm/tests/sharding/conf/dm-task.yaml index a1bbeda5d8b..d65d2ec0182 100644 --- a/dm/tests/sharding/conf/dm-task.yaml +++ b/dm/tests/sharding/conf/dm-task.yaml @@ -39,6 +39,12 @@ routes: table-pattern: t* target-schema: db_target target-table: t_target + extract-table: + table-regexp: "(.*)" + target-column: "c_table" + extract-source: + source-regexp: "(.*)" + target-column: "c_source" sharding-route-rules-schema: schema-pattern: sharding* diff --git a/dm/tests/sharding/data/db1.increment.sql b/dm/tests/sharding/data/db1.increment.sql index 7f8e39f03c7..f090ab9dcfc 100644 --- a/dm/tests/sharding/data/db1.increment.sql +++ b/dm/tests/sharding/data/db1.increment.sql @@ -3,18 +3,18 @@ insert into t1 (uid, name) values (10003, 'Buenos Aires'); update t1 set name = 'Gabriel José de la Concordia García Márquez' where `uid` = 10001; update t1 set name = 'One Hundred Years of Solitude' where name = 'Cien años de soledad'; insert into t2 (uid, name, info) values (20013, 'Colonel', '{}'); # DML to trigger fetch schema from downstream before DDL -alter table t1 add column age int; +alter table t1 add column age int after name; insert into t2 (uid, name, info) values (20023, 'Aureliano', '{}'); insert into t2 (uid, name, info) values (20033, 'Buendía', '{}'); -alter table t2 add column age int; +alter table t2 add column age int after name; insert into t2 (uid, name, age, info) values (20004, 'Colonel Aureliano Buendía', 301, '{}'); -alter table t2 add column info_json json GENERATED ALWAYS AS (`info`) VIRTUAL; +alter table t2 add column info_json json GENERATED ALWAYS AS (`info`) VIRTUAL after name; insert into t1 (uid, name, info) values (10004, 'Buenos Aires', '{"age": 10}'); insert into t2 (uid, name, info) values (20005, 'Buenos Aires', '{"age": 100}'); insert into t2 (uid, name, info) values (20006, 'Buenos Aires', '{"age": 1000}'); -alter table t1 add column info_json json GENERATED ALWAYS AS (`info`) VIRTUAL; -alter table t1 add column id_gen int as (uid + 1); -alter table t2 add column id_gen int as (uid + 1); +alter table t1 add column info_json json GENERATED ALWAYS AS (`info`) VIRTUAL after name; +alter table t1 add column id_gen int as (uid + 1) after name; +alter table t2 add column id_gen int as (uid + 1) after name; alter table t1 add unique (id_gen); alter table t2 add unique (id_gen); insert into t1 
(uid, name, info) values (10005, 'Buenos Aires', '{"age": 100}'); @@ -25,9 +25,9 @@ insert into t1 (uid, name, info) values (10006, 'Buenos Aires', '{"age": 100}'); insert into t2 (uid, name, info) values (20008, 'Buenos Aires', '{"age": 200}'); -- test ZERO_DATE -alter table t2 add column create_by DATETIME NOT NULL DEFAULT '0000-00-00 00:00:00'; +alter table t2 add column create_by DATETIME NOT NULL DEFAULT '0000-00-00 00:00:00' after name; insert into t2 (uid, name, info, create_by) values (1, 'HaHa', '{"age": 300}', now()); insert into t2 (uid, name, info, create_by) values (2, 'HiHi', '{"age": 400}', '0000-00-00 00:00:01'); -alter table t1 add column create_by DATETIME NOT NULL DEFAULT '0000-00-00 00:00:00'; +alter table t1 add column create_by DATETIME NOT NULL DEFAULT '0000-00-00 00:00:00' after name; insert into t1 (uid, name, info, create_by) values (3, 'HaHa', '{"age": 300}', now()); -insert into t1 (uid, name, info, create_by) values (4, 'HiHi', '{"age": 400}', '0000-00-00 00:00:01'); \ No newline at end of file +insert into t1 (uid, name, info, create_by) values (4, 'HiHi', '{"age": 400}', '0000-00-00 00:00:01'); diff --git a/dm/tests/sharding/data/db2.increment.sql b/dm/tests/sharding/data/db2.increment.sql index 909b67af7c5..1a512609356 100644 --- a/dm/tests/sharding/data/db2.increment.sql +++ b/dm/tests/sharding/data/db2.increment.sql @@ -1,15 +1,15 @@ use sharding1; delete from t3 where name = 'Santa Sofía de la Piedad'; insert into t2 (uid, name, info) values (40001, 'Amaranta', '{"age": 0}'); # DML to trigger fetch schema from downstream before DDL -alter table t2 add column age int; +alter table t2 add column age int after name; update t2 set uid = uid + 10000; -alter table t3 add column age int; +alter table t3 add column age int after name; update t3 set age = 1; -alter table t2 add column info_json json GENERATED ALWAYS AS (`info`) VIRTUAL; +alter table t2 add column info_json json GENERATED ALWAYS AS (`info`) VIRTUAL after name; update t3 set age = age + 10; -alter table t3 add column info_json json GENERATED ALWAYS AS (`info`) VIRTUAL; -alter table t3 add column id_gen int as (uid + 1); -alter table t2 add column id_gen int as (uid + 1); +alter table t3 add column info_json json GENERATED ALWAYS AS (`info`) VIRTUAL after name; +alter table t3 add column id_gen int as (uid + 1) after name; +alter table t2 add column id_gen int as (uid + 1) after name; alter table t2 add unique (id_gen); alter table t3 add unique (id_gen); update t2 set age = age + 10; @@ -18,9 +18,9 @@ alter table t3 add key multi_col_idx(uid, id_gen); update t3 set age = age + 10; -- test ZERO_DATE -alter table t2 add column create_by DATETIME NOT NULL DEFAULT '0000-00-00 00:00:00'; +alter table t2 add column create_by DATETIME NOT NULL DEFAULT '0000-00-00 00:00:00' after name; insert into t2 (uid, name, info, create_by) values (5, 'HaHa', '{"age": 300}', now()); insert into t2 (uid, name, info, create_by) values (6, 'HiHi', '{"age": 400}', '0000-00-00 00:00:01'); -alter table t3 add column create_by DATETIME NOT NULL DEFAULT '0000-00-00 00:00:00'; +alter table t3 add column create_by DATETIME NOT NULL DEFAULT '0000-00-00 00:00:00' after name; insert into t3 (uid, name, info, create_by) values (7, 'HaHa', '{"age": 300}', now()); -insert into t3 (uid, name, info, create_by) values (8, 'HiHi', '{"age": 400}', '0000-00-00 00:00:01') \ No newline at end of file +insert into t3 (uid, name, info, create_by) values (8, 'HiHi', '{"age": 400}', '0000-00-00 00:00:01') diff --git a/dm/tests/sharding/run.sh 
b/dm/tests/sharding/run.sh index 96c82868ad9..bf003a7f52c 100755 --- a/dm/tests/sharding/run.sh +++ b/dm/tests/sharding/run.sh @@ -52,6 +52,8 @@ function run() { check_rpc_alive $cur/../bin/check_worker_online 127.0.0.1:$WORKER2_PORT dmctl_operate_source create $WORK_DIR/source2.yaml $SOURCE_ID2 + run_sql_tidb "create database if not exists db_target;" + run_sql_tidb "create table db_target.t_target (id bigint auto_increment, uid int, name varchar(80), info varchar(100), c_table varchar(255), c_source varchar(255), primary key (id, c_table, c_source), unique key(uid)) DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin;" # start DM task only dmctl_start_task "$cur/conf/dm-task.yaml" "--remove-meta" run_dm_ctl_with_retry $WORK_DIR "127.0.0.1:$MASTER_PORT" \ From 2acfa129b4c916c95a690d29f67f4fa48a4fe2f1 Mon Sep 17 00:00:00 2001 From: lance6716 Date: Wed, 21 Dec 2022 15:15:06 +0800 Subject: [PATCH 21/26] sync code Signed-off-by: lance6716 --- dm/tests/sharding/conf/diff_config.toml | 4 +- dm/tests/sharding/data/db1.increment.sql | 16 ++--- dm/tests/sharding/data/db1.increment2.sql | 6 +- dm/tests/sharding/data/db1.prepare.sql | 4 +- dm/tests/sharding/data/db2.increment.sql | 16 ++--- dm/tests/sharding/data/db2.increment2.sql | 2 +- dm/tests/sharding/data/db2.prepare.sql | 4 +- dm/tests/sharding/run.sh | 2 - dm/tests/tls/run.sh | 71 ----------------------- 9 files changed, 25 insertions(+), 100 deletions(-) diff --git a/dm/tests/sharding/conf/diff_config.toml b/dm/tests/sharding/conf/diff_config.toml index 049e291d8c0..d6605365ffc 100644 --- a/dm/tests/sharding/conf/diff_config.toml +++ b/dm/tests/sharding/conf/diff_config.toml @@ -4,8 +4,6 @@ check-thread-count = 4 export-fix-sql = true -check-data-only = true - [task] output-dir = "/tmp/ticdc_dm_test/output" @@ -34,7 +32,7 @@ target-table = "t_target" [table-configs.config1] target-tables = ["db_target.t_target"] # currently sync_diff does not support json fields well -ignore-columns = ["info_json", "c_table", "c_source"] +ignore-columns = ["info_json"] index-fields = ["uid"] # range-placeholder diff --git a/dm/tests/sharding/data/db1.increment.sql b/dm/tests/sharding/data/db1.increment.sql index f090ab9dcfc..a0b93495b4d 100644 --- a/dm/tests/sharding/data/db1.increment.sql +++ b/dm/tests/sharding/data/db1.increment.sql @@ -3,18 +3,18 @@ insert into t1 (uid, name) values (10003, 'Buenos Aires'); update t1 set name = 'Gabriel José de la Concordia García Márquez' where `uid` = 10001; update t1 set name = 'One Hundred Years of Solitude' where name = 'Cien años de soledad'; insert into t2 (uid, name, info) values (20013, 'Colonel', '{}'); # DML to trigger fetch schema from downstream before DDL -alter table t1 add column age int after name; +alter table t1 add column age int; insert into t2 (uid, name, info) values (20023, 'Aureliano', '{}'); insert into t2 (uid, name, info) values (20033, 'Buendía', '{}'); -alter table t2 add column age int after name; +alter table t2 add column age int; insert into t2 (uid, name, age, info) values (20004, 'Colonel Aureliano Buendía', 301, '{}'); -alter table t2 add column info_json json GENERATED ALWAYS AS (`info`) VIRTUAL after name; +alter table t2 add column info_json json GENERATED ALWAYS AS (`info`) VIRTUAL; insert into t1 (uid, name, info) values (10004, 'Buenos Aires', '{"age": 10}'); insert into t2 (uid, name, info) values (20005, 'Buenos Aires', '{"age": 100}'); insert into t2 (uid, name, info) values (20006, 'Buenos Aires', '{"age": 1000}'); -alter table t1 add column info_json json GENERATED ALWAYS AS 
(`info`) VIRTUAL after name; -alter table t1 add column id_gen int as (uid + 1) after name; -alter table t2 add column id_gen int as (uid + 1) after name; +alter table t1 add column info_json json GENERATED ALWAYS AS (`info`) VIRTUAL; +alter table t1 add column id_gen int as (uid + 1); +alter table t2 add column id_gen int as (uid + 1); alter table t1 add unique (id_gen); alter table t2 add unique (id_gen); insert into t1 (uid, name, info) values (10005, 'Buenos Aires', '{"age": 100}'); @@ -25,9 +25,9 @@ insert into t1 (uid, name, info) values (10006, 'Buenos Aires', '{"age": 100}'); insert into t2 (uid, name, info) values (20008, 'Buenos Aires', '{"age": 200}'); -- test ZERO_DATE -alter table t2 add column create_by DATETIME NOT NULL DEFAULT '0000-00-00 00:00:00' after name; +alter table t2 add column create_by DATETIME NOT NULL DEFAULT '0000-00-00 00:00:00'; insert into t2 (uid, name, info, create_by) values (1, 'HaHa', '{"age": 300}', now()); insert into t2 (uid, name, info, create_by) values (2, 'HiHi', '{"age": 400}', '0000-00-00 00:00:01'); -alter table t1 add column create_by DATETIME NOT NULL DEFAULT '0000-00-00 00:00:00' after name; +alter table t1 add column create_by DATETIME NOT NULL DEFAULT '0000-00-00 00:00:00'; insert into t1 (uid, name, info, create_by) values (3, 'HaHa', '{"age": 300}', now()); insert into t1 (uid, name, info, create_by) values (4, 'HiHi', '{"age": 400}', '0000-00-00 00:00:01'); diff --git a/dm/tests/sharding/data/db1.increment2.sql b/dm/tests/sharding/data/db1.increment2.sql index 13cbdc7a010..bcabeb7a4d1 100644 --- a/dm/tests/sharding/data/db1.increment2.sql +++ b/dm/tests/sharding/data/db1.increment2.sql @@ -1,8 +1,8 @@ create database `sharding2`; use `sharding2`; -CREATE TABLE `t1` (`id` bigint(20) AUTO_INCREMENT, `uid` int(11), `name` varchar(80), `info` varchar(100), `age` int(11), `info_json` json GENERATED ALWAYS AS (`info`) VIRTUAL, `id_gen` int(11) GENERATED ALWAYS AS ((`uid` + 1)) VIRTUAL, create_by DATETIME NOT NULL DEFAULT '0000-00-00 00:00:00', PRIMARY KEY (`id`), UNIQUE KEY `uid` (`uid`), UNIQUE KEY `id_gen` (`id_gen`), KEY `multi_col_idx` (`uid`,`id_gen`)); -CREATE TABLE `t2` (`id` bigint(20) AUTO_INCREMENT, `uid` int(11), `name` varchar(80), `info` varchar(100), `age` int(11), `info_json` json GENERATED ALWAYS AS (`info`) VIRTUAL, `id_gen` int(11) GENERATED ALWAYS AS ((`uid` + 1)) VIRTUAL, create_by DATETIME NOT NULL DEFAULT '0000-00-00 00:00:00', PRIMARY KEY (`id`), UNIQUE KEY `uid` (`uid`), UNIQUE KEY `id_gen` (`id_gen`), KEY `multi_col_idx` (`uid`,`id_gen`)); -CREATE TABLE `t3` (`id` bigint(20) AUTO_INCREMENT, `uid` int(11), `name` varchar(80), `info` varchar(100), `age` int(11), `info_json` json GENERATED ALWAYS AS (`info`) VIRTUAL, `id_gen` int(11) GENERATED ALWAYS AS ((`uid` + 1)) VIRTUAL, create_by DATETIME NOT NULL DEFAULT '0000-00-00 00:00:00', PRIMARY KEY (`id`), UNIQUE KEY `uid` (`uid`), UNIQUE KEY `id_gen` (`id_gen`), KEY `multi_col_idx` (`uid`,`id_gen`)); +CREATE TABLE `t1` (`id` bigint(20) AUTO_INCREMENT, `uid` int(11), `name` varchar(80), `info` varchar(100), `age` int(11), `info_json` json GENERATED ALWAYS AS (`info`) VIRTUAL, `id_gen` int(11) GENERATED ALWAYS AS ((`uid` + 1)) VIRTUAL, create_by DATETIME NOT NULL DEFAULT '0000-00-00 00:00:00', PRIMARY KEY (`id`), UNIQUE KEY `uid` (`uid`), UNIQUE KEY `id_gen` (`id_gen`), KEY `multi_col_idx` (`uid`,`id_gen`)) auto_increment=4000; +CREATE TABLE `t2` (`id` bigint(20) AUTO_INCREMENT, `uid` int(11), `name` varchar(80), `info` varchar(100), `age` int(11), `info_json` json 
GENERATED ALWAYS AS (`info`) VIRTUAL, `id_gen` int(11) GENERATED ALWAYS AS ((`uid` + 1)) VIRTUAL, create_by DATETIME NOT NULL DEFAULT '0000-00-00 00:00:00', PRIMARY KEY (`id`), UNIQUE KEY `uid` (`uid`), UNIQUE KEY `id_gen` (`id_gen`), KEY `multi_col_idx` (`uid`,`id_gen`)) auto_increment=5000; +CREATE TABLE `t3` (`id` bigint(20) AUTO_INCREMENT, `uid` int(11), `name` varchar(80), `info` varchar(100), `age` int(11), `info_json` json GENERATED ALWAYS AS (`info`) VIRTUAL, `id_gen` int(11) GENERATED ALWAYS AS ((`uid` + 1)) VIRTUAL, create_by DATETIME NOT NULL DEFAULT '0000-00-00 00:00:00', PRIMARY KEY (`id`), UNIQUE KEY `uid` (`uid`), UNIQUE KEY `id_gen` (`id_gen`), KEY `multi_col_idx` (`uid`,`id_gen`)) auto_increment=6000; insert into `sharding1`.t1 (uid, name, info, age) values (10007, 'bruce wayne', '{"wealtch": "unknown"}', 1000), (10008, 'connelly', '{"boolean": true}', 1001); insert into `sharding1`.t2 (uid, name, info, age) values (20009, 'haxwell', '{"key": "value000"}', 3003); insert into `sharding2`.t1 (uid, name, info, age) values (60001, 'hello', '{"age": 201}', 133), (60002, 'world', '{"age": 202}', 233), (60003, 'final', '{"key": "value", "age": 203}', 333); diff --git a/dm/tests/sharding/data/db1.prepare.sql b/dm/tests/sharding/data/db1.prepare.sql index ddf9e6cddbe..faf726d8965 100644 --- a/dm/tests/sharding/data/db1.prepare.sql +++ b/dm/tests/sharding/data/db1.prepare.sql @@ -2,7 +2,7 @@ drop database if exists `sharding1`; drop database if exists `sharding2`; create database `sharding1`; use `sharding1`; -create table t1 (id bigint auto_increment, uid int, name varchar(80), info varchar(100), primary key (`id`), unique key(`uid`)); -create table t2 (id bigint auto_increment, uid int, name varchar(80), info varchar(100), primary key (`id`), unique key(`uid`)); +create table t1 (id bigint auto_increment, uid int, name varchar(80), info varchar(100), primary key (`id`), unique key(`uid`)) auto_increment=1; +create table t2 (id bigint auto_increment, uid int, name varchar(80), info varchar(100), primary key (`id`), unique key(`uid`)) auto_increment=1000; insert into t1 (uid, name) values (10001, 'Gabriel García Márquez'), (10002, 'Cien años de soledad'); insert into t2 (uid, name) values (20001, 'José Arcadio Buendía'), (20002, 'Úrsula Iguarán'), (20003, 'José Arcadio'); diff --git a/dm/tests/sharding/data/db2.increment.sql b/dm/tests/sharding/data/db2.increment.sql index 1a512609356..0460388eed0 100644 --- a/dm/tests/sharding/data/db2.increment.sql +++ b/dm/tests/sharding/data/db2.increment.sql @@ -1,15 +1,15 @@ use sharding1; delete from t3 where name = 'Santa Sofía de la Piedad'; insert into t2 (uid, name, info) values (40001, 'Amaranta', '{"age": 0}'); # DML to trigger fetch schema from downstream before DDL -alter table t2 add column age int after name; +alter table t2 add column age int; update t2 set uid = uid + 10000; -alter table t3 add column age int after name; +alter table t3 add column age int; update t3 set age = 1; -alter table t2 add column info_json json GENERATED ALWAYS AS (`info`) VIRTUAL after name; +alter table t2 add column info_json json GENERATED ALWAYS AS (`info`) VIRTUAL; update t3 set age = age + 10; -alter table t3 add column info_json json GENERATED ALWAYS AS (`info`) VIRTUAL after name; -alter table t3 add column id_gen int as (uid + 1) after name; -alter table t2 add column id_gen int as (uid + 1) after name; +alter table t3 add column info_json json GENERATED ALWAYS AS (`info`) VIRTUAL; +alter table t3 add column id_gen int as (uid + 1); +alter table 
t2 add column id_gen int as (uid + 1); alter table t2 add unique (id_gen); alter table t3 add unique (id_gen); update t2 set age = age + 10; @@ -18,9 +18,9 @@ alter table t3 add key multi_col_idx(uid, id_gen); update t3 set age = age + 10; -- test ZERO_DATE -alter table t2 add column create_by DATETIME NOT NULL DEFAULT '0000-00-00 00:00:00' after name; +alter table t2 add column create_by DATETIME NOT NULL DEFAULT '0000-00-00 00:00:00'; insert into t2 (uid, name, info, create_by) values (5, 'HaHa', '{"age": 300}', now()); insert into t2 (uid, name, info, create_by) values (6, 'HiHi', '{"age": 400}', '0000-00-00 00:00:01'); -alter table t3 add column create_by DATETIME NOT NULL DEFAULT '0000-00-00 00:00:00' after name; +alter table t3 add column create_by DATETIME NOT NULL DEFAULT '0000-00-00 00:00:00'; insert into t3 (uid, name, info, create_by) values (7, 'HaHa', '{"age": 300}', now()); insert into t3 (uid, name, info, create_by) values (8, 'HiHi', '{"age": 400}', '0000-00-00 00:00:01') diff --git a/dm/tests/sharding/data/db2.increment2.sql b/dm/tests/sharding/data/db2.increment2.sql index 5eec6602d51..8e3f0904647 100644 --- a/dm/tests/sharding/data/db2.increment2.sql +++ b/dm/tests/sharding/data/db2.increment2.sql @@ -1,6 +1,6 @@ create database `sharding2`; use `sharding2`; -CREATE TABLE `t4` (`id` bigint(20) AUTO_INCREMENT, `uid` int(11), `name` varchar(80), `info` varchar(100), `age` int(11), `info_json` json GENERATED ALWAYS AS (`info`) VIRTUAL, `id_gen` int(11) GENERATED ALWAYS AS ((`uid` + 1)) VIRTUAL, create_by DATETIME NOT NULL DEFAULT '0000-00-00 00:00:00', PRIMARY KEY (`id`), UNIQUE KEY `uid` (`uid`), UNIQUE KEY `id_gen` (`id_gen`), KEY `multi_col_idx` (`uid`,`id_gen`)); +CREATE TABLE `t4` (`id` bigint(20) AUTO_INCREMENT, `uid` int(11), `name` varchar(80), `info` varchar(100), `age` int(11), `info_json` json GENERATED ALWAYS AS (`info`) VIRTUAL, `id_gen` int(11) GENERATED ALWAYS AS ((`uid` + 1)) VIRTUAL, create_by DATETIME NOT NULL DEFAULT '0000-00-00 00:00:00', PRIMARY KEY (`id`), UNIQUE KEY `uid` (`uid`), UNIQUE KEY `id_gen` (`id_gen`), KEY `multi_col_idx` (`uid`,`id_gen`)) auto_increment=7000; insert into `sharding2`.t4 (uid, name, info) values (70001, 'golang', '{"age": 6}'), (70002, 'rust con', '{"age": 1}'), (70003, 'javascript', '{"key": "value", "age": 20}'); insert into `sharding1`.t2 (uid, name, info) values (50002, 'tyuil', '{"ghjkl;": "as a standby"}'), (50003, 'xxxxx', '{"boolean": false, "k": "v"}'); insert into `sharding1`.t3 (uid, name, info) values (30004, 'hhhhhh', '{"key1": "value000"}'); diff --git a/dm/tests/sharding/data/db2.prepare.sql b/dm/tests/sharding/data/db2.prepare.sql index 70e8dcdd911..d63bfaba1ea 100644 --- a/dm/tests/sharding/data/db2.prepare.sql +++ b/dm/tests/sharding/data/db2.prepare.sql @@ -2,7 +2,7 @@ drop database if exists `sharding1`; drop database if exists `sharding2`; create database `sharding1`; use `sharding1`; -create table t2 (id bigint auto_increment, uid int, name varchar(80), info varchar(100), primary key (`id`), unique key(`uid`)); -create table t3 (id bigint auto_increment, uid int, name varchar(80), info varchar(100), primary key (`id`), unique key(`uid`)); +create table t2 (id bigint auto_increment, uid int, name varchar(80), info varchar(100), primary key (`id`), unique key(`uid`)) auto_increment=2000; +create table t3 (id bigint auto_increment, uid int, name varchar(80), info varchar(100), primary key (`id`), unique key(`uid`)) auto_increment=3000; insert into t2 (uid, name, info) values (40000, 'Remedios Moscote', 
'{}'); insert into t3 (uid, name, info) values (30001, 'Aureliano José', '{}'), (30002, 'Santa Sofía de la Piedad', '{}'), (30003, '17 Aurelianos', NULL); diff --git a/dm/tests/sharding/run.sh b/dm/tests/sharding/run.sh index bf003a7f52c..96c82868ad9 100755 --- a/dm/tests/sharding/run.sh +++ b/dm/tests/sharding/run.sh @@ -52,8 +52,6 @@ function run() { check_rpc_alive $cur/../bin/check_worker_online 127.0.0.1:$WORKER2_PORT dmctl_operate_source create $WORK_DIR/source2.yaml $SOURCE_ID2 - run_sql_tidb "create database if not exists db_target;" - run_sql_tidb "create table db_target.t_target (id bigint auto_increment, uid int, name varchar(80), info varchar(100), c_table varchar(255), c_source varchar(255), primary key (id, c_table, c_source), unique key(uid)) DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin;" # start DM task only dmctl_start_task "$cur/conf/dm-task.yaml" "--remove-meta" run_dm_ctl_with_retry $WORK_DIR "127.0.0.1:$MASTER_PORT" \ diff --git a/dm/tests/tls/run.sh b/dm/tests/tls/run.sh index c117173188e..9fa05145d03 100644 --- a/dm/tests/tls/run.sh +++ b/dm/tests/tls/run.sh @@ -150,76 +150,6 @@ function test_worker_handle_multi_tls_tasks() { echo "============================== test_worker_handle_multi_tls_tasks success ==================================" } -function test_worker_download_certs_from_master() { - prepare_test - - cp $cur/conf/dm-master1.toml $WORK_DIR/ - cp $cur/conf/dm-master2.toml $WORK_DIR/ - cp $cur/conf/dm-master3.toml $WORK_DIR/ - cp $cur/conf/dm-worker1.toml $WORK_DIR/ - cp $cur/conf/dm-worker2.toml $WORK_DIR/ - cp $cur/conf/dm-task.yaml $WORK_DIR/ - - sed -i "s%dir-placeholer%$cur\/conf%g" $WORK_DIR/dm-master1.toml - sed -i "s%dir-placeholer%$cur\/conf%g" $WORK_DIR/dm-master2.toml - sed -i "s%dir-placeholer%$cur\/conf%g" $WORK_DIR/dm-master3.toml - sed -i "s%dir-placeholer%$cur\/conf%g" $WORK_DIR/dm-worker1.toml - sed -i "s%dir-placeholer%$cur\/conf%g" $WORK_DIR/dm-worker2.toml - sed -i "s%dir-placeholer%$cur\/conf%g" $WORK_DIR/dm-task.yaml - - run_dm_master $WORK_DIR/master1 $MASTER_PORT1 $WORK_DIR/dm-master1.toml - run_dm_master $WORK_DIR/master2 $MASTER_PORT2 $WORK_DIR/dm-master2.toml - run_dm_master $WORK_DIR/master3 $MASTER_PORT3 $WORK_DIR/dm-master3.toml - check_rpc_alive $cur/../bin/check_master_online 127.0.0.1:$MASTER_PORT1 "$cur/conf/ca.pem" "$cur/conf/dm.pem" "$cur/conf/dm.key" - check_rpc_alive $cur/../bin/check_master_online 127.0.0.1:$MASTER_PORT2 "$cur/conf/ca.pem" "$cur/conf/dm.pem" "$cur/conf/dm.key" - check_rpc_alive $cur/../bin/check_master_online 127.0.0.1:$MASTER_PORT3 "$cur/conf/ca.pem" "$cur/conf/dm.pem" "$cur/conf/dm.key" - - # add failpoint to make loader always fail - export GO_FAILPOINTS="github.com/pingcap/tiflow/dm/loader/lightningAlwaysErr=return()" - run_dm_worker $WORK_DIR/worker1 $WORKER1_PORT $WORK_DIR/dm-worker1.toml - check_rpc_alive $cur/../bin/check_worker_online 127.0.0.1:$WORKER1_PORT "$cur/conf/ca.pem" "$cur/conf/dm.pem" "$cur/conf/dm.key" - - # operate mysql config to worker - run_dm_ctl_with_tls_and_retry $WORK_DIR "127.0.0.1:$MASTER_PORT" $cur/conf/ca.pem $cur/conf/dm.pem $cur/conf/dm.key \ - "operate-source create $WORK_DIR/source1.yaml" \ - "\"result\": true" 2 \ - "\"source\": \"$SOURCE_ID1\"" 1 - - echo "start task and check stage" - run_dm_ctl_with_tls_and_retry $WORK_DIR "127.0.0.1:$MASTER_PORT" $cur/conf/ca.pem $cur/conf/dm.pem $cur/conf/dm.key \ - "start-task $WORK_DIR/dm-task.yaml --remove-meta=true" - - # task should be paused. 
- run_dm_ctl_with_tls_and_retry $WORK_DIR "127.0.0.1:$MASTER_PORT" $cur/conf/ca.pem $cur/conf/dm.pem $cur/conf/dm.key \ - "query-status test" \ - "\"result\": true" 2 \ - "\"stage\": \"Paused\"" 1 - - # change task-ca.pem name to test wheather dm-worker will dump certs from dm-master - mv "$cur/conf/task-ca.pem" "$cur/conf/task-ca.pem.bak" - - # kill dm-worker 1 and clean the failpoint - export GO_FAILPOINTS='' - kill_process dm-worker1 - check_port_offline $WORKER1_PORT 20 - - rm -rf "$WORK_DIR/tidb_lightning_tls_config*" - run_dm_worker $WORK_DIR/worker1 $WORKER1_PORT $WORK_DIR/dm-worker1.toml - check_rpc_alive $cur/../bin/check_worker_online 127.0.0.1:$WORKER1_PORT "$cur/conf/ca.pem" "$cur/conf/dm.pem" "$cur/conf/dm.key" - - # dm-worker will dump task-ca.pem from dm-master and save it to dm-worker-dir/tidb_lightning_taskname/ca.pem - # let's try use this file to connect dm-master - run_dm_ctl_with_tls_and_retry $WORK_DIR "127.0.0.1:$MASTER_PORT" "$WORK_DIR/worker1/lightning_tls_test/ca.pem" $cur/conf/dm.pem $cur/conf/dm.key \ - "query-status test" \ - "\"result\": true" 2 \ - "\"unit\": \"Sync\"" 1 - - echo "check data" - check_sync_diff $WORK_DIR $cur/conf/diff_config.toml - mv "$cur/conf/task-ca.pem.bak" "$cur/conf/task-ca.pem" - echo "============================== test_worker_download_certs_from_master success ==================================" -} - function test_worker_ha_when_enable_source_tls() { prepare_test @@ -449,7 +379,6 @@ function run() { test_master_ha_when_enable_tidb_and_only_ca_source_tls test_worker_handle_multi_tls_tasks - test_worker_download_certs_from_master test_worker_ha_when_enable_source_tls test_source_and_target_with_empty_tlsconfig From 615f2cd0cb026a1ab272e56690b8719aaa265947 Mon Sep 17 00:00:00 2001 From: lance6716 Date: Wed, 21 Dec 2022 15:19:22 +0800 Subject: [PATCH 22/26] should finish Signed-off-by: lance6716 --- dm/tests/sharding/conf/dm-task.yaml | 6 ------ 1 file changed, 6 deletions(-) diff --git a/dm/tests/sharding/conf/dm-task.yaml b/dm/tests/sharding/conf/dm-task.yaml index d65d2ec0182..a1bbeda5d8b 100644 --- a/dm/tests/sharding/conf/dm-task.yaml +++ b/dm/tests/sharding/conf/dm-task.yaml @@ -39,12 +39,6 @@ routes: table-pattern: t* target-schema: db_target target-table: t_target - extract-table: - table-regexp: "(.*)" - target-column: "c_table" - extract-source: - source-regexp: "(.*)" - target-column: "c_source" sharding-route-rules-schema: schema-pattern: sharding* From 781a65a8e0c8eea6acdfaae746416b995c7f27ae Mon Sep 17 00:00:00 2001 From: lance6716 Date: Thu, 22 Dec 2022 11:40:08 +0800 Subject: [PATCH 23/26] sync code Signed-off-by: lance6716 --- dm/tests/sequence_sharding/data/db1.prepare.sql | 4 ++-- dm/tests/sequence_sharding/data/db2.increment.sql | 2 +- dm/tests/sequence_sharding/data/db2.prepare.sql | 6 +++--- 3 files changed, 6 insertions(+), 6 deletions(-) diff --git a/dm/tests/sequence_sharding/data/db1.prepare.sql b/dm/tests/sequence_sharding/data/db1.prepare.sql index 98e635d6b46..a4b361e81d0 100644 --- a/dm/tests/sequence_sharding/data/db1.prepare.sql +++ b/dm/tests/sequence_sharding/data/db1.prepare.sql @@ -1,7 +1,7 @@ drop database if exists `sharding_seq`; create database `sharding_seq`; use `sharding_seq`; -create table t1 (id bigint auto_increment,uid int,name varchar(20),primary key (`id`),unique key(`uid`)) DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin; -create table t2 (id bigint auto_increment,uid int,name varchar(20),primary key (`id`),unique key(`uid`)) DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin; +create table t1 (id 
bigint auto_increment,uid int,name varchar(20),primary key (`id`),unique key(`uid`)) DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin auto_increment=100000; +create table t2 (id bigint auto_increment,uid int,name varchar(20),primary key (`id`),unique key(`uid`)) DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin auto_increment=200000; insert into t1 (uid,name) values (100001,'igvApUx'),(100002,'qUyrcOBkwDK'); insert into t2 (uid,name) values (200001,'EycletJHetWHMfH'),(200002,'ytkIaCOwXnWmy'),(200003,'MWQeWw""''rNmtGxzGp'); diff --git a/dm/tests/sequence_sharding/data/db2.increment.sql b/dm/tests/sequence_sharding/data/db2.increment.sql index 6bd92ca1900..5a782490031 100644 --- a/dm/tests/sequence_sharding/data/db2.increment.sql +++ b/dm/tests/sequence_sharding/data/db2.increment.sql @@ -1,5 +1,5 @@ use sharding_seq; -delete from t3 where id = 400002; +delete from t3 where uid = 400002; insert into t4 (uid,name) values(500005,'`.`'),(500006,'exit'); alter table t2 add column c int; alter table t2 add index c(c); diff --git a/dm/tests/sequence_sharding/data/db2.prepare.sql b/dm/tests/sequence_sharding/data/db2.prepare.sql index b06fe6e6b2b..dd0c9778490 100644 --- a/dm/tests/sequence_sharding/data/db2.prepare.sql +++ b/dm/tests/sequence_sharding/data/db2.prepare.sql @@ -1,9 +1,9 @@ drop database if exists `sharding_seq`; create database `sharding_seq`; use `sharding_seq`; -create table t2 (id bigint auto_increment,uid int,name varchar(20),primary key (`id`),unique key(`uid`)) DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin; -create table t3 (id bigint auto_increment,uid int,name varchar(20),primary key (`id`),unique key(`uid`)) DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin; -create table t4 (id bigint auto_increment,uid int,name varchar(20),primary key (`id`),unique key(`uid`)) DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin; +create table t2 (id bigint auto_increment,uid int,name varchar(20),primary key (`id`),unique key(`uid`)) DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin auto_increment=300000; +create table t3 (id bigint auto_increment,uid int,name varchar(20),primary key (`id`),unique key(`uid`)) DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin auto_increment=400000; +create table t4 (id bigint auto_increment,uid int,name varchar(20),primary key (`id`),unique key(`uid`)) DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin auto_increment=500000; insert into t2 (uid,name) values (300001,'io'),(300002,'xOKvsDsofmAzEF'); insert into t3 (uid,name) values (400001,'eXcRSo'),(400002,'QOP'),(400003,'DUotcCayM'); insert into t4 (uid,name) values (500001,'`id` = 15'),(500002,'942032497'),(500003,'UrhcHUbwsDMZrvJxM'); From 26fea3afc35e993e2489a328869715e0b0afff9d Mon Sep 17 00:00:00 2001 From: lance6716 Date: Thu, 22 Dec 2022 11:50:07 +0800 Subject: [PATCH 24/26] fix CI Signed-off-by: lance6716 --- dm/tests/online_ddl/conf/diff_config.toml | 3 +-- dm/tests/online_ddl/conf/dm-task.yaml | 14 +------------- dm/tests/online_ddl/data/gho.db1.increment.sql | 8 ++++---- dm/tests/online_ddl/data/gho.db1.increment2.sql | 4 ++-- dm/tests/online_ddl/data/gho.db1.prepare.sql | 4 ++-- dm/tests/online_ddl/data/gho.db2.increment.sql | 8 ++++---- dm/tests/online_ddl/data/gho.db2.increment2.sql | 4 ++-- dm/tests/online_ddl/data/gho.db2.prepare.sql | 4 ++-- dm/tests/online_ddl/data/pt.db1.increment.sql | 8 ++++---- dm/tests/online_ddl/data/pt.db1.increment2.sql | 4 ++-- dm/tests/online_ddl/data/pt.db1.prepare.sql | 4 ++-- dm/tests/online_ddl/data/pt.db2.increment.sql | 8 ++++---- dm/tests/online_ddl/data/pt.db2.increment2.sql | 4 ++-- 
dm/tests/online_ddl/data/pt.db2.prepare.sql | 4 ++-- dm/tests/online_ddl/run.sh | 4 ---- 15 files changed, 34 insertions(+), 51 deletions(-) diff --git a/dm/tests/online_ddl/conf/diff_config.toml b/dm/tests/online_ddl/conf/diff_config.toml index 992d34075ef..dbb6555f386 100644 --- a/dm/tests/online_ddl/conf/diff_config.toml +++ b/dm/tests/online_ddl/conf/diff_config.toml @@ -4,7 +4,7 @@ check-thread-count = 4 export-fix-sql = true -check-data-only = true +check-struct-only = false [task] output-dir = "/tmp/ticdc_dm_test/output" @@ -32,7 +32,6 @@ target-table = "pt_t_target" [table-configs] [table-configs.config1] target-tables = ["online_ddl.*"] -ignore-columns = ["c_table", "c_source"] [data-sources] [data-sources.mysql1] diff --git a/dm/tests/online_ddl/conf/dm-task.yaml b/dm/tests/online_ddl/conf/dm-task.yaml index 9a8e5a851a7..636051aad25 100644 --- a/dm/tests/online_ddl/conf/dm-task.yaml +++ b/dm/tests/online_ddl/conf/dm-task.yaml @@ -45,29 +45,17 @@ routes: table-pattern: gho_t* target-schema: online_ddl target-table: gho_t_target - extract-table: - table-regexp: "(.*)" - target-column: "c_table" - extract-source: - source-regexp: "(.*)" - target-column: "c_source" pt-sharding-route-rules-table: schema-pattern: online_ddl table-pattern: pt_t* target-schema: online_ddl target-table: pt_t_target - extract-table: - table-regexp: "(.*)" - target-column: "c_table" - extract-source: - source-regexp: "(.*)" - target-column: "c_source" sharding-route-rules-schema: schema-pattern: online_ddl target-schema: online_ddl - + filters: filter-rule-index: schema-pattern: "*" diff --git a/dm/tests/online_ddl/data/gho.db1.increment.sql b/dm/tests/online_ddl/data/gho.db1.increment.sql index 79f307c691e..7b85ec73005 100644 --- a/dm/tests/online_ddl/data/gho.db1.increment.sql +++ b/dm/tests/online_ddl/data/gho.db1.increment.sql @@ -2,15 +2,15 @@ use online_ddl; insert into gho_t1 (uid, name) values (10003, 'Buenos Aires'); update gho_t1 set name = 'Gabriel José de la Concordia García Márquez' where `uid` = 10001; update gho_t1 set name = 'One Hundred Years of Solitude' where name = 'Cien años de soledad'; -alter table gho_t1 add column age int after name; -alter table gho_t2 add column age int after name; +alter table gho_t1 add column age int; +alter table gho_t2 add column age int; alter table gho_t2 add key name (name); alter table gho_t1 add key name (name); insert into gho_t2 (uid, name, age, info) values (20004, 'Colonel Aureliano Buendía', 301, '{}'); -alter table gho_t2 add column info_json json GENERATED ALWAYS AS (`info`) VIRTUAL after name; +alter table gho_t2 add column info_json json GENERATED ALWAYS AS (`info`) VIRTUAL; insert into gho_t1 (uid, name, info) values (10004, 'Buenos Aires', '{"age": 10}'); insert into gho_t2 (uid, name, info) values (20005, 'Buenos Aires', '{"age": 100}'); insert into gho_t2 (uid, name, info) values (20006, 'Buenos Aires', '{"age": 1000}'); -alter table gho_t1 add column info_json json GENERATED ALWAYS AS (`info`) VIRTUAL after name; +alter table gho_t1 add column info_json json GENERATED ALWAYS AS (`info`) VIRTUAL; insert into gho_t1 (uid, name, info) values (10005, 'Buenos Aires', '{"age": 100}'); insert into gho_t2 (uid, name, info) values (20007, 'Buenos Aires', '{"age": 200}'); diff --git a/dm/tests/online_ddl/data/gho.db1.increment2.sql b/dm/tests/online_ddl/data/gho.db1.increment2.sql index e6d576473a4..ff3fec0cc91 100644 --- a/dm/tests/online_ddl/data/gho.db1.increment2.sql +++ b/dm/tests/online_ddl/data/gho.db1.increment2.sql @@ -1,8 +1,8 @@ use 
online_ddl; insert into gho_t1 (uid, name, info) values (10006, 'name of 10006', '{"age": 10006}'); insert into gho_t2 (uid, name, info) values (20008, 'name of 20008', '{"age": 20008}'); -alter table gho_t1 add column address varchar(255) after name; -alter table gho_t2 add column address varchar(255) after name; +alter table gho_t1 add column address varchar(255); +alter table gho_t2 add column address varchar(255); alter table gho_t1 add key address (address); alter table gho_t2 add key address (address); insert into gho_t2 (uid, name, info, address) values (20009, 'name of 20009', '{"age": 20009}', 'address of 20009'); diff --git a/dm/tests/online_ddl/data/gho.db1.prepare.sql b/dm/tests/online_ddl/data/gho.db1.prepare.sql index 6c67c982868..d0b97fe8b03 100644 --- a/dm/tests/online_ddl/data/gho.db1.prepare.sql +++ b/dm/tests/online_ddl/data/gho.db1.prepare.sql @@ -1,7 +1,7 @@ drop database if exists `online_ddl`; create database `online_ddl`; use `online_ddl`; -create table gho_t1 (id bigint auto_increment, uid int, name varchar(80), info varchar(100), primary key (`id`), unique key(`uid`)) DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin; -create table gho_t2 (id bigint auto_increment, uid int, name varchar(80), info varchar(100), primary key (`id`), unique key(`uid`)) DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin; +create table gho_t1 (id bigint auto_increment, uid int, name varchar(80), info varchar(100), primary key (`id`), unique key(`uid`)) DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin auto_increment=100000; +create table gho_t2 (id bigint auto_increment, uid int, name varchar(80), info varchar(100), primary key (`id`), unique key(`uid`)) DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin auto_increment=200000; insert into gho_t1 (uid, name) values (10001, 'Gabriel García Márquez'), (10002, 'Cien años de soledad'); insert into gho_t2 (uid, name) values (20001, 'José Arcadio Buendía'), (20002, 'Úrsula Iguarán'), (20003, 'José Arcadio'); diff --git a/dm/tests/online_ddl/data/gho.db2.increment.sql b/dm/tests/online_ddl/data/gho.db2.increment.sql index 5f674778ce6..087c51244e4 100644 --- a/dm/tests/online_ddl/data/gho.db2.increment.sql +++ b/dm/tests/online_ddl/data/gho.db2.increment.sql @@ -1,12 +1,12 @@ use online_ddl; delete from gho_t3 where name = 'Santa Sofía de la Piedad'; -alter table gho_t2 add column age int after name; +alter table gho_t2 add column age int; update gho_t2 set uid = uid + 10000; -alter table gho_t3 add column age int after name; +alter table gho_t3 add column age int; update gho_t3 set age = 1; alter table gho_t2 add key name (name); alter table gho_t3 add key name (name); -alter table gho_t2 add column info_json json GENERATED ALWAYS AS (`info`) VIRTUAL after name; +alter table gho_t2 add column info_json json GENERATED ALWAYS AS (`info`) VIRTUAL; update gho_t3 set age = age + 10; -alter table gho_t3 add column info_json json GENERATED ALWAYS AS (`info`) VIRTUAL after name; +alter table gho_t3 add column info_json json GENERATED ALWAYS AS (`info`) VIRTUAL; update gho_t2 set age = age + 10; diff --git a/dm/tests/online_ddl/data/gho.db2.increment2.sql b/dm/tests/online_ddl/data/gho.db2.increment2.sql index 2bfc9baff8f..717ad86be8b 100644 --- a/dm/tests/online_ddl/data/gho.db2.increment2.sql +++ b/dm/tests/online_ddl/data/gho.db2.increment2.sql @@ -1,8 +1,8 @@ use online_ddl; insert into gho_t3 (uid, name, info) values (30004, 'name of 30004', '{"age": 30004}'); insert into gho_t2 (uid, name, info) values (50002, 'name of 50002', '{"age": 50002}'); -alter table gho_t3 add 
column address varchar(255) after name; -alter table gho_t2 add column address varchar(255) after name; +alter table gho_t3 add column address varchar(255); +alter table gho_t2 add column address varchar(255); alter table gho_t2 add key address (address); alter table gho_t3 add key address (address); insert into gho_t2 (uid, name, info, address) values (50003, 'name of 50003', '{"age": 50003}', 'address of 50003'); diff --git a/dm/tests/online_ddl/data/gho.db2.prepare.sql b/dm/tests/online_ddl/data/gho.db2.prepare.sql index b2d67976b92..661b82991f5 100644 --- a/dm/tests/online_ddl/data/gho.db2.prepare.sql +++ b/dm/tests/online_ddl/data/gho.db2.prepare.sql @@ -1,7 +1,7 @@ drop database if exists `online_ddl`; create database `online_ddl`; use `online_ddl`; -create table gho_t2 (id bigint auto_increment, uid int, name varchar(80), info varchar(100), primary key (`id`), unique key(`uid`)) DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin; -create table gho_t3 (id bigint auto_increment, uid int, name varchar(80), info varchar(100), primary key (`id`), unique key(`uid`)) DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin; +create table gho_t2 (id bigint auto_increment, uid int, name varchar(80), info varchar(100), primary key (`id`), unique key(`uid`)) DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin auto_increment=300000; +create table gho_t3 (id bigint auto_increment, uid int, name varchar(80), info varchar(100), primary key (`id`), unique key(`uid`)) DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin auto_increment=400000; insert into gho_t2 (uid, name, info) values (40000, 'Remedios Moscote', '{}'), (40001, 'Amaranta', '{"age": 0}'); insert into gho_t3 (uid, name, info) values (30001, 'Aureliano José', '{}'), (30002, 'Santa Sofía de la Piedad', '{}'), (30003, '17 Aurelianos', NULL); diff --git a/dm/tests/online_ddl/data/pt.db1.increment.sql b/dm/tests/online_ddl/data/pt.db1.increment.sql index 19a97705b38..6b2a0ce51e2 100644 --- a/dm/tests/online_ddl/data/pt.db1.increment.sql +++ b/dm/tests/online_ddl/data/pt.db1.increment.sql @@ -2,15 +2,15 @@ use online_ddl; insert into pt_t1 (uid, name) values (10003, 'Buenos Aires'); update pt_t1 set name = 'Gabriel José de la Concordia García Márquez' where `uid` = 10001; update pt_t1 set name = 'One Hundred Years of Solitude' where name = 'Cien años de soledad'; -alter table pt_t1 add column age int after name; -alter table pt_t2 add column age int after name; +alter table pt_t1 add column age int; +alter table pt_t2 add column age int; alter table pt_t2 add key name (name); alter table pt_t1 add key name (name); insert into pt_t2 (uid, name, age, info) values (20004, 'Colonel Aureliano Buendía', 301, '{}'); -alter table pt_t2 add column info_json json GENERATED ALWAYS AS (`info`) VIRTUAL after name; +alter table pt_t2 add column info_json json GENERATED ALWAYS AS (`info`) VIRTUAL; insert into pt_t1 (uid, name, info) values (10004, 'Buenos Aires', '{"age": 10}'); insert into pt_t2 (uid, name, info) values (20005, 'Buenos Aires', '{"age": 100}'); insert into pt_t2 (uid, name, info) values (20006, 'Buenos Aires', '{"age": 1000}'); -alter table pt_t1 add column info_json json GENERATED ALWAYS AS (`info`) VIRTUAL after name; +alter table pt_t1 add column info_json json GENERATED ALWAYS AS (`info`) VIRTUAL; insert into pt_t1 (uid, name, info) values (10005, 'Buenos Aires', '{"age": 100}'); insert into pt_t2 (uid, name, info) values (20007, 'Buenos Aires', '{"age": 200}'); diff --git a/dm/tests/online_ddl/data/pt.db1.increment2.sql b/dm/tests/online_ddl/data/pt.db1.increment2.sql 
index 73a96305f54..3197f478b66 100644 --- a/dm/tests/online_ddl/data/pt.db1.increment2.sql +++ b/dm/tests/online_ddl/data/pt.db1.increment2.sql @@ -1,8 +1,8 @@ use online_ddl; insert into pt_t1 (uid, name, info) values (10006, 'name of 10006', '{"age": 10006}'); insert into pt_t2 (uid, name, info) values (20008, 'name of 20008', '{"age": 20008}'); -alter table pt_t1 add column address varchar(255) after name; -alter table pt_t2 add column address varchar(255) after name; +alter table pt_t1 add column address varchar(255); +alter table pt_t2 add column address varchar(255); alter table pt_t1 add key address (address); alter table pt_t2 add key address (address); insert into pt_t2 (uid, name, info, address) values (20009, 'name of 20009', '{"age": 20009}', 'address of 20009'); diff --git a/dm/tests/online_ddl/data/pt.db1.prepare.sql b/dm/tests/online_ddl/data/pt.db1.prepare.sql index 845572e35f9..be9d090c1d8 100644 --- a/dm/tests/online_ddl/data/pt.db1.prepare.sql +++ b/dm/tests/online_ddl/data/pt.db1.prepare.sql @@ -1,5 +1,5 @@ use `online_ddl`; -create table pt_t1 (id bigint auto_increment, uid int, name varchar(80), info varchar(100), primary key (`id`), unique key(`uid`)) DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin; -create table pt_t2 (id bigint auto_increment, uid int, name varchar(80), info varchar(100), primary key (`id`), unique key(`uid`)) DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin; +create table pt_t1 (id bigint auto_increment, uid int, name varchar(80), info varchar(100), primary key (`id`), unique key(`uid`)) DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin auto_increment=100000; +create table pt_t2 (id bigint auto_increment, uid int, name varchar(80), info varchar(100), primary key (`id`), unique key(`uid`)) DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin auto_increment=200000; insert into pt_t1 (uid, name) values (10001, 'Gabriel García Márquez'), (10002, 'Cien años de soledad'); insert into pt_t2 (uid, name) values (20001, 'José Arcadio Buendía'), (20002, 'Úrsula Iguarán'), (20003, 'José Arcadio'); diff --git a/dm/tests/online_ddl/data/pt.db2.increment.sql b/dm/tests/online_ddl/data/pt.db2.increment.sql index 04646923ee5..cf4a1cc0752 100644 --- a/dm/tests/online_ddl/data/pt.db2.increment.sql +++ b/dm/tests/online_ddl/data/pt.db2.increment.sql @@ -1,12 +1,12 @@ use online_ddl; delete from pt_t3 where name = 'Santa Sofía de la Piedad'; -alter table pt_t2 add column age int after name; +alter table pt_t2 add column age int; update pt_t2 set uid = uid + 10000; -alter table pt_t3 add column age int after name; +alter table pt_t3 add column age int; update pt_t3 set age = 1; alter table pt_t2 add key name (name); alter table pt_t3 add key name (name); -alter table pt_t2 add column info_json json GENERATED ALWAYS AS (`info`) VIRTUAL after name; +alter table pt_t2 add column info_json json GENERATED ALWAYS AS (`info`) VIRTUAL; update pt_t3 set age = age + 10; -alter table pt_t3 add column info_json json GENERATED ALWAYS AS (`info`) VIRTUAL after name; +alter table pt_t3 add column info_json json GENERATED ALWAYS AS (`info`) VIRTUAL; update pt_t2 set age = age + 10; diff --git a/dm/tests/online_ddl/data/pt.db2.increment2.sql b/dm/tests/online_ddl/data/pt.db2.increment2.sql index 2a4adf345e2..2fea0930568 100644 --- a/dm/tests/online_ddl/data/pt.db2.increment2.sql +++ b/dm/tests/online_ddl/data/pt.db2.increment2.sql @@ -1,8 +1,8 @@ use online_ddl; insert into pt_t3 (uid, name, info) values (30004, 'name of 30004', '{"age": 30004}'); insert into pt_t2 (uid, name, info) values (50002, 'name of 
50002', '{"age": 50002}'); -alter table pt_t3 add column address varchar(255) after name; -alter table pt_t2 add column address varchar(255) after name; +alter table pt_t3 add column address varchar(255); +alter table pt_t2 add column address varchar(255); alter table pt_t2 add key address (address); alter table pt_t3 add key address (address); insert into pt_t2 (uid, name, info, address) values (50003, 'name of 50003', '{"age": 50003}', 'address of 50003'); diff --git a/dm/tests/online_ddl/data/pt.db2.prepare.sql b/dm/tests/online_ddl/data/pt.db2.prepare.sql index 4e76366f6c7..898ae34f4e9 100644 --- a/dm/tests/online_ddl/data/pt.db2.prepare.sql +++ b/dm/tests/online_ddl/data/pt.db2.prepare.sql @@ -1,5 +1,5 @@ use `online_ddl`; -create table pt_t2 (id bigint auto_increment, uid int, name varchar(80), info varchar(100), primary key (`id`), unique key(`uid`)) DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin; -create table pt_t3 (id bigint auto_increment, uid int, name varchar(80), info varchar(100), primary key (`id`), unique key(`uid`)) DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin; +create table pt_t2 (id bigint auto_increment, uid int, name varchar(80), info varchar(100), primary key (`id`), unique key(`uid`)) DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin auto_increment=300000; +create table pt_t3 (id bigint auto_increment, uid int, name varchar(80), info varchar(100), primary key (`id`), unique key(`uid`)) DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin auto_increment=400000; insert into pt_t2 (uid, name, info) values (40000, 'Remedios Moscote', '{}'), (40001, 'Amaranta', '{"age": 0}'); insert into pt_t3 (uid, name, info) values (30001, 'Aureliano José', '{}'), (30002, 'Santa Sofía de la Piedad', '{}'), (30003, '17 Aurelianos', NULL); diff --git a/dm/tests/online_ddl/run.sh b/dm/tests/online_ddl/run.sh index a2455759017..4991ccbb0a2 100755 --- a/dm/tests/online_ddl/run.sh +++ b/dm/tests/online_ddl/run.sh @@ -41,10 +41,6 @@ function run() { run_sql_file $cur/data/pt.db2.prepare.sql $MYSQL_HOST2 $MYSQL_PORT2 $MYSQL_PASSWORD2 check_contains 'Query OK, 3 rows affected' - run_sql_tidb "create database if not exists online_ddl;" - run_sql_tidb "create table online_ddl.pt_t_target (id bigint auto_increment, uid int, name varchar(80), info varchar(100), c_table varchar(255), c_source varchar(255), primary key (id, c_table, c_source), unique key(uid)) DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin;" - run_sql_tidb "create table online_ddl.gho_t_target (id bigint auto_increment, uid int, name varchar(80), info varchar(100), c_table varchar(255), c_source varchar(255), primary key (id, c_table, c_source), unique key(uid)) DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin;" - inject_points=( "github.com/pingcap/tiflow/dm/syncer/online-ddl-tools/ExitAfterSaveOnlineDDL=return()" "github.com/pingcap/tiflow/dm/syncer/ExitAfterSaveOnlineDDL=return()" From dfa235913c59429b9c9a40e6008e125508f3cea8 Mon Sep 17 00:00:00 2001 From: lance6716 Date: Thu, 22 Dec 2022 14:18:59 +0800 Subject: [PATCH 25/26] sync code Signed-off-by: lance6716 --- dm/tests/sequence_sharding_removemeta/data/db1.prepare.sql | 4 ++-- dm/tests/sequence_sharding_removemeta/data/db2.prepare.sql | 6 +++--- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/dm/tests/sequence_sharding_removemeta/data/db1.prepare.sql b/dm/tests/sequence_sharding_removemeta/data/db1.prepare.sql index 98e635d6b46..144b4543ed0 100644 --- a/dm/tests/sequence_sharding_removemeta/data/db1.prepare.sql +++ b/dm/tests/sequence_sharding_removemeta/data/db1.prepare.sql @@ -1,7 +1,7 
@@ drop database if exists `sharding_seq`; create database `sharding_seq`; use `sharding_seq`; -create table t1 (id bigint auto_increment,uid int,name varchar(20),primary key (`id`),unique key(`uid`)) DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin; -create table t2 (id bigint auto_increment,uid int,name varchar(20),primary key (`id`),unique key(`uid`)) DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin; +create table t1 (id bigint auto_increment,uid int,name varchar(20),primary key (`id`),unique key(`uid`)) DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin auto_increment=10000; +create table t2 (id bigint auto_increment,uid int,name varchar(20),primary key (`id`),unique key(`uid`)) DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin auto_increment=20000; insert into t1 (uid,name) values (100001,'igvApUx'),(100002,'qUyrcOBkwDK'); insert into t2 (uid,name) values (200001,'EycletJHetWHMfH'),(200002,'ytkIaCOwXnWmy'),(200003,'MWQeWw""''rNmtGxzGp'); diff --git a/dm/tests/sequence_sharding_removemeta/data/db2.prepare.sql b/dm/tests/sequence_sharding_removemeta/data/db2.prepare.sql index b06fe6e6b2b..159748aca21 100644 --- a/dm/tests/sequence_sharding_removemeta/data/db2.prepare.sql +++ b/dm/tests/sequence_sharding_removemeta/data/db2.prepare.sql @@ -1,9 +1,9 @@ drop database if exists `sharding_seq`; create database `sharding_seq`; use `sharding_seq`; -create table t2 (id bigint auto_increment,uid int,name varchar(20),primary key (`id`),unique key(`uid`)) DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin; -create table t3 (id bigint auto_increment,uid int,name varchar(20),primary key (`id`),unique key(`uid`)) DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin; -create table t4 (id bigint auto_increment,uid int,name varchar(20),primary key (`id`),unique key(`uid`)) DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin; +create table t2 (id bigint auto_increment,uid int,name varchar(20),primary key (`id`),unique key(`uid`)) DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin auto_increment=30000; +create table t3 (id bigint auto_increment,uid int,name varchar(20),primary key (`id`),unique key(`uid`)) DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin auto_increment=40000; +create table t4 (id bigint auto_increment,uid int,name varchar(20),primary key (`id`),unique key(`uid`)) DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin auto_increment=50000; insert into t2 (uid,name) values (300001,'io'),(300002,'xOKvsDsofmAzEF'); insert into t3 (uid,name) values (400001,'eXcRSo'),(400002,'QOP'),(400003,'DUotcCayM'); insert into t4 (uid,name) values (500001,'`id` = 15'),(500002,'942032497'),(500003,'UrhcHUbwsDMZrvJxM'); From 5fd3e733aafb70d9ef90c29ada825d883f5fc7fd Mon Sep 17 00:00:00 2001 From: lance6716 Date: Fri, 23 Dec 2022 15:16:17 +0800 Subject: [PATCH 26/26] fix error code --- dm/_utils/terror_gen/errors_release.txt | 2 +- dm/errors.toml | 10 +- dm/pkg/terror/errcode_string.go | 1195 +++++++++++++++++++++++ 3 files changed, 1202 insertions(+), 5 deletions(-) create mode 100644 dm/pkg/terror/errcode_string.go diff --git a/dm/_utils/terror_gen/errors_release.txt b/dm/_utils/terror_gen/errors_release.txt index 8040eb9fc62..d4399a4a73e 100644 --- a/dm/_utils/terror_gen/errors_release.txt +++ b/dm/_utils/terror_gen/errors_release.txt @@ -195,8 +195,8 @@ ErrConfigLoaderS3NotSupport,[code=20059:class=config:scope=internal:level=high], ErrConfigInvalidSafeModeDuration,[code=20060:class=config:scope=internal:level=medium], "Message: safe-mode-duration '%s' parsed failed: %v, Workaround: Please check the `safe-mode-duration` is correct." 
ErrConfigConfictSafeModeDurationAndSafeMode,[code=20061:class=config:scope=internal:level=low], "Message: safe-mode(true) conflicts with safe-mode-duration(0s), Workaround: Please set safe-mode to false or safe-mode-duration to non-zero." ErrConfigInvalidPhysicalDuplicateResolution,[code=20062:class=config:scope=internal:level=medium], "Message: invalid load on-duplicate-physical option '%s', Workaround: Please choose a valid value in ['none', 'manual'] or leave it empty." -ErrConfigColumnMappingDeprecated,[code=20063:class=config:scope=internal:level=high], "Message: column-mapping is not supported since v6.6.0, Workaround: Please use extract-table/extract-schema/extract-source to handle data conflict when merge tables. See https://docs.pingcap.com/tidb/v6.4/task-configuration-file-full#task-configuration-file-template-advanced" ErrConfigInvalidPhysicalChecksum,[code=20063:class=config:scope=internal:level=medium], "Message: invalid load checksum-physical option '%s', Workaround: Please choose a valid value in ['required', 'optional', 'off'] or leave it empty." +ErrConfigColumnMappingDeprecated,[code=20064:class=config:scope=internal:level=high], "Message: column-mapping is not supported since v6.6.0, Workaround: Please use extract-table/extract-schema/extract-source to handle data conflict when merge tables. See https://docs.pingcap.com/tidb/v6.4/task-configuration-file-full#task-configuration-file-template-advanced" ErrBinlogExtractPosition,[code=22001:class=binlog-op:scope=internal:level=high] ErrBinlogInvalidFilename,[code=22002:class=binlog-op:scope=internal:level=high], "Message: invalid binlog filename" ErrBinlogParsePosFromStr,[code=22003:class=binlog-op:scope=internal:level=high] diff --git a/dm/errors.toml b/dm/errors.toml index 85edcc18896..52f57a9b3a9 100644 --- a/dm/errors.toml +++ b/dm/errors.toml @@ -1187,15 +1187,17 @@ workaround = "Please choose a valid value in ['none', 'manual'] or leave it empt tags = ["internal", "medium"] [error.DM-config-20063] -message = "column-mapping is not supported since v6.6.0" -description = "" -workaround = "Please use extract-table/extract-schema/extract-source to handle data conflict when merge tables. See https://docs.pingcap.com/tidb/v6.4/task-configuration-file-full#task-configuration-file-template-advanced" -tags = ["internal", "high"] message = "invalid load checksum-physical option '%s'" description = "" workaround = "Please choose a valid value in ['required', 'optional', 'off'] or leave it empty." tags = ["internal", "medium"] +[error.DM-config-20064] +message = "column-mapping is not supported since v6.6.0" +description = "" +workaround = "Please use extract-table/extract-schema/extract-source to handle data conflict when merge tables. See https://docs.pingcap.com/tidb/v6.4/task-configuration-file-full#task-configuration-file-template-advanced" +tags = ["internal", "high"] + [error.DM-binlog-op-22001] message = "" description = "" diff --git a/dm/pkg/terror/errcode_string.go b/dm/pkg/terror/errcode_string.go new file mode 100644 index 00000000000..a119a14049e --- /dev/null +++ b/dm/pkg/terror/errcode_string.go @@ -0,0 +1,1195 @@ +// Code generated by "stringer -type=ErrCode -trimprefix=code"; DO NOT EDIT. + +package terror + +import "strconv" + +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. 
+ var x [1]struct{} + _ = x[codeDBDriverError-10001] + _ = x[codeDBBadConn-10002] + _ = x[codeDBInvalidConn-10003] + _ = x[codeDBUnExpect-10004] + _ = x[codeDBQueryFailed-10005] + _ = x[codeDBExecuteFailed-10006] + _ = x[codeParseMydumperMeta-11001] + _ = x[codeGetFileSize-11002] + _ = x[codeDropMultipleTables-11003] + _ = x[codeRenameMultipleTables-11004] + _ = x[codeAlterMultipleTables-11005] + _ = x[codeParseSQL-11006] + _ = x[codeUnknownTypeDDL-11007] + _ = x[codeRestoreASTNode-11008] + _ = x[codeParseGTID-11009] + _ = x[codeNotSupportedFlavor-11010] + _ = x[codeNotMySQLGTID-11011] + _ = x[codeNotMariaDBGTID-11012] + _ = x[codeNotUUIDString-11013] + _ = x[codeMariaDBDomainID-11014] + _ = x[codeInvalidServerID-11015] + _ = x[codeGetSQLModeFromStr-11016] + _ = x[codeVerifySQLOperateArgs-11017] + _ = x[codeStatFileSize-11018] + _ = x[codeReaderAlreadyRunning-11019] + _ = x[codeReaderAlreadyStarted-11020] + _ = x[codeReaderStateCannotClose-11021] + _ = x[codeReaderShouldStartSync-11022] + _ = x[codeEmptyRelayDir-11023] + _ = x[codeReadDir-11024] + _ = x[codeBaseFileNotFound-11025] + _ = x[codeBinFileCmpCondNotSupport-11026] + _ = x[codeBinlogFileNotValid-11027] + _ = x[codeBinlogFilesNotFound-11028] + _ = x[codeGetRelayLogStat-11029] + _ = x[codeAddWatchForRelayLogDir-11030] + _ = x[codeWatcherStart-11031] + _ = x[codeWatcherChanClosed-11032] + _ = x[codeWatcherChanRecvError-11033] + _ = x[codeRelayLogFileSizeSmaller-11034] + _ = x[codeBinlogFileNotSpecified-11035] + _ = x[codeNoRelayLogMatchPos-11036] + _ = x[codeFirstRelayLogNotMatchPos-11037] + _ = x[codeParserParseRelayLog-11038] + _ = x[codeNoSubdirToSwitch-11039] + _ = x[codeNeedSyncAgain-11040] + _ = x[codeSyncClosed-11041] + _ = x[codeSchemaTableNameNotValid-11042] + _ = x[codeGenTableRouter-11043] + _ = x[codeEncryptSecretKeyNotValid-11044] + _ = x[codeEncryptGenCipher-11045] + _ = x[codeEncryptGenIV-11046] + _ = x[codeCiphertextLenNotValid-11047] + _ = x[codeCiphertextContextNotValid-11048] + _ = x[codeInvalidBinlogPosStr-11049] + _ = x[codeEncCipherTextBase64Decode-11050] + _ = x[codeBinlogWriteBinaryData-11051] + _ = x[codeBinlogWriteDataToBuffer-11052] + _ = x[codeBinlogHeaderLengthNotValid-11053] + _ = x[codeBinlogEventDecode-11054] + _ = x[codeBinlogEmptyNextBinName-11055] + _ = x[codeBinlogParseSID-11056] + _ = x[codeBinlogEmptyGTID-11057] + _ = x[codeBinlogGTIDSetNotValid-11058] + _ = x[codeBinlogGTIDMySQLNotValid-11059] + _ = x[codeBinlogGTIDMariaDBNotValid-11060] + _ = x[codeBinlogMariaDBServerIDMismatch-11061] + _ = x[codeBinlogOnlyOneGTIDSupport-11062] + _ = x[codeBinlogOnlyOneIntervalInUUID-11063] + _ = x[codeBinlogIntervalValueNotValid-11064] + _ = x[codeBinlogEmptyQuery-11065] + _ = x[codeBinlogTableMapEvNotValid-11066] + _ = x[codeBinlogExpectFormatDescEv-11067] + _ = x[codeBinlogExpectTableMapEv-11068] + _ = x[codeBinlogExpectRowsEv-11069] + _ = x[codeBinlogUnexpectedEv-11070] + _ = x[codeBinlogParseSingleEv-11071] + _ = x[codeBinlogEventTypeNotValid-11072] + _ = x[codeBinlogEventNoRows-11073] + _ = x[codeBinlogEventNoColumns-11074] + _ = x[codeBinlogEventRowLengthNotEq-11075] + _ = x[codeBinlogColumnTypeNotSupport-11076] + _ = x[codeBinlogGoMySQLTypeNotSupport-11077] + _ = x[codeBinlogColumnTypeMisMatch-11078] + _ = x[codeBinlogDummyEvSizeTooSmall-11079] + _ = x[codeBinlogFlavorNotSupport-11080] + _ = x[codeBinlogDMLEmptyData-11081] + _ = x[codeBinlogLatestGTIDNotInPrev-11082] + _ = x[codeBinlogReadFileByGTID-11083] + _ = x[codeBinlogWriterNotStateNew-11084] + _ = x[codeBinlogWriterStateCannotClose-11085] + _ = 
x[codeBinlogWriterNeedStart-11086] + _ = x[codeBinlogWriterOpenFile-11087] + _ = x[codeBinlogWriterGetFileStat-11088] + _ = x[codeBinlogWriterWriteDataLen-11089] + _ = x[codeBinlogWriterFileNotOpened-11090] + _ = x[codeBinlogWriterFileSync-11091] + _ = x[codeBinlogPrevGTIDEvNotValid-11092] + _ = x[codeBinlogDecodeMySQLGTIDSet-11093] + _ = x[codeBinlogNeedMariaDBGTIDSet-11094] + _ = x[codeBinlogParseMariaDBGTIDSet-11095] + _ = x[codeBinlogMariaDBAddGTIDSet-11096] + _ = x[codeTracingEventDataNotValid-11097] + _ = x[codeTracingUploadData-11098] + _ = x[codeTracingEventTypeNotValid-11099] + _ = x[codeTracingGetTraceCode-11100] + _ = x[codeTracingDataChecksum-11101] + _ = x[codeTracingGetTSO-11102] + _ = x[codeBackoffArgsNotValid-11103] + _ = x[codeInitLoggerFail-11104] + _ = x[codeGTIDTruncateInvalid-11105] + _ = x[codeRelayLogGivenPosTooBig-11106] + _ = x[codeElectionCampaignFail-11107] + _ = x[codeElectionGetLeaderIDFail-11108] + _ = x[codeBinlogInvalidFilenameWithUUIDSuffix-11109] + _ = x[codeDecodeEtcdKeyFail-11110] + _ = x[codeShardDDLOptimismTrySyncFail-11111] + _ = x[codeConnInvalidTLSConfig-11112] + _ = x[codeConnRegistryTLSConfig-11113] + _ = x[codeUpgradeVersionEtcdFail-11114] + _ = x[codeInvalidV1WorkerMetaPath-11115] + _ = x[codeFailUpdateV1DBSchema-11116] + _ = x[codeBinlogStatusVarsParse-11117] + _ = x[codeVerifyHandleErrorArgs-11118] + _ = x[codeRewriteSQL-11119] + _ = x[codeNoUUIDDirMatchGTID-11120] + _ = x[codeNoRelayPosMatchGTID-11121] + _ = x[codeReaderReachEndOfFile-11122] + _ = x[codeMetadataNoBinlogLoc-11123] + _ = x[codePreviousGTIDNotExist-11124] + _ = x[codeNoMasterStatus-11125] + _ = x[codeBinlogNotLogColumn-11126] + _ = x[codeShardDDLOptimismNeedSkipAndRedirect-11127] + _ = x[codeShardDDLOptimismAddNotFullyDroppedColumn-11128] + _ = x[codeSyncerCancelledDDL-11129] + _ = x[codeIncorrectReturnColumnsNum-11130] + _ = x[codeConfigCheckItemNotSupport-20001] + _ = x[codeConfigTomlTransform-20002] + _ = x[codeConfigYamlTransform-20003] + _ = x[codeConfigTaskNameEmpty-20004] + _ = x[codeConfigEmptySourceID-20005] + _ = x[codeConfigTooLongSourceID-20006] + _ = x[codeConfigOnlineSchemeNotSupport-20007] + _ = x[codeConfigInvalidTimezone-20008] + _ = x[codeConfigParseFlagSet-20009] + _ = x[codeConfigDecryptDBPassword-20010] + _ = x[codeConfigMetaInvalid-20011] + _ = x[codeConfigMySQLInstNotFound-20012] + _ = x[codeConfigMySQLInstsAtLeastOne-20013] + _ = x[codeConfigMySQLInstSameSourceID-20014] + _ = x[codeConfigMydumperCfgConflict-20015] + _ = x[codeConfigLoaderCfgConflict-20016] + _ = x[codeConfigSyncerCfgConflict-20017] + _ = x[codeConfigReadCfgFromFile-20018] + _ = x[codeConfigNeedUniqueTaskName-20019] + _ = x[codeConfigInvalidTaskMode-20020] + _ = x[codeConfigNeedTargetDB-20021] + _ = x[codeConfigMetadataNotSet-20022] + _ = x[codeConfigRouteRuleNotFound-20023] + _ = x[codeConfigFilterRuleNotFound-20024] + _ = x[codeConfigColumnMappingNotFound-20025] + _ = x[codeConfigBAListNotFound-20026] + _ = x[codeConfigMydumperCfgNotFound-20027] + _ = x[codeConfigMydumperPathNotValid-20028] + _ = x[codeConfigLoaderCfgNotFound-20029] + _ = x[codeConfigSyncerCfgNotFound-20030] + _ = x[codeConfigSourceIDNotFound-20031] + _ = x[codeConfigDuplicateCfgItem-20032] + _ = x[codeConfigShardModeNotSupport-20033] + _ = x[codeConfigMoreThanOne-20034] + _ = x[codeConfigEtcdParse-20035] + _ = x[codeConfigMissingForBound-20036] + _ = x[codeConfigBinlogEventFilter-20037] + _ = x[codeConfigGlobalConfigsUnused-20038] + _ = x[codeConfigExprFilterManyExpr-20039] + _ = x[codeConfigExprFilterNotFound-20040] + 
_ = x[codeConfigExprFilterWrongGrammar-20041] + _ = x[codeConfigExprFilterEmptyName-20042] + _ = x[codeConfigCheckerMaxTooSmall-20043] + _ = x[codeConfigGenBAList-20044] + _ = x[codeConfigGenTableRouter-20045] + _ = x[codeConfigGenColumnMapping-20046] + _ = x[codeConfigInvalidChunkFileSize-20047] + _ = x[codeConfigOnlineDDLInvalidRegex-20048] + _ = x[codeConfigOnlineDDLMistakeRegex-20049] + _ = x[codeConfigOpenAPITaskConfigExist-20050] + _ = x[codeConfigOpenAPITaskConfigNotExist-20051] + _ = x[codeCollationCompatibleNotSupport-20052] + _ = x[codeConfigInvalidLoadMode-20053] + _ = x[codeConfigInvalidLoadDuplicateResolution-20054] + _ = x[codeConfigValidationMode-20055] + _ = x[codeContinuousValidatorCfgNotFound-20056] + _ = x[codeConfigStartTimeTooLate-20057] + _ = x[codeConfigLoaderDirInvalid-20058] + _ = x[codeConfigLoaderS3NotSupport-20059] + _ = x[codeConfigInvalidSafeModeDuration-20060] + _ = x[codeConfigConfictSafeModeDurationAndSafeMode-20061] + _ = x[codeConfigInvalidLoadPhysicalDuplicateResolution-20062] + _ = x[codeConfigInvalidLoadPhysicalChecksum-20063] + _ = x[codeConfigColumnMappingDeprecated-20064] + _ = x[codeBinlogExtractPosition-22001] + _ = x[codeBinlogInvalidFilename-22002] + _ = x[codeBinlogParsePosFromStr-22003] + _ = x[codeCheckpointInvalidTaskMode-24001] + _ = x[codeCheckpointSaveInvalidPos-24002] + _ = x[codeCheckpointInvalidTableFile-24003] + _ = x[codeCheckpointDBNotExistInFile-24004] + _ = x[codeCheckpointTableNotExistInFile-24005] + _ = x[codeCheckpointRestoreCountGreater-24006] + _ = x[codeTaskCheckSameTableName-26001] + _ = x[codeTaskCheckFailedOpenDB-26002] + _ = x[codeTaskCheckGenTableRouter-26003] + _ = x[codeTaskCheckGenColumnMapping-26004] + _ = x[codeTaskCheckSyncConfigError-26005] + _ = x[codeTaskCheckGenBAList-26006] + _ = x[codeSourceCheckGTID-26007] + _ = x[codeRelayParseUUIDIndex-28001] + _ = x[codeRelayParseUUIDSuffix-28002] + _ = x[codeRelayUUIDWithSuffixNotFound-28003] + _ = x[codeRelayGenFakeRotateEvent-28004] + _ = x[codeRelayNoValidRelaySubDir-28005] + _ = x[codeRelayUUIDSuffixNotValid-30001] + _ = x[codeRelayUUIDSuffixLessThanPrev-30002] + _ = x[codeRelayLoadMetaData-30003] + _ = x[codeRelayBinlogNameNotValid-30004] + _ = x[codeRelayNoCurrentUUID-30005] + _ = x[codeRelayFlushLocalMeta-30006] + _ = x[codeRelayUpdateIndexFile-30007] + _ = x[codeRelayLogDirpathEmpty-30008] + _ = x[codeRelayReaderNotStateNew-30009] + _ = x[codeRelayReaderStateCannotClose-30010] + _ = x[codeRelayReaderNeedStart-30011] + _ = x[codeRelayTCPReaderStartSync-30012] + _ = x[codeRelayTCPReaderNilGTID-30013] + _ = x[codeRelayTCPReaderStartSyncGTID-30014] + _ = x[codeRelayTCPReaderGetEvent-30015] + _ = x[codeRelayWriterNotStateNew-30016] + _ = x[codeRelayWriterStateCannotClose-30017] + _ = x[codeRelayWriterNeedStart-30018] + _ = x[codeRelayWriterNotOpened-30019] + _ = x[codeRelayWriterExpectRotateEv-30020] + _ = x[codeRelayWriterRotateEvWithNoWriter-30021] + _ = x[codeRelayWriterStatusNotValid-30022] + _ = x[codeRelayWriterGetFileStat-30023] + _ = x[codeRelayWriterLatestPosGTFileSize-30024] + _ = x[codeRelayWriterFileOperate-30025] + _ = x[codeRelayCheckBinlogFileHeaderExist-30026] + _ = x[codeRelayCheckFormatDescEventExist-30027] + _ = x[codeRelayCheckFormatDescEventParseEv-30028] + _ = x[codeRelayCheckIsDuplicateEvent-30029] + _ = x[codeRelayUpdateGTID-30030] + _ = x[codeRelayNeedPrevGTIDEvBeforeGTIDEv-30031] + _ = x[codeRelayNeedMaGTIDListEvBeforeGTIDEv-30032] + _ = x[codeRelayMkdir-30033] + _ = x[codeRelaySwitchMasterNeedGTID-30034] + _ = 
x[codeRelayThisStrategyIsPurging-30035] + _ = x[codeRelayOtherStrategyIsPurging-30036] + _ = x[codeRelayPurgeIsForbidden-30037] + _ = x[codeRelayNoActiveRelayLog-30038] + _ = x[codeRelayPurgeRequestNotValid-30039] + _ = x[codeRelayTrimUUIDNotFound-30040] + _ = x[codeRelayRemoveFileFail-30041] + _ = x[codeRelayPurgeArgsNotValid-30042] + _ = x[codePreviousGTIDsNotValid-30043] + _ = x[codeRotateEventWithDifferentServerID-30044] + _ = x[codeDumpUnitRuntime-32001] + _ = x[codeDumpUnitGenTableRouter-32002] + _ = x[codeDumpUnitGenBAList-32003] + _ = x[codeDumpUnitGlobalLock-32004] + _ = x[codeLoadUnitCreateSchemaFile-34001] + _ = x[codeLoadUnitInvalidFileEnding-34002] + _ = x[codeLoadUnitParseQuoteValues-34003] + _ = x[codeLoadUnitDoColumnMapping-34004] + _ = x[codeLoadUnitReadSchemaFile-34005] + _ = x[codeLoadUnitParseStatement-34006] + _ = x[codeLoadUnitNotCreateTable-34007] + _ = x[codeLoadUnitDispatchSQLFromFile-34008] + _ = x[codeLoadUnitInvalidInsertSQL-34009] + _ = x[codeLoadUnitGenTableRouter-34010] + _ = x[codeLoadUnitGenColumnMapping-34011] + _ = x[codeLoadUnitNoDBFile-34012] + _ = x[codeLoadUnitNoTableFile-34013] + _ = x[codeLoadUnitDumpDirNotFound-34014] + _ = x[codeLoadUnitDuplicateTableFile-34015] + _ = x[codeLoadUnitGenBAList-34016] + _ = x[codeLoadTaskWorkerNotMatch-34017] + _ = x[codeLoadCheckPointNotMatch-34018] + _ = x[codeLoadLightningRuntime-34019] + _ = x[codeLoadLightningHasDup-34020] + _ = x[codeLoadLightningChecksum-34021] + _ = x[codeSyncerUnitPanic-36001] + _ = x[codeSyncUnitInvalidTableName-36002] + _ = x[codeSyncUnitTableNameQuery-36003] + _ = x[codeSyncUnitNotSupportedDML-36004] + _ = x[codeSyncUnitAddTableInSharding-36005] + _ = x[codeSyncUnitDropSchemaTableInSharding-36006] + _ = x[codeSyncUnitInvalidShardMeta-36007] + _ = x[codeSyncUnitDDLWrongSequence-36008] + _ = x[codeSyncUnitDDLActiveIndexLarger-36009] + _ = x[codeSyncUnitDupTableGroup-36010] + _ = x[codeSyncUnitShardingGroupNotFound-36011] + _ = x[codeSyncUnitSafeModeSetCount-36012] + _ = x[codeSyncUnitCausalityConflict-36013] + _ = x[codeSyncUnitDMLStatementFound-36014] + _ = x[codeSyncerUnitBinlogEventFilter-36015] + _ = x[codeSyncerUnitInvalidReplicaEvent-36016] + _ = x[codeSyncerUnitParseStmt-36017] + _ = x[codeSyncerUnitUUIDNotLatest-36018] + _ = x[codeSyncerUnitDDLExecChanCloseOrBusy-36019] + _ = x[codeSyncerUnitDDLChanDone-36020] + _ = x[codeSyncerUnitDDLChanCanceled-36021] + _ = x[codeSyncerUnitDDLOnMultipleTable-36022] + _ = x[codeSyncerUnitInjectDDLOnly-36023] + _ = x[codeSyncerUnitInjectDDLWithoutSchema-36024] + _ = x[codeSyncerUnitNotSupportedOperate-36025] + _ = x[codeSyncerUnitNilOperatorReq-36026] + _ = x[codeSyncerUnitDMLColumnNotMatch-36027] + _ = x[codeSyncerUnitDMLOldNewValueMismatch-36028] + _ = x[codeSyncerUnitDMLPruneColumnMismatch-36029] + _ = x[codeSyncerUnitGenBinlogEventFilter-36030] + _ = x[codeSyncerUnitGenTableRouter-36031] + _ = x[codeSyncerUnitGenColumnMapping-36032] + _ = x[codeSyncerUnitDoColumnMapping-36033] + _ = x[codeSyncerUnitCacheKeyNotFound-36034] + _ = x[codeSyncerUnitHeartbeatCheckConfig-36035] + _ = x[codeSyncerUnitHeartbeatRecordExists-36036] + _ = x[codeSyncerUnitHeartbeatRecordNotFound-36037] + _ = x[codeSyncerUnitHeartbeatRecordNotValid-36038] + _ = x[codeSyncerUnitOnlineDDLInvalidMeta-36039] + _ = x[codeSyncerUnitOnlineDDLSchemeNotSupport-36040] + _ = x[codeSyncerUnitOnlineDDLOnMultipleTable-36041] + _ = x[codeSyncerUnitGhostApplyEmptyTable-36042] + _ = x[codeSyncerUnitGhostRenameTableNotValid-36043] + _ = x[codeSyncerUnitGhostRenameToGhostTable-36044] + _ = 
x[codeSyncerUnitGhostRenameGhostTblToOther-36045] + _ = x[codeSyncerUnitGhostOnlineDDLOnGhostTbl-36046] + _ = x[codeSyncerUnitPTApplyEmptyTable-36047] + _ = x[codeSyncerUnitPTRenameTableNotValid-36048] + _ = x[codeSyncerUnitPTRenameToPTTable-36049] + _ = x[codeSyncerUnitPTRenamePTTblToOther-36050] + _ = x[codeSyncerUnitPTOnlineDDLOnPTTbl-36051] + _ = x[codeSyncerUnitRemoteSteamerWithGTID-36052] + _ = x[codeSyncerUnitRemoteSteamerStartSync-36053] + _ = x[codeSyncerUnitGetTableFromDB-36054] + _ = x[codeSyncerUnitFirstEndPosNotFound-36055] + _ = x[codeSyncerUnitResolveCasualityFail-36056] + _ = x[codeSyncerUnitReopenStreamNotSupport-36057] + _ = x[codeSyncerUnitUpdateConfigInSharding-36058] + _ = x[codeSyncerUnitExecWithNoBlockingDDL-36059] + _ = x[codeSyncerUnitGenBAList-36060] + _ = x[codeSyncerUnitHandleDDLFailed-36061] + _ = x[codeSyncerShardDDLConflict-36062] + _ = x[codeSyncerFailpoint-36063] + _ = x[codeSyncerEvent-36064] + _ = x[codeSyncerOperatorNotExist-36065] + _ = x[codeSyncerEventNotExist-36066] + _ = x[codeSyncerParseDDL-36067] + _ = x[codeSyncerUnsupportedStmt-36068] + _ = x[codeSyncerGetEvent-36069] + _ = x[codeSyncerDownstreamTableNotFound-36070] + _ = x[codeSyncerReprocessWithSafeModeFail-36071] + _ = x[codeMasterSQLOpNilRequest-38001] + _ = x[codeMasterSQLOpNotSupport-38002] + _ = x[codeMasterSQLOpWithoutSharding-38003] + _ = x[codeMasterGRPCCreateConn-38004] + _ = x[codeMasterGRPCSendOnCloseConn-38005] + _ = x[codeMasterGRPCClientClose-38006] + _ = x[codeMasterGRPCInvalidReqType-38007] + _ = x[codeMasterGRPCRequestError-38008] + _ = x[codeMasterDeployMapperVerify-38009] + _ = x[codeMasterConfigParseFlagSet-38010] + _ = x[codeMasterConfigUnknownItem-38011] + _ = x[codeMasterConfigInvalidFlag-38012] + _ = x[codeMasterConfigTomlTransform-38013] + _ = x[codeMasterConfigTimeoutParse-38014] + _ = x[codeMasterConfigUpdateCfgFile-38015] + _ = x[codeMasterShardingDDLDiff-38016] + _ = x[codeMasterStartService-38017] + _ = x[codeMasterNoEmitToken-38018] + _ = x[codeMasterLockNotFound-38019] + _ = x[codeMasterLockIsResolving-38020] + _ = x[codeMasterWorkerCliNotFound-38021] + _ = x[codeMasterWorkerNotWaitLock-38022] + _ = x[codeMasterHandleSQLReqFail-38023] + _ = x[codeMasterOwnerExecDDL-38024] + _ = x[codeMasterPartWorkerExecDDLFail-38025] + _ = x[codeMasterWorkerExistDDLLock-38026] + _ = x[codeMasterGetWorkerCfgExtractor-38027] + _ = x[codeMasterTaskConfigExtractor-38028] + _ = x[codeMasterWorkerArgsExtractor-38029] + _ = x[codeMasterQueryWorkerConfig-38030] + _ = x[codeMasterOperNotFound-38031] + _ = x[codeMasterOperRespNotSuccess-38032] + _ = x[codeMasterOperRequestTimeout-38033] + _ = x[codeMasterHandleHTTPApis-38034] + _ = x[codeMasterHostPortNotValid-38035] + _ = x[codeMasterGetHostnameFail-38036] + _ = x[codeMasterGenEmbedEtcdConfigFail-38037] + _ = x[codeMasterStartEmbedEtcdFail-38038] + _ = x[codeMasterParseURLFail-38039] + _ = x[codeMasterJoinEmbedEtcdFail-38040] + _ = x[codeMasterInvalidOperateOp-38041] + _ = x[codeMasterAdvertiseAddrNotValid-38042] + _ = x[codeMasterRequestIsNotForwardToLeader-38043] + _ = x[codeMasterIsNotAsyncRequest-38044] + _ = x[codeMasterFailToGetExpectResult-38045] + _ = x[codeMasterPessimistNotStarted-38046] + _ = x[codeMasterOptimistNotStarted-38047] + _ = x[codeMasterMasterNameNotExist-38048] + _ = x[codeMasterInvalidOfflineType-38049] + _ = x[codeMasterAdvertisePeerURLsNotValid-38050] + _ = x[codeMasterTLSConfigNotValid-38051] + _ = x[codeMasterBoundChanging-38052] + _ = x[codeMasterFailToImportFromV10x-38053] + _ = 
x[codeMasterInconsistentOptimistDDLsAndInfo-38054] + _ = x[codeMasterOptimisticTableInfobeforeNotExist-38055] + _ = x[codeMasterOptimisticDownstreamMetaNotFound-38056] + _ = x[codeMasterInvalidClusterID-38057] + _ = x[codeMasterStartTask-38058] + _ = x[codeWorkerParseFlagSet-40001] + _ = x[codeWorkerInvalidFlag-40002] + _ = x[codeWorkerDecodeConfigFromFile-40003] + _ = x[codeWorkerUndecodedItemFromFile-40004] + _ = x[codeWorkerNeedSourceID-40005] + _ = x[codeWorkerTooLongSourceID-40006] + _ = x[codeWorkerRelayBinlogName-40007] + _ = x[codeWorkerWriteConfigFile-40008] + _ = x[codeWorkerLogInvalidHandler-40009] + _ = x[codeWorkerLogPointerInvalid-40010] + _ = x[codeWorkerLogFetchPointer-40011] + _ = x[codeWorkerLogUnmarshalPointer-40012] + _ = x[codeWorkerLogClearPointer-40013] + _ = x[codeWorkerLogTaskKeyNotValid-40014] + _ = x[codeWorkerLogUnmarshalTaskKey-40015] + _ = x[codeWorkerLogFetchLogIter-40016] + _ = x[codeWorkerLogGetTaskLog-40017] + _ = x[codeWorkerLogUnmarshalBinary-40018] + _ = x[codeWorkerLogForwardPointer-40019] + _ = x[codeWorkerLogMarshalTask-40020] + _ = x[codeWorkerLogSaveTask-40021] + _ = x[codeWorkerLogDeleteKV-40022] + _ = x[codeWorkerLogDeleteKVIter-40023] + _ = x[codeWorkerLogUnmarshalTaskMeta-40024] + _ = x[codeWorkerLogFetchTaskFromMeta-40025] + _ = x[codeWorkerLogVerifyTaskMeta-40026] + _ = x[codeWorkerLogSaveTaskMeta-40027] + _ = x[codeWorkerLogGetTaskMeta-40028] + _ = x[codeWorkerLogDeleteTaskMeta-40029] + _ = x[codeWorkerMetaTomlTransform-40030] + _ = x[codeWorkerMetaOldFileStat-40031] + _ = x[codeWorkerMetaOldReadFile-40032] + _ = x[codeWorkerMetaEncodeTask-40033] + _ = x[codeWorkerMetaRemoveOldDir-40034] + _ = x[codeWorkerMetaTaskLogNotFound-40035] + _ = x[codeWorkerMetaHandleTaskOrder-40036] + _ = x[codeWorkerMetaOpenTxn-40037] + _ = x[codeWorkerMetaCommitTxn-40038] + _ = x[codeWorkerRelayStageNotValid-40039] + _ = x[codeWorkerRelayOperNotSupport-40040] + _ = x[codeWorkerOpenKVDBFile-40041] + _ = x[codeWorkerUpgradeCheckKVDir-40042] + _ = x[codeWorkerMarshalVerBinary-40043] + _ = x[codeWorkerUnmarshalVerBinary-40044] + _ = x[codeWorkerGetVersionFromKV-40045] + _ = x[codeWorkerSaveVersionToKV-40046] + _ = x[codeWorkerVerAutoDowngrade-40047] + _ = x[codeWorkerStartService-40048] + _ = x[codeWorkerAlreadyClosed-40049] + _ = x[codeWorkerNotRunningStage-40050] + _ = x[codeWorkerNotPausedStage-40051] + _ = x[codeWorkerUpdateTaskStage-40052] + _ = x[codeWorkerMigrateStopRelay-40053] + _ = x[codeWorkerSubTaskNotFound-40054] + _ = x[codeWorkerSubTaskExists-40055] + _ = x[codeWorkerOperSyncUnitOnly-40056] + _ = x[codeWorkerRelayUnitStage-40057] + _ = x[codeWorkerNoSyncerRunning-40058] + _ = x[codeWorkerCannotUpdateSourceID-40059] + _ = x[codeWorkerNoAvailUnits-40060] + _ = x[codeWorkerDDLLockInfoNotFound-40061] + _ = x[codeWorkerDDLLockInfoExists-40062] + _ = x[codeWorkerCacheDDLInfoExists-40063] + _ = x[codeWorkerExecSkipDDLConflict-40064] + _ = x[codeWorkerExecDDLSyncerOnly-40065] + _ = x[codeWorkerExecDDLTimeout-40066] + _ = x[codeWorkerWaitRelayCatchupTimeout-40067] + _ = x[codeWorkerRelayIsPurging-40068] + _ = x[codeWorkerHostPortNotValid-40069] + _ = x[codeWorkerNoStart-40070] + _ = x[codeWorkerAlreadyStarted-40071] + _ = x[codeWorkerSourceNotMatch-40072] + _ = x[codeWorkerFailToGetSubtaskConfigFromEtcd-40073] + _ = x[codeWorkerFailToGetSourceConfigFromEtcd-40074] + _ = x[codeWorkerDDLLockOpNotFound-40075] + _ = x[codeWorkerTLSConfigNotValid-40076] + _ = x[codeWorkerFailConnectMaster-40077] + _ = x[codeWorkerWaitRelayCatchupGTID-40078] + _ = 
x[codeWorkerRelayConfigChanging-40079] + _ = x[codeWorkerRouteTableDupMatch-40080] + _ = x[codeWorkerUpdateSubTaskConfig-40081] + _ = x[codeWorkerValidatorNotPaused-40082] + _ = x[codeWorkerServerClosed-40083] + _ = x[codeTracerParseFlagSet-42001] + _ = x[codeTracerConfigTomlTransform-42002] + _ = x[codeTracerConfigInvalidFlag-42003] + _ = x[codeTracerTraceEventNotFound-42004] + _ = x[codeTracerTraceIDNotProvided-42005] + _ = x[codeTracerParamNotValid-42006] + _ = x[codeTracerPostMethodOnly-42007] + _ = x[codeTracerEventAssertionFail-42008] + _ = x[codeTracerEventTypeNotValid-42009] + _ = x[codeTracerStartService-42010] + _ = x[codeHAFailTxnOperation-42501] + _ = x[codeHAInvalidItem-42502] + _ = x[codeHAFailWatchEtcd-42503] + _ = x[codeHAFailLeaseOperation-42504] + _ = x[codeHAFailKeepalive-42505] + _ = x[codeValidatorLoadPersistedData-43001] + _ = x[codeValidatorPersistData-43002] + _ = x[codeValidatorGetEvent-43003] + _ = x[codeValidatorProcessRowEvent-43004] + _ = x[codeValidatorValidateChange-43005] + _ = x[codeValidatorNotFound-43006] + _ = x[codeValidatorPanic-43007] + _ = x[codeValidatorTooMuchPending-43008] + _ = x[codeSchemaTrackerInvalidJSON-44001] + _ = x[codeSchemaTrackerCannotCreateSchema-44002] + _ = x[codeSchemaTrackerCannotCreateTable-44003] + _ = x[codeSchemaTrackerCannotSerialize-44004] + _ = x[codeSchemaTrackerCannotGetTable-44005] + _ = x[codeSchemaTrackerCannotExecDDL-44006] + _ = x[codeSchemaTrackerCannotFetchDownstreamTable-44007] + _ = x[codeSchemaTrackerCannotParseDownstreamTable-44008] + _ = x[codeSchemaTrackerInvalidCreateTableStmt-44009] + _ = x[codeSchemaTrackerRestoreStmtFail-44010] + _ = x[codeSchemaTrackerCannotDropTable-44011] + _ = x[codeSchemaTrackerInit-44012] + _ = x[codeSchemaTrackerMarshalJSON-44013] + _ = x[codeSchemaTrackerUnMarshalJSON-44014] + _ = x[codeSchemaTrackerUnSchemaNotExist-44015] + _ = x[codeSchemaTrackerCannotSetDownstreamSQLMode-44016] + _ = x[codeSchemaTrackerCannotInitDownstreamParser-44017] + _ = x[codeSchemaTrackerCannotMockDownstreamTable-44018] + _ = x[codeSchemaTrackerCannotFetchDownstreamCreateTableStmt-44019] + _ = x[codeSchemaTrackerIsClosed-44020] + _ = x[codeSchedulerNotStarted-46001] + _ = x[codeSchedulerStarted-46002] + _ = x[codeSchedulerWorkerExist-46003] + _ = x[codeSchedulerWorkerNotExist-46004] + _ = x[codeSchedulerWorkerOnline-46005] + _ = x[codeSchedulerWorkerInvalidTrans-46006] + _ = x[codeSchedulerSourceCfgExist-46007] + _ = x[codeSchedulerSourceCfgNotExist-46008] + _ = x[codeSchedulerSourcesUnbound-46009] + _ = x[codeSchedulerSourceOpTaskExist-46010] + _ = x[codeSchedulerRelayStageInvalidUpdate-46011] + _ = x[codeSchedulerRelayStageSourceNotExist-46012] + _ = x[codeSchedulerMultiTask-46013] + _ = x[codeSchedulerSubTaskExist-46014] + _ = x[codeSchedulerSubTaskStageInvalidUpdate-46015] + _ = x[codeSchedulerSubTaskOpTaskNotExist-46016] + _ = x[codeSchedulerSubTaskOpSourceNotExist-46017] + _ = x[codeSchedulerTaskNotExist-46018] + _ = x[codeSchedulerRequireRunningTaskInSyncUnit-46019] + _ = x[codeSchedulerRelayWorkersBusy-46020] + _ = x[codeSchedulerRelayWorkersBound-46021] + _ = x[codeSchedulerRelayWorkersWrongRelay-46022] + _ = x[codeSchedulerSourceOpRelayExist-46023] + _ = x[codeSchedulerLatchInUse-46024] + _ = x[codeSchedulerSourceCfgUpdate-46025] + _ = x[codeSchedulerWrongWorkerInput-46026] + _ = x[codeSchedulerCantTransferToRelayWorker-46027] + _ = x[codeSchedulerStartRelayOnSpecified-46028] + _ = x[codeSchedulerStopRelayOnSpecified-46029] + _ = x[codeSchedulerStartRelayOnBound-46030] + _ = 
x[codeSchedulerStopRelayOnBound-46031] + _ = x[codeSchedulerPauseTaskForTransferSource-46032] + _ = x[codeSchedulerWorkerNotFree-46033] + _ = x[codeSchedulerSubTaskNotExist-46034] + _ = x[codeSchedulerSubTaskCfgUpdate-46035] + _ = x[codeCtlGRPCCreateConn-48001] + _ = x[codeCtlInvalidTLSCfg-48002] + _ = x[codeCtlLoadTLSCfg-48003] + _ = x[codeOpenAPICommon-49001] + _ = x[codeOpenAPITaskSourceNotFound-49002] + _ = x[codeNotSet-50000] +} + +const _ErrCode_name = "DBDriverErrorDBBadConnDBInvalidConnDBUnExpectDBQueryFailedDBExecuteFailedParseMydumperMetaGetFileSizeDropMultipleTablesRenameMultipleTablesAlterMultipleTablesParseSQLUnknownTypeDDLRestoreASTNodeParseGTIDNotSupportedFlavorNotMySQLGTIDNotMariaDBGTIDNotUUIDStringMariaDBDomainIDInvalidServerIDGetSQLModeFromStrVerifySQLOperateArgsStatFileSizeReaderAlreadyRunningReaderAlreadyStartedReaderStateCannotCloseReaderShouldStartSyncEmptyRelayDirReadDirBaseFileNotFoundBinFileCmpCondNotSupportBinlogFileNotValidBinlogFilesNotFoundGetRelayLogStatAddWatchForRelayLogDirWatcherStartWatcherChanClosedWatcherChanRecvErrorRelayLogFileSizeSmallerBinlogFileNotSpecifiedNoRelayLogMatchPosFirstRelayLogNotMatchPosParserParseRelayLogNoSubdirToSwitchNeedSyncAgainSyncClosedSchemaTableNameNotValidGenTableRouterEncryptSecretKeyNotValidEncryptGenCipherEncryptGenIVCiphertextLenNotValidCiphertextContextNotValidInvalidBinlogPosStrEncCipherTextBase64DecodeBinlogWriteBinaryDataBinlogWriteDataToBufferBinlogHeaderLengthNotValidBinlogEventDecodeBinlogEmptyNextBinNameBinlogParseSIDBinlogEmptyGTIDBinlogGTIDSetNotValidBinlogGTIDMySQLNotValidBinlogGTIDMariaDBNotValidBinlogMariaDBServerIDMismatchBinlogOnlyOneGTIDSupportBinlogOnlyOneIntervalInUUIDBinlogIntervalValueNotValidBinlogEmptyQueryBinlogTableMapEvNotValidBinlogExpectFormatDescEvBinlogExpectTableMapEvBinlogExpectRowsEvBinlogUnexpectedEvBinlogParseSingleEvBinlogEventTypeNotValidBinlogEventNoRowsBinlogEventNoColumnsBinlogEventRowLengthNotEqBinlogColumnTypeNotSupportBinlogGoMySQLTypeNotSupportBinlogColumnTypeMisMatchBinlogDummyEvSizeTooSmallBinlogFlavorNotSupportBinlogDMLEmptyDataBinlogLatestGTIDNotInPrevBinlogReadFileByGTIDBinlogWriterNotStateNewBinlogWriterStateCannotCloseBinlogWriterNeedStartBinlogWriterOpenFileBinlogWriterGetFileStatBinlogWriterWriteDataLenBinlogWriterFileNotOpenedBinlogWriterFileSyncBinlogPrevGTIDEvNotValidBinlogDecodeMySQLGTIDSetBinlogNeedMariaDBGTIDSetBinlogParseMariaDBGTIDSetBinlogMariaDBAddGTIDSetTracingEventDataNotValidTracingUploadDataTracingEventTypeNotValidTracingGetTraceCodeTracingDataChecksumTracingGetTSOBackoffArgsNotValidInitLoggerFailGTIDTruncateInvalidRelayLogGivenPosTooBigElectionCampaignFailElectionGetLeaderIDFailBinlogInvalidFilenameWithUUIDSuffixDecodeEtcdKeyFailShardDDLOptimismTrySyncFailConnInvalidTLSConfigConnRegistryTLSConfigUpgradeVersionEtcdFailInvalidV1WorkerMetaPathFailUpdateV1DBSchemaBinlogStatusVarsParseVerifyHandleErrorArgsRewriteSQLNoUUIDDirMatchGTIDNoRelayPosMatchGTIDReaderReachEndOfFileMetadataNoBinlogLocPreviousGTIDNotExistNoMasterStatusBinlogNotLogColumnShardDDLOptimismNeedSkipAndRedirectShardDDLOptimismAddNotFullyDroppedColumnSyncerCancelledDDLIncorrectReturnColumnsNumConfigCheckItemNotSupportConfigTomlTransformConfigYamlTransformConfigTaskNameEmptyConfigEmptySourceIDConfigTooLongSourceIDConfigOnlineSchemeNotSupportConfigInvalidTimezoneConfigParseFlagSetConfigDecryptDBPasswordConfigMetaInvalidConfigMySQLInstNotFoundConfigMySQLInstsAtLeastOneConfigMySQLInstSameSourceIDConfigMydumperCfgConflictConfigLoaderCfgConflictConfigSyncerCfgConflictConfigReadCfgFromFileConfigNeedUniqueTa
skNameConfigInvalidTaskModeConfigNeedTargetDBConfigMetadataNotSetConfigRouteRuleNotFoundConfigFilterRuleNotFoundConfigColumnMappingNotFoundConfigBAListNotFoundConfigMydumperCfgNotFoundConfigMydumperPathNotValidConfigLoaderCfgNotFoundConfigSyncerCfgNotFoundConfigSourceIDNotFoundConfigDuplicateCfgItemConfigShardModeNotSupportConfigMoreThanOneConfigEtcdParseConfigMissingForBoundConfigBinlogEventFilterConfigGlobalConfigsUnusedConfigExprFilterManyExprConfigExprFilterNotFoundConfigExprFilterWrongGrammarConfigExprFilterEmptyNameConfigCheckerMaxTooSmallConfigGenBAListConfigGenTableRouterConfigGenColumnMappingConfigInvalidChunkFileSizeConfigOnlineDDLInvalidRegexConfigOnlineDDLMistakeRegexConfigOpenAPITaskConfigExistConfigOpenAPITaskConfigNotExistCollationCompatibleNotSupportConfigInvalidLoadModeConfigInvalidLoadDuplicateResolutionConfigValidationModeContinuousValidatorCfgNotFoundConfigStartTimeTooLateConfigLoaderDirInvalidConfigLoaderS3NotSupportConfigInvalidSafeModeDurationConfigConfictSafeModeDurationAndSafeModeConfigInvalidLoadPhysicalDuplicateResolutionConfigInvalidLoadPhysicalChecksumConfigColumnMappingDeprecatedBinlogExtractPositionBinlogInvalidFilenameBinlogParsePosFromStrCheckpointInvalidTaskModeCheckpointSaveInvalidPosCheckpointInvalidTableFileCheckpointDBNotExistInFileCheckpointTableNotExistInFileCheckpointRestoreCountGreaterTaskCheckSameTableNameTaskCheckFailedOpenDBTaskCheckGenTableRouterTaskCheckGenColumnMappingTaskCheckSyncConfigErrorTaskCheckGenBAListSourceCheckGTIDRelayParseUUIDIndexRelayParseUUIDSuffixRelayUUIDWithSuffixNotFoundRelayGenFakeRotateEventRelayNoValidRelaySubDirRelayUUIDSuffixNotValidRelayUUIDSuffixLessThanPrevRelayLoadMetaDataRelayBinlogNameNotValidRelayNoCurrentUUIDRelayFlushLocalMetaRelayUpdateIndexFileRelayLogDirpathEmptyRelayReaderNotStateNewRelayReaderStateCannotCloseRelayReaderNeedStartRelayTCPReaderStartSyncRelayTCPReaderNilGTIDRelayTCPReaderStartSyncGTIDRelayTCPReaderGetEventRelayWriterNotStateNewRelayWriterStateCannotCloseRelayWriterNeedStartRelayWriterNotOpenedRelayWriterExpectRotateEvRelayWriterRotateEvWithNoWriterRelayWriterStatusNotValidRelayWriterGetFileStatRelayWriterLatestPosGTFileSizeRelayWriterFileOperateRelayCheckBinlogFileHeaderExistRelayCheckFormatDescEventExistRelayCheckFormatDescEventParseEvRelayCheckIsDuplicateEventRelayUpdateGTIDRelayNeedPrevGTIDEvBeforeGTIDEvRelayNeedMaGTIDListEvBeforeGTIDEvRelayMkdirRelaySwitchMasterNeedGTIDRelayThisStrategyIsPurgingRelayOtherStrategyIsPurgingRelayPurgeIsForbiddenRelayNoActiveRelayLogRelayPurgeRequestNotValidRelayTrimUUIDNotFoundRelayRemoveFileFailRelayPurgeArgsNotValidPreviousGTIDsNotValidRotateEventWithDifferentServerIDDumpUnitRuntimeDumpUnitGenTableRouterDumpUnitGenBAListDumpUnitGlobalLockLoadUnitCreateSchemaFileLoadUnitInvalidFileEndingLoadUnitParseQuoteValuesLoadUnitDoColumnMappingLoadUnitReadSchemaFileLoadUnitParseStatementLoadUnitNotCreateTableLoadUnitDispatchSQLFromFileLoadUnitInvalidInsertSQLLoadUnitGenTableRouterLoadUnitGenColumnMappingLoadUnitNoDBFileLoadUnitNoTableFileLoadUnitDumpDirNotFoundLoadUnitDuplicateTableFileLoadUnitGenBAListLoadTaskWorkerNotMatchLoadCheckPointNotMatchLoadLightningRuntimeLoadLightningHasDupLoadLightningChecksumSyncerUnitPanicSyncUnitInvalidTableNameSyncUnitTableNameQuerySyncUnitNotSupportedDMLSyncUnitAddTableInShardingSyncUnitDropSchemaTableInShardingSyncUnitInvalidShardMetaSyncUnitDDLWrongSequenceSyncUnitDDLActiveIndexLargerSyncUnitDupTableGroupSyncUnitShardingGroupNotFoundSyncUnitSafeModeSetCountSyncUnitCausalityConflictSyncUnitDMLStatementFoundSyncerUnitBinlogEventFilter
SyncerUnitInvalidReplicaEventSyncerUnitParseStmtSyncerUnitUUIDNotLatestSyncerUnitDDLExecChanCloseOrBusySyncerUnitDDLChanDoneSyncerUnitDDLChanCanceledSyncerUnitDDLOnMultipleTableSyncerUnitInjectDDLOnlySyncerUnitInjectDDLWithoutSchemaSyncerUnitNotSupportedOperateSyncerUnitNilOperatorReqSyncerUnitDMLColumnNotMatchSyncerUnitDMLOldNewValueMismatchSyncerUnitDMLPruneColumnMismatchSyncerUnitGenBinlogEventFilterSyncerUnitGenTableRouterSyncerUnitGenColumnMappingSyncerUnitDoColumnMappingSyncerUnitCacheKeyNotFoundSyncerUnitHeartbeatCheckConfigSyncerUnitHeartbeatRecordExistsSyncerUnitHeartbeatRecordNotFoundSyncerUnitHeartbeatRecordNotValidSyncerUnitOnlineDDLInvalidMetaSyncerUnitOnlineDDLSchemeNotSupportSyncerUnitOnlineDDLOnMultipleTableSyncerUnitGhostApplyEmptyTableSyncerUnitGhostRenameTableNotValidSyncerUnitGhostRenameToGhostTableSyncerUnitGhostRenameGhostTblToOtherSyncerUnitGhostOnlineDDLOnGhostTblSyncerUnitPTApplyEmptyTableSyncerUnitPTRenameTableNotValidSyncerUnitPTRenameToPTTableSyncerUnitPTRenamePTTblToOtherSyncerUnitPTOnlineDDLOnPTTblSyncerUnitRemoteSteamerWithGTIDSyncerUnitRemoteSteamerStartSyncSyncerUnitGetTableFromDBSyncerUnitFirstEndPosNotFoundSyncerUnitResolveCasualityFailSyncerUnitReopenStreamNotSupportSyncerUnitUpdateConfigInShardingSyncerUnitExecWithNoBlockingDDLSyncerUnitGenBAListSyncerUnitHandleDDLFailedSyncerShardDDLConflictSyncerFailpointSyncerEventSyncerOperatorNotExistSyncerEventNotExistSyncerParseDDLSyncerUnsupportedStmtSyncerGetEventSyncerDownstreamTableNotFoundSyncerReprocessWithSafeModeFailMasterSQLOpNilRequestMasterSQLOpNotSupportMasterSQLOpWithoutShardingMasterGRPCCreateConnMasterGRPCSendOnCloseConnMasterGRPCClientCloseMasterGRPCInvalidReqTypeMasterGRPCRequestErrorMasterDeployMapperVerifyMasterConfigParseFlagSetMasterConfigUnknownItemMasterConfigInvalidFlagMasterConfigTomlTransformMasterConfigTimeoutParseMasterConfigUpdateCfgFileMasterShardingDDLDiffMasterStartServiceMasterNoEmitTokenMasterLockNotFoundMasterLockIsResolvingMasterWorkerCliNotFoundMasterWorkerNotWaitLockMasterHandleSQLReqFailMasterOwnerExecDDLMasterPartWorkerExecDDLFailMasterWorkerExistDDLLockMasterGetWorkerCfgExtractorMasterTaskConfigExtractorMasterWorkerArgsExtractorMasterQueryWorkerConfigMasterOperNotFoundMasterOperRespNotSuccessMasterOperRequestTimeoutMasterHandleHTTPApisMasterHostPortNotValidMasterGetHostnameFailMasterGenEmbedEtcdConfigFailMasterStartEmbedEtcdFailMasterParseURLFailMasterJoinEmbedEtcdFailMasterInvalidOperateOpMasterAdvertiseAddrNotValidMasterRequestIsNotForwardToLeaderMasterIsNotAsyncRequestMasterFailToGetExpectResultMasterPessimistNotStartedMasterOptimistNotStartedMasterMasterNameNotExistMasterInvalidOfflineTypeMasterAdvertisePeerURLsNotValidMasterTLSConfigNotValidMasterBoundChangingMasterFailToImportFromV10xMasterInconsistentOptimistDDLsAndInfoMasterOptimisticTableInfobeforeNotExistMasterOptimisticDownstreamMetaNotFoundMasterInvalidClusterIDMasterStartTaskWorkerParseFlagSetWorkerInvalidFlagWorkerDecodeConfigFromFileWorkerUndecodedItemFromFileWorkerNeedSourceIDWorkerTooLongSourceIDWorkerRelayBinlogNameWorkerWriteConfigFileWorkerLogInvalidHandlerWorkerLogPointerInvalidWorkerLogFetchPointerWorkerLogUnmarshalPointerWorkerLogClearPointerWorkerLogTaskKeyNotValidWorkerLogUnmarshalTaskKeyWorkerLogFetchLogIterWorkerLogGetTaskLogWorkerLogUnmarshalBinaryWorkerLogForwardPointerWorkerLogMarshalTaskWorkerLogSaveTaskWorkerLogDeleteKVWorkerLogDeleteKVIterWorkerLogUnmarshalTaskMetaWorkerLogFetchTaskFromMetaWorkerLogVerifyTaskMetaWorkerLogSaveTaskMetaWorkerLogGetTaskMetaWorkerLogDeleteTaskMetaWorkerMetaTomlTr
ansformWorkerMetaOldFileStatWorkerMetaOldReadFileWorkerMetaEncodeTaskWorkerMetaRemoveOldDirWorkerMetaTaskLogNotFoundWorkerMetaHandleTaskOrderWorkerMetaOpenTxnWorkerMetaCommitTxnWorkerRelayStageNotValidWorkerRelayOperNotSupportWorkerOpenKVDBFileWorkerUpgradeCheckKVDirWorkerMarshalVerBinaryWorkerUnmarshalVerBinaryWorkerGetVersionFromKVWorkerSaveVersionToKVWorkerVerAutoDowngradeWorkerStartServiceWorkerAlreadyClosedWorkerNotRunningStageWorkerNotPausedStageWorkerUpdateTaskStageWorkerMigrateStopRelayWorkerSubTaskNotFoundWorkerSubTaskExistsWorkerOperSyncUnitOnlyWorkerRelayUnitStageWorkerNoSyncerRunningWorkerCannotUpdateSourceIDWorkerNoAvailUnitsWorkerDDLLockInfoNotFoundWorkerDDLLockInfoExistsWorkerCacheDDLInfoExistsWorkerExecSkipDDLConflictWorkerExecDDLSyncerOnlyWorkerExecDDLTimeoutWorkerWaitRelayCatchupTimeoutWorkerRelayIsPurgingWorkerHostPortNotValidWorkerNoStartWorkerAlreadyStartedWorkerSourceNotMatchWorkerFailToGetSubtaskConfigFromEtcdWorkerFailToGetSourceConfigFromEtcdWorkerDDLLockOpNotFoundWorkerTLSConfigNotValidWorkerFailConnectMasterWorkerWaitRelayCatchupGTIDWorkerRelayConfigChangingWorkerRouteTableDupMatchWorkerUpdateSubTaskConfigWorkerValidatorNotPausedWorkerServerClosedTracerParseFlagSetTracerConfigTomlTransformTracerConfigInvalidFlagTracerTraceEventNotFoundTracerTraceIDNotProvidedTracerParamNotValidTracerPostMethodOnlyTracerEventAssertionFailTracerEventTypeNotValidTracerStartServiceHAFailTxnOperationHAInvalidItemHAFailWatchEtcdHAFailLeaseOperationHAFailKeepaliveValidatorLoadPersistedDataValidatorPersistDataValidatorGetEventValidatorProcessRowEventValidatorValidateChangeValidatorNotFoundValidatorPanicValidatorTooMuchPendingSchemaTrackerInvalidJSONSchemaTrackerCannotCreateSchemaSchemaTrackerCannotCreateTableSchemaTrackerCannotSerializeSchemaTrackerCannotGetTableSchemaTrackerCannotExecDDLSchemaTrackerCannotFetchDownstreamTableSchemaTrackerCannotParseDownstreamTableSchemaTrackerInvalidCreateTableStmtSchemaTrackerRestoreStmtFailSchemaTrackerCannotDropTableSchemaTrackerInitSchemaTrackerMarshalJSONSchemaTrackerUnMarshalJSONSchemaTrackerUnSchemaNotExistSchemaTrackerCannotSetDownstreamSQLModeSchemaTrackerCannotInitDownstreamParserSchemaTrackerCannotMockDownstreamTableSchemaTrackerCannotFetchDownstreamCreateTableStmtSchemaTrackerIsClosedSchedulerNotStartedSchedulerStartedSchedulerWorkerExistSchedulerWorkerNotExistSchedulerWorkerOnlineSchedulerWorkerInvalidTransSchedulerSourceCfgExistSchedulerSourceCfgNotExistSchedulerSourcesUnboundSchedulerSourceOpTaskExistSchedulerRelayStageInvalidUpdateSchedulerRelayStageSourceNotExistSchedulerMultiTaskSchedulerSubTaskExistSchedulerSubTaskStageInvalidUpdateSchedulerSubTaskOpTaskNotExistSchedulerSubTaskOpSourceNotExistSchedulerTaskNotExistSchedulerRequireRunningTaskInSyncUnitSchedulerRelayWorkersBusySchedulerRelayWorkersBoundSchedulerRelayWorkersWrongRelaySchedulerSourceOpRelayExistSchedulerLatchInUseSchedulerSourceCfgUpdateSchedulerWrongWorkerInputSchedulerCantTransferToRelayWorkerSchedulerStartRelayOnSpecifiedSchedulerStopRelayOnSpecifiedSchedulerStartRelayOnBoundSchedulerStopRelayOnBoundSchedulerPauseTaskForTransferSourceSchedulerWorkerNotFreeSchedulerSubTaskNotExistSchedulerSubTaskCfgUpdateCtlGRPCCreateConnCtlInvalidTLSCfgCtlLoadTLSCfgOpenAPICommonOpenAPITaskSourceNotFoundNotSet" + +var _ErrCode_map = map[ErrCode]string{ + 10001: _ErrCode_name[0:13], + 10002: _ErrCode_name[13:22], + 10003: _ErrCode_name[22:35], + 10004: _ErrCode_name[35:45], + 10005: _ErrCode_name[45:58], + 10006: _ErrCode_name[58:73], + 11001: _ErrCode_name[73:90], + 11002: 
_ErrCode_name[90:101], + 11003: _ErrCode_name[101:119], + 11004: _ErrCode_name[119:139], + 11005: _ErrCode_name[139:158], + 11006: _ErrCode_name[158:166], + 11007: _ErrCode_name[166:180], + 11008: _ErrCode_name[180:194], + 11009: _ErrCode_name[194:203], + 11010: _ErrCode_name[203:221], + 11011: _ErrCode_name[221:233], + 11012: _ErrCode_name[233:247], + 11013: _ErrCode_name[247:260], + 11014: _ErrCode_name[260:275], + 11015: _ErrCode_name[275:290], + 11016: _ErrCode_name[290:307], + 11017: _ErrCode_name[307:327], + 11018: _ErrCode_name[327:339], + 11019: _ErrCode_name[339:359], + 11020: _ErrCode_name[359:379], + 11021: _ErrCode_name[379:401], + 11022: _ErrCode_name[401:422], + 11023: _ErrCode_name[422:435], + 11024: _ErrCode_name[435:442], + 11025: _ErrCode_name[442:458], + 11026: _ErrCode_name[458:482], + 11027: _ErrCode_name[482:500], + 11028: _ErrCode_name[500:519], + 11029: _ErrCode_name[519:534], + 11030: _ErrCode_name[534:556], + 11031: _ErrCode_name[556:568], + 11032: _ErrCode_name[568:585], + 11033: _ErrCode_name[585:605], + 11034: _ErrCode_name[605:628], + 11035: _ErrCode_name[628:650], + 11036: _ErrCode_name[650:668], + 11037: _ErrCode_name[668:692], + 11038: _ErrCode_name[692:711], + 11039: _ErrCode_name[711:727], + 11040: _ErrCode_name[727:740], + 11041: _ErrCode_name[740:750], + 11042: _ErrCode_name[750:773], + 11043: _ErrCode_name[773:787], + 11044: _ErrCode_name[787:811], + 11045: _ErrCode_name[811:827], + 11046: _ErrCode_name[827:839], + 11047: _ErrCode_name[839:860], + 11048: _ErrCode_name[860:885], + 11049: _ErrCode_name[885:904], + 11050: _ErrCode_name[904:929], + 11051: _ErrCode_name[929:950], + 11052: _ErrCode_name[950:973], + 11053: _ErrCode_name[973:999], + 11054: _ErrCode_name[999:1016], + 11055: _ErrCode_name[1016:1038], + 11056: _ErrCode_name[1038:1052], + 11057: _ErrCode_name[1052:1067], + 11058: _ErrCode_name[1067:1088], + 11059: _ErrCode_name[1088:1111], + 11060: _ErrCode_name[1111:1136], + 11061: _ErrCode_name[1136:1165], + 11062: _ErrCode_name[1165:1189], + 11063: _ErrCode_name[1189:1216], + 11064: _ErrCode_name[1216:1243], + 11065: _ErrCode_name[1243:1259], + 11066: _ErrCode_name[1259:1283], + 11067: _ErrCode_name[1283:1307], + 11068: _ErrCode_name[1307:1329], + 11069: _ErrCode_name[1329:1347], + 11070: _ErrCode_name[1347:1365], + 11071: _ErrCode_name[1365:1384], + 11072: _ErrCode_name[1384:1407], + 11073: _ErrCode_name[1407:1424], + 11074: _ErrCode_name[1424:1444], + 11075: _ErrCode_name[1444:1469], + 11076: _ErrCode_name[1469:1495], + 11077: _ErrCode_name[1495:1522], + 11078: _ErrCode_name[1522:1546], + 11079: _ErrCode_name[1546:1571], + 11080: _ErrCode_name[1571:1593], + 11081: _ErrCode_name[1593:1611], + 11082: _ErrCode_name[1611:1636], + 11083: _ErrCode_name[1636:1656], + 11084: _ErrCode_name[1656:1679], + 11085: _ErrCode_name[1679:1707], + 11086: _ErrCode_name[1707:1728], + 11087: _ErrCode_name[1728:1748], + 11088: _ErrCode_name[1748:1771], + 11089: _ErrCode_name[1771:1795], + 11090: _ErrCode_name[1795:1820], + 11091: _ErrCode_name[1820:1840], + 11092: _ErrCode_name[1840:1864], + 11093: _ErrCode_name[1864:1888], + 11094: _ErrCode_name[1888:1912], + 11095: _ErrCode_name[1912:1937], + 11096: _ErrCode_name[1937:1960], + 11097: _ErrCode_name[1960:1984], + 11098: _ErrCode_name[1984:2001], + 11099: _ErrCode_name[2001:2025], + 11100: _ErrCode_name[2025:2044], + 11101: _ErrCode_name[2044:2063], + 11102: _ErrCode_name[2063:2076], + 11103: _ErrCode_name[2076:2095], + 11104: _ErrCode_name[2095:2109], + 11105: _ErrCode_name[2109:2128], + 11106: 
_ErrCode_name[2128:2150], + 11107: _ErrCode_name[2150:2170], + 11108: _ErrCode_name[2170:2193], + 11109: _ErrCode_name[2193:2228], + 11110: _ErrCode_name[2228:2245], + 11111: _ErrCode_name[2245:2272], + 11112: _ErrCode_name[2272:2292], + 11113: _ErrCode_name[2292:2313], + 11114: _ErrCode_name[2313:2335], + 11115: _ErrCode_name[2335:2358], + 11116: _ErrCode_name[2358:2378], + 11117: _ErrCode_name[2378:2399], + 11118: _ErrCode_name[2399:2420], + 11119: _ErrCode_name[2420:2430], + 11120: _ErrCode_name[2430:2448], + 11121: _ErrCode_name[2448:2467], + 11122: _ErrCode_name[2467:2487], + 11123: _ErrCode_name[2487:2506], + 11124: _ErrCode_name[2506:2526], + 11125: _ErrCode_name[2526:2540], + 11126: _ErrCode_name[2540:2558], + 11127: _ErrCode_name[2558:2593], + 11128: _ErrCode_name[2593:2633], + 11129: _ErrCode_name[2633:2651], + 11130: _ErrCode_name[2651:2676], + 20001: _ErrCode_name[2676:2701], + 20002: _ErrCode_name[2701:2720], + 20003: _ErrCode_name[2720:2739], + 20004: _ErrCode_name[2739:2758], + 20005: _ErrCode_name[2758:2777], + 20006: _ErrCode_name[2777:2798], + 20007: _ErrCode_name[2798:2826], + 20008: _ErrCode_name[2826:2847], + 20009: _ErrCode_name[2847:2865], + 20010: _ErrCode_name[2865:2888], + 20011: _ErrCode_name[2888:2905], + 20012: _ErrCode_name[2905:2928], + 20013: _ErrCode_name[2928:2954], + 20014: _ErrCode_name[2954:2981], + 20015: _ErrCode_name[2981:3006], + 20016: _ErrCode_name[3006:3029], + 20017: _ErrCode_name[3029:3052], + 20018: _ErrCode_name[3052:3073], + 20019: _ErrCode_name[3073:3097], + 20020: _ErrCode_name[3097:3118], + 20021: _ErrCode_name[3118:3136], + 20022: _ErrCode_name[3136:3156], + 20023: _ErrCode_name[3156:3179], + 20024: _ErrCode_name[3179:3203], + 20025: _ErrCode_name[3203:3230], + 20026: _ErrCode_name[3230:3250], + 20027: _ErrCode_name[3250:3275], + 20028: _ErrCode_name[3275:3301], + 20029: _ErrCode_name[3301:3324], + 20030: _ErrCode_name[3324:3347], + 20031: _ErrCode_name[3347:3369], + 20032: _ErrCode_name[3369:3391], + 20033: _ErrCode_name[3391:3416], + 20034: _ErrCode_name[3416:3433], + 20035: _ErrCode_name[3433:3448], + 20036: _ErrCode_name[3448:3469], + 20037: _ErrCode_name[3469:3492], + 20038: _ErrCode_name[3492:3517], + 20039: _ErrCode_name[3517:3541], + 20040: _ErrCode_name[3541:3565], + 20041: _ErrCode_name[3565:3593], + 20042: _ErrCode_name[3593:3618], + 20043: _ErrCode_name[3618:3642], + 20044: _ErrCode_name[3642:3657], + 20045: _ErrCode_name[3657:3677], + 20046: _ErrCode_name[3677:3699], + 20047: _ErrCode_name[3699:3725], + 20048: _ErrCode_name[3725:3752], + 20049: _ErrCode_name[3752:3779], + 20050: _ErrCode_name[3779:3807], + 20051: _ErrCode_name[3807:3838], + 20052: _ErrCode_name[3838:3867], + 20053: _ErrCode_name[3867:3888], + 20054: _ErrCode_name[3888:3924], + 20055: _ErrCode_name[3924:3944], + 20056: _ErrCode_name[3944:3974], + 20057: _ErrCode_name[3974:3996], + 20058: _ErrCode_name[3996:4018], + 20059: _ErrCode_name[4018:4042], + 20060: _ErrCode_name[4042:4071], + 20061: _ErrCode_name[4071:4111], + 20062: _ErrCode_name[4111:4155], + 20063: _ErrCode_name[4155:4188], + 20064: _ErrCode_name[4188:4217], + 22001: _ErrCode_name[4217:4238], + 22002: _ErrCode_name[4238:4259], + 22003: _ErrCode_name[4259:4280], + 24001: _ErrCode_name[4280:4305], + 24002: _ErrCode_name[4305:4329], + 24003: _ErrCode_name[4329:4355], + 24004: _ErrCode_name[4355:4381], + 24005: _ErrCode_name[4381:4410], + 24006: _ErrCode_name[4410:4439], + 26001: _ErrCode_name[4439:4461], + 26002: _ErrCode_name[4461:4482], + 26003: _ErrCode_name[4482:4505], + 26004: 
_ErrCode_name[4505:4530], + 26005: _ErrCode_name[4530:4554], + 26006: _ErrCode_name[4554:4572], + 26007: _ErrCode_name[4572:4587], + 28001: _ErrCode_name[4587:4606], + 28002: _ErrCode_name[4606:4626], + 28003: _ErrCode_name[4626:4653], + 28004: _ErrCode_name[4653:4676], + 28005: _ErrCode_name[4676:4699], + 30001: _ErrCode_name[4699:4722], + 30002: _ErrCode_name[4722:4749], + 30003: _ErrCode_name[4749:4766], + 30004: _ErrCode_name[4766:4789], + 30005: _ErrCode_name[4789:4807], + 30006: _ErrCode_name[4807:4826], + 30007: _ErrCode_name[4826:4846], + 30008: _ErrCode_name[4846:4866], + 30009: _ErrCode_name[4866:4888], + 30010: _ErrCode_name[4888:4915], + 30011: _ErrCode_name[4915:4935], + 30012: _ErrCode_name[4935:4958], + 30013: _ErrCode_name[4958:4979], + 30014: _ErrCode_name[4979:5006], + 30015: _ErrCode_name[5006:5028], + 30016: _ErrCode_name[5028:5050], + 30017: _ErrCode_name[5050:5077], + 30018: _ErrCode_name[5077:5097], + 30019: _ErrCode_name[5097:5117], + 30020: _ErrCode_name[5117:5142], + 30021: _ErrCode_name[5142:5173], + 30022: _ErrCode_name[5173:5198], + 30023: _ErrCode_name[5198:5220], + 30024: _ErrCode_name[5220:5250], + 30025: _ErrCode_name[5250:5272], + 30026: _ErrCode_name[5272:5303], + 30027: _ErrCode_name[5303:5333], + 30028: _ErrCode_name[5333:5365], + 30029: _ErrCode_name[5365:5391], + 30030: _ErrCode_name[5391:5406], + 30031: _ErrCode_name[5406:5437], + 30032: _ErrCode_name[5437:5470], + 30033: _ErrCode_name[5470:5480], + 30034: _ErrCode_name[5480:5505], + 30035: _ErrCode_name[5505:5531], + 30036: _ErrCode_name[5531:5558], + 30037: _ErrCode_name[5558:5579], + 30038: _ErrCode_name[5579:5600], + 30039: _ErrCode_name[5600:5625], + 30040: _ErrCode_name[5625:5646], + 30041: _ErrCode_name[5646:5665], + 30042: _ErrCode_name[5665:5687], + 30043: _ErrCode_name[5687:5708], + 30044: _ErrCode_name[5708:5740], + 32001: _ErrCode_name[5740:5755], + 32002: _ErrCode_name[5755:5777], + 32003: _ErrCode_name[5777:5794], + 32004: _ErrCode_name[5794:5812], + 34001: _ErrCode_name[5812:5836], + 34002: _ErrCode_name[5836:5861], + 34003: _ErrCode_name[5861:5885], + 34004: _ErrCode_name[5885:5908], + 34005: _ErrCode_name[5908:5930], + 34006: _ErrCode_name[5930:5952], + 34007: _ErrCode_name[5952:5974], + 34008: _ErrCode_name[5974:6001], + 34009: _ErrCode_name[6001:6025], + 34010: _ErrCode_name[6025:6047], + 34011: _ErrCode_name[6047:6071], + 34012: _ErrCode_name[6071:6087], + 34013: _ErrCode_name[6087:6106], + 34014: _ErrCode_name[6106:6129], + 34015: _ErrCode_name[6129:6155], + 34016: _ErrCode_name[6155:6172], + 34017: _ErrCode_name[6172:6194], + 34018: _ErrCode_name[6194:6216], + 34019: _ErrCode_name[6216:6236], + 34020: _ErrCode_name[6236:6255], + 34021: _ErrCode_name[6255:6276], + 36001: _ErrCode_name[6276:6291], + 36002: _ErrCode_name[6291:6315], + 36003: _ErrCode_name[6315:6337], + 36004: _ErrCode_name[6337:6360], + 36005: _ErrCode_name[6360:6386], + 36006: _ErrCode_name[6386:6419], + 36007: _ErrCode_name[6419:6443], + 36008: _ErrCode_name[6443:6467], + 36009: _ErrCode_name[6467:6495], + 36010: _ErrCode_name[6495:6516], + 36011: _ErrCode_name[6516:6545], + 36012: _ErrCode_name[6545:6569], + 36013: _ErrCode_name[6569:6594], + 36014: _ErrCode_name[6594:6619], + 36015: _ErrCode_name[6619:6646], + 36016: _ErrCode_name[6646:6675], + 36017: _ErrCode_name[6675:6694], + 36018: _ErrCode_name[6694:6717], + 36019: _ErrCode_name[6717:6749], + 36020: _ErrCode_name[6749:6770], + 36021: _ErrCode_name[6770:6795], + 36022: _ErrCode_name[6795:6823], + 36023: _ErrCode_name[6823:6846], + 36024: 
_ErrCode_name[6846:6878], + 36025: _ErrCode_name[6878:6907], + 36026: _ErrCode_name[6907:6931], + 36027: _ErrCode_name[6931:6958], + 36028: _ErrCode_name[6958:6990], + 36029: _ErrCode_name[6990:7022], + 36030: _ErrCode_name[7022:7052], + 36031: _ErrCode_name[7052:7076], + 36032: _ErrCode_name[7076:7102], + 36033: _ErrCode_name[7102:7127], + 36034: _ErrCode_name[7127:7153], + 36035: _ErrCode_name[7153:7183], + 36036: _ErrCode_name[7183:7214], + 36037: _ErrCode_name[7214:7247], + 36038: _ErrCode_name[7247:7280], + 36039: _ErrCode_name[7280:7310], + 36040: _ErrCode_name[7310:7345], + 36041: _ErrCode_name[7345:7379], + 36042: _ErrCode_name[7379:7409], + 36043: _ErrCode_name[7409:7443], + 36044: _ErrCode_name[7443:7476], + 36045: _ErrCode_name[7476:7512], + 36046: _ErrCode_name[7512:7546], + 36047: _ErrCode_name[7546:7573], + 36048: _ErrCode_name[7573:7604], + 36049: _ErrCode_name[7604:7631], + 36050: _ErrCode_name[7631:7661], + 36051: _ErrCode_name[7661:7689], + 36052: _ErrCode_name[7689:7720], + 36053: _ErrCode_name[7720:7752], + 36054: _ErrCode_name[7752:7776], + 36055: _ErrCode_name[7776:7805], + 36056: _ErrCode_name[7805:7835], + 36057: _ErrCode_name[7835:7867], + 36058: _ErrCode_name[7867:7899], + 36059: _ErrCode_name[7899:7930], + 36060: _ErrCode_name[7930:7949], + 36061: _ErrCode_name[7949:7974], + 36062: _ErrCode_name[7974:7996], + 36063: _ErrCode_name[7996:8011], + 36064: _ErrCode_name[8011:8022], + 36065: _ErrCode_name[8022:8044], + 36066: _ErrCode_name[8044:8063], + 36067: _ErrCode_name[8063:8077], + 36068: _ErrCode_name[8077:8098], + 36069: _ErrCode_name[8098:8112], + 36070: _ErrCode_name[8112:8141], + 36071: _ErrCode_name[8141:8172], + 38001: _ErrCode_name[8172:8193], + 38002: _ErrCode_name[8193:8214], + 38003: _ErrCode_name[8214:8240], + 38004: _ErrCode_name[8240:8260], + 38005: _ErrCode_name[8260:8285], + 38006: _ErrCode_name[8285:8306], + 38007: _ErrCode_name[8306:8330], + 38008: _ErrCode_name[8330:8352], + 38009: _ErrCode_name[8352:8376], + 38010: _ErrCode_name[8376:8400], + 38011: _ErrCode_name[8400:8423], + 38012: _ErrCode_name[8423:8446], + 38013: _ErrCode_name[8446:8471], + 38014: _ErrCode_name[8471:8495], + 38015: _ErrCode_name[8495:8520], + 38016: _ErrCode_name[8520:8541], + 38017: _ErrCode_name[8541:8559], + 38018: _ErrCode_name[8559:8576], + 38019: _ErrCode_name[8576:8594], + 38020: _ErrCode_name[8594:8615], + 38021: _ErrCode_name[8615:8638], + 38022: _ErrCode_name[8638:8661], + 38023: _ErrCode_name[8661:8683], + 38024: _ErrCode_name[8683:8701], + 38025: _ErrCode_name[8701:8728], + 38026: _ErrCode_name[8728:8752], + 38027: _ErrCode_name[8752:8779], + 38028: _ErrCode_name[8779:8804], + 38029: _ErrCode_name[8804:8829], + 38030: _ErrCode_name[8829:8852], + 38031: _ErrCode_name[8852:8870], + 38032: _ErrCode_name[8870:8894], + 38033: _ErrCode_name[8894:8918], + 38034: _ErrCode_name[8918:8938], + 38035: _ErrCode_name[8938:8960], + 38036: _ErrCode_name[8960:8981], + 38037: _ErrCode_name[8981:9009], + 38038: _ErrCode_name[9009:9033], + 38039: _ErrCode_name[9033:9051], + 38040: _ErrCode_name[9051:9074], + 38041: _ErrCode_name[9074:9096], + 38042: _ErrCode_name[9096:9123], + 38043: _ErrCode_name[9123:9156], + 38044: _ErrCode_name[9156:9179], + 38045: _ErrCode_name[9179:9206], + 38046: _ErrCode_name[9206:9231], + 38047: _ErrCode_name[9231:9255], + 38048: _ErrCode_name[9255:9279], + 38049: _ErrCode_name[9279:9303], + 38050: _ErrCode_name[9303:9334], + 38051: _ErrCode_name[9334:9357], + 38052: _ErrCode_name[9357:9376], + 38053: _ErrCode_name[9376:9402], + 38054: 
_ErrCode_name[9402:9439], + 38055: _ErrCode_name[9439:9478], + 38056: _ErrCode_name[9478:9516], + 38057: _ErrCode_name[9516:9538], + 38058: _ErrCode_name[9538:9553], + 40001: _ErrCode_name[9553:9571], + 40002: _ErrCode_name[9571:9588], + 40003: _ErrCode_name[9588:9614], + 40004: _ErrCode_name[9614:9641], + 40005: _ErrCode_name[9641:9659], + 40006: _ErrCode_name[9659:9680], + 40007: _ErrCode_name[9680:9701], + 40008: _ErrCode_name[9701:9722], + 40009: _ErrCode_name[9722:9745], + 40010: _ErrCode_name[9745:9768], + 40011: _ErrCode_name[9768:9789], + 40012: _ErrCode_name[9789:9814], + 40013: _ErrCode_name[9814:9835], + 40014: _ErrCode_name[9835:9859], + 40015: _ErrCode_name[9859:9884], + 40016: _ErrCode_name[9884:9905], + 40017: _ErrCode_name[9905:9924], + 40018: _ErrCode_name[9924:9948], + 40019: _ErrCode_name[9948:9971], + 40020: _ErrCode_name[9971:9991], + 40021: _ErrCode_name[9991:10008], + 40022: _ErrCode_name[10008:10025], + 40023: _ErrCode_name[10025:10046], + 40024: _ErrCode_name[10046:10072], + 40025: _ErrCode_name[10072:10098], + 40026: _ErrCode_name[10098:10121], + 40027: _ErrCode_name[10121:10142], + 40028: _ErrCode_name[10142:10162], + 40029: _ErrCode_name[10162:10185], + 40030: _ErrCode_name[10185:10208], + 40031: _ErrCode_name[10208:10229], + 40032: _ErrCode_name[10229:10250], + 40033: _ErrCode_name[10250:10270], + 40034: _ErrCode_name[10270:10292], + 40035: _ErrCode_name[10292:10317], + 40036: _ErrCode_name[10317:10342], + 40037: _ErrCode_name[10342:10359], + 40038: _ErrCode_name[10359:10378], + 40039: _ErrCode_name[10378:10402], + 40040: _ErrCode_name[10402:10427], + 40041: _ErrCode_name[10427:10445], + 40042: _ErrCode_name[10445:10468], + 40043: _ErrCode_name[10468:10490], + 40044: _ErrCode_name[10490:10514], + 40045: _ErrCode_name[10514:10536], + 40046: _ErrCode_name[10536:10557], + 40047: _ErrCode_name[10557:10579], + 40048: _ErrCode_name[10579:10597], + 40049: _ErrCode_name[10597:10616], + 40050: _ErrCode_name[10616:10637], + 40051: _ErrCode_name[10637:10657], + 40052: _ErrCode_name[10657:10678], + 40053: _ErrCode_name[10678:10700], + 40054: _ErrCode_name[10700:10721], + 40055: _ErrCode_name[10721:10740], + 40056: _ErrCode_name[10740:10762], + 40057: _ErrCode_name[10762:10782], + 40058: _ErrCode_name[10782:10803], + 40059: _ErrCode_name[10803:10829], + 40060: _ErrCode_name[10829:10847], + 40061: _ErrCode_name[10847:10872], + 40062: _ErrCode_name[10872:10895], + 40063: _ErrCode_name[10895:10919], + 40064: _ErrCode_name[10919:10944], + 40065: _ErrCode_name[10944:10967], + 40066: _ErrCode_name[10967:10987], + 40067: _ErrCode_name[10987:11016], + 40068: _ErrCode_name[11016:11036], + 40069: _ErrCode_name[11036:11058], + 40070: _ErrCode_name[11058:11071], + 40071: _ErrCode_name[11071:11091], + 40072: _ErrCode_name[11091:11111], + 40073: _ErrCode_name[11111:11147], + 40074: _ErrCode_name[11147:11182], + 40075: _ErrCode_name[11182:11205], + 40076: _ErrCode_name[11205:11228], + 40077: _ErrCode_name[11228:11251], + 40078: _ErrCode_name[11251:11277], + 40079: _ErrCode_name[11277:11302], + 40080: _ErrCode_name[11302:11326], + 40081: _ErrCode_name[11326:11351], + 40082: _ErrCode_name[11351:11375], + 40083: _ErrCode_name[11375:11393], + 42001: _ErrCode_name[11393:11411], + 42002: _ErrCode_name[11411:11436], + 42003: _ErrCode_name[11436:11459], + 42004: _ErrCode_name[11459:11483], + 42005: _ErrCode_name[11483:11507], + 42006: _ErrCode_name[11507:11526], + 42007: _ErrCode_name[11526:11546], + 42008: _ErrCode_name[11546:11570], + 42009: _ErrCode_name[11570:11593], + 42010: 
_ErrCode_name[11593:11611],
+	42501: _ErrCode_name[11611:11629],
+	42502: _ErrCode_name[11629:11642],
+	42503: _ErrCode_name[11642:11657],
+	42504: _ErrCode_name[11657:11677],
+	42505: _ErrCode_name[11677:11692],
+	43001: _ErrCode_name[11692:11718],
+	43002: _ErrCode_name[11718:11738],
+	43003: _ErrCode_name[11738:11755],
+	43004: _ErrCode_name[11755:11779],
+	43005: _ErrCode_name[11779:11802],
+	43006: _ErrCode_name[11802:11819],
+	43007: _ErrCode_name[11819:11833],
+	43008: _ErrCode_name[11833:11856],
+	44001: _ErrCode_name[11856:11880],
+	44002: _ErrCode_name[11880:11911],
+	44003: _ErrCode_name[11911:11941],
+	44004: _ErrCode_name[11941:11969],
+	44005: _ErrCode_name[11969:11996],
+	44006: _ErrCode_name[11996:12022],
+	44007: _ErrCode_name[12022:12061],
+	44008: _ErrCode_name[12061:12100],
+	44009: _ErrCode_name[12100:12135],
+	44010: _ErrCode_name[12135:12163],
+	44011: _ErrCode_name[12163:12191],
+	44012: _ErrCode_name[12191:12208],
+	44013: _ErrCode_name[12208:12232],
+	44014: _ErrCode_name[12232:12258],
+	44015: _ErrCode_name[12258:12287],
+	44016: _ErrCode_name[12287:12326],
+	44017: _ErrCode_name[12326:12365],
+	44018: _ErrCode_name[12365:12403],
+	44019: _ErrCode_name[12403:12452],
+	44020: _ErrCode_name[12452:12473],
+	46001: _ErrCode_name[12473:12492],
+	46002: _ErrCode_name[12492:12508],
+	46003: _ErrCode_name[12508:12528],
+	46004: _ErrCode_name[12528:12551],
+	46005: _ErrCode_name[12551:12572],
+	46006: _ErrCode_name[12572:12599],
+	46007: _ErrCode_name[12599:12622],
+	46008: _ErrCode_name[12622:12648],
+	46009: _ErrCode_name[12648:12671],
+	46010: _ErrCode_name[12671:12697],
+	46011: _ErrCode_name[12697:12729],
+	46012: _ErrCode_name[12729:12762],
+	46013: _ErrCode_name[12762:12780],
+	46014: _ErrCode_name[12780:12801],
+	46015: _ErrCode_name[12801:12835],
+	46016: _ErrCode_name[12835:12865],
+	46017: _ErrCode_name[12865:12897],
+	46018: _ErrCode_name[12897:12918],
+	46019: _ErrCode_name[12918:12955],
+	46020: _ErrCode_name[12955:12980],
+	46021: _ErrCode_name[12980:13006],
+	46022: _ErrCode_name[13006:13037],
+	46023: _ErrCode_name[13037:13064],
+	46024: _ErrCode_name[13064:13083],
+	46025: _ErrCode_name[13083:13107],
+	46026: _ErrCode_name[13107:13132],
+	46027: _ErrCode_name[13132:13166],
+	46028: _ErrCode_name[13166:13196],
+	46029: _ErrCode_name[13196:13225],
+	46030: _ErrCode_name[13225:13251],
+	46031: _ErrCode_name[13251:13276],
+	46032: _ErrCode_name[13276:13311],
+	46033: _ErrCode_name[13311:13333],
+	46034: _ErrCode_name[13333:13357],
+	46035: _ErrCode_name[13357:13382],
+	48001: _ErrCode_name[13382:13399],
+	48002: _ErrCode_name[13399:13415],
+	48003: _ErrCode_name[13415:13428],
+	49001: _ErrCode_name[13428:13441],
+	49002: _ErrCode_name[13441:13466],
+	50000: _ErrCode_name[13466:13472],
+}
+
+func (i ErrCode) String() string {
+	if str, ok := _ErrCode_map[i]; ok {
+		return str
+	}
+	return "ErrCode(" + strconv.FormatInt(int64(i), 10) + ")"
+}