diff --git a/br/cmd/br/BUILD.bazel b/br/cmd/br/BUILD.bazel
index 7254e0b4c3fdb..a8605aae2f3ea 100644
--- a/br/cmd/br/BUILD.bazel
+++ b/br/cmd/br/BUILD.bazel
@@ -30,7 +30,6 @@ go_library(
         "//br/pkg/utils",
         "//br/pkg/version/build",
         "//config",
-        "//ddl",
         "//parser/model",
         "//session",
         "//util",
diff --git a/br/cmd/br/backup.go b/br/cmd/br/backup.go
index 942fd8d4a46db..a6f973d26829c 100644
--- a/br/cmd/br/backup.go
+++ b/br/cmd/br/backup.go
@@ -11,7 +11,7 @@ import (
     "github.com/pingcap/tidb/br/pkg/trace"
     "github.com/pingcap/tidb/br/pkg/utils"
     "github.com/pingcap/tidb/br/pkg/version/build"
-    "github.com/pingcap/tidb/ddl"
+    "github.com/pingcap/tidb/config"
     "github.com/pingcap/tidb/session"
     "github.com/spf13/cobra"
     "go.uber.org/zap"
@@ -78,7 +78,7 @@ func NewBackupCommand() *cobra.Command {
             task.LogArguments(c)
             // Do not run ddl worker in BR.
-            ddl.RunWorker = false
+            config.GetGlobalConfig().Instance.TiDBEnableDDL.Store(false)
             summary.SetUnit(summary.BackupUnit)
             return nil
diff --git a/cmd/ddltest/BUILD.bazel b/cmd/ddltest/BUILD.bazel
index 6870dddb5fd04..220fc51cbaa25 100644
--- a/cmd/ddltest/BUILD.bazel
+++ b/cmd/ddltest/BUILD.bazel
@@ -11,7 +11,7 @@ go_test(
     ],
     flaky = True,
     deps = [
-        "//ddl",
+        "//config",
         "//domain",
         "//kv",
         "//parser/model",
diff --git a/cmd/ddltest/ddl_test.go b/cmd/ddltest/ddl_test.go
index 6fa05c1ddec21..974094d3a1689 100644
--- a/cmd/ddltest/ddl_test.go
+++ b/cmd/ddltest/ddl_test.go
@@ -33,7 +33,7 @@ import (
     _ "github.com/go-sql-driver/mysql"
     "github.com/pingcap/errors"
     "github.com/pingcap/log"
-    "github.com/pingcap/tidb/ddl"
+    "github.com/pingcap/tidb/config"
     "github.com/pingcap/tidb/domain"
     "github.com/pingcap/tidb/kv"
     "github.com/pingcap/tidb/parser/model"
@@ -116,7 +116,7 @@ func createDDLSuite(t *testing.T) (s *ddlSuite) {
     // Stop current DDL worker, so that we can't be the owner now.
     err = domain.GetDomain(s.ctx).DDL().Stop()
     require.NoError(t, err)
-    ddl.RunWorker = false
+    config.GetGlobalConfig().Instance.TiDBEnableDDL.Store(false)
     session.ResetStoreForWithTiKVTest(s.store)
     s.dom.Close()
     require.NoError(t, s.store.Close())
diff --git a/config/config.go b/config/config.go
index 193beee961f63..a11a105580b3e 100644
--- a/config/config.go
+++ b/config/config.go
@@ -119,6 +119,7 @@ var (
             "check-mb4-value-in-utf8":       "tidb_check_mb4_value_in_utf8",
             "enable-collect-execution-info": "tidb_enable_collect_execution_info",
             "max-server-connections":        "max_connections",
+            "run-ddl":                       "tidb_enable_ddl",
         },
     },
     {
@@ -480,10 +481,11 @@ type Instance struct {
     ForcePriority         string  `toml:"tidb_force_priority" json:"tidb_force_priority"`
     MemoryUsageAlarmRatio float64 `toml:"tidb_memory_usage_alarm_ratio" json:"tidb_memory_usage_alarm_ratio"`
     // EnableCollectExecutionInfo enables the TiDB to collect execution info.
-    EnableCollectExecutionInfo bool   `toml:"tidb_enable_collect_execution_info" json:"tidb_enable_collect_execution_info"`
-    PluginDir                  string `toml:"plugin_dir" json:"plugin_dir"`
-    PluginLoad                 string `toml:"plugin_load" json:"plugin_load"`
-    MaxConnections             uint32 `toml:"max_connections" json:"max_connections"`
+    EnableCollectExecutionInfo bool       `toml:"tidb_enable_collect_execution_info" json:"tidb_enable_collect_execution_info"`
+    PluginDir                  string     `toml:"plugin_dir" json:"plugin_dir"`
+    PluginLoad                 string     `toml:"plugin_load" json:"plugin_load"`
+    MaxConnections             uint32     `toml:"max_connections" json:"max_connections"`
+    TiDBEnableDDL              AtomicBool `toml:"tidb_enable_ddl" json:"tidb_enable_ddl"`
 }

 func (l *Log) getDisableTimestamp() bool {
@@ -859,6 +861,7 @@ var defaultConf = Config{
         PluginDir:                  "/data/deploy/plugin",
         PluginLoad:                 "",
         MaxConnections:             0,
+        TiDBEnableDDL:              *NewAtomicBool(true),
     },
     Status: Status{
         ReportStatus: true,
@@ -1194,7 +1197,7 @@ func (c *Config) Valid() error {
         }
         return fmt.Errorf("invalid store=%s, valid storages=%v", c.Store, nameList)
     }
-    if c.Store == "mocktikv" && !c.RunDDL {
+    if c.Store == "mocktikv" && !c.Instance.TiDBEnableDDL.Load() {
         return fmt.Errorf("can't disable DDL on mocktikv")
     }
     if c.MaxIndexLength < DefMaxIndexLength || c.MaxIndexLength > DefMaxOfMaxIndexLength {
diff --git a/config/config.toml.example b/config/config.toml.example
index e30ebdb3c4b37..3b6b42791dcda 100644
--- a/config/config.toml.example
+++ b/config/config.toml.example
@@ -18,9 +18,6 @@ path = "/tmp/tidb"
 # The socket file to use for connection.
 socket = "/tmp/tidb-{Port}.sock"
-# Run ddl worker on this tidb-server.
-run-ddl = true
-
 # Schema lease duration, very dangerous to change only if you know what you do.
 lease = "45s"
@@ -462,3 +459,6 @@ tidb_record_plan_in_slow_log = 1
 # The maximum permitted number of simultaneous client connections. When the value is 0, the number of connections is unlimited.
 max_connections = 0
+
+# Run ddl worker on this tidb-server.
+tidb_enable_ddl = true
diff --git a/config/config_test.go b/config/config_test.go
index 1471ff719b4e6..ed8cafa0f088d 100644
--- a/config/config_test.go
+++ b/config/config_test.go
@@ -195,9 +195,6 @@ path = "/tmp/tidb"
 # The socket file to use for connection.
 socket = "/tmp/tidb-{Port}.sock"
-# Run ddl worker on this tidb-server.
-run-ddl = true
-
 # Schema lease duration, very dangerous to change only if you know what you do.
 lease = "45s"
@@ -310,6 +307,9 @@ enable-enum-length-limit = true
 # The maximum permitted number of simultaneous client connections. When the value is 0, the number of connections is unlimited.
 max_connections = 0
+# Run ddl worker on this tidb-server.
+tidb_enable_ddl = true
+
 [log]
 # Log level: debug, info, warn, error, fatal.
 level = "info"
@@ -1022,7 +1022,10 @@ func TestConflictInstanceConfig(t *testing.T) {
     // Just receive a warning and keep their respective values.
     expectedConflictOptions := map[string]InstanceConfigSection{
         "": {
-            "", map[string]string{"check-mb4-value-in-utf8": "tidb_check_mb4_value_in_utf8"},
+            "", map[string]string{
+                "check-mb4-value-in-utf8": "tidb_check_mb4_value_in_utf8",
+                "run-ddl":                 "tidb_enable_ddl",
+            },
         },
         "log": {
             "log", map[string]string{"enable-slow-log": "tidb_enable_slow_log"},
@@ -1031,10 +1034,10 @@
             "performance", map[string]string{"force-priority": "tidb_force_priority"},
         },
     }
-    _, err = f.WriteString("check-mb4-value-in-utf8 = true \n" +
+    _, err = f.WriteString("check-mb4-value-in-utf8 = true \nrun-ddl = true \n" +
         "[log] \nenable-slow-log = true \n" +
         "[performance] \nforce-priority = \"NO_PRIORITY\"\n" +
-        "[instance] \ntidb_check_mb4_value_in_utf8 = false \ntidb_enable_slow_log = false \ntidb_force_priority = \"LOW_PRIORITY\"")
+        "[instance] \ntidb_check_mb4_value_in_utf8 = false \ntidb_enable_slow_log = false \ntidb_force_priority = \"LOW_PRIORITY\"\ntidb_enable_ddl = false")
     require.NoError(t, err)
     require.NoError(t, f.Sync())
     err = conf.Load(configFile)
@@ -1046,6 +1049,8 @@
     require.Equal(t, false, conf.Instance.EnableSlowLog.Load())
     require.Equal(t, "NO_PRIORITY", conf.Performance.ForcePriority)
     require.Equal(t, "LOW_PRIORITY", conf.Instance.ForcePriority)
+    require.Equal(t, true, conf.RunDDL)
+    require.Equal(t, false, conf.Instance.TiDBEnableDDL.Load())
     require.Equal(t, 0, len(DeprecatedOptions))
     for _, conflictOption := range ConflictOptions {
         expectedConflictOption, ok := expectedConflictOptions[conflictOption.SectionName]
@@ -1075,6 +1080,7 @@ func TestDeprecatedConfig(t *testing.T) {
         "": {
             "", map[string]string{
                 "enable-collect-execution-info": "tidb_enable_collect_execution_info",
+                "run-ddl":                       "tidb_enable_ddl",
             },
         },
         "log": {
@@ -1090,7 +1096,7 @@ func TestDeprecatedConfig(t *testing.T) {
             },
         },
     }
-    _, err = f.WriteString("enable-collect-execution-info = false \n" +
+    _, err = f.WriteString("enable-collect-execution-info = false \nrun-ddl = false \n" +
         "[plugin] \ndir=\"/plugin-path\" \nload=\"audit-1,whitelist-1\" \n" +
         "[log] \nslow-threshold = 100 \n" +
         "[performance] \nmemory-usage-alarm-ratio = 0.5")
diff --git a/ddl/ddl.go b/ddl/ddl.go
index ffcd149bf3a3f..bad7c9b7d088b 100644
--- a/ddl/ddl.go
+++ b/ddl/ddl.go
@@ -37,6 +37,7 @@ import (
     "github.com/pingcap/kvproto/pkg/kvrpcpb"
     "github.com/pingcap/tidb/config"
     "github.com/pingcap/tidb/ddl/util"
+    "github.com/pingcap/tidb/domain/infosync"
     "github.com/pingcap/tidb/infoschema"
     "github.com/pingcap/tidb/kv"
     "github.com/pingcap/tidb/meta"
@@ -565,6 +566,9 @@ func newDDL(ctx context.Context, options ...Option) *ddl {
         ddlJobCh: make(chan struct{}, 100),
     }
+    // Register functions for enable/disable ddl when changing system variable `tidb_enable_ddl`.
+    variable.EnableDDL = d.EnableDDL
+    variable.DisableDDL = d.DisableDDL
     variable.SwitchConcurrentDDL = d.SwitchConcurrentDDL
     return d
@@ -638,37 +642,36 @@ func (d *ddl) prepareWorkers4legacyDDL() {
 // Start implements DDL.Start interface.
 func (d *ddl) Start(ctxPool *pools.ResourcePool) error {
-    logutil.BgLogger().Info("[ddl] start DDL", zap.String("ID", d.uuid), zap.Bool("runWorker", RunWorker))
+    logutil.BgLogger().Info("[ddl] start DDL", zap.String("ID", d.uuid), zap.Bool("runWorker", config.GetGlobalConfig().Instance.TiDBEnableDDL.Load()))
     d.wg.Run(d.limitDDLJobs)
     d.sessPool = newSessionPool(ctxPool, d.store)
-    // If RunWorker is true, we need campaign owner and do DDL job.
-    // Otherwise, we needn't do that.
-    if RunWorker {
-        d.ownerManager.SetBeOwnerHook(func() {
-            var err error
-            d.ddlSeqNumMu.seqNum, err = d.GetNextDDLSeqNum()
-            if err != nil {
-                logutil.BgLogger().Error("error when getting the ddl history count", zap.Error(err))
-            }
-        })
-
-        err := d.ownerManager.CampaignOwner()
+    d.ownerManager.SetBeOwnerHook(func() {
+        var err error
+        d.ddlSeqNumMu.seqNum, err = d.GetNextDDLSeqNum()
         if err != nil {
-            return errors.Trace(err)
+            logutil.BgLogger().Error("error when getting the ddl history count", zap.Error(err))
         }
+    })
+
+    d.delRangeMgr = d.newDeleteRangeManager(ctxPool == nil)
-        d.delRangeMgr = d.newDeleteRangeManager(ctxPool == nil)
+    d.prepareWorkers4ConcurrencyDDL()
+    d.prepareWorkers4legacyDDL()
-        d.prepareWorkers4ConcurrencyDDL()
-        d.prepareWorkers4legacyDDL()
+    go d.schemaSyncer.StartCleanWork()
+    if config.TableLockEnabled() {
+        d.wg.Add(1)
+        go d.startCleanDeadTableLock()
+    }
+    metrics.DDLCounter.WithLabelValues(metrics.StartCleanWork).Inc()
-        go d.schemaSyncer.StartCleanWork()
-        if config.TableLockEnabled() {
-            d.wg.Add(1)
-            go d.startCleanDeadTableLock()
+    // If tidb_enable_ddl is true, we need campaign owner and do DDL job.
+    // Otherwise, we needn't do that.
+    if config.GetGlobalConfig().Instance.TiDBEnableDDL.Load() {
+        if err := d.EnableDDL(); err != nil {
+            return err
         }
-        metrics.DDLCounter.WithLabelValues(metrics.StartCleanWork).Inc()
     }

     variable.RegisterStatistics(d)
@@ -681,6 +684,35 @@ func (d *ddl) Start(ctxPool *pools.ResourcePool) error {
     return nil
 }

+// EnableDDL enables this node to execute ddl.
+// Since ownerManager.CampaignOwner will start a new goroutine to run ownerManager.campaignLoop,
+// we should make sure that before invoking EnableDDL(), ddl is DISABLED.
+func (d *ddl) EnableDDL() error {
+    err := d.ownerManager.CampaignOwner()
+    return errors.Trace(err)
+}
+
+// DisableDDL disables this node from executing ddl.
+// We should make sure that before invoking DisableDDL(), ddl is ENABLED.
+func (d *ddl) DisableDDL() error {
+    if d.ownerManager.IsOwner() {
+        // If there is only one node, we should NOT disable ddl.
+        serverInfo, err := infosync.GetAllServerInfo(d.ctx)
+        if err != nil {
+            logutil.BgLogger().Error("[ddl] error when GetAllServerInfo", zap.Error(err))
+            return err
+        }
+        if len(serverInfo) <= 1 {
+            return dbterror.ErrDDLSetting.GenWithStackByArgs("can not disable ddl when there is only one instance")
+        }
+        // FIXME: if possible, when this node is the only node with DDL, the setting of DisableDDL should fail.
+    }
+
+    // disable campaign by interrupting campaignLoop
+    d.ownerManager.CampaignCancel()
+    return nil
+}
+
 // GetNextDDLSeqNum return the next DDL seq num.
 func (d *ddl) GetNextDDLSeqNum() (uint64, error) {
     var count uint64
@@ -830,7 +862,7 @@ func getJobCheckInterval(job *model.Job, i int) (time.Duration, bool) {
 func (d *ddl) asyncNotifyWorker(job *model.Job) {
     // If the workers don't run, we needn't notify workers.
-    if !RunWorker {
+    if !config.GetGlobalConfig().Instance.TiDBEnableDDL.Load() {
         return
     }
     if variable.EnableConcurrentDDL.Load() {
diff --git a/ddl/ddl_worker.go b/ddl/ddl_worker.go
index 4c22af0b35025..bac8d9d6318c9 100644
--- a/ddl/ddl_worker.go
+++ b/ddl/ddl_worker.go
@@ -50,8 +50,6 @@ import (
 )

 var (
-    // RunWorker indicates if this TiDB server starts DDL worker and can run DDL job.
-    RunWorker = true
     // ddlWorkerID is used for generating the next DDL worker ID.
     ddlWorkerID = atomicutil.NewInt32(0)
     // WaitTimeWhenErrorOccurred is waiting interval when processing DDL jobs encounter errors.
diff --git a/errno/errcode.go b/errno/errcode.go
index 4054229f8dce5..4b6f08c46ac7d 100644
--- a/errno/errcode.go
+++ b/errno/errcode.go
@@ -1073,6 +1073,8 @@ const (
     ErrHTTPServiceError            = 8243
     ErrPartitionColumnStatsMissing = 8244
     ErrColumnInChange              = 8245
+    ErrDDLSetting                  = 8246
+
     // TiKV/PD/TiFlash errors.
     ErrPDServerTimeout   = 9001
     ErrTiKVServerTimeout = 9002
diff --git a/errno/errname.go b/errno/errname.go
index a8be48e6eed06..94e7b9f3a1887 100644
--- a/errno/errname.go
+++ b/errno/errname.go
@@ -1066,6 +1066,7 @@ var MySQLErrName = map[uint16]*mysql.ErrMessage{
     ErrJSONObjectKeyTooLong:        mysql.Message("TiDB does not yet support JSON objects with the key length >= 65536", nil),
     ErrPartitionStatsMissing:       mysql.Message("Build table: %s global-level stats failed due to missing partition-level stats", nil),
     ErrPartitionColumnStatsMissing: mysql.Message("Build table: %s global-level stats failed due to missing partition-level column stats, please run analyze table to refresh columns of all partitions", nil),
+    ErrDDLSetting:                  mysql.Message("Error happened when enable/disable DDL: %s", nil),
     ErrNotSupportedWithSem:         mysql.Message("Feature '%s' is not supported when security enhanced mode is enabled", nil),
     ErrPlacementPolicyCheck:        mysql.Message("Placement policy didn't meet the constraint, reason: %s", nil),
diff --git a/errors.toml b/errors.toml
index f47c968f0cc0e..9998a985f4f91 100755
--- a/errors.toml
+++ b/errors.toml
@@ -1261,6 +1261,11 @@ error = '''
 column %s id %d does not exist, this column may have been updated by other DDL ran in parallel
 '''
+["ddl:8246"]
+error = '''
+Error happened when enable/disable DDL: %s
+'''
+
 ["domain:8027"]
 error = '''
 Information schema is out of date: schema failed to update in 1 lease, please make sure TiDB can connect to TiKV
diff --git a/owner/manager.go b/owner/manager.go
index 756b44b7688cf..4223a433b8b55 100644
--- a/owner/manager.go
+++ b/owner/manager.go
@@ -50,10 +50,12 @@ type Manager interface {
     CampaignOwner() error
     // ResignOwner lets the owner start a new election.
     ResignOwner(ctx context.Context) error
-    // Cancel cancels this etcd ownerManager campaign.
+    // Cancel cancels this etcd ownerManager.
     Cancel()
     // RequireOwner requires the ownerManager is owner.
     RequireOwner(ctx context.Context) error
+    // CampaignCancel cancels one etcd campaign
+    CampaignCancel()
     // SetBeOwnerHook sets a hook. The hook is called before becoming an owner.
     SetBeOwnerHook(hook func())
@@ -71,17 +73,18 @@ type DDLOwnerChecker interface {
 // ownerManager represents the structure which is used for electing owner.
 type ownerManager struct {
-    id        string // id is the ID of the manager.
-    key       string
-    ctx       context.Context
-    prompt    string
-    logPrefix string
-    logCtx    context.Context
-    etcdCli   *clientv3.Client
-    cancel    context.CancelFunc
-    elec      unsafe.Pointer
-    wg        sync.WaitGroup
-    beOwnerHook func()
+    id             string // id is the ID of the manager.
+    key            string
+    ctx            context.Context
+    prompt         string
+    logPrefix      string
+    logCtx         context.Context
+    etcdCli        *clientv3.Client
+    cancel         context.CancelFunc
+    elec           unsafe.Pointer
+    wg             sync.WaitGroup
+    beOwnerHook    func()
+    campaignCancel context.CancelFunc
 }

 // NewOwnerManager creates a new Manager.
@@ -185,11 +188,17 @@ func (m *ownerManager) RetireOwner() {
     atomic.StorePointer(&m.elec, nil)
 }

+// CampaignCancel implements Manager.CampaignCancel interface.
+func (m *ownerManager) CampaignCancel() {
+    m.campaignCancel()
+    m.wg.Wait()
+}
+
 func (m *ownerManager) campaignLoop(etcdSession *concurrency.Session) {
-    var cancel context.CancelFunc
-    ctx, cancel := context.WithCancel(m.ctx)
+    var campaignContext context.Context
+    campaignContext, m.campaignCancel = context.WithCancel(m.ctx)
     defer func() {
-        cancel()
+        m.campaignCancel()
         if r := recover(); r != nil {
             logutil.BgLogger().Error("recover panic", zap.String("prompt", m.prompt), zap.Any("error", r), zap.Stack("buffer"))
             metrics.PanicCounter.WithLabelValues(metrics.LabelDDLOwner).Inc()
@@ -209,13 +218,13 @@ func (m *ownerManager) campaignLoop(etcdSession *concurrency.Session) {
         case <-etcdSession.Done():
             logutil.Logger(logCtx).Info("etcd session is done, creates a new one")
             leaseID := etcdSession.Lease()
-            etcdSession, err = util2.NewSession(ctx, logPrefix, m.etcdCli, util2.NewSessionRetryUnlimited, ManagerSessionTTL)
+            etcdSession, err = util2.NewSession(campaignContext, logPrefix, m.etcdCli, util2.NewSessionRetryUnlimited, ManagerSessionTTL)
             if err != nil {
                 logutil.Logger(logCtx).Info("break campaign loop, NewSession failed", zap.Error(err))
                 m.revokeSession(logPrefix, leaseID)
                 return
             }
-        case <-ctx.Done():
+        case <-campaignContext.Done():
             logutil.Logger(logCtx).Info("break campaign loop, context is done")
             m.revokeSession(logPrefix, etcdSession.Lease())
             return
@@ -233,19 +242,19 @@ func (m *ownerManager) campaignLoop(etcdSession *concurrency.Session) {
         }

         elec := concurrency.NewElection(etcdSession, m.key)
-        err = elec.Campaign(ctx, m.id)
+        err = elec.Campaign(campaignContext, m.id)
         if err != nil {
             logutil.Logger(logCtx).Info("failed to campaign", zap.Error(err))
             continue
         }

-        ownerKey, err := GetOwnerInfo(ctx, logCtx, elec, m.id)
+        ownerKey, err := GetOwnerInfo(campaignContext, logCtx, elec, m.id)
         if err != nil {
             continue
         }

         m.toBeOwner(elec)
-        m.watchOwner(ctx, etcdSession, ownerKey)
+        m.watchOwner(campaignContext, etcdSession, ownerKey)
         m.RetireOwner()

         metrics.CampaignOwnerCounter.WithLabelValues(m.prompt, metrics.NoLongerOwner).Inc()
diff --git a/owner/mock.go b/owner/mock.go
index 29552bfd3d14c..546f955c47268 100644
--- a/owner/mock.go
+++ b/owner/mock.go
@@ -99,3 +99,8 @@ func (*mockManager) RequireOwner(context.Context) error {
 func (m *mockManager) SetBeOwnerHook(hook func()) {
     m.beOwnerHook = hook
 }
+
+// CampaignCancel implements Manager.CampaignCancel interface
+func (m *mockManager) CampaignCancel() {
+    // do nothing
+}
diff --git a/sessionctx/variable/sysvar.go b/sessionctx/variable/sysvar.go
index 8acfa65d37138..f0a489e469cc6 100644
--- a/sessionctx/variable/sysvar.go
+++ b/sessionctx/variable/sysvar.go
@@ -433,6 +433,20 @@ var defaultSysVars = []*SysVar{
     },
         GetGlobal: func(s *SessionVars) (string, error) { return strconv.FormatUint(uint64(config.GetGlobalConfig().Instance.MaxConnections), 10), nil }},
+    {Scope: ScopeInstance, Name: TiDBEnableDDL, Value: BoolToOnOff(config.GetGlobalConfig().Instance.TiDBEnableDDL.Load()), Type: TypeBool,
+        SetGlobal: func(s *SessionVars, val string) error {
+            oldVal, newVal := config.GetGlobalConfig().Instance.TiDBEnableDDL.Load(), TiDBOptOn(val)
+            if oldVal != newVal {
+                err := switchDDL(newVal)
+                config.GetGlobalConfig().Instance.TiDBEnableDDL.Store(newVal)
+                return err
+            }
+            return nil
+        },
+        GetGlobal: func(s *SessionVars) (string, error) {
+            return BoolToOnOff(config.GetGlobalConfig().Instance.TiDBEnableDDL.Load()), nil
+        },
+    },
     /* The system variables below have GLOBAL scope */
     {Scope: ScopeGlobal, Name: MaxPreparedStmtCount, Value: strconv.FormatInt(DefMaxPreparedStmtCount, 10), Type: TypeInt, MinValue: -1, MaxValue: 1048576},
@@ -1848,6 +1862,8 @@ const (
     PluginDir = "plugin_dir"
     // PluginLoad is the name of 'plugin_load' system variable.
     PluginLoad = "plugin_load"
+    // TiDBEnableDDL indicates whether the tidb-server runs DDL statements.
+    TiDBEnableDDL = "tidb_enable_ddl"
     // Port is the name for 'port' system variable.
     Port = "port"
     // DataDir is the name for 'datadir' system variable.
diff --git a/sessionctx/variable/tidb_vars.go b/sessionctx/variable/tidb_vars.go
index 11c1d4afa01b8..ad03e5191705d 100644
--- a/sessionctx/variable/tidb_vars.go
+++ b/sessionctx/variable/tidb_vars.go
@@ -1060,4 +1060,18 @@ var (
     SetStatsCacheCapacity atomic.Value
     // SwitchConcurrentDDL is the func registered by DDL to switch concurrent DDL.
     SwitchConcurrentDDL func(bool) error = nil
+    // EnableDDL is the func registered by ddl to enable running ddl in this instance.
+    EnableDDL func() error = nil
+    // DisableDDL is the func registered by ddl to disable running ddl in this instance.
+    DisableDDL func() error = nil
 )
+
+// switchDDL turns on/off DDL in an instance.
+func switchDDL(on bool) error {
+    if on && EnableDDL != nil {
+        return EnableDDL()
+    } else if !on && DisableDDL != nil {
+        return DisableDDL()
+    }
+    return nil
+}
diff --git a/tidb-server/main.go b/tidb-server/main.go
index 5a4ef5b62fae2..63cc9d81fec34 100644
--- a/tidb-server/main.go
+++ b/tidb-server/main.go
@@ -451,7 +451,7 @@ func overrideConfig(cfg *config.Config) {
         cfg.Binlog.Enable = *enableBinlog
     }
     if actualFlags[nmRunDDL] {
-        cfg.RunDDL = *runDDL
+        cfg.Instance.TiDBEnableDDL.Store(*runDDL)
     }
     if actualFlags[nmDdlLease] {
         cfg.Lease = *ddlLease
     }
@@ -565,6 +565,8 @@ func setGlobalVars() {
             cfg.Instance.EnableCollectExecutionInfo = cfg.EnableCollectExecutionInfo
         case "max-server-connections":
             cfg.Instance.MaxConnections = cfg.MaxServerConnections
+        case "run-ddl":
+            cfg.Instance.TiDBEnableDDL.Store(cfg.RunDDL)
         }
     case "log":
         switch oldName {
@@ -615,7 +617,6 @@ func setGlobalVars() {
     session.SetPlanReplayerGCLease(planReplayerGCLease)
     bindinfo.Lease = parseDuration(cfg.Performance.BindInfoLease)
     statistics.RatioOfPseudoEstimate.Store(cfg.Performance.PseudoEstimateRatio)
-    ddl.RunWorker = cfg.RunDDL
     if cfg.SplitTable {
         atomic.StoreUint32(&ddl.EnableSplitTableRegion, 1)
     }
diff --git a/util/dbterror/ddl_terror.go b/util/dbterror/ddl_terror.go
index 2e90d25ffdd2c..6e35142ef5e64 100644
--- a/util/dbterror/ddl_terror.go
+++ b/util/dbterror/ddl_terror.go
@@ -382,6 +382,8 @@ var (
     ErrCancelFinishedDDLJob = ClassDDL.NewStd(mysql.ErrCancelFinishedDDLJob)
     // ErrCannotCancelDDLJob returns when cancel a almost finished ddl job, because cancel in now may cause data inconsistency.
     ErrCannotCancelDDLJob = ClassDDL.NewStd(mysql.ErrCannotCancelDDLJob)
+    // ErrDDLSetting returns when failing to enable/disable DDL.
+    ErrDDLSetting = ClassDDL.NewStd(mysql.ErrDDLSetting)
     // ErrColumnInChange indicates there is modification on the column in parallel.
     ErrColumnInChange = ClassDDL.NewStd(mysql.ErrColumnInChange)
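
Note (not part of the patch): the change above wires tidb_enable_ddl through three pieces — an atomic flag in config.Instance, hook functions that the ddl package registers into sessionctx/variable at newDDL time, and a SetGlobal handler that calls switchDDL only when the value actually changes. The standalone Go sketch below mirrors that register-and-switch pattern with hypothetical stand-in names (enableDDL, disableDDL, ddlEnabled, setTiDBEnableDDL); it does not call any TiDB API and is illustrative only.

package main

import (
	"errors"
	"fmt"
	"sync/atomic"
)

// Stand-ins for the hooks the ddl package registers in newDDL
// (variable.EnableDDL / variable.DisableDDL in the patch).
var (
	enableDDL  func() error
	disableDDL func() error
	ddlEnabled atomic.Bool // stand-in for config Instance.TiDBEnableDDL
)

// switchDDL mirrors the helper added to tidb_vars.go: call only the hook that
// matches the requested state, and tolerate hooks that were never registered.
func switchDDL(on bool) error {
	if on && enableDDL != nil {
		return enableDDL()
	} else if !on && disableDDL != nil {
		return disableDDL()
	}
	return nil
}

// setTiDBEnableDDL mirrors the SetGlobal handler for tidb_enable_ddl: a no-op
// when the value is unchanged; otherwise switch, then store the new value
// (as in the patch, the value is stored even if the hook returns an error).
func setTiDBEnableDDL(val bool) error {
	if ddlEnabled.Load() == val {
		return nil
	}
	err := switchDDL(val)
	ddlEnabled.Store(val)
	return err
}

func main() {
	// Hypothetical hooks; the real ones campaign for / cancel etcd ownership.
	enableDDL = func() error { fmt.Println("campaign to be DDL owner"); return nil }
	disableDDL = func() error { return errors.New("can not disable ddl when there is only one instance") }

	ddlEnabled.Store(true)
	fmt.Println(setTiDBEnableDDL(false)) // hook rejects the switch, but the flag is still stored
	fmt.Println(ddlEnabled.Load())       // false
}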