diff --git a/ddl/column_test.go b/ddl/column_test.go
index 0b9ffc1b6e2ca..67f7e5ce1de44 100644
--- a/ddl/column_test.go
+++ b/ddl/column_test.go
@@ -61,8 +61,8 @@ func (s *testColumnSuite) TearDownSuite(c *C) {
 	testleak.AfterTest(c)()
 }
 
-func testCreateColumn(c *C, ctx sessionctx.Context, d *ddl, dbInfo *model.DBInfo, tblInfo *model.TableInfo,
-	colName string, pos *ast.ColumnPosition, defaultValue interface{}) *model.Job {
+func buildCreateColumnJob(dbInfo *model.DBInfo, tblInfo *model.TableInfo, colName string,
+	pos *ast.ColumnPosition, defaultValue interface{}) *model.Job {
 	col := &model.ColumnInfo{
 		Name:   model.NewCIStr(colName),
 		Offset: len(tblInfo.Columns),
@@ -79,7 +79,12 @@ func testCreateColumn(c *C, ctx sessionctx.Context, d *ddl, dbInfo *model.DBInfo
 		BinlogInfo: &model.HistoryInfo{},
 		Args:       []interface{}{col, pos, 0},
 	}
+	return job
+}
 
+func testCreateColumn(c *C, ctx sessionctx.Context, d *ddl, dbInfo *model.DBInfo, tblInfo *model.TableInfo,
+	colName string, pos *ast.ColumnPosition, defaultValue interface{}) *model.Job {
+	job := buildCreateColumnJob(dbInfo, tblInfo, colName, pos, defaultValue)
 	err := d.doDDLJob(ctx, job)
 	c.Assert(err, IsNil)
 	v := getSchemaVer(c, ctx)
@@ -87,14 +92,18 @@ func testCreateColumn(c *C, ctx sessionctx.Context, d *ddl, dbInfo *model.DBInfo
 	return job
 }
 
-func testDropColumn(c *C, ctx sessionctx.Context, d *ddl, dbInfo *model.DBInfo, tblInfo *model.TableInfo, colName string, isError bool) *model.Job {
-	job := &model.Job{
+func buildDropColumnJob(dbInfo *model.DBInfo, tblInfo *model.TableInfo, colName string) *model.Job {
+	return &model.Job{
 		SchemaID:   dbInfo.ID,
 		TableID:    tblInfo.ID,
 		Type:       model.ActionDropColumn,
 		BinlogInfo: &model.HistoryInfo{},
 		Args:       []interface{}{model.NewCIStr(colName)},
 	}
+}
+
+func testDropColumn(c *C, ctx sessionctx.Context, d *ddl, dbInfo *model.DBInfo, tblInfo *model.TableInfo, colName string, isError bool) *model.Job {
+	job := buildDropColumnJob(dbInfo, tblInfo, colName)
 	err := d.doDDLJob(ctx, job)
 	if isError {
 		c.Assert(err, NotNil)
diff --git a/ddl/db_change_test.go b/ddl/db_change_test.go
index b3d7f02ec90e1..907ded50ca487 100644
--- a/ddl/db_change_test.go
+++ b/ddl/db_change_test.go
@@ -28,13 +28,13 @@ import (
 	"github.com/pingcap/tidb/executor"
 	"github.com/pingcap/tidb/infoschema"
 	"github.com/pingcap/tidb/kv"
-	"github.com/pingcap/tidb/meta"
 	"github.com/pingcap/tidb/model"
 	"github.com/pingcap/tidb/parser"
 	"github.com/pingcap/tidb/session"
 	"github.com/pingcap/tidb/sessionctx"
 	"github.com/pingcap/tidb/store/mockstore"
 	"github.com/pingcap/tidb/terror"
+	"github.com/pingcap/tidb/util/admin"
 	"github.com/pingcap/tidb/util/testkit"
 	"github.com/pingcap/tidb/util/testleak"
 	"golang.org/x/net/context"
@@ -580,15 +580,14 @@ func (s *testStateChangeSuite) testControlParallelExecSQL(c *C, sql1, sql2 strin
 		if times != 0 {
 			return
 		}
-		var qLen int64
-		var err1 error
+		var qLen int
 		for {
 			kv.RunInNewTxn(s.store, false, func(txn kv.Transaction) error {
-				m := meta.NewMeta(txn)
-				qLen, err1 = m.DDLJobQueueLen()
+				jobs, err1 := admin.GetDDLJobs(txn)
 				if err1 != nil {
 					return err1
 				}
+				qLen = len(jobs)
 				return nil
 			})
 			if qLen == 2 {
diff --git a/ddl/db_test.go b/ddl/db_test.go
index 799e6651d3bd5..a0bc2f87d397e 100644
--- a/ddl/db_test.go
+++ b/ddl/db_test.go
@@ -485,7 +485,7 @@ LOOP:
 	}
 
 	ctx := s.s.(sessionctx.Context)
-	idx := tables.NewIndex(t.Meta().ID, c3IdxInfo)
+	idx := tables.NewIndex(t.Meta().ID, t.Meta(), c3IdxInfo)
 	checkDelRangeDone(c, ctx, idx)
 
 	s.mustExec(c, "drop table t1")
@@ -795,7 +795,7 @@ LOOP:
 	}
 	c.Assert(nidx, IsNil)
 
-	idx := tables.NewIndex(t.Meta().ID, c3idx.Meta())
+	idx := tables.NewIndex(t.Meta().ID, t.Meta(), c3idx.Meta())
 	checkDelRangeDone(c, ctx, idx)
 	s.tk.MustExec("drop table test_drop_index")
 }
diff --git a/ddl/ddl.go b/ddl/ddl.go
index 4fd023bce78df..9f27b07e51096 100644
--- a/ddl/ddl.go
+++ b/ddl/ddl.go
@@ -227,7 +227,7 @@ type ddl struct {
 	quitCh chan struct{}
 
 	*ddlCtx
-	workers []*worker
+	workers map[workerType]*worker
 }
 
 // ddlCtx is the context when we use worker to handle DDL jobs.
@@ -236,7 +236,6 @@ type ddlCtx struct {
 	store        kv.Storage
 	ownerManager owner.Manager
 	schemaSyncer SchemaSyncer
-	ddlJobCh     chan struct{}
 	ddlJobDoneCh chan struct{}
 	ddlEventCh   chan<- *util.Event
 	lease        time.Duration // lease is schema lease.
@@ -317,7 +316,6 @@ func newDDL(ctx context.Context, etcdCli *clientv3.Client, store kv.Storage,
 		uuid:         id,
 		store:        store,
 		lease:        lease,
-		ddlJobCh:     make(chan struct{}, 1),
 		ddlJobDoneCh: make(chan struct{}, 1),
 		ownerManager: manager,
 		schemaSyncer: syncer,
@@ -359,20 +357,20 @@ func (d *ddl) start(ctx context.Context, ctxPool *pools.ResourcePool) {
 		err := d.ownerManager.CampaignOwner(ctx)
 		terror.Log(errors.Trace(err))
 
-		d.workers = make([]*worker, 1)
-		// TODO: Add addIdxWorker.
-		d.workers[0] = newWorker(generalWorker, 0, d.store, ctxPool)
+		d.workers = make(map[workerType]*worker, 2)
+		d.workers[generalWorker] = newWorker(generalWorker, 0, d.store, ctxPool)
+		d.workers[addIdxWorker] = newWorker(addIdxWorker, 1, d.store, ctxPool)
 		for _, worker := range d.workers {
 			worker.wg.Add(1)
 			go worker.start(d.ddlCtx)
 			// TODO: Add the type of DDL worker.
 			metrics.DDLCounter.WithLabelValues(metrics.CreateDDLWorker).Inc()
+
+			// When the start function is called, we send a fake job to let the worker
+			// check the owner first and try to find whether a job exists and run it.
+			asyncNotify(worker.ddlJobCh)
 		}
 	}
-
-	// For every start, we will send a fake job to let worker
-	// check owner firstly and try to find whether a job exists and run.
-	asyncNotify(d.ddlJobCh)
 }
 
 func (d *ddl) close() {
@@ -418,16 +416,15 @@ func (d *ddl) genGlobalID() (int64, error) {
 		globalID, err = meta.NewMeta(txn).GenGlobalID()
 		return errors.Trace(err)
 	})
+
 	return globalID, errors.Trace(err)
 }
 
-// generalWorker returns the first worker. The ddl structure has only one worker before we implement the parallel worker.
+// generalWorker returns the general worker.
 // It's used for testing.
+// TODO: Remove this function.
 func (d *ddl) generalWorker() *worker {
-	if len(d.workers) == 0 {
-		return nil
-	}
-	return d.workers[0]
+	return d.workers[generalWorker]
 }
 
 // SchemaSyncer implements DDL.SchemaSyncer interface.
@@ -449,6 +446,19 @@ func checkJobMaxInterval(job *model.Job) time.Duration {
 	return 1 * time.Second
 }
 
+func (d *ddl) asyncNotifyWorker(jobTp model.ActionType) {
+	// If the workers don't run, we don't need to notify them.
+	if !RunWorker {
+		return
+	}
+
+	if jobTp == model.ActionAddIndex {
+		asyncNotify(d.workers[addIdxWorker].ddlJobCh)
+	} else {
+		asyncNotify(d.workers[generalWorker].ddlJobCh)
+	}
+}
+
 func (d *ddl) doDDLJob(ctx sessionctx.Context, job *model.Job) error {
 	// For every DDL, we must commit current transaction.
 	if err := ctx.NewTxn(); err != nil {
@@ -463,7 +473,7 @@ func (d *ddl) doDDLJob(ctx sessionctx.Context, job *model.Job) error {
 	ctx.GetSessionVars().StmtCtx.IsDDLJobInQueue = true
 
 	// Notice worker that we push a new job and wait the job done.
-	asyncNotify(d.ddlJobCh)
+	d.asyncNotifyWorker(job.Type)
 	log.Infof("[ddl] start DDL job %s, Query:%s", job, job.Query)
 
 	var historyJob *model.Job
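
Review note: with the job channel moved from ddlCtx onto each worker, asyncNotifyWorker above routes a wake-up to exactly one queue's worker based on the job type. asyncNotify itself is not part of this patch; presumably it is the usual non-blocking send on the 1-slot channel, so bursts of notifications collapse into a single pending wake-up. A minimal sketch under that assumption:

    // Sketch (assumption, not from this patch): drop the notification if a
    // wake-up is already pending on the buffered channel.
    func asyncNotify(ch chan struct{}) {
        select {
        case ch <- struct{}{}:
        default:
        }
    }
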
diff --git a/ddl/ddl_test.go b/ddl/ddl_test.go
index f07ce685ada6d..cc795ccae7ddb 100644
--- a/ddl/ddl_test.go
+++ b/ddl/ddl_test.go
@@ -134,8 +134,8 @@ func checkHistoryJobArgs(c *C, ctx sessionctx.Context, id int64, args *historyJo
 	}
 }
 
-func testCreateIndex(c *C, ctx sessionctx.Context, d *ddl, dbInfo *model.DBInfo, tblInfo *model.TableInfo, unique bool, indexName string, colName string) *model.Job {
-	job := &model.Job{
+func buildCreateIdxJob(dbInfo *model.DBInfo, tblInfo *model.TableInfo, unique bool, indexName string, colName string) *model.Job {
+	return &model.Job{
 		SchemaID:   dbInfo.ID,
 		TableID:    tblInfo.ID,
 		Type:       model.ActionAddIndex,
@@ -145,7 +145,10 @@ func testCreateIndex(c *C, ctx sessionctx.Context, d *ddl, dbInfo *model.DBInfo,
 			Column: &ast.ColumnName{Name: model.NewCIStr(colName)},
 			Length: types.UnspecifiedLength}}},
 	}
+}
 
+func testCreateIndex(c *C, ctx sessionctx.Context, d *ddl, dbInfo *model.DBInfo, tblInfo *model.TableInfo, unique bool, indexName string, colName string) *model.Job {
+	job := buildCreateIdxJob(dbInfo, tblInfo, unique, indexName, colName)
 	err := d.doDDLJob(ctx, job)
 	c.Assert(err, IsNil)
 	v := getSchemaVer(c, ctx)
@@ -153,18 +156,31 @@ func testCreateIndex(c *C, ctx sessionctx.Context, d *ddl, dbInfo *model.DBInfo,
 	return job
 }
 
-func testDropIndex(c *C, ctx sessionctx.Context, d *ddl, dbInfo *model.DBInfo, tblInfo *model.TableInfo, indexName string) *model.Job {
-	job := &model.Job{
+func buildDropIdxJob(dbInfo *model.DBInfo, tblInfo *model.TableInfo, indexName string) *model.Job {
+	return &model.Job{
 		SchemaID:   dbInfo.ID,
 		TableID:    tblInfo.ID,
 		Type:       model.ActionDropIndex,
 		BinlogInfo: &model.HistoryInfo{},
 		Args:       []interface{}{model.NewCIStr(indexName)},
 	}
+}
 
+func testDropIndex(c *C, ctx sessionctx.Context, d *ddl, dbInfo *model.DBInfo, tblInfo *model.TableInfo, indexName string) *model.Job {
+	job := buildDropIdxJob(dbInfo, tblInfo, indexName)
 	err := d.doDDLJob(ctx, job)
 	c.Assert(err, IsNil)
 	v := getSchemaVer(c, ctx)
 	checkHistoryJobArgs(c, ctx, job.ID, &historyJobArgs{ver: v, tbl: tblInfo})
 	return job
 }
+
+func buildRebaseAutoIDJobJob(dbInfo *model.DBInfo, tblInfo *model.TableInfo, newBaseID int64) *model.Job {
+	return &model.Job{
+		SchemaID:   dbInfo.ID,
+		TableID:    tblInfo.ID,
+		Type:       model.ActionRebaseAutoID,
+		BinlogInfo: &model.HistoryInfo{},
+		Args:       []interface{}{newBaseID},
+	}
+}
diff --git a/ddl/ddl_worker.go b/ddl/ddl_worker.go
index 51fd0537f5dd7..c31bd0caf19b1 100644
--- a/ddl/ddl_worker.go
+++ b/ddl/ddl_worker.go
@@ -38,20 +38,24 @@ var RunWorker = true
 type workerType byte
 
 const (
-	// generalWorker is the worker who handles all DDL worker now.
-	// TODO: update the comments when we support the addIdxWorker.
+	// generalWorker is the worker who handles all DDL statements except "add index".
 	generalWorker workerType = 0
-	addIdxWorker  workerType = 1
+	// addIdxWorker is the worker who handles the operation of adding indexes.
+	addIdxWorker workerType = 1
+	// waitDependencyJobInterval is the interval to wait while the dependency job is not done.
+	waitDependencyJobInterval = 200 * time.Millisecond
+	// noneDependencyJob means a job has no dependency-job.
+	noneDependencyJob = 0
 )
 
 // worker is used for handling DDL jobs.
-// Now we have two kinds of workers, but we only use the generalWorker.
-// TODO: update the comments when we support the addIdxWorker.
+// Now we have two kinds of workers.
 type worker struct {
-	id     int
-	tp     workerType
-	quitCh chan struct{}
-	wg     sync.WaitGroup
+	id       int
+	tp       workerType
+	ddlJobCh chan struct{}
+	quitCh   chan struct{}
+	wg       sync.WaitGroup
 
 	reorgCtx        *reorgCtx // reorgCtx is used for reorganization.
 	delRangeManager delRangeManager
@@ -61,6 +65,7 @@ func newWorker(tp workerType, id int, store kv.Storage, ctxPool *pools.ResourceP
 	worker := &worker{
 		id:       id,
 		tp:       tp,
+		ddlJobCh: make(chan struct{}, 1),
 		quitCh:   make(chan struct{}),
 		reorgCtx: &reorgCtx{notifyCancelReorgJob: 0},
 	}
@@ -74,17 +79,21 @@ func newWorker(tp workerType, id int, store kv.Storage, ctxPool *pools.ResourceP
 	return worker
 }
 
-func (w *worker) String() string {
+func (w *worker) typeStr() string {
 	var str string
 	switch w.tp {
 	case generalWorker:
 		str = "general"
 	case addIdxWorker:
-		str = "add index"
+		str = model.AddIndexStr
 	default:
 		str = "unknow"
 	}
-	return fmt.Sprintf("%d, tp %s", w.id, str)
+	return str
+}
+
+func (w *worker) String() string {
+	return fmt.Sprintf("%d, tp %s", w.id, w.typeStr())
 }
 
 func (w *worker) close() {
@@ -118,23 +127,18 @@ func (w *worker) start(d *ddlCtx) {
 		}
 	}()
 
-	// shouldCleanJobs is used to determine whether to clean up the job in adding index queue.
-	shouldCleanJobs := true
 	for {
 		select {
 		case <-ticker.C:
 			log.Debugf("[ddl] worker %s waits %s to check DDL status again", w, checkTime)
-		case <-d.ddlJobCh:
+		case <-w.ddlJobCh:
 		case <-w.quitCh:
 			return
 		}
 
-		err := w.handleDDLJobQueue(d, shouldCleanJobs)
+		err := w.handleDDLJobQueue(d)
 		if err != nil {
 			log.Errorf("[ddl] worker %s handles DDL job err %v", w, errors.ErrorStack(err))
-		} else if shouldCleanJobs {
-			log.Infof("[ddl] worker %s cleans jobs in the adding index queue finished.", w)
-			shouldCleanJobs = false
 		}
 	}
 }
@@ -149,10 +153,20 @@ func asyncNotify(ch chan struct{}) {
 
 // buildJobDependence sets the curjob's dependency-ID.
 // The dependency-job's ID must less than the current job's ID, and we need the largest one in the list.
 func buildJobDependence(t *meta.Meta, curJob *model.Job) error {
-	jobs, err := t.GetAllDDLJobs()
+	// Jobs in the same queue are ordered. If we want to find a job's dependency-job, we need to look for
+	// it in the other queue. So if the job is an "ActionAddIndex" job, we need to find its dependency-job in the DefaultJobList.
+	var jobs []*model.Job
+	var err error
+	switch curJob.Type {
+	case model.ActionAddIndex:
+		jobs, err = t.GetAllDDLJobsInQueue(meta.DefaultJobListKey)
+	default:
+		jobs, err = t.GetAllDDLJobsInQueue(meta.AddIndexJobListKey)
+	}
 	if err != nil {
 		return errors.Trace(err)
 	}
+
 	for _, job := range jobs {
 		if curJob.ID < job.ID {
 			continue
@@ -162,6 +176,7 @@ func buildJobDependence(t *meta.Meta, curJob *model.Job) error {
 			return errors.Trace(err)
 		}
 		if isDependent {
+			log.Infof("[ddl] current DDL job %v depends on job %v", curJob, job)
 			curJob.DependencyID = job.ID
 			break
 		}
 	}
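
Review note on the hunk above: each queue is already ordered by job ID, so only cross-queue conflicts need a recorded dependency, and the loop keeps the last (largest-ID) conflicting job. The predicate behind isDependent lives in omitted context lines; a rough sketch of the behaviour TestBuildJobDependence asserts, stated as an assumption rather than the actual implementation:

    // Rough sketch (assumption, inferred from the tests below): an earlier job
    // in the other queue is a dependency when it touches the same table, or
    // when it drops the schema the current job lives in.
    func isDependentOn(cur, prev *model.Job) bool {
        if prev.Type == model.ActionDropSchema {
            return prev.SchemaID == cur.SchemaID
        }
        return prev.TableID == cur.TableID
    }

The rename-table case exercised below (job11/job12) also matches on the old schema ID carried in the job's Args; that detail is omitted from the sketch.
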
@@ -175,14 +190,18 @@ func (d *ddl) addDDLJob(ctx sessionctx.Context, job *model.Job) error {
 	job.Version = currentVersion
 	job.Query, _ = ctx.Value(sessionctx.QueryString).(string)
 	err := kv.RunInNewTxn(d.store, true, func(txn kv.Transaction) error {
-		t := meta.NewMeta(txn)
+		t := newMetaWithQueueTp(txn, job.Type.String())
 		var err error
 		job.ID, err = t.GenGlobalID()
 		if err != nil {
 			return errors.Trace(err)
 		}
 		job.StartTS = txn.StartTS()
+		if err = buildJobDependence(t, job); err != nil {
+			return errors.Trace(err)
+		}
 		err = t.EnQueueDDLJob(job)
+
 		return errors.Trace(err)
 	})
 	metrics.DDLWorkerHistogram.WithLabelValues(metrics.WorkerAddDDLJob, metrics.RetLabel(err)).Observe(time.Since(startTime).Seconds())
@@ -282,41 +301,61 @@ func (w *worker) finishDDLJob(t *meta.Meta, job *model.Job) (err error) {
 	return errors.Trace(err)
 }
 
+func isDependencyJobDone(t *meta.Meta, job *model.Job) (bool, error) {
+	if job.DependencyID == noneDependencyJob {
+		return true, nil
+	}
+
+	historyJob, err := t.GetHistoryDDLJob(job.DependencyID)
+	if err != nil {
+		return false, errors.Trace(err)
+	}
+	if historyJob == nil {
+		return false, nil
+	}
+	log.Infof("[ddl] current DDL job %v's dependency job ID %d is finished", job, job.DependencyID)
+	job.DependencyID = noneDependencyJob
+	return true, nil
+}
+
+func newMetaWithQueueTp(txn kv.Transaction, tp string) *meta.Meta {
+	if tp == model.AddIndexStr {
+		return meta.NewMeta(txn, meta.AddIndexJobListKey)
+	}
+	return meta.NewMeta(txn)
+}
+
 // handleDDLJobQueue handles DDL jobs in DDL Job queue.
-// shouldCleanJobs is used to determine whether to clean up the job in adding index queue.
-func (w *worker) handleDDLJobQueue(d *ddlCtx, shouldCleanJobs bool) error {
+func (w *worker) handleDDLJobQueue(d *ddlCtx) error {
 	once := true
+	waitDependencyJobCnt := 0
 	for {
 		if isChanClosed(w.quitCh) {
 			return nil
 		}
 
-		waitTime := 2 * d.lease
-
 		var (
 			job       *model.Job
 			schemaVer int64
 			runJobErr error
 		)
+		waitTime := 2 * d.lease
 		err := kv.RunInNewTxn(d.store, false, func(txn kv.Transaction) error {
 			// We are not owner, return and retry checking later.
 			if !d.isOwner() {
 				return nil
 			}
 
-			// It's used for clean up the job in adding index queue before we support adding index queue.
-			// TODO: Remove this logic after we support the adding index queue.
-			if shouldCleanJobs {
-				return errors.Trace(w.cleanAddIndexQueueJobs(d, txn))
-			}
-
 			var err error
-			t := meta.NewMeta(txn)
+			t := newMetaWithQueueTp(txn, w.typeStr())
 			// We become the owner. Get the first job and run it.
 			job, err = w.getFirstDDLJob(t)
 			if job == nil || err != nil {
 				return errors.Trace(err)
 			}
+			if isDone, err1 := isDependencyJobDone(t, job); err1 != nil || !isDone {
+				return errors.Trace(err1)
+			}
 
 			if once {
 				w.waitSchemaSynced(d, job, waitTime)
@@ -362,6 +401,7 @@ func (w *worker) handleDDLJobQueue(d *ddlCtx, shouldCleanJobs bool) error {
 			// No job now, return and retry getting later.
 			return nil
 		}
+		w.waitDependencyJobFinished(job, &waitDependencyJobCnt)
 
 		d.mu.RLock()
 		d.mu.hook.OnJobUpdated(job)
@@ -379,6 +419,21 @@ func (w *worker) handleDDLJobQueue(d *ddlCtx, shouldCleanJobs bool) error {
 	}
 }
 
+// waitDependencyJobFinished waits for the dependency-job to be finished.
+// If the dependency job isn't finished yet, we wait a moment and then retry.
+func (w *worker) waitDependencyJobFinished(job *model.Job, cnt *int) {
+	if job.DependencyID != noneDependencyJob {
+		intervalCnt := int(3 * time.Second / waitDependencyJobInterval)
+		if *cnt%intervalCnt == 0 {
+			log.Infof("[ddl] worker %s job %d needs to wait for dependency job %d, sleeps a while:%v then retries it.", w, job.ID, job.DependencyID, waitDependencyJobInterval)
+		}
+		time.Sleep(waitDependencyJobInterval)
+		*cnt++
+	} else {
+		*cnt = 0
+	}
+}
+
 func chooseLeaseTime(t, max time.Duration) time.Duration {
 	if t == 0 || t > max {
 		return max
@@ -587,69 +642,6 @@ func updateSchemaVersion(t *meta.Meta, job *model.Job) (int64, error) {
 	return schemaVersion, errors.Trace(err)
 }
 
-// cleanAddIndexQueueJobs cleans jobs in adding index queue.
-// It's only done once after the worker become the owner.
-// TODO: Remove this logic after we support the adding index queue.
-func (w *worker) cleanAddIndexQueueJobs(d *ddlCtx, txn kv.Transaction) error {
-	startTime := time.Now()
-	m := meta.NewMeta(txn)
-	m.SetJobListKey(meta.AddIndexJobListKey)
-	for {
-		job, err := w.getFirstDDLJob(m)
-		if err != nil {
-			return errors.Trace(err)
-		}
-		if job == nil {
-			log.Infof("[ddl] cleaning jobs in the adding index queue takes time %v.", time.Since(startTime))
-			return nil
-		}
-		log.Infof("[ddl] cleaning job %v in the adding index queue.", job)
-
-		// The types of these jobs must be ActionAddIndex.
-		if job.SchemaState == model.StatePublic || job.SchemaState == model.StateNone {
-			if job.SchemaState == model.StateNone {
-				job.State = model.JobStateCancelled
-			} else {
-				binloginfo.SetDDLBinlog(d.binlogCli, txn, job.ID, job.Query)
-				job.State = model.JobStateSynced
-			}
-			err = w.finishDDLJob(m, job)
-			if err != nil {
-				return errors.Trace(err)
-			}
-			continue
-		}
-
-		// When the job not in "none" and "public" state, we need to rollback it.
-		schemaID := job.SchemaID
-		tblInfo, err := getTableInfo(m, job, schemaID)
-		if err != nil {
-			return errors.Trace(err)
-		}
-		var indexName model.CIStr
-		var unique bool
-		err = job.DecodeArgs(&unique, &indexName)
-		if err != nil {
-			return errors.Trace(err)
-		}
-		indexInfo := findIndexByName(indexName.L, tblInfo.Indices)
-		_, err = convert2RollbackJob(m, job, tblInfo, indexInfo, nil)
-		if err == nil {
-			_, err = m.DeQueueDDLJob()
-		}
-		if err != nil {
-			return errors.Trace(err)
-		}
-		// Put the job to the default job list.
-		m.SetJobListKey(meta.DefaultJobListKey)
-		err = m.EnQueueDDLJob(job)
-		m.SetJobListKey(meta.AddIndexJobListKey)
-		if err != nil {
-			return errors.Trace(err)
-		}
-	}
-}
-
 func isChanClosed(quitCh chan struct{}) bool {
 	select {
 	case <-quitCh:
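
Two review notes on the worker changes. First, the wait-log arithmetic: with waitDependencyJobInterval at 200ms, intervalCnt is 3s / 200ms = 15, so the "needs to wait" message is printed once per 15 polls, roughly every three seconds, while the worker keeps sleeping until the dependency lands in the history queue. Second, which queue a transaction operates on is decided entirely by the meta key; a small test-style sketch of inspecting both queues, using only calls that appear elsewhere in this patch:

    // Sketch: count pending jobs in the general and add-index queues.
    err := kv.RunInNewTxn(store, false, func(txn kv.Transaction) error {
        m := meta.NewMeta(txn)
        generalLen, err := m.DDLJobQueueLen()
        if err != nil {
            return errors.Trace(err)
        }
        addIdxLen, err := m.DDLJobQueueLen(meta.AddIndexJobListKey)
        if err != nil {
            return errors.Trace(err)
        }
        log.Infof("[ddl] general queue len %d, add-index queue len %d", generalLen, addIdxLen)
        return nil
    })
    terror.Log(errors.Trace(err))
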
diff --git a/ddl/ddl_worker_test.go b/ddl/ddl_worker_test.go
index f46a175a3ae67..4527a26a99ad1 100644
--- a/ddl/ddl_worker_test.go
+++ b/ddl/ddl_worker_test.go
@@ -14,6 +14,7 @@
 package ddl
 
 import (
+	"sync"
 	"time"
 
 	"github.com/juju/errors"
@@ -69,26 +70,7 @@ func (s *testDDLSuite) TestRunWorker(c *C) {
 	d := testNewDDL(context.Background(), nil, store, nil, nil, testLease)
 	testCheckOwner(c, d, false)
 	defer d.Stop()
-	ctx := testNewContext(d)
-
-	dbInfo := testSchemaInfo(c, d, "test")
-	job := &model.Job{
-		SchemaID:   dbInfo.ID,
-		Type:       model.ActionCreateSchema,
-		BinlogInfo: &model.HistoryInfo{},
-		Args:       []interface{}{dbInfo},
-	}
 
-	exitCh := make(chan struct{})
-	go func(ch chan struct{}) {
-		err := d.doDDLJob(ctx, job)
-		c.Assert(err, IsNil)
-		close(ch)
-	}(exitCh)
-	// Make sure the DDL job is in the DDL job queue.
-	// The reason for doing it twice is to eliminate the operation in the start function.
-	<-d.ddlJobCh
-	<-d.ddlJobCh
 	// Make sure the DDL worker is nil.
 	worker := d.generalWorker()
 	c.Assert(worker, IsNil)
@@ -97,125 +79,8 @@ func (s *testDDLSuite) TestRunWorker(c *C) {
 	d1 := testNewDDL(context.Background(), nil, store, nil, nil, testLease)
 	testCheckOwner(c, d1, true)
 	defer d1.Stop()
-	asyncNotify(d1.ddlJobCh)
-	<-exitCh
-}
-
-func (s *testDDLSuite) TestCleanJobs(c *C) {
-	defer testleak.AfterTest(c)()
-	store := testCreateStore(c, "test_clean_jobs")
-	defer store.Close()
-	d := testNewDDL(context.Background(), nil, store, nil, nil, testLease)
-
-	ctx := testNewContext(d)
-	dbInfo := testSchemaInfo(c, d, "test")
-	testCreateSchema(c, ctx, d, dbInfo)
-	tblInfo := testTableInfo(c, d, "t", 2)
-	testCreateTable(c, ctx, d, dbInfo, tblInfo)
-
-	var failedJobIDs []int64
-	job := &model.Job{
-		SchemaID:   dbInfo.ID,
-		TableID:    tblInfo.ID,
-		Type:       model.ActionAddIndex,
-		BinlogInfo: &model.HistoryInfo{},
-	}
-	idxColNames := []*ast.IndexColName{{
-		Column: &ast.ColumnName{Name: model.NewCIStr("c1")},
-		Length: types.UnspecifiedLength}}
-	// Add some adding index jobs to AddIndexJobList.
-	backfillAddIndexJob := func(jobArgs []interface{}) {
-		kv.RunInNewTxn(d.store, false, func(txn kv.Transaction) error {
-			var err error
-			t := meta.NewMeta(txn)
-			t.SetJobListKey(meta.AddIndexJobListKey)
-			job.ID, err = t.GenGlobalID()
-			c.Assert(err, IsNil)
-			failedJobIDs = append(failedJobIDs, job.ID)
-			job.Args = jobArgs
-			err = t.EnQueueDDLJob(job)
-			c.Assert(err, IsNil)
-			return nil
-		})
-	}
-
-	// Add a StateNone job.
-	indexName := model.NewCIStr("idx_none")
-	args := []interface{}{false, indexName, idxColNames, nil}
-	backfillAddIndexJob(args)
-	// Add a StateDeleteOnly job.
-	indexName = model.NewCIStr("idx_delete_only")
-	args = []interface{}{false, indexName, idxColNames, nil}
-	backfillAddIndexJob(args)
-	changeJobState := func() {
-		kv.RunInNewTxn(d.store, false, func(txn kv.Transaction) error {
-			t := meta.NewMeta(txn)
-			t.SetJobListKey(meta.AddIndexJobListKey)
-			lastJobID := int64(len(failedJobIDs) - 1)
-			job, err1 := t.GetDDLJob(lastJobID)
-			c.Assert(err1, IsNil)
-			_, err1 = d.generalWorker().runDDLJob(d.ddlCtx, t, job)
-			c.Assert(err1, IsNil)
-			_, err1 = updateSchemaVersion(t, job)
-			c.Assert(err1, IsNil)
-			err1 = t.UpdateDDLJob(lastJobID, job, true)
-			c.Assert(err1, IsNil)
-			return nil
-		})
-		err := d.callHookOnChanged(nil)
-		c.Assert(err, IsNil)
-	}
-	changeJobState()
-	// Add a StateWriteReorganization job.
-	indexName = model.NewCIStr("idx_write_reorg")
-	args = []interface{}{false, indexName, idxColNames, nil}
-	backfillAddIndexJob(args)
-	changeJobState() // convert to delete only
-	changeJobState() // convert to write only
-	changeJobState() // convert to write reorg
-
-	err := d.Stop()
-	c.Assert(err, IsNil)
-	// Make sure shouldCleanJobs is ture.
-	d = testNewDDL(context.Background(), nil, store, nil, nil, testLease)
-	defer d.Stop()
-
-	// Make sure all DDL jobs are done.
-	for {
-		var isAllJobDone bool
-		kv.RunInNewTxn(d.store, false, func(txn kv.Transaction) error {
-			t := meta.NewMeta(txn)
-			len, err := t.DDLJobQueueLen()
-			c.Assert(err, IsNil)
-			t.SetJobListKey(meta.AddIndexJobListKey)
-			addIndexLen, err := t.DDLJobQueueLen()
-			c.Assert(err, IsNil)
-			if len == 0 && addIndexLen == 0 {
-				isAllJobDone = true
-			}
-			return nil
-		})
-		if isAllJobDone {
-			break
-		}
-		time.Sleep(time.Millisecond)
-	}
-
-	// Check that the jobs in add index list are finished.
-	kv.RunInNewTxn(d.store, false, func(txn kv.Transaction) error {
-		t := meta.NewMeta(txn)
-		for i, id := range failedJobIDs {
-			historyJob, err := t.GetHistoryDDLJob(id)
-			c.Assert(err, IsNil)
-			c.Assert(historyJob, NotNil, Commentf("job %v", historyJob))
-			if i == 0 {
-				c.Assert(historyJob.State, Equals, model.JobStateCancelled)
-			} else {
-				c.Assert(historyJob.State, Equals, model.JobStateRollbackDone)
-			}
-		}
-		return nil
-	})
+	worker = d1.generalWorker()
+	c.Assert(worker, NotNil)
 }
 
 func (s *testDDLSuite) TestSchemaError(c *C) {
@@ -585,15 +450,17 @@ func (s *testDDLSuite) TestIgnorableSpec(c *C) {
 }
 
 func (s *testDDLSuite) TestBuildJobDependence(c *C) {
-	defer testleak.AfterTest(c)()
 	store := testCreateStore(c, "test_set_job_relation")
 	defer store.Close()
 
-	job1 := &model.Job{ID: 1, TableID: 1}
-	job2 := &model.Job{ID: 2, TableID: 1}
-	job3 := &model.Job{ID: 3, TableID: 2}
-	job6 := &model.Job{ID: 6, TableID: 1}
-	job7 := &model.Job{ID: 7, TableID: 2}
+	// Add some non-add-index jobs.
+	job1 := &model.Job{ID: 1, TableID: 1, Type: model.ActionAddColumn}
+	job2 := &model.Job{ID: 2, TableID: 1, Type: model.ActionCreateTable}
+	job3 := &model.Job{ID: 3, TableID: 2, Type: model.ActionDropColumn}
+	job6 := &model.Job{ID: 6, TableID: 1, Type: model.ActionDropTable}
+	job7 := &model.Job{ID: 7, TableID: 2, Type: model.ActionModifyColumn}
+	job9 := &model.Job{ID: 9, SchemaID: 111, Type: model.ActionDropSchema}
+	job11 := &model.Job{ID: 11, TableID: 2, Type: model.ActionRenameTable, Args: []interface{}{int64(111), "old db name"}}
 	kv.RunInNewTxn(store, false, func(txn kv.Transaction) error {
 		t := meta.NewMeta(txn)
 		err := t.EnQueueDDLJob(job1)
@@ -606,9 +473,13 @@ func (s *testDDLSuite) TestBuildJobDependence(c *C) {
 		c.Assert(err, IsNil)
 		err = t.EnQueueDDLJob(job7)
 		c.Assert(err, IsNil)
+		err = t.EnQueueDDLJob(job9)
+		c.Assert(err, IsNil)
+		err = t.EnQueueDDLJob(job11)
+		c.Assert(err, IsNil)
 		return nil
 	})
-	job4 := &model.Job{ID: 4, TableID: 1}
+	job4 := &model.Job{ID: 4, TableID: 1, Type: model.ActionAddIndex}
 	kv.RunInNewTxn(store, false, func(txn kv.Transaction) error {
 		t := meta.NewMeta(txn)
 		err := buildJobDependence(t, job4)
@@ -616,7 +487,7 @@ func (s *testDDLSuite) TestBuildJobDependence(c *C) {
 		c.Assert(job4.DependencyID, Equals, int64(2))
 		return nil
 	})
-	job5 := &model.Job{ID: 5, TableID: 2}
+	job5 := &model.Job{ID: 5, TableID: 2, Type: model.ActionAddIndex}
 	kv.RunInNewTxn(store, false, func(txn kv.Transaction) error {
 		t := meta.NewMeta(txn)
 		err := buildJobDependence(t, job5)
@@ -624,7 +495,7 @@ func (s *testDDLSuite) TestBuildJobDependence(c *C) {
 		c.Assert(job5.DependencyID, Equals, int64(3))
 		return nil
 	})
-	job8 := &model.Job{ID: 8, TableID: 3}
+	job8 := &model.Job{ID: 8, TableID: 3, Type: model.ActionAddIndex}
 	kv.RunInNewTxn(store, false, func(txn kv.Transaction) error {
 		t := meta.NewMeta(txn)
 		err := buildJobDependence(t, job8)
@@ -632,4 +503,210 @@ func (s *testDDLSuite) TestBuildJobDependence(c *C) {
 		c.Assert(job8.DependencyID, Equals, int64(0))
 		return nil
 	})
+	job10 := &model.Job{ID: 10, SchemaID: 111, TableID: 3, Type: model.ActionAddIndex}
+	kv.RunInNewTxn(store, false, func(txn kv.Transaction) error {
+		t := meta.NewMeta(txn)
+		err := buildJobDependence(t, job10)
+		c.Assert(err, IsNil)
+		c.Assert(job10.DependencyID, Equals, int64(9))
+		return nil
+	})
+	job12 := &model.Job{ID: 12, SchemaID: 112, TableID: 2, Type: model.ActionAddIndex}
+	kv.RunInNewTxn(store, false, func(txn kv.Transaction) error {
+		t := meta.NewMeta(txn)
+		err := buildJobDependence(t, job12)
+		c.Assert(err, IsNil)
+		c.Assert(job12.DependencyID, Equals, int64(11))
+		return nil
+	})
+}
+
+func (s *testDDLSuite) TestParallelDDL(c *C) {
+	store := testCreateStore(c, "test_parallel_ddl")
+	defer store.Close()
+	d := testNewDDL(context.Background(), nil, store, nil, nil, testLease)
+	defer d.Stop()
+	ctx := testNewContext(d)
+	err := ctx.NewTxn()
+	c.Assert(err, IsNil)
+
+	/*
+		build structure:
+			DBs -> {
+				db1: test_parallel_ddl_1
+				db2: test_parallel_ddl_2
+			}
+			Tables -> {
+				db1.t1 (c1 int, c2 int)
+				db1.t2 (c1 int primary key, c2 int, c3 int)
+				db2.t3 (c1 int, c2 int, c3 int, c4 int)
+			}
+			Data -> {
+				t1: (1, 1), (2, 2)
+				t2: (1, 1, 1), (2, 2, 2), (3, 3, 3)
+				t3: (11, 22, 33, 44)
+			}
+	*/
+	// create database test_parallel_ddl_1;
+	dbInfo1 := testSchemaInfo(c, d, "test_parallel_ddl_1")
+	testCreateSchema(c, ctx, d, dbInfo1)
+	// create table t1 (c1 int, c2 int);
+	tblInfo1 := testTableInfo(c, d, "t1", 2)
+	testCreateTable(c, ctx, d, dbInfo1, tblInfo1)
+	// insert t1 values (1, 1), (2, 2)
+	tbl1 := testGetTable(c, d, dbInfo1.ID, tblInfo1.ID)
+	_, err = tbl1.AddRecord(ctx, types.MakeDatums(1, 1), false)
+	c.Assert(err, IsNil)
+	_, err = tbl1.AddRecord(ctx, types.MakeDatums(2, 2), false)
+	c.Assert(err, IsNil)
+	// create table t2 (c1 int primary key, c2 int, c3 int);
+	tblInfo2 := testTableInfo(c, d, "t2", 3)
+	tblInfo2.Columns[0].Flag = mysql.PriKeyFlag | mysql.NotNullFlag
+	tblInfo2.PKIsHandle = true
+	testCreateTable(c, ctx, d, dbInfo1, tblInfo2)
+	// insert t2 values (1, 1, 1), (2, 2, 2), (3, 3, 3)
+	tbl2 := testGetTable(c, d, dbInfo1.ID, tblInfo2.ID)
+	_, err = tbl2.AddRecord(ctx, types.MakeDatums(1, 1, 1), false)
+	c.Assert(err, IsNil)
+	_, err = tbl2.AddRecord(ctx, types.MakeDatums(2, 2, 2), false)
+	c.Assert(err, IsNil)
+	_, err = tbl2.AddRecord(ctx, types.MakeDatums(3, 3, 3), false)
+	c.Assert(err, IsNil)
+	// create database test_parallel_ddl_2;
+	dbInfo2 := testSchemaInfo(c, d, "test_parallel_ddl_2")
+	testCreateSchema(c, ctx, d, dbInfo2)
+	// create table t3 (c1 int, c2 int, c3 int, c4 int);
+	tblInfo3 := testTableInfo(c, d, "t3", 4)
+	testCreateTable(c, ctx, d, dbInfo2, tblInfo3)
+	// insert t3 values (11, 22, 33, 44)
+	tbl3 := testGetTable(c, d, dbInfo2.ID, tblInfo3.ID)
+	_, err = tbl3.AddRecord(ctx, types.MakeDatums(11, 22, 33, 44), false)
+	c.Assert(err, IsNil)
+
+	// set hook to execute jobs after all jobs are in queue.
+	jobCnt := int64(11)
+	tc := &TestDDLCallback{}
+	once := sync.Once{}
+	var checkErr error
+	tc.onJobRunBefore = func(job *model.Job) {
+		// TODO: extract a unified function for other tests.
+		once.Do(func() {
+			qLen1 := int64(0)
+			qLen2 := int64(0)
+			for {
+				checkErr = kv.RunInNewTxn(store, false, func(txn kv.Transaction) error {
+					m := meta.NewMeta(txn)
+					qLen1, err = m.DDLJobQueueLen()
+					if err != nil {
+						return err
+					}
+					qLen2, err = m.DDLJobQueueLen(meta.AddIndexJobListKey)
+					if err != nil {
+						return err
+					}
+					return nil
+				})
+				if checkErr != nil {
+					break
+				}
+				if qLen1+qLen2 == jobCnt {
+					if qLen2 != 5 {
+						checkErr = errors.Errorf("add index jobs cnt %v != 5", qLen2)
+					}
+					break
+				}
+				time.Sleep(5 * time.Millisecond)
+			}
+		})
+	}
+	d.SetHook(tc)
+	c.Assert(checkErr, IsNil)
+
+	/*
+		prepare jobs:
+		/	job no.	/	database no.	/	table no.	/	action type	/
+		/	1	/	1	/	1	/	add index	/
+		/	2	/	1	/	1	/	add column	/
+		/	3	/	1	/	1	/	add index	/
+		/	4	/	1	/	2	/	drop column	/
+		/	5	/	1	/	1	/	drop index	/
+		/	6	/	1	/	2	/	add index	/
+		/	7	/	2	/	3	/	drop column	/
+		/	8	/	2	/	3	/	rebase autoID	/
+		/	9	/	1	/	1	/	add index	/
+		/	10	/	2	/	null	/	drop schema	/
+		/	11	/	2	/	2	/	add index	/
+	*/
+	job1 := buildCreateIdxJob(dbInfo1, tblInfo1, false, "db1_idx1", "c1")
+	d.addDDLJob(ctx, job1)
+	job2 := buildCreateColumnJob(dbInfo1, tblInfo1, "c3", &ast.ColumnPosition{Tp: ast.ColumnPositionNone}, nil)
+	d.addDDLJob(ctx, job2)
+	job3 := buildCreateIdxJob(dbInfo1, tblInfo1, false, "db1_idx2", "c3")
+	d.addDDLJob(ctx, job3)
+	job4 := buildDropColumnJob(dbInfo1, tblInfo2, "c3")
+	d.addDDLJob(ctx, job4)
+	job5 := buildDropIdxJob(dbInfo1, tblInfo1, "db1_idx1")
+	d.addDDLJob(ctx, job5)
+	job6 := buildCreateIdxJob(dbInfo1, tblInfo2, false, "db2_idx1", "c2")
+	d.addDDLJob(ctx, job6)
+	job7 := buildDropColumnJob(dbInfo2, tblInfo3, "c4")
+	d.addDDLJob(ctx, job7)
+	job8 := buildRebaseAutoIDJobJob(dbInfo2, tblInfo3, 1024)
+	d.addDDLJob(ctx, job8)
+	job9 := buildCreateIdxJob(dbInfo1, tblInfo1, false, "db1_idx3", "c2")
+	d.addDDLJob(ctx, job9)
+	job10 := buildDropSchemaJob(dbInfo2)
+	d.addDDLJob(ctx, job10)
+	job11 := buildCreateIdxJob(dbInfo2, tblInfo3, false, "db3_idx1", "c2")
+	d.addDDLJob(ctx, job11)
+	// TODO: add rename table job.
+
+	// check results.
+	isChecked := false
+	for !isChecked {
+		kv.RunInNewTxn(store, false, func(txn kv.Transaction) error {
+			m := meta.NewMeta(txn)
+			lastJob, err := m.GetHistoryDDLJob(job11.ID)
+			c.Assert(err, IsNil)
+			// all jobs are finished.
+			if lastJob != nil {
+				finishedJobs, err := m.GetAllHistoryDDLJobs()
+				c.Assert(err, IsNil)
+				// get the last 11 jobs completed.
+				finishedJobs = finishedJobs[len(finishedJobs)-11:]
+				// check some jobs are ordered because of the dependence.
+				c.Assert(finishedJobs[0].ID, Equals, job1.ID)
+				c.Assert(finishedJobs[1].ID, Equals, job2.ID)
+				c.Assert(finishedJobs[2].ID, Equals, job3.ID)
+				c.Assert(finishedJobs[4].ID, Equals, job5.ID)
+				c.Assert(finishedJobs[10].ID, Equals, job11.ID)
+				// check the jobs are ordered in the adding-index-job queue or general-job queue.
+				addIdxJobID := int64(0)
+				generalJobID := int64(0)
+				for _, job := range finishedJobs {
+					// check jobs' order.
+					if job.Type == model.ActionAddIndex {
+						c.Assert(job.ID, Greater, addIdxJobID)
+						addIdxJobID = job.ID
+					} else {
+						c.Assert(job.ID, Greater, generalJobID)
+						generalJobID = job.ID
+					}
+					// check jobs' state.
+					if job.ID == lastJob.ID {
+						c.Assert(job.State, Equals, model.JobStateCancelled, Commentf("job: %v", job))
+					} else {
+						c.Assert(job.State, Equals, model.JobStateSynced, Commentf("job: %v", job))
+					}
+				}
+
+				isChecked = true
+			}
+			return nil
+		})
+		time.Sleep(10 * time.Millisecond)
+	}
+
+	tc = &TestDDLCallback{}
+	d.SetHook(tc)
+}
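
To spell out the queue split TestParallelDDL relies on: jobs 1, 3, 6, 9 and 11 are add-index jobs and land in the add-index queue (hence the qLen2 != 5 check), while jobs 2, 4, 5, 7, 8 and 10 go to the general queue. Job 10 drops database 2, so job 11's add-index on t3 is the only job expected to end in JobStateCancelled; every other job ends in JobStateSynced, and IDs stay strictly increasing within each queue.
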
diff --git a/ddl/index.go b/ddl/index.go
index b69045a7b9284..f339d91f08671 100644
--- a/ddl/index.go
+++ b/ddl/index.go
@@ -123,7 +123,6 @@ func buildIndexColumns(columns []*model.ColumnInfo, idxColNames []*ast.IndexColN
 			Name:   col.Name,
 			Offset: col.Offset,
 			Length: ic.Length,
-			Tp:     &col.FieldType,
 		})
 	}
 
@@ -488,7 +487,8 @@ type addIndexResult struct {
 }
 
 func newAddIndexWorker(sessCtx sessionctx.Context, worker *worker, id int, t table.Table, indexInfo *model.IndexInfo, colFieldMap map[int64]*types.FieldType) *addIndexWorker {
-	index := tables.NewIndex(t.Meta().ID, indexInfo)
+	tblInfo := t.Meta()
+	index := tables.NewIndex(tblInfo.ID, tblInfo, indexInfo)
 	return &addIndexWorker{
 		id:        id,
 		ddlWorker: worker,
diff --git a/ddl/schema_test.go b/ddl/schema_test.go
index 12dc81bd3d3fa..159f0d3cd95ad 100644
--- a/ddl/schema_test.go
+++ b/ddl/schema_test.go
@@ -69,15 +69,18 @@ func testCreateSchema(c *C, ctx sessionctx.Context, d *ddl, dbInfo *model.DBInfo
 	return job
 }
 
-func testDropSchema(c *C, ctx sessionctx.Context, d *ddl, dbInfo *model.DBInfo) (*model.Job, int64) {
-	job := &model.Job{
+func buildDropSchemaJob(dbInfo *model.DBInfo) *model.Job {
+	return &model.Job{
 		SchemaID:   dbInfo.ID,
 		Type:       model.ActionDropSchema,
 		BinlogInfo: &model.HistoryInfo{},
 	}
+}
+
+func testDropSchema(c *C, ctx sessionctx.Context, d *ddl, dbInfo *model.DBInfo) (*model.Job, int64) {
+	job := buildDropSchemaJob(dbInfo)
 	err := d.doDDLJob(ctx, job)
 	c.Assert(err, IsNil)
-
 	ver := getSchemaVer(c, ctx)
 	return job, ver
 }
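
The executor-test churn that follows mirrors the new tables.NewIndex signature used in index.go above: the table's *model.TableInfo now travels alongside its ID, so every call site takes the same shape:

    // New call shape, as used throughout this patch.
    tblInfo := tbl.Meta()
    indexOpr := tables.NewIndex(tblInfo.ID, tblInfo, idxInfo)
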
diff --git a/executor/admin_test.go b/executor/admin_test.go
index c182f4f288228..0cb73039a4907 100644
--- a/executor/admin_test.go
+++ b/executor/admin_test.go
@@ -90,7 +90,7 @@ func (s *testSuite) TestAdminRecoverIndex(c *C) {
 	tblInfo := tbl.Meta()
 	idxInfo := findIndexByName("c2", tblInfo.Indices)
-	indexOpr := tables.NewIndex(tblInfo.ID, idxInfo)
+	indexOpr := tables.NewIndex(tblInfo.ID, tblInfo, idxInfo)
 	sc := s.ctx.GetSessionVars().StmtCtx
 	txn, err := s.store.Begin()
 	c.Assert(err, IsNil)
@@ -186,7 +186,7 @@ func (s *testSuite) TestAdminRecoverIndex1(c *C) {
 	tblInfo := tbl.Meta()
 	idxInfo := findIndexByName("primary", tblInfo.Indices)
 	c.Assert(idxInfo, NotNil)
-	indexOpr := tables.NewIndex(tblInfo.ID, idxInfo)
+	indexOpr := tables.NewIndex(tblInfo.ID, tblInfo, idxInfo)
 
 	txn, err := s.store.Begin()
 	c.Assert(err, IsNil)
@@ -242,9 +242,9 @@ func (s *testSuite) TestAdminCleanupIndex(c *C) {
 	tblInfo := tbl.Meta()
 	idxInfo2 := findIndexByName("c2", tblInfo.Indices)
-	indexOpr2 := tables.NewIndex(tblInfo.ID, idxInfo2)
+	indexOpr2 := tables.NewIndex(tblInfo.ID, tblInfo, idxInfo2)
 	idxInfo3 := findIndexByName("c3", tblInfo.Indices)
-	indexOpr3 := tables.NewIndex(tblInfo.ID, idxInfo3)
+	indexOpr3 := tables.NewIndex(tblInfo.ID, tblInfo, idxInfo3)
 
 	txn, err := s.store.Begin()
 	c.Assert(err, IsNil)
@@ -309,7 +309,7 @@ func (s *testSuite) TestAdminCleanupIndexPKNotHandle(c *C) {
 	tblInfo := tbl.Meta()
 	idxInfo := findIndexByName("primary", tblInfo.Indices)
-	indexOpr := tables.NewIndex(tblInfo.ID, idxInfo)
+	indexOpr := tables.NewIndex(tblInfo.ID, tblInfo, idxInfo)
 
 	txn, err := s.store.Begin()
 	c.Assert(err, IsNil)
@@ -357,9 +357,9 @@ func (s *testSuite) TestAdminCleanupIndexMore(c *C) {
 	tblInfo := tbl.Meta()
 	idxInfo1 := findIndexByName("c1", tblInfo.Indices)
-	indexOpr1 := tables.NewIndex(tblInfo.ID, idxInfo1)
+	indexOpr1 := tables.NewIndex(tblInfo.ID, tblInfo, idxInfo1)
 	idxInfo2 := findIndexByName("c2", tblInfo.Indices)
-	indexOpr2 := tables.NewIndex(tblInfo.ID, idxInfo2)
+	indexOpr2 := tables.NewIndex(tblInfo.ID, tblInfo, idxInfo2)
 
 	txn, err := s.store.Begin()
 	c.Assert(err, IsNil)
diff --git a/executor/aggfuncs/func_avg.go b/executor/aggfuncs/func_avg.go
index 2d4ac122bdcaf..06a95d58259f4 100644
--- a/executor/aggfuncs/func_avg.go
+++ b/executor/aggfuncs/func_avg.go
@@ -69,7 +69,7 @@ func (e *avgOriginal4Decimal) UpdatePartialResult(sctx sessionctx.Context, rowsI
 	p := (*partialResult4AvgDecimal)(pr)
 	newSum := new(types.MyDecimal)
 	for _, row := range rowsInGroup {
-		input, isNull, err := e.args[0].EvalDecimal(sctx, row)
+		input, isNull, err := e.args[0].EvalDecimal(sctx, &row)
 		if err != nil {
 			return errors.Trace(err)
 		}
@@ -95,7 +95,7 @@ func (e *avgPartial4Decimal) UpdatePartialResult(sctx sessionctx.Context, rowsIn
 	p := (*partialResult4AvgDecimal)(pr)
 	newSum := new(types.MyDecimal)
 	for _, row := range rowsInGroup {
-		inputSum, isNull, err := e.args[1].EvalDecimal(sctx, row)
+		inputSum, isNull, err := e.args[1].EvalDecimal(sctx, &row)
 		if err != nil {
 			return errors.Trace(err)
 		}
@@ -103,7 +103,7 @@ func (e *avgPartial4Decimal) UpdatePartialResult(sctx sessionctx.Context, rowsIn
 			continue
 		}
 
-		inputCount, isNull, err := e.args[0].EvalInt(sctx, row)
+		inputCount, isNull, err := e.args[0].EvalInt(sctx, &row)
 		if err != nil {
 			return errors.Trace(err)
 		}
@@ -148,7 +148,7 @@ func (e *avgOriginal4DistinctDecimal) UpdatePartialResult(sctx sessionctx.Contex
 	p := (*partialResult4AvgDistinctDecimal)(pr)
 	newSum := new(types.MyDecimal)
 	for _, row := range rowsInGroup {
-		input, isNull, err := e.args[0].EvalDecimal(sctx, row)
+		input, isNull, err := e.args[0].EvalDecimal(sctx, &row)
 		if err != nil {
 			return errors.Trace(err)
 		}
@@ -225,7 +225,7 @@ type avgOriginal4Float64 struct {
 
 func (e *avgOriginal4Float64) UpdatePartialResult(sctx sessionctx.Context, rowsInGroup []chunk.Row, pr PartialResult) error {
 	p := (*partialResult4AvgFloat64)(pr)
 	for _, row := range rowsInGroup {
-		input, isNull, err := e.args[0].EvalReal(sctx, row)
+		input, isNull, err := e.args[0].EvalReal(sctx, &row)
 		if err != nil {
 			return errors.Trace(err)
 		}
@@ -246,7 +246,7 @@ type avgPartial4Float64 struct {
 
 func (e *avgPartial4Float64) UpdatePartialResult(sctx sessionctx.Context, rowsInGroup []chunk.Row, pr PartialResult) error {
 	p := (*partialResult4AvgFloat64)(pr)
 	for _, row := range rowsInGroup {
-		inputSum, isNull, err := e.args[1].EvalReal(sctx, row)
+		inputSum, isNull, err := e.args[1].EvalReal(sctx, &row)
 		if err != nil {
 			return errors.Trace(err)
 		}
@@ -254,7 +254,7 @@ func (e *avgPartial4Float64) UpdatePartialResult(sctx sessionctx.Context, rowsIn
 			continue
 		}
 
-		inputCount, isNull, err := e.args[0].EvalInt(sctx, row)
+		inputCount, isNull, err := e.args[0].EvalInt(sctx, &row)
 		if err != nil {
 			return errors.Trace(err)
 		}
@@ -294,7 +294,7 @@ func (e *avgOriginal4DistinctFloat64) ResetPartialResult(pr PartialResult) {
 
 func (e *avgOriginal4DistinctFloat64) UpdatePartialResult(sctx sessionctx.Context, rowsInGroup []chunk.Row, pr PartialResult) error {
 	p := (*partialResult4AvgDistinctFloat64)(pr)
 	for _, row := range rowsInGroup {
-		input, isNull, err := e.args[0].EvalReal(sctx, row)
+		input, isNull, err := e.args[0].EvalReal(sctx, &row)
 		if err != nil {
 			return errors.Trace(err)
 		}
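
All the aggfuncs hunks below are the same mechanical change: the evaluation methods now take the row by pointer, so each loop passes &row. Taking the address of a range variable is safe here because the pointer is consumed synchronously inside the iteration and never retained; a self-contained illustration of that point (not TiDB code):

    package main

    import "fmt"

    func use(p *int) { fmt.Println(*p) }

    func main() {
        for _, row := range []int{1, 2, 3} {
            use(&row) // fine: the pointer is not kept past this iteration
        }
    }
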
diff --git a/executor/aggfuncs/func_bitfuncs.go b/executor/aggfuncs/func_bitfuncs.go
index 76c8e72d00d8f..64557c9f4a636 100644
--- a/executor/aggfuncs/func_bitfuncs.go
+++ b/executor/aggfuncs/func_bitfuncs.go
@@ -49,7 +49,7 @@ type bitOrUint64 struct {
 func (e *bitOrUint64) UpdatePartialResult(sctx sessionctx.Context, rowsInGroup []chunk.Row, pr PartialResult) error {
 	p := (*partialResult4BitFunc)(pr)
 	for _, row := range rowsInGroup {
-		inputValue, isNull, err := e.args[0].EvalInt(sctx, row)
+		inputValue, isNull, err := e.args[0].EvalInt(sctx, &row)
 		if err != nil {
 			return errors.Trace(err)
 		}
@@ -68,7 +68,7 @@ type bitXorUint64 struct {
 func (e *bitXorUint64) UpdatePartialResult(sctx sessionctx.Context, rowsInGroup []chunk.Row, pr PartialResult) error {
 	p := (*partialResult4BitFunc)(pr)
 	for _, row := range rowsInGroup {
-		inputValue, isNull, err := e.args[0].EvalInt(sctx, row)
+		inputValue, isNull, err := e.args[0].EvalInt(sctx, &row)
 		if err != nil {
 			return errors.Trace(err)
 		}
@@ -98,7 +98,7 @@ func (e *bitAndUint64) ResetPartialResult(pr PartialResult) {
 func (e *bitAndUint64) UpdatePartialResult(sctx sessionctx.Context, rowsInGroup []chunk.Row, pr PartialResult) error {
 	p := (*partialResult4BitFunc)(pr)
 	for _, row := range rowsInGroup {
-		inputValue, isNull, err := e.args[0].EvalInt(sctx, row)
+		inputValue, isNull, err := e.args[0].EvalInt(sctx, &row)
 		if err != nil {
 			return errors.Trace(err)
 		}
diff --git a/executor/aggfuncs/func_count.go b/executor/aggfuncs/func_count.go
index ef5f54be2c4a6..f761d386116b8 100644
--- a/executor/aggfuncs/func_count.go
+++ b/executor/aggfuncs/func_count.go
@@ -63,7 +63,7 @@ func (e *countOriginal4Real) UpdatePartialResult(sctx sessionctx.Context, rowsIn
 	p := (*partialResult4Count)(pr)
 
 	for _, row := range rowsInGroup {
-		_, isNull, err := e.args[0].EvalReal(sctx, row)
+		_, isNull, err := e.args[0].EvalReal(sctx, &row)
 		if err != nil {
 			return errors.Trace(err)
 		}
@@ -85,7 +85,7 @@ func (e *countOriginal4Decimal) UpdatePartialResult(sctx sessionctx.Context, row
 	p := (*partialResult4Count)(pr)
 
 	for _, row := range rowsInGroup {
-		_, isNull, err := e.args[0].EvalDecimal(sctx, row)
+		_, isNull, err := e.args[0].EvalDecimal(sctx, &row)
 		if err != nil {
 			return errors.Trace(err)
 		}
@@ -107,7 +107,7 @@ func (e *countOriginal4Time) UpdatePartialResult(sctx sessionctx.Context, rowsIn
 	p := (*partialResult4Count)(pr)
 
 	for _, row := range rowsInGroup {
-		_, isNull, err := e.args[0].EvalTime(sctx, row)
+		_, isNull, err := e.args[0].EvalTime(sctx, &row)
 		if err != nil {
 			return errors.Trace(err)
 		}
@@ -129,7 +129,7 @@ func (e *countOriginal4Duration) UpdatePartialResult(sctx sessionctx.Context, ro
 	p := (*partialResult4Count)(pr)
 
 	for _, row := range rowsInGroup {
-		_, isNull, err := e.args[0].EvalDuration(sctx, row)
+		_, isNull, err := e.args[0].EvalDuration(sctx, &row)
 		if err != nil {
 			return errors.Trace(err)
 		}
@@ -151,7 +151,7 @@ func (e *countOriginal4JSON) UpdatePartialResult(sctx sessionctx.Context, rowsIn
 	p := (*partialResult4Count)(pr)
 
 	for _, row := range rowsInGroup {
-		_, isNull, err := e.args[0].EvalJSON(sctx, row)
+		_, isNull, err := e.args[0].EvalJSON(sctx, &row)
 		if err != nil {
 			return errors.Trace(err)
 		}
@@ -173,7 +173,7 @@ func (e *countOriginal4String) UpdatePartialResult(sctx sessionctx.Context, rows
 	p := (*partialResult4Count)(pr)
 
 	for _, row := range rowsInGroup {
-		_, isNull, err := e.args[0].EvalString(sctx, row)
+		_, isNull, err := e.args[0].EvalString(sctx, &row)
 		if err != nil {
 			return errors.Trace(err)
 		}
@@ -194,7 +194,7 @@ type countPartial struct {
 
 func (e *countPartial) UpdatePartialResult(sctx sessionctx.Context, rowsInGroup []chunk.Row, pr PartialResult) error {
 	p := (*partialResult4Count)(pr)
 	for _, row := range rowsInGroup {
-		input, isNull, err := e.args[0].EvalInt(sctx, row)
+		input, isNull, err := e.args[0].EvalInt(sctx, &row)
 		if err != nil {
 			return errors.Trace(err)
 		}
@@ -249,7 +249,7 @@ func (e *countOriginalWithDistinct) UpdatePartialResult(sctx sessionctx.Context,
 		encodedBytes = encodedBytes[:0]
 
 		for i := 0; i < len(e.args) && !hasNull; i++ {
-			encodedBytes, isNull, err = e.evalAndEncode(sctx, e.args[i], row, buf, encodedBytes)
+			encodedBytes, isNull, err = e.evalAndEncode(sctx, e.args[i], &row, buf, encodedBytes)
 			if err != nil {
 				return
 			}
@@ -272,7 +272,7 @@ func (e *countOriginalWithDistinct) UpdatePartialResult(sctx sessionctx.Context,
 // evalAndEncode eval one row with an expression and encode value to bytes.
 func (e *countOriginalWithDistinct) evalAndEncode(
 	sctx sessionctx.Context, arg expression.Expression,
-	row chunk.Row, buf, encodedBytes []byte,
+	row types.Row, buf, encodedBytes []byte,
 ) ([]byte, bool, error) {
 	switch tp := arg.GetType().EvalType(); tp {
 	case types.ETInt:
diff --git a/executor/aggfuncs/func_first_row.go b/executor/aggfuncs/func_first_row.go
index 801883eb46ef9..6f10549ab0981 100644
--- a/executor/aggfuncs/func_first_row.go
+++ b/executor/aggfuncs/func_first_row.go
@@ -97,7 +97,7 @@ func (e *firstRow4Int) UpdatePartialResult(sctx sessionctx.Context, rowsInGroup
 		return nil
 	}
 	for _, row := range rowsInGroup {
-		input, isNull, err := e.args[0].EvalInt(sctx, row)
+		input, isNull, err := e.args[0].EvalInt(sctx, &row)
 		if err != nil {
 			return errors.Trace(err)
 		}
@@ -136,7 +136,7 @@ func (e *firstRow4Float32) UpdatePartialResult(sctx sessionctx.Context, rowsInGr
 		return nil
 	}
 	for _, row := range rowsInGroup {
-		input, isNull, err := e.args[0].EvalReal(sctx, row)
+		input, isNull, err := e.args[0].EvalReal(sctx, &row)
 		if err != nil {
 			return errors.Trace(err)
 		}
@@ -175,7 +175,7 @@ func (e *firstRow4Float64) UpdatePartialResult(sctx sessionctx.Context, rowsInGr
 		return nil
 	}
 	for _, row := range rowsInGroup {
-		input, isNull, err := e.args[0].EvalReal(sctx, row)
+		input, isNull, err := e.args[0].EvalReal(sctx, &row)
 		if err != nil {
 			return errors.Trace(err)
 		}
@@ -214,7 +214,7 @@ func (e *firstRow4String) UpdatePartialResult(sctx sessionctx.Context, rowsInGro
 		return nil
 	}
 	for _, row := range rowsInGroup {
-		input, isNull, err := e.args[0].EvalString(sctx, row)
+		input, isNull, err := e.args[0].EvalString(sctx, &row)
 		if err != nil {
 			return errors.Trace(err)
 		}
@@ -253,7 +253,7 @@ func (e *firstRow4Time) UpdatePartialResult(sctx sessionctx.Context, rowsInGroup
 		return nil
 	}
 	for _, row := range rowsInGroup {
-		input, isNull, err := e.args[0].EvalTime(sctx, row)
+		input, isNull, err := e.args[0].EvalTime(sctx, &row)
 		if err != nil {
 			return errors.Trace(err)
 		}
@@ -292,7 +292,7 @@ func (e *firstRow4Duration) UpdatePartialResult(sctx sessionctx.Context, rowsInG
 		return nil
 	}
 	for _, row := range rowsInGroup {
-		input, isNull, err := e.args[0].EvalDuration(sctx, row)
+		input, isNull, err := e.args[0].EvalDuration(sctx, &row)
 		if err != nil {
 			return errors.Trace(err)
 		}
@@ -331,7 +331,7 @@ func (e *firstRow4JSON) UpdatePartialResult(sctx sessionctx.Context, rowsInGroup
 		return nil
 	}
 	for _, row := range rowsInGroup {
-		input, isNull, err := e.args[0].EvalJSON(sctx, row)
+		input, isNull, err := e.args[0].EvalJSON(sctx, &row)
 		if err != nil {
 			return errors.Trace(err)
 		}
@@ -370,7 +370,7 @@ func (e *firstRow4Decimal) UpdatePartialResult(sctx sessionctx.Context, rowsInGr
 		return nil
 	}
 	for _, row := range rowsInGroup {
-		input, isNull, err := e.args[0].EvalDecimal(sctx, row)
+		input, isNull, err := e.args[0].EvalDecimal(sctx, &row)
 		if err != nil {
 			return errors.Trace(err)
 		}
diff --git a/executor/aggfuncs/func_group_concat.go b/executor/aggfuncs/func_group_concat.go
index 79c361ddb34ea..6cbadcb28bac5 100644
--- a/executor/aggfuncs/func_group_concat.go
+++ b/executor/aggfuncs/func_group_concat.go
@@ -64,7 +64,7 @@ func (e *groupConcat) UpdatePartialResult(sctx sessionctx.Context, rowsInGroup [
 	for _, row := range rowsInGroup {
 		isWriteSep = false
 		for _, arg := range e.args {
-			v, isNull, err = arg.EvalString(sctx, row)
+			v, isNull, err = arg.EvalString(sctx, &row)
 			if err != nil {
 				return errors.Trace(err)
 			}
@@ -115,7 +115,7 @@ func (e *groupConcatDistinct) UpdatePartialResult(sctx sessionctx.Context, rowsI
 	for _, row := range rowsInGroup {
 		p.valsBuf.Reset()
 		for _, arg := range e.args {
-			v, isNull, err = arg.EvalString(sctx, row)
+			v, isNull, err = arg.EvalString(sctx, &row)
 			if err != nil {
 				return errors.Trace(err)
 			}
diff --git a/executor/aggfuncs/func_max_min.go b/executor/aggfuncs/func_max_min.go
index a795424999461..fa873bbca3916 100644
--- a/executor/aggfuncs/func_max_min.go
+++ b/executor/aggfuncs/func_max_min.go
@@ -105,7 +105,7 @@ func (e *maxMin4Int) AppendFinalResult2Chunk(sctx sessionctx.Context, pr Partial
 func (e *maxMin4Int) UpdatePartialResult(sctx sessionctx.Context, rowsInGroup []chunk.Row, pr PartialResult) error {
 	p := (*partialResult4MaxMinInt)(pr)
 	for _, row := range rowsInGroup {
-		input, isNull, err := e.args[0].EvalInt(sctx, row)
+		input, isNull, err := e.args[0].EvalInt(sctx, &row)
 		if err != nil {
 			return errors.Trace(err)
 		}
@@ -153,7 +153,7 @@ func (e *maxMin4Uint) AppendFinalResult2Chunk(sctx sessionctx.Context, pr Partia
 func (e *maxMin4Uint) UpdatePartialResult(sctx sessionctx.Context, rowsInGroup []chunk.Row, pr PartialResult) error {
 	p := (*partialResult4MaxMinUint)(pr)
 	for _, row := range rowsInGroup {
-		input, isNull, err := e.args[0].EvalInt(sctx, row)
+		input, isNull, err := e.args[0].EvalInt(sctx, &row)
 		if err != nil {
 			return errors.Trace(err)
 		}
@@ -203,7 +203,7 @@ func (e *maxMin4Float32) AppendFinalResult2Chunk(sctx sessionctx.Context, pr Par
 func (e *maxMin4Float32) UpdatePartialResult(sctx sessionctx.Context, rowsInGroup []chunk.Row, pr PartialResult) error {
 	p := (*partialResult4MaxMinFloat32)(pr)
 	for _, row := range rowsInGroup {
-		input, isNull, err := e.args[0].EvalReal(sctx, row)
+		input, isNull, err := e.args[0].EvalReal(sctx, &row)
 		if err != nil {
 			return errors.Trace(err)
 		}
@@ -252,7 +252,7 @@ func (e *maxMin4Float64) AppendFinalResult2Chunk(sctx sessionctx.Context, pr Par
 func (e *maxMin4Float64) UpdatePartialResult(sctx sessionctx.Context, rowsInGroup []chunk.Row, pr PartialResult) error {
 	p := (*partialResult4MaxMinFloat64)(pr)
 	for _, row := range rowsInGroup {
-		input, isNull, err := e.args[0].EvalReal(sctx, row)
+		input, isNull, err := e.args[0].EvalReal(sctx, &row)
 		if err != nil {
 			return errors.Trace(err)
 		}
@@ -299,7 +299,7 @@ func (e *maxMin4Decimal) AppendFinalResult2Chunk(sctx sessionctx.Context, pr Par
 func (e *maxMin4Decimal) UpdatePartialResult(sctx sessionctx.Context, rowsInGroup []chunk.Row, pr PartialResult) error {
 	p := (*partialResult4MaxMinDecimal)(pr)
 	for _, row := range rowsInGroup {
-		input, isNull, err := e.args[0].EvalDecimal(sctx, row)
+		input, isNull, err := e.args[0].EvalDecimal(sctx, &row)
 		if err != nil {
 			return errors.Trace(err)
 		}
@@ -347,7 +347,7 @@ func (e *maxMin4String) AppendFinalResult2Chunk(sctx sessionctx.Context, pr Part
 func (e *maxMin4String) UpdatePartialResult(sctx sessionctx.Context, rowsInGroup []chunk.Row, pr PartialResult) error {
 	p := (*partialResult4MaxMinString)(pr)
 	for _, row := range rowsInGroup {
-		input, isNull, err := e.args[0].EvalString(sctx, row)
+		input, isNull, err := e.args[0].EvalString(sctx, &row)
 		if err != nil {
 			return errors.Trace(err)
 		}
@@ -399,7 +399,7 @@ func (e *maxMin4Time) AppendFinalResult2Chunk(sctx sessionctx.Context, pr Partia
 func (e *maxMin4Time) UpdatePartialResult(sctx sessionctx.Context, rowsInGroup []chunk.Row, pr PartialResult) error {
 	p := (*partialResult4Time)(pr)
 	for _, row := range rowsInGroup {
-		input, isNull, err := e.args[0].EvalTime(sctx, row)
+		input, isNull, err := e.args[0].EvalTime(sctx, &row)
 		if err != nil {
 			return errors.Trace(err)
 		}
@@ -447,7 +447,7 @@ func (e *maxMin4Duration) AppendFinalResult2Chunk(sctx sessionctx.Context, pr Pa
 func (e *maxMin4Duration) UpdatePartialResult(sctx sessionctx.Context, rowsInGroup []chunk.Row, pr PartialResult) error {
 	p := (*partialResult4MaxMinDuration)(pr)
 	for _, row := range rowsInGroup {
-		input, isNull, err := e.args[0].EvalDuration(sctx, row)
+		input, isNull, err := e.args[0].EvalDuration(sctx, &row)
 		if err != nil {
 			return errors.Trace(err)
 		}
@@ -495,7 +495,7 @@ func (e *maxMin4JSON) AppendFinalResult2Chunk(sctx sessionctx.Context, pr Partia
 func (e *maxMin4JSON) UpdatePartialResult(sctx sessionctx.Context, rowsInGroup []chunk.Row, pr PartialResult) error {
 	p := (*partialResult4MaxMinJSON)(pr)
 	for _, row := range rowsInGroup {
-		input, isNull, err := e.args[0].EvalJSON(sctx, row)
+		input, isNull, err := e.args[0].EvalJSON(sctx, &row)
 		if err != nil {
 			return errors.Trace(err)
 		}
diff --git a/executor/aggregate.go b/executor/aggregate.go
index 64dc8a9cd497f..d8b4418b29372 100644
--- a/executor/aggregate.go
+++ b/executor/aggregate.go
@@ -376,7 +376,7 @@ func (w *HashAggPartialWorker) updatePartialResult(ctx sessionctx.Context, sc *s
 	}
 	aggEvalCtxs := w.getContext(sc, groupKey, w.aggCtxsMap)
 	for i, af := range w.aggFuncs {
-		if err = af.Update(aggEvalCtxs[i], sc, row); err != nil {
+		if err = af.Update(aggEvalCtxs[i], sc, &row); err != nil {
 			return errors.Trace(err)
 		}
 	}
diff --git a/executor/ddl_test.go b/executor/ddl_test.go
index b25211a8baa1e..f91bcf6d3be8e 100644
--- a/executor/ddl_test.go
+++ b/executor/ddl_test.go
@@ -26,6 +26,7 @@ import (
 	"github.com/pingcap/tidb/plan"
 	"github.com/pingcap/tidb/sessionctx/variable"
 	"github.com/pingcap/tidb/table"
+	"github.com/pingcap/tidb/terror"
 	"github.com/pingcap/tidb/types"
 	"github.com/pingcap/tidb/util/chunk"
 	"github.com/pingcap/tidb/util/testkit"
@@ -409,17 +410,15 @@ func (s *testSuite) TestSetDDLReorgWorkerCnt(c *C) {
 	c.Assert(variable.GetDDLReorgWorkerCounter(), Equals, int32(1))
 	tk.MustExec("set tidb_ddl_reorg_worker_cnt = 100")
 	c.Assert(variable.GetDDLReorgWorkerCounter(), Equals, int32(100))
-	tk.MustExec("set tidb_ddl_reorg_worker_cnt = invalid_val")
-	c.Assert(variable.GetDDLReorgWorkerCounter(), Equals, int32(variable.DefTiDBDDLReorgWorkerCount))
+	_, err := tk.Exec("set tidb_ddl_reorg_worker_cnt = invalid_val")
+	c.Assert(terror.ErrorEqual(err, variable.ErrWrongTypeForVar), IsTrue)
 	tk.MustExec("set tidb_ddl_reorg_worker_cnt = 100")
 	c.Assert(variable.GetDDLReorgWorkerCounter(), Equals, int32(100))
-	tk.MustExec("set tidb_ddl_reorg_worker_cnt = -1")
-	c.Assert(variable.GetDDLReorgWorkerCounter(), Equals, int32(variable.DefTiDBDDLReorgWorkerCount))
+	_, err = tk.Exec("set tidb_ddl_reorg_worker_cnt = -1")
+	c.Assert(terror.ErrorEqual(err, variable.ErrWrongValueForVar), IsTrue)
 
-	res := tk.MustQuery("select @@tidb_ddl_reorg_worker_cnt")
-	res.Check(testkit.Rows("-1"))
 	tk.MustExec("set tidb_ddl_reorg_worker_cnt = 100")
-	res = tk.MustQuery("select @@tidb_ddl_reorg_worker_cnt")
+	res := tk.MustQuery("select @@tidb_ddl_reorg_worker_cnt")
 	res.Check(testkit.Rows("100"))
 
 	res = tk.MustQuery("select @@global.tidb_ddl_reorg_worker_cnt")
diff --git a/executor/pkg_test.go b/executor/pkg_test.go
index c2e40dd3ba4ab..40d4c5670291e 100644
--- a/executor/pkg_test.go
+++ b/executor/pkg_test.go
@@ -22,7 +22,7 @@ type pkgTestSuite struct {
 
 type MockExec struct {
 	baseExecutor
 
-	Rows      []types.DatumRow
+	Rows      []chunk.MutRow
 	curRowIdx int
 }
 
@@ -31,8 +31,8 @@ func (m *MockExec) Next(ctx context.Context, chk *chunk.Chunk) error {
 	colTypes := m.retTypes()
 	for ; m.curRowIdx < len(m.Rows) && chk.NumRows() < m.maxChunkSize; m.curRowIdx++ {
 		curRow := m.Rows[m.curRowIdx]
-		for i := 0; i < len(curRow); i++ {
-			curDatum := curRow.GetDatum(i, colTypes[i])
+		for i := 0; i < curRow.Len(); i++ {
+			curDatum := curRow.ToRow().GetDatum(i, colTypes[i])
 			chk.AppendDatum(i, &curDatum)
 		}
 	}
@@ -58,24 +58,24 @@ func (s *pkgTestSuite) TestNestedLoopApply(c *C) {
 	outerSchema := expression.NewSchema(col0)
 	outerExec := &MockExec{
 		baseExecutor: newBaseExecutor(sctx, outerSchema, ""),
-		Rows: []types.DatumRow{
-			types.MakeDatums(1),
-			types.MakeDatums(2),
-			types.MakeDatums(3),
-			types.MakeDatums(4),
-			types.MakeDatums(5),
-			types.MakeDatums(6),
+		Rows: []chunk.MutRow{
+			chunk.MutRowFromDatums(types.MakeDatums(1)),
+			chunk.MutRowFromDatums(types.MakeDatums(2)),
+			chunk.MutRowFromDatums(types.MakeDatums(3)),
+			chunk.MutRowFromDatums(types.MakeDatums(4)),
+			chunk.MutRowFromDatums(types.MakeDatums(5)),
+			chunk.MutRowFromDatums(types.MakeDatums(6)),
		}}
 	innerSchema := expression.NewSchema(col1)
 	innerExec := &MockExec{
 		baseExecutor: newBaseExecutor(sctx, innerSchema, ""),
-		Rows: []types.DatumRow{
-			types.MakeDatums(1),
-			types.MakeDatums(2),
-			types.MakeDatums(3),
-			types.MakeDatums(4),
-			types.MakeDatums(5),
-			types.MakeDatums(6),
+		Rows: []chunk.MutRow{
+			chunk.MutRowFromDatums(types.MakeDatums(1)),
+			chunk.MutRowFromDatums(types.MakeDatums(2)),
+			chunk.MutRowFromDatums(types.MakeDatums(3)),
+			chunk.MutRowFromDatums(types.MakeDatums(4)),
+			chunk.MutRowFromDatums(types.MakeDatums(5)),
+			chunk.MutRowFromDatums(types.MakeDatums(6)),
		}}
 	outerFilter := expression.NewFunctionInternal(sctx, ast.LT, types.NewFieldType(mysql.TypeTiny), col0, con)
 	innerFilter := outerFilter.Clone()
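
pkg_test.go shows the replacement pattern for the removed types.DatumRow in chunk-based code: build a chunk.MutRow from datums and call ToRow() when a read-only chunk.Row is needed. A condensed sketch of the round trip, using only calls that appear in this patch:

    // datums -> mutable row -> read-only row -> datum
    mr := chunk.MutRowFromDatums(types.MakeDatums(1))
    row := mr.ToRow()
    d := row.GetDatum(0, types.NewFieldType(mysql.TypeLonglong))
    _ = d
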
"github.com/pingcap/check" "github.com/pingcap/tidb/sessionctx" "github.com/pingcap/tidb/sessionctx/variable" + "github.com/pingcap/tidb/terror" "github.com/pingcap/tidb/util/testkit" + "github.com/pingcap/tidb/util/testutil" "golang.org/x/net/context" ) @@ -88,16 +90,16 @@ func (s *testSuite) TestSetVar(c *C) { // Set default // {ScopeGlobal | ScopeSession, "low_priority_updates", "OFF"}, // For global var - tk.MustQuery(`select @@global.low_priority_updates;`).Check(testkit.Rows("OFF")) + tk.MustQuery(`select @@global.low_priority_updates;`).Check(testkit.Rows("0")) tk.MustExec(`set @@global.low_priority_updates="ON";`) - tk.MustQuery(`select @@global.low_priority_updates;`).Check(testkit.Rows("ON")) + tk.MustQuery(`select @@global.low_priority_updates;`).Check(testkit.Rows("1")) tk.MustExec(`set @@global.low_priority_updates=DEFAULT;`) // It will be set to compiled-in default value. - tk.MustQuery(`select @@global.low_priority_updates;`).Check(testkit.Rows("OFF")) + tk.MustQuery(`select @@global.low_priority_updates;`).Check(testkit.Rows("0")) // For session - tk.MustQuery(`select @@session.low_priority_updates;`).Check(testkit.Rows("OFF")) + tk.MustQuery(`select @@session.low_priority_updates;`).Check(testkit.Rows("0")) tk.MustExec(`set @@global.low_priority_updates="ON";`) tk.MustExec(`set @@session.low_priority_updates=DEFAULT;`) // It will be set to global var value. - tk.MustQuery(`select @@session.low_priority_updates;`).Check(testkit.Rows("ON")) + tk.MustQuery(`select @@session.low_priority_updates;`).Check(testkit.Rows("1")) // For mysql jdbc driver issue. tk.MustQuery(`select @@session.tx_read_only;`).Check(testkit.Rows("0")) @@ -212,15 +214,15 @@ func (s *testSuite) TestSetVar(c *C) { tk.MustQuery("select @@session.tx_isolation").Check(testkit.Rows("READ-COMMITTED")) tk.MustExec("set global avoid_temporal_upgrade = on") - tk.MustQuery(`select @@global.avoid_temporal_upgrade;`).Check(testkit.Rows("ON")) + tk.MustQuery(`select @@global.avoid_temporal_upgrade;`).Check(testkit.Rows("1")) tk.MustExec("set @@global.avoid_temporal_upgrade = off") - tk.MustQuery(`select @@global.avoid_temporal_upgrade;`).Check(testkit.Rows("off")) + tk.MustQuery(`select @@global.avoid_temporal_upgrade;`).Check(testkit.Rows("0")) tk.MustExec("set session sql_log_bin = on") - tk.MustQuery(`select @@session.sql_log_bin;`).Check(testkit.Rows("ON")) + tk.MustQuery(`select @@session.sql_log_bin;`).Check(testkit.Rows("1")) tk.MustExec("set sql_log_bin = off") - tk.MustQuery(`select @@session.sql_log_bin;`).Check(testkit.Rows("off")) + tk.MustQuery(`select @@session.sql_log_bin;`).Check(testkit.Rows("0")) tk.MustExec("set @@sql_log_bin = on") - tk.MustQuery(`select @@session.sql_log_bin;`).Check(testkit.Rows("ON")) + tk.MustQuery(`select @@session.sql_log_bin;`).Check(testkit.Rows("1")) } func (s *testSuite) TestSetCharset(c *C) { @@ -247,3 +249,66 @@ func (s *testSuite) TestSetCharset(c *C) { // Issue 1523 tk.MustExec(`SET NAMES binary`) } + +func (s *testSuite) TestValidateSetVar(c *C) { + tk := testkit.NewTestKit(c, s.store) + + _, err := tk.Exec("set global tidb_distsql_scan_concurrency='fff';") + c.Assert(terror.ErrorEqual(err, variable.ErrWrongTypeForVar), IsTrue) + + _, err = tk.Exec("set global tidb_distsql_scan_concurrency=-1;") + c.Assert(terror.ErrorEqual(err, variable.ErrWrongValueForVar), IsTrue) + + _, err = tk.Exec("set @@tidb_distsql_scan_concurrency='fff';") + c.Assert(terror.ErrorEqual(err, variable.ErrWrongTypeForVar), IsTrue) + + _, err = tk.Exec("set @@tidb_distsql_scan_concurrency=-1;") 
+ c.Assert(terror.ErrorEqual(err, variable.ErrWrongValueForVar), IsTrue) + + _, err = tk.Exec("set @@tidb_batch_delete='ok';") + c.Assert(terror.ErrorEqual(err, variable.ErrWrongValueForVar), IsTrue) + + tk.MustExec("set @@tidb_batch_delete='On';") + tk.MustExec("set @@tidb_batch_delete='oFf';") + tk.MustExec("set @@tidb_batch_delete=1;") + tk.MustExec("set @@tidb_batch_delete=0;") + + _, err = tk.Exec("set @@tidb_batch_delete=3;") + c.Assert(terror.ErrorEqual(err, variable.ErrWrongValueForVar), IsTrue) + + _, err = tk.Exec("set @@tidb_mem_quota_mergejoin='tidb';") + c.Assert(terror.ErrorEqual(err, variable.ErrWrongValueForVar), IsTrue) + + tk.MustExec("set @@group_concat_max_len=1") + tk.MustQuery("show warnings").Check(testutil.RowsWithSep("|", "Warning|1292|Truncated incorrect group_concat_max_len value: '1'")) + result := tk.MustQuery("select @@group_concat_max_len;") + result.Check(testkit.Rows("4")) + + _, err = tk.Exec("set @@group_concat_max_len = 18446744073709551616") + c.Assert(terror.ErrorEqual(err, variable.ErrWrongTypeForVar), IsTrue) + + // Test illegal type + _, err = tk.Exec("set @@group_concat_max_len='hello'") + c.Assert(terror.ErrorEqual(err, variable.ErrWrongTypeForVar), IsTrue) + + tk.MustExec("set @@default_week_format=-1") + tk.MustQuery("show warnings").Check(testutil.RowsWithSep("|", "Warning|1292|Truncated incorrect default_week_format value: '-1'")) + result = tk.MustQuery("select @@default_week_format;") + result.Check(testkit.Rows("0")) + + tk.MustExec("set @@default_week_format=9") + tk.MustQuery("show warnings").Check(testutil.RowsWithSep("|", "Warning|1292|Truncated incorrect default_week_format value: '9'")) + result = tk.MustQuery("select @@default_week_format;") + result.Check(testkit.Rows("7")) + + _, err = tk.Exec("set @@error_count = 0") + c.Assert(terror.ErrorEqual(err, variable.ErrReadOnly), IsTrue) + + _, err = tk.Exec("set @@warning_count = 0") + c.Assert(terror.ErrorEqual(err, variable.ErrReadOnly), IsTrue) + + tk.MustExec("set time_zone='SySTeM'") + result = tk.MustQuery("select @@time_zone;") + result.Check(testkit.Rows("SYSTEM")) + +} diff --git a/expression/aggregation/aggregation_test.go b/expression/aggregation/aggregation_test.go index 50334aedadc20..6352aff9d4dfc 100644 --- a/expression/aggregation/aggregation_test.go +++ b/expression/aggregation/aggregation_test.go @@ -22,6 +22,7 @@ import ( "github.com/pingcap/tidb/mysql" "github.com/pingcap/tidb/sessionctx" "github.com/pingcap/tidb/types" + "github.com/pingcap/tidb/util/chunk" "github.com/pingcap/tidb/util/mock" ) @@ -29,15 +30,15 @@ var _ = Suite(&testAggFuncSuit{}) type testAggFuncSuit struct { ctx sessionctx.Context - rows []types.DatumRow - nullRow types.DatumRow + rows []chunk.Row + nullRow chunk.Row } -func generateRowData() []types.DatumRow { - rows := make([]types.DatumRow, 0, 5050) +func generateRowData() []chunk.Row { + rows := make([]chunk.Row, 0, 5050) for i := 1; i <= 100; i++ { for j := 0; j < i; j++ { - rows = append(rows, types.MakeDatums(i)) + rows = append(rows, chunk.MutRowFromDatums(types.MakeDatums(i)).ToRow()) } } return rows @@ -46,7 +47,7 @@ func generateRowData() []types.DatumRow { func (s *testAggFuncSuit) SetUpSuite(c *C) { s.ctx = mock.NewContext() s.rows = generateRowData() - s.nullRow = []types.Datum{{}} + s.nullRow = chunk.MutRowFromDatums([]types.Datum{{}}).ToRow() } func (s *testAggFuncSuit) TestAvg(c *C) { @@ -161,8 +162,8 @@ func (s *testAggFuncSuit) TestBitAnd(c *C) { result := bitAndFunc.GetResult(evalCtx) c.Assert(result.GetUint64(), Equals, 
uint64(math.MaxUint64)) - row := types.MakeDatums(1) - err := bitAndFunc.Update(evalCtx, s.ctx.GetSessionVars().StmtCtx, types.DatumRow(row)) + row := chunk.MutRowFromDatums(types.MakeDatums(1)).ToRow() + err := bitAndFunc.Update(evalCtx, s.ctx.GetSessionVars().StmtCtx, row) c.Assert(err, IsNil) result = bitAndFunc.GetResult(evalCtx) c.Assert(result.GetUint64(), Equals, uint64(1)) @@ -172,20 +173,20 @@ func (s *testAggFuncSuit) TestBitAnd(c *C) { result = bitAndFunc.GetResult(evalCtx) c.Assert(result.GetUint64(), Equals, uint64(1)) - row = types.MakeDatums(1) - err = bitAndFunc.Update(evalCtx, s.ctx.GetSessionVars().StmtCtx, types.DatumRow(row)) + row = chunk.MutRowFromDatums(types.MakeDatums(1)).ToRow() + err = bitAndFunc.Update(evalCtx, s.ctx.GetSessionVars().StmtCtx, row) c.Assert(err, IsNil) result = bitAndFunc.GetResult(evalCtx) c.Assert(result.GetUint64(), Equals, uint64(1)) - row = types.MakeDatums(3) - err = bitAndFunc.Update(evalCtx, s.ctx.GetSessionVars().StmtCtx, types.DatumRow(row)) + row = chunk.MutRowFromDatums(types.MakeDatums(3)).ToRow() + err = bitAndFunc.Update(evalCtx, s.ctx.GetSessionVars().StmtCtx, row) c.Assert(err, IsNil) result = bitAndFunc.GetResult(evalCtx) c.Assert(result.GetUint64(), Equals, uint64(1)) - row = types.MakeDatums(2) - err = bitAndFunc.Update(evalCtx, s.ctx.GetSessionVars().StmtCtx, types.DatumRow(row)) + row = chunk.MutRowFromDatums(types.MakeDatums(2)).ToRow() + err = bitAndFunc.Update(evalCtx, s.ctx.GetSessionVars().StmtCtx, row) c.Assert(err, IsNil) result = bitAndFunc.GetResult(evalCtx) c.Assert(result.GetUint64(), Equals, uint64(0)) @@ -202,24 +203,24 @@ func (s *testAggFuncSuit) TestBitAnd(c *C) { var dec types.MyDecimal err = dec.FromString([]byte("1.234")) c.Assert(err, IsNil) - row = types.MakeDatums(&dec) - err = bitAndFunc.Update(evalCtx, s.ctx.GetSessionVars().StmtCtx, types.DatumRow(row)) + row = chunk.MutRowFromDatums(types.MakeDatums(&dec)).ToRow() + err = bitAndFunc.Update(evalCtx, s.ctx.GetSessionVars().StmtCtx, row) c.Assert(err, IsNil) result = bitAndFunc.GetResult(evalCtx) c.Assert(result.GetUint64(), Equals, uint64(1)) err = dec.FromString([]byte("3.012")) c.Assert(err, IsNil) - row = types.MakeDatums(&dec) - err = bitAndFunc.Update(evalCtx, s.ctx.GetSessionVars().StmtCtx, types.DatumRow(row)) + row = chunk.MutRowFromDatums(types.MakeDatums(&dec)).ToRow() + err = bitAndFunc.Update(evalCtx, s.ctx.GetSessionVars().StmtCtx, row) c.Assert(err, IsNil) result = bitAndFunc.GetResult(evalCtx) c.Assert(result.GetUint64(), Equals, uint64(1)) err = dec.FromString([]byte("2.12345678")) c.Assert(err, IsNil) - row = types.MakeDatums(&dec) - err = bitAndFunc.Update(evalCtx, s.ctx.GetSessionVars().StmtCtx, types.DatumRow(row)) + row = chunk.MutRowFromDatums(types.MakeDatums(&dec)).ToRow() + err = bitAndFunc.Update(evalCtx, s.ctx.GetSessionVars().StmtCtx, row) c.Assert(err, IsNil) result = bitAndFunc.GetResult(evalCtx) c.Assert(result.GetUint64(), Equals, uint64(0)) @@ -236,8 +237,8 @@ func (s *testAggFuncSuit) TestBitOr(c *C) { result := bitOrFunc.GetResult(evalCtx) c.Assert(result.GetUint64(), Equals, uint64(0)) - row := types.MakeDatums(1) - err := bitOrFunc.Update(evalCtx, s.ctx.GetSessionVars().StmtCtx, types.DatumRow(row)) + row := chunk.MutRowFromDatums(types.MakeDatums(1)).ToRow() + err := bitOrFunc.Update(evalCtx, s.ctx.GetSessionVars().StmtCtx, row) c.Assert(err, IsNil) result = bitOrFunc.GetResult(evalCtx) c.Assert(result.GetUint64(), Equals, uint64(1)) @@ -247,20 +248,20 @@ func (s *testAggFuncSuit) TestBitOr(c *C) { result = 
bitOrFunc.GetResult(evalCtx) c.Assert(result.GetUint64(), Equals, uint64(1)) - row = types.MakeDatums(1) - err = bitOrFunc.Update(evalCtx, s.ctx.GetSessionVars().StmtCtx, types.DatumRow(row)) + row = chunk.MutRowFromDatums(types.MakeDatums(1)).ToRow() + err = bitOrFunc.Update(evalCtx, s.ctx.GetSessionVars().StmtCtx, row) c.Assert(err, IsNil) result = bitOrFunc.GetResult(evalCtx) c.Assert(result.GetUint64(), Equals, uint64(1)) - row = types.MakeDatums(3) - err = bitOrFunc.Update(evalCtx, s.ctx.GetSessionVars().StmtCtx, types.DatumRow(row)) + row = chunk.MutRowFromDatums(types.MakeDatums(3)).ToRow() + err = bitOrFunc.Update(evalCtx, s.ctx.GetSessionVars().StmtCtx, row) c.Assert(err, IsNil) result = bitOrFunc.GetResult(evalCtx) c.Assert(result.GetUint64(), Equals, uint64(3)) - row = types.MakeDatums(2) - err = bitOrFunc.Update(evalCtx, s.ctx.GetSessionVars().StmtCtx, types.DatumRow(row)) + row = chunk.MutRowFromDatums(types.MakeDatums(2)).ToRow() + err = bitOrFunc.Update(evalCtx, s.ctx.GetSessionVars().StmtCtx, row) c.Assert(err, IsNil) result = bitOrFunc.GetResult(evalCtx) c.Assert(result.GetUint64(), Equals, uint64(3)) @@ -277,32 +278,32 @@ func (s *testAggFuncSuit) TestBitOr(c *C) { var dec types.MyDecimal err = dec.FromString([]byte("12.234")) c.Assert(err, IsNil) - row = types.MakeDatums(&dec) - err = bitOrFunc.Update(evalCtx, s.ctx.GetSessionVars().StmtCtx, types.DatumRow(row)) + row = chunk.MutRowFromDatums(types.MakeDatums(&dec)).ToRow() + err = bitOrFunc.Update(evalCtx, s.ctx.GetSessionVars().StmtCtx, row) c.Assert(err, IsNil) result = bitOrFunc.GetResult(evalCtx) c.Assert(result.GetUint64(), Equals, uint64(12)) err = dec.FromString([]byte("1.012")) c.Assert(err, IsNil) - row = types.MakeDatums(&dec) - err = bitOrFunc.Update(evalCtx, s.ctx.GetSessionVars().StmtCtx, types.DatumRow(row)) + row = chunk.MutRowFromDatums(types.MakeDatums(&dec)).ToRow() + err = bitOrFunc.Update(evalCtx, s.ctx.GetSessionVars().StmtCtx, row) c.Assert(err, IsNil) result = bitOrFunc.GetResult(evalCtx) c.Assert(result.GetUint64(), Equals, uint64(13)) err = dec.FromString([]byte("15.12345678")) c.Assert(err, IsNil) - row = types.MakeDatums(&dec) - err = bitOrFunc.Update(evalCtx, s.ctx.GetSessionVars().StmtCtx, types.DatumRow(row)) + row = chunk.MutRowFromDatums(types.MakeDatums(&dec)).ToRow() + err = bitOrFunc.Update(evalCtx, s.ctx.GetSessionVars().StmtCtx, row) c.Assert(err, IsNil) result = bitOrFunc.GetResult(evalCtx) c.Assert(result.GetUint64(), Equals, uint64(15)) err = dec.FromString([]byte("16.00")) c.Assert(err, IsNil) - row = types.MakeDatums(&dec) - err = bitOrFunc.Update(evalCtx, s.ctx.GetSessionVars().StmtCtx, types.DatumRow(row)) + row = chunk.MutRowFromDatums(types.MakeDatums(&dec)).ToRow() + err = bitOrFunc.Update(evalCtx, s.ctx.GetSessionVars().StmtCtx, row) c.Assert(err, IsNil) result = bitOrFunc.GetResult(evalCtx) c.Assert(result.GetUint64(), Equals, uint64(31)) @@ -319,8 +320,8 @@ func (s *testAggFuncSuit) TestBitXor(c *C) { result := bitXorFunc.GetResult(evalCtx) c.Assert(result.GetUint64(), Equals, uint64(0)) - row := types.MakeDatums(1) - err := bitXorFunc.Update(evalCtx, s.ctx.GetSessionVars().StmtCtx, types.DatumRow(row)) + row := chunk.MutRowFromDatums(types.MakeDatums(1)).ToRow() + err := bitXorFunc.Update(evalCtx, s.ctx.GetSessionVars().StmtCtx, row) c.Assert(err, IsNil) result = bitXorFunc.GetResult(evalCtx) c.Assert(result.GetUint64(), Equals, uint64(1)) @@ -330,20 +331,20 @@ func (s *testAggFuncSuit) TestBitXor(c *C) { result = bitXorFunc.GetResult(evalCtx) 
c.Assert(result.GetUint64(), Equals, uint64(1)) - row = types.MakeDatums(1) - err = bitXorFunc.Update(evalCtx, s.ctx.GetSessionVars().StmtCtx, types.DatumRow(row)) + row = chunk.MutRowFromDatums(types.MakeDatums(1)).ToRow() + err = bitXorFunc.Update(evalCtx, s.ctx.GetSessionVars().StmtCtx, row) c.Assert(err, IsNil) result = bitXorFunc.GetResult(evalCtx) c.Assert(result.GetUint64(), Equals, uint64(0)) - row = types.MakeDatums(3) - err = bitXorFunc.Update(evalCtx, s.ctx.GetSessionVars().StmtCtx, types.DatumRow(row)) + row = chunk.MutRowFromDatums(types.MakeDatums(3)).ToRow() + err = bitXorFunc.Update(evalCtx, s.ctx.GetSessionVars().StmtCtx, row) c.Assert(err, IsNil) result = bitXorFunc.GetResult(evalCtx) c.Assert(result.GetUint64(), Equals, uint64(3)) - row = types.MakeDatums(2) - err = bitXorFunc.Update(evalCtx, s.ctx.GetSessionVars().StmtCtx, types.DatumRow(row)) + row = chunk.MutRowFromDatums(types.MakeDatums(2)).ToRow() + err = bitXorFunc.Update(evalCtx, s.ctx.GetSessionVars().StmtCtx, row) c.Assert(err, IsNil) result = bitXorFunc.GetResult(evalCtx) c.Assert(result.GetUint64(), Equals, uint64(1)) @@ -360,24 +361,24 @@ func (s *testAggFuncSuit) TestBitXor(c *C) { var dec types.MyDecimal err = dec.FromString([]byte("1.234")) c.Assert(err, IsNil) - row = types.MakeDatums(&dec) - err = bitXorFunc.Update(evalCtx, s.ctx.GetSessionVars().StmtCtx, types.DatumRow(row)) + row = chunk.MutRowFromDatums(types.MakeDatums(&dec)).ToRow() + err = bitXorFunc.Update(evalCtx, s.ctx.GetSessionVars().StmtCtx, row) c.Assert(err, IsNil) result = bitXorFunc.GetResult(evalCtx) c.Assert(result.GetUint64(), Equals, uint64(1)) err = dec.FromString([]byte("1.012")) c.Assert(err, IsNil) - row = types.MakeDatums(&dec) - err = bitXorFunc.Update(evalCtx, s.ctx.GetSessionVars().StmtCtx, types.DatumRow(row)) + row = chunk.MutRowFromDatums(types.MakeDatums(&dec)).ToRow() + err = bitXorFunc.Update(evalCtx, s.ctx.GetSessionVars().StmtCtx, row) c.Assert(err, IsNil) result = bitXorFunc.GetResult(evalCtx) c.Assert(result.GetUint64(), Equals, uint64(0)) err = dec.FromString([]byte("2.12345678")) c.Assert(err, IsNil) - row = types.MakeDatums(&dec) - err = bitXorFunc.Update(evalCtx, s.ctx.GetSessionVars().StmtCtx, types.DatumRow(row)) + row = chunk.MutRowFromDatums(types.MakeDatums(&dec)).ToRow() + err = bitXorFunc.Update(evalCtx, s.ctx.GetSessionVars().StmtCtx, row) c.Assert(err, IsNil) result = bitXorFunc.GetResult(evalCtx) c.Assert(result.GetUint64(), Equals, uint64(2)) @@ -395,7 +396,7 @@ func (s *testAggFuncSuit) TestCount(c *C) { c.Assert(result.GetInt64(), Equals, int64(0)) for _, row := range s.rows { - err := countFunc.Update(evalCtx, s.ctx.GetSessionVars().StmtCtx, types.DatumRow(row)) + err := countFunc.Update(evalCtx, s.ctx.GetSessionVars().StmtCtx, row) c.Assert(err, IsNil) } result = countFunc.GetResult(evalCtx) @@ -411,7 +412,7 @@ func (s *testAggFuncSuit) TestCount(c *C) { evalCtx = distinctCountFunc.CreateContext(s.ctx.GetSessionVars().StmtCtx) for _, row := range s.rows { - err := distinctCountFunc.Update(evalCtx, s.ctx.GetSessionVars().StmtCtx, types.DatumRow(row)) + err := distinctCountFunc.Update(evalCtx, s.ctx.GetSessionVars().StmtCtx, row) c.Assert(err, IsNil) } result = distinctCountFunc.GetResult(evalCtx) @@ -433,20 +434,20 @@ func (s *testAggFuncSuit) TestConcat(c *C) { result := concatFunc.GetResult(evalCtx) c.Assert(result.IsNull(), IsTrue) - row := types.MakeDatums(1, "x") - err := concatFunc.Update(evalCtx, s.ctx.GetSessionVars().StmtCtx, types.DatumRow(row)) + row := 
chunk.MutRowFromDatums(types.MakeDatums(1, "x")) + err := concatFunc.Update(evalCtx, s.ctx.GetSessionVars().StmtCtx, row.ToRow()) c.Assert(err, IsNil) result = concatFunc.GetResult(evalCtx) c.Assert(result.GetString(), Equals, "1") - row[0].SetInt64(2) - err = concatFunc.Update(evalCtx, s.ctx.GetSessionVars().StmtCtx, types.DatumRow(row)) + row.SetDatum(0, types.NewIntDatum(2)) + err = concatFunc.Update(evalCtx, s.ctx.GetSessionVars().StmtCtx, row.ToRow()) c.Assert(err, IsNil) result = concatFunc.GetResult(evalCtx) c.Assert(result.GetString(), Equals, "1x2") - row[0].SetNull() - err = concatFunc.Update(evalCtx, s.ctx.GetSessionVars().StmtCtx, types.DatumRow(row)) + row.SetDatum(0, types.NewDatum(nil)) + err = concatFunc.Update(evalCtx, s.ctx.GetSessionVars().StmtCtx, row.ToRow()) c.Assert(err, IsNil) result = concatFunc.GetResult(evalCtx) c.Assert(result.GetString(), Equals, "1x2") @@ -456,14 +457,14 @@ func (s *testAggFuncSuit) TestConcat(c *C) { distinctConcatFunc := NewAggFuncDesc(s.ctx, ast.AggFuncGroupConcat, []expression.Expression{col, sep}, true).GetAggFunc() evalCtx = distinctConcatFunc.CreateContext(s.ctx.GetSessionVars().StmtCtx) - row[0].SetInt64(1) - err = distinctConcatFunc.Update(evalCtx, s.ctx.GetSessionVars().StmtCtx, types.DatumRow(row)) + row.SetDatum(0, types.NewIntDatum(1)) + err = distinctConcatFunc.Update(evalCtx, s.ctx.GetSessionVars().StmtCtx, row.ToRow()) c.Assert(err, IsNil) result = distinctConcatFunc.GetResult(evalCtx) c.Assert(result.GetString(), Equals, "1") - row[0].SetInt64(1) - err = distinctConcatFunc.Update(evalCtx, s.ctx.GetSessionVars().StmtCtx, types.DatumRow(row)) + row.SetDatum(0, types.NewIntDatum(1)) + err = distinctConcatFunc.Update(evalCtx, s.ctx.GetSessionVars().StmtCtx, row.ToRow()) c.Assert(err, IsNil) result = distinctConcatFunc.GetResult(evalCtx) c.Assert(result.GetString(), Equals, "1") @@ -478,14 +479,14 @@ func (s *testAggFuncSuit) TestFirstRow(c *C) { firstRowFunc := NewAggFuncDesc(s.ctx, ast.AggFuncFirstRow, []expression.Expression{col}, false).GetAggFunc() evalCtx := firstRowFunc.CreateContext(s.ctx.GetSessionVars().StmtCtx) - row := types.MakeDatums(1) - err := firstRowFunc.Update(evalCtx, s.ctx.GetSessionVars().StmtCtx, types.DatumRow(row)) + row := chunk.MutRowFromDatums(types.MakeDatums(1)).ToRow() + err := firstRowFunc.Update(evalCtx, s.ctx.GetSessionVars().StmtCtx, row) c.Assert(err, IsNil) result := firstRowFunc.GetResult(evalCtx) c.Assert(result.GetUint64(), Equals, uint64(1)) - row = types.MakeDatums(2) - err = firstRowFunc.Update(evalCtx, s.ctx.GetSessionVars().StmtCtx, types.DatumRow(row)) + row = chunk.MutRowFromDatums(types.MakeDatums(2)).ToRow() + err = firstRowFunc.Update(evalCtx, s.ctx.GetSessionVars().StmtCtx, row) c.Assert(err, IsNil) result = firstRowFunc.GetResult(evalCtx) c.Assert(result.GetUint64(), Equals, uint64(1)) @@ -509,42 +510,42 @@ func (s *testAggFuncSuit) TestMaxMin(c *C) { result = minFunc.GetResult(minEvalCtx) c.Assert(result.IsNull(), IsTrue) - row := types.MakeDatums(2) - err := maxFunc.Update(maxEvalCtx, s.ctx.GetSessionVars().StmtCtx, types.DatumRow(row)) + row := chunk.MutRowFromDatums(types.MakeDatums(2)) + err := maxFunc.Update(maxEvalCtx, s.ctx.GetSessionVars().StmtCtx, row.ToRow()) c.Assert(err, IsNil) result = maxFunc.GetResult(maxEvalCtx) c.Assert(result.GetInt64(), Equals, int64(2)) - err = minFunc.Update(minEvalCtx, s.ctx.GetSessionVars().StmtCtx, types.DatumRow(row)) + err = minFunc.Update(minEvalCtx, s.ctx.GetSessionVars().StmtCtx, row.ToRow()) c.Assert(err, IsNil) result = 
minFunc.GetResult(minEvalCtx) c.Assert(result.GetInt64(), Equals, int64(2)) - row[0].SetInt64(3) - err = maxFunc.Update(maxEvalCtx, s.ctx.GetSessionVars().StmtCtx, types.DatumRow(row)) + row.SetDatum(0, types.NewIntDatum(3)) + err = maxFunc.Update(maxEvalCtx, s.ctx.GetSessionVars().StmtCtx, row.ToRow()) c.Assert(err, IsNil) result = maxFunc.GetResult(maxEvalCtx) c.Assert(result.GetInt64(), Equals, int64(3)) - err = minFunc.Update(minEvalCtx, s.ctx.GetSessionVars().StmtCtx, types.DatumRow(row)) + err = minFunc.Update(minEvalCtx, s.ctx.GetSessionVars().StmtCtx, row.ToRow()) c.Assert(err, IsNil) result = minFunc.GetResult(minEvalCtx) c.Assert(result.GetInt64(), Equals, int64(2)) - row[0].SetInt64(1) - err = maxFunc.Update(maxEvalCtx, s.ctx.GetSessionVars().StmtCtx, types.DatumRow(row)) + row.SetDatum(0, types.NewIntDatum(1)) + err = maxFunc.Update(maxEvalCtx, s.ctx.GetSessionVars().StmtCtx, row.ToRow()) c.Assert(err, IsNil) result = maxFunc.GetResult(maxEvalCtx) c.Assert(result.GetInt64(), Equals, int64(3)) - err = minFunc.Update(minEvalCtx, s.ctx.GetSessionVars().StmtCtx, types.DatumRow(row)) + err = minFunc.Update(minEvalCtx, s.ctx.GetSessionVars().StmtCtx, row.ToRow()) c.Assert(err, IsNil) result = minFunc.GetResult(minEvalCtx) c.Assert(result.GetInt64(), Equals, int64(1)) - row[0].SetNull() - err = maxFunc.Update(maxEvalCtx, s.ctx.GetSessionVars().StmtCtx, types.DatumRow(row)) + row.SetDatum(0, types.NewDatum(nil)) + err = maxFunc.Update(maxEvalCtx, s.ctx.GetSessionVars().StmtCtx, row.ToRow()) c.Assert(err, IsNil) result = maxFunc.GetResult(maxEvalCtx) c.Assert(result.GetInt64(), Equals, int64(3)) - err = minFunc.Update(minEvalCtx, s.ctx.GetSessionVars().StmtCtx, types.DatumRow(row)) + err = minFunc.Update(minEvalCtx, s.ctx.GetSessionVars().StmtCtx, row.ToRow()) c.Assert(err, IsNil) result = minFunc.GetResult(minEvalCtx) c.Assert(result.GetInt64(), Equals, int64(1)) diff --git a/expression/builtin_cast_test.go b/expression/builtin_cast_test.go index 7d8c930d31329..fc6c00721923b 100644 --- a/expression/builtin_cast_test.go +++ b/expression/builtin_cast_test.go @@ -25,6 +25,7 @@ import ( "github.com/pingcap/tidb/types" "github.com/pingcap/tidb/types/json" "github.com/pingcap/tidb/util/charset" + "github.com/pingcap/tidb/util/chunk" ) func (s *testEvaluatorSuite) TestCast(c *C) { @@ -234,41 +235,43 @@ func (s *testEvaluatorSuite) TestCastFuncSig(c *C) { }() var sig builtinFunc + durationColumn := &Column{RetType: types.NewFieldType(mysql.TypeDuration), Index: 0} + durationColumn.RetType.Decimal = types.DefaultFsp // Test cast as Decimal. castToDecCases := []struct { before *Column after *types.MyDecimal - row types.DatumRow + row chunk.MutRow }{ // cast int as decimal. { &Column{RetType: types.NewFieldType(mysql.TypeLonglong), Index: 0}, types.NewDecFromInt(1), - []types.Datum{types.NewIntDatum(1)}, + chunk.MutRowFromDatums([]types.Datum{types.NewIntDatum(1)}), }, // cast string as decimal. { &Column{RetType: types.NewFieldType(mysql.TypeString), Index: 0}, types.NewDecFromInt(1), - []types.Datum{types.NewStringDatum("1")}, + chunk.MutRowFromDatums([]types.Datum{types.NewStringDatum("1")}), }, // cast real as decimal. { &Column{RetType: types.NewFieldType(mysql.TypeDouble), Index: 0}, types.NewDecFromInt(1), - []types.Datum{types.NewFloat64Datum(1)}, + chunk.MutRowFromDatums([]types.Datum{types.NewFloat64Datum(1)}), }, // cast Time as decimal. 
{ &Column{RetType: types.NewFieldType(mysql.TypeDatetime), Index: 0}, types.NewDecFromInt(curTimeInt), - []types.Datum{timeDatum}, + chunk.MutRowFromDatums([]types.Datum{timeDatum}), }, // cast Duration as decimal. { - &Column{RetType: types.NewFieldType(mysql.TypeDuration), Index: 0}, + durationColumn, types.NewDecFromInt(125959), - []types.Datum{durationDatum}, + chunk.MutRowFromDatums([]types.Datum{durationDatum}), }, } for i, t := range castToDecCases { @@ -289,18 +292,19 @@ func (s *testEvaluatorSuite) TestCastFuncSig(c *C) { case 5: sig = &builtinCastDecimalAsDecimalSig{decFunc} } - res, isNull, err := sig.evalDecimal(t.row) + res, isNull, err := sig.evalDecimal(t.row.ToRow()) c.Assert(isNull, Equals, false) c.Assert(err, IsNil) c.Assert(res.Compare(t.after), Equals, 0) } + durationColumn.RetType.Decimal = 1 castToDecCases2 := []struct { before *Column flen int decimal int after *types.MyDecimal - row types.DatumRow + row chunk.MutRow }{ // cast int as decimal. { @@ -308,7 +312,7 @@ func (s *testEvaluatorSuite) TestCastFuncSig(c *C) { 7, 3, types.NewDecFromStringForTest("1234.000"), - []types.Datum{types.NewIntDatum(1234)}, + chunk.MutRowFromDatums([]types.Datum{types.NewIntDatum(1234)}), }, // cast string as decimal. { @@ -316,7 +320,7 @@ func (s *testEvaluatorSuite) TestCastFuncSig(c *C) { 7, 3, types.NewDecFromStringForTest("1234.000"), - []types.Datum{types.NewStringDatum("1234")}, + chunk.MutRowFromDatums([]types.Datum{types.NewStringDatum("1234")}), }, // cast real as decimal. { @@ -324,7 +328,7 @@ func (s *testEvaluatorSuite) TestCastFuncSig(c *C) { 8, 4, types.NewDecFromStringForTest("1234.1230"), - []types.Datum{types.NewFloat64Datum(1234.123)}, + chunk.MutRowFromDatums([]types.Datum{types.NewFloat64Datum(1234.123)}), }, // cast Time as decimal. { @@ -332,15 +336,15 @@ func (s *testEvaluatorSuite) TestCastFuncSig(c *C) { 15, 1, types.NewDecFromStringForTest(strconv.FormatInt(curTimeInt, 10) + ".0"), - []types.Datum{timeDatum}, + chunk.MutRowFromDatums([]types.Datum{timeDatum}), }, // cast Duration as decimal. { - &Column{RetType: types.NewFieldType(mysql.TypeDuration), Index: 0}, + durationColumn, 7, 1, types.NewDecFromStringForTest("125959.0"), - []types.Datum{durationDatum}, + chunk.MutRowFromDatums([]types.Datum{durationDatum}), }, // cast decimal as decimal. { @@ -348,7 +352,7 @@ func (s *testEvaluatorSuite) TestCastFuncSig(c *C) { 7, 3, types.NewDecFromStringForTest("1234.000"), - []types.Datum{types.NewDecimalDatum(types.NewDecFromStringForTest("1234"))}, + chunk.MutRowFromDatums([]types.Datum{types.NewDecimalDatum(types.NewDecFromStringForTest("1234"))}), }, } @@ -372,53 +376,54 @@ func (s *testEvaluatorSuite) TestCastFuncSig(c *C) { case 5: sig = &builtinCastDecimalAsDecimalSig{decFunc} } - res, isNull, err := sig.evalDecimal(t.row) + res, isNull, err := sig.evalDecimal(t.row.ToRow()) c.Assert(isNull, Equals, false) c.Assert(err, IsNil) c.Assert(res.ToString(), DeepEquals, t.after.ToString()) } + durationColumn.RetType.Decimal = 0 // Test cast as int. castToIntCases := []struct { before *Column after int64 - row types.DatumRow + row chunk.MutRow }{ // cast string as int. { &Column{RetType: types.NewFieldType(mysql.TypeString), Index: 0}, 1, - []types.Datum{types.NewStringDatum("1")}, + chunk.MutRowFromDatums([]types.Datum{types.NewStringDatum("1")}), }, // cast decimal as int. 
{ &Column{RetType: types.NewFieldType(mysql.TypeNewDecimal), Index: 0}, 1, - []types.Datum{types.NewDecimalDatum(types.NewDecFromInt(1))}, + chunk.MutRowFromDatums([]types.Datum{types.NewDecimalDatum(types.NewDecFromInt(1))}), }, // cast real as int. { &Column{RetType: types.NewFieldType(mysql.TypeDouble), Index: 0}, 1, - []types.Datum{types.NewFloat64Datum(1)}, + chunk.MutRowFromDatums([]types.Datum{types.NewFloat64Datum(1)}), }, // cast Time as int. { &Column{RetType: types.NewFieldType(mysql.TypeDatetime), Index: 0}, curTimeInt, - []types.Datum{timeDatum}, + chunk.MutRowFromDatums([]types.Datum{timeDatum}), }, // cast Duration as int. { &Column{RetType: types.NewFieldType(mysql.TypeDuration), Index: 0}, 125959, - []types.Datum{durationDatum}, + chunk.MutRowFromDatums([]types.Datum{durationDatum}), }, // cast JSON as int. { &Column{RetType: types.NewFieldType(mysql.TypeJSON), Index: 0}, 3, - []types.Datum{jsonInt}, + chunk.MutRowFromDatums([]types.Datum{jsonInt}), }, } for i, t := range castToIntCases { @@ -438,7 +443,7 @@ func (s *testEvaluatorSuite) TestCastFuncSig(c *C) { case 5: sig = &builtinCastJSONAsIntSig{intFunc} } - res, isNull, err := sig.evalInt(t.row) + res, isNull, err := sig.evalInt(t.row.ToRow()) c.Assert(isNull, Equals, false) c.Assert(err, IsNil) c.Assert(res, Equals, t.after) @@ -448,43 +453,43 @@ func (s *testEvaluatorSuite) TestCastFuncSig(c *C) { castToRealCases := []struct { before *Column after float64 - row types.DatumRow + row chunk.MutRow }{ // cast string as real. { &Column{RetType: types.NewFieldType(mysql.TypeString), Index: 0}, 1.1, - []types.Datum{types.NewStringDatum("1.1")}, + chunk.MutRowFromDatums([]types.Datum{types.NewStringDatum("1.1")}), }, // cast decimal as real. { &Column{RetType: types.NewFieldType(mysql.TypeNewDecimal), Index: 0}, 1.1, - []types.Datum{types.NewDecimalDatum(types.NewDecFromFloatForTest(1.1))}, + chunk.MutRowFromDatums([]types.Datum{types.NewDecimalDatum(types.NewDecFromFloatForTest(1.1))}), }, // cast int as real. { &Column{RetType: types.NewFieldType(mysql.TypeLonglong), Index: 0}, 1, - []types.Datum{types.NewIntDatum(1)}, + chunk.MutRowFromDatums([]types.Datum{types.NewIntDatum(1)}), }, // cast Time as real. { &Column{RetType: types.NewFieldType(mysql.TypeDatetime), Index: 0}, float64(curTimeInt), - []types.Datum{timeDatum}, + chunk.MutRowFromDatums([]types.Datum{timeDatum}), }, // cast Duration as real. { - &Column{RetType: types.NewFieldType(mysql.TypeDuration), Index: 0}, + durationColumn, 125959, - []types.Datum{durationDatum}, + chunk.MutRowFromDatums([]types.Datum{durationDatum}), }, // cast JSON as real. { &Column{RetType: types.NewFieldType(mysql.TypeJSON), Index: 0}, 3.0, - []types.Datum{jsonInt}, + chunk.MutRowFromDatums([]types.Datum{jsonInt}), }, } for i, t := range castToRealCases { @@ -504,7 +509,7 @@ func (s *testEvaluatorSuite) TestCastFuncSig(c *C) { case 5: sig = &builtinCastJSONAsRealSig{realFunc} } - res, isNull, err := sig.evalReal(t.row) + res, isNull, err := sig.evalReal(t.row.ToRow()) c.Assert(isNull, Equals, false) c.Assert(err, IsNil) c.Assert(res, Equals, t.after) @@ -514,49 +519,49 @@ func (s *testEvaluatorSuite) TestCastFuncSig(c *C) { castToStringCases := []struct { before *Column after string - row types.DatumRow + row chunk.MutRow }{ // cast real as string. { &Column{RetType: types.NewFieldType(mysql.TypeDouble), Index: 0}, "1", - []types.Datum{types.NewFloat64Datum(1)}, + chunk.MutRowFromDatums([]types.Datum{types.NewFloat64Datum(1)}), }, // cast decimal as string. 
{ &Column{RetType: types.NewFieldType(mysql.TypeNewDecimal), Index: 0}, "1", - []types.Datum{types.NewDecimalDatum(types.NewDecFromInt(1))}, + chunk.MutRowFromDatums([]types.Datum{types.NewDecimalDatum(types.NewDecFromInt(1))}), }, // cast int as string. { &Column{RetType: types.NewFieldType(mysql.TypeLonglong), Index: 0}, "1", - []types.Datum{types.NewIntDatum(1)}, + chunk.MutRowFromDatums([]types.Datum{types.NewIntDatum(1)}), }, // cast time as string. { &Column{RetType: types.NewFieldType(mysql.TypeDatetime), Index: 0}, curTimeString, - []types.Datum{timeDatum}, + chunk.MutRowFromDatums([]types.Datum{timeDatum}), }, // cast duration as string. { &Column{RetType: types.NewFieldType(mysql.TypeDuration), Index: 0}, "12:59:59", - []types.Datum{durationDatum}, + chunk.MutRowFromDatums([]types.Datum{durationDatum}), }, // cast JSON as string. { &Column{RetType: types.NewFieldType(mysql.TypeJSON), Index: 0}, "3", - []types.Datum{jsonInt}, + chunk.MutRowFromDatums([]types.Datum{jsonInt}), }, // cast string as string. { &Column{RetType: types.NewFieldType(mysql.TypeString), Index: 0}, "1234", - []types.Datum{types.NewStringDatum("1234")}, + chunk.MutRowFromDatums([]types.Datum{types.NewStringDatum("1234")}), }, } for i, t := range castToStringCases { @@ -581,7 +586,7 @@ func (s *testEvaluatorSuite) TestCastFuncSig(c *C) { case 6: sig = &builtinCastStringAsStringSig{stringFunc} } - res, isNull, err := sig.evalString(t.row) + res, isNull, err := sig.evalString(t.row.ToRow()) c.Assert(isNull, Equals, false) c.Assert(err, IsNil) c.Assert(res, Equals, t.after) @@ -592,49 +597,49 @@ func (s *testEvaluatorSuite) TestCastFuncSig(c *C) { before *Column after string flen int - row types.DatumRow + row chunk.MutRow }{ // cast real as string. { &Column{RetType: types.NewFieldType(mysql.TypeDouble), Index: 0}, "123", 3, - []types.Datum{types.NewFloat64Datum(1234.123)}, + chunk.MutRowFromDatums([]types.Datum{types.NewFloat64Datum(1234.123)}), }, // cast decimal as string. { &Column{RetType: types.NewFieldType(mysql.TypeNewDecimal), Index: 0}, "123", 3, - []types.Datum{types.NewDecimalDatum(types.NewDecFromStringForTest("1234.123"))}, + chunk.MutRowFromDatums([]types.Datum{types.NewDecimalDatum(types.NewDecFromStringForTest("1234.123"))}), }, // cast int as string. { &Column{RetType: types.NewFieldType(mysql.TypeLonglong), Index: 0}, "123", 3, - []types.Datum{types.NewIntDatum(1234)}, + chunk.MutRowFromDatums([]types.Datum{types.NewIntDatum(1234)}), }, // cast time as string. { &Column{RetType: types.NewFieldType(mysql.TypeDatetime), Index: 0}, curTimeString[:3], 3, - []types.Datum{timeDatum}, + chunk.MutRowFromDatums([]types.Datum{timeDatum}), }, // cast duration as string. { &Column{RetType: types.NewFieldType(mysql.TypeDuration), Index: 0}, "12:", 3, - []types.Datum{durationDatum}, + chunk.MutRowFromDatums([]types.Datum{durationDatum}), }, // cast string as string. 
{ &Column{RetType: types.NewFieldType(mysql.TypeString), Index: 0}, "你好w", 3, - []types.Datum{types.NewStringDatum("你好world")}, + chunk.MutRowFromDatums([]types.Datum{types.NewStringDatum("你好world")}), }, } for i, t := range castToStringCases2 { @@ -658,7 +663,7 @@ func (s *testEvaluatorSuite) TestCastFuncSig(c *C) { stringFunc.tp.Charset = charset.CharsetUTF8 sig = &builtinCastStringAsStringSig{stringFunc} } - res, isNull, err := sig.evalString(t.row) + res, isNull, err := sig.evalString(t.row.ToRow()) c.Assert(isNull, Equals, false) c.Assert(err, IsNil) c.Assert(res, Equals, t.after) @@ -667,49 +672,49 @@ func (s *testEvaluatorSuite) TestCastFuncSig(c *C) { castToTimeCases := []struct { before *Column after types.Time - row types.DatumRow + row chunk.MutRow }{ // cast real as Time. { &Column{RetType: types.NewFieldType(mysql.TypeDouble), Index: 0}, tm, - []types.Datum{types.NewFloat64Datum(float64(curTimeInt))}, + chunk.MutRowFromDatums([]types.Datum{types.NewFloat64Datum(float64(curTimeInt))}), }, // cast decimal as Time. { &Column{RetType: types.NewFieldType(mysql.TypeNewDecimal), Index: 0}, tm, - []types.Datum{types.NewDecimalDatum(types.NewDecFromInt(curTimeInt))}, + chunk.MutRowFromDatums([]types.Datum{types.NewDecimalDatum(types.NewDecFromInt(curTimeInt))}), }, // cast int as Time. { &Column{RetType: types.NewFieldType(mysql.TypeLonglong), Index: 0}, tm, - []types.Datum{types.NewIntDatum(curTimeInt)}, + chunk.MutRowFromDatums([]types.Datum{types.NewIntDatum(curTimeInt)}), }, // cast string as Time. { &Column{RetType: types.NewFieldType(mysql.TypeString), Index: 0}, tm, - []types.Datum{types.NewStringDatum(curTimeString)}, + chunk.MutRowFromDatums([]types.Datum{types.NewStringDatum(curTimeString)}), }, // cast Duration as Time. { &Column{RetType: types.NewFieldType(mysql.TypeDuration), Index: 0}, tm, - []types.Datum{durationDatum}, + chunk.MutRowFromDatums([]types.Datum{durationDatum}), }, // cast JSON as Time. { &Column{RetType: types.NewFieldType(mysql.TypeJSON), Index: 0}, tm, - []types.Datum{jsonTime}, + chunk.MutRowFromDatums([]types.Datum{jsonTime}), }, // cast Time as Time. 
{ &Column{RetType: types.NewFieldType(mysql.TypeDatetime), Index: 0}, tm, - []types.Datum{timeDatum}, + chunk.MutRowFromDatums([]types.Datum{timeDatum}), }, } for i, t := range castToTimeCases { @@ -734,7 +739,7 @@ func (s *testEvaluatorSuite) TestCastFuncSig(c *C) { case 6: sig = &builtinCastTimeAsTimeSig{timeFunc} } - res, isNull, err := sig.evalTime(t.row) + res, isNull, err := sig.evalTime(t.row.ToRow()) c.Assert(isNull, Equals, false) c.Assert(err, IsNil) c.Assert(res.String(), Equals, t.after.String()) @@ -743,7 +748,7 @@ func (s *testEvaluatorSuite) TestCastFuncSig(c *C) { castToTimeCases2 := []struct { before *Column after types.Time - row types.DatumRow + row chunk.MutRow fsp int tp byte }{ @@ -751,7 +756,7 @@ func (s *testEvaluatorSuite) TestCastFuncSig(c *C) { { &Column{RetType: types.NewFieldType(mysql.TypeDouble), Index: 0}, dt, - []types.Datum{types.NewFloat64Datum(float64(curTimeInt))}, + chunk.MutRowFromDatums([]types.Datum{types.NewFloat64Datum(float64(curTimeInt))}), types.DefaultFsp, mysql.TypeDate, }, @@ -759,7 +764,7 @@ func (s *testEvaluatorSuite) TestCastFuncSig(c *C) { { &Column{RetType: types.NewFieldType(mysql.TypeNewDecimal), Index: 0}, dt, - []types.Datum{types.NewDecimalDatum(types.NewDecFromInt(curTimeInt))}, + chunk.MutRowFromDatums([]types.Datum{types.NewDecimalDatum(types.NewDecFromInt(curTimeInt))}), types.DefaultFsp, mysql.TypeDate, }, @@ -767,7 +772,7 @@ func (s *testEvaluatorSuite) TestCastFuncSig(c *C) { { &Column{RetType: types.NewFieldType(mysql.TypeLonglong), Index: 0}, tm, - []types.Datum{types.NewIntDatum(curTimeInt)}, + chunk.MutRowFromDatums([]types.Datum{types.NewIntDatum(curTimeInt)}), types.MaxFsp, mysql.TypeDatetime, }, @@ -775,7 +780,7 @@ func (s *testEvaluatorSuite) TestCastFuncSig(c *C) { { &Column{RetType: types.NewFieldType(mysql.TypeString), Index: 0}, tm, - []types.Datum{types.NewStringDatum(curTimeString)}, + chunk.MutRowFromDatums([]types.Datum{types.NewStringDatum(curTimeString)}), types.MaxFsp, mysql.TypeDatetime, }, @@ -783,7 +788,7 @@ func (s *testEvaluatorSuite) TestCastFuncSig(c *C) { { &Column{RetType: types.NewFieldType(mysql.TypeDuration), Index: 0}, dt, - []types.Datum{durationDatum}, + chunk.MutRowFromDatums([]types.Datum{durationDatum}), types.DefaultFsp, mysql.TypeDate, }, @@ -791,7 +796,7 @@ func (s *testEvaluatorSuite) TestCastFuncSig(c *C) { { &Column{RetType: types.NewFieldType(mysql.TypeDatetime), Index: 0}, dt, - []types.Datum{timeDatum}, + chunk.MutRowFromDatums([]types.Datum{timeDatum}), types.DefaultFsp, mysql.TypeDate, }, @@ -816,7 +821,7 @@ func (s *testEvaluatorSuite) TestCastFuncSig(c *C) { case 5: sig = &builtinCastTimeAsTimeSig{timeFunc} } - res, isNull, err := sig.evalTime(t.row) + res, isNull, err := sig.evalTime(t.row.ToRow()) c.Assert(isNull, Equals, false) c.Assert(err, IsNil) resAfter := t.after.String() @@ -832,49 +837,49 @@ func (s *testEvaluatorSuite) TestCastFuncSig(c *C) { castToDurationCases := []struct { before *Column after types.Duration - row types.DatumRow + row chunk.MutRow }{ // cast real as Duration. { &Column{RetType: types.NewFieldType(mysql.TypeDouble), Index: 0}, duration, - []types.Datum{types.NewFloat64Datum(125959)}, + chunk.MutRowFromDatums([]types.Datum{types.NewFloat64Datum(125959)}), }, // cast decimal as Duration. 
{ &Column{RetType: types.NewFieldType(mysql.TypeNewDecimal), Index: 0}, duration, - []types.Datum{types.NewDecimalDatum(types.NewDecFromInt(125959))}, + chunk.MutRowFromDatums([]types.Datum{types.NewDecimalDatum(types.NewDecFromInt(125959))}), }, // cast int as Duration. { &Column{RetType: types.NewFieldType(mysql.TypeLonglong), Index: 0}, duration, - []types.Datum{types.NewIntDatum(125959)}, + chunk.MutRowFromDatums([]types.Datum{types.NewIntDatum(125959)}), }, // cast string as Duration. { &Column{RetType: types.NewFieldType(mysql.TypeString), Index: 0}, duration, - []types.Datum{types.NewStringDatum("12:59:59")}, + chunk.MutRowFromDatums([]types.Datum{types.NewStringDatum("12:59:59")}), }, // cast Time as Duration. { &Column{RetType: types.NewFieldType(mysql.TypeDatetime), Index: 0}, duration, - []types.Datum{timeDatum}, + chunk.MutRowFromDatums([]types.Datum{timeDatum}), }, // cast JSON as Duration. { &Column{RetType: types.NewFieldType(mysql.TypeJSON), Index: 0}, duration, - []types.Datum{jsonDuration}, + chunk.MutRowFromDatums([]types.Datum{jsonDuration}), }, // cast Duration as Duration. { &Column{RetType: types.NewFieldType(mysql.TypeDuration), Index: 0}, duration, - []types.Datum{durationDatum}, + chunk.MutRowFromDatums([]types.Datum{durationDatum}), }, } for i, t := range castToDurationCases { @@ -899,7 +904,7 @@ func (s *testEvaluatorSuite) TestCastFuncSig(c *C) { case 6: sig = &builtinCastDurationAsDurationSig{durationFunc} } - res, isNull, err := sig.evalDuration(t.row) + res, isNull, err := sig.evalDuration(t.row.ToRow()) c.Assert(isNull, Equals, false) c.Assert(err, IsNil) c.Assert(res.String(), Equals, t.after.String()) @@ -908,49 +913,49 @@ func (s *testEvaluatorSuite) TestCastFuncSig(c *C) { castToDurationCases2 := []struct { before *Column after types.Duration - row types.DatumRow + row chunk.MutRow fsp int }{ // cast real as Duration. { &Column{RetType: types.NewFieldType(mysql.TypeDouble), Index: 0}, duration, - []types.Datum{types.NewFloat64Datum(125959)}, + chunk.MutRowFromDatums([]types.Datum{types.NewFloat64Datum(125959)}), 1, }, // cast decimal as Duration. { &Column{RetType: types.NewFieldType(mysql.TypeNewDecimal), Index: 0}, duration, - []types.Datum{types.NewDecimalDatum(types.NewDecFromInt(125959))}, + chunk.MutRowFromDatums([]types.Datum{types.NewDecimalDatum(types.NewDecFromInt(125959))}), 2, }, // cast int as Duration. { &Column{RetType: types.NewFieldType(mysql.TypeLonglong), Index: 0}, duration, - []types.Datum{types.NewIntDatum(125959)}, + chunk.MutRowFromDatums([]types.Datum{types.NewIntDatum(125959)}), 3, }, // cast string as Duration. { &Column{RetType: types.NewFieldType(mysql.TypeString), Index: 0}, duration, - []types.Datum{types.NewStringDatum("12:59:59")}, + chunk.MutRowFromDatums([]types.Datum{types.NewStringDatum("12:59:59")}), 4, }, // cast Time as Duration. { &Column{RetType: types.NewFieldType(mysql.TypeDatetime), Index: 0}, duration, - []types.Datum{timeDatum}, + chunk.MutRowFromDatums([]types.Datum{timeDatum}), 5, }, // cast Duration as Duration. 
{ &Column{RetType: types.NewFieldType(mysql.TypeDuration), Index: 0}, duration, - []types.Datum{durationDatum}, + chunk.MutRowFromDatums([]types.Datum{durationDatum}), 6, }, } @@ -974,7 +979,7 @@ func (s *testEvaluatorSuite) TestCastFuncSig(c *C) { case 5: sig = &builtinCastDurationAsDurationSig{durationFunc} } - res, isNull, err := sig.evalDuration(t.row) + res, isNull, err := sig.evalDuration(t.row.ToRow()) c.Assert(isNull, Equals, false) c.Assert(err, IsNil) resAfter := t.after.String() @@ -989,11 +994,11 @@ func (s *testEvaluatorSuite) TestCastFuncSig(c *C) { // null case args := []Expression{&Column{RetType: types.NewFieldType(mysql.TypeDouble), Index: 0}} - row := types.DatumRow{types.NewDatum(nil)} + row := chunk.MutRowFromDatums([]types.Datum{types.NewDatum(nil)}) bf := newBaseBuiltinFunc(ctx, args) bf.tp = types.NewFieldType(mysql.TypeVarString) sig = &builtinCastRealAsStringSig{bf} - sRes, isNull, err := sig.evalString(row) + sRes, isNull, err := sig.evalString(row.ToRow()) c.Assert(sRes, Equals, "") c.Assert(isNull, Equals, true) c.Assert(err, IsNil) @@ -1011,9 +1016,13 @@ func (s *testEvaluatorSuite) TestCastFuncSig(c *C) { func (s *testEvaluatorSuite) TestWrapWithCastAsTypesClasses(c *C) { ctx := s.ctx + durationColumn0 := &Column{RetType: types.NewFieldType(mysql.TypeDuration), Index: 0} + durationColumn0.RetType.Decimal = types.DefaultFsp + durationColumn3 := &Column{RetType: types.NewFieldType(mysql.TypeDuration), Index: 0} + durationColumn3.RetType.Decimal = 3 cases := []struct { expr Expression - row types.DatumRow + row chunk.MutRow intRes int64 realRes float64 decRes *types.MyDecimal @@ -1021,62 +1030,62 @@ func (s *testEvaluatorSuite) TestWrapWithCastAsTypesClasses(c *C) { }{ { &Column{RetType: types.NewFieldType(mysql.TypeLong), Index: 0}, - []types.Datum{types.NewDatum(123)}, + chunk.MutRowFromDatums([]types.Datum{types.NewDatum(123)}), 123, 123, types.NewDecFromInt(123), "123", }, { &Column{RetType: types.NewFieldType(mysql.TypeDouble), Index: 0}, - []types.Datum{types.NewDatum(123.555)}, + chunk.MutRowFromDatums([]types.Datum{types.NewDatum(123.555)}), 124, 123.555, types.NewDecFromFloatForTest(123.555), "123.555", }, { &Column{RetType: types.NewFieldType(mysql.TypeDouble), Index: 0}, - []types.Datum{types.NewDatum(123.123)}, + chunk.MutRowFromDatums([]types.Datum{types.NewDatum(123.123)}), 123, 123.123, types.NewDecFromFloatForTest(123.123), "123.123", }, { &Column{RetType: types.NewFieldType(mysql.TypeNewDecimal), Index: 0}, - []types.Datum{types.NewDecimalDatum(types.NewDecFromStringForTest("123.123"))}, + chunk.MutRowFromDatums([]types.Datum{types.NewDecimalDatum(types.NewDecFromStringForTest("123.123"))}), 123, 123.123, types.NewDecFromFloatForTest(123.123), "123.123", }, { &Column{RetType: types.NewFieldType(mysql.TypeNewDecimal), Index: 0}, - []types.Datum{types.NewDecimalDatum(types.NewDecFromStringForTest("123.555"))}, + chunk.MutRowFromDatums([]types.Datum{types.NewDecimalDatum(types.NewDecFromStringForTest("123.555"))}), 124, 123.555, types.NewDecFromFloatForTest(123.555), "123.555", }, { &Column{RetType: types.NewFieldType(mysql.TypeVarString), Index: 0}, - []types.Datum{types.NewStringDatum("123.123")}, + chunk.MutRowFromDatums([]types.Datum{types.NewStringDatum("123.123")}), 123, 123.123, types.NewDecFromStringForTest("123.123"), "123.123", }, { &Column{RetType: types.NewFieldType(mysql.TypeDatetime), Index: 0}, - []types.Datum{timeDatum}, + chunk.MutRowFromDatums([]types.Datum{timeDatum}), curTimeInt, float64(curTimeInt), 
types.NewDecFromInt(curTimeInt), curTimeString, }, { &Column{RetType: types.NewFieldType(mysql.TypeDatetime), Index: 0}, - []types.Datum{timeWithFspDatum}, + chunk.MutRowFromDatums([]types.Datum{timeWithFspDatum}), int64(curDateInt*1000000 + 130000), curTimeWithFspReal, types.NewDecFromFloatForTest(curTimeWithFspReal), curTimeWithFspString, }, { - &Column{RetType: types.NewFieldType(mysql.TypeDuration), Index: 0}, - []types.Datum{durationDatum}, + durationColumn0, + chunk.MutRowFromDatums([]types.Datum{durationDatum}), 125959, 125959, types.NewDecFromFloatForTest(125959), "12:59:59", }, { - &Column{RetType: types.NewFieldType(mysql.TypeDuration), Index: 0}, - []types.Datum{durationWithFspDatum}, + durationColumn3, + chunk.MutRowFromDatums([]types.Datum{durationWithFspDatum}), 130000, 125959.555, types.NewDecFromFloatForTest(125959.555), "12:59:59.555", }, { &Column{RetType: types.NewFieldType(mysql.TypeEnum), Index: 0}, - []types.Datum{types.NewDatum(types.Enum{Name: "a", Value: 123})}, + chunk.MutRowFromDatums([]types.Datum{types.NewDatum(types.Enum{Name: "a", Value: 123})}), 123, 123, types.NewDecFromStringForTest("123"), "a", }, { &Constant{RetType: types.NewFieldType(mysql.TypeVarString), Value: types.NewBinaryLiteralDatum(types.NewBinaryLiteralFromUint(0x61, -1))}, - nil, + chunk.MutRowFromDatums([]types.Datum{types.NewDatum(nil)}), 97, 97, types.NewDecFromInt(0x61), "a", }, } @@ -1084,7 +1093,7 @@ func (s *testEvaluatorSuite) TestWrapWithCastAsTypesClasses(c *C) { // Test wrapping with CastAsInt. intExpr := WrapWithCastAsInt(ctx, t.expr) c.Assert(intExpr.GetType().EvalType(), Equals, types.ETInt) - intRes, isNull, err := intExpr.EvalInt(ctx, t.row) + intRes, isNull, err := intExpr.EvalInt(ctx, t.row.ToRow()) c.Assert(err, IsNil, Commentf("cast[%v]: %#v", i, t)) c.Assert(isNull, Equals, false) c.Assert(intRes, Equals, t.intRes) @@ -1092,7 +1101,7 @@ func (s *testEvaluatorSuite) TestWrapWithCastAsTypesClasses(c *C) { // Test wrapping with CastAsReal. realExpr := WrapWithCastAsReal(ctx, t.expr) c.Assert(realExpr.GetType().EvalType(), Equals, types.ETReal) - realRes, isNull, err := realExpr.EvalReal(ctx, t.row) + realRes, isNull, err := realExpr.EvalReal(ctx, t.row.ToRow()) c.Assert(err, IsNil) c.Assert(isNull, Equals, false) c.Assert(realRes, Equals, t.realRes, Commentf("cast[%v]: %#v", i, t)) @@ -1100,7 +1109,7 @@ func (s *testEvaluatorSuite) TestWrapWithCastAsTypesClasses(c *C) { // Test wrapping with CastAsDecimal. decExpr := WrapWithCastAsDecimal(ctx, t.expr) c.Assert(decExpr.GetType().EvalType(), Equals, types.ETDecimal) - decRes, isNull, err := decExpr.EvalDecimal(ctx, t.row) + decRes, isNull, err := decExpr.EvalDecimal(ctx, t.row.ToRow()) c.Assert(err, IsNil, Commentf("case[%v]: %#v\n", i, t)) c.Assert(isNull, Equals, false) c.Assert(decRes.Compare(t.decRes), Equals, 0, Commentf("case[%v]: %#v\n", i, t)) @@ -1108,7 +1117,7 @@ func (s *testEvaluatorSuite) TestWrapWithCastAsTypesClasses(c *C) { // Test wrapping with CastAsString. strExpr := WrapWithCastAsString(ctx, t.expr) c.Assert(strExpr.GetType().EvalType().IsStringKind(), IsTrue) - strRes, isNull, err := strExpr.EvalString(ctx, t.row) + strRes, isNull, err := strExpr.EvalString(ctx, t.row.ToRow()) c.Assert(err, IsNil) c.Assert(isNull, Equals, false) c.Assert(strRes, Equals, t.stringRes) @@ -1119,12 +1128,12 @@ func (s *testEvaluatorSuite) TestWrapWithCastAsTypesClasses(c *C) { // test cast unsigned int as string. 
strExpr := WrapWithCastAsString(ctx, unsignedIntExpr) c.Assert(strExpr.GetType().EvalType().IsStringKind(), IsTrue) - strRes, isNull, err := strExpr.EvalString(ctx, types.DatumRow{types.NewUintDatum(math.MaxUint64)}) + strRes, isNull, err := strExpr.EvalString(ctx, chunk.MutRowFromDatums([]types.Datum{types.NewUintDatum(math.MaxUint64)}).ToRow()) c.Assert(err, IsNil) c.Assert(strRes, Equals, strconv.FormatUint(math.MaxUint64, 10)) c.Assert(isNull, Equals, false) - strRes, isNull, err = strExpr.EvalString(ctx, types.DatumRow{types.NewUintDatum(1234)}) + strRes, isNull, err = strExpr.EvalString(ctx, chunk.MutRowFromDatums([]types.Datum{types.NewUintDatum(1234)}).ToRow()) c.Assert(err, IsNil) c.Assert(isNull, Equals, false) c.Assert(strRes, Equals, strconv.FormatUint(uint64(1234), 10)) @@ -1132,7 +1141,7 @@ func (s *testEvaluatorSuite) TestWrapWithCastAsTypesClasses(c *C) { // test cast unsigned int as decimal. decExpr := WrapWithCastAsDecimal(ctx, unsignedIntExpr) c.Assert(decExpr.GetType().EvalType(), Equals, types.ETDecimal) - decRes, isNull, err := decExpr.EvalDecimal(ctx, types.DatumRow{types.NewUintDatum(uint64(1234))}) + decRes, isNull, err := decExpr.EvalDecimal(ctx, chunk.MutRowFromDatums([]types.Datum{types.NewUintDatum(uint64(1234))}).ToRow()) c.Assert(err, IsNil) c.Assert(isNull, Equals, false) c.Assert(decRes.Compare(types.NewDecFromUint(uint64(1234))), Equals, 0) @@ -1140,7 +1149,7 @@ func (s *testEvaluatorSuite) TestWrapWithCastAsTypesClasses(c *C) { // test cast unsigned int as Time. timeExpr := WrapWithCastAsTime(ctx, unsignedIntExpr, types.NewFieldType(mysql.TypeDatetime)) c.Assert(timeExpr.GetType().Tp, Equals, mysql.TypeDatetime) - timeRes, isNull, err := timeExpr.EvalTime(ctx, types.DatumRow{types.NewUintDatum(uint64(curTimeInt))}) + timeRes, isNull, err := timeExpr.EvalTime(ctx, chunk.MutRowFromDatums([]types.Datum{types.NewUintDatum(uint64(curTimeInt))}).ToRow()) c.Assert(err, IsNil) c.Assert(isNull, Equals, false) c.Assert(timeRes.Compare(tm), Equals, 0) diff --git a/expression/builtin_info_test.go b/expression/builtin_info_test.go index bfdd6327a926f..670d2a9b36765 100644 --- a/expression/builtin_info_test.go +++ b/expression/builtin_info_test.go @@ -22,6 +22,7 @@ import ( "github.com/pingcap/tidb/types" "github.com/pingcap/tidb/util/auth" "github.com/pingcap/tidb/util/charset" + "github.com/pingcap/tidb/util/chunk" "github.com/pingcap/tidb/util/mock" "github.com/pingcap/tidb/util/printer" "github.com/pingcap/tidb/util/testleak" @@ -46,7 +47,7 @@ func (s *testEvaluatorSuite) TestDatabase(c *C) { c.Assert(fc, NotNil) f, err = fc.getFunction(ctx, nil) c.Assert(err, IsNil) - d, err = evalBuiltinFunc(f, types.DatumRow(types.MakeDatums())) + d, err = evalBuiltinFunc(f, chunk.MutRowFromDatums(types.MakeDatums()).ToRow()) c.Assert(err, IsNil) c.Assert(d.GetString(), Equals, "test") } diff --git a/expression/builtin_other_test.go b/expression/builtin_other_test.go index 8bb26c89cbbd6..c92136203ce48 100644 --- a/expression/builtin_other_test.go +++ b/expression/builtin_other_test.go @@ -24,6 +24,7 @@ import ( "github.com/pingcap/tidb/sessionctx/stmtctx" "github.com/pingcap/tidb/types" "github.com/pingcap/tidb/types/json" + "github.com/pingcap/tidb/util/chunk" "github.com/pingcap/tidb/util/hack" "github.com/pingcap/tidb/util/testleak" ) @@ -123,7 +124,7 @@ func (s *testEvaluatorSuite) TestInFunc(c *C) { for _, tc := range testCases { fn, err := fc.getFunction(s.ctx, s.datumsToConstants(types.MakeDatums(tc.args...))) c.Assert(err, IsNil) - d, err := evalBuiltinFunc(fn, 
types.DatumRow(types.MakeDatums(tc.args...))) + d, err := evalBuiltinFunc(fn, chunk.MutRowFromDatums(types.MakeDatums(tc.args...)).ToRow()) c.Assert(err, IsNil) c.Assert(d.GetValue(), Equals, tc.res, Commentf("%v", types.MakeDatums(tc.args))) } @@ -152,7 +153,7 @@ func (s *testEvaluatorSuite) TestSetVar(c *C) { for _, tc := range testCases { fn, err := fc.getFunction(s.ctx, s.datumsToConstants(types.MakeDatums(tc.args...))) c.Assert(err, IsNil) - d, err := evalBuiltinFunc(fn, types.DatumRow(types.MakeDatums(tc.args...))) + d, err := evalBuiltinFunc(fn, chunk.MutRowFromDatums(types.MakeDatums(tc.args...)).ToRow()) c.Assert(err, IsNil) c.Assert(d.GetString(), Equals, tc.res) if tc.args[1] != nil { @@ -193,7 +194,7 @@ func (s *testEvaluatorSuite) TestGetVar(c *C) { for _, tc := range testCases { fn, err := fc.getFunction(s.ctx, s.datumsToConstants(types.MakeDatums(tc.args...))) c.Assert(err, IsNil) - d, err := evalBuiltinFunc(fn, types.DatumRow(types.MakeDatums(tc.args...))) + d, err := evalBuiltinFunc(fn, chunk.MutRowFromDatums(types.MakeDatums(tc.args...)).ToRow()) c.Assert(err, IsNil) c.Assert(d.GetString(), Equals, tc.res) } @@ -208,11 +209,11 @@ func (s *testEvaluatorSuite) TestValues(c *C) { c.Assert(err, IsNil) _, err = evalBuiltinFunc(sig, nil) c.Assert(err.Error(), Equals, "Session current insert values is nil") - s.ctx.GetSessionVars().CurrInsertValues = types.DatumRow(types.MakeDatums("1")) + s.ctx.GetSessionVars().CurrInsertValues = chunk.MutRowFromDatums(types.MakeDatums("1")).ToRow() _, err = evalBuiltinFunc(sig, nil) c.Assert(err.Error(), Equals, fmt.Sprintf("Session current insert values len %d and column's offset %v don't match", 1, 1)) currInsertValues := types.MakeDatums("1", "2") - s.ctx.GetSessionVars().CurrInsertValues = types.DatumRow(currInsertValues) + s.ctx.GetSessionVars().CurrInsertValues = chunk.MutRowFromDatums(currInsertValues).ToRow() ret, err := evalBuiltinFunc(sig, nil) c.Assert(err, IsNil) cmp, err := ret.CompareDatum(nil, &currInsertValues[1]) diff --git a/expression/distsql_builtin_test.go b/expression/distsql_builtin_test.go index c917a477a6975..ee0a65001a1d5 100644 --- a/expression/distsql_builtin_test.go +++ b/expression/distsql_builtin_test.go @@ -20,6 +20,7 @@ import ( "github.com/pingcap/tidb/mysql" "github.com/pingcap/tidb/sessionctx/stmtctx" "github.com/pingcap/tidb/types" + "github.com/pingcap/tidb/util/chunk" "github.com/pingcap/tidb/util/codec" "github.com/pingcap/tipb/go-tipb" log "github.com/sirupsen/logrus" @@ -32,9 +33,9 @@ type testEvalSuite struct{} // TestEval test expr.Eval(). // TODO: add more tests. func (s *testEvalSuite) TestEval(c *C) { - row := types.DatumRow{types.NewDatum(100)} + row := chunk.MutRowFromDatums([]types.Datum{types.NewDatum(100)}).ToRow() fieldTps := make([]*types.FieldType, 1) - fieldTps[0] = types.NewFieldType(mysql.TypeDouble) + fieldTps[0] = types.NewFieldType(mysql.TypeLonglong) tests := []struct { expr *tipb.Expr result types.Datum diff --git a/meta/meta.go b/meta/meta.go index 0c0b4309b8e8d..2c5ed10563c81 100644 --- a/meta/meta.go +++ b/meta/meta.go @@ -86,11 +86,19 @@ type Meta struct { } // NewMeta creates a Meta in transaction txn. -func NewMeta(txn kv.Transaction) *Meta { +// If the current Meta needs to handle a job, jobListKey is the type of the job's list. 
+func NewMeta(txn kv.Transaction, jobListKeys ...JobListKeyType) *Meta { txn.SetOption(kv.Priority, kv.PriorityHigh) txn.SetOption(kv.SyncLog, true) t := structure.NewStructure(txn, txn, mMetaPrefix) - return &Meta{txn: t, StartTS: txn.StartTS(), jobListKey: DefaultJobListKey} + listKey := DefaultJobListKey + if len(jobListKeys) != 0 { + listKey = jobListKeys[0] + } + return &Meta{txn: t, + StartTS: txn.StartTS(), + jobListKey: listKey, + } } // NewSnapshotMeta creates a Meta with snapshot. @@ -459,11 +467,6 @@ var ( AddIndexJobListKey JobListKeyType = mDDLJobAddIdxList ) -// SetJobListKey sets the job list key. -func (m *Meta) SetJobListKey(key []byte) { - m.jobListKey = key -} - func (m *Meta) enQueueDDLJob(key []byte, job *model.Job) error { b, err := job.Encode(true) if err != nil { @@ -505,9 +508,17 @@ func (m *Meta) getDDLJob(key []byte, index int64) (*model.Job, error) { } // GetDDLJob returns the DDL job with index. -func (m *Meta) GetDDLJob(index int64) (*model.Job, error) { +// The length of jobListKeys can only be 1 or 0. +// If its length is 1, we need to replace m.jobListKey with jobListKeys[0]. +// Otherwise, we use m.jobListKey directly. +func (m *Meta) GetDDLJob(index int64, jobListKeys ...JobListKeyType) (*model.Job, error) { + listKey := m.jobListKey + if len(jobListKeys) != 0 { + listKey = jobListKeys[0] + } + startTime := time.Now() - job, err := m.getDDLJob(m.jobListKey, index) + job, err := m.getDDLJob(listKey, index) metrics.MetaHistogram.WithLabelValues(metrics.GetDDLJob, metrics.RetLabel(err)).Observe(time.Since(startTime).Seconds()) return job, errors.Trace(err) } @@ -524,21 +535,44 @@ func (m *Meta) updateDDLJob(index int64, job *model.Job, key []byte, updateRawAr // UpdateDDLJob updates the DDL job with index. // updateRawArgs is used to determine whether to update the raw args when encode the job. -func (m *Meta) UpdateDDLJob(index int64, job *model.Job, updateRawArgs bool) error { +// The length of jobListKeys can only be 1 or 0. +// If its length is 1, we need to replace m.jobListKey with jobListKeys[0]. +// Otherwise, we use m.jobListKey directly. +func (m *Meta) UpdateDDLJob(index int64, job *model.Job, updateRawArgs bool, jobListKeys ...JobListKeyType) error { + listKey := m.jobListKey + if len(jobListKeys) != 0 { + listKey = jobListKeys[0] + } + startTime := time.Now() - err := m.updateDDLJob(index, job, m.jobListKey, updateRawArgs) + err := m.updateDDLJob(index, job, listKey, updateRawArgs) metrics.MetaHistogram.WithLabelValues(metrics.UpdateDDLJob, metrics.RetLabel(err)).Observe(time.Since(startTime).Seconds()) return errors.Trace(err) } // DDLJobQueueLen returns the DDL job queue length. -func (m *Meta) DDLJobQueueLen() (int64, error) { - return m.txn.LLen(m.jobListKey) +// The length of jobListKeys can only be 1 or 0. +// If its length is 1, we need to replace m.jobListKey with jobListKeys[0]. +// Otherwise, we use m.jobListKey directly. +func (m *Meta) DDLJobQueueLen(jobListKeys ...JobListKeyType) (int64, error) { + listKey := m.jobListKey + if len(jobListKeys) != 0 { + listKey = jobListKeys[0] + } + return m.txn.LLen(listKey) } -// GetAllDDLJobs gets all DDL Jobs. -func (m *Meta) GetAllDDLJobs() ([]*model.Job, error) { - values, err := m.txn.LGetAll(mDDLJobListKey) +// GetAllDDLJobsInQueue gets all DDL Jobs in the current queue. +// The length of jobListKeys can only be 1 or 0. +// If its length is 1, we need to replace m.jobListKey with jobListKeys[0]. +// Otherwise, we use m.jobListKey directly. 
+func (m *Meta) GetAllDDLJobsInQueue(jobListKeys ...JobListKeyType) ([]*model.Job, error) { + listKey := m.jobListKey + if len(jobListKeys) != 0 { + listKey = jobListKeys[0] + } + + values, err := m.txn.LGetAll(listKey) if err != nil || values == nil { return nil, errors.Trace(err) } diff --git a/meta/meta_test.go b/meta/meta_test.go index d514752dc082c..88ae48a2b90ff 100644 --- a/meta/meta_test.go +++ b/meta/meta_test.go @@ -311,12 +311,12 @@ func (s *testSuite) TestDDL(c *C) { lastID = job.ID } - // Test GetAllDDLJobs. + // Test GetAllDDLJobsInQueue. err = t.EnQueueDDLJob(job) job1 := &model.Job{ID: 2} err = t.EnQueueDDLJob(job1) c.Assert(err, IsNil) - jobs, err := t.GetAllDDLJobs() + jobs, err := t.GetAllDDLJobsInQueue() c.Assert(err, IsNil) expectJobs := []*model.Job{job, job1} c.Assert(jobs, DeepEquals, expectJobs) diff --git a/model/ddl.go b/model/ddl.go index 596606fa6b7b9..c3716a2fc4f50 100644 --- a/model/ddl.go +++ b/model/ddl.go @@ -52,6 +52,9 @@ const ( ActionDropTablePartition ActionType = 20 ) +// AddIndexStr is a string related to the operation of "add index". +const AddIndexStr = "add index" + var actionMap = map[ActionType]string{ ActionCreateSchema: "create schema", ActionDropSchema: "drop schema", @@ -59,7 +62,7 @@ var actionMap = map[ActionType]string{ ActionDropTable: "drop table", ActionAddColumn: "add column", ActionDropColumn: "drop column", - ActionAddIndex: "add index", + ActionAddIndex: AddIndexStr, ActionDropIndex: "drop index", ActionAddForeignKey: "add foreign key", ActionDropForeignKey: "drop foreign key", @@ -271,6 +274,7 @@ func (job *Job) IsDependentOn(other *Job) (bool, error) { return isDependent, errors.Trace(err) } + // TODO: If a job is ActionRenameTable, we need to check table name. if other.TableID == job.TableID { return true, nil } diff --git a/model/model.go b/model/model.go index 28d534660665d..49ea950705d9a 100644 --- a/model/model.go +++ b/model/model.go @@ -298,8 +298,6 @@ type IndexColumn struct { // for indexing; // UnspecifedLength if not using prefix indexing Length int `json:"length"` - // Tp is the index column field type. - Tp *types.FieldType } // Clone clones IndexColumn. 
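Across the expression test hunks above, and again in server/util_test.go below, this patch replaces types.DatumRow with chunk-backed rows. The recurring pattern: build a chunk.MutRow from datums, hand the immutable chunk.Row view returned by ToRow() to the evaluation code, and mutate columns in place with SetDatum instead of assigning through a datum slice. A minimal sketch of that pattern, using only the util/chunk calls visible in the hunks; the Row getters GetInt64 and GetString do not appear in this diff and are assumed here:

package main

import (
	"fmt"

	"github.com/pingcap/tidb/types"
	"github.com/pingcap/tidb/util/chunk"
)

func main() {
	// MutRow owns a single-row buffer; ToRow returns the chunk.Row view
	// that the Update/eval* functions take after this patch.
	mutRow := chunk.MutRowFromDatums(types.MakeDatums(1, "x"))
	row := mutRow.ToRow()
	fmt.Println(row.GetInt64(0), row.GetString(1)) // 1 x

	// Replaces the old write-through-the-slice style (row[0].SetInt64(2)
	// on a types.DatumRow): mutate the MutRow, then take a fresh view.
	mutRow.SetDatum(0, types.NewIntDatum(2))
	fmt.Println(mutRow.ToRow().GetInt64(0)) // 2
}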
diff --git a/plan/plan_to_pb_test.go b/plan/plan_to_pb_test.go
index 7bc15191c0cba..3c66b5cd48d31 100644
--- a/plan/plan_to_pb_test.go
+++ b/plan/plan_to_pb_test.go
@@ -140,7 +140,7 @@ func (s *testDistsqlSuite) TestIndexScanToProto(c *C) {
 		Name:  name,
 		State: model.StatePublic,
 		Columns: []*model.IndexColumn{
-			{Tp: tp, Name: name, Length: types.UnspecifiedLength},
+			{Length: types.UnspecifiedLength},
 		},
 	}
 	p := new(PhysicalIndexScan)
diff --git a/server/util_test.go b/server/util_test.go
index dd3d18399589d..655b13837ead6 100644
--- a/server/util_test.go
+++ b/server/util_test.go
@@ -22,6 +22,7 @@ import (
 	"github.com/pingcap/tidb/session"
 	"github.com/pingcap/tidb/store/mockstore"
 	"github.com/pingcap/tidb/types"
+	"github.com/pingcap/tidb/util/chunk"
 	"github.com/pingcap/tidb/util/testleak"
 )
@@ -86,45 +87,45 @@ func (s *testUtilSuite) TestDumpTextValue(c *C) {
 		Type:    mysql.TypeLonglong,
 		Decimal: mysql.NotFixedDec,
 	}}
-	bs, err := dumpTextRow(nil, columns, types.DatumRow{types.NewIntDatum(10)})
+	bs, err := dumpTextRow(nil, columns, chunk.MutRowFromDatums([]types.Datum{types.NewIntDatum(10)}).ToRow())
 	c.Assert(err, IsNil)
 	c.Assert(mustDecodeStr(c, bs), Equals, "10")

-	bs, err = dumpTextRow(nil, columns, types.DatumRow{types.NewUintDatum(11)})
+	bs, err = dumpTextRow(nil, columns, chunk.MutRowFromDatums([]types.Datum{types.NewUintDatum(11)}).ToRow())
 	c.Assert(err, IsNil)
 	c.Assert(mustDecodeStr(c, bs), Equals, "11")

 	columns[0].Type = mysql.TypeFloat
 	columns[0].Decimal = 1
 	f32 := types.NewFloat32Datum(1.2)
-	bs, err = dumpTextRow(nil, columns, types.DatumRow{f32})
+	bs, err = dumpTextRow(nil, columns, chunk.MutRowFromDatums([]types.Datum{f32}).ToRow())
 	c.Assert(err, IsNil)
 	c.Assert(mustDecodeStr(c, bs), Equals, "1.2")

 	columns[0].Decimal = 2
-	bs, err = dumpTextRow(nil, columns, types.DatumRow{f32})
+	bs, err = dumpTextRow(nil, columns, chunk.MutRowFromDatums([]types.Datum{f32}).ToRow())
 	c.Assert(err, IsNil)
 	c.Assert(mustDecodeStr(c, bs), Equals, "1.20")

 	f64 := types.NewFloat64Datum(2.2)
 	columns[0].Type = mysql.TypeDouble
 	columns[0].Decimal = 1
-	bs, err = dumpTextRow(nil, columns, types.DatumRow{f64})
+	bs, err = dumpTextRow(nil, columns, chunk.MutRowFromDatums([]types.Datum{f64}).ToRow())
 	c.Assert(err, IsNil)
 	c.Assert(mustDecodeStr(c, bs), Equals, "2.2")

 	columns[0].Decimal = 2
-	bs, err = dumpTextRow(nil, columns, types.DatumRow{f64})
+	bs, err = dumpTextRow(nil, columns, chunk.MutRowFromDatums([]types.Datum{f64}).ToRow())
 	c.Assert(err, IsNil)
 	c.Assert(mustDecodeStr(c, bs), Equals, "2.20")

 	columns[0].Type = mysql.TypeBlob
-	bs, err = dumpTextRow(nil, columns, types.DatumRow{types.NewBytesDatum([]byte("foo"))})
+	bs, err = dumpTextRow(nil, columns, chunk.MutRowFromDatums([]types.Datum{types.NewBytesDatum([]byte("foo"))}).ToRow())
 	c.Assert(err, IsNil)
 	c.Assert(mustDecodeStr(c, bs), Equals, "foo")

 	columns[0].Type = mysql.TypeVarchar
-	bs, err = dumpTextRow(nil, columns, types.DatumRow{types.NewStringDatum("bar")})
+	bs, err = dumpTextRow(nil, columns, chunk.MutRowFromDatums([]types.Datum{types.NewStringDatum("bar")}).ToRow())
 	c.Assert(err, IsNil)
 	c.Assert(mustDecodeStr(c, bs), Equals, "bar")
@@ -134,7 +135,7 @@ func (s *testUtilSuite) TestDumpTextValue(c *C) {
 	c.Assert(err, IsNil)
 	d.SetMysqlTime(time)
 	columns[0].Type = mysql.TypeDatetime
-	bs, err = dumpTextRow(nil, columns, types.DatumRow{d})
+	bs, err = dumpTextRow(nil, columns, chunk.MutRowFromDatums([]types.Datum{d}).ToRow())
 	c.Assert(err, IsNil)
 	c.Assert(mustDecodeStr(c, bs), Equals, "2017-01-06 00:00:00")
@@ -142,13 +143,14 @@ func (s *testUtilSuite) TestDumpTextValue(c *C) {
 	c.Assert(err, IsNil)
 	d.SetMysqlDuration(duration)
 	columns[0].Type = mysql.TypeDuration
-	bs, err = dumpTextRow(nil, columns, types.DatumRow{d})
+	columns[0].Decimal = 0
+	bs, err = dumpTextRow(nil, columns, chunk.MutRowFromDatums([]types.Datum{d}).ToRow())
 	c.Assert(err, IsNil)
 	c.Assert(mustDecodeStr(c, bs), Equals, "11:30:45")

 	d.SetMysqlDecimal(types.NewDecFromStringForTest("1.23"))
 	columns[0].Type = mysql.TypeNewDecimal
-	bs, err = dumpTextRow(nil, columns, types.DatumRow{d})
+	bs, err = dumpTextRow(nil, columns, chunk.MutRowFromDatums([]types.Datum{d}).ToRow())
 	c.Assert(err, IsNil)
 	c.Assert(mustDecodeStr(c, bs), Equals, "1.23")
 }
diff --git a/session/session.go b/session/session.go
index 066b9427004d5..311953b787f87 100644
--- a/session/session.go
+++ b/session/session.go
@@ -689,16 +689,20 @@ func (s *session) GetGlobalSysVar(name string) (string, error) {
 }

 // SetGlobalSysVar implements GlobalVarAccessor.SetGlobalSysVar interface.
-func (s *session) SetGlobalSysVar(name string, value string) error {
+func (s *session) SetGlobalSysVar(name, value string) error {
 	if name == variable.SQLModeVar {
 		value = mysql.FormatSQLModeStr(value)
 		if _, err := mysql.GetSQLMode(value); err != nil {
 			return errors.Trace(err)
 		}
 	}
+	sVal, err := variable.ValidateSetSystemVar(s.sessionVars, name, value)
+	if err != nil {
+		return errors.Trace(err)
+	}
 	sql := fmt.Sprintf(`REPLACE %s.%s VALUES ('%s', '%s');`,
-		mysql.SystemDB, mysql.GlobalVariablesTable, strings.ToLower(name), value)
-	_, _, err := s.ExecRestrictedSQL(s, sql)
+		mysql.SystemDB, mysql.GlobalVariablesTable, strings.ToLower(name), sVal)
+	_, _, err = s.ExecRestrictedSQL(s, sql)
 	return errors.Trace(err)
 }
diff --git a/sessionctx/variable/session.go b/sessionctx/variable/session.go
index 05bbd02b538bf..c792411e42b3e 100644
--- a/sessionctx/variable/session.go
+++ b/sessionctx/variable/session.go
@@ -545,8 +545,7 @@ func (s *SessionVars) SetSystemVar(name string, val string) error {
 	case TiDBEnableTablePartition:
 		s.EnableTablePartition = TiDBOptOn(val)
 	case TiDBDDLReorgWorkerCount:
-		workerCnt := tidbOptPositiveInt32(val, DefTiDBDDLReorgWorkerCount)
-		SetDDLReorgWorkerCounter(int32(workerCnt))
+		SetDDLReorgWorkerCounter(int32(tidbOptPositiveInt32(val, DefTiDBDDLReorgWorkerCount)))
 	}
 	s.systems[name] = val
 	return nil
 }
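The test changes above all follow one mechanical migration: dumpTextRow no longer takes a types.DatumRow but a chunk.Row. A minimal sketch of the conversion pattern, using only the two chunk calls that appear in the updated tests (the helper name is hypothetical):

// Sketch: build a one-row mutable chunk from datums, then take the
// immutable chunk.Row view that the server's dump functions now expect.
package example

import (
	"github.com/pingcap/tidb/types"
	"github.com/pingcap/tidb/util/chunk"
)

// rowFromDatums converts a slice of datums into a chunk.Row.
func rowFromDatums(ds ...types.Datum) chunk.Row {
	return chunk.MutRowFromDatums(ds).ToRow()
}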
diff --git a/sessionctx/variable/sysvar.go b/sessionctx/variable/sysvar.go
index c2112ccf811c3..54bc6c7d83fb6 100644
--- a/sessionctx/variable/sysvar.go
+++ b/sessionctx/variable/sysvar.go
@@ -58,20 +58,26 @@ func GetSysVar(name string) *SysVar {

 // Variable error codes.
 const (
-	CodeUnknownStatusVar   terror.ErrCode = 1
-	CodeUnknownSystemVar   terror.ErrCode = mysql.ErrUnknownSystemVariable
-	CodeIncorrectScope     terror.ErrCode = mysql.ErrIncorrectGlobalLocalVar
-	CodeUnknownTimeZone    terror.ErrCode = mysql.ErrUnknownTimeZone
-	CodeReadOnly           terror.ErrCode = mysql.ErrVariableIsReadonly
+	CodeUnknownStatusVar    terror.ErrCode = 1
+	CodeUnknownSystemVar    terror.ErrCode = mysql.ErrUnknownSystemVariable
+	CodeIncorrectScope      terror.ErrCode = mysql.ErrIncorrectGlobalLocalVar
+	CodeUnknownTimeZone     terror.ErrCode = mysql.ErrUnknownTimeZone
+	CodeReadOnly            terror.ErrCode = mysql.ErrVariableIsReadonly
+	CodeWrongValueForVar    terror.ErrCode = mysql.ErrWrongValueForVar
+	CodeWrongTypeForVar     terror.ErrCode = mysql.ErrWrongTypeForVar
+	CodeTruncatedWrongValue terror.ErrCode = mysql.ErrTruncatedWrongValue
 )

 // Variable errors
 var (
-	UnknownStatusVar   = terror.ClassVariable.New(CodeUnknownStatusVar, "unknown status variable")
-	UnknownSystemVar   = terror.ClassVariable.New(CodeUnknownSystemVar, mysql.MySQLErrName[mysql.ErrUnknownSystemVariable])
-	ErrIncorrectScope  = terror.ClassVariable.New(CodeIncorrectScope, mysql.MySQLErrName[mysql.ErrIncorrectGlobalLocalVar])
-	ErrUnknownTimeZone = terror.ClassVariable.New(CodeUnknownTimeZone, mysql.MySQLErrName[mysql.ErrUnknownTimeZone])
-	ErrReadOnly        = terror.ClassVariable.New(CodeReadOnly, "variable is read only")
+	UnknownStatusVar       = terror.ClassVariable.New(CodeUnknownStatusVar, "unknown status variable")
+	UnknownSystemVar       = terror.ClassVariable.New(CodeUnknownSystemVar, mysql.MySQLErrName[mysql.ErrUnknownSystemVariable])
+	ErrIncorrectScope      = terror.ClassVariable.New(CodeIncorrectScope, mysql.MySQLErrName[mysql.ErrIncorrectGlobalLocalVar])
+	ErrUnknownTimeZone     = terror.ClassVariable.New(CodeUnknownTimeZone, mysql.MySQLErrName[mysql.ErrUnknownTimeZone])
+	ErrReadOnly            = terror.ClassVariable.New(CodeReadOnly, "variable is read only")
+	ErrWrongValueForVar    = terror.ClassVariable.New(CodeWrongValueForVar, mysql.MySQLErrName[mysql.ErrWrongValueForVar])
+	ErrWrongTypeForVar     = terror.ClassVariable.New(CodeWrongTypeForVar, mysql.MySQLErrName[mysql.ErrWrongTypeForVar])
+	ErrTruncatedWrongValue = terror.ClassVariable.New(CodeTruncatedWrongValue, mysql.MySQLErrName[mysql.ErrTruncatedWrongValue])
 )

 func init() {
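A quick sketch of how the newly registered errors behave; the helper names are hypothetical, but GenByArgs and terror.ErrorEqual are used exactly this way by the varsutil code and tests later in this diff:

// Sketch: GenByArgs fills the MySQL-style message template of a terror
// instance; ErrorEqual matches by error class and code, which is how
// callers recognize validation failures regardless of message contents.
package example

import (
	"github.com/pingcap/tidb/sessionctx/variable"
	"github.com/pingcap/tidb/terror"
)

func newWrongValue(name, value string) error {
	return variable.ErrWrongValueForVar.GenByArgs(name, value)
}

func isWrongValue(err error) bool {
	return terror.ErrorEqual(err, variable.ErrWrongValueForVar)
}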
@@ -83,10 +89,13 @@
 	// Register terror to mysql error map.
 	mySQLErrCodes := map[terror.ErrCode]uint16{
-		CodeUnknownSystemVar: mysql.ErrUnknownSystemVariable,
-		CodeIncorrectScope:   mysql.ErrIncorrectGlobalLocalVar,
-		CodeUnknownTimeZone:  mysql.ErrUnknownTimeZone,
-		CodeReadOnly:         mysql.ErrVariableIsReadonly,
+		CodeUnknownSystemVar:    mysql.ErrUnknownSystemVariable,
+		CodeIncorrectScope:      mysql.ErrIncorrectGlobalLocalVar,
+		CodeUnknownTimeZone:     mysql.ErrUnknownTimeZone,
+		CodeReadOnly:            mysql.ErrVariableIsReadonly,
+		CodeWrongValueForVar:    mysql.ErrWrongValueForVar,
+		CodeWrongTypeForVar:     mysql.ErrWrongTypeForVar,
+		CodeTruncatedWrongValue: mysql.ErrTruncatedWrongValue,
 	}
 	terror.ErrClassToMySQLCodes[terror.ClassVariable] = mySQLErrCodes
 }
@@ -101,17 +110,17 @@ func boolToIntStr(b bool) string {

 // we only support MySQL now
 var defaultSysVars = []*SysVar{
 	{ScopeGlobal, "gtid_mode", "OFF"},
-	{ScopeGlobal, "flush_time", "0"},
-	{ScopeSession, "pseudo_slave_mode", ""},
+	{ScopeGlobal, FlushTime, "0"},
+	{ScopeSession, PseudoSlaveMode, ""},
 	{ScopeNone, "performance_schema_max_mutex_classes", "200"},
-	{ScopeGlobal | ScopeSession, "low_priority_updates", "OFF"},
-	{ScopeGlobal | ScopeSession, "session_track_gtids", ""},
+	{ScopeGlobal | ScopeSession, LowPriorityUpdates, "0"},
+	{ScopeGlobal | ScopeSession, SessionTrackGtids, "OFF"},
 	{ScopeGlobal | ScopeSession, "ndbinfo_max_rows", ""},
 	{ScopeGlobal | ScopeSession, "ndb_index_stat_option", ""},
-	{ScopeGlobal | ScopeSession, "old_passwords", "0"},
+	{ScopeGlobal | ScopeSession, OldPasswords, "0"},
 	{ScopeNone, "innodb_version", "5.6.25"},
-	{ScopeGlobal, "max_connections", "151"},
-	{ScopeGlobal | ScopeSession, "big_tables", "OFF"},
+	{ScopeGlobal, MaxConnections, "151"},
+	{ScopeGlobal | ScopeSession, BigTables, "0"},
 	{ScopeNone, "skip_external_locking", "ON"},
 	{ScopeGlobal, "slave_pending_jobs_size_max", "16777216"},
 	{ScopeNone, "innodb_sync_array_size", "1"},
@@ -121,7 +130,7 @@ var defaultSysVars = []*SysVar{
 	{ScopeGlobal | ScopeSession, "sql_select_limit", "18446744073709551615"},
 	{ScopeGlobal, "ndb_show_foreign_key_mock_tables", ""},
 	{ScopeNone, "multi_range_count", "256"},
-	{ScopeGlobal | ScopeSession, "default_week_format", "0"},
+	{ScopeGlobal | ScopeSession, DefaultWeekFormat, "0"},
 	{ScopeGlobal | ScopeSession, "binlog_error_action", "IGNORE_ERROR"},
 	{ScopeGlobal, "slave_transaction_retries", "10"},
 	{ScopeGlobal | ScopeSession, "default_storage_engine", "InnoDB"},
@@ -137,7 +146,7 @@ var defaultSysVars = []*SysVar{
 	{ScopeNone, "lc_messages_dir", "/usr/local/mysql-5.6.25-osx10.8-x86_64/share/"},
 	{ScopeGlobal, "ft_boolean_syntax", "+ -><()~*:\"\"&|"},
 	{ScopeGlobal, "table_definition_cache", "1400"},
-	{ScopeNone, "skip_name_resolve", "OFF"},
+	{ScopeNone, SkipNameResolve, "0"},
 	{ScopeNone, "performance_schema_max_file_handles", "32768"},
 	{ScopeSession, "transaction_allow_batching", ""},
 	{ScopeGlobal | ScopeSession, SQLModeVar, mysql.DefaultSQLMode},
@@ -148,7 +157,7 @@ var defaultSysVars = []*SysVar{
 	{ScopeGlobal, "innodb_max_purge_lag", "0"},
 	{ScopeGlobal | ScopeSession, "preload_buffer_size", "32768"},
 	{ScopeGlobal, "slave_checkpoint_period", "300"},
-	{ScopeGlobal, "check_proxy_users", ""},
+	{ScopeGlobal, CheckProxyUsers, "0"},
 	{ScopeNone, "have_query_cache", "YES"},
 	{ScopeGlobal, "innodb_flush_log_at_timeout", "1"},
 	{ScopeGlobal, "innodb_max_undo_log_size", ""},
@@ -164,7 +173,7 @@ var defaultSysVars = []*SysVar{
 	{ScopeNone, "innodb_ft_sort_pll_degree", "2"},
 	{ScopeNone, "thread_stack", "262144"},
 	{ScopeGlobal, "relay_log_info_repository", "FILE"},
-	{ScopeGlobal | ScopeSession, "sql_log_bin", "ON"},
+	{ScopeGlobal | ScopeSession, SQLLogBin, "1"},
 	{ScopeGlobal, "super_read_only", "OFF"},
 	{ScopeGlobal | ScopeSession, "max_delayed_threads", "20"},
 	{ScopeNone, "protocol_version", "10"},
@@ -183,7 +192,7 @@ var defaultSysVars = []*SysVar{
 	{ScopeGlobal, "innodb_log_write_ahead_size", ""},
 	{ScopeNone, "innodb_log_group_home_dir", "./"},
 	{ScopeNone, "performance_schema_events_statements_history_size", "10"},
-	{ScopeGlobal, "general_log", "OFF"},
+	{ScopeGlobal, GeneralLog, "0"},
 	{ScopeGlobal, "validate_password_dictionary_file", ""},
 	{ScopeGlobal, "binlog_order_commits", "ON"},
 	{ScopeGlobal, "master_verify_checksum", "OFF"},
@@ -211,15 +220,15 @@ var defaultSysVars = []*SysVar{
 	{ScopeNone, "innodb_autoinc_lock_mode", "1"},
 	{ScopeGlobal, "slave_net_timeout", "3600"},
 	{ScopeGlobal, "key_buffer_size", "8388608"},
-	{ScopeGlobal | ScopeSession, "foreign_key_checks", "ON"},
+	{ScopeGlobal | ScopeSession, ForeignKeyChecks, "1"},
 	{ScopeGlobal, "host_cache_size", "279"},
-	{ScopeGlobal, "delay_key_write", "ON"},
+	{ScopeGlobal, DelayKeyWrite, "ON"},
 	{ScopeNone, "metadata_locks_cache_size", "1024"},
 	{ScopeNone, "innodb_force_recovery", "0"},
 	{ScopeGlobal, "innodb_file_format_max", "Antelope"},
 	{ScopeGlobal | ScopeSession, "debug", ""},
 	{ScopeGlobal, "log_warnings", "1"},
-	{ScopeGlobal, "offline_mode", ""},
+	{ScopeGlobal, OfflineMode, "0"},
 	{ScopeGlobal | ScopeSession, "innodb_strict_mode", "OFF"},
 	{ScopeGlobal, "innodb_rollback_segments", "128"},
 	{ScopeGlobal | ScopeSession, "join_buffer_size", "262144"},
@@ -252,7 +261,7 @@ var defaultSysVars = []*SysVar{
 	{ScopeNone, "thread_concurrency", "10"},
 	{ScopeGlobal | ScopeSession, "query_prealloc_size", "8192"},
 	{ScopeNone, "relay_log_space_limit", "0"},
-	{ScopeGlobal | ScopeSession, "max_user_connections", "0"},
+	{ScopeGlobal | ScopeSession, MaxUserConnections, "0"},
 	{ScopeNone, "performance_schema_max_thread_classes", "50"},
 	{ScopeGlobal, "innodb_api_trx_level", "0"},
 	{ScopeNone, "disconnect_on_expired_password", "ON"},
@@ -316,12 +325,12 @@ var defaultSysVars = []*SysVar{
 	{ScopeGlobal, "ndb_optimization_delay", ""},
 	{ScopeGlobal, "innodb_ft_num_word_optimize", "2000"},
 	{ScopeGlobal | ScopeSession, "max_join_size", "18446744073709551615"},
-	{ScopeNone, "core_file", "OFF"},
+	{ScopeNone, CoreFile, "0"},
 	{ScopeGlobal | ScopeSession, "max_seeks_for_key", "18446744073709551615"},
 	{ScopeNone, "innodb_log_buffer_size", "8388608"},
 	{ScopeGlobal, "delayed_insert_timeout", "300"},
 	{ScopeGlobal, "max_relay_log_size", "0"},
-	{ScopeGlobal | ScopeSession, "max_sort_length", "1024"},
+	{ScopeGlobal | ScopeSession, MaxSortLength, "1024"},
 	{ScopeNone, "metadata_locks_hash_instances", "8"},
 	{ScopeGlobal, "ndb_eventbuffer_free_percent", ""},
 	{ScopeNone, "large_files_support", "ON"},
@@ -461,7 +470,7 @@ var defaultSysVars = []*SysVar{
 	{ScopeGlobal | ScopeSession, "lock_wait_timeout", "31536000"},
 	{ScopeGlobal | ScopeSession, "read_buffer_size", "131072"},
 	{ScopeNone, "innodb_read_io_threads", "4"},
-	{ScopeGlobal | ScopeSession, "max_sp_recursion_depth", "0"},
+	{ScopeGlobal | ScopeSession, MaxSpRecursionDepth, "0"},
 	{ScopeNone, "ignore_builtin_innodb", "OFF"},
 	{ScopeGlobal, "rpl_semi_sync_master_enabled", ""},
 	{ScopeGlobal, "slow_query_log_file", "/usr/local/mysql/data/localhost-slow.log"},
@@ -558,7 +567,7 @@ var defaultSysVars = []*SysVar{
 	{ScopeNone, "back_log", "80"},
 	{ScopeNone, "lower_case_file_system", "ON"},
 	{ScopeGlobal, "rpl_semi_sync_master_wait_no_slave", ""},
-	{ScopeGlobal | ScopeSession, "group_concat_max_len", "1024"},
+	{ScopeGlobal | ScopeSession, GroupConcatMaxLen, "1024"},
 	{ScopeSession, "pseudo_thread_id", ""},
 	{ScopeNone, "socket", "/tmp/myssock"},
 	{ScopeNone, "have_dynamic_loading", "YES"},
@@ -591,23 +600,23 @@ var defaultSysVars = []*SysVar{
 	{ScopeNone, "innodb_undo_directory", "."},
 	{ScopeNone, "bind_address", "*"},
 	{ScopeGlobal, "innodb_sync_spin_loops", "30"},
-	{ScopeGlobal | ScopeSession, "sql_safe_updates", "OFF"},
+	{ScopeGlobal | ScopeSession, SQLSafeUpdates, "0"},
 	{ScopeNone, "tmpdir", "/var/tmp/"},
 	{ScopeGlobal, "innodb_thread_concurrency", "0"},
 	{ScopeGlobal, "slave_allow_batching", "OFF"},
 	{ScopeGlobal, "innodb_buffer_pool_dump_pct", ""},
 	{ScopeGlobal | ScopeSession, "lc_time_names", "en_US"},
 	{ScopeGlobal | ScopeSession, "max_statement_time", ""},
-	{ScopeGlobal | ScopeSession, "end_markers_in_json", "OFF"},
-	{ScopeGlobal, "avoid_temporal_upgrade", "OFF"},
+	{ScopeGlobal | ScopeSession, EndMarkersInJSON, "0"},
+	{ScopeGlobal, AvoidTemporalUpgrade, "0"},
 	{ScopeGlobal, "key_cache_age_threshold", "300"},
 	{ScopeGlobal, "innodb_status_output", "OFF"},
 	{ScopeSession, "identity", ""},
 	{ScopeGlobal | ScopeSession, "min_examined_row_limit", "0"},
 	{ScopeGlobal, "sync_frm", "ON"},
 	{ScopeGlobal, "innodb_online_alter_log_max_size", "134217728"},
-	{ScopeSession, "warning_count", "0"},
-	{ScopeSession, "error_count", "0"},
+	{ScopeSession, WarningCount, "0"},
+	{ScopeSession, ErrorCount, "0"},
 	/* TiDB specific variables */
 	{ScopeSession, TiDBSnapshot, ""},
 	{ScopeSession, TiDBImportingData, "0"},
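With the literals above lifted into package-level constants (declared just below), call sites can refer to a variable without re-typing its name. A small sketch; the helper is hypothetical, but GetSysVar and the Value field are used exactly as in ValidateSetSystemVar later in this diff:

// Sketch: looking up a built-in default through the new name constants.
package example

import "github.com/pingcap/tidb/sessionctx/variable"

// maxConnectionsDefault returns max_connections' built-in default
// ("151" in defaultSysVars above), or "" if the variable is unknown.
func maxConnectionsDefault() string {
	sv := variable.GetSysVar(variable.MaxConnections)
	if sv == nil {
		return ""
	}
	return sv.Value
}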
@@ -683,6 +692,58 @@ const (
 	CharsetDatabase = "character_set_database"
 	// CollationDatabase is the name for collation_database system variable.
 	CollationDatabase = "collation_database"
+	// GeneralLog is the name for 'general_log' system variable.
+	GeneralLog = "general_log"
+	// AvoidTemporalUpgrade is the name for 'avoid_temporal_upgrade' system variable.
+	AvoidTemporalUpgrade = "avoid_temporal_upgrade"
+	// BigTables is the name for 'big_tables' system variable.
+	BigTables = "big_tables"
+	// CheckProxyUsers is the name for 'check_proxy_users' system variable.
+	CheckProxyUsers = "check_proxy_users"
+	// CoreFile is the name for 'core_file' system variable.
+	CoreFile = "core_file"
+	// DefaultWeekFormat is the name for 'default_week_format' system variable.
+	DefaultWeekFormat = "default_week_format"
+	// GroupConcatMaxLen is the name for 'group_concat_max_len' system variable.
+	GroupConcatMaxLen = "group_concat_max_len"
+	// DelayKeyWrite is the name for 'delay_key_write' system variable.
+	DelayKeyWrite = "delay_key_write"
+	// EndMarkersInJSON is the name for 'end_markers_in_json' system variable.
+	EndMarkersInJSON = "end_markers_in_json"
+	// SQLLogBin is the name for 'sql_log_bin' system variable.
+	SQLLogBin = "sql_log_bin"
+	// MaxSortLength is the name for 'max_sort_length' system variable.
+	MaxSortLength = "max_sort_length"
+	// MaxSpRecursionDepth is the name for 'max_sp_recursion_depth' system variable.
+	MaxSpRecursionDepth = "max_sp_recursion_depth"
+	// MaxUserConnections is the name for 'max_user_connections' system variable.
+	MaxUserConnections = "max_user_connections"
+	// OfflineMode is the name for 'offline_mode' system variable.
+	OfflineMode = "offline_mode"
+	// InteractiveTimeout is the name for 'interactive_timeout' system variable.
+	InteractiveTimeout = "interactive_timeout"
+	// FlushTime is the name for 'flush_time' system variable.
+	FlushTime = "flush_time"
+	// PseudoSlaveMode is the name for 'pseudo_slave_mode' system variable.
+	PseudoSlaveMode = "pseudo_slave_mode"
+	// LowPriorityUpdates is the name for 'low_priority_updates' system variable.
+	LowPriorityUpdates = "low_priority_updates"
+	// SessionTrackGtids is the name for 'session_track_gtids' system variable.
+	SessionTrackGtids = "session_track_gtids"
+	// OldPasswords is the name for 'old_passwords' system variable.
+	OldPasswords = "old_passwords"
+	// MaxConnections is the name for 'max_connections' system variable.
+	MaxConnections = "max_connections"
+	// SkipNameResolve is the name for 'skip_name_resolve' system variable.
+	SkipNameResolve = "skip_name_resolve"
+	// ForeignKeyChecks is the name for 'foreign_key_checks' system variable.
+	ForeignKeyChecks = "foreign_key_checks"
+	// SQLSafeUpdates is the name for 'sql_safe_updates' system variable.
+	SQLSafeUpdates = "sql_safe_updates"
+	// WarningCount is the name for 'warning_count' system variable.
+	WarningCount = "warning_count"
+	// ErrorCount is the name for 'error_count' system variable.
+	ErrorCount = "error_count"
 )

 // GlobalVarAccessor is the interface for accessing global scope system and status variables.
diff --git a/sessionctx/variable/varsutil.go b/sessionctx/variable/varsutil.go
index 8e93112bdac50..3ad457c76838a 100644
--- a/sessionctx/variable/varsutil.go
+++ b/sessionctx/variable/varsutil.go
@@ -130,7 +130,11 @@ func SetSessionSystemVar(vars *SessionVars, name string, value types.Datum) erro
 	if value.IsNull() {
 		return vars.deleteSystemVar(name)
 	}
 	sVal, err := value.ToString()
+	if err != nil {
+		return errors.Trace(err)
+	}
+	sVal, err = ValidateSetSystemVar(vars, name, sVal)
 	if err != nil {
 		return errors.Trace(err)
 	}
@@ -156,6 +160,199 @@ func ValidateGetSystemVar(name string, isGlobal bool) error {
 	return nil
 }

+// ValidateSetSystemVar checks that the value satisfies the variable's
+// constraints, and returns the normalized value that should be stored.
+func ValidateSetSystemVar(vars *SessionVars, name string, value string) (string, error) {
+	if strings.EqualFold(value, "DEFAULT") {
+		if val := GetSysVar(name); val != nil {
+			return val.Value, nil
+		}
+		return value, UnknownSystemVar.GenByArgs(name)
+	}
+	switch name {
+	case DefaultWeekFormat:
+		val, err := strconv.Atoi(value)
+		if err != nil {
+			return value, ErrWrongTypeForVar.GenByArgs(name)
+		}
+		if val < 0 {
+			vars.StmtCtx.AppendWarning(ErrTruncatedWrongValue.GenByArgs(name, value))
+			return "0", nil
+		}
+		if val > 7 {
+			vars.StmtCtx.AppendWarning(ErrTruncatedWrongValue.GenByArgs(name, value))
+			return "7", nil
+		}
+	case DelayKeyWrite:
+		if strings.EqualFold(value, "ON") || value == "1" {
+			return "ON", nil
+		} else if strings.EqualFold(value, "OFF") || value == "0" {
+			return "OFF", nil
+		} else if strings.EqualFold(value, "ALL") || value == "2" {
+			return "ALL", nil
+		}
+		return value, ErrWrongValueForVar.GenByArgs(name, value)
+	case FlushTime:
+		val, err := strconv.Atoi(value)
+		if err != nil {
+			return value, ErrWrongTypeForVar.GenByArgs(name)
+		}
+		if val < 0 {
+			vars.StmtCtx.AppendWarning(ErrTruncatedWrongValue.GenByArgs(name, value))
+			return "0", nil
+		}
+	case GroupConcatMaxLen:
+		// ParseUint already bounds the value to the uint64 range, so only
+		// the lower bound needs an explicit check.
+		val, err := strconv.ParseUint(value, 10, 64)
+		if err != nil {
+			return value, ErrWrongTypeForVar.GenByArgs(name)
+		}
+		if val < 4 {
+			vars.StmtCtx.AppendWarning(ErrTruncatedWrongValue.GenByArgs(name, value))
+			return "4", nil
+		}
+	case InteractiveTimeout:
+		val, err := strconv.Atoi(value)
+		if err != nil {
+			return value, ErrWrongTypeForVar.GenByArgs(name)
+		}
+		if val < 1 {
+			vars.StmtCtx.AppendWarning(ErrTruncatedWrongValue.GenByArgs(name, value))
+			return "1", nil
+		}
+	case MaxConnections:
+		val, err := strconv.Atoi(value)
+		if err != nil {
+			return value, ErrWrongTypeForVar.GenByArgs(name)
+		}
+		if val < 1 {
+			vars.StmtCtx.AppendWarning(ErrTruncatedWrongValue.GenByArgs(name, value))
+			return "1", nil
+		}
+		if val > 100000 {
+			vars.StmtCtx.AppendWarning(ErrTruncatedWrongValue.GenByArgs(name, value))
+			return "100000", nil
+		}
+	case MaxSortLength:
+		val, err := strconv.ParseInt(value, 10, 64)
+		if err != nil {
+			return value, ErrWrongTypeForVar.GenByArgs(name)
+		}
+		if val < 4 {
+			vars.StmtCtx.AppendWarning(ErrTruncatedWrongValue.GenByArgs(name, value))
+			return "4", nil
+		}
+		if val > 8388608 {
+			vars.StmtCtx.AppendWarning(ErrTruncatedWrongValue.GenByArgs(name, value))
+			return "8388608", nil
+		}
+	case MaxSpRecursionDepth:
+		val, err := strconv.ParseInt(value, 10, 64)
+		if err != nil {
+			return value, ErrWrongTypeForVar.GenByArgs(name)
+		}
+		if val < 0 {
+			vars.StmtCtx.AppendWarning(ErrTruncatedWrongValue.GenByArgs(name, value))
+			return "0", nil
+		}
+		if val > 255 {
+			vars.StmtCtx.AppendWarning(ErrTruncatedWrongValue.GenByArgs(name, value))
+			return "255", nil
+		}
+	case OldPasswords:
+		val, err := strconv.Atoi(value)
+		if err != nil {
+			return value, ErrWrongTypeForVar.GenByArgs(name)
+		}
+		if val < 0 {
+			vars.StmtCtx.AppendWarning(ErrTruncatedWrongValue.GenByArgs(name, value))
+			return "0", nil
+		}
+		if val > 2 {
+			vars.StmtCtx.AppendWarning(ErrTruncatedWrongValue.GenByArgs(name, value))
+			return "2", nil
+		}
+	case MaxUserConnections:
+		// ParseUint already rejects negative input, so only the upper bound
+		// needs an explicit check.
+		val, err := strconv.ParseUint(value, 10, 64)
+		if err != nil {
+			return value, ErrWrongTypeForVar.GenByArgs(name)
+		}
+		if val > 4294967295 {
+			vars.StmtCtx.AppendWarning(ErrTruncatedWrongValue.GenByArgs(name, value))
+			return "4294967295", nil
+		}
+	case SessionTrackGtids:
+		if strings.EqualFold(value, "OFF") || value == "0" {
+			return "OFF", nil
+		} else if strings.EqualFold(value, "OWN_GTID") || value == "1" {
+			return "OWN_GTID", nil
+		} else if strings.EqualFold(value, "ALL_GTIDS") || value == "2" {
+			return "ALL_GTIDS", nil
+		}
+		return value, ErrWrongValueForVar.GenByArgs(name, value)
+	case TimeZone:
+		if strings.EqualFold(value, "SYSTEM") {
+			return "SYSTEM", nil
+		}
+		return value, nil
+	case WarningCount, ErrorCount:
+		return value, ErrReadOnly
+	case GeneralLog, AvoidTemporalUpgrade, BigTables, CheckProxyUsers, CoreFile, EndMarkersInJSON, SQLLogBin, OfflineMode,
+		PseudoSlaveMode, LowPriorityUpdates, SkipNameResolve, ForeignKeyChecks, SQLSafeUpdates:
+		if strings.EqualFold(value, "ON") || value == "1" {
+			return "1", nil
+		} else if strings.EqualFold(value, "OFF") || value == "0" {
+			return "0", nil
+		}
+		return value, ErrWrongValueForVar.GenByArgs(name, value)
+	case AutocommitVar, TiDBImportingData, TiDBSkipUTF8Check, TiDBOptAggPushDown,
+		TiDBOptInSubqUnFolding, TiDBEnableTablePartition,
+		TiDBBatchInsert, TiDBDisableTxnAutoRetry, TiDBEnableStreaming,
+		TiDBBatchDelete:
+		if strings.EqualFold(value, "ON") || value == "1" || strings.EqualFold(value, "OFF") || value == "0" {
+			return value, nil
+		}
+		return value, ErrWrongValueForVar.GenByArgs(name, value)
+	case TiDBIndexLookupConcurrency, TiDBIndexLookupJoinConcurrency, TiDBIndexJoinBatchSize,
+		TiDBIndexLookupSize,
+		TiDBHashJoinConcurrency,
+		TiDBHashAggPartialConcurrency,
+		TiDBHashAggFinalConcurrency,
+		TiDBDistSQLScanConcurrency,
+		TiDBIndexSerialScanConcurrency, TiDBDDLReorgWorkerCount,
+		TiDBBackoffLockFast, TiDBMaxChunkSize,
+		TiDBDMLBatchSize, TiDBOptimizerSelectivityLevel,
+		TiDBGeneralLog:
+		v, err := strconv.Atoi(value)
+		if err != nil {
+			return value, ErrWrongTypeForVar.GenByArgs(name)
+		}
+		if v <= 0 {
+			return value, ErrWrongValueForVar.GenByArgs(name, value)
+		}
+		return value, nil
+	case TiDBProjectionConcurrency,
+		TIDBMemQuotaQuery,
+		TIDBMemQuotaHashJoin,
+		TIDBMemQuotaMergeJoin,
+		TIDBMemQuotaSort,
+		TIDBMemQuotaTopn,
+		TIDBMemQuotaIndexLookupReader,
+		TIDBMemQuotaIndexLookupJoin,
+		TIDBMemQuotaNestedLoopApply,
+		TiDBRetryLimit:
+		_, err := strconv.ParseInt(value, 10, 64)
+		if err != nil {
+			return value, ErrWrongValueForVar.GenByArgs(name, value)
+		}
+		return value, nil
+	}
+	return value, nil
+}
+
 // TiDBOptOn could be used for all tidb session variable options, we use "ON"/1 to turn on those options.
 func TiDBOptOn(opt string) bool {
 	return strings.EqualFold(opt, "ON") || opt == "1"
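A quick sketch of the contract implemented above (the demo function is hypothetical, and it assumes vars carries an initialized StmtCtx): parseable but out-of-range values are clamped to the nearest bound with a truncation warning appended to the statement context, while unparseable values surface an error.

// Sketch: exercising ValidateSetSystemVar's clamp-and-warn behavior.
package example

import (
	"fmt"

	"github.com/pingcap/tidb/sessionctx/variable"
)

func demoValidate(vars *variable.SessionVars) {
	// Below default_week_format's range: clamped to "0", warning appended.
	v, err := variable.ValidateSetSystemVar(vars, variable.DefaultWeekFormat, "-1")
	fmt.Println(v, err) // "0" <nil>

	// Not a number at all: rejected with ErrWrongTypeForVar.
	_, err = variable.ValidateSetSystemVar(vars, variable.DefaultWeekFormat, "abc")
	fmt.Println(err) // non-nil
}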
@@ -178,7 +375,7 @@ func tidbOptInt64(opt string, defaultVal int64) int64 {
 }

 func parseTimeZone(s string) (*time.Location, error) {
-	if s == "SYSTEM" {
+	if strings.EqualFold(s, "SYSTEM") {
 		// TODO: Support global time_zone variable, it should be set to global time_zone value.
 		return time.Local, nil
 	}
diff --git a/sessionctx/variable/varsutil_test.go b/sessionctx/variable/varsutil_test.go
index 4ec98a2f26bb8..dce99c095bd6c 100644
--- a/sessionctx/variable/varsutil_test.go
+++ b/sessionctx/variable/varsutil_test.go
@@ -223,11 +223,12 @@ func (s *testVarsutilSuite) TestVarsutil(c *C) {
 	SetSessionSystemVar(v, TiDBDDLReorgWorkerCount, types.NewIntDatum(1))
 	c.Assert(GetDDLReorgWorkerCounter(), Equals, int32(1))

-	SetSessionSystemVar(v, TiDBDDLReorgWorkerCount, types.NewIntDatum(-1))
-	c.Assert(GetDDLReorgWorkerCounter(), Equals, int32(DefTiDBDDLReorgWorkerCount))
+	err = SetSessionSystemVar(v, TiDBDDLReorgWorkerCount, types.NewIntDatum(-1))
+	c.Assert(terror.ErrorEqual(err, ErrWrongValueForVar), IsTrue)

 	SetSessionSystemVar(v, TiDBDDLReorgWorkerCount, types.NewIntDatum(int64(maxDDLReorgWorkerCount)+1))
+	// Values above the limit are not rejected by validation; the counter is clamped instead.
 	c.Assert(GetDDLReorgWorkerCounter(), Equals, int32(maxDDLReorgWorkerCount))

 	err = SetSessionSystemVar(v, TiDBRetryLimit, types.NewStringDatum("3"))
 	c.Assert(err, IsNil)
diff --git a/store/mockstore/mocktikv/cop_handler_dag.go b/store/mockstore/mocktikv/cop_handler_dag.go
index 74c7848f79a11..ab6fa5ca8fc46 100644
--- a/store/mockstore/mocktikv/cop_handler_dag.go
+++ b/store/mockstore/mocktikv/cop_handler_dag.go
@@ -86,7 +86,7 @@ func (lm *locCache) getLoc(name string) (*time.Location, error) {
 	}

 	lm.RUnlock()
-	return nil, errors.New(fmt.Sprintf("invalid name for timezone %s", name))
+	return nil, fmt.Errorf("invalid name for timezone %s", name)
 }

 type dagContext struct {
diff --git a/table/tables/index.go b/table/tables/index.go
index ee8413c207dae..23255673b35e0 100644
--- a/table/tables/index.go
+++ b/table/tables/index.go
@@ -100,15 +100,18 @@ func (c *indexIter) Next() (val []types.Datum, h int64, err error) {
 // index is the data structure for index data in the KV store.
 type index struct {
 	idxInfo *model.IndexInfo
+	tblInfo *model.TableInfo
 	prefix  kv.Key
 }

 // NewIndex builds a new Index object.
 // id may be partition or table ID, depends on whether the table is a PartitionedTable.
-func NewIndex(id int64, indexInfo *model.IndexInfo) table.Index {
+func NewIndex(id int64, tblInfo *model.TableInfo, indexInfo *model.IndexInfo) table.Index {
 	index := &index{
 		idxInfo: indexInfo,
-		prefix:  tablecodec.EncodeTableIndexPrefix(id, indexInfo.ID),
+		tblInfo: tblInfo,
+		// The prefix can't be encoded from tblInfo.ID, because for a partitioned
+		// table the id here is the partition ID rather than the table ID.
+		prefix: tablecodec.EncodeTableIndexPrefix(id, indexInfo.ID),
 	}
 	return index
 }
@@ -132,7 +135,8 @@ func (c *index) truncateIndexValuesIfNeeded(indexedValues []types.Datum) []types.Datum {
 		v := &indexedValues[i]
 		if v.Kind() == types.KindString || v.Kind() == types.KindBytes {
 			ic := c.idxInfo.Columns[i]
-			if ic.Tp.Charset == charset.CharsetUTF8 || ic.Tp.Charset == charset.CharsetUTF8MB4 {
+			colCharset := c.tblInfo.Columns[ic.Offset].Charset
+			if colCharset == charset.CharsetUTF8 || colCharset == charset.CharsetUTF8MB4 {
 				val := v.GetBytes()
 				if ic.Length != types.UnspecifiedLength && utf8.RuneCount(val) > ic.Length {
 					rs := bytes.Runes(val)
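A sketch of the new NewIndex contract (the helper is hypothetical): the extra *model.TableInfo lets truncateIndexValuesIfNeeded read each indexed column's charset from tblInfo.Columns[ic.Offset], replacing the removed IndexColumn.Tp field, while the first argument remains the physical ID.

// Sketch: constructing an index for a non-partitioned table, where the
// physical ID is simply the table ID. For a partitioned table the caller
// would pass a partition ID instead, as initTableIndices below does.
package example

import (
	"github.com/pingcap/tidb/model"
	"github.com/pingcap/tidb/table"
	"github.com/pingcap/tidb/table/tables"
)

func buildTableIndex(tblInfo *model.TableInfo, idxInfo *model.IndexInfo) table.Index {
	return tables.NewIndex(tblInfo.ID, tblInfo, idxInfo)
}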
diff --git a/table/tables/index_test.go b/table/tables/index_test.go
index affd556c981e0..d52f7835dd6dd 100644
--- a/table/tables/index_test.go
+++ b/table/tables/index_test.go
@@ -69,7 +69,7 @@ func (s *testIndexSuite) TestIndex(c *C) {
 			},
 		},
 	}
-	index := tables.NewIndex(tblInfo.ID, tblInfo.Indices[0])
+	index := tables.NewIndex(tblInfo.ID, tblInfo, tblInfo.Indices[0])

 	// Test ununiq index.
 	txn, err := s.s.Begin()
@@ -154,7 +154,7 @@ func (s *testIndexSuite) TestIndex(c *C) {
 			},
 		},
 	}
-	index = tables.NewIndex(tblInfo.ID, tblInfo.Indices[0])
+	index = tables.NewIndex(tblInfo.ID, tblInfo, tblInfo.Indices[0])

 	// Test uniq index.
 	txn, err = s.s.Begin()
@@ -216,13 +216,18 @@ func (s *testIndexSuite) TestCombineIndexSeek(c *C) {
 				ID:   2,
 				Name: model.NewCIStr("test"),
 				Columns: []*model.IndexColumn{
-					{Tp: &types.FieldType{}},
-					{Tp: &types.FieldType{}},
+					{Offset: 1},
+					{Offset: 2},
 				},
 			},
 		},
+		Columns: []*model.ColumnInfo{
+			{Offset: 0},
+			{Offset: 1},
+			{Offset: 2},
+		},
 	}
-	index := tables.NewIndex(tblInfo.ID, tblInfo.Indices[0])
+	index := tables.NewIndex(tblInfo.ID, tblInfo, tblInfo.Indices[0])

 	txn, err := s.s.Begin()
 	c.Assert(err, IsNil)
@@ -232,7 +237,7 @@ func (s *testIndexSuite) TestCombineIndexSeek(c *C) {
 	_, err = index.Create(mockCtx, txn, values, 1)
 	c.Assert(err, IsNil)

-	index2 := tables.NewIndex(tblInfo.ID, tblInfo.Indices[0])
+	index2 := tables.NewIndex(tblInfo.ID, tblInfo, tblInfo.Indices[0])
 	sc := &stmtctx.StatementContext{TimeZone: time.Local}
 	iter, hit, err := index2.Seek(sc, txn, types.MakeDatums("abc", nil))
 	c.Assert(err, IsNil)
diff --git a/table/tables/tables.go b/table/tables/tables.go
index 7bbd66abb940e..fe5380af5ef12 100644
--- a/table/tables/tables.go
+++ b/table/tables/tables.go
@@ -163,7 +163,7 @@ func initTableIndices(t *tableCommon) error {
 		}

 		// Use partition ID for index, because tableCommon may be table or partition.
-		idx := NewIndex(t.partitionID, idxInfo)
+		idx := NewIndex(t.partitionID, tblInfo, idxInfo)
 		t.indices = append(t.indices, idx)
 	}
 	return nil
diff --git a/util/admin/admin.go b/util/admin/admin.go
index 5517b84f628ac..0b9c2bcc55f74 100644
--- a/util/admin/admin.go
+++ b/util/admin/admin.go
@@ -104,7 +104,11 @@ func CancelJobs(txn kv.Transaction, ids []int64) ([]error, error) {
 			errs[i] = errors.Trace(err)
 			continue
 		}
-		err = t.UpdateDDLJob(int64(j), job, true)
+		if job.Type == model.ActionAddIndex {
+			err = t.UpdateDDLJob(int64(j), job, true, meta.AddIndexJobListKey)
+		} else {
+			err = t.UpdateDDLJob(int64(j), job, true)
+		}
 		if err != nil {
 			errs[i] = errors.Trace(err)
 		}
@@ -116,17 +120,14 @@ func CancelJobs(txn kv.Transaction, ids []int64) ([]error, error) {
 	return errs, nil
 }

-// GetDDLJobs returns the DDL jobs and an error.
-func GetDDLJobs(txn kv.Transaction) ([]*model.Job, error) {
-	t := meta.NewMeta(txn)
-	cnt, err := t.DDLJobQueueLen()
+func getDDLJobsInQueue(t *meta.Meta, jobListKey meta.JobListKeyType) ([]*model.Job, error) {
+	cnt, err := t.DDLJobQueueLen(jobListKey)
 	if err != nil {
 		return nil, errors.Trace(err)
 	}
-
 	jobs := make([]*model.Job, cnt)
 	for i := range jobs {
-		jobs[i], err = t.GetDDLJob(int64(i))
+		jobs[i], err = t.GetDDLJob(int64(i), jobListKey)
 		if err != nil {
 			return nil, errors.Trace(err)
 		}
@@ -134,6 +135,21 @@ func GetDDLJobs(txn kv.Transaction) ([]*model.Job, error) {
 	return jobs, nil
 }

+// GetDDLJobs returns all DDL jobs.
+// TODO: Sort jobs.
+func GetDDLJobs(txn kv.Transaction) ([]*model.Job, error) {
+	t := meta.NewMeta(txn)
+	generalJobs, err := getDDLJobsInQueue(t, meta.DefaultJobListKey)
+	if err != nil {
+		return nil, errors.Trace(err)
+	}
+	addIdxJobs, err := getDDLJobsInQueue(t, meta.AddIndexJobListKey)
+	if err != nil {
+		return nil, errors.Trace(err)
+	}
+	return append(generalJobs, addIdxJobs...), nil
+}
+
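With two job queues, callers that previously measured a single queue should go through admin.GetDDLJobs, which concatenates both (general first, then add-index, unsorted per the TODO above). A sketch with a hypothetical helper:

// Sketch: filtering the combined job list for add-index jobs.
package example

import (
	"github.com/pingcap/tidb/kv"
	"github.com/pingcap/tidb/model"
	"github.com/pingcap/tidb/util/admin"
)

func addIndexJobIDs(txn kv.Transaction) ([]int64, error) {
	jobs, err := admin.GetDDLJobs(txn)
	if err != nil {
		return nil, err
	}
	var ids []int64
	for _, job := range jobs {
		if job.Type == model.ActionAddIndex {
			ids = append(ids, job.ID)
		}
	}
	return ids, nil
}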
 // MaxHistoryJobs is exported for testing.
 const MaxHistoryJobs = 10
diff --git a/util/admin/admin_test.go b/util/admin/admin_test.go
index 490e80ec11999..a1bab70c4806a 100644
--- a/util/admin/admin_test.go
+++ b/util/admin/admin_test.go
@@ -259,7 +259,7 @@ func (s *testSuite) TestScan(c *C) {
 	idxRow1 := &RecordData{Handle: int64(1), Values: types.MakeDatums(int64(10))}
 	idxRow2 := &RecordData{Handle: int64(2), Values: types.MakeDatums(int64(20))}

-	kvIndex := tables.NewIndex(tb.Meta().ID, indices[0].Meta())
+	kvIndex := tables.NewIndex(tb.Meta().ID, tb.Meta(), indices[0].Meta())
 	sc := &stmtctx.StatementContext{TimeZone: time.Local}
 	idxRows, nextVals, err := ScanIndexData(sc, txn, kvIndex, idxRow1.Values, 2)
 	c.Assert(err, IsNil)