diff --git a/pkg/executor/executor.go b/pkg/executor/executor.go new file mode 100644 index 0000000000000..3c685ee286166 --- /dev/null +++ b/pkg/executor/executor.go @@ -0,0 +1,2801 @@ +// Copyright 2015 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package executor + +import ( + "cmp" + "context" + stderrors "errors" + "fmt" + "math" + "runtime/pprof" + "slices" + "strconv" + "strings" + "sync" + "sync/atomic" + "time" + + "github.com/opentracing/opentracing-go" + "github.com/pingcap/errors" + "github.com/pingcap/failpoint" + "github.com/pingcap/kvproto/pkg/kvrpcpb" + "github.com/pingcap/tidb/pkg/config" + "github.com/pingcap/tidb/pkg/ddl" + "github.com/pingcap/tidb/pkg/ddl/schematracker" + "github.com/pingcap/tidb/pkg/domain" + "github.com/pingcap/tidb/pkg/domain/infosync" + "github.com/pingcap/tidb/pkg/executor/aggregate" + "github.com/pingcap/tidb/pkg/executor/internal/exec" + "github.com/pingcap/tidb/pkg/executor/internal/pdhelper" + "github.com/pingcap/tidb/pkg/executor/sortexec" + "github.com/pingcap/tidb/pkg/expression" + "github.com/pingcap/tidb/pkg/infoschema" + "github.com/pingcap/tidb/pkg/kv" + "github.com/pingcap/tidb/pkg/meta" + "github.com/pingcap/tidb/pkg/meta/autoid" + "github.com/pingcap/tidb/pkg/parser/ast" + "github.com/pingcap/tidb/pkg/parser/auth" + "github.com/pingcap/tidb/pkg/parser/model" + "github.com/pingcap/tidb/pkg/parser/mysql" + "github.com/pingcap/tidb/pkg/parser/terror" + plannercore "github.com/pingcap/tidb/pkg/planner/core" + "github.com/pingcap/tidb/pkg/privilege" + "github.com/pingcap/tidb/pkg/resourcemanager/pool/workerpool" + poolutil "github.com/pingcap/tidb/pkg/resourcemanager/util" + "github.com/pingcap/tidb/pkg/sessionctx" + "github.com/pingcap/tidb/pkg/sessionctx/stmtctx" + "github.com/pingcap/tidb/pkg/sessionctx/variable" + "github.com/pingcap/tidb/pkg/sessiontxn" + "github.com/pingcap/tidb/pkg/table" + "github.com/pingcap/tidb/pkg/table/tables" + "github.com/pingcap/tidb/pkg/tablecodec" + "github.com/pingcap/tidb/pkg/types" + "github.com/pingcap/tidb/pkg/util" + "github.com/pingcap/tidb/pkg/util/admin" + "github.com/pingcap/tidb/pkg/util/channel" + "github.com/pingcap/tidb/pkg/util/chunk" + "github.com/pingcap/tidb/pkg/util/codec" + "github.com/pingcap/tidb/pkg/util/dbterror/exeerrors" + "github.com/pingcap/tidb/pkg/util/deadlockhistory" + "github.com/pingcap/tidb/pkg/util/disk" + "github.com/pingcap/tidb/pkg/util/execdetails" + "github.com/pingcap/tidb/pkg/util/logutil" + "github.com/pingcap/tidb/pkg/util/logutil/consistency" + "github.com/pingcap/tidb/pkg/util/memory" + "github.com/pingcap/tidb/pkg/util/resourcegrouptag" + "github.com/pingcap/tidb/pkg/util/sqlexec" + "github.com/pingcap/tidb/pkg/util/syncutil" + "github.com/pingcap/tidb/pkg/util/topsql" + topsqlstate "github.com/pingcap/tidb/pkg/util/topsql/state" + "github.com/pingcap/tidb/pkg/util/tracing" + tikverr "github.com/tikv/client-go/v2/error" + tikvstore "github.com/tikv/client-go/v2/kv" + tikvutil "github.com/tikv/client-go/v2/util" + atomicutil 
"go.uber.org/atomic" + "go.uber.org/zap" +) + +var ( + _ exec.Executor = &CheckTableExec{} + _ exec.Executor = &aggregate.HashAggExec{} + _ exec.Executor = &HashJoinExec{} + _ exec.Executor = &IndexLookUpExecutor{} + _ exec.Executor = &IndexReaderExecutor{} + _ exec.Executor = &LimitExec{} + _ exec.Executor = &MaxOneRowExec{} + _ exec.Executor = &MergeJoinExec{} + _ exec.Executor = &ProjectionExec{} + _ exec.Executor = &SelectionExec{} + _ exec.Executor = &SelectLockExec{} + _ exec.Executor = &ShowNextRowIDExec{} + _ exec.Executor = &ShowDDLExec{} + _ exec.Executor = &ShowDDLJobsExec{} + _ exec.Executor = &ShowDDLJobQueriesExec{} + _ exec.Executor = &sortexec.SortExec{} + _ exec.Executor = &aggregate.StreamAggExec{} + _ exec.Executor = &TableDualExec{} + _ exec.Executor = &TableReaderExecutor{} + _ exec.Executor = &TableScanExec{} + _ exec.Executor = &sortexec.TopNExec{} + _ exec.Executor = &UnionExec{} + _ exec.Executor = &FastCheckTableExec{} + _ exec.Executor = &AdminShowBDRRoleExec{} + + // GlobalMemoryUsageTracker is the ancestor of all the Executors' memory tracker and GlobalMemory Tracker + GlobalMemoryUsageTracker *memory.Tracker + // GlobalDiskUsageTracker is the ancestor of all the Executors' disk tracker + GlobalDiskUsageTracker *disk.Tracker + // GlobalAnalyzeMemoryTracker is the ancestor of all the Analyze jobs' memory tracker and child of global Tracker + GlobalAnalyzeMemoryTracker *memory.Tracker +) + +var ( + _ dataSourceExecutor = &TableReaderExecutor{} + _ dataSourceExecutor = &IndexReaderExecutor{} + _ dataSourceExecutor = &IndexLookUpExecutor{} + _ dataSourceExecutor = &IndexMergeReaderExecutor{} + + // CheckTableFastBucketSize is the bucket size of fast check table. + CheckTableFastBucketSize = atomic.Int64{} +) + +// dataSourceExecutor is a table DataSource converted Executor. +// Currently, there are TableReader/IndexReader/IndexLookUp/IndexMergeReader. +// Note, partition reader is special and the caller should handle it carefully. +type dataSourceExecutor interface { + exec.Executor + Table() table.Table +} + +const ( + // globalPanicStorageExceed represents the panic message when out of storage quota. + globalPanicStorageExceed string = "Out Of Quota For Local Temporary Space!" + // globalPanicMemoryExceed represents the panic message when out of memory limit. + globalPanicMemoryExceed string = "Out Of Global Memory Limit!" + // globalPanicAnalyzeMemoryExceed represents the panic message when out of analyze memory limit. + globalPanicAnalyzeMemoryExceed string = "Out Of Global Analyze Memory Limit!" +) + +// globalPanicOnExceed panics when GlobalDisTracker storage usage exceeds storage quota. +type globalPanicOnExceed struct { + memory.BaseOOMAction + mutex syncutil.Mutex // For synchronization. 
+} + +func init() { + action := &globalPanicOnExceed{} + GlobalMemoryUsageTracker = memory.NewGlobalTracker(memory.LabelForGlobalMemory, -1) + GlobalMemoryUsageTracker.SetActionOnExceed(action) + GlobalDiskUsageTracker = disk.NewGlobalTrcaker(memory.LabelForGlobalStorage, -1) + GlobalDiskUsageTracker.SetActionOnExceed(action) + GlobalAnalyzeMemoryTracker = memory.NewTracker(memory.LabelForGlobalAnalyzeMemory, -1) + GlobalAnalyzeMemoryTracker.SetActionOnExceed(action) + // register quota funcs + variable.SetMemQuotaAnalyze = GlobalAnalyzeMemoryTracker.SetBytesLimit + variable.GetMemQuotaAnalyze = GlobalAnalyzeMemoryTracker.GetBytesLimit + // TODO: do not attach now to avoid impact to global, will attach later when analyze memory track is stable + //GlobalAnalyzeMemoryTracker.AttachToGlobalTracker(GlobalMemoryUsageTracker) + + schematracker.ConstructResultOfShowCreateDatabase = ConstructResultOfShowCreateDatabase + schematracker.ConstructResultOfShowCreateTable = ConstructResultOfShowCreateTable + + // CheckTableFastBucketSize is used to set the fast analyze bucket size for check table. + CheckTableFastBucketSize.Store(1024) +} + +// Start the backend components +func Start() { + pdhelper.GlobalPDHelper.Start() +} + +// Stop the backend components +func Stop() { + pdhelper.GlobalPDHelper.Stop() +} + +// Action panics when storage usage exceeds storage quota. +func (a *globalPanicOnExceed) Action(t *memory.Tracker) { + a.mutex.Lock() + defer a.mutex.Unlock() + msg := "" + switch t.Label() { + case memory.LabelForGlobalStorage: + msg = globalPanicStorageExceed + case memory.LabelForGlobalMemory: + msg = globalPanicMemoryExceed + case memory.LabelForGlobalAnalyzeMemory: + msg = globalPanicAnalyzeMemoryExceed + default: + msg = "Out of Unknown Resource Quota!" + } + // TODO(hawkingrei): should return error instead. + panic(msg) +} + +// GetPriority get the priority of the Action +func (*globalPanicOnExceed) GetPriority() int64 { + return memory.DefPanicPriority +} + +// newList creates a new List to buffer current executor's result. +func newList(e exec.Executor) *chunk.List { + base := e.Base() + return chunk.NewList(base.RetFieldTypes(), base.InitCap(), base.MaxChunkSize()) +} + +// CommandDDLJobsExec is the general struct for Cancel/Pause/Resume commands on +// DDL jobs. These command currently by admin have the very similar struct and +// operations, it should be a better idea to have them in the same struct. +type CommandDDLJobsExec struct { + exec.BaseExecutor + + cursor int + jobIDs []int64 + errs []error + + execute func(se sessionctx.Context, ids []int64) (errs []error, err error) +} + +// Open implements the Executor for all Cancel/Pause/Resume command on DDL jobs +// just with different processes. And, it should not be called directly by the +// Executor. +func (e *CommandDDLJobsExec) Open(context.Context) error { + // We want to use a global transaction to execute the admin command, so we don't use e.Ctx() here. 
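Aside: the global trackers wired up in init above all share the globalPanicOnExceed action. As a rough, self-contained sketch of that tracker/action-on-exceed pattern, using toy types only (not the real memory.Tracker API):

package main

import "fmt"

// actionOnExceed is a toy stand-in for an OOM action: it is invoked once the
// tracked consumption crosses the configured limit.
type actionOnExceed interface {
	Action(consumed, limit int64)
}

// panicAction mirrors the spirit of globalPanicOnExceed: it aborts on overuse.
type panicAction struct{ msg string }

func (a panicAction) Action(consumed, limit int64) {
	panic(fmt.Sprintf("%s (consumed=%d, limit=%d)", a.msg, consumed, limit))
}

// toyTracker is a hypothetical, simplified tracker: it only counts bytes and
// fires its action when the limit is exceeded.
type toyTracker struct {
	consumed int64
	limit    int64
	action   actionOnExceed
}

func (t *toyTracker) Consume(bytes int64) {
	t.consumed += bytes
	if t.limit > 0 && t.consumed > t.limit && t.action != nil {
		t.action.Action(t.consumed, t.limit)
	}
}

func main() {
	global := &toyTracker{limit: 1 << 20, action: panicAction{msg: "Out Of Global Memory Limit!"}}
	global.Consume(512 << 10) // under the 1 MiB quota, nothing happens
	global.Consume(600 << 10) // crosses the quota: the action panics
}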
+ newSess, err := e.GetSysSession() + if err != nil { + return err + } + e.errs, err = e.execute(newSess, e.jobIDs) + e.ReleaseSysSession(kv.WithInternalSourceType(context.Background(), kv.InternalTxnDDL), newSess) + return err +} + +// Next implements the Executor Next interface for Cancel/Pause/Resume +func (e *CommandDDLJobsExec) Next(_ context.Context, req *chunk.Chunk) error { + req.GrowAndReset(e.MaxChunkSize()) + if e.cursor >= len(e.jobIDs) { + return nil + } + numCurBatch := min(req.Capacity(), len(e.jobIDs)-e.cursor) + for i := e.cursor; i < e.cursor+numCurBatch; i++ { + req.AppendString(0, strconv.FormatInt(e.jobIDs[i], 10)) + if e.errs != nil && e.errs[i] != nil { + req.AppendString(1, fmt.Sprintf("error: %v", e.errs[i])) + } else { + req.AppendString(1, "successful") + } + } + e.cursor += numCurBatch + return nil +} + +// CancelDDLJobsExec represents a cancel DDL jobs executor. +type CancelDDLJobsExec struct { + *CommandDDLJobsExec +} + +// PauseDDLJobsExec indicates an Executor for Pause a DDL Job. +type PauseDDLJobsExec struct { + *CommandDDLJobsExec +} + +// ResumeDDLJobsExec indicates an Executor for Resume a DDL Job. +type ResumeDDLJobsExec struct { + *CommandDDLJobsExec +} + +// ShowNextRowIDExec represents a show the next row ID executor. +type ShowNextRowIDExec struct { + exec.BaseExecutor + tblName *ast.TableName + done bool +} + +// Next implements the Executor Next interface. +func (e *ShowNextRowIDExec) Next(_ context.Context, req *chunk.Chunk) error { + req.Reset() + if e.done { + return nil + } + is := domain.GetDomain(e.Ctx()).InfoSchema() + tbl, err := is.TableByName(e.tblName.Schema, e.tblName.Name) + if err != nil { + return err + } + tblMeta := tbl.Meta() + + allocators := tbl.Allocators(e.Ctx()) + for _, alloc := range allocators.Allocs { + nextGlobalID, err := alloc.NextGlobalAutoID() + if err != nil { + return err + } + + var colName, idType string + switch alloc.GetType() { + case autoid.RowIDAllocType: + idType = "_TIDB_ROWID" + if tblMeta.PKIsHandle { + if col := tblMeta.GetAutoIncrementColInfo(); col != nil { + colName = col.Name.O + } + } else { + colName = model.ExtraHandleName.O + } + case autoid.AutoIncrementType: + idType = "AUTO_INCREMENT" + if tblMeta.PKIsHandle { + if col := tblMeta.GetAutoIncrementColInfo(); col != nil { + colName = col.Name.O + } + } else { + colName = model.ExtraHandleName.O + } + case autoid.AutoRandomType: + idType = "AUTO_RANDOM" + colName = tblMeta.GetPkName().O + case autoid.SequenceType: + idType = "SEQUENCE" + colName = "" + default: + return autoid.ErrInvalidAllocatorType.GenWithStackByArgs() + } + + req.AppendString(0, e.tblName.Schema.O) + req.AppendString(1, e.tblName.Name.O) + req.AppendString(2, colName) + req.AppendInt64(3, nextGlobalID) + req.AppendString(4, idType) + } + + e.done = true + return nil +} + +// ShowDDLExec represents a show DDL executor. +type ShowDDLExec struct { + exec.BaseExecutor + + ddlOwnerID string + selfID string + ddlInfo *ddl.Info + done bool +} + +// Next implements the Executor Next interface. 
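Aside: CommandDDLJobsExec.Next pages through the job IDs one chunk-capacity at a time with a cursor. A minimal stand-alone sketch of that cursor-and-batch shape, with a plain string slice standing in for chunk.Chunk:

package main

import "fmt"

// nextBatch mimics the cursor logic in CommandDDLJobsExec.Next: it returns at
// most `capacity` items starting from *cursor and advances the cursor.
func nextBatch(items []string, cursor *int, capacity int) []string {
	if *cursor >= len(items) {
		return nil // exhausted, like returning an empty chunk
	}
	n := min(capacity, len(items)-*cursor)
	batch := items[*cursor : *cursor+n]
	*cursor += n
	return batch
}

func main() {
	jobs := []string{"101", "102", "103", "104", "105"}
	cursor := 0
	for {
		batch := nextBatch(jobs, &cursor, 2)
		if batch == nil {
			break
		}
		fmt.Println(batch) // [101 102], then [103 104], then [105]
	}
}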
+func (e *ShowDDLExec) Next(ctx context.Context, req *chunk.Chunk) error { + req.Reset() + if e.done { + return nil + } + + ddlJobs := "" + query := "" + l := len(e.ddlInfo.Jobs) + for i, job := range e.ddlInfo.Jobs { + ddlJobs += job.String() + query += job.Query + if i != l-1 { + ddlJobs += "\n" + query += "\n" + } + } + + serverInfo, err := infosync.GetServerInfoByID(ctx, e.ddlOwnerID) + if err != nil { + return err + } + + serverAddress := serverInfo.IP + ":" + + strconv.FormatUint(uint64(serverInfo.Port), 10) + + req.AppendInt64(0, e.ddlInfo.SchemaVer) + req.AppendString(1, e.ddlOwnerID) + req.AppendString(2, serverAddress) + req.AppendString(3, ddlJobs) + req.AppendString(4, e.selfID) + req.AppendString(5, query) + + e.done = true + return nil +} + +// ShowDDLJobsExec represent a show DDL jobs executor. +type ShowDDLJobsExec struct { + exec.BaseExecutor + DDLJobRetriever + + jobNumber int + is infoschema.InfoSchema + sess sessionctx.Context +} + +// DDLJobRetriever retrieve the DDLJobs. +// nolint:structcheck +type DDLJobRetriever struct { + runningJobs []*model.Job + historyJobIter meta.LastJobIterator + cursor int + is infoschema.InfoSchema + activeRoles []*auth.RoleIdentity + cacheJobs []*model.Job + TZLoc *time.Location +} + +func (e *DDLJobRetriever) initial(txn kv.Transaction, sess sessionctx.Context) error { + m := meta.NewMeta(txn) + jobs, err := ddl.GetAllDDLJobs(sess) + if err != nil { + return err + } + e.historyJobIter, err = ddl.GetLastHistoryDDLJobsIterator(m) + if err != nil { + return err + } + e.runningJobs = jobs + e.cursor = 0 + return nil +} + +func (e *DDLJobRetriever) appendJobToChunk(req *chunk.Chunk, job *model.Job, checker privilege.Manager) { + schemaName := job.SchemaName + tableName := "" + finishTS := uint64(0) + if job.BinlogInfo != nil { + finishTS = job.BinlogInfo.FinishedTS + if job.BinlogInfo.TableInfo != nil { + tableName = job.BinlogInfo.TableInfo.Name.L + } + if job.BinlogInfo.MultipleTableInfos != nil { + tablenames := new(strings.Builder) + for i, affect := range job.BinlogInfo.MultipleTableInfos { + if i > 0 { + fmt.Fprintf(tablenames, ",") + } + fmt.Fprintf(tablenames, "%s", affect.Name.L) + } + tableName = tablenames.String() + } + if len(schemaName) == 0 && job.BinlogInfo.DBInfo != nil { + schemaName = job.BinlogInfo.DBInfo.Name.L + } + } + if len(tableName) == 0 { + tableName = job.TableName + } + // For compatibility, the old version of DDL Job wasn't store the schema name and table name. + if len(schemaName) == 0 { + schemaName = getSchemaName(e.is, job.SchemaID) + } + if len(tableName) == 0 { + tableName = getTableName(e.is, job.TableID) + } + + createTime := ts2Time(job.StartTS, e.TZLoc) + startTime := ts2Time(job.RealStartTS, e.TZLoc) + finishTime := ts2Time(finishTS, e.TZLoc) + + // Check the privilege. 
+ if checker != nil && !checker.RequestVerification(e.activeRoles, strings.ToLower(schemaName), strings.ToLower(tableName), "", mysql.AllPrivMask) { + return + } + + req.AppendInt64(0, job.ID) + req.AppendString(1, schemaName) + req.AppendString(2, tableName) + req.AppendString(3, job.Type.String()+showAddIdxReorgTp(job)) + req.AppendString(4, job.SchemaState.String()) + req.AppendInt64(5, job.SchemaID) + req.AppendInt64(6, job.TableID) + req.AppendInt64(7, job.RowCount) + req.AppendTime(8, createTime) + if job.RealStartTS > 0 { + req.AppendTime(9, startTime) + } else { + req.AppendNull(9) + } + if finishTS > 0 { + req.AppendTime(10, finishTime) + } else { + req.AppendNull(10) + } + req.AppendString(11, job.State.String()) + if job.Type == model.ActionMultiSchemaChange { + isDistTask := job.ReorgMeta != nil && job.ReorgMeta.IsDistReorg + for _, subJob := range job.MultiSchemaInfo.SubJobs { + req.AppendInt64(0, job.ID) + req.AppendString(1, schemaName) + req.AppendString(2, tableName) + req.AppendString(3, subJob.Type.String()+" /* subjob */"+showAddIdxReorgTpInSubJob(subJob, isDistTask)) + req.AppendString(4, subJob.SchemaState.String()) + req.AppendInt64(5, job.SchemaID) + req.AppendInt64(6, job.TableID) + req.AppendInt64(7, subJob.RowCount) + req.AppendTime(8, createTime) + if subJob.RealStartTS > 0 { + realStartTS := ts2Time(subJob.RealStartTS, e.TZLoc) + req.AppendTime(9, realStartTS) + } else { + req.AppendNull(9) + } + if finishTS > 0 { + req.AppendTime(10, finishTime) + } else { + req.AppendNull(10) + } + req.AppendString(11, subJob.State.String()) + } + } +} + +func showAddIdxReorgTp(job *model.Job) string { + if job.Type == model.ActionAddIndex || job.Type == model.ActionAddPrimaryKey { + if job.ReorgMeta != nil { + sb := strings.Builder{} + tp := job.ReorgMeta.ReorgTp.String() + if len(tp) > 0 { + sb.WriteString(" /* ") + sb.WriteString(tp) + if job.ReorgMeta.ReorgTp == model.ReorgTypeLitMerge && + job.ReorgMeta.IsDistReorg && + job.ReorgMeta.UseCloudStorage { + sb.WriteString(" cloud") + } + sb.WriteString(" */") + } + return sb.String() + } + } + return "" +} + +func showAddIdxReorgTpInSubJob(subJob *model.SubJob, useDistTask bool) string { + if subJob.Type == model.ActionAddIndex || subJob.Type == model.ActionAddPrimaryKey { + sb := strings.Builder{} + tp := subJob.ReorgTp.String() + if len(tp) > 0 { + sb.WriteString(" /* ") + sb.WriteString(tp) + if subJob.ReorgTp == model.ReorgTypeLitMerge && useDistTask && subJob.UseCloud { + sb.WriteString(" cloud") + } + sb.WriteString(" */") + } + return sb.String() + } + return "" +} + +func ts2Time(timestamp uint64, loc *time.Location) types.Time { + duration := time.Duration(math.Pow10(9-types.DefaultFsp)) * time.Nanosecond + t := model.TSConvert2Time(timestamp) + t.Truncate(duration) + return types.NewTime(types.FromGoTime(t.In(loc)), mysql.TypeDatetime, types.DefaultFsp) +} + +// ShowDDLJobQueriesExec represents a show DDL job queries executor. +// The jobs id that is given by 'admin show ddl job queries' statement, +// only be searched in the latest 10 history jobs. +type ShowDDLJobQueriesExec struct { + exec.BaseExecutor + + cursor int + jobs []*model.Job + jobIDs []int64 +} + +// Open implements the Executor Open interface. 
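Aside: ts2Time above converts a TSO timestamp into a wall-clock types.Time via model.TSConvert2Time. The sketch below shows the conversion it relies on, assuming the conventional PD TSO layout (physical milliseconds in the high bits, an 18-bit logical counter in the low bits); the helper name here is ours, not TiDB's:

package main

import (
	"fmt"
	"time"
)

// tsoPhysicalShift is the number of low bits reserved for the logical
// counter; the remaining high bits hold a Unix timestamp in milliseconds.
const tsoPhysicalShift = 18

// tsoToTime is a hypothetical equivalent of what ts2Time relies on:
// extract the physical part and interpret it as milliseconds.
func tsoToTime(ts uint64) time.Time {
	physicalMillis := int64(ts >> tsoPhysicalShift)
	return time.UnixMilli(physicalMillis)
}

func main() {
	// Build a fake TSO from "now" plus a logical counter of 7.
	now := time.Now().UnixMilli()
	ts := uint64(now)<<tsoPhysicalShift | 7

	fmt.Println(tsoToTime(ts).Format(time.DateTime))
}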
+func (e *ShowDDLJobQueriesExec) Open(ctx context.Context) error { + var err error + var jobs []*model.Job + if err := e.BaseExecutor.Open(ctx); err != nil { + return err + } + session, err := e.GetSysSession() + if err != nil { + return err + } + err = sessiontxn.NewTxn(context.Background(), session) + if err != nil { + return err + } + defer func() { + // ReleaseSysSession will rollbacks txn automatically. + e.ReleaseSysSession(kv.WithInternalSourceType(context.Background(), kv.InternalTxnDDL), session) + }() + txn, err := session.Txn(true) + if err != nil { + return err + } + session.GetSessionVars().SetInTxn(true) + + m := meta.NewMeta(txn) + jobs, err = ddl.GetAllDDLJobs(session) + if err != nil { + return err + } + + historyJobs, err := ddl.GetLastNHistoryDDLJobs(m, ddl.DefNumHistoryJobs) + if err != nil { + return err + } + + appendedJobID := make(map[int64]struct{}) + // deduplicate job results + // for situations when this operation happens at the same time with new DDLs being executed + for _, job := range jobs { + if _, ok := appendedJobID[job.ID]; !ok { + appendedJobID[job.ID] = struct{}{} + e.jobs = append(e.jobs, job) + } + } + for _, historyJob := range historyJobs { + if _, ok := appendedJobID[historyJob.ID]; !ok { + appendedJobID[historyJob.ID] = struct{}{} + e.jobs = append(e.jobs, historyJob) + } + } + + return nil +} + +// Next implements the Executor Next interface. +func (e *ShowDDLJobQueriesExec) Next(_ context.Context, req *chunk.Chunk) error { + req.GrowAndReset(e.MaxChunkSize()) + if e.cursor >= len(e.jobs) { + return nil + } + if len(e.jobIDs) >= len(e.jobs) { + return nil + } + numCurBatch := min(req.Capacity(), len(e.jobs)-e.cursor) + for _, id := range e.jobIDs { + for i := e.cursor; i < e.cursor+numCurBatch; i++ { + if id == e.jobs[i].ID { + req.AppendString(0, e.jobs[i].Query) + } + } + } + e.cursor += numCurBatch + return nil +} + +// ShowDDLJobQueriesWithRangeExec represents a show DDL job queries with range executor. +// The jobs id that is given by 'admin show ddl job queries' statement, +// can be searched within a specified range in history jobs using offset and limit. +type ShowDDLJobQueriesWithRangeExec struct { + exec.BaseExecutor + + cursor int + jobs []*model.Job + offset uint64 + limit uint64 +} + +// Open implements the Executor Open interface. +func (e *ShowDDLJobQueriesWithRangeExec) Open(ctx context.Context) error { + var err error + var jobs []*model.Job + if err := e.BaseExecutor.Open(ctx); err != nil { + return err + } + session, err := e.GetSysSession() + if err != nil { + return err + } + err = sessiontxn.NewTxn(context.Background(), session) + if err != nil { + return err + } + defer func() { + // ReleaseSysSession will rollbacks txn automatically. 
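Aside: ShowDDLJobQueriesExec.Open merges the running jobs with the recent history jobs and deduplicates them by job ID, since the two lists can overlap while DDLs are executing. A small self-contained sketch of that merge, with a trimmed-down job type:

package main

import "fmt"

type job struct {
	ID    int64
	Query string
}

// mergeJobs keeps the first occurrence of every job ID, the same way the Open
// method merges running jobs with recent history jobs.
func mergeJobs(lists ...[]job) []job {
	seen := make(map[int64]struct{})
	var out []job
	for _, list := range lists {
		for _, j := range list {
			if _, ok := seen[j.ID]; ok {
				continue // already appended from an earlier list
			}
			seen[j.ID] = struct{}{}
			out = append(out, j)
		}
	}
	return out
}

func main() {
	running := []job{{1, "ADD INDEX"}, {2, "DROP COLUMN"}}
	history := []job{{2, "DROP COLUMN"}, {3, "CREATE TABLE"}}
	fmt.Println(mergeJobs(running, history)) // IDs 1, 2, 3 appear exactly once
}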
+ e.ReleaseSysSession(kv.WithInternalSourceType(context.Background(), kv.InternalTxnDDL), session) + }() + txn, err := session.Txn(true) + if err != nil { + return err + } + session.GetSessionVars().SetInTxn(true) + + m := meta.NewMeta(txn) + jobs, err = ddl.GetAllDDLJobs(session) + if err != nil { + return err + } + + historyJobs, err := ddl.GetLastNHistoryDDLJobs(m, int(e.offset+e.limit)) + if err != nil { + return err + } + + appendedJobID := make(map[int64]struct{}) + // deduplicate job results + // for situations when this operation happens at the same time with new DDLs being executed + for _, job := range jobs { + if _, ok := appendedJobID[job.ID]; !ok { + appendedJobID[job.ID] = struct{}{} + e.jobs = append(e.jobs, job) + } + } + for _, historyJob := range historyJobs { + if _, ok := appendedJobID[historyJob.ID]; !ok { + appendedJobID[historyJob.ID] = struct{}{} + e.jobs = append(e.jobs, historyJob) + } + } + + if e.cursor < int(e.offset) { + e.cursor = int(e.offset) + } + + return nil +} + +// Next implements the Executor Next interface. +func (e *ShowDDLJobQueriesWithRangeExec) Next(_ context.Context, req *chunk.Chunk) error { + req.GrowAndReset(e.MaxChunkSize()) + if e.cursor >= len(e.jobs) { + return nil + } + if int(e.offset) > len(e.jobs) { + return nil + } + numCurBatch := min(req.Capacity(), len(e.jobs)-e.cursor) + for i := e.cursor; i < e.cursor+numCurBatch; i++ { + // i is make true to be >= int(e.offset) + if i >= int(e.offset+e.limit) { + break + } + req.AppendString(0, strconv.FormatInt(e.jobs[i].ID, 10)) + req.AppendString(1, e.jobs[i].Query) + } + e.cursor += numCurBatch + return nil +} + +// Open implements the Executor Open interface. +func (e *ShowDDLJobsExec) Open(ctx context.Context) error { + if err := e.BaseExecutor.Open(ctx); err != nil { + return err + } + e.DDLJobRetriever.is = e.is + if e.jobNumber == 0 { + e.jobNumber = ddl.DefNumHistoryJobs + } + sess, err := e.GetSysSession() + if err != nil { + return err + } + e.sess = sess + err = sessiontxn.NewTxn(context.Background(), sess) + if err != nil { + return err + } + txn, err := sess.Txn(true) + if err != nil { + return err + } + sess.GetSessionVars().SetInTxn(true) + err = e.DDLJobRetriever.initial(txn, sess) + return err +} + +// Next implements the Executor Next interface. +func (e *ShowDDLJobsExec) Next(_ context.Context, req *chunk.Chunk) error { + req.GrowAndReset(e.MaxChunkSize()) + if (e.cursor - len(e.runningJobs)) >= e.jobNumber { + return nil + } + count := 0 + + // Append running ddl jobs. + if e.cursor < len(e.runningJobs) { + numCurBatch := min(req.Capacity(), len(e.runningJobs)-e.cursor) + for i := e.cursor; i < e.cursor+numCurBatch; i++ { + e.appendJobToChunk(req, e.runningJobs[i], nil) + } + e.cursor += numCurBatch + count += numCurBatch + } + + // Append history ddl jobs. + var err error + if count < req.Capacity() { + num := req.Capacity() - count + remainNum := e.jobNumber - (e.cursor - len(e.runningJobs)) + num = min(num, remainNum) + e.cacheJobs, err = e.historyJobIter.GetLastJobs(num, e.cacheJobs) + if err != nil { + return err + } + for _, job := range e.cacheJobs { + e.appendJobToChunk(req, job, nil) + } + e.cursor += len(e.cacheJobs) + } + return nil +} + +// Close implements the Executor Close interface. 
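Aside: ShowDDLJobQueriesWithRangeExec exposes the same data but restricted to an offset/limit window over the job list. The clamping it performs boils down to the following sketch (hypothetical helper, plain strings instead of jobs):

package main

import "fmt"

// window returns jobs[offset : offset+limit], clamped to the slice bounds,
// mirroring how the range executor restricts its cursor to [offset, offset+limit).
func window(jobs []string, offset, limit uint64) []string {
	start := offset
	if start > uint64(len(jobs)) {
		start = uint64(len(jobs))
	}
	end := offset + limit
	if end > uint64(len(jobs)) {
		end = uint64(len(jobs))
	}
	return jobs[start:end]
}

func main() {
	jobs := []string{"j0", "j1", "j2", "j3", "j4"}
	fmt.Println(window(jobs, 1, 2)) // [j1 j2]
	fmt.Println(window(jobs, 4, 3)) // [j4]
	fmt.Println(window(jobs, 9, 2)) // []
}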
+func (e *ShowDDLJobsExec) Close() error { + e.ReleaseSysSession(kv.WithInternalSourceType(context.Background(), kv.InternalTxnDDL), e.sess) + return e.BaseExecutor.Close() +} + +func getSchemaName(is infoschema.InfoSchema, id int64) string { + var schemaName string + dbInfo, ok := is.SchemaByID(id) + if ok { + schemaName = dbInfo.Name.O + return schemaName + } + + return schemaName +} + +func getTableName(is infoschema.InfoSchema, id int64) string { + var tableName string + table, ok := is.TableByID(id) + if ok { + tableName = table.Meta().Name.O + return tableName + } + + return tableName +} + +// CheckTableExec represents a check table executor. +// It is built from the "admin check table" statement, and it checks if the +// index matches the records in the table. +type CheckTableExec struct { + exec.BaseExecutor + + dbName string + table table.Table + indexInfos []*model.IndexInfo + srcs []*IndexLookUpExecutor + done bool + is infoschema.InfoSchema + exitCh chan struct{} + retCh chan error + checkIndex bool +} + +// Open implements the Executor Open interface. +func (e *CheckTableExec) Open(ctx context.Context) error { + if err := e.BaseExecutor.Open(ctx); err != nil { + return err + } + for _, src := range e.srcs { + if err := exec.Open(ctx, src); err != nil { + return errors.Trace(err) + } + } + e.done = false + return nil +} + +// Close implements the Executor Close interface. +func (e *CheckTableExec) Close() error { + var firstErr error + close(e.exitCh) + for _, src := range e.srcs { + if err := exec.Close(src); err != nil && firstErr == nil { + firstErr = err + } + } + return firstErr +} + +func (e *CheckTableExec) checkTableIndexHandle(ctx context.Context, idxInfo *model.IndexInfo) error { + // For partition table, there will be multi same index indexLookUpReaders on different partitions. + for _, src := range e.srcs { + if src.index.Name.L == idxInfo.Name.L { + err := e.checkIndexHandle(ctx, src) + if err != nil { + return err + } + } + } + return nil +} + +func (e *CheckTableExec) checkIndexHandle(ctx context.Context, src *IndexLookUpExecutor) error { + cols := src.Schema().Columns + retFieldTypes := make([]*types.FieldType, len(cols)) + for i := range cols { + retFieldTypes[i] = cols[i].RetType + } + chk := chunk.New(retFieldTypes, e.InitCap(), e.MaxChunkSize()) + + var err error + for { + err = exec.Next(ctx, src, chk) + if err != nil { + e.retCh <- errors.Trace(err) + break + } + if chk.NumRows() == 0 { + break + } + } + return errors.Trace(err) +} + +func (e *CheckTableExec) handlePanic(r interface{}) { + if r != nil { + e.retCh <- errors.Errorf("%v", r) + } +} + +// Next implements the Executor Next interface. +func (e *CheckTableExec) Next(ctx context.Context, _ *chunk.Chunk) error { + if e.done || len(e.srcs) == 0 { + return nil + } + defer func() { e.done = true }() + + // See the comment of `ColumnInfos2ColumnsAndNames`. 
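Aside: CheckTableExec.checkIndexHandle drains an index reader by calling Next until it yields an empty chunk or an error. The loop shape, reduced to a toy row source (not the real exec.Executor interface):

package main

import "fmt"

// rowSource is a toy stand-in for an executor's Next: it fills a batch and
// reports how many rows it produced; zero rows means the source is drained.
type rowSource interface {
	Next(batch []int64) (n int, err error)
}

// drain keeps calling Next until an empty batch or an error shows up, the
// same shape as checkIndexHandle draining an index reader.
func drain(src rowSource, batchSize int) (int, error) {
	batch := make([]int64, batchSize)
	total := 0
	for {
		n, err := src.Next(batch)
		if err != nil {
			return total, err
		}
		if n == 0 {
			return total, nil // no more data
		}
		total += n
	}
}

type sliceSource struct{ rows []int64 }

func (s *sliceSource) Next(batch []int64) (int, error) {
	n := copy(batch, s.rows)
	s.rows = s.rows[n:]
	return n, nil
}

func main() {
	total, err := drain(&sliceSource{rows: []int64{1, 2, 3, 4, 5}}, 2)
	fmt.Println(total, err) // 5 <nil>
}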
It's fixing #42341 + originalTypeFlags := e.Ctx().GetSessionVars().StmtCtx.TypeFlags() + defer func() { + e.Ctx().GetSessionVars().StmtCtx.SetTypeFlags(originalTypeFlags) + }() + e.Ctx().GetSessionVars().StmtCtx.SetTypeFlags(originalTypeFlags.WithIgnoreTruncateErr(true)) + + idxNames := make([]string, 0, len(e.indexInfos)) + for _, idx := range e.indexInfos { + if idx.MVIndex { + continue + } + idxNames = append(idxNames, idx.Name.O) + } + greater, idxOffset, err := admin.CheckIndicesCount(e.Ctx(), e.dbName, e.table.Meta().Name.O, idxNames) + if err != nil { + // For admin check index statement, for speed up and compatibility, doesn't do below checks. + if e.checkIndex { + return errors.Trace(err) + } + if greater == admin.IdxCntGreater { + err = e.checkTableIndexHandle(ctx, e.indexInfos[idxOffset]) + } else if greater == admin.TblCntGreater { + err = e.checkTableRecord(ctx, idxOffset) + } + return errors.Trace(err) + } + + // The number of table rows is equal to the number of index rows. + // TODO: Make the value of concurrency adjustable. And we can consider the number of records. + if len(e.srcs) == 1 { + err = e.checkIndexHandle(ctx, e.srcs[0]) + if err == nil && e.srcs[0].index.MVIndex { + err = e.checkTableRecord(ctx, 0) + } + if err != nil { + return err + } + } + taskCh := make(chan *IndexLookUpExecutor, len(e.srcs)) + failure := atomicutil.NewBool(false) + concurrency := min(3, len(e.srcs)) + var wg util.WaitGroupWrapper + for _, src := range e.srcs { + taskCh <- src + } + for i := 0; i < concurrency; i++ { + wg.Run(func() { + util.WithRecovery(func() { + for { + if fail := failure.Load(); fail { + return + } + select { + case src := <-taskCh: + err1 := e.checkIndexHandle(ctx, src) + if err1 == nil && src.index.MVIndex { + for offset, idx := range e.indexInfos { + if idx.ID == src.index.ID { + err1 = e.checkTableRecord(ctx, offset) + break + } + } + } + if err1 != nil { + failure.Store(true) + logutil.Logger(ctx).Info("check index handle failed", zap.Error(err1)) + return + } + case <-e.exitCh: + return + default: + return + } + } + }, e.handlePanic) + }) + } + wg.Wait() + select { + case err := <-e.retCh: + return errors.Trace(err) + default: + return nil + } +} + +func (e *CheckTableExec) checkTableRecord(ctx context.Context, idxOffset int) error { + idxInfo := e.indexInfos[idxOffset] + txn, err := e.Ctx().Txn(true) + if err != nil { + return err + } + if e.table.Meta().GetPartitionInfo() == nil { + idx := tables.NewIndex(e.table.Meta().ID, e.table.Meta(), idxInfo) + return admin.CheckRecordAndIndex(ctx, e.Ctx(), txn, e.table, idx) + } + + info := e.table.Meta().GetPartitionInfo() + for _, def := range info.Definitions { + pid := def.ID + partition := e.table.(table.PartitionedTable).GetPartition(pid) + idx := tables.NewIndex(def.ID, e.table.Meta(), idxInfo) + if err := admin.CheckRecordAndIndex(ctx, e.Ctx(), txn, partition, idx); err != nil { + return errors.Trace(err) + } + } + return nil +} + +// ShowSlowExec represents the executor of showing the slow queries. +// It is build from the "admin show slow" statement: +// +// admin show slow top [internal | all] N +// admin show slow recent N +type ShowSlowExec struct { + exec.BaseExecutor + + ShowSlow *ast.ShowSlow + result []*domain.SlowQueryInfo + cursor int +} + +// Open implements the Executor Open interface. 
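Aside: CheckTableExec.Next fans a small fixed number of workers out over a buffered task channel and stops early once any index check fails. A self-contained sketch of that bounded-concurrency pattern, with plain strings as tasks and only standard sync primitives:

package main

import (
	"fmt"
	"sync"
	"sync/atomic"
)

// runChecks fans a fixed number of workers out over a buffered task channel,
// stopping early once any task fails.
func runChecks(tasks []string, check func(string) error) error {
	taskCh := make(chan string, len(tasks))
	for _, t := range tasks {
		taskCh <- t
	}
	close(taskCh)

	var failed atomic.Bool
	var firstErr error
	var mu sync.Mutex
	var wg sync.WaitGroup

	concurrency := min(3, len(tasks))
	for i := 0; i < concurrency; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			for t := range taskCh {
				if failed.Load() {
					return // another worker already hit an error
				}
				if err := check(t); err != nil {
					failed.Store(true)
					mu.Lock()
					if firstErr == nil {
						firstErr = err
					}
					mu.Unlock()
					return
				}
			}
		}()
	}
	wg.Wait()
	return firstErr
}

func main() {
	err := runChecks([]string{"idx_a", "idx_b", "idx_c"}, func(name string) error {
		if name == "idx_b" {
			return fmt.Errorf("index %s is inconsistent", name)
		}
		return nil
	})
	fmt.Println(err)
}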
+func (e *ShowSlowExec) Open(ctx context.Context) error { + if err := e.BaseExecutor.Open(ctx); err != nil { + return err + } + + dom := domain.GetDomain(e.Ctx()) + e.result = dom.ShowSlowQuery(e.ShowSlow) + return nil +} + +// Next implements the Executor Next interface. +func (e *ShowSlowExec) Next(_ context.Context, req *chunk.Chunk) error { + req.Reset() + if e.cursor >= len(e.result) { + return nil + } + + for e.cursor < len(e.result) && req.NumRows() < e.MaxChunkSize() { + slow := e.result[e.cursor] + req.AppendString(0, slow.SQL) + req.AppendTime(1, types.NewTime(types.FromGoTime(slow.Start), mysql.TypeTimestamp, types.MaxFsp)) + req.AppendDuration(2, types.Duration{Duration: slow.Duration, Fsp: types.MaxFsp}) + req.AppendString(3, slow.Detail.String()) + if slow.Succ { + req.AppendInt64(4, 1) + } else { + req.AppendInt64(4, 0) + } + req.AppendUint64(5, slow.ConnID) + req.AppendUint64(6, slow.TxnTS) + req.AppendString(7, slow.User) + req.AppendString(8, slow.DB) + req.AppendString(9, slow.TableIDs) + req.AppendString(10, slow.IndexNames) + if slow.Internal { + req.AppendInt64(11, 1) + } else { + req.AppendInt64(11, 0) + } + req.AppendString(12, slow.Digest) + req.AppendString(13, slow.SessAlias) + e.cursor++ + } + return nil +} + +// SelectLockExec represents a select lock executor. +// It is built from the "SELECT .. FOR UPDATE" or the "SELECT .. LOCK IN SHARE MODE" statement. +// For "SELECT .. FOR UPDATE" statement, it locks every row key from source Executor. +// After the execution, the keys are buffered in transaction, and will be sent to KV +// when doing commit. If there is any key already locked by another transaction, +// the transaction will rollback and retry. +type SelectLockExec struct { + exec.BaseExecutor + + Lock *ast.SelectLockInfo + keys []kv.Key + + // The children may be a join of multiple tables, so we need a map. + tblID2Handle map[int64][]plannercore.HandleCols + + // When SelectLock work on a partition table, we need the partition ID + // (Physical Table ID) instead of the 'logical' table ID to calculate + // the lock KV. In that case, the Physical Table ID is extracted + // from the row key in the store and as an extra column in the chunk row. + + // tblID2PhyTblIDCol is used for partitioned tables. + // The child executor need to return an extra column containing + // the Physical Table ID (i.e. from which partition the row came from) + // Used during building + tblID2PhysTblIDCol map[int64]*expression.Column + + // Used during execution + // Map from logic tableID to column index where the physical table id is stored + // For dynamic prune mode, model.ExtraPhysTblID columns are requested from + // storage and used for physical table id + // For static prune mode, model.ExtraPhysTblID is still sent to storage/Protobuf + // but could be filled in by the partitions TableReaderExecutor + // due to issues with chunk handling between the TableReaderExecutor and the + // SelectReader result. + tblID2PhysTblIDColIdx map[int64]int +} + +// Open implements the Executor Open interface. 
+func (e *SelectLockExec) Open(ctx context.Context) error { + if len(e.tblID2PhysTblIDCol) > 0 { + e.tblID2PhysTblIDColIdx = make(map[int64]int) + cols := e.Schema().Columns + for i := len(cols) - 1; i >= 0; i-- { + if cols[i].ID == model.ExtraPhysTblID { + for tblID, col := range e.tblID2PhysTblIDCol { + if cols[i].UniqueID == col.UniqueID { + e.tblID2PhysTblIDColIdx[tblID] = i + break + } + } + } + } + } + return e.BaseExecutor.Open(ctx) +} + +// Next implements the Executor Next interface. +func (e *SelectLockExec) Next(ctx context.Context, req *chunk.Chunk) error { + req.GrowAndReset(e.MaxChunkSize()) + err := exec.Next(ctx, e.Children(0), req) + if err != nil { + return err + } + // If there's no handle or it's not a `SELECT FOR UPDATE` statement. + if len(e.tblID2Handle) == 0 || (!plannercore.IsSelectForUpdateLockType(e.Lock.LockType)) { + return nil + } + + if req.NumRows() > 0 { + iter := chunk.NewIterator4Chunk(req) + for row := iter.Begin(); row != iter.End(); row = iter.Next() { + for tblID, cols := range e.tblID2Handle { + for _, col := range cols { + handle, err := col.BuildHandle(row) + if err != nil { + return err + } + physTblID := tblID + if physTblColIdx, ok := e.tblID2PhysTblIDColIdx[tblID]; ok { + physTblID = row.GetInt64(physTblColIdx) + if physTblID == 0 { + // select * from t1 left join t2 on t1.c = t2.c for update + // The join right side might be added NULL in left join + // In that case, physTblID is 0, so skip adding the lock. + // + // Note, we can't distinguish whether it's the left join case, + // or a bug that TiKV return without correct physical ID column. + continue + } + } + e.keys = append(e.keys, tablecodec.EncodeRowKeyWithHandle(physTblID, handle)) + } + } + } + return nil + } + lockWaitTime := e.Ctx().GetSessionVars().LockWaitTimeout + if e.Lock.LockType == ast.SelectLockForUpdateNoWait { + lockWaitTime = tikvstore.LockNoWait + } else if e.Lock.LockType == ast.SelectLockForUpdateWaitN { + lockWaitTime = int64(e.Lock.WaitSec) * 1000 + } + + for id := range e.tblID2Handle { + e.UpdateDeltaForTableID(id) + } + lockCtx, err := newLockCtx(e.Ctx(), lockWaitTime, len(e.keys)) + if err != nil { + return err + } + return doLockKeys(ctx, e.Ctx(), lockCtx, e.keys...) 
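Aside: SelectLockExec.Next translates the lock clause into a wait budget: the session default, a no-wait sentinel, or WAIT <n> seconds converted to milliseconds. A sketch of that mapping; the sentinel constant below is a stand-in, the real one is tikvstore.LockNoWait:

package main

import "fmt"

// lockNoWait is a hypothetical stand-in for tikvstore.LockNoWait; the real
// value is a sentinel understood by the TiKV client.
const lockNoWait = int64(-1)

// resolveLockWaitTime mirrors how SelectLockExec.Next picks the wait budget.
func resolveLockWaitTime(sessionDefaultMs int64, noWait bool, waitSec uint64) int64 {
	switch {
	case noWait:
		return lockNoWait
	case waitSec > 0:
		return int64(waitSec) * 1000 // SELECT ... FOR UPDATE WAIT n is given in seconds
	default:
		return sessionDefaultMs
	}
}

func main() {
	fmt.Println(resolveLockWaitTime(50_000, false, 0)) // session default
	fmt.Println(resolveLockWaitTime(50_000, true, 0))  // NOWAIT sentinel
	fmt.Println(resolveLockWaitTime(50_000, false, 3)) // 3000 ms
}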
+} + +func newLockCtx(sctx sessionctx.Context, lockWaitTime int64, numKeys int) (*tikvstore.LockCtx, error) { + seVars := sctx.GetSessionVars() + forUpdateTS, err := sessiontxn.GetTxnManager(sctx).GetStmtForUpdateTS() + if err != nil { + return nil, err + } + lockCtx := tikvstore.NewLockCtx(forUpdateTS, lockWaitTime, seVars.StmtCtx.GetLockWaitStartTime()) + lockCtx.Killed = &seVars.SQLKiller.Signal + lockCtx.PessimisticLockWaited = &seVars.StmtCtx.PessimisticLockWaited + lockCtx.LockKeysDuration = &seVars.StmtCtx.LockKeysDuration + lockCtx.LockKeysCount = &seVars.StmtCtx.LockKeysCount + lockCtx.LockExpired = &seVars.TxnCtx.LockExpire + lockCtx.ResourceGroupTagger = func(req *kvrpcpb.PessimisticLockRequest) []byte { + if req == nil { + return nil + } + if len(req.Mutations) == 0 { + return nil + } + if mutation := req.Mutations[0]; mutation != nil { + label := resourcegrouptag.GetResourceGroupLabelByKey(mutation.Key) + normalized, digest := seVars.StmtCtx.SQLDigest() + if len(normalized) == 0 { + return nil + } + _, planDigest := seVars.StmtCtx.GetPlanDigest() + return resourcegrouptag.EncodeResourceGroupTag(digest, planDigest, label) + } + return nil + } + lockCtx.OnDeadlock = func(deadlock *tikverr.ErrDeadlock) { + cfg := config.GetGlobalConfig() + if deadlock.IsRetryable && !cfg.PessimisticTxn.DeadlockHistoryCollectRetryable { + return + } + rec := deadlockhistory.ErrDeadlockToDeadlockRecord(deadlock) + deadlockhistory.GlobalDeadlockHistory.Push(rec) + } + if lockCtx.ForUpdateTS > 0 && seVars.AssertionLevel != variable.AssertionLevelOff { + lockCtx.InitCheckExistence(numKeys) + } + return lockCtx, nil +} + +// doLockKeys is the main entry for pessimistic lock keys +// waitTime means the lock operation will wait in milliseconds if target key is already +// locked by others. used for (select for update nowait) situation +func doLockKeys(ctx context.Context, se sessionctx.Context, lockCtx *tikvstore.LockCtx, keys ...kv.Key) error { + sessVars := se.GetSessionVars() + sctx := sessVars.StmtCtx + if !sctx.InUpdateStmt && !sctx.InDeleteStmt { + atomic.StoreUint32(&se.GetSessionVars().TxnCtx.ForUpdate, 1) + } + // Lock keys only once when finished fetching all results. + txn, err := se.Txn(true) + if err != nil { + return err + } + + // Skip the temporary table keys. + keys = filterTemporaryTableKeys(sessVars, keys) + + keys = filterLockTableKeys(sessVars.StmtCtx, keys) + var lockKeyStats *tikvutil.LockKeysDetails + ctx = context.WithValue(ctx, tikvutil.LockKeysDetailCtxKey, &lockKeyStats) + err = txn.LockKeys(tikvutil.SetSessionID(ctx, se.GetSessionVars().ConnectionID), lockCtx, keys...) 
+ if lockKeyStats != nil { + sctx.MergeLockKeysExecDetails(lockKeyStats) + } + return err +} + +func filterTemporaryTableKeys(vars *variable.SessionVars, keys []kv.Key) []kv.Key { + txnCtx := vars.TxnCtx + if txnCtx == nil || txnCtx.TemporaryTables == nil { + return keys + } + + newKeys := keys[:0:len(keys)] + for _, key := range keys { + tblID := tablecodec.DecodeTableID(key) + if _, ok := txnCtx.TemporaryTables[tblID]; !ok { + newKeys = append(newKeys, key) + } + } + return newKeys +} + +func filterLockTableKeys(stmtCtx *stmtctx.StatementContext, keys []kv.Key) []kv.Key { + if len(stmtCtx.LockTableIDs) == 0 { + return keys + } + newKeys := keys[:0:len(keys)] + for _, key := range keys { + tblID := tablecodec.DecodeTableID(key) + if _, ok := stmtCtx.LockTableIDs[tblID]; ok { + newKeys = append(newKeys, key) + } + } + return newKeys +} + +// LimitExec represents limit executor +// It ignores 'Offset' rows from src, then returns 'Count' rows at maximum. +type LimitExec struct { + exec.BaseExecutor + + begin uint64 + end uint64 + cursor uint64 + + // meetFirstBatch represents whether we have met the first valid Chunk from child. + meetFirstBatch bool + + childResult *chunk.Chunk + + // columnIdxsUsedByChild keep column indexes of child executor used for inline projection + columnIdxsUsedByChild []int + + // Log the close time when opentracing is enabled. + span opentracing.Span +} + +// Next implements the Executor Next interface. +func (e *LimitExec) Next(ctx context.Context, req *chunk.Chunk) error { + req.Reset() + if e.cursor >= e.end { + return nil + } + for !e.meetFirstBatch { + // transfer req's requiredRows to childResult and then adjust it in childResult + e.childResult = e.childResult.SetRequiredRows(req.RequiredRows(), e.MaxChunkSize()) + err := exec.Next(ctx, e.Children(0), e.adjustRequiredRows(e.childResult)) + if err != nil { + return err + } + batchSize := uint64(e.childResult.NumRows()) + // no more data. + if batchSize == 0 { + return nil + } + if newCursor := e.cursor + batchSize; newCursor >= e.begin { + e.meetFirstBatch = true + begin, end := e.begin-e.cursor, batchSize + if newCursor > e.end { + end = e.end - e.cursor + } + e.cursor += end + if begin == end { + break + } + if e.columnIdxsUsedByChild != nil { + req.Append(e.childResult.Prune(e.columnIdxsUsedByChild), int(begin), int(end)) + } else { + req.Append(e.childResult, int(begin), int(end)) + } + return nil + } + e.cursor += batchSize + } + e.childResult.Reset() + e.childResult = e.childResult.SetRequiredRows(req.RequiredRows(), e.MaxChunkSize()) + e.adjustRequiredRows(e.childResult) + err := exec.Next(ctx, e.Children(0), e.childResult) + if err != nil { + return err + } + batchSize := uint64(e.childResult.NumRows()) + // no more data. + if batchSize == 0 { + return nil + } + if e.cursor+batchSize > e.end { + e.childResult.TruncateTo(int(e.end - e.cursor)) + batchSize = e.end - e.cursor + } + e.cursor += batchSize + + if e.columnIdxsUsedByChild != nil { + for i, childIdx := range e.columnIdxsUsedByChild { + if err = req.SwapColumn(i, e.childResult, childIdx); err != nil { + return err + } + } + } else { + req.SwapColumns(e.childResult) + } + return nil +} + +// Open implements the Executor Open interface. 
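Aside: filterTemporaryTableKeys and filterLockTableKeys filter the key slice in place by re-slicing it to zero length with the capacity pinned (keys[:0:len(keys)]), so append reuses the original backing array without allocating. The idiom in isolation:

package main

import "fmt"

// filterEven keeps only even values, reusing the input's backing array the
// same way the key filters do: length is reset to zero while the capacity is
// pinned to the original length, so append overwrites the slice in place.
// This is safe because the write index never passes the read index.
func filterEven(nums []int) []int {
	out := nums[:0:len(nums)]
	for _, n := range nums {
		if n%2 == 0 {
			out = append(out, n)
		}
	}
	return out
}

func main() {
	nums := []int{1, 2, 3, 4, 6, 7, 8}
	fmt.Println(filterEven(nums)) // [2 4 6 8]
}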
+func (e *LimitExec) Open(ctx context.Context) error { + if err := e.BaseExecutor.Open(ctx); err != nil { + return err + } + e.childResult = exec.TryNewCacheChunk(e.Children(0)) + e.cursor = 0 + e.meetFirstBatch = e.begin == 0 + if span := opentracing.SpanFromContext(ctx); span != nil && span.Tracer() != nil { + e.span = span + } + return nil +} + +// Close implements the Executor Close interface. +func (e *LimitExec) Close() error { + start := time.Now() + + e.childResult = nil + err := e.BaseExecutor.Close() + + elapsed := time.Since(start) + if elapsed > time.Millisecond { + logutil.BgLogger().Info("limit executor close takes a long time", + zap.Duration("elapsed", elapsed)) + if e.span != nil { + span1 := e.span.Tracer().StartSpan("limitExec.Close", opentracing.ChildOf(e.span.Context()), opentracing.StartTime(start)) + defer span1.Finish() + } + } + return err +} + +func (e *LimitExec) adjustRequiredRows(chk *chunk.Chunk) *chunk.Chunk { + // the limit of maximum number of rows the LimitExec should read + limitTotal := int(e.end - e.cursor) + + var limitRequired int + if e.cursor < e.begin { + // if cursor is less than begin, it have to read (begin-cursor) rows to ignore + // and then read chk.RequiredRows() rows to return, + // so the limit is (begin-cursor)+chk.RequiredRows(). + limitRequired = int(e.begin) - int(e.cursor) + chk.RequiredRows() + } else { + // if cursor is equal or larger than begin, just read chk.RequiredRows() rows to return. + limitRequired = chk.RequiredRows() + } + + return chk.SetRequiredRows(min(limitTotal, limitRequired), e.MaxChunkSize()) +} + +func init() { + // While doing optimization in the plan package, we need to execute uncorrelated subquery, + // but the plan package cannot import the executor package because of the dependency cycle. + // So we assign a function implemented in the executor package to the plan package to avoid the dependency cycle. + plannercore.EvalSubqueryFirstRow = func(ctx context.Context, p plannercore.PhysicalPlan, is infoschema.InfoSchema, sctx sessionctx.Context) ([]types.Datum, error) { + defer func(begin time.Time) { + s := sctx.GetSessionVars() + s.StmtCtx.SetSkipPlanCache(errors.New("query has uncorrelated sub-queries is un-cacheable")) + s.RewritePhaseInfo.PreprocessSubQueries++ + s.RewritePhaseInfo.DurationPreprocessSubQuery += time.Since(begin) + }(time.Now()) + + r, ctx := tracing.StartRegionEx(ctx, "executor.EvalSubQuery") + defer r.End() + + e := newExecutorBuilder(sctx, is, nil) + executor := e.build(p) + if e.err != nil { + return nil, e.err + } + err := exec.Open(ctx, executor) + defer func() { terror.Log(exec.Close(executor)) }() + if err != nil { + return nil, err + } + if pi, ok := sctx.(processinfoSetter); ok { + // Before executing the sub-query, we need update the processinfo to make the progress bar more accurate. + // because the sub-query may take a long time. + pi.UpdateProcessInfo() + } + chk := exec.TryNewCacheChunk(executor) + err = exec.Next(ctx, executor, chk) + if err != nil { + return nil, err + } + if chk.NumRows() == 0 { + return nil, nil + } + row := chk.GetRow(0).GetDatumRow(exec.RetTypes(executor)) + return row, err + } +} + +// TableDualExec represents a dual table executor. +type TableDualExec struct { + exec.BaseExecutor + + // numDualRows can only be 0 or 1. + numDualRows int + numReturned int +} + +// Open implements the Executor Open interface. +func (e *TableDualExec) Open(context.Context) error { + e.numReturned = 0 + return nil +} + +// Next implements the Executor Next interface. 
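Aside: adjustRequiredRows asks the child for the rows that will be skipped plus the rows the parent chunk wants, capped by what the LIMIT may still read. The arithmetic, extracted into a toy helper with a worked example for LIMIT 10, 5:

package main

import "fmt"

// requiredRowsForLimit reproduces the arithmetic in LimitExec.adjustRequiredRows:
// before the offset is consumed the child must also produce the rows we are
// going to skip, and we never ask for more than what the LIMIT still allows.
func requiredRowsForLimit(begin, end, cursor uint64, chunkRequired int) int {
	limitTotal := int(end - cursor) // rows this LimitExec may still read
	limitRequired := chunkRequired
	if cursor < begin {
		limitRequired = int(begin-cursor) + chunkRequired // rows to skip + rows to return
	}
	return min(limitTotal, limitRequired)
}

func main() {
	// LIMIT 10, 5 => begin=10, end=15. With cursor=0 and a chunk asking for 8
	// rows, the child should be asked for 10 (skip) + 8 = 18, capped at the
	// 15 rows the limit may ever read.
	fmt.Println(requiredRowsForLimit(10, 15, 0, 8)) // 15
	// Once the offset is consumed (cursor=10), only the chunk's demand
	// matters, capped at the 5 remaining rows.
	fmt.Println(requiredRowsForLimit(10, 15, 10, 8)) // 5
}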
+func (e *TableDualExec) Next(_ context.Context, req *chunk.Chunk) error { + req.Reset() + if e.numReturned >= e.numDualRows { + return nil + } + if e.Schema().Len() == 0 { + req.SetNumVirtualRows(1) + } else { + for i := range e.Schema().Columns { + req.AppendNull(i) + } + } + e.numReturned = e.numDualRows + return nil +} + +// SelectionExec represents a filter executor. +type SelectionExec struct { + exec.BaseExecutor + + batched bool + filters []expression.Expression + selected []bool + inputIter *chunk.Iterator4Chunk + inputRow chunk.Row + childResult *chunk.Chunk + + memTracker *memory.Tracker +} + +// Open implements the Executor Open interface. +func (e *SelectionExec) Open(ctx context.Context) error { + if err := e.BaseExecutor.Open(ctx); err != nil { + return err + } + failpoint.Inject("mockSelectionExecBaseExecutorOpenReturnedError", func(val failpoint.Value) { + if val.(bool) { + failpoint.Return(errors.New("mock SelectionExec.baseExecutor.Open returned error")) + } + }) + return e.open(ctx) +} + +func (e *SelectionExec) open(context.Context) error { + if e.memTracker != nil { + e.memTracker.Reset() + } else { + e.memTracker = memory.NewTracker(e.ID(), -1) + } + e.memTracker.AttachTo(e.Ctx().GetSessionVars().StmtCtx.MemTracker) + e.childResult = exec.TryNewCacheChunk(e.Children(0)) + e.memTracker.Consume(e.childResult.MemoryUsage()) + e.batched = expression.Vectorizable(e.filters) + if e.batched { + e.selected = make([]bool, 0, chunk.InitialCapacity) + } + e.inputIter = chunk.NewIterator4Chunk(e.childResult) + e.inputRow = e.inputIter.End() + return nil +} + +// Close implements plannercore.Plan Close interface. +func (e *SelectionExec) Close() error { + if e.childResult != nil { + e.memTracker.Consume(-e.childResult.MemoryUsage()) + e.childResult = nil + } + e.selected = nil + return e.BaseExecutor.Close() +} + +// Next implements the Executor Next interface. +func (e *SelectionExec) Next(ctx context.Context, req *chunk.Chunk) error { + req.GrowAndReset(e.MaxChunkSize()) + + if !e.batched { + return e.unBatchedNext(ctx, req) + } + + for { + for ; e.inputRow != e.inputIter.End(); e.inputRow = e.inputIter.Next() { + if req.IsFull() { + return nil + } + + if !e.selected[e.inputRow.Idx()] { + continue + } + + req.AppendRow(e.inputRow) + } + mSize := e.childResult.MemoryUsage() + err := exec.Next(ctx, e.Children(0), e.childResult) + e.memTracker.Consume(e.childResult.MemoryUsage() - mSize) + if err != nil { + return err + } + // no more data. + if e.childResult.NumRows() == 0 { + return nil + } + e.selected, err = expression.VectorizedFilter(e.Ctx(), e.filters, e.inputIter, e.selected) + if err != nil { + return err + } + e.inputRow = e.inputIter.Begin() + } +} + +// unBatchedNext filters input rows one by one and returns once an input row is selected. +// For sql with "SETVAR" in filter and "GETVAR" in projection, for example: "SELECT @a FROM t WHERE (@a := 2) > 0", +// we have to set batch size to 1 to do the evaluation of filter and projection. 
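Aside: SelectionExec.Next evaluates the filters over the whole child chunk first (expression.VectorizedFilter fills the reusable selected slice) and only then copies the qualifying rows into the result. The two-phase shape in miniature:

package main

import "fmt"

// vectorizedFilter evaluates the predicate over a whole batch first, filling a
// reusable []bool, then copies the selected rows out — the same two-phase
// shape SelectionExec.Next uses in its batched path.
func vectorizedFilter(batch []int, pred func(int) bool, selected []bool, out []int) ([]bool, []int) {
	selected = selected[:0]
	for _, v := range batch {
		selected = append(selected, pred(v))
	}
	for i, keep := range selected {
		if keep {
			out = append(out, batch[i])
		}
	}
	return selected, out
}

func main() {
	var selected []bool
	var out []int
	batch := []int{3, 10, 7, 42, 5}
	selected, out = vectorizedFilter(batch, func(v int) bool { return v > 6 }, selected, out)
	fmt.Println(selected) // [false true true true false]
	fmt.Println(out)      // [10 7 42]
}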
+func (e *SelectionExec) unBatchedNext(ctx context.Context, chk *chunk.Chunk) error { + for { + for ; e.inputRow != e.inputIter.End(); e.inputRow = e.inputIter.Next() { + selected, _, err := expression.EvalBool(e.Ctx(), e.filters, e.inputRow) + if err != nil { + return err + } + if selected { + chk.AppendRow(e.inputRow) + e.inputRow = e.inputIter.Next() + return nil + } + } + mSize := e.childResult.MemoryUsage() + err := exec.Next(ctx, e.Children(0), e.childResult) + e.memTracker.Consume(e.childResult.MemoryUsage() - mSize) + if err != nil { + return err + } + e.inputRow = e.inputIter.Begin() + // no more data. + if e.childResult.NumRows() == 0 { + return nil + } + } +} + +// TableScanExec is a table scan executor without result fields. +type TableScanExec struct { + exec.BaseExecutor + + t table.Table + columns []*model.ColumnInfo + virtualTableChunkList *chunk.List + virtualTableChunkIdx int +} + +// Next implements the Executor Next interface. +func (e *TableScanExec) Next(ctx context.Context, req *chunk.Chunk) error { + req.GrowAndReset(e.MaxChunkSize()) + return e.nextChunk4InfoSchema(ctx, req) +} + +func (e *TableScanExec) nextChunk4InfoSchema(ctx context.Context, chk *chunk.Chunk) error { + chk.GrowAndReset(e.MaxChunkSize()) + if e.virtualTableChunkList == nil { + e.virtualTableChunkList = chunk.NewList(exec.RetTypes(e), e.InitCap(), e.MaxChunkSize()) + columns := make([]*table.Column, e.Schema().Len()) + for i, colInfo := range e.columns { + columns[i] = table.ToColumn(colInfo) + } + mutableRow := chunk.MutRowFromTypes(exec.RetTypes(e)) + type tableIter interface { + IterRecords(ctx context.Context, sctx sessionctx.Context, cols []*table.Column, fn table.RecordIterFunc) error + } + err := (e.t.(tableIter)).IterRecords(ctx, e.Ctx(), columns, func(_ kv.Handle, rec []types.Datum, cols []*table.Column) (bool, error) { + mutableRow.SetDatums(rec...) + e.virtualTableChunkList.AppendRow(mutableRow.ToRow()) + return true, nil + }) + if err != nil { + return err + } + } + // no more data. + if e.virtualTableChunkIdx >= e.virtualTableChunkList.NumChunks() { + return nil + } + virtualTableChunk := e.virtualTableChunkList.GetChunk(e.virtualTableChunkIdx) + e.virtualTableChunkIdx++ + chk.SwapColumns(virtualTableChunk) + return nil +} + +// Open implements the Executor Open interface. +func (e *TableScanExec) Open(context.Context) error { + e.virtualTableChunkList = nil + return nil +} + +// MaxOneRowExec checks if the number of rows that a query returns is at maximum one. +// It's built from subquery expression. +type MaxOneRowExec struct { + exec.BaseExecutor + + evaluated bool +} + +// Open implements the Executor Open interface. +func (e *MaxOneRowExec) Open(ctx context.Context) error { + if err := e.BaseExecutor.Open(ctx); err != nil { + return err + } + e.evaluated = false + return nil +} + +// Next implements the Executor Next interface. 
+func (e *MaxOneRowExec) Next(ctx context.Context, req *chunk.Chunk) error { + req.Reset() + if e.evaluated { + return nil + } + e.evaluated = true + err := exec.Next(ctx, e.Children(0), req) + if err != nil { + return err + } + + if num := req.NumRows(); num == 0 { + for i := range e.Schema().Columns { + req.AppendNull(i) + } + return nil + } else if num != 1 { + return exeerrors.ErrSubqueryMoreThan1Row + } + + childChunk := exec.TryNewCacheChunk(e.Children(0)) + err = exec.Next(ctx, e.Children(0), childChunk) + if err != nil { + return err + } + if childChunk.NumRows() != 0 { + return exeerrors.ErrSubqueryMoreThan1Row + } + + return nil +} + +// UnionExec pulls all it's children's result and returns to its parent directly. +// A "resultPuller" is started for every child to pull result from that child and push it to the "resultPool", the used +// "Chunk" is obtained from the corresponding "resourcePool". All resultPullers are running concurrently. +// +// +----------------+ +// +---> resourcePool 1 ---> | resultPuller 1 |-----+ +// | +----------------+ | +// | | +// | +----------------+ v +// +---> resourcePool 2 ---> | resultPuller 2 |-----> resultPool ---+ +// | +----------------+ ^ | +// | ...... | | +// | +----------------+ | | +// +---> resourcePool n ---> | resultPuller n |-----+ | +// | +----------------+ | +// | | +// | +-------------+ | +// |--------------------------| main thread | <---------------------+ +// +-------------+ +type UnionExec struct { + exec.BaseExecutor + concurrency int + childIDChan chan int + + stopFetchData atomic.Value + + finished chan struct{} + resourcePools []chan *chunk.Chunk + resultPool chan *unionWorkerResult + + results []*chunk.Chunk + wg sync.WaitGroup + initialized bool + mu struct { + *syncutil.Mutex + maxOpenedChildID int + } + + childInFlightForTest int32 +} + +// unionWorkerResult stores the result for a union worker. +// A "resultPuller" is started for every child to pull result from that child, unionWorkerResult is used to store that pulled result. +// "src" is used for Chunk reuse: after pulling result from "resultPool", main-thread must push a valid unused Chunk to "src" to +// enable the corresponding "resultPuller" continue to work. +type unionWorkerResult struct { + chk *chunk.Chunk + err error + src chan<- *chunk.Chunk +} + +func (e *UnionExec) waitAllFinished() { + e.wg.Wait() + close(e.resultPool) +} + +// Open implements the Executor Open interface. 
+func (e *UnionExec) Open(context.Context) error { + e.stopFetchData.Store(false) + e.initialized = false + e.finished = make(chan struct{}) + e.mu.Mutex = &syncutil.Mutex{} + e.mu.maxOpenedChildID = -1 + return nil +} + +func (e *UnionExec) initialize(ctx context.Context) { + if e.concurrency > e.ChildrenLen() { + e.concurrency = e.ChildrenLen() + } + for i := 0; i < e.concurrency; i++ { + e.results = append(e.results, exec.NewFirstChunk(e.Children(0))) + } + e.resultPool = make(chan *unionWorkerResult, e.concurrency) + e.resourcePools = make([]chan *chunk.Chunk, e.concurrency) + e.childIDChan = make(chan int, e.ChildrenLen()) + for i := 0; i < e.concurrency; i++ { + e.resourcePools[i] = make(chan *chunk.Chunk, 1) + e.resourcePools[i] <- e.results[i] + e.wg.Add(1) + go e.resultPuller(ctx, i) + } + for i := 0; i < e.ChildrenLen(); i++ { + e.childIDChan <- i + } + close(e.childIDChan) + go e.waitAllFinished() +} + +func (e *UnionExec) resultPuller(ctx context.Context, workerID int) { + result := &unionWorkerResult{ + err: nil, + chk: nil, + src: e.resourcePools[workerID], + } + defer func() { + if r := recover(); r != nil { + logutil.Logger(ctx).Error("resultPuller panicked", zap.Any("recover", r), zap.Stack("stack")) + result.err = util.GetRecoverError(r) + e.resultPool <- result + e.stopFetchData.Store(true) + } + e.wg.Done() + }() + for childID := range e.childIDChan { + e.mu.Lock() + if childID > e.mu.maxOpenedChildID { + e.mu.maxOpenedChildID = childID + } + e.mu.Unlock() + if err := exec.Open(ctx, e.Children(childID)); err != nil { + result.err = err + e.stopFetchData.Store(true) + e.resultPool <- result + } + failpoint.Inject("issue21441", func() { + atomic.AddInt32(&e.childInFlightForTest, 1) + }) + for { + if e.stopFetchData.Load().(bool) { + return + } + select { + case <-e.finished: + return + case result.chk = <-e.resourcePools[workerID]: + } + result.err = exec.Next(ctx, e.Children(childID), result.chk) + if result.err == nil && result.chk.NumRows() == 0 { + e.resourcePools[workerID] <- result.chk + break + } + failpoint.Inject("issue21441", func() { + if int(atomic.LoadInt32(&e.childInFlightForTest)) > e.concurrency { + panic("the count of child in flight is larger than e.concurrency unexpectedly") + } + }) + e.resultPool <- result + if result.err != nil { + e.stopFetchData.Store(true) + return + } + } + failpoint.Inject("issue21441", func() { + atomic.AddInt32(&e.childInFlightForTest, -1) + }) + } +} + +// Next implements the Executor Next interface. +func (e *UnionExec) Next(ctx context.Context, req *chunk.Chunk) error { + req.GrowAndReset(e.MaxChunkSize()) + if !e.initialized { + e.initialize(ctx) + e.initialized = true + } + result, ok := <-e.resultPool + if !ok { + return nil + } + if result.err != nil { + return errors.Trace(result.err) + } + + if result.chk.NumCols() != req.NumCols() { + return errors.Errorf("Internal error: UnionExec chunk column count mismatch, req: %d, result: %d", + req.NumCols(), result.chk.NumCols()) + } + req.SwapColumns(result.chk) + result.src <- result.chk + return nil +} + +// Close implements the Executor Close interface. +func (e *UnionExec) Close() error { + if e.finished != nil { + close(e.finished) + } + e.results = nil + if e.resultPool != nil { + channel.Clear(e.resultPool) + } + e.resourcePools = nil + if e.childIDChan != nil { + channel.Clear(e.childIDChan) + } + // We do not need to acquire the e.mu.Lock since all the resultPuller can be + // promised to exit when reaching here (e.childIDChan been closed). 
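Aside: UnionExec's pullers and the main thread recycle chunks through per-worker resource channels: a puller blocks until the consumer hands its chunk back via result.src. A runnable miniature of that recycling loop, with []int standing in for chunk.Chunk:

package main

import (
	"fmt"
	"sync"
)

// result mirrors unionWorkerResult: the payload plus the channel the consumer
// must return the buffer to so the producer can refill it.
type result struct {
	buf []int
	src chan<- []int
}

func main() {
	children := [][]int{{1, 2}, {3, 4, 5}, {6}}

	resultPool := make(chan result, len(children))
	var wg sync.WaitGroup

	for _, rows := range children {
		// Each puller gets a 1-slot resource pool holding its reusable buffer.
		resourcePool := make(chan []int, 1)
		resourcePool <- make([]int, 0, 2)

		wg.Add(1)
		go func(rows []int) {
			defer wg.Done()
			for len(rows) > 0 {
				buf := <-resourcePool // wait until the consumer hands the buffer back
				n := min(cap(buf), len(rows))
				buf = append(buf[:0], rows[:n]...)
				rows = rows[n:]
				resultPool <- result{buf: buf, src: resourcePool}
			}
		}(rows)
	}

	go func() { wg.Wait(); close(resultPool) }()

	total := 0
	for r := range resultPool {
		total += len(r.buf)
		r.src <- r.buf // recycle the buffer so the puller can continue
	}
	fmt.Println("rows:", total) // rows: 6
}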
+ var firstErr error + for i := 0; i <= e.mu.maxOpenedChildID; i++ { + if err := exec.Close(e.Children(i)); err != nil && firstErr == nil { + firstErr = err + } + } + return firstErr +} + +// ResetContextOfStmt resets the StmtContext and session variables. +// Before every execution, we must clear statement context. +func ResetContextOfStmt(ctx sessionctx.Context, s ast.StmtNode) (err error) { + defer func() { + if r := recover(); r != nil { + logutil.BgLogger().Warn("ResetContextOfStmt panicked", zap.Stack("stack"), zap.Any("recover", r), zap.Error(err)) + if err != nil { + err = stderrors.Join(err, util.GetRecoverError(r)) + } else { + err = util.GetRecoverError(r) + } + } + }() + vars := ctx.GetSessionVars() + for name, val := range vars.StmtCtx.SetVarHintRestore { + err := vars.SetSystemVar(name, val) + if err != nil { + logutil.BgLogger().Warn("Failed to restore the variable after SET_VAR hint", zap.String("variable name", name), zap.String("expected value", val)) + } + } + vars.StmtCtx.SetVarHintRestore = nil + var sc *stmtctx.StatementContext + if vars.TxnCtx.CouldRetry || mysql.HasCursorExistsFlag(vars.Status) { + // Must construct new statement context object, the retry history need context for every statement. + // TODO: Maybe one day we can get rid of transaction retry, then this logic can be deleted. + sc = stmtctx.NewStmtCtx() + } else { + sc = vars.InitStatementContext() + } + sc.SetTimeZone(vars.Location()) + sc.TaskID = stmtctx.AllocateTaskID() + sc.CTEStorageMap = map[int]*CTEStorages{} + sc.IsStaleness = false + sc.LockTableIDs = make(map[int64]struct{}) + sc.EnableOptimizeTrace = false + sc.OptimizeTracer = nil + sc.OptimizerCETrace = nil + sc.IsSyncStatsFailed = false + sc.IsExplainAnalyzeDML = false + // Firstly we assume that UseDynamicPruneMode can be enabled according session variable, then we will check other conditions + // in PlanBuilder.buildDataSource + if ctx.GetSessionVars().IsDynamicPartitionPruneEnabled() { + sc.UseDynamicPruneMode = true + } else { + sc.UseDynamicPruneMode = false + } + + sc.StatsLoad.Timeout = 0 + sc.StatsLoad.NeededItems = nil + sc.StatsLoad.ResultCh = nil + + sc.SysdateIsNow = ctx.GetSessionVars().SysdateIsNow + + vars.MemTracker.Detach() + vars.MemTracker.UnbindActions() + vars.MemTracker.SetBytesLimit(vars.MemQuotaQuery) + vars.MemTracker.ResetMaxConsumed() + vars.DiskTracker.Detach() + vars.DiskTracker.ResetMaxConsumed() + vars.MemTracker.SessionID.Store(vars.ConnectionID) + vars.MemTracker.Killer = &vars.SQLKiller + vars.DiskTracker.Killer = &vars.SQLKiller + vars.SQLKiller.Reset() + vars.SQLKiller.ConnID = vars.ConnectionID + vars.StmtCtx.TableStats = make(map[int64]interface{}) + + isAnalyze := false + if execStmt, ok := s.(*ast.ExecuteStmt); ok { + prepareStmt, err := plannercore.GetPreparedStmt(execStmt, vars) + if err != nil { + return err + } + _, isAnalyze = prepareStmt.PreparedAst.Stmt.(*ast.AnalyzeTableStmt) + } else if _, ok := s.(*ast.AnalyzeTableStmt); ok { + isAnalyze = true + } + if isAnalyze { + sc.InitMemTracker(memory.LabelForAnalyzeMemory, -1) + vars.MemTracker.SetBytesLimit(-1) + vars.MemTracker.AttachTo(GlobalAnalyzeMemoryTracker) + } else { + sc.InitMemTracker(memory.LabelForSQLText, -1) + } + logOnQueryExceedMemQuota := domain.GetDomain(ctx).ExpensiveQueryHandle().LogOnQueryExceedMemQuota + switch variable.OOMAction.Load() { + case variable.OOMActionCancel: + action := &memory.PanicOnExceed{ConnID: vars.ConnectionID, Killer: vars.MemTracker.Killer} + action.SetLogHook(logOnQueryExceedMemQuota) + 
vars.MemTracker.SetActionOnExceed(action) + case variable.OOMActionLog: + fallthrough + default: + action := &memory.LogOnExceed{ConnID: vars.ConnectionID} + action.SetLogHook(logOnQueryExceedMemQuota) + vars.MemTracker.SetActionOnExceed(action) + } + sc.MemTracker.SessionID.Store(vars.ConnectionID) + sc.MemTracker.AttachTo(vars.MemTracker) + sc.InitDiskTracker(memory.LabelForSQLText, -1) + globalConfig := config.GetGlobalConfig() + if variable.EnableTmpStorageOnOOM.Load() && sc.DiskTracker != nil { + sc.DiskTracker.AttachTo(vars.DiskTracker) + if GlobalDiskUsageTracker != nil { + vars.DiskTracker.AttachTo(GlobalDiskUsageTracker) + } + } + if execStmt, ok := s.(*ast.ExecuteStmt); ok { + prepareStmt, err := plannercore.GetPreparedStmt(execStmt, vars) + if err != nil { + return err + } + s = prepareStmt.PreparedAst.Stmt + sc.InitSQLDigest(prepareStmt.NormalizedSQL, prepareStmt.SQLDigest) + // For `execute stmt` SQL, should reset the SQL digest with the prepare SQL digest. + goCtx := context.Background() + if variable.EnablePProfSQLCPU.Load() && len(prepareStmt.NormalizedSQL) > 0 { + goCtx = pprof.WithLabels(goCtx, pprof.Labels("sql", FormatSQL(prepareStmt.NormalizedSQL).String())) + pprof.SetGoroutineLabels(goCtx) + } + if topsqlstate.TopSQLEnabled() && prepareStmt.SQLDigest != nil { + sc.IsSQLRegistered.Store(true) + topsql.AttachAndRegisterSQLInfo(goCtx, prepareStmt.NormalizedSQL, prepareStmt.SQLDigest, vars.InRestrictedSQL) + } + if s, ok := prepareStmt.PreparedAst.Stmt.(*ast.SelectStmt); ok { + if s.LockInfo == nil { + sc.WeakConsistency = isWeakConsistencyRead(ctx, execStmt) + } + } + } + // execute missed stmtID uses empty sql + sc.OriginalSQL = s.Text() + if explainStmt, ok := s.(*ast.ExplainStmt); ok { + sc.InExplainStmt = true + sc.ExplainFormat = explainStmt.Format + sc.InExplainAnalyzeStmt = explainStmt.Analyze + sc.IgnoreExplainIDSuffix = strings.ToLower(explainStmt.Format) == types.ExplainFormatBrief + sc.InVerboseExplain = strings.ToLower(explainStmt.Format) == types.ExplainFormatVerbose + s = explainStmt.Stmt + } else { + sc.ExplainFormat = "" + } + if explainForStmt, ok := s.(*ast.ExplainForStmt); ok { + sc.InExplainStmt = true + sc.InExplainAnalyzeStmt = true + sc.InVerboseExplain = strings.ToLower(explainForStmt.Format) == types.ExplainFormatVerbose + } + + // TODO: Many same bool variables here. + // We should set only two variables ( + // IgnoreErr and StrictSQLMode) to avoid setting the same bool variables and + // pushing them down to TiKV as flags. + + sc.InRestrictedSQL = vars.InRestrictedSQL + switch stmt := s.(type) { + // `ResetUpdateStmtCtx` and `ResetDeleteStmtCtx` may modify the flags, so we'll need to store them. + case *ast.UpdateStmt: + ResetUpdateStmtCtx(sc, stmt, vars) + case *ast.DeleteStmt: + ResetDeleteStmtCtx(sc, stmt, vars) + case *ast.InsertStmt: + sc.InInsertStmt = true + // For insert statement (not for update statement), disabling the StrictSQLMode + // should make TruncateAsWarning and DividedByZeroAsWarning, + // but should not make DupKeyAsWarning. + sc.DupKeyAsWarning = stmt.IgnoreErr + sc.BadNullAsWarning = !vars.StrictSQLMode || stmt.IgnoreErr + // see https://dev.mysql.com/doc/refman/8.0/en/out-of-range-and-overflow.html + sc.OverflowAsWarning = !vars.StrictSQLMode || stmt.IgnoreErr + sc.IgnoreNoPartition = stmt.IgnoreErr + sc.ErrAutoincReadFailedAsWarning = stmt.IgnoreErr + sc.DividedByZeroAsWarning = !vars.StrictSQLMode || stmt.IgnoreErr + sc.Priority = stmt.Priority + sc.SetTypeFlags(sc.TypeFlags(). 
+ WithTruncateAsWarning(!vars.StrictSQLMode || stmt.IgnoreErr). + WithIgnoreInvalidDateErr(vars.SQLMode.HasAllowInvalidDatesMode()). + WithIgnoreZeroInDate(!vars.SQLMode.HasNoZeroInDateMode() || + !vars.SQLMode.HasNoZeroDateMode() || !vars.StrictSQLMode || stmt.IgnoreErr || + vars.SQLMode.HasAllowInvalidDatesMode())) + case *ast.CreateTableStmt, *ast.AlterTableStmt: + sc.InCreateOrAlterStmt = true + sc.SetTypeFlags(sc.TypeFlags(). + WithTruncateAsWarning(!vars.StrictSQLMode). + WithIgnoreInvalidDateErr(vars.SQLMode.HasAllowInvalidDatesMode()). + WithIgnoreZeroInDate(!vars.SQLMode.HasNoZeroInDateMode() || !vars.StrictSQLMode || + vars.SQLMode.HasAllowInvalidDatesMode()). + WithIgnoreZeroDateErr(!vars.SQLMode.HasNoZeroDateMode() || !vars.StrictSQLMode)) + + case *ast.LoadDataStmt: + sc.InLoadDataStmt = true + // return warning instead of error when load data meet no partition for value + sc.IgnoreNoPartition = true + case *ast.SelectStmt: + sc.InSelectStmt = true + + // see https://dev.mysql.com/doc/refman/5.7/en/sql-mode.html#sql-mode-strict + // said "For statements such as SELECT that do not change data, invalid values + // generate a warning in strict mode, not an error." + // and https://dev.mysql.com/doc/refman/5.7/en/out-of-range-and-overflow.html + sc.OverflowAsWarning = true + + // Return warning for truncate error in selection. + sc.SetTypeFlags(sc.TypeFlags(). + WithTruncateAsWarning(true). + WithIgnoreZeroInDate(true). + WithIgnoreInvalidDateErr(vars.SQLMode.HasAllowInvalidDatesMode())) + if opts := stmt.SelectStmtOpts; opts != nil { + sc.Priority = opts.Priority + sc.NotFillCache = !opts.SQLCache + } + sc.WeakConsistency = isWeakConsistencyRead(ctx, stmt) + case *ast.SetOprStmt: + sc.InSelectStmt = true + sc.OverflowAsWarning = true + sc.SetTypeFlags(sc.TypeFlags(). + WithTruncateAsWarning(true). + WithIgnoreZeroInDate(true). + WithIgnoreInvalidDateErr(vars.SQLMode.HasAllowInvalidDatesMode())) + case *ast.ShowStmt: + sc.SetTypeFlags(sc.TypeFlags(). + WithIgnoreTruncateErr(true). + WithIgnoreZeroInDate(true). + WithIgnoreInvalidDateErr(vars.SQLMode.HasAllowInvalidDatesMode())) + if stmt.Tp == ast.ShowWarnings || stmt.Tp == ast.ShowErrors || stmt.Tp == ast.ShowSessionStates { + sc.InShowWarning = true + sc.SetWarnings(vars.StmtCtx.GetWarnings()) + } + case *ast.SplitRegionStmt: + sc.SetTypeFlags(sc.TypeFlags(). + WithIgnoreTruncateErr(false). + WithIgnoreZeroInDate(true). + WithIgnoreInvalidDateErr(vars.SQLMode.HasAllowInvalidDatesMode())) + case *ast.SetSessionStatesStmt: + sc.InSetSessionStatesStmt = true + sc.SetTypeFlags(sc.TypeFlags(). + WithIgnoreTruncateErr(true). + WithIgnoreZeroInDate(true). + WithIgnoreInvalidDateErr(vars.SQLMode.HasAllowInvalidDatesMode())) + default: + sc.SetTypeFlags(sc.TypeFlags(). + WithIgnoreTruncateErr(true). + WithIgnoreZeroInDate(true). + WithIgnoreInvalidDateErr(vars.SQLMode.HasAllowInvalidDatesMode())) + } + + sc.SetTypeFlags(sc.TypeFlags(). + WithSkipUTF8Check(vars.SkipUTF8Check). + WithSkipSACIICheck(vars.SkipASCIICheck). + WithSkipUTF8MB4Check(!globalConfig.Instance.CheckMb4ValueInUTF8.Load()). + // WithAllowNegativeToUnsigned with false value indicates values less than 0 should be clipped to 0 for unsigned integer types. + // This is the case for `insert`, `update`, `alter table`, `create table` and `load data infile` statements, when not in strict SQL mode. 
+ // see https://dev.mysql.com/doc/refman/5.7/en/out-of-range-and-overflow.html + WithAllowNegativeToUnsigned(!sc.InInsertStmt && !sc.InLoadDataStmt && !sc.InUpdateStmt && !sc.InCreateOrAlterStmt), + ) + + vars.PlanCacheParams.Reset() + if priority := mysql.PriorityEnum(atomic.LoadInt32(&variable.ForcePriority)); priority != mysql.NoPriority { + sc.Priority = priority + } + if vars.StmtCtx.LastInsertID > 0 { + sc.PrevLastInsertID = vars.StmtCtx.LastInsertID + } else { + sc.PrevLastInsertID = vars.StmtCtx.PrevLastInsertID + } + sc.PrevAffectedRows = 0 + if vars.StmtCtx.InUpdateStmt || vars.StmtCtx.InDeleteStmt || vars.StmtCtx.InInsertStmt || vars.StmtCtx.InSetSessionStatesStmt { + sc.PrevAffectedRows = int64(vars.StmtCtx.AffectedRows()) + } else if vars.StmtCtx.InSelectStmt { + sc.PrevAffectedRows = -1 + } + if globalConfig.Instance.EnableCollectExecutionInfo.Load() { + // In ExplainFor case, RuntimeStatsColl should not be reset for reuse, + // because ExplainFor need to display the last statement information. + reuseObj := vars.StmtCtx.RuntimeStatsColl + if _, ok := s.(*ast.ExplainForStmt); ok { + reuseObj = nil + } + sc.RuntimeStatsColl = execdetails.NewRuntimeStatsColl(reuseObj) + } + + sc.TblInfo2UnionScan = make(map[*model.TableInfo]bool) + errCount, warnCount := vars.StmtCtx.NumErrorWarnings() + vars.SysErrorCount = errCount + vars.SysWarningCount = warnCount + vars.ExchangeChunkStatus() + vars.StmtCtx = sc + vars.PrevFoundInPlanCache = vars.FoundInPlanCache + vars.FoundInPlanCache = false + vars.ClearStmtVars() + vars.PrevFoundInBinding = vars.FoundInBinding + vars.FoundInBinding = false + vars.DurationWaitTS = 0 + vars.CurrInsertBatchExtraCols = nil + vars.CurrInsertValues = chunk.Row{} + + return +} + +// ResetUpdateStmtCtx resets statement context for UpdateStmt. +func ResetUpdateStmtCtx(sc *stmtctx.StatementContext, stmt *ast.UpdateStmt, vars *variable.SessionVars) { + sc.InUpdateStmt = true + sc.DupKeyAsWarning = stmt.IgnoreErr + sc.BadNullAsWarning = !vars.StrictSQLMode || stmt.IgnoreErr + sc.DividedByZeroAsWarning = !vars.StrictSQLMode || stmt.IgnoreErr + sc.Priority = stmt.Priority + sc.IgnoreNoPartition = stmt.IgnoreErr + sc.SetTypeFlags(sc.TypeFlags(). + WithTruncateAsWarning(!vars.StrictSQLMode || stmt.IgnoreErr). + WithIgnoreInvalidDateErr(vars.SQLMode.HasAllowInvalidDatesMode()). + WithIgnoreZeroInDate(!vars.SQLMode.HasNoZeroInDateMode() || !vars.SQLMode.HasNoZeroDateMode() || + !vars.StrictSQLMode || stmt.IgnoreErr || vars.SQLMode.HasAllowInvalidDatesMode())) +} + +// ResetDeleteStmtCtx resets statement context for DeleteStmt. +func ResetDeleteStmtCtx(sc *stmtctx.StatementContext, stmt *ast.DeleteStmt, vars *variable.SessionVars) { + sc.InDeleteStmt = true + sc.DupKeyAsWarning = stmt.IgnoreErr + sc.BadNullAsWarning = !vars.StrictSQLMode || stmt.IgnoreErr + sc.DividedByZeroAsWarning = !vars.StrictSQLMode || stmt.IgnoreErr + sc.Priority = stmt.Priority + sc.SetTypeFlags(sc.TypeFlags(). + WithTruncateAsWarning(!vars.StrictSQLMode || stmt.IgnoreErr). + WithIgnoreInvalidDateErr(vars.SQLMode.HasAllowInvalidDatesMode()). 
+ WithIgnoreZeroInDate(!vars.SQLMode.HasNoZeroInDateMode() || !vars.SQLMode.HasNoZeroDateMode() || + !vars.StrictSQLMode || stmt.IgnoreErr || vars.SQLMode.HasAllowInvalidDatesMode())) +} + +func setOptionForTopSQL(sc *stmtctx.StatementContext, snapshot kv.Snapshot) { + if snapshot == nil { + return + } + snapshot.SetOption(kv.ResourceGroupTagger, sc.GetResourceGroupTagger()) + if sc.KvExecCounter != nil { + snapshot.SetOption(kv.RPCInterceptor, sc.KvExecCounter.RPCInterceptor()) + } +} + +func isWeakConsistencyRead(ctx sessionctx.Context, node ast.Node) bool { + sessionVars := ctx.GetSessionVars() + return sessionVars.ConnectionID > 0 && sessionVars.ReadConsistency.IsWeak() && + plannercore.IsAutoCommitTxn(ctx) && plannercore.IsReadOnly(node, sessionVars) +} + +// FastCheckTableExec represents a check table executor. +// It is built from the "admin check table" statement, and it checks if the +// index matches the records in the table. +// It uses a new algorithms to check table data, which is faster than the old one(CheckTableExec). +type FastCheckTableExec struct { + exec.BaseExecutor + + dbName string + table table.Table + indexInfos []*model.IndexInfo + done bool + is infoschema.InfoSchema + err *atomic.Pointer[error] + wg sync.WaitGroup + contextCtx context.Context +} + +// Open implements the Executor Open interface. +func (e *FastCheckTableExec) Open(ctx context.Context) error { + if err := e.BaseExecutor.Open(ctx); err != nil { + return err + } + + e.done = false + e.contextCtx = ctx + return nil +} + +type checkIndexTask struct { + indexOffset int +} + +type checkIndexWorker struct { + sctx sessionctx.Context + dbName string + table table.Table + indexInfos []*model.IndexInfo + e *FastCheckTableExec +} + +type groupByChecksum struct { + bucket uint64 + checksum uint64 + count int64 +} + +func getCheckSum(ctx context.Context, se sessionctx.Context, sql string) ([]groupByChecksum, error) { + ctx = kv.WithInternalSourceType(ctx, kv.InternalTxnAdmin) + rs, err := se.(sqlexec.SQLExecutor).ExecuteInternal(ctx, sql) + if err != nil { + return nil, err + } + defer func(rs sqlexec.RecordSet) { + err := rs.Close() + if err != nil { + logutil.BgLogger().Error("close record set failed", zap.Error(err)) + } + }(rs) + rows, err := sqlexec.DrainRecordSet(ctx, rs, 256) + if err != nil { + return nil, err + } + checksums := make([]groupByChecksum, 0, len(rows)) + for _, row := range rows { + checksums = append(checksums, groupByChecksum{bucket: row.GetUint64(1), checksum: row.GetUint64(0), count: row.GetInt64(2)}) + } + return checksums, nil +} + +func (w *checkIndexWorker) initSessCtx(se sessionctx.Context) (restore func()) { + sessVars := se.GetSessionVars() + originOptUseInvisibleIdx := sessVars.OptimizerUseInvisibleIndexes + originMemQuotaQuery := sessVars.MemQuotaQuery + + sessVars.OptimizerUseInvisibleIndexes = true + sessVars.MemQuotaQuery = w.sctx.GetSessionVars().MemQuotaQuery + return func() { + sessVars.OptimizerUseInvisibleIndexes = originOptUseInvisibleIdx + sessVars.MemQuotaQuery = originMemQuotaQuery + } +} + +// HandleTask implements the Worker interface. 
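HandleTask (below) narrows a mismatch down with a radix-style search: rows are bucketed by ((hash - offset) div mod) % bucketSize, and once a mismatching bucket b is found the worker advances offset += b*mod and mod *= bucketSize, so the next round's filter (hash - offset) % mod = 0 keeps exactly the rows that fell into every chosen bucket so far. The sketch below walks the same arithmetic with plain integers; the names and sample hashes are made up and stand in for the crc32(md5(...)) values the real queries compute.

package main

import "fmt"

const bucketSize = 4 // stands in for CheckTableFastBucketSize

// bucketOf mirrors the SQL group-by key: ((hash - offset) div mod) % bucketSize.
func bucketOf(hash, offset, mod int64) int64 {
	return (hash - offset) / mod % bucketSize
}

// selected mirrors the SQL filter: ((hash - offset) % mod) == 0.
func selected(hash, offset, mod int64) bool {
	return (hash-offset)%mod == 0
}

func main() {
	hashes := []int64{0, 5, 9, 13, 21, 37, 53, 54, 55}
	badHash := int64(53) // pretend this row's table/index checksums disagree

	offset, mod := int64(0), int64(1)
	for round := 1; round <= 3; round++ {
		// The bucket holding the bad row is the one whose checksums differ.
		bad := bucketOf(badHash, offset, mod)
		fmt.Printf("round %d: offset=%d mod=%d -> drill into bucket %d\n", round, offset, mod, bad)

		// Same update as HandleTask: zoom into that bucket for the next round.
		offset += bad * mod
		mod *= bucketSize

		// The new filter keeps only rows that matched every chosen bucket so far,
		// shrinking the candidate set by roughly a factor of bucketSize per round.
		var kept []int64
		for _, h := range hashes {
			if selected(h, offset, mod) {
				kept = append(kept, h)
			}
		}
		fmt.Printf("         rows still under suspicion: %v\n", kept)
	}
}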
+func (w *checkIndexWorker) HandleTask(task checkIndexTask, _ func(workerpool.None)) { + defer w.e.wg.Done() + idxInfo := w.indexInfos[task.indexOffset] + bucketSize := int(CheckTableFastBucketSize.Load()) + + ctx := kv.WithInternalSourceType(w.e.contextCtx, kv.InternalTxnAdmin) + + trySaveErr := func(err error) { + w.e.err.CompareAndSwap(nil, &err) + } + + se, err := w.e.Base().GetSysSession() + if err != nil { + trySaveErr(err) + return + } + restoreCtx := w.initSessCtx(se) + defer func() { + restoreCtx() + w.e.Base().ReleaseSysSession(ctx, se) + }() + + var pkCols []string + var pkTypes []*types.FieldType + switch { + case w.e.table.Meta().IsCommonHandle: + pkColsInfo := w.e.table.Meta().GetPrimaryKey().Columns + for _, colInfo := range pkColsInfo { + colStr := colInfo.Name.O + pkCols = append(pkCols, colStr) + pkTypes = append(pkTypes, &w.e.table.Meta().Columns[colInfo.Offset].FieldType) + } + case w.e.table.Meta().PKIsHandle: + pkCols = append(pkCols, w.e.table.Meta().GetPkName().O) + default: // support decoding _tidb_rowid. + pkCols = append(pkCols, model.ExtraHandleName.O) + } + + // CheckSum of (handle + index columns). + var md5HandleAndIndexCol strings.Builder + md5HandleAndIndexCol.WriteString("crc32(md5(concat_ws(0x2, ") + for _, col := range pkCols { + md5HandleAndIndexCol.WriteString(ColumnName(col)) + md5HandleAndIndexCol.WriteString(", ") + } + for offset, col := range idxInfo.Columns { + tblCol := w.table.Meta().Columns[col.Offset] + if tblCol.IsGenerated() && !tblCol.GeneratedStored { + md5HandleAndIndexCol.WriteString(tblCol.GeneratedExprString) + } else { + md5HandleAndIndexCol.WriteString(ColumnName(col.Name.O)) + } + if offset != len(idxInfo.Columns)-1 { + md5HandleAndIndexCol.WriteString(", ") + } + } + md5HandleAndIndexCol.WriteString(")))") + + // Used to group by and order. 
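+ // md5HandleAndIndexCol above hashes handle plus index columns and is what gets
+ // checksummed; md5Handle below hashes only the handle columns and drives the
+ // WHERE filter and GROUP BY bucketing, so the table scan and the index scan
+ // place the same row into the same bucket.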
+ var md5Handle strings.Builder + md5Handle.WriteString("crc32(md5(concat_ws(0x2, ") + for i, col := range pkCols { + md5Handle.WriteString(ColumnName(col)) + if i != len(pkCols)-1 { + md5Handle.WriteString(", ") + } + } + md5Handle.WriteString(")))") + + handleColumnField := strings.Join(pkCols, ", ") + var indexColumnField strings.Builder + for offset, col := range idxInfo.Columns { + indexColumnField.WriteString(ColumnName(col.Name.O)) + if offset != len(idxInfo.Columns)-1 { + indexColumnField.WriteString(", ") + } + } + + tableRowCntToCheck := int64(0) + + offset := 0 + mod := 1 + meetError := false + + lookupCheckThreshold := int64(100) + checkOnce := false + + if w.e.Ctx().GetSessionVars().SnapshotTS != 0 { + se.GetSessionVars().SnapshotTS = w.e.Ctx().GetSessionVars().SnapshotTS + defer func() { + se.GetSessionVars().SnapshotTS = 0 + }() + } + _, err = se.(sqlexec.SQLExecutor).ExecuteInternal(ctx, "begin") + if err != nil { + trySaveErr(err) + return + } + + times := 0 + const maxTimes = 10 + for tableRowCntToCheck > lookupCheckThreshold || !checkOnce { + times++ + if times == maxTimes { + logutil.BgLogger().Warn("compare checksum by group reaches time limit", zap.Int("times", times)) + break + } + whereKey := fmt.Sprintf("((cast(%s as signed) - %d) %% %d)", md5Handle.String(), offset, mod) + groupByKey := fmt.Sprintf("((cast(%s as signed) - %d) div %d %% %d)", md5Handle.String(), offset, mod, bucketSize) + if !checkOnce { + whereKey = "0" + } + checkOnce = true + + tblQuery := fmt.Sprintf("select /*+ read_from_storage(tikv[%s]) */ bit_xor(%s), %s, count(*) from %s use index() where %s = 0 group by %s", TableName(w.e.dbName, w.e.table.Meta().Name.String()), md5HandleAndIndexCol.String(), groupByKey, TableName(w.e.dbName, w.e.table.Meta().Name.String()), whereKey, groupByKey) + idxQuery := fmt.Sprintf("select bit_xor(%s), %s, count(*) from %s use index(`%s`) where %s = 0 group by %s", md5HandleAndIndexCol.String(), groupByKey, TableName(w.e.dbName, w.e.table.Meta().Name.String()), idxInfo.Name, whereKey, groupByKey) + + logutil.BgLogger().Info("fast check table by group", zap.String("table name", w.table.Meta().Name.String()), zap.String("index name", idxInfo.Name.String()), zap.Int("times", times), zap.Int("current offset", offset), zap.Int("current mod", mod), zap.String("table sql", tblQuery), zap.String("index sql", idxQuery)) + + // compute table side checksum. + tableChecksum, err := getCheckSum(w.e.contextCtx, se, tblQuery) + if err != nil { + trySaveErr(err) + return + } + slices.SortFunc(tableChecksum, func(i, j groupByChecksum) int { + return cmp.Compare(i.bucket, j.bucket) + }) + + // compute index side checksum. + indexChecksum, err := getCheckSum(w.e.contextCtx, se, idxQuery) + if err != nil { + trySaveErr(err) + return + } + slices.SortFunc(indexChecksum, func(i, j groupByChecksum) int { + return cmp.Compare(i.bucket, j.bucket) + }) + + currentOffset := 0 + + // Every checksum in table side should be the same as the index side. 
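+ // Both slices are sorted by bucket, so compare them in lockstep: the first
+ // bucket whose checksum (or presence) differs between the two sides is the
+ // one the next, finer-grained round drills into.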
+ i := 0 + for i < len(tableChecksum) && i < len(indexChecksum) { + if tableChecksum[i].bucket != indexChecksum[i].bucket || tableChecksum[i].checksum != indexChecksum[i].checksum { + if tableChecksum[i].bucket <= indexChecksum[i].bucket { + currentOffset = int(tableChecksum[i].bucket) + tableRowCntToCheck = tableChecksum[i].count + } else { + currentOffset = int(indexChecksum[i].bucket) + tableRowCntToCheck = indexChecksum[i].count + } + meetError = true + break + } + i++ + } + + if !meetError && i < len(indexChecksum) && i == len(tableChecksum) { + // Table side has fewer buckets. + currentOffset = int(indexChecksum[i].bucket) + tableRowCntToCheck = indexChecksum[i].count + meetError = true + } else if !meetError && i < len(tableChecksum) && i == len(indexChecksum) { + // Index side has fewer buckets. + currentOffset = int(tableChecksum[i].bucket) + tableRowCntToCheck = tableChecksum[i].count + meetError = true + } + + if !meetError { + if times != 1 { + logutil.BgLogger().Error("unexpected result, no error detected in this round, but an error is detected in the previous round", zap.Int("times", times), zap.Int("offset", offset), zap.Int("mod", mod)) + } + break + } + + offset += currentOffset * mod + mod *= bucketSize + } + + queryToRow := func(se sessionctx.Context, sql string) ([]chunk.Row, error) { + rs, err := se.(sqlexec.SQLExecutor).ExecuteInternal(ctx, sql) + if err != nil { + return nil, err + } + row, err := sqlexec.DrainRecordSet(ctx, rs, 4096) + if err != nil { + return nil, err + } + err = rs.Close() + if err != nil { + logutil.BgLogger().Warn("close result set failed", zap.Error(err)) + } + return row, nil + } + + if meetError { + groupByKey := fmt.Sprintf("((cast(%s as signed) - %d) %% %d)", md5Handle.String(), offset, mod) + indexSQL := fmt.Sprintf("select %s, %s, %s from %s use index(`%s`) where %s = 0 order by %s", handleColumnField, indexColumnField.String(), md5HandleAndIndexCol.String(), TableName(w.e.dbName, w.e.table.Meta().Name.String()), idxInfo.Name, groupByKey, handleColumnField) + tableSQL := fmt.Sprintf("select /*+ read_from_storage(tikv[%s]) */ %s, %s, %s from %s use index() where %s = 0 order by %s", TableName(w.e.dbName, w.e.table.Meta().Name.String()), handleColumnField, indexColumnField.String(), md5HandleAndIndexCol.String(), TableName(w.e.dbName, w.e.table.Meta().Name.String()), groupByKey, handleColumnField) + + idxRow, err := queryToRow(se, indexSQL) + if err != nil { + trySaveErr(err) + return + } + tblRow, err := queryToRow(se, tableSQL) + if err != nil { + trySaveErr(err) + return + } + + errCtx := w.sctx.GetSessionVars().StmtCtx.ErrCtx() + getHandleFromRow := func(row chunk.Row) (kv.Handle, error) { + handleDatum := make([]types.Datum, 0) + for i, t := range pkTypes { + handleDatum = append(handleDatum, row.GetDatum(i, t)) + } + if w.table.Meta().IsCommonHandle { + handleBytes, err := codec.EncodeKey(w.sctx.GetSessionVars().StmtCtx.TimeZone(), nil, handleDatum...) 
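+ // Route the encode error through the statement's error context so the
+ // session's SQL-mode settings decide how strictly it is treated.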
+ err = errCtx.HandleError(err) + if err != nil { + return nil, err + } + return kv.NewCommonHandle(handleBytes) + } + return kv.IntHandle(row.GetInt64(0)), nil + } + getValueFromRow := func(row chunk.Row) ([]types.Datum, error) { + valueDatum := make([]types.Datum, 0) + for i, t := range idxInfo.Columns { + valueDatum = append(valueDatum, row.GetDatum(i+len(pkCols), &w.table.Meta().Columns[t.Offset].FieldType)) + } + return valueDatum, nil + } + + ir := func() *consistency.Reporter { + return &consistency.Reporter{ + HandleEncode: func(handle kv.Handle) kv.Key { + return tablecodec.EncodeRecordKey(w.table.RecordPrefix(), handle) + }, + IndexEncode: func(idxRow *consistency.RecordData) kv.Key { + var idx table.Index + for _, v := range w.table.Indices() { + if strings.EqualFold(v.Meta().Name.String(), idxInfo.Name.O) { + idx = v + break + } + } + if idx == nil { + return nil + } + k, _, err := idx.GenIndexKey(w.sctx.GetSessionVars().StmtCtx, idxRow.Values[:len(idx.Meta().Columns)], idxRow.Handle, nil) + if err != nil { + return nil + } + return k + }, + Tbl: w.table.Meta(), + Idx: idxInfo, + Sctx: w.sctx, + } + } + + getCheckSum := func(row chunk.Row) uint64 { + return row.GetUint64(len(pkCols) + len(idxInfo.Columns)) + } + + var handle kv.Handle + var tableRecord *consistency.RecordData + var lastTableRecord *consistency.RecordData + var indexRecord *consistency.RecordData + i := 0 + for i < len(tblRow) || i < len(idxRow) { + if i == len(tblRow) { + // No more rows in table side. + tableRecord = nil + } else { + handle, err = getHandleFromRow(tblRow[i]) + if err != nil { + trySaveErr(err) + return + } + value, err := getValueFromRow(tblRow[i]) + if err != nil { + trySaveErr(err) + return + } + tableRecord = &consistency.RecordData{Handle: handle, Values: value} + } + if i == len(idxRow) { + // No more rows in index side. 
+ indexRecord = nil + } else { + indexHandle, err := getHandleFromRow(idxRow[i]) + if err != nil { + trySaveErr(err) + return + } + indexValue, err := getValueFromRow(idxRow[i]) + if err != nil { + trySaveErr(err) + return + } + indexRecord = &consistency.RecordData{Handle: indexHandle, Values: indexValue} + } + + if tableRecord == nil { + if lastTableRecord != nil && lastTableRecord.Handle.Equal(indexRecord.Handle) { + tableRecord = lastTableRecord + } + err = ir().ReportAdminCheckInconsistent(w.e.contextCtx, indexRecord.Handle, indexRecord, tableRecord) + } else if indexRecord == nil { + err = ir().ReportAdminCheckInconsistent(w.e.contextCtx, tableRecord.Handle, indexRecord, tableRecord) + } else if tableRecord.Handle.Equal(indexRecord.Handle) && getCheckSum(tblRow[i]) != getCheckSum(idxRow[i]) { + err = ir().ReportAdminCheckInconsistent(w.e.contextCtx, tableRecord.Handle, indexRecord, tableRecord) + } else if !tableRecord.Handle.Equal(indexRecord.Handle) { + if tableRecord.Handle.Compare(indexRecord.Handle) < 0 { + err = ir().ReportAdminCheckInconsistent(w.e.contextCtx, tableRecord.Handle, nil, tableRecord) + } else { + if lastTableRecord != nil && lastTableRecord.Handle.Equal(indexRecord.Handle) { + err = ir().ReportAdminCheckInconsistent(w.e.contextCtx, indexRecord.Handle, indexRecord, lastTableRecord) + } else { + err = ir().ReportAdminCheckInconsistent(w.e.contextCtx, indexRecord.Handle, indexRecord, nil) + } + } + } + if err != nil { + trySaveErr(err) + return + } + i++ + if tableRecord != nil { + lastTableRecord = &consistency.RecordData{Handle: tableRecord.Handle, Values: tableRecord.Values} + } else { + lastTableRecord = nil + } + } + } +} + +// Close implements the Worker interface. +func (*checkIndexWorker) Close() {} + +func (e *FastCheckTableExec) createWorker() workerpool.Worker[checkIndexTask, workerpool.None] { + return &checkIndexWorker{sctx: e.Ctx(), dbName: e.dbName, table: e.table, indexInfos: e.indexInfos, e: e} +} + +// Next implements the Executor Next interface. +func (e *FastCheckTableExec) Next(ctx context.Context, _ *chunk.Chunk) error { + if e.done || len(e.indexInfos) == 0 { + return nil + } + defer func() { e.done = true }() + + // Here we need check all indexes, includes invisible index + e.Ctx().GetSessionVars().OptimizerUseInvisibleIndexes = true + defer func() { + e.Ctx().GetSessionVars().OptimizerUseInvisibleIndexes = false + }() + + workerPool := workerpool.NewWorkerPool[checkIndexTask]("checkIndex", + poolutil.CheckTable, 3, e.createWorker) + workerPool.Start(ctx) + + e.wg.Add(len(e.indexInfos)) + for i := range e.indexInfos { + workerPool.AddTask(checkIndexTask{indexOffset: i}) + } + + e.wg.Wait() + workerPool.ReleaseAndWait() + + p := e.err.Load() + if p == nil { + return nil + } + return *p +} + +// TableName returns `schema`.`table` +func TableName(schema, table string) string { + return fmt.Sprintf("`%s`.`%s`", escapeName(schema), escapeName(table)) +} + +// ColumnName returns `column` +func ColumnName(column string) string { + return fmt.Sprintf("`%s`", escapeName(column)) +} + +func escapeName(name string) string { + return strings.ReplaceAll(name, "`", "``") +} + +// AdminShowBDRRoleExec represents a show BDR role executor. +type AdminShowBDRRoleExec struct { + exec.BaseExecutor + + done bool +} + +// Next implements the Executor Next interface. 
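TableName and ColumnName above keep the internally generated check queries safe when schema, table, or column names contain backticks: escapeName doubles any embedded backtick, which is the standard MySQL identifier-quoting rule. A minimal standalone sketch of that rule follows; the helper and sample names are made up and only mirror the same doubling behaviour.

package main

import (
	"fmt"
	"strings"
)

// quoteIdent wraps a name in backticks and doubles any backtick inside it,
// so the embedded backtick cannot terminate the quoting early.
func quoteIdent(name string) string {
	return "`" + strings.ReplaceAll(name, "`", "``") + "`"
}

func main() {
	// A hostile column name that tries to break out of the quoting.
	col := "a` or 1=1 -- "
	fmt.Println(quoteIdent(col))          // `a`` or 1=1 -- `  (still a single identifier)
	fmt.Println(quoteIdent("normal_col")) // `normal_col`
	fmt.Printf("select %s from %s.%s\n", quoteIdent(col), quoteIdent("db"), quoteIdent("t"))
}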
+func (e *AdminShowBDRRoleExec) Next(ctx context.Context, req *chunk.Chunk) error { + req.Reset() + if e.done { + return nil + } + + return kv.RunInNewTxn(kv.WithInternalSourceType(ctx, kv.InternalTxnAdmin), e.Ctx().GetStore(), true, func(ctx context.Context, txn kv.Transaction) error { + role, err := meta.NewMeta(txn).GetBDRRole() + if err != nil { + return err + } + + if role == "" { + role = string(ast.BDRRoleNone) + } + + req.AppendString(0, role) + e.done = true + return nil + }) +} diff --git a/tests/integrationtest/r/executor/issues.result b/tests/integrationtest/r/executor/issues.result new file mode 100644 index 0000000000000..7a564b0b06692 --- /dev/null +++ b/tests/integrationtest/r/executor/issues.result @@ -0,0 +1,852 @@ +drop table if exists t_issue_23993; +create table t_issue_23993(a double); +insert into t_issue_23993 values(-790822912); +select cast(a as time) from t_issue_23993; +cast(a as time) +NULL +select a from t_issue_23993 where cast(a as time); +a +drop table if exists t_issue_23993; +create table t_issue_23993(a int); +insert into t_issue_23993 values(-790822912); +select cast(a as time) from t_issue_23993; +cast(a as time) +NULL +select a from t_issue_23993 where cast(a as time); +a +drop table if exists t_issue_23993; +create table t_issue_23993(a decimal); +insert into t_issue_23993 values(-790822912); +select cast(a as time) from t_issue_23993; +cast(a as time) +NULL +select a from t_issue_23993 where cast(a as time); +a +drop table if exists t_issue_23993; +create table t_issue_23993(a varchar(255)); +insert into t_issue_23993 values('-790822912'); +select cast(a as time) from t_issue_23993; +cast(a as time) +-838:59:59 +select a from t_issue_23993 where cast(a as time); +a +-790822912 +SELECT HEX(WEIGHT_STRING('ab' AS BINARY(1000000000000000000))); +HEX(WEIGHT_STRING('ab' AS BINARY(1000000000000000000))) +NULL +Level Code Message +Warning 1301 Result of cast_as_binary() was larger than max_allowed_packet (67108864) - truncated +SELECT HEX(WEIGHT_STRING('ab' AS char(1000000000000000000))); +HEX(WEIGHT_STRING('ab' AS char(1000000000000000000))) +NULL +Level Code Message +Warning 1301 Result of weight_string() was larger than max_allowed_packet (67108864) - truncated +drop table if exists m, mp; +CREATE TABLE m ( +mid varchar(50) NOT NULL, +ParentId varchar(50) DEFAULT NULL, +PRIMARY KEY (mid), +KEY ind_bm_parent (ParentId,mid) +); +CREATE TABLE mp ( +mpid bigint(20) unsigned NOT NULL DEFAULT '0', +mid varchar(50) DEFAULT NULL COMMENT '模块主键', +sid int, +PRIMARY KEY (mpid) +); +insert into mp values("1","1","0"); +insert into m values("0", "0"); +SELECT ( SELECT COUNT(1) FROM m WHERE ParentId = c.mid ) expand, bmp.mpid, bmp.mpid IS NULL,bmp.mpid IS NOT NULL, sid FROM m c LEFT JOIN mp bmp ON c.mid = bmp.mid WHERE c.ParentId = '0'; +expand mpid bmp.mpid IS NULL bmp.mpid IS NOT NULL sid +1 NULL 1 0 NULL +SELECT bmp.mpid, bmp.mpid IS NULL,bmp.mpid IS NOT NULL FROM m c LEFT JOIN mp bmp ON c.mid = bmp.mid WHERE c.ParentId = '0'; +mpid bmp.mpid IS NULL bmp.mpid IS NOT NULL +NULL 1 0 +drop table if exists t1; +CREATE TABLE `t1` ( +`a` timestamp NULL DEFAULT NULL, +`b` year(4) DEFAULT NULL, +KEY `a` (`a`), +KEY `b` (`b`) +); +insert into t1 values("2002-10-03 04:28:53",2000), ("2002-10-03 04:28:53",2002), (NULL, 2002); +select /*+ inl_join (x,y) */ * from t1 x cross join t1 y on x.a=y.b; +a b a b +select * from t1 x cross join t1 y on x.a>y.b order by x.a, x.b, y.a, y.b; +a b a b +2002-10-03 04:28:53 2000 NULL 2002 +2002-10-03 04:28:53 2000 2002-10-03 04:28:53 2000 
+2002-10-03 04:28:53 2000 2002-10-03 04:28:53 2002 +2002-10-03 04:28:53 2002 NULL 2002 +2002-10-03 04:28:53 2002 2002-10-03 04:28:53 2000 +2002-10-03 04:28:53 2002 2002-10-03 04:28:53 2002 +select * from t1 where a = b; +a b +select * from t1 where a < b; +a b +drop table if exists t; +create table t(a int) partition by hash (a div 0) partitions 10; +insert into t values (NULL); +select null div 0; +null div 0 +NULL +select * from t; +a +NULL +drop table if exists t; +CREATE TABLE t ( +a varchar(8) DEFAULT NULL, +b varchar(8) DEFAULT NULL, +c decimal(20,2) DEFAULT NULL, +d decimal(15,8) DEFAULT NULL +); +insert into t values(20210606, 20210606, 50000.00, 5.04600000); +select a * c *(d/36000) from t; +a * c *(d/36000) +141642663.71666598 +select cast(a as double) * cast(c as double) *cast(d/36000 as double) from t; +cast(a as double) * cast(c as double) *cast(d/36000 as double) +141642663.71666598 +select 20210606*50000.00*(5.04600000/36000); +20210606*50000.00*(5.04600000/36000) +141642663.71666599297980 +select "20210606"*50000.00*(5.04600000/36000); +"20210606"*50000.00*(5.04600000/36000) +141642663.71666598 +select cast("20210606" as double)*50000.00*(5.04600000/36000); +cast("20210606" as double)*50000.00*(5.04600000/36000) +141642663.71666598 +drop table if exists t1, t2; +create table t1(a int, b varchar(8)); +insert into t1 values(1,'1'); +create table t2(a int , b varchar(8) GENERATED ALWAYS AS (c) VIRTUAL, c varchar(8), PRIMARY KEY (a)); +insert into t2(a) values(1); +select /*+ tidb_inlj(t2) */ t2.b, t1.b from t1 join t2 ON t2.a=t1.a; +b b +NULL 1 +drop table if exists t; +CREATE TABLE t (a bigint unsigned PRIMARY KEY); +INSERT INTO t VALUES (0),(1),(2),(3),(18446744073709551600),(18446744073709551605),(18446744073709551610),(18446744073709551615); +ANALYZE TABLE t; +EXPLAIN FORMAT = 'brief' SELECT a FROM t WHERE a >= 0x1 AND a <= 0x2; +id estRows task access object operator info +TableReader 2.00 root data:TableRangeScan +└─TableRangeScan 2.00 cop[tikv] table:t range:[1,2], keep order:false +EXPLAIN FORMAT = 'brief' SELECT a FROM t WHERE a BETWEEN 0x1 AND 0x2; +id estRows task access object operator info +TableReader 2.00 root data:TableRangeScan +└─TableRangeScan 2.00 cop[tikv] table:t range:[1,2], keep order:false +SELECT a FROM t WHERE a BETWEEN 0xFFFFFFFFFFFFFFF5 AND X'FFFFFFFFFFFFFFFA'; +a +18446744073709551605 +18446744073709551610 +set @@tidb_enable_vectorized_expression=true; +select trim(leading from " a "), trim(both from " a "), trim(trailing from " a "); +trim(leading from " a ") trim(both from " a ") trim(trailing from " a ") +a a a +select trim(leading null from " a "), trim(both null from " a "), trim(trailing null from " a "); +trim(leading null from " a ") trim(both null from " a ") trim(trailing null from " a ") +NULL NULL NULL +select trim(null from " a "); +trim(null from " a ") +NULL +set @@tidb_enable_vectorized_expression=false; +select trim(leading from " a "), trim(both from " a "), trim(trailing from " a "); +trim(leading from " a ") trim(both from " a ") trim(trailing from " a ") +a a a +select trim(leading null from " a "), trim(both null from " a "), trim(trailing null from " a "); +trim(leading null from " a ") trim(both null from " a ") trim(trailing null from " a ") +NULL NULL NULL +select trim(null from " a "); +trim(null from " a ") +NULL +set tidb_enable_vectorized_expression=default; +drop table if exists t29142_1; +drop table if exists t29142_2; +create table t29142_1(a int); +create table t29142_2(a double); +insert into t29142_1 value(20); 
+select sum(distinct a) as x from t29142_1 having x > some ( select a from t29142_2 where x in (a)); +x +drop table if exists e; +create table e (e enum('a', 'b')); +insert into e values ('a'), ('b'); +select * from e where case 1 when 0 then e end; +e +select * from e where case 1 when 1 then e end; +e +a +b +select * from e where case e when 1 then e end; +e +a +select * from e where case 1 when e then e end; +e +a +drop table if exists t; +create table t (en enum('c', 'b', 'a')); +insert into t values ('a'), ('b'), ('c'); +select max(en) from t; +max(en) +c +select min(en) from t; +min(en) +a +select * from t order by en; +en +c +b +a +drop table t; +create table t(s set('c', 'b', 'a')); +insert into t values ('a'), ('b'), ('c'); +select max(s) from t; +max(s) +c +select min(s) from t; +min(s) +a +drop table t; +create table t(id int, en enum('c', 'b', 'a')); +insert into t values (1, 'a'),(2, 'b'), (3, 'c'), (1, 'c'); +select id, max(en) from t where id=1 group by id; +id max(en) +1 c +select id, min(en) from t where id=1 group by id; +id min(en) +1 a +drop table t; +create table t(id int, s set('c', 'b', 'a')); +insert into t values (1, 'a'),(2, 'b'), (3, 'c'), (1, 'c'); +select id, max(s) from t where id=1 group by id; +id max(s) +1 c +select id, min(s) from t where id=1 group by id; +id min(s) +1 a +drop table t; +create table t(e enum('e','d','c','b','a')); +insert into t values ('e'),('d'),('c'),('b'),('a'); +select * from t order by e limit 1; +e +e +drop table t; +create table t(s set('e', 'd', 'c', 'b', 'a')); +insert into t values ('e'),('d'),('c'),('b'),('a'); +select * from t order by s limit 1; +s +e +drop table t; +select distinct 0.7544678906163867 / 0.68234634; +0.7544678906163867 / 0.68234634 +1.10569639842486251190 +drop table if exists t_issue_22231; +create table t_issue_22231(a datetime); +insert into t_issue_22231 values('2020--05-20 01:22:12'); +select * from t_issue_22231 where a >= '2020-05-13 00:00:00 00:00:00' and a <= '2020-05-28 23:59:59 00:00:00'; +a +2020-05-20 01:22:12 +Level Code Message +Warning 1292 Truncated incorrect datetime value: '2020-05-13 00:00:00 00:00:00' +Warning 1292 Truncated incorrect datetime value: '2020-05-28 23:59:59 00:00:00' +select cast('2020-10-22 10:31-10:12' as datetime); +cast('2020-10-22 10:31-10:12' as datetime) +2020-10-22 10:31:10 +Level Code Message +Warning 1292 Truncated incorrect datetime value: '2020-10-22 10:31-10:12' +select cast('2020-05-28 23:59:59 00:00:00' as datetime); +cast('2020-05-28 23:59:59 00:00:00' as datetime) +2020-05-28 23:59:59 +Level Code Message +Warning 1292 Truncated incorrect datetime value: '2020-05-28 23:59:59 00:00:00' +SELECT CAST("1111111111-" AS DATE); +CAST("1111111111-" AS DATE) +NULL +Level Code Message +Warning 1292 Incorrect datetime value: '1111111111-' +drop table if exists t; +create table t ( +create_at datetime NOT NULL DEFAULT '1000-01-01 00:00:00', +finish_at datetime NOT NULL DEFAULT '1000-01-01 00:00:00'); +insert into t values ('2016-02-13 15:32:24', '2016-02-11 17:23:22'); +select timediff(finish_at, create_at) from t; +timediff(finish_at, create_at) +-46:09:02 +drop table if exists t1, t2; +create table t1 (c1 int); +create table t2 (c2 int); +insert into t1 values (1); +insert into t2 values (2); +update t1, t2 set t1.c1 = 2, t2.c2 = 1; +update t1, t2 set c1 = 2, c2 = 1; +update t1 as a, t2 as b set a.c1 = 2, b.c2 = 1; +SELECT * FROM t1; +c1 +2 +SELECT * FROM t2; +c2 +1 +update t1 as a, t2 as t1 set a.c1 = 1, t1.c2 = 2; +SELECT * FROM t1; +c1 +1 +SELECT * FROM t2; +c2 +2 
+update t1 as a, t2 set t1.c1 = 10; +Error 1054 (42S22): Unknown column 'c1' in 'field list' +drop table if exists t1, t2; +create table t1 (a int); +create table t2 (a int); +insert into t1 values(1); +insert into t2 values(1); +select tbl1.* from (select t1.a, 1 from t1) tbl1 left join t2 tbl2 on tbl1.a = tbl2.a order by tbl1.a desc limit 1; +a 1 +1 1 +create database executor__issues2; +use executor__issues2; +create table t(a int); +insert into t values(1); +use executor__issues; +drop table if exists t; +create table t(a int); +insert into t values(1); +update t, executor__issues2.t set executor__issues2.t.a=2; +select * from t; +a +1 +select * from executor__issues2.t; +a +2 +update executor__issues.t, executor__issues2.t set executor__issues.t.a=3; +select * from t; +a +3 +select * from executor__issues2.t; +a +2 +drop database executor__issues2; +set @@profiling=1; +SELECT QUERY_ID, SUM(DURATION) AS SUM_DURATION FROM INFORMATION_SCHEMA.PROFILING GROUP BY QUERY_ID; +QUERY_ID SUM_DURATION +0 0 +drop table if exists t; +create table t(a char); +insert into t value('a'); +select * from t where a < 1 order by a limit 0; +a +drop table if exists t; +create table t (a float); +create index a on t(a); +insert into t values (1.0), (NULL), (0), (2.0); +select `a` from `t` use index (a) where !`a`; +a +0 +select `a` from `t` ignore index (a) where !`a`; +a +0 +select `a` from `t` use index (a) where `a`; +a +1 +2 +select `a` from `t` ignore index (a) where `a`; +a +1 +2 +select a from t use index (a) where not a is true; +a +NULL +0 +select a from t use index (a) where not not a is true; +a +1 +2 +select a from t use index (a) where not not a; +a +1 +2 +select a from t use index (a) where not not not a is true; +a +NULL +0 +select a from t use index (a) where not not not a; +a +0 +drop table if exists t1, t2; +create table t1 (c decimal); +create table t2 (c decimal, key(c)); +insert into t1 values (null); +insert into t2 values (null); +select count(*) from t1 where not c; +count(*) +0 +select count(*) from t2 where not c; +count(*) +0 +select count(*) from t1 where c; +count(*) +0 +select count(*) from t2 where c; +count(*) +0 +drop table if exists t; +create table t (a timestamp); +insert into t values ("1970-07-23 10:04:59"), ("2038-01-19 03:14:07"); +select * from t where date_sub(a, interval 10 month) = date_sub("1970-07-23 10:04:59", interval 10 month); +a +1970-07-23 10:04:59 +select * from t where timestampadd(hour, 1, a ) = timestampadd(hour, 1, "2038-01-19 03:14:07"); +a +2038-01-19 03:14:07 +drop table if exists tt; +create table tt(a decimal(10, 0), b varchar(1), c time); +insert into tt values(0, '2', null), (7, null, '1122'), (NULL, 'w', null), (NULL, '2', '3344'), (NULL, NULL, '0'), (7, 'f', '33'); +select a and b as d, a or c as e from tt; +d e +0 NULL +NULL 1 +0 NULL +NULL 1 +NULL NULL +0 1 +drop table if exists tt; +create table tt(a decimal(10, 0), b varchar(1), c time); +insert into tt values(0, '2', '123'), (7, null, '1122'), (null, 'w', null); +select a and b as d, a, b from tt order by d limit 1; +d a b +NULL 7 NULL +select b or c as d, b, c from tt order by d limit 1; +d b c +NULL w NULL +drop table if exists t0; +CREATE TABLE t0(c0 FLOAT); +INSERT INTO t0(c0) VALUES (NULL); +SELECT * FROM t0 WHERE NOT(0 OR t0.c0); +c0 +drop table if exists t; +create table t(a int, b char); +insert into t values (1,'s'),(2,'b'),(1,'c'),(2,'e'),(1,'a'); +insert into t select * from t; +insert into t select * from t; +insert into t select * from t; +select b, count(*) from ( select b 
from t order by a limit 20 offset 2) as s group by b order by b; +b count(*) +a 6 +c 7 +s 7 +drop table if exists t0; +CREATE TABLE t0(c0 NUMERIC PRIMARY KEY); +INSERT IGNORE INTO t0(c0) VALUES (NULL); +SELECT * FROM t0 WHERE c0; +c0 +drop table if exists t; +CREATE TABLE `t` ( `a` enum('WAITING','PRINTED','STOCKUP','CHECKED','OUTSTOCK','PICKEDUP','WILLBACK','BACKED') DEFAULT NULL); +insert into t values(1),(2),(3),(4),(5),(6),(7); +insert into t select * from t; +insert into t select * from t; +insert into t select * from t; +insert into t select * from t; +insert into t select * from t; +insert into t select * from t; +insert into t select * from t; +set @@tidb_max_chunk_size=100; +select distinct a from t order by a; +a +WAITING +PRINTED +STOCKUP +CHECKED +OUTSTOCK +PICKEDUP +WILLBACK +drop table t; +CREATE TABLE `t` ( `a` set('WAITING','PRINTED','STOCKUP','CHECKED','OUTSTOCK','PICKEDUP','WILLBACK','BACKED') DEFAULT NULL); +insert into t values(1),(2),(3),(4),(5),(6),(7); +insert into t select * from t; +insert into t select * from t; +insert into t select * from t; +insert into t select * from t; +insert into t select * from t; +insert into t select * from t; +insert into t select * from t; +set @@tidb_max_chunk_size=100; +select distinct a from t order by a; +a +WAITING +PRINTED +WAITING,PRINTED +STOCKUP +WAITING,STOCKUP +PRINTED,STOCKUP +WAITING,PRINTED,STOCKUP +set @@tidb_max_chunk_size=default; +drop table if exists t2; +create table t2 (a year(4)); +insert into t2 values(69); +select * from t2 where a <= 69; +a +2069 +drop table if exists t3; +CREATE TABLE `t3` (`y` year DEFAULT NULL, `a` int DEFAULT NULL); +INSERT INTO `t3` VALUES (2069, 70), (2010, 11), (2155, 2156), (2069, 69); +SELECT * FROM `t3` where y <= a; +y a +2155 2156 +drop table if exists t3; +create table t3 (a year); +insert into t3 values (1991), ("1992"), ("93"), (94); +select * from t3 where a >= NULL; +a +drop table if exists t; +CREATE TABLE `t` (`id` int(11) DEFAULT NULL, `tp_bigint` bigint(20) DEFAULT NULL ); +insert into t values(0,1),(1,9215570218099803537); +select A.tp_bigint,B.id from t A join t B on A.id < B.id * 16 where A.tp_bigint = B.id; +tp_bigint id +1 1 +drop table if exists t0; +create table t0 (c0 double); +insert into t0 values (1e30); +update t0 set c0=0 where t0.c0 like 0; +select count(*) from t0 where c0 = 0; +count(*) +0 +drop table if exists t; +create table t (a year); +insert into t values(0); +select cast(a as char) from t; +cast(a as char) +0000 +SELECT TIMESTAMP '9999-01-01 00:00:00'; +TIMESTAMP '9999-01-01 00:00:00' +9999-01-01 00:00:00 +drop table if exists ta; +create table ta(id decimal(60,2)); +insert into ta values (JSON_EXTRACT('{"c": "1234567890123456789012345678901234567890123456789012345"}', '$.c')); +select * from ta; +id +1234567890123456789012345678901234567890123456789012345.00 +drop table if exists t1; +create table t1 (f1 json); +insert into t1(f1) values ('"asd"'),('"asdf"'),('"asasas"'); +select f1 from t1 where json_extract(f1,"$") in ("asd","asasas","asdf"); +f1 +"asd" +"asdf" +"asasas" +select f1 from t1 where json_extract(f1, '$') = 'asd'; +f1 +"asd" +select f1 from t1 where case json_extract(f1,"$") when "asd" then 1 else 0 end; +f1 +"asd" +delete from t1; +insert into t1 values ('{"a": 1}'); +select f1 from t1 where f1 in ('{"a": 1}', 'asdf', 'asdf'); +f1 +select f1 from t1 where f1 in (cast('{"a": 1}' as JSON), 'asdf', 'asdf'); +f1 +{"a": 1} +select json_extract('"asd"', '$') = 'asd'; +json_extract('"asd"', '$') = 'asd' +1 +select json_extract('"asd"', '$') 
<=> 'asd'; +json_extract('"asd"', '$') <=> 'asd' +1 +select json_extract('"asd"', '$') <> 'asd'; +json_extract('"asd"', '$') <> 'asd' +0 +select json_extract('{"f": 1.0}', '$.f') = 1.0; +json_extract('{"f": 1.0}', '$.f') = 1.0 +1 +select json_extract('{"f": 1.0}', '$.f') = '1.0'; +json_extract('{"f": 1.0}', '$.f') = '1.0' +0 +select json_extract('{"n": 1}', '$') = '{"n": 1}'; +json_extract('{"n": 1}', '$') = '{"n": 1}' +0 +select json_extract('{"n": 1}', '$') <> '{"n": 1}'; +json_extract('{"n": 1}', '$') <> '{"n": 1}' +1 +drop table if exists t; +create table t (a int, b int); +insert into t values (2, 20), (1, 10), (3, 30); +select a + 1 as field1, a as field2 from t order by field1, field2 limit 2; +field1 field2 +2 1 +3 2 +drop table if exists t; +create table t (c int auto_increment, key(c)) auto_id_cache 1; +insert into t values(); +insert into t values(); +select * from t; +c +1 +2 +drop table if exists test; +create table test(id float primary key clustered AUTO_INCREMENT, col1 int); +replace into test(col1) values(1); +replace into test(col1) values(2); +select * from test; +id col1 +1 1 +2 2 +drop table test; +drop table if exists test; +create table test(id float primary key nonclustered AUTO_INCREMENT, col1 int) AUTO_ID_CACHE 1; +replace into test(col1) values(1); +replace into test(col1) values(2); +select * from test; +id col1 +1 1 +2 2 +drop table test; +create table test2(id double primary key clustered AUTO_INCREMENT, col1 int); +replace into test2(col1) values(1); +insert into test2(col1) values(1); +replace into test2(col1) values(1); +insert into test2(col1) values(1); +replace into test2(col1) values(1); +replace into test2(col1) values(1); +select * from test2; +id col1 +1 1 +2 1 +3 1 +4 1 +5 1 +6 1 +drop table test2; +create table test2(id double primary key nonclustered AUTO_INCREMENT, col1 int) AUTO_ID_CACHE 1; +replace into test2(col1) values(1); +insert into test2(col1) values(1); +replace into test2(col1) values(1); +insert into test2(col1) values(1); +replace into test2(col1) values(1); +replace into test2(col1) values(1); +select * from test2; +id col1 +1 1 +2 1 +3 1 +4 1 +5 1 +6 1 +drop table test2; +drop table if exists t1; +CREATE TABLE t1 ( +c_int int(11) NOT NULL, +c_str varbinary(40) NOT NULL, +c_datetime datetime DEFAULT NULL, +c_timestamp timestamp NULL DEFAULT NULL, +c_double double DEFAULT NULL, +c_decimal decimal(12,6) DEFAULT NULL, +c_enum enum('blue','green','red','yellow','white','orange','purple') DEFAULT NULL, +PRIMARY KEY (c_int,c_str) /*T![clustered_index] CLUSTERED */, +KEY c_int_2 (c_int), +KEY c_decimal (c_decimal), +KEY c_datetime (c_datetime) +) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin +PARTITION BY LIST COLUMNS(c_int) +(PARTITION p0 VALUES IN (1,5,9,13,17,21,25,29,33,37), +PARTITION p1 VALUES IN (2,6,10,14,18,22,26,30,34,38), +PARTITION p2 VALUES IN (3,7,11,15,19,23,27,31,35,39), +PARTITION p3 VALUES IN (4,8,12,16,20,24,28,32,36,40)); +INSERT INTO t1 VALUES (3,'bold goldberg','2020-01-07 12:08:19','2020-06-19 08:13:35',0.941002,5.303000,'yellow'),(1,'crazy wescoff','2020-03-24 21:51:02','2020-06-19 08:13:35',47.565275,6.313000,'orange'),(5,'relaxed gagarin','2020-05-20 11:36:26','2020-06-19 08:13:35',38.948617,3.143000,'green'),(9,'gifted vaughan','2020-04-09 16:19:45','2020-06-19 08:13:35',95.922976,8.708000,'yellow'),(2,'focused taussig','2020-05-17 17:58:34','2020-06-19 08:13:35',4.137803,4.902000,'white'),(6,'fervent yonath','2020-05-26 03:55:25','2020-06-19 08:13:35',72.394272,6.491000,'white'),(18,'mystifying 
bhaskara','2020-02-19 10:41:48','2020-06-19 08:13:35',10.832397,9.707000,'red'),(4,'goofy saha','2020-03-11 13:24:31','2020-06-19 08:13:35',39.007216,2.446000,'blue'),(20,'mystifying bhaskara','2020-04-03 11:33:27','2020-06-19 08:13:35',85.190386,6.787000,'blue'); +DROP TABLE IF EXISTS t2; +CREATE TABLE t2 ( +c_int int(11) NOT NULL, +c_str varbinary(40) NOT NULL, +c_datetime datetime DEFAULT NULL, +c_timestamp timestamp NULL DEFAULT NULL, +c_double double DEFAULT NULL, +c_decimal decimal(12,6) DEFAULT NULL, +c_enum enum('blue','green','red','yellow','white','orange','purple') DEFAULT NULL, +PRIMARY KEY (c_int,c_str) /*T![clustered_index] CLUSTERED */, +KEY c_int_2 (c_int), +KEY c_decimal (c_decimal), +KEY c_datetime (c_datetime) +) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin +PARTITION BY LIST COLUMNS(c_int) +(PARTITION p0 VALUES IN (1,5,9,13,17,21,25,29,33,37), +PARTITION p1 VALUES IN (2,6,10,14,18,22,26,30,34,38), +PARTITION p2 VALUES IN (3,7,11,15,19,23,27,31,35,39), +PARTITION p3 VALUES IN (4,8,12,16,20,24,28,32,36,40)); +INSERT INTO t2 VALUES (1,'crazy wescoff','2020-03-24 21:51:02','2020-04-01 12:11:56',47.565275,6.313000,'orange'),(1,'unruffled johnson','2020-06-30 03:42:58','2020-06-14 00:16:50',35.444084,1.090000,'red'),(5,'relaxed gagarin','2020-05-20 11:36:26','2020-02-19 12:25:48',38.948617,3.143000,'green'),(9,'eloquent archimedes','2020-02-16 04:20:21','2020-05-23 15:42:33',32.310878,5.855000,'orange'),(9,'gifted vaughan','2020-04-09 16:19:45','2020-05-15 01:42:16',95.922976,8.708000,'yellow'),(13,'dreamy benz','2020-04-27 17:43:44','2020-03-27 06:33:03',39.539233,4.823000,'red'),(3,'bold goldberg','2020-01-07 12:08:19','2020-03-10 18:37:09',0.941002,5.303000,'yellow'),(3,'youthful yonath','2020-01-12 17:10:39','2020-06-10 15:13:44',66.288511,6.046000,'white'),(7,'upbeat bhabha','2020-04-29 01:17:05','2020-03-11 22:58:43',23.316987,9.026000,'yellow'),(11,'quizzical ritchie','2020-05-16 08:21:36','2020-03-05 19:23:25',75.019379,0.260000,'purple'),(2,'dazzling kepler','2020-04-11 04:38:59','2020-05-06 04:42:32',78.798503,2.274000,'purple'),(2,'focused taussig','2020-05-17 17:58:34','2020-02-25 09:11:03',4.137803,4.902000,'white'),(2,'sharp ptolemy',NULL,'2020-05-17 18:04:19',NULL,5.573000,'purple'),(6,'fervent yonath','2020-05-26 03:55:25','2020-05-06 14:23:44',72.394272,6.491000,'white'),(10,'musing wu','2020-04-03 11:33:27','2020-05-24 06:11:56',85.190386,6.787000,'blue'),(8,'hopeful keller','2020-02-19 10:41:48','2020-04-19 17:10:36',10.832397,9.707000,'red'),(12,'exciting boyd',NULL,'2020-03-28 18:27:23',NULL,9.249000,'blue'); +set tidb_txn_assertion_level=strict; +begin; +delete t1, t2 from t1, t2 where t1.c_enum in ('blue'); +commit; +set tidb_txn_assertion_level=default; +drop table if exists t1; +create table t1 (_id int PRIMARY KEY, c1 char, index (c1)); +insert into t1 values (1, null); +select * from t1 where c1 is null and _id < 1; +_id c1 +drop table if exists t1, t2; +CREATE TABLE t1 ( +c1 double DEFAULT '1.335088259490289', +c2 set('mj','4s7ht','z','3i','b26','9','cg11','uvzcp','c','ns','fl9') NOT NULL DEFAULT 'mj,z,3i,9,cg11,c', +PRIMARY KEY (c2) /*T![clustered_index] CLUSTERED */, +KEY i1 (c1), +KEY i2 (c1), +KEY i3 (c1) +) ENGINE=InnoDB DEFAULT CHARSET=gbk COLLATE=gbk_chinese_ci; +INSERT INTO t1 VALUES (634.2783557491367,''),(2000.5041449792013,'4s7ht'),(634.2783557491367,'3i'),(634.2783557491367,'9'),(7803.173688589342,'uvzcp'),(634.2783557491367,'ns'),(634.2783557491367,'fl9'); +CREATE TABLE t2 ( +c3 decimal(56,16) DEFAULT 
'931359772706767457132645278260455518957.9866038319986886', +c4 set('3bqx','g','6op3','2g','jf','arkd3','y0b','jdy','1g','ff5z','224b') DEFAULT '3bqx,2g,ff5z,224b', +c5 smallint(6) NOT NULL DEFAULT '-25973', +c6 year(4) DEFAULT '2122', +c7 text DEFAULT NULL, +PRIMARY KEY (c5) /*T![clustered_index] CLUSTERED */, +KEY i4 (c6), +KEY i5 (c5) +) ENGINE=InnoDB DEFAULT CHARSET=utf8 COLLATE=utf8_bin COMMENT='' +PARTITION BY HASH (c5) PARTITIONS 4; +INSERT INTO t2 VALUES (465.0000000000000000,'jdy',-8542,2008,'FgZXe'); +set @@sql_mode=''; +set tidb_partition_prune_mode=dynamic; +analyze table t1; +analyze table t2; +select /*+ inl_join( t1 , t2 ) */ avg( t2.c5 ) as r0 , repeat( t2.c7 , t2.c5 ) as r1 , locate( t2.c7 , t2.c7 ) as r2 , unhex( t1.c1 ) as r3 from t1 right join t2 on t1.c2 = t2.c5 where not( t2.c5 in ( -7860 ,-13384 ,-12940 ) ) and not( t1.c2 between '4s7ht' and 'mj' ); +r0 r1 r2 r3 +NULL NULL NULL NULL +select /*+ inl_join (t1, t2) */ t2.c5 from t1 right join t2 on t1.c2 = t2.c5 where not( t1.c2 between '4s7ht' and 'mj' ); +c5 +set sql_mode=default; +set tidb_partition_prune_mode=default; +drop table if exists ta, tb, tc; +CREATE TABLE ta ( +a1 json DEFAULT NULL, +a2 decimal(31, 1) DEFAULT '0' +); +CREATE TABLE tb ( +b1 smallint(6) DEFAULT '-11385', +b2 decimal(63, 14) DEFAULT '-6197127648752447138876497216172307937317445669286.98661563645110' +); +CREATE TABLE tc ( +c1 text DEFAULT NULL, +c2 float NOT NULL DEFAULT '1.8132474', +PRIMARY KEY (c2) +/*T![clustered_index] CLUSTERED */ +); +insert into ta +values (NULL, 1228.0); +insert into ta +values ('"json string1"', 623.8); +insert into ta +values (NULL, 1337.0); +select count(*)from ta where not ( ta.a1 in ( select b2 from tb where not ( ta.a1 in ( select c1 from tc where ta.a2 in ( select b2 from tb where IsNull(ta.a1) ) ) ) ) ); +Error 1815 (HY000): expression isnull(cast(executor__issues.ta.a1, var_string(4294967295))) cannot be pushed down +drop table if exists V, F; +create table V (id int primary key, col_int int); +insert into V values (1, 8); +create table F (id int primary key, col_int int); +insert into F values (1, 8); +select table1.`col_int` as field1, table1.`col_int` as field2 from V as table1 left join F as table2 on table1.`col_int` = table2.`col_int` order by field1, field2 desc limit 2; +field1 field2 +8 8 +set tidb_cost_model_version=2; +set @@session.tidb_enable_list_partition = ON; +drop table if exists t1, t2; +create table t1 (c_int int, c_str varchar(40), c_decimal decimal(12, 6), primary key (c_int) , key(c_str(2)) , key(c_decimal) ) partition by list (c_int) ( partition p0 values IN (1, 5, 9, 13, 17, 21, 25, 29, 33, 37), partition p1 values IN (2, 6, 10, 14, 18, 22, 26, 30, 34, 38), partition p2 values IN (3, 7, 11, 15, 19, 23, 27, 31, 35, 39), partition p3 values IN (4, 8, 12, 16, 20, 24, 28, 32, 36, 40)) ; +create table t2 (c_int int, c_str varchar(40), c_decimal decimal(12, 6), primary key (c_int) , key(c_str) , key(c_decimal) ) partition by hash (c_int) partitions 4; +insert into t1 values (6, 'musing mayer', 1.280), (7, 'wizardly heisenberg', 6.589), (8, 'optimistic swirles', 9.633), (9, 'hungry haslett', 2.659), (10, 'stupefied wiles', 2.336); +insert into t2 select * from t1 ; +analyze table t1; +analyze table t2; +begin; +select * from t1 where c_str <> any (select c_str from t2 where c_decimal < 5) for update; +c_int c_str c_decimal +10 stupefied wiles 2.336000 +6 musing mayer 1.280000 +7 wizardly heisenberg 6.589000 +8 optimistic swirles 9.633000 +9 hungry haslett 2.659000 +explain format = 'brief' 
select * from t1 where c_str <> any (select c_str from t2 where c_decimal < 5) for update; +id estRows task access object operator info +SelectLock 3.20 root for update 0 +└─HashJoin 3.20 root CARTESIAN inner join, other cond:or(gt(Column#8, 1), or(ne(executor__issues.t1.c_str, Column#7), if(ne(Column#9, 0), NULL, 0))) + ├─Selection(Build) 0.80 root ne(Column#10, 0) + │ └─StreamAgg 1.00 root funcs:max(Column#17)->Column#7, funcs:count(distinct Column#18)->Column#8, funcs:sum(Column#19)->Column#9, funcs:count(1)->Column#10 + │ └─Projection 3.00 root executor__issues.t2.c_str->Column#17, executor__issues.t2.c_str->Column#18, cast(isnull(executor__issues.t2.c_str), decimal(20,0) BINARY)->Column#19 + │ └─TableReader 3.00 root partition:all data:Selection + │ └─Selection 3.00 cop[tikv] lt(executor__issues.t2.c_decimal, 5) + │ └─TableFullScan 5.00 cop[tikv] table:t2 keep order:false + └─TableReader(Probe) 4.00 root partition:all data:Selection + └─Selection 4.00 cop[tikv] if(isnull(executor__issues.t1.c_str), NULL, 1) + └─TableFullScan 5.00 cop[tikv] table:t1 keep order:false +commit; +set tidb_cost_model_version=default; +set @@session.tidb_enable_list_partition = default; +drop table if exists trade, trade_history, status_type; +set @@foreign_key_checks=0; +CREATE TABLE trade ( +t_id bigint(16) NOT NULL AUTO_INCREMENT, +t_dts datetime NOT NULL, +t_st_id char(4) NOT NULL, +t_tt_id char(3) NOT NULL, +t_is_cash tinyint(1) NOT NULL, +t_s_symb char(15) NOT NULL, +t_qty mediumint(7) NOT NULL, +t_bid_price decimal(8,2) NOT NULL, +t_ca_id bigint(12) NOT NULL, +t_exec_name varchar(49) NOT NULL, +t_trade_price decimal(8,2) DEFAULT NULL, +t_chrg decimal(10,2) NOT NULL, +t_comm decimal(10,2) NOT NULL, +t_tax decimal(10,2) NOT NULL, +t_lifo tinyint(1) NOT NULL, +PRIMARY KEY (t_id) /*T![clustered_index] CLUSTERED */, +KEY i_t_ca_id_dts (t_ca_id,t_dts), +KEY i_t_s_symb_dts (t_s_symb,t_dts), +CONSTRAINT fk_trade_st FOREIGN KEY (t_st_id) REFERENCES status_type (st_id), +CONSTRAINT fk_trade_tt FOREIGN KEY (t_tt_id) REFERENCES trade_type (tt_id), +CONSTRAINT fk_trade_s FOREIGN KEY (t_s_symb) REFERENCES security (s_symb), +CONSTRAINT fk_trade_ca FOREIGN KEY (t_ca_id) REFERENCES customer_account (ca_id) +) ; +CREATE TABLE trade_history ( +th_t_id bigint(16) NOT NULL, +th_dts datetime NOT NULL, +th_st_id char(4) NOT NULL, +PRIMARY KEY (th_t_id,th_st_id) /*T![clustered_index] NONCLUSTERED */, +KEY i_th_t_id_dts (th_t_id,th_dts), +CONSTRAINT fk_trade_history_t FOREIGN KEY (th_t_id) REFERENCES trade (t_id), +CONSTRAINT fk_trade_history_st FOREIGN KEY (th_st_id) REFERENCES status_type (st_id) +); +CREATE TABLE status_type ( +st_id char(4) NOT NULL, +st_name char(10) NOT NULL, +PRIMARY KEY (st_id) /*T![clustered_index] NONCLUSTERED */ +); +trace plan SELECT T_ID, T_S_SYMB, T_QTY, ST_NAME, TH_DTS FROM ( SELECT T_ID AS ID FROM TRADE WHERE T_CA_ID = 43000014236 ORDER BY T_DTS DESC LIMIT 10 ) T, TRADE, TRADE_HISTORY, STATUS_TYPE WHERE TRADE.T_ID = ID AND TRADE_HISTORY.TH_T_ID = TRADE.T_ID AND STATUS_TYPE.ST_ID = TRADE_HISTORY.TH_ST_ID ORDER BY TH_DTS DESC LIMIT 30; +set @@foreign_key_checks=default; +drop table if exists partsupp, supplier, nation; +SET GLOBAL tidb_mem_oom_action='CANCEL'; +CREATE TABLE `partsupp` ( `PS_PARTKEY` bigint(20) NOT NULL,`PS_SUPPKEY` bigint(20) NOT NULL,`PS_AVAILQTY` bigint(20) NOT NULL,`PS_SUPPLYCOST` decimal(15,2) NOT NULL,`PS_COMMENT` varchar(199) NOT NULL,PRIMARY KEY (`PS_PARTKEY`,`PS_SUPPKEY`) /*T![clustered_index] CLUSTERED */) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin; 
+CREATE TABLE `supplier` (`S_SUPPKEY` bigint(20) NOT NULL,`S_NAME` char(25) NOT NULL,`S_ADDRESS` varchar(40) NOT NULL,`S_NATIONKEY` bigint(20) NOT NULL,`S_PHONE` char(15) NOT NULL,`S_ACCTBAL` decimal(15,2) NOT NULL,`S_COMMENT` varchar(101) NOT NULL,PRIMARY KEY (`S_SUPPKEY`) /*T![clustered_index] CLUSTERED */) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin; +CREATE TABLE `nation` (`N_NATIONKEY` bigint(20) NOT NULL,`N_NAME` char(25) NOT NULL,`N_REGIONKEY` bigint(20) NOT NULL,`N_COMMENT` varchar(152) DEFAULT NULL,PRIMARY KEY (`N_NATIONKEY`) /*T![clustered_index] CLUSTERED */) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin; +set @@tidb_mem_quota_query=128; +explain select ps_partkey, sum(ps_supplycost * ps_availqty) as value from partsupp, supplier, nation where ps_suppkey = s_suppkey and s_nationkey = n_nationkey and n_name = 'MOZAMBIQUE' group by ps_partkey having sum(ps_supplycost * ps_availqty) > ( select sum(ps_supplycost * ps_availqty) * 0.0001000000 from partsupp, supplier, nation where ps_suppkey = s_suppkey and s_nationkey = n_nationkey and n_name = 'MOZAMBIQUE' ) order by value desc; +Error 8175 (HY000): Your query has been cancelled due to exceeding the allowed memory limit for a single SQL query. Please try narrowing your query scope or increase the tidb_mem_quota_query limit and try again.[conn=] +SET GLOBAL tidb_mem_oom_action = DEFAULT; +set @@tidb_mem_quota_query=default; +drop table if exists issue49369; +CREATE TABLE `issue49369` ( +`x` varchar(32) COLLATE utf8mb4_bin DEFAULT NULL +) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin; +insert into t select round(cast('88888899999999999888888888888888888888888888888888888.11111111111111111111' as decimal(18,12)) * cast('88888899999999999888888888888888888888888888888888888.11111111111111111111' as decimal(42,18)) ); +Error 1690 (22003): DECIMAL value is out of range in '(18, 12)' +set @@sql_mode = ''; +insert into t select round(cast('88888899999999999888888888888888888888888888888888888.11111111111111111111' as decimal(18,12)) * cast('88888899999999999888888888888888888888888888888888888.11111111111111111111' as decimal(42,18)) ); +show warnings; +Level Code Message +Warning 1690 DECIMAL value is out of range in '(18, 12)' +Warning 1690 DECIMAL value is out of range in '(42, 18)' +Warning 1690 %s value is out of range in '%s' +select * from t; +c +1 +2 +2147483647 +set @@sql_mode = default; diff --git a/tests/integrationtest/t/executor/issues.test b/tests/integrationtest/t/executor/issues.test new file mode 100644 index 0000000000000..52f2a7e279d2c --- /dev/null +++ b/tests/integrationtest/t/executor/issues.test @@ -0,0 +1,649 @@ +# TestIssue23993 +drop table if exists t_issue_23993; +create table t_issue_23993(a double); +insert into t_issue_23993 values(-790822912); +select cast(a as time) from t_issue_23993; +select a from t_issue_23993 where cast(a as time); +drop table if exists t_issue_23993; +create table t_issue_23993(a int); +insert into t_issue_23993 values(-790822912); +select cast(a as time) from t_issue_23993; +select a from t_issue_23993 where cast(a as time); +drop table if exists t_issue_23993; +create table t_issue_23993(a decimal); +insert into t_issue_23993 values(-790822912); +select cast(a as time) from t_issue_23993; +select a from t_issue_23993 where cast(a as time); +drop table if exists t_issue_23993; +create table t_issue_23993(a varchar(255)); +insert into t_issue_23993 values('-790822912'); +select cast(a as time) from t_issue_23993; +select a from t_issue_23993 where 
cast(a as time);
+
+# TestIssue22201
+--enable_warnings
+SELECT HEX(WEIGHT_STRING('ab' AS BINARY(1000000000000000000)));
+SELECT HEX(WEIGHT_STRING('ab' AS char(1000000000000000000)));
+--disable_warnings
+
+# TestIssue22941
+drop table if exists m, mp;
+CREATE TABLE m (
+ mid varchar(50) NOT NULL,
+ ParentId varchar(50) DEFAULT NULL,
+ PRIMARY KEY (mid),
+ KEY ind_bm_parent (ParentId,mid)
+);
+CREATE TABLE mp (
+ mpid bigint(20) unsigned NOT NULL DEFAULT '0',
+ mid varchar(50) DEFAULT NULL COMMENT '模块主键',
+ sid int,
+PRIMARY KEY (mpid)
+);
+insert into mp values("1","1","0");
+insert into m values("0", "0");
+SELECT ( SELECT COUNT(1) FROM m WHERE ParentId = c.mid ) expand, bmp.mpid, bmp.mpid IS NULL,bmp.mpid IS NOT NULL, sid FROM m c LEFT JOIN mp bmp ON c.mid = bmp.mid WHERE c.ParentId = '0';
+SELECT bmp.mpid, bmp.mpid IS NULL,bmp.mpid IS NOT NULL FROM m c LEFT JOIN mp bmp ON c.mid = bmp.mid WHERE c.ParentId = '0';
+
+# TestIssue23609
+drop table if exists t1;
+CREATE TABLE `t1` (
+ `a` timestamp NULL DEFAULT NULL,
+ `b` year(4) DEFAULT NULL,
+ KEY `a` (`a`),
+ KEY `b` (`b`)
+);
+insert into t1 values("2002-10-03 04:28:53",2000), ("2002-10-03 04:28:53",2002), (NULL, 2002);
+select /*+ inl_join (x,y) */ * from t1 x cross join t1 y on x.a=y.b;
+select * from t1 x cross join t1 y on x.a>y.b order by x.a, x.b, y.a, y.b;
+select * from t1 where a = b;
+# to check the warning count
+--enable_warnings
+select * from t1 where a < b;
+--disable_warnings
+
+# TestIssue24091
+drop table if exists t;
+create table t(a int) partition by hash (a div 0) partitions 10;
+insert into t values (NULL);
+select null div 0;
+select * from t;
+
+# TestIssue26348
+drop table if exists t;
+CREATE TABLE t (
+a varchar(8) DEFAULT NULL,
+b varchar(8) DEFAULT NULL,
+c decimal(20,2) DEFAULT NULL,
+d decimal(15,8) DEFAULT NULL
+);
+insert into t values(20210606, 20210606, 50000.00, 5.04600000);
+select a * c *(d/36000) from t;
+select cast(a as double) * cast(c as double) *cast(d/36000 as double) from t;
+select 20210606*50000.00*(5.04600000/36000);
+# differs from MySQL because of constant folding
+select "20210606"*50000.00*(5.04600000/36000);
+select cast("20210606" as double)*50000.00*(5.04600000/36000);
+
+# TestIssue25447
+drop table if exists t1, t2;
+create table t1(a int, b varchar(8));
+insert into t1 values(1,'1');
+create table t2(a int , b varchar(8) GENERATED ALWAYS AS (c) VIRTUAL, c varchar(8), PRIMARY KEY (a));
+insert into t2(a) values(1);
+select /*+ tidb_inlj(t2) */ t2.b, t1.b from t1 join t2 ON t2.a=t1.a;
+
+# TestIssue23602
+drop table if exists t;
+CREATE TABLE t (a bigint unsigned PRIMARY KEY);
+INSERT INTO t VALUES (0),(1),(2),(3),(18446744073709551600),(18446744073709551605),(18446744073709551610),(18446744073709551615);
+ANALYZE TABLE t;
+EXPLAIN FORMAT = 'brief' SELECT a FROM t WHERE a >= 0x1 AND a <= 0x2;
+EXPLAIN FORMAT = 'brief' SELECT a FROM t WHERE a BETWEEN 0x1 AND 0x2;
+SELECT a FROM t WHERE a BETWEEN 0xFFFFFFFFFFFFFFF5 AND X'FFFFFFFFFFFFFFFA';
+
+# TestIssue28935
+set @@tidb_enable_vectorized_expression=true;
+select trim(leading from " a "), trim(both from " a "), trim(trailing from " a ");
+select trim(leading null from " a "), trim(both null from " a "), trim(trailing null from " a ");
+select trim(null from " a ");
+set @@tidb_enable_vectorized_expression=false;
+select trim(leading from " a "), trim(both from " a "), trim(trailing from " a ");
+select trim(leading null from " a "), trim(both null from " a "), trim(trailing null from " a ");
+select trim(null from " a ");
+set 
tidb_enable_vectorized_expression=default; + +# TestIssue29412 +drop table if exists t29142_1; +drop table if exists t29142_2; +create table t29142_1(a int); +create table t29142_2(a double); +insert into t29142_1 value(20); +select sum(distinct a) as x from t29142_1 having x > some ( select a from t29142_2 where x in (a)); + +# TestIssue12201 +drop table if exists e; +create table e (e enum('a', 'b')); +insert into e values ('a'), ('b'); +select * from e where case 1 when 0 then e end; +select * from e where case 1 when 1 then e end; +select * from e where case e when 1 then e end; +select * from e where case 1 when e then e end; + +# TestIssue21451 +drop table if exists t; +create table t (en enum('c', 'b', 'a')); +insert into t values ('a'), ('b'), ('c'); +select max(en) from t; +select min(en) from t; +select * from t order by en; +drop table t; +create table t(s set('c', 'b', 'a')); +insert into t values ('a'), ('b'), ('c'); +select max(s) from t; +select min(s) from t; +drop table t; +create table t(id int, en enum('c', 'b', 'a')); +insert into t values (1, 'a'),(2, 'b'), (3, 'c'), (1, 'c'); +select id, max(en) from t where id=1 group by id; +select id, min(en) from t where id=1 group by id; +drop table t; +create table t(id int, s set('c', 'b', 'a')); +insert into t values (1, 'a'),(2, 'b'), (3, 'c'), (1, 'c'); +select id, max(s) from t where id=1 group by id; +select id, min(s) from t where id=1 group by id; +drop table t; +create table t(e enum('e','d','c','b','a')); +insert into t values ('e'),('d'),('c'),('b'),('a'); +select * from t order by e limit 1; +drop table t; +create table t(s set('e', 'd', 'c', 'b', 'a')); +insert into t values ('e'),('d'),('c'),('b'),('a'); +select * from t order by s limit 1; +drop table t; + +# TestIssue15563 +select distinct 0.7544678906163867 / 0.68234634; + +# TestIssue22231 +drop table if exists t_issue_22231; +create table t_issue_22231(a datetime); +insert into t_issue_22231 values('2020--05-20 01:22:12'); +--enable_warnings +select * from t_issue_22231 where a >= '2020-05-13 00:00:00 00:00:00' and a <= '2020-05-28 23:59:59 00:00:00'; +select cast('2020-10-22 10:31-10:12' as datetime); +select cast('2020-05-28 23:59:59 00:00:00' as datetime); +SELECT CAST("1111111111-" AS DATE); +--disable_warnings + +# TestIssue2612 +drop table if exists t; +create table t ( + create_at datetime NOT NULL DEFAULT '1000-01-01 00:00:00', + finish_at datetime NOT NULL DEFAULT '1000-01-01 00:00:00'); +insert into t values ('2016-02-13 15:32:24', '2016-02-11 17:23:22'); +select timediff(finish_at, create_at) from t; + +# TestIssue345 +drop table if exists t1, t2; +create table t1 (c1 int); +create table t2 (c2 int); +insert into t1 values (1); +insert into t2 values (2); +update t1, t2 set t1.c1 = 2, t2.c2 = 1; +update t1, t2 set c1 = 2, c2 = 1; +update t1 as a, t2 as b set a.c1 = 2, b.c2 = 1; +SELECT * FROM t1; +SELECT * FROM t2; +update t1 as a, t2 as t1 set a.c1 = 1, t1.c2 = 2; +SELECT * FROM t1; +SELECT * FROM t2; +--error 1054 +update t1 as a, t2 set t1.c1 = 10; + +# TestIssue5055 +drop table if exists t1, t2; +create table t1 (a int); +create table t2 (a int); +insert into t1 values(1); +insert into t2 values(1); +select tbl1.* from (select t1.a, 1 from t1) tbl1 left join t2 tbl2 on tbl1.a = tbl2.a order by tbl1.a desc limit 1; + +# TestIssue4024 +create database executor__issues2; +use executor__issues2; +create table t(a int); +insert into t values(1); +use executor__issues; +drop table if exists t; +create table t(a int); +insert into t values(1); +update 
t, executor__issues2.t set executor__issues2.t.a=2; +select * from t; +select * from executor__issues2.t; +update executor__issues.t, executor__issues2.t set executor__issues.t.a=3; +select * from t; +select * from executor__issues2.t; +drop database executor__issues2; + +# TestIssue5666 +set @@profiling=1; +SELECT QUERY_ID, SUM(DURATION) AS SUM_DURATION FROM INFORMATION_SCHEMA.PROFILING GROUP BY QUERY_ID; + +# TestIssue5341 +drop table if exists t; +create table t(a char); +insert into t value('a'); +select * from t where a < 1 order by a limit 0; + +# TestIssue16921 +drop table if exists t; +create table t (a float); +create index a on t(a); +insert into t values (1.0), (NULL), (0), (2.0); +select `a` from `t` use index (a) where !`a`; +select `a` from `t` ignore index (a) where !`a`; +select `a` from `t` use index (a) where `a`; +select `a` from `t` ignore index (a) where `a`; +select a from t use index (a) where not a is true; +select a from t use index (a) where not not a is true; +select a from t use index (a) where not not a; +select a from t use index (a) where not not not a is true; +select a from t use index (a) where not not not a; + +# TestIssue19100 +drop table if exists t1, t2; +create table t1 (c decimal); +create table t2 (c decimal, key(c)); +insert into t1 values (null); +insert into t2 values (null); +select count(*) from t1 where not c; +select count(*) from t2 where not c; +select count(*) from t1 where c; +select count(*) from t2 where c; + +# TestIssue27232 +drop table if exists t; +create table t (a timestamp); +insert into t values ("1970-07-23 10:04:59"), ("2038-01-19 03:14:07"); +select * from t where date_sub(a, interval 10 month) = date_sub("1970-07-23 10:04:59", interval 10 month); +select * from t where timestampadd(hour, 1, a ) = timestampadd(hour, 1, "2038-01-19 03:14:07"); + +# TestIssue15718 +drop table if exists tt; +create table tt(a decimal(10, 0), b varchar(1), c time); +insert into tt values(0, '2', null), (7, null, '1122'), (NULL, 'w', null), (NULL, '2', '3344'), (NULL, NULL, '0'), (7, 'f', '33'); +select a and b as d, a or c as e from tt; +drop table if exists tt; +create table tt(a decimal(10, 0), b varchar(1), c time); +insert into tt values(0, '2', '123'), (7, null, '1122'), (null, 'w', null); +select a and b as d, a, b from tt order by d limit 1; +select b or c as d, b, c from tt order by d limit 1; +drop table if exists t0; +CREATE TABLE t0(c0 FLOAT); +INSERT INTO t0(c0) VALUES (NULL); +SELECT * FROM t0 WHERE NOT(0 OR t0.c0); + +# TestIssue15767 +drop table if exists t; +create table t(a int, b char); +insert into t values (1,'s'),(2,'b'),(1,'c'),(2,'e'),(1,'a'); +insert into t select * from t; +insert into t select * from t; +insert into t select * from t; +select b, count(*) from ( select b from t order by a limit 20 offset 2) as s group by b order by b; + +# TestIssue16025 +drop table if exists t0; +CREATE TABLE t0(c0 NUMERIC PRIMARY KEY); +INSERT IGNORE INTO t0(c0) VALUES (NULL); +SELECT * FROM t0 WHERE c0; + +# TestIssue16854 +drop table if exists t; +CREATE TABLE `t` ( `a` enum('WAITING','PRINTED','STOCKUP','CHECKED','OUTSTOCK','PICKEDUP','WILLBACK','BACKED') DEFAULT NULL); +insert into t values(1),(2),(3),(4),(5),(6),(7); +insert into t select * from t; +insert into t select * from t; +insert into t select * from t; +insert into t select * from t; +insert into t select * from t; +insert into t select * from t; +insert into t select * from t; +set @@tidb_max_chunk_size=100; +select distinct a from t order by a; +drop table t; +CREATE 
TABLE `t` ( `a` set('WAITING','PRINTED','STOCKUP','CHECKED','OUTSTOCK','PICKEDUP','WILLBACK','BACKED') DEFAULT NULL);
+insert into t values(1),(2),(3),(4),(5),(6),(7);
+insert into t select * from t;
+insert into t select * from t;
+insert into t select * from t;
+insert into t select * from t;
+insert into t select * from t;
+insert into t select * from t;
+insert into t select * from t;
+set @@tidb_max_chunk_size=100;
+select distinct a from t order by a;
+set @@tidb_max_chunk_size=default;
+
+# TestIssue20305
+drop table if exists t2;
+create table t2 (a year(4));
+insert into t2 values(69);
+select * from t2 where a <= 69;
+# the following test is a regression test that matches MySQL's behavior.
+drop table if exists t3;
+CREATE TABLE `t3` (`y` year DEFAULT NULL, `a` int DEFAULT NULL);
+INSERT INTO `t3` VALUES (2069, 70), (2010, 11), (2155, 2156), (2069, 69);
+SELECT * FROM `t3` where y <= a;
+
+# TestIssue22817
+drop table if exists t3;
+create table t3 (a year);
+insert into t3 values (1991), ("1992"), ("93"), (94);
+select * from t3 where a >= NULL;
+
+# TestIssue13953
+drop table if exists t;
+CREATE TABLE `t` (`id` int(11) DEFAULT NULL, `tp_bigint` bigint(20) DEFAULT NULL );
+insert into t values(0,1),(1,9215570218099803537);
+select A.tp_bigint,B.id from t A join t B on A.id < B.id * 16 where A.tp_bigint = B.id;
+
+# TestIssue17780
+drop table if exists t0;
+create table t0 (c0 double);
+insert into t0 values (1e30);
+update t0 set c0=0 where t0.c0 like 0;
+select count(*) from t0 where c0 = 0;
+
+# TestIssue9918
+drop table if exists t;
+create table t (a year);
+insert into t values(0);
+select cast(a as char) from t;
+
+# TestIssue13004
+# see https://dev.mysql.com/doc/refman/5.6/en/date-and-time-literals.html, timestamp here actually produces a datetime
+SELECT TIMESTAMP '9999-01-01 00:00:00';
+
+# TestIssue12178
+drop table if exists ta;
+create table ta(id decimal(60,2));
+insert into ta values (JSON_EXTRACT('{"c": "1234567890123456789012345678901234567890123456789012345"}', '$.c'));
+select * from ta;
+
+# TestIssue11883
+drop table if exists t1;
+create table t1 (f1 json);
+insert into t1(f1) values ('"asd"'),('"asdf"'),('"asasas"');
+select f1 from t1 where json_extract(f1,"$") in ("asd","asasas","asdf");
+select f1 from t1 where json_extract(f1, '$') = 'asd';
+# MySQL produces an empty row for the following SQL, which looks like a bug in MySQL.
+select f1 from t1 where case json_extract(f1,"$") when "asd" then 1 else 0 end; +delete from t1; +insert into t1 values ('{"a": 1}'); +# the first value in the tuple should be interpreted as string instead of JSON, so no row will be returned +select f1 from t1 where f1 in ('{"a": 1}', 'asdf', 'asdf'); +# and if we explicitly cast it into a JSON value, the check will pass +select f1 from t1 where f1 in (cast('{"a": 1}' as JSON), 'asdf', 'asdf'); +select json_extract('"asd"', '$') = 'asd'; +select json_extract('"asd"', '$') <=> 'asd'; +select json_extract('"asd"', '$') <> 'asd'; +select json_extract('{"f": 1.0}', '$.f') = 1.0; +select json_extract('{"f": 1.0}', '$.f') = '1.0'; +select json_extract('{"n": 1}', '$') = '{"n": 1}'; +select json_extract('{"n": 1}', '$') <> '{"n": 1}'; + +# TestIssue15492 +drop table if exists t; +create table t (a int, b int); +insert into t values (2, 20), (1, 10), (3, 30); +select a + 1 as field1, a as field2 from t order by field1, field2 limit 2; + +# TestIssue982 +drop table if exists t; +create table t (c int auto_increment, key(c)) auto_id_cache 1; +insert into t values(); +insert into t values(); +select * from t; + +# TestIssue24627 +drop table if exists test; +create table test(id float primary key clustered AUTO_INCREMENT, col1 int); +replace into test(col1) values(1); +replace into test(col1) values(2); +select * from test; +drop table test; +drop table if exists test; +create table test(id float primary key nonclustered AUTO_INCREMENT, col1 int) AUTO_ID_CACHE 1; +replace into test(col1) values(1); +replace into test(col1) values(2); +select * from test; +drop table test; +create table test2(id double primary key clustered AUTO_INCREMENT, col1 int); +replace into test2(col1) values(1); +insert into test2(col1) values(1); +replace into test2(col1) values(1); +insert into test2(col1) values(1); +replace into test2(col1) values(1); +replace into test2(col1) values(1); +select * from test2; +drop table test2; +create table test2(id double primary key nonclustered AUTO_INCREMENT, col1 int) AUTO_ID_CACHE 1; +replace into test2(col1) values(1); +insert into test2(col1) values(1); +replace into test2(col1) values(1); +insert into test2(col1) values(1); +replace into test2(col1) values(1); +replace into test2(col1) values(1); +select * from test2; +drop table test2; + +# TestIssue39618 +drop table if exists t1; +CREATE TABLE t1 ( + c_int int(11) NOT NULL, + c_str varbinary(40) NOT NULL, + c_datetime datetime DEFAULT NULL, + c_timestamp timestamp NULL DEFAULT NULL, + c_double double DEFAULT NULL, + c_decimal decimal(12,6) DEFAULT NULL, + c_enum enum('blue','green','red','yellow','white','orange','purple') DEFAULT NULL, + PRIMARY KEY (c_int,c_str) /*T![clustered_index] CLUSTERED */, + KEY c_int_2 (c_int), + KEY c_decimal (c_decimal), + KEY c_datetime (c_datetime) +) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin +PARTITION BY LIST COLUMNS(c_int) +(PARTITION p0 VALUES IN (1,5,9,13,17,21,25,29,33,37), + PARTITION p1 VALUES IN (2,6,10,14,18,22,26,30,34,38), + PARTITION p2 VALUES IN (3,7,11,15,19,23,27,31,35,39), + PARTITION p3 VALUES IN (4,8,12,16,20,24,28,32,36,40)); +INSERT INTO t1 VALUES (3,'bold goldberg','2020-01-07 12:08:19','2020-06-19 08:13:35',0.941002,5.303000,'yellow'),(1,'crazy wescoff','2020-03-24 21:51:02','2020-06-19 08:13:35',47.565275,6.313000,'orange'),(5,'relaxed gagarin','2020-05-20 11:36:26','2020-06-19 08:13:35',38.948617,3.143000,'green'),(9,'gifted vaughan','2020-04-09 16:19:45','2020-06-19 
08:13:35',95.922976,8.708000,'yellow'),(2,'focused taussig','2020-05-17 17:58:34','2020-06-19 08:13:35',4.137803,4.902000,'white'),(6,'fervent yonath','2020-05-26 03:55:25','2020-06-19 08:13:35',72.394272,6.491000,'white'),(18,'mystifying bhaskara','2020-02-19 10:41:48','2020-06-19 08:13:35',10.832397,9.707000,'red'),(4,'goofy saha','2020-03-11 13:24:31','2020-06-19 08:13:35',39.007216,2.446000,'blue'),(20,'mystifying bhaskara','2020-04-03 11:33:27','2020-06-19 08:13:35',85.190386,6.787000,'blue'); +DROP TABLE IF EXISTS t2; +CREATE TABLE t2 ( + c_int int(11) NOT NULL, + c_str varbinary(40) NOT NULL, + c_datetime datetime DEFAULT NULL, + c_timestamp timestamp NULL DEFAULT NULL, + c_double double DEFAULT NULL, + c_decimal decimal(12,6) DEFAULT NULL, + c_enum enum('blue','green','red','yellow','white','orange','purple') DEFAULT NULL, + PRIMARY KEY (c_int,c_str) /*T![clustered_index] CLUSTERED */, + KEY c_int_2 (c_int), + KEY c_decimal (c_decimal), + KEY c_datetime (c_datetime) +) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin +PARTITION BY LIST COLUMNS(c_int) +(PARTITION p0 VALUES IN (1,5,9,13,17,21,25,29,33,37), + PARTITION p1 VALUES IN (2,6,10,14,18,22,26,30,34,38), + PARTITION p2 VALUES IN (3,7,11,15,19,23,27,31,35,39), + PARTITION p3 VALUES IN (4,8,12,16,20,24,28,32,36,40)); +INSERT INTO t2 VALUES (1,'crazy wescoff','2020-03-24 21:51:02','2020-04-01 12:11:56',47.565275,6.313000,'orange'),(1,'unruffled johnson','2020-06-30 03:42:58','2020-06-14 00:16:50',35.444084,1.090000,'red'),(5,'relaxed gagarin','2020-05-20 11:36:26','2020-02-19 12:25:48',38.948617,3.143000,'green'),(9,'eloquent archimedes','2020-02-16 04:20:21','2020-05-23 15:42:33',32.310878,5.855000,'orange'),(9,'gifted vaughan','2020-04-09 16:19:45','2020-05-15 01:42:16',95.922976,8.708000,'yellow'),(13,'dreamy benz','2020-04-27 17:43:44','2020-03-27 06:33:03',39.539233,4.823000,'red'),(3,'bold goldberg','2020-01-07 12:08:19','2020-03-10 18:37:09',0.941002,5.303000,'yellow'),(3,'youthful yonath','2020-01-12 17:10:39','2020-06-10 15:13:44',66.288511,6.046000,'white'),(7,'upbeat bhabha','2020-04-29 01:17:05','2020-03-11 22:58:43',23.316987,9.026000,'yellow'),(11,'quizzical ritchie','2020-05-16 08:21:36','2020-03-05 19:23:25',75.019379,0.260000,'purple'),(2,'dazzling kepler','2020-04-11 04:38:59','2020-05-06 04:42:32',78.798503,2.274000,'purple'),(2,'focused taussig','2020-05-17 17:58:34','2020-02-25 09:11:03',4.137803,4.902000,'white'),(2,'sharp ptolemy',NULL,'2020-05-17 18:04:19',NULL,5.573000,'purple'),(6,'fervent yonath','2020-05-26 03:55:25','2020-05-06 14:23:44',72.394272,6.491000,'white'),(10,'musing wu','2020-04-03 11:33:27','2020-05-24 06:11:56',85.190386,6.787000,'blue'),(8,'hopeful keller','2020-02-19 10:41:48','2020-04-19 17:10:36',10.832397,9.707000,'red'),(12,'exciting boyd',NULL,'2020-03-28 18:27:23',NULL,9.249000,'blue'); +set tidb_txn_assertion_level=strict; +begin; +delete t1, t2 from t1, t2 where t1.c_enum in ('blue'); +commit; +set tidb_txn_assertion_level=default; + +# TestIssue40158 +drop table if exists t1; +create table t1 (_id int PRIMARY KEY, c1 char, index (c1)); +insert into t1 values (1, null); +select * from t1 where c1 is null and _id < 1; + +# TestIssue40596 +drop table if exists t1, t2; +CREATE TABLE t1 ( + c1 double DEFAULT '1.335088259490289', + c2 set('mj','4s7ht','z','3i','b26','9','cg11','uvzcp','c','ns','fl9') NOT NULL DEFAULT 'mj,z,3i,9,cg11,c', + PRIMARY KEY (c2) /*T![clustered_index] CLUSTERED */, + KEY i1 (c1), + KEY i2 (c1), + KEY i3 (c1) +) ENGINE=InnoDB DEFAULT CHARSET=gbk 
COLLATE=gbk_chinese_ci; +INSERT INTO t1 VALUES (634.2783557491367,''),(2000.5041449792013,'4s7ht'),(634.2783557491367,'3i'),(634.2783557491367,'9'),(7803.173688589342,'uvzcp'),(634.2783557491367,'ns'),(634.2783557491367,'fl9'); +CREATE TABLE t2 ( + c3 decimal(56,16) DEFAULT '931359772706767457132645278260455518957.9866038319986886', + c4 set('3bqx','g','6op3','2g','jf','arkd3','y0b','jdy','1g','ff5z','224b') DEFAULT '3bqx,2g,ff5z,224b', + c5 smallint(6) NOT NULL DEFAULT '-25973', + c6 year(4) DEFAULT '2122', + c7 text DEFAULT NULL, + PRIMARY KEY (c5) /*T![clustered_index] CLUSTERED */, + KEY i4 (c6), + KEY i5 (c5) +) ENGINE=InnoDB DEFAULT CHARSET=utf8 COLLATE=utf8_bin COMMENT='' +PARTITION BY HASH (c5) PARTITIONS 4; +INSERT INTO t2 VALUES (465.0000000000000000,'jdy',-8542,2008,'FgZXe'); +set @@sql_mode=''; +set tidb_partition_prune_mode=dynamic; +analyze table t1; +analyze table t2; +select /*+ inl_join( t1 , t2 ) */ avg( t2.c5 ) as r0 , repeat( t2.c7 , t2.c5 ) as r1 , locate( t2.c7 , t2.c7 ) as r2 , unhex( t1.c1 ) as r3 from t1 right join t2 on t1.c2 = t2.c5 where not( t2.c5 in ( -7860 ,-13384 ,-12940 ) ) and not( t1.c2 between '4s7ht' and 'mj' ); +select /*+ inl_join (t1, t2) */ t2.c5 from t1 right join t2 on t1.c2 = t2.c5 where not( t1.c2 between '4s7ht' and 'mj' ); +set sql_mode=default; +set tidb_partition_prune_mode=default; + +# TestIssue41778 +drop table if exists ta, tb, tc; +CREATE TABLE ta ( + a1 json DEFAULT NULL, + a2 decimal(31, 1) DEFAULT '0' +); +CREATE TABLE tb ( + b1 smallint(6) DEFAULT '-11385', + b2 decimal(63, 14) DEFAULT '-6197127648752447138876497216172307937317445669286.98661563645110' +); +CREATE TABLE tc ( + c1 text DEFAULT NULL, + c2 float NOT NULL DEFAULT '1.8132474', + PRIMARY KEY (c2) + /*T![clustered_index] CLUSTERED */ +); +insert into ta +values (NULL, 1228.0); +insert into ta +values ('"json string1"', 623.8); +insert into ta +values (NULL, 1337.0); +-- error 1815 +select count(*)from ta where not ( ta.a1 in ( select b2 from tb where not ( ta.a1 in ( select c1 from tc where ta.a2 in ( select b2 from tb where IsNull(ta.a1) ) ) ) ) ); + +# TestIssue15662 +drop table if exists V, F; +create table V (id int primary key, col_int int); +insert into V values (1, 8); +create table F (id int primary key, col_int int); +insert into F values (1, 8); +select table1.`col_int` as field1, table1.`col_int` as field2 from V as table1 left join F as table2 on table1.`col_int` = table2.`col_int` order by field1, field2 desc limit 2; + +# TestIssue30382 +set tidb_cost_model_version=2; +set @@session.tidb_enable_list_partition = ON; +drop table if exists t1, t2; +create table t1 (c_int int, c_str varchar(40), c_decimal decimal(12, 6), primary key (c_int) , key(c_str(2)) , key(c_decimal) ) partition by list (c_int) ( partition p0 values IN (1, 5, 9, 13, 17, 21, 25, 29, 33, 37), partition p1 values IN (2, 6, 10, 14, 18, 22, 26, 30, 34, 38), partition p2 values IN (3, 7, 11, 15, 19, 23, 27, 31, 35, 39), partition p3 values IN (4, 8, 12, 16, 20, 24, 28, 32, 36, 40)) ; +create table t2 (c_int int, c_str varchar(40), c_decimal decimal(12, 6), primary key (c_int) , key(c_str) , key(c_decimal) ) partition by hash (c_int) partitions 4; +insert into t1 values (6, 'musing mayer', 1.280), (7, 'wizardly heisenberg', 6.589), (8, 'optimistic swirles', 9.633), (9, 'hungry haslett', 2.659), (10, 'stupefied wiles', 2.336); +insert into t2 select * from t1 ; +analyze table t1; +analyze table t2; +begin; +--sorted_result +select * from t1 where c_str <> any (select c_str from t2 where c_decimal < 
5) for update; +explain format = 'brief' select * from t1 where c_str <> any (select c_str from t2 where c_decimal < 5) for update; +commit; +set tidb_cost_model_version=default; +set @@session.tidb_enable_list_partition = default; + +# TestFix31537 +drop table if exists trade, trade_history, status_type; +set @@foreign_key_checks=0; +CREATE TABLE trade ( + t_id bigint(16) NOT NULL AUTO_INCREMENT, + t_dts datetime NOT NULL, + t_st_id char(4) NOT NULL, + t_tt_id char(3) NOT NULL, + t_is_cash tinyint(1) NOT NULL, + t_s_symb char(15) NOT NULL, + t_qty mediumint(7) NOT NULL, + t_bid_price decimal(8,2) NOT NULL, + t_ca_id bigint(12) NOT NULL, + t_exec_name varchar(49) NOT NULL, + t_trade_price decimal(8,2) DEFAULT NULL, + t_chrg decimal(10,2) NOT NULL, + t_comm decimal(10,2) NOT NULL, + t_tax decimal(10,2) NOT NULL, + t_lifo tinyint(1) NOT NULL, + PRIMARY KEY (t_id) /*T![clustered_index] CLUSTERED */, + KEY i_t_ca_id_dts (t_ca_id,t_dts), + KEY i_t_s_symb_dts (t_s_symb,t_dts), + CONSTRAINT fk_trade_st FOREIGN KEY (t_st_id) REFERENCES status_type (st_id), + CONSTRAINT fk_trade_tt FOREIGN KEY (t_tt_id) REFERENCES trade_type (tt_id), + CONSTRAINT fk_trade_s FOREIGN KEY (t_s_symb) REFERENCES security (s_symb), + CONSTRAINT fk_trade_ca FOREIGN KEY (t_ca_id) REFERENCES customer_account (ca_id) +) ; +CREATE TABLE trade_history ( + th_t_id bigint(16) NOT NULL, + th_dts datetime NOT NULL, + th_st_id char(4) NOT NULL, + PRIMARY KEY (th_t_id,th_st_id) /*T![clustered_index] NONCLUSTERED */, + KEY i_th_t_id_dts (th_t_id,th_dts), + CONSTRAINT fk_trade_history_t FOREIGN KEY (th_t_id) REFERENCES trade (t_id), + CONSTRAINT fk_trade_history_st FOREIGN KEY (th_st_id) REFERENCES status_type (st_id) +); +CREATE TABLE status_type ( + st_id char(4) NOT NULL, + st_name char(10) NOT NULL, + PRIMARY KEY (st_id) /*T![clustered_index] NONCLUSTERED */ +); +--disable_result_log +trace plan SELECT T_ID, T_S_SYMB, T_QTY, ST_NAME, TH_DTS FROM ( SELECT T_ID AS ID FROM TRADE WHERE T_CA_ID = 43000014236 ORDER BY T_DTS DESC LIMIT 10 ) T, TRADE, TRADE_HISTORY, STATUS_TYPE WHERE TRADE.T_ID = ID AND TRADE_HISTORY.TH_T_ID = TRADE.T_ID AND STATUS_TYPE.ST_ID = TRADE_HISTORY.TH_ST_ID ORDER BY TH_DTS DESC LIMIT 30; +--enable_result_log +set @@foreign_key_checks=default; + +# TestIssue48007 +drop table if exists partsupp, supplier, nation; +SET GLOBAL tidb_mem_oom_action='CANCEL'; +CREATE TABLE `partsupp` ( `PS_PARTKEY` bigint(20) NOT NULL,`PS_SUPPKEY` bigint(20) NOT NULL,`PS_AVAILQTY` bigint(20) NOT NULL,`PS_SUPPLYCOST` decimal(15,2) NOT NULL,`PS_COMMENT` varchar(199) NOT NULL,PRIMARY KEY (`PS_PARTKEY`,`PS_SUPPKEY`) /*T![clustered_index] CLUSTERED */) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin; +CREATE TABLE `supplier` (`S_SUPPKEY` bigint(20) NOT NULL,`S_NAME` char(25) NOT NULL,`S_ADDRESS` varchar(40) NOT NULL,`S_NATIONKEY` bigint(20) NOT NULL,`S_PHONE` char(15) NOT NULL,`S_ACCTBAL` decimal(15,2) NOT NULL,`S_COMMENT` varchar(101) NOT NULL,PRIMARY KEY (`S_SUPPKEY`) /*T![clustered_index] CLUSTERED */) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin; +CREATE TABLE `nation` (`N_NATIONKEY` bigint(20) NOT NULL,`N_NAME` char(25) NOT NULL,`N_REGIONKEY` bigint(20) NOT NULL,`N_COMMENT` varchar(152) DEFAULT NULL,PRIMARY KEY (`N_NATIONKEY`) /*T![clustered_index] CLUSTERED */) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin; +set @@tidb_mem_quota_query=128; +-- replace_regex /conn=[0-9]+/conn=/ +-- error 8175 +explain select ps_partkey, sum(ps_supplycost * ps_availqty) as value from partsupp, supplier, nation where 
ps_suppkey = s_suppkey and s_nationkey = n_nationkey and n_name = 'MOZAMBIQUE' group by ps_partkey having sum(ps_supplycost * ps_availqty) > ( select sum(ps_supplycost * ps_availqty) * 0.0001000000 from partsupp, supplier, nation where ps_suppkey = s_suppkey and s_nationkey = n_nationkey and n_name = 'MOZAMBIQUE' ) order by value desc; +SET GLOBAL tidb_mem_oom_action = DEFAULT; +set @@tidb_mem_quota_query=default; + + +# TestIssue49369 +drop table if exists issue49369; +CREATE TABLE `issue49369` ( + `x` varchar(32) COLLATE utf8mb4_bin DEFAULT NULL +) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin; +--error 1690 +insert into t select round(cast('88888899999999999888888888888888888888888888888888888.11111111111111111111' as decimal(18,12)) * cast('88888899999999999888888888888888888888888888888888888.11111111111111111111' as decimal(42,18)) ); +set @@sql_mode = ''; +insert into t select round(cast('88888899999999999888888888888888888888888888888888888.11111111111111111111' as decimal(18,12)) * cast('88888899999999999888888888888888888888888888888888888.11111111111111111111' as decimal(42,18)) ); +show warnings; +select * from t; +set @@sql_mode = default; \ No newline at end of file