diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS index 2e69534502f49..abdd9b114b948 100644 --- a/.github/CODEOWNERS +++ b/.github/CODEOWNERS @@ -1 +1,5 @@ /expression @pingcap/co-expression +/planner @pingcap/co-planner +/statistics @pingcap/co-planner +/util/ranger @pingcap/co-planner +/bindinfo @pingcap/co-planner diff --git a/CONTRIBUTORS.md b/CONTRIBUTORS.md index e1fcbd5875978..72df9438be027 100644 --- a/CONTRIBUTORS.md +++ b/CONTRIBUTORS.md @@ -1 +1 @@ -Contributor list is moved to [Contributors](https://github.com/pingcap/community/blob/master/contributors#tidb-contributors) +Contributor list is moved to [Contributors](https://github.com/pingcap/community/blob/master/architecture/contributor-list.md#tidb-contributors) diff --git a/Makefile b/Makefile index cd7d80e6b59d6..b681fa0f48d55 100644 --- a/Makefile +++ b/Makefile @@ -1,5 +1,6 @@ PROJECT=tidb GOPATH ?= $(shell go env GOPATH) +P=8 # Ensure GOPATH is set before running build process. ifeq "$(GOPATH)" "" @@ -14,7 +15,7 @@ export PATH := $(path_to_add):$(PATH) GO := GO111MODULE=on go GOBUILD := $(GO) build $(BUILD_FLAG) -tags codes GOBUILDCOVERAGE := GOPATH=$(GOPATH) cd tidb-server; $(GO) test -coverpkg="../..." -c . -GOTEST := $(GO) test -p 8 +GOTEST := $(GO) test -p $(P) OVERALLS := GO111MODULE=on overalls ARCH := "`uname -s`" diff --git a/bindinfo/bind_test.go b/bindinfo/bind_test.go index 2361c0d1a1dc0..c0bb2d6118ddb 100644 --- a/bindinfo/bind_test.go +++ b/bindinfo/bind_test.go @@ -528,3 +528,25 @@ func (s *testSuite) TestAddEvolveTasks(c *C) { status := rows[1][3].(string) c.Assert(status == "using" || status == "rejected", IsTrue) } + +func (s *testSuite) TestBindingCache(c *C) { + tk := testkit.NewTestKit(c, s.store) + s.cleanBindingEnv(tk) + tk.MustExec("use test") + tk.MustExec("drop table if exists t") + tk.MustExec("create table t(a int, b int, index idx(a))") + tk.MustExec("create global binding for select * from t using select * from t use index(idx)") + tk.MustExec("create database tmp") + tk.MustExec("use tmp") + tk.MustExec("create table t(a int, b int, index idx(a))") + tk.MustExec("create global binding for select * from t using select * from t use index(idx)") + + c.Assert(s.domain.BindHandle().Update(false), IsNil) + c.Assert(s.domain.BindHandle().Update(false), IsNil) + res := tk.MustQuery("show global bindings") + c.Assert(len(res.Rows()), Equals, 2) + + tk.MustExec("drop global binding for select * from t") + c.Assert(s.domain.BindHandle().Update(false), IsNil) + c.Assert(len(s.domain.BindHandle().GetAllBindRecord()), Equals, 1) +} diff --git a/bindinfo/handle.go b/bindinfo/handle.go index 9272a439d0f5b..badd9012a5d6a 100644 --- a/bindinfo/handle.go +++ b/bindinfo/handle.go @@ -126,7 +126,7 @@ func (h *BindHandle) Update(fullLoad bool) (err error) { sql := "select original_sql, bind_sql, default_db, status, create_time, update_time, charset, collation from mysql.bind_info" if !fullLoad { - sql += " where update_time >= \"" + lastUpdateTime.String() + "\"" + sql += " where update_time > \"" + lastUpdateTime.String() + "\"" } // We need to apply the updates by order, wrong apply order of same original sql may cause inconsistent state. 
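// Note that the predicate above is now strict: a binding whose update_time equals lastUpdateTime was already applied in a previous round and is not re-read; lastUpdateTime itself is advanced below as newer bindings are seen.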
sql += " order by update_time" @@ -154,7 +154,7 @@ func (h *BindHandle) Update(fullLoad bool) (err error) { lastUpdateTime = meta.Bindings[0].UpdateTime } if err != nil { - logutil.BgLogger().Error("update bindinfo failed", zap.Error(err)) + logutil.BgLogger().Info("update bindinfo failed", zap.Error(err)) continue } @@ -163,7 +163,7 @@ func (h *BindHandle) Update(fullLoad bool) (err error) { if len(newRecord.Bindings) > 0 { newCache.setBindRecord(hash, newRecord) } else { - newCache.removeDeletedBindRecord(hash, oldRecord) + newCache.removeDeletedBindRecord(hash, newRecord) } updateMetrics(metrics.ScopeGlobal, oldRecord, newCache.getBindRecord(hash, meta.OriginalSQL, meta.Db), true) } @@ -459,6 +459,7 @@ func (c cache) removeDeletedBindRecord(hash string, meta *BindRecord) { } } } + c[hash] = metas } func (c cache) setBindRecord(hash string, meta *BindRecord) { diff --git a/checkout-pr-branch.sh b/checkout-pr-branch.sh index e888709847cb1..2f78588650a7d 100755 --- a/checkout-pr-branch.sh +++ b/checkout-pr-branch.sh @@ -1,4 +1,4 @@ -#!/bin/bash +#!/usr/bin/env bash # This script is used to checkout a TiDB PR branch in a forked repo. if test -z $1; then diff --git a/cmd/explaintest/r/access_tiflash.result b/cmd/explaintest/r/access_tiflash.result index 3bf05e9077180..d44565ca9e6fd 100644 --- a/cmd/explaintest/r/access_tiflash.result +++ b/cmd/explaintest/r/access_tiflash.result @@ -40,3 +40,18 @@ id count task operator info TableReader_7 44.00 root data:Selection_6 └─Selection_6 44.00 cop[tiflash] or(and(gt(test.tt.a, 1), lt(test.tt.a, 20)), and(ge(test.tt.a, 30), lt(test.tt.a, 55))) └─TableScan_5 44.00 cop[tiflash] table:tt, range:[-inf,+inf], keep order:false, stats:pseudo +drop table if exists ttt; +create table ttt (a int, primary key (a desc)); +desc select * from ttt order by ttt.a desc; +id count task operator info +TableReader_11 10000.00 root data:TableScan_10 +└─TableScan_10 10000.00 cop[tikv] table:ttt, range:[-inf,+inf], keep order:true, desc, stats:pseudo +desc select /*+ read_from_storage(tiflash[ttt]) */ * from ttt order by ttt.a desc; +id count task operator info +Sort_4 10000.00 root test.ttt.a:desc +└─TableReader_8 10000.00 root data:TableScan_7 + └─TableScan_7 10000.00 cop[tiflash] table:ttt, range:[-inf,+inf], keep order:false, stats:pseudo +desc select /*+ read_from_storage(tiflash[ttt]) */ * from ttt order by ttt.a; +id count task operator info +TableReader_11 10000.00 root data:TableScan_10 +└─TableScan_10 10000.00 cop[tiflash] table:ttt, range:[-inf,+inf], keep order:true, stats:pseudo diff --git a/cmd/explaintest/r/explain_easy.result b/cmd/explaintest/r/explain_easy.result index b1f9ba92be976..dcb09bf7de748 100644 --- a/cmd/explaintest/r/explain_easy.result +++ b/cmd/explaintest/r/explain_easy.result @@ -79,9 +79,9 @@ TopN_7 1.00 root test.t2.c2:asc, offset:0, count:1 └─TableScan_13 10000.00 cop[tikv] table:t2, range:[-inf,+inf], keep order:false, stats:pseudo explain select * from t1 where c1 > 1 and c2 = 1 and c3 < 1; id count task operator info -IndexLookUp_11 1.11 root +IndexLookUp_11 11.08 root ├─IndexScan_8 33.33 cop[tikv] table:t1, index:c2, range:(1 1,1 +inf], keep order:false, stats:pseudo -└─Selection_10 1.11 cop[tikv] lt(test.t1.c3, 1) +└─Selection_10 11.08 cop[tikv] lt(test.t1.c3, 1) └─TableScan_9 33.33 cop[tikv] table:t1, keep order:false, stats:pseudo explain select * from t1 where c1 = 1 and c2 > 1; id count task operator info diff --git a/cmd/explaintest/run-tests.sh b/cmd/explaintest/run-tests.sh index ed3c942432d7d..f195392a0402d 100755 --- 
a/cmd/explaintest/run-tests.sh +++ b/cmd/explaintest/run-tests.sh @@ -1,4 +1,4 @@ -#!/bin/bash +#!/usr/bin/env bash TIDB_TEST_STORE_NAME=$TIDB_TEST_STORE_NAME TIKV_PATH=$TIKV_PATH diff --git a/cmd/explaintest/t/access_tiflash.test b/cmd/explaintest/t/access_tiflash.test index 101892d7f43f1..76604cdf0bfc4 100644 --- a/cmd/explaintest/t/access_tiflash.test +++ b/cmd/explaintest/t/access_tiflash.test @@ -15,4 +15,11 @@ desc select /*+ read_from_storage(tiflash[t]) */ sum(isnull(a)) from t; create table tt(a int, b int, primary key(a)); desc select * from tt where (tt.a > 1 and tt.a < 20) or (tt.a >= 30 and tt.a < 55); -desc select /*+ read_from_storage(tiflash[tt]) */ * from tt where (tt.a > 1 and tt.a < 20) or (tt.a >= 30 and tt.a < 55); \ No newline at end of file +desc select /*+ read_from_storage(tiflash[tt]) */ * from tt where (tt.a > 1 and tt.a < 20) or (tt.a >= 30 and tt.a < 55); + +drop table if exists ttt; +create table ttt (a int, primary key (a desc)); + +desc select * from ttt order by ttt.a desc; +desc select /*+ read_from_storage(tiflash[ttt]) */ * from ttt order by ttt.a desc; +desc select /*+ read_from_storage(tiflash[ttt]) */ * from ttt order by ttt.a; diff --git a/config/config.go b/config/config.go index 5c6a437c4892d..8fbeca484b585 100644 --- a/config/config.go +++ b/config/config.go @@ -29,6 +29,7 @@ import ( "github.com/BurntSushi/toml" "github.com/pingcap/errors" zaplog "github.com/pingcap/log" + "github.com/pingcap/parser/mysql" "github.com/pingcap/tidb/util/logutil" tracing "github.com/uber/jaeger-client-go/config" "go.uber.org/atomic" @@ -38,7 +39,7 @@ import ( const ( MaxLogFileSize = 4096 // MB // DefTxnTotalSizeLimit is the default value of TxnTxnTotalSizeLimit. - DefTxnTotalSizeLimit = 100 * 1024 * 1024 + DefTxnTotalSizeLimit = 1024 * 1024 * 1024 ) // Valid config maps @@ -74,8 +75,8 @@ type Config struct { TxnLocalLatches TxnLocalLatches `toml:"txn-local-latches" json:"txn-local-latches"` // Set sys variable lower-case-table-names, ref: https://dev.mysql.com/doc/refman/5.7/en/identifier-case-sensitivity.html. // TODO: We actually only support mode 2, which keeps the original case, but the comparison is case-insensitive. - LowerCaseTableNames int `toml:"lower-case-table-names" json:"lower-case-table-names"` - + LowerCaseTableNames int `toml:"lower-case-table-names" json:"lower-case-table-names"` + ServerVersion string `toml:"server-version" json:"server-version"` Log Log `toml:"log" json:"log"` Security Security `toml:"security" json:"security"` Status Status `toml:"status" json:"status"` @@ -100,6 +101,9 @@ type Config struct { DelayCleanTableLock uint64 `toml:"delay-clean-table-lock" json:"delay-clean-table-lock"` SplitRegionMaxNum uint64 `toml:"split-region-max-num" json:"split-region-max-num"` StmtSummary StmtSummary `toml:"stmt-summary" json:"stmt-summary"` + // RepairMode indicates that the TiDB is in the repair mode for table meta. 
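+	// RepairTableList lists the tables to repair as "db.table" names. A minimal sketch of the matching TOML, assuming the knobs shown in config.toml.example below: +	//   repair-mode = true +	//   repair-table-list = ["test.origin"]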
+ RepairMode bool `toml:"repair-mode" json:"repair-mode"` + RepairTableList []string `toml:"repair-table-list" json:"repair-table-list"` } // nullableBool defaults unset bool options to unset instead of false, which enables us to know if the user has set 2 @@ -440,11 +444,14 @@ var defaultConf = Config{ EnableTableLock: false, DelayCleanTableLock: 0, SplitRegionMaxNum: 1000, + RepairMode: false, + RepairTableList: []string{}, TxnLocalLatches: TxnLocalLatches{ Enabled: false, Capacity: 2048000, }, LowerCaseTableNames: 2, + ServerVersion: "", Log: Log{ Level: "info", Format: "text", @@ -638,6 +645,9 @@ func (c *Config) Load(confFile string) error { if c.TokenLimit == 0 { c.TokenLimit = 1000 } + if len(c.ServerVersion) > 0 { + mysql.ServerVersion = c.ServerVersion + } // If any items in confFile file are not mapped into the Config struct, issue // an error and stop the server from starting. undecoded := metaData.Undecoded() @@ -700,6 +710,10 @@ func (c *Config) Valid() error { if c.TiKVClient.GrpcConnectionCount == 0 { return fmt.Errorf("grpc-connection-count should be greater than 0") } + + if c.Performance.TxnTotalSizeLimit > (10 << 30) { + return fmt.Errorf("txn-total-size-limit should be less than %d", 10<<30) + } return nil } diff --git a/config/config.toml.example b/config/config.toml.example index b1c2d77799fa0..f17a5aaa1509f 100644 --- a/config/config.toml.example +++ b/config/config.toml.example @@ -74,6 +74,19 @@ split-region-max-num = 1000 # In order to support "drop primary key" operation , this flag must be true and the table does not have the pkIsHandle flag. alter-primary-key = false +# server-version is used to change the version string of TiDB in the following scenarios: +# 1. the server version returned by builtin-function `VERSION()`. +# 2. the server version filled in handshake packets of MySQL Connection Protocol, see https://dev.mysql.com/doc/internals/en/connection-phase-packets.html#packet-Protocol::Handshake for more details. +# If server-version = "", the default value (the original TiDB version string) is used. +server-version = "" + +# Repair mode is used to repair broken table metadata in TiKV in extreme cases. +repair-mode = false + +# The repair table list names the tables to repair, in a format like ["db.table",]. +# In repair mode, repairing a table that is not in the repair list returns a wrong database or wrong table error. +repair-table-list = [] + [log] # Log level: debug, info, warn, error, fatal. level = "info" @@ -201,7 +214,7 @@ bind-info-lease = "3s" # If using TiKV as the storage, the entry represents a key/value pair. # WARNING: Do not set the value too large, otherwise it will make a very large impact on the TiKV cluster. # Please adjust this configuration carefully. -txn-total-size-limit = 104857600 +txn-total-size-limit = 1073741824 [proxy-protocol] # PROXY protocol acceptable client networks. diff --git a/config/config_test.go b/config/config_test.go index f8d3a3dea03f2..b6e7afb7410e2 100644 --- a/config/config_test.go +++ b/config/config_test.go @@ -23,6 +23,7 @@ import ( "github.com/BurntSushi/toml" .
"github.com/pingcap/check" zaplog "github.com/pingcap/log" + "github.com/pingcap/parser/mysql" "github.com/pingcap/tidb/util/logutil" tracing "github.com/uber/jaeger-client-go/config" ) @@ -179,6 +180,8 @@ alter-primary-key = true delay-clean-table-lock = 5 split-region-max-num=10000 enable-batch-dml = true +server-version = "test_version" +repair-mode = true [performance] txn-total-size-limit=2000 [tikv-client] @@ -196,6 +199,8 @@ max-sql-length=1024 c.Assert(conf.Load(configFile), IsNil) + c.Assert(conf.ServerVersion, Equals, "test_version") + c.Assert(mysql.ServerVersion, Equals, conf.ServerVersion) // Test that the original value will not be clear by load the config file that does not contain the option. c.Assert(conf.Binlog.Enable, Equals, true) c.Assert(conf.Binlog.Strategy, Equals, "hash") @@ -215,6 +220,7 @@ max-sql-length=1024 c.Assert(conf.StmtSummary.MaxStmtCount, Equals, uint(1000)) c.Assert(conf.StmtSummary.MaxSQLLength, Equals, uint(1024)) c.Assert(conf.EnableBatchDML, Equals, true) + c.Assert(conf.RepairMode, Equals, true) c.Assert(f.Close(), IsNil) c.Assert(os.Remove(configFile), IsNil) @@ -224,7 +230,7 @@ max-sql-length=1024 // Make sure the example config is the same as default config. c.Assert(conf, DeepEquals, GetGlobalConfig()) - // Test for lof config. + // Test for log config. c.Assert(conf.Log.ToLogConfig(), DeepEquals, logutil.NewLogConfig("info", "text", "tidb-slow.log", conf.Log.File, false, func(config *zaplog.Config) { config.DisableErrorVerbose = conf.Log.getDisableErrorStack() })) // Test for tracing config. @@ -349,3 +355,20 @@ func (s *testConfigSuite) TestOOMActionValid(c *C) { c.Assert(c1.Valid() == nil, Equals, tt.valid) } } + +func (s *testConfigSuite) TestTxnTotalSizeLimitValid(c *C) { + conf := NewConfig() + tests := []struct { + limit uint64 + valid bool + }{ + {4 << 10, true}, + {10 << 30, true}, + {10<<30 + 1, false}, + } + + for _, tt := range tests { + conf.Performance.TxnTotalSizeLimit = tt.limit + c.Assert(conf.Valid() == nil, Equals, tt.valid) + } +} diff --git a/ddl/column_test.go b/ddl/column_test.go index 7e1b9641dbbe4..16f682543950e 100644 --- a/ddl/column_test.go +++ b/ddl/column_test.go @@ -800,7 +800,7 @@ func (s *testColumnSuite) TestAddColumn(c *C) { hookErr = errors.Trace(err1) return } - newCol := table.FindCol(t.(*tables.Table).Columns, newColName) + newCol := table.FindCol(t.(*tables.TableCommon).Columns, newColName) if newCol == nil { return } @@ -891,7 +891,7 @@ func (s *testColumnSuite) TestDropColumn(c *C) { hookErr = errors.Trace(err1) return } - col := table.FindCol(t.(*tables.Table).Columns, colName) + col := table.FindCol(t.(*tables.TableCommon).Columns, colName) if col == nil { checkOK = true return diff --git a/ddl/db_change_test.go b/ddl/db_change_test.go index 35d473642c30d..c0e25e59b1a04 100644 --- a/ddl/db_change_test.go +++ b/ddl/db_change_test.go @@ -23,6 +23,7 @@ import ( . 
"github.com/pingcap/check" "github.com/pingcap/errors" + "github.com/pingcap/failpoint" "github.com/pingcap/log" "github.com/pingcap/parser" "github.com/pingcap/parser/ast" @@ -37,14 +38,24 @@ import ( "github.com/pingcap/tidb/sessionctx" "github.com/pingcap/tidb/store/mockstore" "github.com/pingcap/tidb/util/admin" + "github.com/pingcap/tidb/util/gcutil" "github.com/pingcap/tidb/util/sqlexec" "github.com/pingcap/tidb/util/testkit" "go.uber.org/zap" ) var _ = Suite(&testStateChangeSuite{}) +var _ = SerialSuites(&serialTestStateChangeSuite{}) + +type serialTestStateChangeSuite struct { + testStateChangeSuiteBase +} type testStateChangeSuite struct { + testStateChangeSuiteBase +} + +type testStateChangeSuiteBase struct { lease time.Duration store kv.Storage dom *domain.Domain @@ -53,7 +64,7 @@ type testStateChangeSuite struct { preSQL string } -func (s *testStateChangeSuite) SetUpSuite(c *C) { +func (s *testStateChangeSuiteBase) SetUpSuite(c *C) { s.lease = 200 * time.Millisecond ddl.WaitTimeWhenErrorOccured = 1 * time.Microsecond var err error @@ -71,7 +82,7 @@ func (s *testStateChangeSuite) SetUpSuite(c *C) { s.p = parser.New() } -func (s *testStateChangeSuite) TearDownSuite(c *C) { +func (s *testStateChangeSuiteBase) TearDownSuite(c *C) { s.se.Execute(context.Background(), "drop database if exists test_db_state") s.se.Close() s.dom.Close() @@ -534,7 +545,7 @@ func (s *testStateChangeSuite) TestDeleteOnly(c *C) { s.runTestInSchemaState(c, model.StateDeleteOnly, "", dropColumnSQL, sqls, nil) } -func (s *testStateChangeSuite) runTestInSchemaState(c *C, state model.SchemaState, tableName, alterTableSQL string, +func (s *testStateChangeSuiteBase) runTestInSchemaState(c *C, state model.SchemaState, tableName, alterTableSQL string, sqlWithErrs []sqlWithErr, expectQuery *expectQuery) { _, err := s.se.Execute(context.Background(), `create table t ( c1 varchar(64), @@ -592,7 +603,7 @@ func (s *testStateChangeSuite) runTestInSchemaState(c *C, state model.SchemaStat } } -func (s *testStateChangeSuite) execQuery(tk *testkit.TestKit, sql string, args ...interface{}) (*testkit.Result, error) { +func (s *testStateChangeSuiteBase) execQuery(tk *testkit.TestKit, sql string, args ...interface{}) (*testkit.Result, error) { comment := Commentf("sql:%s, args:%v", sql, args) rs, err := tk.Exec(sql, args...) if err != nil { @@ -611,7 +622,7 @@ func checkResult(result *testkit.Result, expected [][]interface{}) error { return nil } -func (s *testStateChangeSuite) CheckResult(tk *testkit.TestKit, sql string, args ...interface{}) (*testkit.Result, error) { +func (s *testStateChangeSuiteBase) CheckResult(tk *testkit.TestKit, sql string, args ...interface{}) (*testkit.Result, error) { comment := Commentf("sql:%s, args:%v", sql, args) rs, err := tk.Exec(sql, args...) 
if err != nil { @@ -829,7 +840,7 @@ func (s *testStateChangeSuite) TestParallelCreateAndRename(c *C) { type checkRet func(c *C, err1, err2 error) -func (s *testStateChangeSuite) testControlParallelExecSQL(c *C, sql1, sql2 string, f checkRet) { +func (s *testStateChangeSuiteBase) testControlParallelExecSQL(c *C, sql1, sql2 string, f checkRet) { _, err := s.se.Execute(context.Background(), "use test_db_state") c.Assert(err, IsNil) _, err = s.se.Execute(context.Background(), "create table t(a int, b int, c int, d int auto_increment,e int, index idx1(d), index idx2(d,e))") @@ -1136,3 +1147,60 @@ func (s *testStateChangeSuite) TestParallelTruncateTableAndAddColumn(c *C) { } s.testControlParallelExecSQL(c, sql1, sql2, f) } + +// TestParallelFlashbackTable tests parallel flashback table. +func (s *serialTestStateChangeSuite) TestParallelFlashbackTable(c *C) { + c.Assert(failpoint.Enable("github.com/pingcap/tidb/meta/autoid/mockAutoIDChange", `return(true)`), IsNil) + defer func(originGC bool) { + c.Assert(failpoint.Disable("github.com/pingcap/tidb/meta/autoid/mockAutoIDChange"), IsNil) + if originGC { + ddl.EmulatorGCEnable() + } else { + ddl.EmulatorGCDisable() + } + }(ddl.IsEmulatorGCEnable()) + + // Disable emulator GC, otherwise emulator GC will delete the table record as soon as possible after the drop table DDL is executed. + ddl.EmulatorGCDisable() + gcTimeFormat := "20060102-15:04:05 -0700 MST" + timeBeforeDrop := time.Now().Add(0 - time.Duration(48*60*60*time.Second)).Format(gcTimeFormat) + safePointSQL := `INSERT HIGH_PRIORITY INTO mysql.tidb VALUES ('tikv_gc_safe_point', '%[1]s', '') + ON DUPLICATE KEY + UPDATE variable_value = '%[1]s'` + tk := testkit.NewTestKit(c, s.store) + // clear GC variables first. + tk.MustExec("delete from mysql.tidb where variable_name in ( 'tikv_gc_safe_point','tikv_gc_enable' )") + // set GC safe point. + tk.MustExec(fmt.Sprintf(safePointSQL, timeBeforeDrop)) + // set GC enable. + err := gcutil.EnableGC(tk.Se) + c.Assert(err, IsNil) + + // prepare dropped table. + tk.MustExec("use test_db_state") + tk.MustExec("drop table if exists t") + tk.MustExec("create table t (a int);") + tk.MustExec("drop table if exists t") + // Test parallel flashback table.
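+	// Two sessions run the same flashback statement concurrently; exactly one must succeed and the other must fail with "Table 't_flashback' already exists", which the checkRet callback below asserts.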
+ ts := getDDLJobStartTime(tk, "test_db_state", "t") + sql1 := fmt.Sprintf("flashback table t until timestamp '%v' to t_flashback", ts) + f := func(c *C, err1, err2 error) { + c.Assert(err1, IsNil) + c.Assert(err2, NotNil) + c.Assert(err2.Error(), Equals, "[schema:1050]Table 't_flashback' already exists") + } + s.testControlParallelExecSQL(c, sql1, sql1, f) +} + +func getDDLJobStartTime(tk *testkit.TestKit, dbName, tblName string) string { + re := tk.MustQuery("admin show ddl jobs 100") + rows := re.Rows() + for _, row := range rows { + if row[1] == dbName && row[2] == tblName && (row[3] == "drop table" || row[3] == "truncate table") { + return row[8].(string) + } + } + return "" +} diff --git a/ddl/db_integration_test.go b/ddl/db_integration_test.go index c327f9bcd6747..4fabc86e74c40 100644 --- a/ddl/db_integration_test.go +++ b/ddl/db_integration_test.go @@ -1043,6 +1043,7 @@ func (s *testIntegrationSuite5) TestBackwardCompatibility(c *C) { err = txn.Commit(context.Background()) c.Assert(err, IsNil) ticker := time.NewTicker(s.lease) + defer ticker.Stop() for range ticker.C { historyJob, err := s.getHistoryDDLJob(job.ID) c.Assert(err, IsNil) diff --git a/ddl/db_test.go b/ddl/db_test.go index 5ab37e81fd952..52ed09b91cec6 100644 --- a/ddl/db_test.go +++ b/ddl/db_test.go @@ -47,6 +47,7 @@ import ( "github.com/pingcap/tidb/tablecodec" "github.com/pingcap/tidb/types" "github.com/pingcap/tidb/util/admin" + "github.com/pingcap/tidb/util/domainutil" "github.com/pingcap/tidb/util/israce" "github.com/pingcap/tidb/util/mock" "github.com/pingcap/tidb/util/testkit" @@ -2003,6 +2004,241 @@ func (s *testDBSuite1) TestCreateTable(c *C) { c.Assert(err.Error(), Equals, "[types:1291]Column 'a' has duplicated value 'B' in ENUM") } +func (s *testDBSuite5) TestRepairTable(c *C) { + c.Assert(failpoint.Enable("github.com/pingcap/tidb/infoschema/repairFetchCreateTable", `return(true)`), IsNil) + defer func() { + c.Assert(failpoint.Disable("github.com/pingcap/tidb/infoschema/repairFetchCreateTable"), IsNil) + }() + s.tk = testkit.NewTestKit(c, s.store) + s.tk.MustExec("use test") + s.tk.MustExec("drop table if exists t, other_table, origin") + + // Test repair table when TiDB is not in repair mode. + s.tk.MustExec("CREATE TABLE t (a int primary key, b varchar(10));") + _, err := s.tk.Exec("admin repair table t CREATE TABLE t (a float primary key, b varchar(5));") + c.Assert(err, NotNil) + c.Assert(err.Error(), Equals, "[ddl:8215]Failed to repair table: TiDB is not in REPAIR MODE") + + // Test repair table when the repaired list is empty. + domainutil.RepairInfo.SetRepairMode(true) + _, err = s.tk.Exec("admin repair table t CREATE TABLE t (a float primary key, b varchar(5));") + c.Assert(err, NotNil) + c.Assert(err.Error(), Equals, "[ddl:8215]Failed to repair table: repair list is empty") + + // Test repair table when its database isn't in repairInfo. + domainutil.RepairInfo.SetRepairTableList([]string{"test.other_table"}) + _, err = s.tk.Exec("admin repair table t CREATE TABLE t (a float primary key, b varchar(5));") + c.Assert(err, NotNil) + c.Assert(err.Error(), Equals, "[ddl:8215]Failed to repair table: database test is not in repair") + + // Test repair table when the table isn't in repairInfo.
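+	// The database is now in the repair list, but only other_table is named in it, so repairing t is still rejected.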
+ s.tk.MustExec("CREATE TABLE other_table (a int, b varchar(1), key using hash(b));") + _, err = s.tk.Exec("admin repair table t CREATE TABLE t (a float primary key, b varchar(5));") + c.Assert(err, NotNil) + c.Assert(err.Error(), Equals, "[ddl:8215]Failed to repair table: table t is not in repair") + + // Test user can't access to the repaired table. + _, err = s.tk.Exec("select * from other_table") + c.Assert(err, NotNil) + c.Assert(err.Error(), Equals, "[schema:1146]Table 'test.other_table' doesn't exist") + + // Test create statement use the same name with what is in repaired. + _, err = s.tk.Exec("CREATE TABLE other_table (a int);") + c.Assert(err, NotNil) + c.Assert(err.Error(), Equals, "[ddl:1103]Incorrect table name 'other_table'%!(EXTRA string=this table is in repair)") + + // Test column lost in repair table. + _, err = s.tk.Exec("admin repair table other_table CREATE TABLE other_table (a int, c char(1));") + c.Assert(err, NotNil) + c.Assert(err.Error(), Equals, "[ddl:8215]Failed to repair table: Column c has lost") + + // Test column type should be the same. + _, err = s.tk.Exec("admin repair table other_table CREATE TABLE other_table (a bigint, b varchar(1), key using hash(b));") + c.Assert(err, NotNil) + c.Assert(err.Error(), Equals, "[ddl:8215]Failed to repair table: Column a type should be the same") + + // Test index lost in repair table. + _, err = s.tk.Exec("admin repair table other_table CREATE TABLE other_table (a int unique);") + c.Assert(err, NotNil) + c.Assert(err.Error(), Equals, "[ddl:8215]Failed to repair table: Index a has lost") + + // Test index type should be the same. + _, err = s.tk.Exec("admin repair table other_table CREATE TABLE other_table (a int, b varchar(2) unique)") + c.Assert(err, NotNil) + c.Assert(err.Error(), Equals, "[ddl:8215]Failed to repair table: Index b type should be the same") + + // Test sub create statement in repair statement with the same name. + _, err = s.tk.Exec("admin repair table other_table CREATE TABLE other_table (a int);") + c.Assert(err, IsNil) + + // Test whether repair table name is case sensitive. + domainutil.RepairInfo.SetRepairMode(true) + domainutil.RepairInfo.SetRepairTableList([]string{"test.other_table2"}) + s.tk.MustExec("CREATE TABLE otHer_tAblE2 (a int, b varchar(1));") + _, err = s.tk.Exec("admin repair table otHer_tAblE2 CREATE TABLE otHeR_tAbLe (a int, b varchar(2));") + c.Assert(err, IsNil) + repairTable := testGetTableByName(c, s.s, "test", "otHeR_tAbLe") + c.Assert(repairTable.Meta().Name.O, Equals, "otHeR_tAbLe") + + // Test memory and system database is not for repair. + domainutil.RepairInfo.SetRepairMode(true) + domainutil.RepairInfo.SetRepairTableList([]string{"test.xxx"}) + _, err = s.tk.Exec("admin repair table performance_schema.xxx CREATE TABLE yyy (a int);") + c.Assert(err.Error(), Equals, "[ddl:8215]Failed to repair table: memory or system database is not for repair") + + // Test the repair detail. + turnRepairModeAndInit(true) + defer turnRepairModeAndInit(false) + // Domain reload the tableInfo and add it into repairInfo. + s.tk.MustExec("CREATE TABLE origin (a int primary key, b varchar(10), c int auto_increment);") + // Repaired tableInfo has been filtered by `domain.InfoSchema()`, so get it in repairInfo. 
+ originTableInfo, _ := domainutil.RepairInfo.GetRepairedTableInfoByTableName("test", "origin") + + hook := &ddl.TestDDLCallback{} + var repairErr error + hook.OnJobRunBeforeExported = func(job *model.Job) { + if job.Type != model.ActionRepairTable { + return + } + if job.TableID != originTableInfo.ID { + repairErr = errors.New("table id should be the same") + return + } + if job.SchemaState != model.StateNone { + repairErr = errors.New("repair job state should be none") + return + } + // Test whether the table is readable while the repaired table is still in StateNone. + tkInternal := testkit.NewTestKitWithInit(c, s.store) + _, repairErr = tkInternal.Exec("select * from origin") + // The repaired tableInfo has been filtered out of `domain.InfoSchema()`, so this returns an error because the user can't access it. + if repairErr != nil && terror.ErrorEqual(repairErr, infoschema.ErrTableNotExists) { + repairErr = nil + } + } + originalHook := s.dom.DDL().GetHook() + defer s.dom.DDL().(ddl.DDLForTest).SetHook(originalHook) + s.dom.DDL().(ddl.DDLForTest).SetHook(hook) + + // Exec the repair statement to override the tableInfo. + s.tk.MustExec("admin repair table origin CREATE TABLE origin (a int primary key, b varchar(5), c int auto_increment);") + c.Assert(repairErr, IsNil) + + // Check that the repaired tableInfo keeps the old tableID, index IDs and column IDs. + // testGetTableByName will extract the Table from `domain.InfoSchema()` directly. + repairTable = testGetTableByName(c, s.s, "test", "origin") + c.Assert(repairTable.Meta().ID, Equals, originTableInfo.ID) + c.Assert(len(repairTable.Meta().Columns), Equals, 3) + c.Assert(repairTable.Meta().Columns[0].ID, Equals, originTableInfo.Columns[0].ID) + c.Assert(repairTable.Meta().Columns[1].ID, Equals, originTableInfo.Columns[1].ID) + c.Assert(repairTable.Meta().Columns[2].ID, Equals, originTableInfo.Columns[2].ID) + c.Assert(len(repairTable.Meta().Indices), Equals, 1) + c.Assert(repairTable.Meta().Indices[0].ID, Equals, originTableInfo.Indices[0].ID) + c.Assert(repairTable.Meta().AutoIncID, Equals, originTableInfo.AutoIncID) + + c.Assert(repairTable.Meta().Columns[0].Tp, Equals, mysql.TypeLong) + c.Assert(repairTable.Meta().Columns[1].Tp, Equals, mysql.TypeVarchar) + c.Assert(repairTable.Meta().Columns[1].Flen, Equals, 5) + c.Assert(repairTable.Meta().Columns[2].Tp, Equals, mysql.TypeLong) + + // Exec the show create table statement to make sure the new tableInfo has been set. + result := s.tk.MustQuery("show create table origin") + c.Assert(result.Rows()[0][1], Equals, "CREATE TABLE `origin` (\n `a` int(11) NOT NULL,\n `b` varchar(5) DEFAULT NULL,\n `c` int(11) NOT NULL AUTO_INCREMENT,\n PRIMARY KEY (`a`)\n) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin") +} + +func turnRepairModeAndInit(on bool) { + list := make([]string, 0, 0) + if on { + list = append(list, "test.origin") + } + domainutil.RepairInfo.SetRepairMode(on) + domainutil.RepairInfo.SetRepairTableList(list) +} + +func (s *testDBSuite5) TestRepairTableWithPartition(c *C) { + c.Assert(failpoint.Enable("github.com/pingcap/tidb/infoschema/repairFetchCreateTable", `return(true)`), IsNil) + defer func() { + c.Assert(failpoint.Disable("github.com/pingcap/tidb/infoschema/repairFetchCreateTable"), IsNil) + }() + s.tk = testkit.NewTestKit(c, s.store) + s.tk.MustExec("use test") + s.tk.MustExec("drop table if exists origin") + + turnRepairModeAndInit(true) + defer turnRepairModeAndInit(false) + // The domain reloads the tableInfo and adds it into repairInfo.
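+	// Since repair mode is on and test.origin is in the repair list, the next domain reload moves this freshly created table into repairInfo instead of the ordinary schema.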
+ s.tk.MustExec("create table origin (a int not null) partition by RANGE(a) (" + + "partition p10 values less than (10)," + + "partition p30 values less than (30)," + + "partition p50 values less than (50)," + + "partition p70 values less than (70)," + + "partition p90 values less than (90));") + // Test for some old partition has lost. + _, err := s.tk.Exec("admin repair table origin create table origin (a int not null) partition by RANGE(a) (" + + "partition p10 values less than (10)," + + "partition p30 values less than (30)," + + "partition p50 values less than (50)," + + "partition p90 values less than (90)," + + "partition p100 values less than (100));") + c.Assert(err, NotNil) + c.Assert(err.Error(), Equals, "[ddl:8215]Failed to repair table: Partition p100 has lost") + + // Test for some partition changed the condition. + _, err = s.tk.Exec("admin repair table origin create table origin (a int not null) partition by RANGE(a) (" + + "partition p10 values less than (10)," + + "partition p20 values less than (25)," + + "partition p50 values less than (50)," + + "partition p90 values less than (90));") + c.Assert(err, NotNil) + c.Assert(err.Error(), Equals, "[ddl:8215]Failed to repair table: Partition p20 has lost") + + // Test for some partition changed the partition name. + _, err = s.tk.Exec("admin repair table origin create table origin (a int not null) partition by RANGE(a) (" + + "partition p10 values less than (10)," + + "partition p30 values less than (30)," + + "partition pNew values less than (50)," + + "partition p90 values less than (90));") + c.Assert(err, NotNil) + c.Assert(err.Error(), Equals, "[ddl:8215]Failed to repair table: Partition pnew has lost") + + originTableInfo, _ := domainutil.RepairInfo.GetRepairedTableInfoByTableName("test", "origin") + s.tk.MustExec("admin repair table origin create table origin_rename (a int not null) partition by RANGE(a) (" + + "partition p10 values less than (10)," + + "partition p30 values less than (30)," + + "partition p50 values less than (50)," + + "partition p90 values less than (90));") + repairTable := testGetTableByName(c, s.s, "test", "origin_rename") + c.Assert(repairTable.Meta().ID, Equals, originTableInfo.ID) + c.Assert(len(repairTable.Meta().Columns), Equals, 1) + c.Assert(repairTable.Meta().Columns[0].ID, Equals, originTableInfo.Columns[0].ID) + c.Assert(len(repairTable.Meta().Partition.Definitions), Equals, 4) + c.Assert(repairTable.Meta().Partition.Definitions[0].ID, Equals, originTableInfo.Partition.Definitions[0].ID) + c.Assert(repairTable.Meta().Partition.Definitions[1].ID, Equals, originTableInfo.Partition.Definitions[1].ID) + c.Assert(repairTable.Meta().Partition.Definitions[2].ID, Equals, originTableInfo.Partition.Definitions[2].ID) + c.Assert(repairTable.Meta().Partition.Definitions[3].ID, Equals, originTableInfo.Partition.Definitions[4].ID) + + // Test hash partition. + s.tk.MustExec("drop table if exists origin") + domainutil.RepairInfo.SetRepairMode(true) + domainutil.RepairInfo.SetRepairTableList([]string{"test.origin"}) + s.tk.MustExec("create table origin (a varchar(1), b int not null, c int, key idx(c)) partition by hash(b) partitions 30") + + // Test partition num in repair should be exactly same with old one, other wise will cause partition semantic problem. 
+ _, err = s.tk.Exec("admin repair table origin create table origin (a varchar(2), b int not null, c int, key idx(c)) partition by hash(b) partitions 20") + c.Assert(err, NotNil) + c.Assert(err.Error(), Equals, "[ddl:8215]Failed to repair table: Hash partition num should be the same") + + originTableInfo, _ = domainutil.RepairInfo.GetRepairedTableInfoByTableName("test", "origin") + s.tk.MustExec("admin repair table origin create table origin (a varchar(3), b int not null, c int, key idx(c)) partition by hash(b) partitions 30") + repairTable = testGetTableByName(c, s.s, "test", "origin") + c.Assert(repairTable.Meta().ID, Equals, originTableInfo.ID) + c.Assert(len(repairTable.Meta().Partition.Definitions), Equals, 30) + c.Assert(repairTable.Meta().Partition.Definitions[0].ID, Equals, originTableInfo.Partition.Definitions[0].ID) + c.Assert(repairTable.Meta().Partition.Definitions[1].ID, Equals, originTableInfo.Partition.Definitions[1].ID) + c.Assert(repairTable.Meta().Partition.Definitions[29].ID, Equals, originTableInfo.Partition.Definitions[29].ID) +} + func (s *testDBSuite2) TestCreateTableWithSetCol(c *C) { s.tk = testkit.NewTestKitWithInit(c, s.store) s.tk.MustExec("create table t_set (a int, b set('e') default '');") diff --git a/ddl/ddl.go b/ddl/ddl.go index b8b657974c9b1..fcacfbb6b1750 100644 --- a/ddl/ddl.go +++ b/ddl/ddl.go @@ -84,6 +84,8 @@ var ( errRunMultiSchemaChanges = terror.ClassDDL.New(mysql.ErrUnsupportedDDLOperation, fmt.Sprintf(mysql.MySQLErrName[mysql.ErrUnsupportedDDLOperation], "multi schema change")) errWaitReorgTimeout = terror.ClassDDL.New(mysql.ErrLockWaitTimeout, mysql.MySQLErrName[mysql.ErrWaitReorgTimeout]) errInvalidStoreVer = terror.ClassDDL.New(mysql.ErrInvalidStoreVersion, mysql.MySQLErrName[mysql.ErrInvalidStoreVersion]) + // ErrRepairTableFail is used to repair tableInfo in repair mode. + ErrRepairTableFail = terror.ClassDDL.New(mysql.ErrRepairTable, mysql.MySQLErrName[mysql.ErrRepairTable]) // We don't support dropping column with index covered now. errCantDropColWithIndex = terror.ClassDDL.New(mysql.ErrUnsupportedDDLOperation, fmt.Sprintf(mysql.MySQLErrName[mysql.ErrUnsupportedDDLOperation], "drop column with index")) @@ -244,6 +246,7 @@ type DDL interface { UnlockTables(ctx sessionctx.Context, lockedTables []model.TableLockTpInfo) error CleanupTableLock(ctx sessionctx.Context, tables []*ast.TableName) error UpdateTableReplicaInfo(ctx sessionctx.Context, tid int64, available bool) error + RepairTable(ctx sessionctx.Context, table *ast.TableName, createStmt *ast.CreateTableStmt) error // GetLease returns current schema lease time. GetLease() time.Duration diff --git a/ddl/ddl_api.go b/ddl/ddl_api.go index e7f7324fec046..db16bcb555ead 100644 --- a/ddl/ddl_api.go +++ b/ddl/ddl_api.go @@ -47,6 +47,7 @@ import ( "github.com/pingcap/tidb/types/parser_driver" "github.com/pingcap/tidb/util" "github.com/pingcap/tidb/util/chunk" + "github.com/pingcap/tidb/util/domainutil" "github.com/pingcap/tidb/util/logutil" "github.com/pingcap/tidb/util/mock" "github.com/pingcap/tidb/util/set" @@ -1412,7 +1413,7 @@ func (d *ddl) CreateTable(ctx sessionctx.Context, s *ast.CreateTableStmt) (err e err = d.doDDLJob(ctx, job) if err == nil { // do pre-split and scatter. 
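// The split only runs when the store implements kv.SplittableStore (renamed from kv.SplitableStore below) and EnableSplitTableRegion is set; otherwise the type assertion fails and the step is skipped.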
- sp, ok := d.store.(kv.SplitableStore) + sp, ok := d.store.(kv.SplittableStore) if ok && atomic.LoadUint32(&EnableSplitTableRegion) != 0 { var ( preSplit func() @@ -3931,3 +3932,80 @@ func extractCollateFromOption(def *ast.ColumnDef) []string { } return specifiedCollates } + +func (d *ddl) RepairTable(ctx sessionctx.Context, table *ast.TableName, createStmt *ast.CreateTableStmt) error { + // The existence of the DB and table has been checked in the preprocessor. + oldTableInfo, ok := (ctx.Value(domainutil.RepairedTable)).(*model.TableInfo) + if !ok || oldTableInfo == nil { + return ErrRepairTableFail.GenWithStack("Failed to get the repaired table") + } + oldDBInfo, ok := (ctx.Value(domainutil.RepairedDatabase)).(*model.DBInfo) + if !ok || oldDBInfo == nil { + return ErrRepairTableFail.GenWithStack("Failed to get the repaired database") + } + // For now, only repairing a table within the same database is supported. + if createStmt.Table.Schema.L != oldDBInfo.Name.L { + return ErrRepairTableFail.GenWithStack("Repaired table should in same database with the old one") + } + // It is necessary to specify the table.ID and partition.ID manually. + newTableInfo, err := buildTableInfoWithCheck(ctx, d, createStmt, oldTableInfo.Charset, oldTableInfo.Collate) + if err != nil { + return errors.Trace(err) + } + + // Override newTableInfo with the necessary elements from oldTableInfo. + // TODO: There may be more element assignments here, and the new TableInfo should be verified with the actual data. + newTableInfo.ID = oldTableInfo.ID + if err = checkAndOverridePartitionID(newTableInfo, oldTableInfo); err != nil { + return err + } + newTableInfo.AutoIncID = oldTableInfo.AutoIncID + // If any old columnInfo has been lost, the old column ID is lost too, so the repair fails. + for i, newOne := range newTableInfo.Columns { + old := getColumnInfoByName(oldTableInfo, newOne.Name.L) + if old == nil { + return ErrRepairTableFail.GenWithStackByArgs("Column " + newOne.Name.L + " has lost") + } + if newOne.Tp != old.Tp { + return ErrRepairTableFail.GenWithStackByArgs("Column " + newOne.Name.L + " type should be the same") + } + if newOne.Flen != old.Flen { + logutil.BgLogger().Warn("[ddl] admin repair table : Column " + newOne.Name.L + " flen is not equal to the old one") + } + newTableInfo.Columns[i].ID = old.ID + } + // If any old indexInfo has been lost, the index ID is lost too, and so is the data; the repair fails. + for i, newOne := range newTableInfo.Indices { + old := getIndexInfoByNameAndColumn(oldTableInfo, newOne) + if old == nil { + return ErrRepairTableFail.GenWithStackByArgs("Index " + newOne.Name.L + " has lost") + } + if newOne.Tp != old.Tp { + return ErrRepairTableFail.GenWithStackByArgs("Index " + newOne.Name.L + " type should be the same") + } + newTableInfo.Indices[i].ID = old.ID + } + + newTableInfo.State = model.StatePublic + err = checkTableInfoValid(newTableInfo) + if err != nil { + return err + } + newTableInfo.State = model.StateNone + + job := &model.Job{ + SchemaID: oldDBInfo.ID, + TableID: newTableInfo.ID, + SchemaName: oldDBInfo.Name.L, + Type: model.ActionRepairTable, + BinlogInfo: &model.HistoryInfo{}, + Args: []interface{}{newTableInfo}, + } + err = d.doDDLJob(ctx, job) + if err == nil { + // Remove the old TableInfo from repairInfo before domain reload.
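+		// Once the job has succeeded the table must become visible again, so its entry is dropped from the repair list before the schema is reloaded.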
+ domainutil.RepairInfo.RemoveFromRepairInfo(oldDBInfo.Name.L, oldTableInfo.Name.L) + } + err = d.callHookOnChanged(err) + return errors.Trace(err) +} diff --git a/ddl/ddl_worker.go b/ddl/ddl_worker.go index 2cfd37b2c4832..54d7ff7d4ed30 100644 --- a/ddl/ddl_worker.go +++ b/ddl/ddl_worker.go @@ -527,6 +527,8 @@ func (w *worker) runDDLJob(d *ddlCtx, t *meta.Meta, job *model.Job) (ver int64, ver, err = onDropSchema(t, job) case model.ActionCreateTable: ver, err = onCreateTable(d, t, job) + case model.ActionRepairTable: + ver, err = onRepairTable(d, t, job) case model.ActionCreateView: ver, err = onCreateView(d, t, job) case model.ActionDropTable, model.ActionDropView: diff --git a/ddl/delete_range.go b/ddl/delete_range.go index e053619bf9f27..81179a7c2606f 100644 --- a/ddl/delete_range.go +++ b/ddl/delete_range.go @@ -49,7 +49,7 @@ type delRangeManager interface { addDelRangeJob(job *model.Job) error // removeFromGCDeleteRange removes the deleting table job from gc_delete_range table by jobID and tableID. // It's use for recover the table that was mistakenly deleted. - removeFromGCDeleteRange(jobID, tableID int64) error + removeFromGCDeleteRange(jobID int64, tableIDs []int64) error start() clear() } @@ -100,13 +100,13 @@ func (dr *delRange) addDelRangeJob(job *model.Job) error { } // removeFromGCDeleteRange implements delRangeManager interface. -func (dr *delRange) removeFromGCDeleteRange(jobID, tableID int64) error { +func (dr *delRange) removeFromGCDeleteRange(jobID int64, tableIDs []int64) error { ctx, err := dr.sessPool.get() if err != nil { return errors.Trace(err) } defer dr.sessPool.put(ctx) - err = util.RemoveFromGCDeleteRange(ctx, jobID, tableID) + err = util.RemoveMultiFromGCDeleteRange(ctx, jobID, tableIDs) return errors.Trace(err) } diff --git a/ddl/index.go b/ddl/index.go index dcf23e93a67ac..a030e428e8fd9 100644 --- a/ddl/index.go +++ b/ddl/index.go @@ -1518,3 +1518,30 @@ func iterateSnapshotRows(store kv.Storage, priority int, t table.Table, version return nil } + +func getIndexInfoByNameAndColumn(oldTableInfo *model.TableInfo, newOne *model.IndexInfo) *model.IndexInfo { + for _, oldOne := range oldTableInfo.Indices { + if newOne.Name.L == oldOne.Name.L && indexColumnSliceEqual(newOne.Columns, oldOne.Columns) { + return oldOne + } + } + return nil +} + +func indexColumnSliceEqual(a, b []*model.IndexColumn) bool { + if len(a) != len(b) { + return false + } + if len(a) == 0 { + logutil.BgLogger().Warn("[ddl] admin repair table : index's columns length equal to 0") + return true + } + // Accelerate the comparison by eliminating the bounds check. + b = b[:len(a)] + for i, v := range a { + if v.Name.L != b[i].Name.L { + return false + } + } + return true +} diff --git a/ddl/mock.go b/ddl/mock.go index 32c244618ec30..33f420d510fea 100644 --- a/ddl/mock.go +++ b/ddl/mock.go @@ -137,7 +137,7 @@ func (dr *mockDelRange) addDelRangeJob(job *model.Job) error { } // removeFromGCDeleteRange implements delRangeManager interface.
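// The mock mirrors the interface change above: it now takes a slice of table IDs instead of a single ID, so recover table can clear every partition's delete-range record in one call.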
-func (dr *mockDelRange) removeFromGCDeleteRange(jobID, tableID int64) error { +func (dr *mockDelRange) removeFromGCDeleteRange(jobID int64, tableIDs []int64) error { return nil } diff --git a/ddl/partition.go b/ddl/partition.go index 2a5bc0806c119..75241571f637f 100644 --- a/ddl/partition.go +++ b/ddl/partition.go @@ -138,7 +138,7 @@ func buildHashPartitionDefinitions(ctx sessionctx.Context, d *ddl, s *ast.Create func buildRangePartitionDefinitions(ctx sessionctx.Context, d *ddl, s *ast.CreateTableStmt, pi *model.PartitionInfo) error { genIDs, err := d.genGlobalIDs(len(s.Partition.Definitions)) if err != nil { - return err + return errors.Trace(err) } for ith, def := range s.Partition.Definitions { comment, _ := def.Comment() @@ -190,6 +190,56 @@ func checkAddPartitionNameUnique(tbInfo *model.TableInfo, pi *model.PartitionInf return nil } +func checkAndOverridePartitionID(newTableInfo, oldTableInfo *model.TableInfo) error { + // If any old partitionInfo has been lost, the partition ID is lost too, and so is the data; the repair fails. + if newTableInfo.Partition == nil { + return nil + } + if oldTableInfo.Partition == nil { + return ErrRepairTableFail.GenWithStackByArgs("Old table doesn't have partitions") + } + if newTableInfo.Partition.Type != oldTableInfo.Partition.Type { + return ErrRepairTableFail.GenWithStackByArgs("Partition type should be the same") + } + // Check whether the partition type is hash partition. + if newTableInfo.Partition.Type == model.PartitionTypeHash { + if newTableInfo.Partition.Num != oldTableInfo.Partition.Num { + return ErrRepairTableFail.GenWithStackByArgs("Hash partition num should be the same") + } + } + for i, newOne := range newTableInfo.Partition.Definitions { + found := false + for _, oldOne := range oldTableInfo.Partition.Definitions { + if newOne.Name.L == oldOne.Name.L && stringSliceEqual(newOne.LessThan, oldOne.LessThan) { + newTableInfo.Partition.Definitions[i].ID = oldOne.ID + found = true + break + } + } + if !found { + return ErrRepairTableFail.GenWithStackByArgs("Partition " + newOne.Name.L + " has lost") + } + } + return nil +} + +func stringSliceEqual(a, b []string) bool { + if len(a) != len(b) { + return false + } + if len(a) == 0 { + return true + } + // Accelerate the comparison by eliminating the bounds check. + b = b[:len(a)] + for i, v := range a { + if v != b[i] { + return false + } + } + return true +} + // See https://github.com/mysql/mysql-server/blob/5.7/sql/item_func.h#L387 func hasTimestampField(ctx sessionctx.Context, tblInfo *model.TableInfo, expr ast.ExprNode) (bool, error) { partCols, err := checkPartitionColumns(tblInfo, expr) diff --git a/ddl/reorg.go b/ddl/reorg.go index 42d2dc2c2bffa..16d92b609b1a6 100644 --- a/ddl/reorg.go +++ b/ddl/reorg.go @@ -220,9 +220,8 @@ func constructLimitPB(count uint64) *tipb.Executor { return &tipb.Executor{Tp: tipb.ExecType_TypeLimit, Limit: limitExec} } -func buildDescTableScanDAG(ctx sessionctx.Context, startTS uint64, tbl table.PhysicalTable, columns []*model.ColumnInfo, limit uint64) (*tipb.DAGRequest, error) { +func buildDescTableScanDAG(ctx sessionctx.Context, tbl table.PhysicalTable, columns []*model.ColumnInfo, limit uint64) (*tipb.DAGRequest, error) { dagReq := &tipb.DAGRequest{} - dagReq.StartTs = startTS _, timeZoneOffset := time.Now().In(time.UTC).Zone() dagReq.TimeZoneOffset = int64(timeZoneOffset) for i := range columns { @@ -249,7 +248,7 @@ func getColumnsTypes(columns []*model.ColumnInfo) []*types.FieldType { // buildDescTableScan builds a desc table scan upon tblInfo.
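// The startTS is no longer embedded in the DAG request itself; it is attached to the kv.Request through RequestBuilder.SetStartTS instead.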
func (d *ddlCtx) buildDescTableScan(ctx context.Context, startTS uint64, tbl table.PhysicalTable, columns []*model.ColumnInfo, limit uint64) (distsql.SelectResult, error) { sctx := newContext(d.store) - dagPB, err := buildDescTableScanDAG(sctx, startTS, tbl, columns, limit) + dagPB, err := buildDescTableScanDAG(sctx, tbl, columns, limit) if err != nil { return nil, errors.Trace(err) } @@ -257,6 +256,7 @@ func (d *ddlCtx) buildDescTableScan(ctx context.Context, startTS uint64, tbl tab var builder distsql.RequestBuilder builder.SetTableRanges(tbl.GetPhysicalID(), ranges, nil). SetDAGRequest(dagPB). + SetStartTS(startTS). SetKeepOrder(true). SetConcurrency(1).SetDesc(true) diff --git a/ddl/rollingback.go b/ddl/rollingback.go index b8af05a80203e..360743249ba86 100644 --- a/ddl/rollingback.go +++ b/ddl/rollingback.go @@ -295,7 +295,7 @@ func convertJob2RollbackJob(w *worker, d *ddlCtx, t *meta.Meta, job *model.Job) model.ActionModifyColumn, model.ActionAddForeignKey, model.ActionDropForeignKey, model.ActionRenameTable, model.ActionModifyTableCharsetAndCollate, model.ActionTruncateTablePartition, - model.ActionModifySchemaCharsetAndCollate: + model.ActionModifySchemaCharsetAndCollate, model.ActionRepairTable: ver, err = cancelOnlyNotHandledJob(job) default: job.State = model.JobStateCancelled diff --git a/ddl/split_region.go b/ddl/split_region.go index 79472faad43ea..9b88fd250988d 100644 --- a/ddl/split_region.go +++ b/ddl/split_region.go @@ -23,7 +23,7 @@ import ( "go.uber.org/zap" ) -func splitPartitionTableRegion(store kv.SplitableStore, pi *model.PartitionInfo, scatter bool) { +func splitPartitionTableRegion(store kv.SplittableStore, pi *model.PartitionInfo, scatter bool) { // Max partition count is 4096, should we sample and just choose some of the partition to split? 
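// For now every partition gets its own split: collect one region ID per partition, then wait for the scatter to finish if it was requested.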
regionIDs := make([]uint64, 0, len(pi.Definitions)) for _, def := range pi.Definitions { @@ -34,7 +34,7 @@ func splitPartitionTableRegion(store kv.SplitableStore, pi *model.PartitionInfo, } } -func splitTableRegion(store kv.SplitableStore, tbInfo *model.TableInfo, scatter bool) { +func splitTableRegion(store kv.SplittableStore, tbInfo *model.TableInfo, scatter bool) { if tbInfo.ShardRowIDBits > 0 && tbInfo.PreSplitRegions > 0 { splitPreSplitedTable(store, tbInfo, scatter) } else { @@ -45,7 +45,7 @@ func splitTableRegion(store kv.SplitableStore, tbInfo *model.TableInfo, scatter } } -func splitPreSplitedTable(store kv.SplitableStore, tbInfo *model.TableInfo, scatter bool) { +func splitPreSplitedTable(store kv.SplittableStore, tbInfo *model.TableInfo, scatter bool) { // Example: // ShardRowIDBits = 4 // PreSplitRegions = 2 @@ -91,7 +91,7 @@ func splitPreSplitedTable(store kv.SplitableStore, tbInfo *model.TableInfo, scat } } -func splitRecordRegion(store kv.SplitableStore, tableID int64, scatter bool) uint64 { +func splitRecordRegion(store kv.SplittableStore, tableID int64, scatter bool) uint64 { tableStartKey := tablecodec.GenTablePrefix(tableID) regionIDs, err := store.SplitRegions(context.Background(), [][]byte{tableStartKey}, scatter) if err != nil { @@ -104,7 +104,7 @@ func splitRecordRegion(store kv.SplitableStore, tableID int64, scatter bool) uin return 0 } -func splitIndexRegion(store kv.SplitableStore, tblInfo *model.TableInfo, scatter bool) []uint64 { +func splitIndexRegion(store kv.SplittableStore, tblInfo *model.TableInfo, scatter bool) []uint64 { splitKeys := make([][]byte, 0, len(tblInfo.Indices)) for _, idx := range tblInfo.Indices { indexPrefix := tablecodec.EncodeTableIndexPrefix(tblInfo.ID, idx.ID) @@ -118,7 +118,7 @@ func splitIndexRegion(store kv.SplitableStore, tblInfo *model.TableInfo, scatter return regionIDs } -func waitScatterRegionFinish(store kv.SplitableStore, regionIDs ...uint64) { +func waitScatterRegionFinish(store kv.SplittableStore, regionIDs ...uint64) { for _, regionID := range regionIDs { err := store.WaitScatterRegionFinish(regionID, 0) if err != nil { diff --git a/ddl/table.go b/ddl/table.go index 155e208114e7d..16cdc99527200 100644 --- a/ddl/table.go +++ b/ddl/table.go @@ -92,6 +92,15 @@ func createTableOrViewWithCheck(t *meta.Meta, job *model.Job, schemaID int64, tb return t.CreateTableOrView(schemaID, tbInfo) } +func repairTableOrViewWithCheck(t *meta.Meta, job *model.Job, schemaID int64, tbInfo *model.TableInfo) error { + err := checkTableInfoValid(tbInfo) + if err != nil { + job.State = model.JobStateCancelled + return errors.Trace(err) + } + return t.UpdateTable(schemaID, tbInfo) +} + func onCreateView(d *ddlCtx, t *meta.Meta, job *model.Job) (ver int64, _ error) { schemaID := job.SchemaID tbInfo := &model.TableInfo{} @@ -264,7 +273,13 @@ func (w *worker) onRecoverTable(d *ddlCtx, t *meta.Meta, job *model.Job) (ver in return ver, errors.Trace(err) } // Remove dropped table DDL job from gc_delete_range table. 
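// A partitioned table has one delete-range record per partition ID, so all of them have to be removed, not just the table ID.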
- err = w.delRangeManager.removeFromGCDeleteRange(dropJobID, tblInfo.ID) + var tids []int64 + if tblInfo.GetPartitionInfo() != nil { + tids = getPartitionIDs(tblInfo) + } else { + tids = []int64{tblInfo.ID} + } + err = w.delRangeManager.removeFromGCDeleteRange(dropJobID, tids) if err != nil { return ver, errors.Trace(err) } @@ -896,3 +911,46 @@ func checkAddPartitionValue(meta *model.TableInfo, part *model.PartitionInfo) er } return nil } + +func onRepairTable(d *ddlCtx, t *meta.Meta, job *model.Job) (ver int64, _ error) { + schemaID := job.SchemaID + tblInfo := &model.TableInfo{} + + if err := job.DecodeArgs(tblInfo); err != nil { + // Invalid arguments, cancel this job. + job.State = model.JobStateCancelled + return ver, errors.Trace(err) + } + + tblInfo.State = model.StateNone + + // Check the old DB and old table exist. + _, err := getTableInfoAndCancelFaultJob(t, job, schemaID) + if err != nil { + return ver, errors.Trace(err) + } + + // When in repair mode, the repaired table in a server is not accessible to users, + // and the table will be removed from the repair list after repairing. Other servers + // left alive may need to restart to get the latest schema version. + ver, err = updateSchemaVersion(t, job) + if err != nil { + return ver, errors.Trace(err) + } + switch tblInfo.State { + case model.StateNone: + // none -> public + tblInfo.State = model.StatePublic + tblInfo.UpdateTS = t.StartTS + err = repairTableOrViewWithCheck(t, job, schemaID, tblInfo) + if err != nil { + return ver, errors.Trace(err) + } + // Finish this job. + job.FinishTableJob(model.JobStateDone, model.StatePublic, ver, tblInfo) + asyncNotifyEvent(d, &util.Event{Tp: model.ActionRepairTable, TableInfo: tblInfo}) + return ver, nil + default: + return ver, ErrInvalidDDLState.GenWithStackByArgs("table", tblInfo.State) + } +} diff --git a/ddl/util/util.go b/ddl/util/util.go index 08ecdc9d8b1ca..69a6d3ced56f7 100644 --- a/ddl/util/util.go +++ b/ddl/util/util.go @@ -14,9 +14,11 @@ package util import ( + "bytes" "context" "encoding/hex" "fmt" + "strconv" "github.com/pingcap/errors" "github.com/pingcap/parser/terror" @@ -28,13 +30,14 @@ import ( ) const ( - deleteRangesTable = `gc_delete_range` - doneDeleteRangesTable = `gc_delete_range_done` - loadDeleteRangeSQL = `SELECT HIGH_PRIORITY job_id, element_id, start_key, end_key FROM mysql.%s WHERE ts < %v` - recordDoneDeletedRangeSQL = `INSERT IGNORE INTO mysql.gc_delete_range_done SELECT * FROM mysql.gc_delete_range WHERE job_id = %d AND element_id = %d` - completeDeleteRangeSQL = `DELETE FROM mysql.gc_delete_range WHERE job_id = %d AND element_id = %d` - updateDeleteRangeSQL = `UPDATE mysql.gc_delete_range SET start_key = "%s" WHERE job_id = %d AND element_id = %d AND start_key = "%s"` - deleteDoneRecordSQL = `DELETE FROM mysql.gc_delete_range_done WHERE job_id = %d AND element_id = %d` + deleteRangesTable = `gc_delete_range` + doneDeleteRangesTable = `gc_delete_range_done` + loadDeleteRangeSQL = `SELECT HIGH_PRIORITY job_id, element_id, start_key, end_key FROM mysql.%s WHERE ts < %v` + recordDoneDeletedRangeSQL = `INSERT IGNORE INTO mysql.gc_delete_range_done SELECT * FROM mysql.gc_delete_range WHERE job_id = %d AND element_id = %d` + completeDeleteRangeSQL = `DELETE FROM mysql.gc_delete_range WHERE job_id = %d AND element_id = %d` + completeDeleteMultiRangesSQL = `DELETE FROM mysql.gc_delete_range WHERE job_id = %d AND element_id in (%v)` + updateDeleteRangeSQL = `UPDATE mysql.gc_delete_range SET start_key = "%s" WHERE job_id = %d AND element_id = %d AND start_key = "%s"`
+ deleteDoneRecordSQL = `DELETE FROM mysql.gc_delete_range_done WHERE job_id = %d AND element_id = %d` ) // DelRangeTask is for run delete-range command in gc_worker. @@ -119,6 +122,20 @@ func RemoveFromGCDeleteRange(ctx sessionctx.Context, jobID, elementID int64) err return errors.Trace(err) } +// RemoveMultiFromGCDeleteRange is exported for ddl pkg to use. +func RemoveMultiFromGCDeleteRange(ctx sessionctx.Context, jobID int64, elementIDs []int64) error { + var buf bytes.Buffer + for i, elementID := range elementIDs { + if i > 0 { + buf.WriteString(", ") + } + buf.WriteString(strconv.FormatInt(elementID, 10)) + } + sql := fmt.Sprintf(completeDeleteMultiRangesSQL, jobID, buf.String()) + _, err := ctx.(sqlexec.SQLExecutor).Execute(context.TODO(), sql) + return errors.Trace(err) +} + // DeleteDoneRecord removes a record from gc_delete_range_done table. func DeleteDoneRecord(ctx sessionctx.Context, dr DelRangeTask) error { sql := fmt.Sprintf(deleteDoneRecordSQL, dr.JobID, dr.ElementID) diff --git a/distsql/request_builder.go b/distsql/request_builder.go index dbde317178447..651e7c3bfa5fa 100644 --- a/distsql/request_builder.go +++ b/distsql/request_builder.go @@ -76,7 +76,6 @@ func (builder *RequestBuilder) SetTableHandles(tid int64, handles []int64) *Requ func (builder *RequestBuilder) SetDAGRequest(dag *tipb.DAGRequest) *RequestBuilder { if builder.err == nil { builder.Request.Tp = kv.ReqTypeDAG - builder.Request.StartTs = dag.StartTs builder.Request.Data, builder.err = dag.Marshal() } @@ -87,7 +86,6 @@ func (builder *RequestBuilder) SetDAGRequest(dag *tipb.DAGRequest) *RequestBuild func (builder *RequestBuilder) SetAnalyzeRequest(ana *tipb.AnalyzeReq) *RequestBuilder { if builder.err == nil { builder.Request.Tp = kv.ReqTypeAnalyze - builder.Request.StartTs = ana.StartTs builder.Request.Data, builder.err = ana.Marshal() builder.Request.NotFillCache = true builder.Request.IsolationLevel = kv.RC @@ -101,7 +99,6 @@ func (builder *RequestBuilder) SetAnalyzeRequest(ana *tipb.AnalyzeReq) *RequestB func (builder *RequestBuilder) SetChecksumRequest(checksum *tipb.ChecksumRequest) *RequestBuilder { if builder.err == nil { builder.Request.Tp = kv.ReqTypeChecksum - builder.Request.StartTs = checksum.StartTs builder.Request.Data, builder.err = checksum.Marshal() builder.Request.NotFillCache = true } @@ -115,6 +112,12 @@ func (builder *RequestBuilder) SetKeyRanges(keyRanges []kv.KeyRange) *RequestBui return builder } +// SetStartTS sets "StartTS" for "kv.Request". +func (builder *RequestBuilder) SetStartTS(startTS uint64) *RequestBuilder { + builder.Request.StartTs = startTS + return builder +} + // SetDesc sets "Desc" for "kv.Request". 
func (builder *RequestBuilder) SetDesc(desc bool) *RequestBuilder { builder.Request.Desc = desc diff --git a/distsql/request_builder_test.go b/distsql/request_builder_test.go index 62c25cfc3e5f2..1e4800898b788 100644 --- a/distsql/request_builder_test.go +++ b/distsql/request_builder_test.go @@ -28,6 +28,7 @@ import ( "github.com/pingcap/tidb/types" "github.com/pingcap/tidb/util/chunk" "github.com/pingcap/tidb/util/codec" + "github.com/pingcap/tidb/util/disk" "github.com/pingcap/tidb/util/logutil" "github.com/pingcap/tidb/util/memory" "github.com/pingcap/tidb/util/mock" @@ -55,7 +56,8 @@ type testSuite struct { func (s *testSuite) SetUpSuite(c *C) { ctx := mock.NewContext() ctx.GetSessionVars().StmtCtx = &stmtctx.StatementContext{ - MemTracker: memory.NewTracker(stringutil.StringerStr("testSuite"), variable.DefTiDBMemQuotaDistSQL), + MemTracker: memory.NewTracker(stringutil.StringerStr("testSuite"), variable.DefTiDBMemQuotaDistSQL), + DiskTracker: disk.NewTracker(stringutil.StringerStr("testSuite"), -1), } ctx.Store = &mock.Store{ Client: &mock.Client{ @@ -285,7 +287,7 @@ func (s *testSuite) TestRequestBuilder1(c *C) { expect := &kv.Request{ Tp: 103, StartTs: 0x0, - Data: []uint8{0x8, 0x0, 0x18, 0x0, 0x20, 0x0, 0x40, 0x0, 0x5a, 0x0}, + Data: []uint8{0x18, 0x0, 0x20, 0x0, 0x40, 0x0, 0x5a, 0x0}, KeyRanges: []kv.KeyRange{ { StartKey: kv.Key{0x74, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc, 0x5f, 0x72, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x1}, @@ -360,7 +362,7 @@ func (s *testSuite) TestRequestBuilder2(c *C) { expect := &kv.Request{ Tp: 103, StartTs: 0x0, - Data: []uint8{0x8, 0x0, 0x18, 0x0, 0x20, 0x0, 0x40, 0x0, 0x5a, 0x0}, + Data: []uint8{0x18, 0x0, 0x20, 0x0, 0x40, 0x0, 0x5a, 0x0}, KeyRanges: []kv.KeyRange{ { StartKey: kv.Key{0x74, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc, 0x5f, 0x69, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf, 0x3, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x1}, @@ -409,7 +411,7 @@ func (s *testSuite) TestRequestBuilder3(c *C) { expect := &kv.Request{ Tp: 103, StartTs: 0x0, - Data: []uint8{0x8, 0x0, 0x18, 0x0, 0x20, 0x0, 0x40, 0x0, 0x5a, 0x0}, + Data: []uint8{0x18, 0x0, 0x20, 0x0, 0x40, 0x0, 0x5a, 0x0}, KeyRanges: []kv.KeyRange{ { StartKey: kv.Key{0x74, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf, 0x5f, 0x72, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}, @@ -472,7 +474,7 @@ func (s *testSuite) TestRequestBuilder4(c *C) { expect := &kv.Request{ Tp: 103, StartTs: 0x0, - Data: []uint8{0x8, 0x0, 0x18, 0x0, 0x20, 0x0, 0x40, 0x0, 0x5a, 0x0}, + Data: []uint8{0x18, 0x0, 0x20, 0x0, 0x40, 0x0, 0x5a, 0x0}, KeyRanges: keyRanges, KeepOrder: false, Desc: false, @@ -516,7 +518,7 @@ func (s *testSuite) TestRequestBuilder5(c *C) { expect := &kv.Request{ Tp: 104, StartTs: 0x0, - Data: []uint8{0x8, 0x0, 0x10, 0x0, 0x18, 0x0, 0x20, 0x0}, + Data: []uint8{0x8, 0x0, 0x18, 0x0, 0x20, 0x0}, KeyRanges: keyRanges, KeepOrder: true, Desc: false, @@ -549,7 +551,7 @@ func (s *testSuite) TestRequestBuilder6(c *C) { expect := &kv.Request{ Tp: 105, StartTs: 0x0, - Data: []uint8{0x8, 0x0, 0x10, 0x0, 0x18, 0x0}, + Data: []uint8{0x10, 0x0, 0x18, 0x0}, KeyRanges: keyRanges, KeepOrder: false, Desc: false, diff --git a/docs/design/2019-11-14-tidb-builtin-diagnostics-zh_CN.md b/docs/design/2019-11-14-tidb-builtin-diagnostics-zh_CN.md index 6d612ff9622e9..0d235da7f9496 100644 --- a/docs/design/2019-11-14-tidb-builtin-diagnostics-zh_CN.md +++ b/docs/design/2019-11-14-tidb-builtin-diagnostics-zh_CN.md @@ -132,7 +132,7 @@ TiDB/TiKV/PD 产生的日志都保存在各自的节点上,并且 TiDB 集群 - `start_time`: 日志检索的开始时间(unix 时间戳,单位毫秒),如果没有该谓词,则默认为 0。 - `end_time`: 
日志检索的结束时间(unix 时间戳,单位毫秒),如果没有该谓词,则默认为 `int64::MAX`。
-- `pattern`: 如 SELECT * FROM tidb_cluster_log WHERE pattern LIKE "%gc%" 中的 %gc% 即为过滤的关键字
+- `pattern`: 如 SELECT * FROM cluster_log WHERE pattern LIKE "%gc%" 中的 %gc% 即为过滤的关键字
- `level`: 日志等级,可以选为 DEBUG/INFO/WARN/WARNING/TRACE/CRITICAL/ERROR
- `limit`: 返回日志的条数,如果没有指定,则限制为 64k 条,防止日志量太大占用大量网络带宽
@@ -301,7 +301,7 @@ message ServerInfoResponse {
mysql> use information_schema;
Database changed
-mysql> desc TIDB_CLUSTER_INFO;
+mysql> desc CLUSTER_INFO;
+----------------+---------------------+------+------+---------+-------+
| Field | Type | Null | Key | Default | Extra |
+----------------+---------------------+------+------+---------+-------+
@@ -313,7 +313,7 @@ mysql> desc TIDB_CLUSTER_INFO;
+----------------+---------------------+------+------+---------+-------+
5 rows in set (0.00 sec)
-mysql> select TYPE, ADDRESS, STATUS_ADDRESS,VERSION from TIDB_CLUSTER_INFO;
+mysql> select TYPE, ADDRESS, STATUS_ADDRESS,VERSION from CLUSTER_INFO;
+------+-----------------+-----------------+-----------------------------------------------+
| TYPE | ADDRESS | STATUS_ADDRESS | VERSION |
+------+-----------------+-----------------+-----------------------------------------------+
@@ -492,9 +492,9 @@ mysql> select address, type, value from pd_client_cmd_ops where start_time='2019
| 表名 | 描述 |
|------|-----|
-| tidb_cluster_slow_query | 所有 TiDB 节点的 slow_query 表数据 |
-| tidb_cluster_statements_summary | 所有 TiDB 节点的 statements summary 表数据 |
-| tidb_cluster_processlist | 所有 TiDB 节点的 processlist 表数据 |
+| cluster_slow_query | 所有 TiDB 节点的 slow_query 表数据 |
+| cluster_statements_summary | 所有 TiDB 节点的 statements summary 表数据 |
+| cluster_processlist | 所有 TiDB 节点的 processlist 表数据 |
#### 所有节点的配置信息
@@ -506,7 +506,7 @@ mysql> select address, type, value from pd_client_cmd_ops where start_time='2019
mysql> use information_schema;
Database changed
-mysql> select * from tidb_cluster_config where `key` like 'log%';
+mysql> select * from cluster_config where `key` like 'log%';
+------+-----------------+-----------------------------+---------------+
| TYPE | ADDRESS | KEY | VALUE |
+------+-----------------+-----------------------------+---------------+
@@ -546,7 +546,7 @@ mysql> select * from tidb_cluster_config where `key` like 'log%';
+------+-----------------+-----------------------------+---------------+
33 rows in set (0.00 sec)
-mysql> select * from tidb_cluster_config where type='tikv' and `key` like 'raftdb.wal%';
+mysql> select * from cluster_config where type='tikv' and `key` like 'raftdb.wal%';
+------+-----------------+---------------------------+--------+
| TYPE | ADDRESS | KEY | VALUE |
+------+-----------------+---------------------------+--------+
@@ -567,7 +567,7 @@ mysql> select * from tidb_cluster_config where type='tikv' and `key` like 'raftd
mysql> use information_schema;
Database changed
-mysql> select * from tidb_cluster_hardware
+mysql> select * from cluster_hardware
+------+-----------------+----------+----------+-------------+--------+
| TYPE | ADDRESS | HW_TYPE | HW_NAME | KEY | VALUE |
+------+-----------------+----------+----------+-------------+--------+
@@ -584,7 +584,7 @@ mysql> select * from tidb_cluster_hardware
+------+-----------------+----------+--------------+--------+
10 rows in set (0.01 sec)
-mysql> select * from tidb_cluster_systeminfo
+mysql> select * from cluster_systeminfo
+------+-----------------+----------+--------------+--------+
| TYPE | ADDRESS | MODULE | KEY | VALUE |
+------+-----------------+----------+--------------+--------+ @@ -594,7 +594,7 @@ mysql> select * from tidb_cluster_systeminfo +------+-----------------+----------+--------------+--------+ 20 rows in set (0.01 sec) -mysql> select * from tidb_cluster_load +mysql> select * from cluster_load +------+-----------------+----------+-------------+--------+ | TYPE | ADDRESS | MODULE | KEY | VALUE | +------+-----------------+----------+-------------+--------+ @@ -606,7 +606,7 @@ mysql> select * from tidb_cluster_load #### 全链路日志系统表 -当前日志搜索需要登陆多台机器分别进行检索,并且没有简单的办法对多个机器的检索结果按照时间全排序。本提案新建一个 `tidb_cluster_log` 系统表用于提供全链路日志,简化通过日志排查问题的方式以及提高效率。实现方式为:通过 gRPC Diagnosis Service 的 `search_log` 接口,将日志过滤的谓词下推到各个节点,并最终按照时间进行归并。 +当前日志搜索需要登陆多台机器分别进行检索,并且没有简单的办法对多个机器的检索结果按照时间全排序。本提案新建一个 `cluster_log` 系统表用于提供全链路日志,简化通过日志排查问题的方式以及提高效率。实现方式为:通过 gRPC Diagnosis Service 的 `search_log` 接口,将日志过滤的谓词下推到各个节点,并最终按照时间进行归并。 如下示例是实现本提案后的预期结果: @@ -614,7 +614,7 @@ mysql> select * from tidb_cluster_load mysql> use information_schema; Database changed -mysql> desc tidb_cluster_log; +mysql> desc cluster_log; +---------+-------------+------+------+---------+-------+ | Field | Type | Null | Key | Default | Extra | +---------+-------------+------+------+---------+-------+ @@ -626,7 +626,7 @@ mysql> desc tidb_cluster_log; +---------+-------------+------+------+---------+-------+ 5 rows in set (0.00 sec) -mysql> select * from tidb_cluster_log where content like '%412134239937495042%'; -- 查询 TSO 为 412134239937495042 全链路日志 +mysql> select * from cluster_log where content like '%412134239937495042%'; -- 查询 TSO 为 412134239937495042 全链路日志 +------+--------------------------------+-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ | TYPE | ADDRESS | LEVEL | CONTENT | +------+------------------------+-------+-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ @@ -665,9 +665,9 @@ mysql> select * from tidb_cluster_log where content like '%412134239937495042%'; +------+--------------------------------+-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ 31 rows in set (0.01 sec) -mysql> select * from 
tidb_cluster_log where type='pd' and content like '%scheduler%'; -- 查询 PD 的调度日志
+mysql> select * from cluster_log where type='pd' and content like '%scheduler%'; -- 查询 PD 的调度日志
-mysql> select * from tidb_cluster_log where type='tidb' and content like '%ddl%'; -- 查询 TiDB 的 DDL 日志
+mysql> select * from cluster_log where type='tidb' and content like '%ddl%'; -- 查询 TiDB 的 DDL 日志
```
### 集群诊断
diff --git a/docs/design/2019-11-14-tidb-builtin-diagnostics.md b/docs/design/2019-11-14-tidb-builtin-diagnostics.md
index 98a2e7757858e..4223c17f08a09 100644
--- a/docs/design/2019-11-14-tidb-builtin-diagnostics.md
+++ b/docs/design/2019-11-14-tidb-builtin-diagnostics.md
@@ -135,7 +135,7 @@ The following are the predicates that the log interface needs to process:
- `start_time`: start time of the log retrieval (Unix timestamp, in milliseconds). If there is no such predicate, the default is 0.
- `end_time`: end time of the log retrieval (Unix timestamp, in milliseconds). If there is no such predicate, the default is `int64::MAX`.
-- `pattern`: filter pattern determined by the keyword. For example, `SELECT * FROM tidb_cluster_log` WHERE "%gc%" `%gc%` is the filtered keyword.
+- `pattern`: filter pattern determined by the keyword. For example, in `SELECT * FROM cluster_log WHERE pattern LIKE "%gc%"`, `%gc%` is the filtered keyword.
- `level`: log level; can be selected as DEBUG/INFO/WARN/WARNING/TRACE/CRITICAL/ERROR
- `limit`: the maximum number of log items to return, preventing the logs from being too large and occupying too much network bandwidth. If not specified, the default limit is 64k items.
@@ -306,7 +306,7 @@ The implementation of this proposal can query the following results through SQL:
mysql> use information_schema;
Database changed
-mysql> desc TIDB_CLUSTER_INFO;
+mysql> desc CLUSTER_INFO;
+----------------+---------------------+------+------+---------+-------+
| Field | Type | Null | Key | Default | Extra |
+----------------+---------------------+------+------+---------+-------+
@@ -318,7 +318,7 @@ mysql> desc TIDB_CLUSTER_INFO;
+----------------+---------------------+------+------+---------+-------+
5 rows in set (0.00 sec)
-mysql> select TYPE, ADDRESS, STATUS_ADDRESS,VERSION from TIDB_CLUSTER_INFO;
+mysql> select TYPE, ADDRESS, STATUS_ADDRESS,VERSION from CLUSTER_INFO;
+------+-----------------+-----------------+-----------------------------------------------+
| TYPE | ADDRESS | STATUS_ADDRESS | VERSION |
+------+-----------------+-----------------+-----------------------------------------------+
@@ -484,9 +484,9 @@ Current the `slow_query`/`events_statements_summary_by_digest`/`processlist` mem
| Table Name | Description |
|------|-----|
-| tidb_cluster_slow_query | slow_query table data for all TiDB nodes |
-| tidb_cluster_statements_summary | statements summary table Data for all TiDB nodes |
-| tidb_cluster_processlist | processlist table data for all TiDB nodes |
+| cluster_slow_query | slow_query table data for all TiDB nodes |
+| cluster_statements_summary | statements summary table data for all TiDB nodes |
+| cluster_processlist | processlist table data for all TiDB nodes |
#### Configuration information of all nodes
@@ -498,7 +498,7 @@ See the following example for some expected results of this proposal:
mysql> use information_schema;
Database changed
-mysql> select * from tidb_cluster_config where `key` like 'log%';
+mysql> select * from cluster_config where `key` like 'log%';
+------+-----------------+-----------------------------+---------------+
| TYPE | ADDRESS | KEY | VALUE |
+------+-----------------+-----------------------------+---------------+ @@ -538,7 +538,7 @@ mysql> select * from tidb_cluster_config where `key` like 'log%'; +------+-----------------+-----------------------------+---------------+ 33 rows in set (0.00 sec) -mysql> select * from tidb_cluster_config where type='tikv' and `key` like 'raftdb.wal%'; +mysql> select * from cluster_config where type='tikv' and `key` like 'raftdb.wal%'; +------+-----------------+---------------------------+--------+ | TYPE | ADDRESS | KEY | VALUE | +------+-----------------+---------------------------+--------+ @@ -559,7 +559,7 @@ According to the definition of `gRPC Service` protocol, each `ServerInfoItem` co mysql> use information_schema; Database changed -mysql> select * from tidb_cluster_hardware +mysql> select * from cluster_hardware +------+-----------------+----------+----------+-------------+--------+ | TYPE | ADDRESS | HW_TYPE | HW_NAME | KEY | VALUE | +------+-----------------+----------+----------+-------------+--------+ @@ -576,7 +576,7 @@ mysql> select * from tidb_cluster_hardware +------+-----------------+----------+----------+-------------+--------+ 10 rows in set (0.01 sec) -mysql> select * from tidb_cluster_systeminfo +mysql> select * from cluster_systeminfo +------+-----------------+----------+--------------+--------+ | TYPE | ADDRESS | MODULE | KEY | VALUE | +------+-----------------+----------+--------------+--------+ @@ -586,7 +586,7 @@ mysql> select * from tidb_cluster_systeminfo +------+-----------------+----------+--------------+--------+ 20 rows in set (0.01 sec) -mysql> select * from tidb_cluster_load +mysql> select * from cluster_load +------+-----------------+----------+-------------+--------+ | TYPE | ADDRESS | MODULE | KEY | VALUE | +------+-----------------+----------+-------------+--------+ @@ -599,7 +599,7 @@ mysql> select * from tidb_cluster_load #### Full-chain log system table -To search in the current log, users need to log in to multiple machines for retrieval respectively, and there is no easy way to sort the retrieval results of multiple machines by time. This proposal creates a new `tidb_cluster_log` system table to provide full-link logs, thereby simplifying the way to troubleshoot problems through logs and improving efficiency. This is achieved by pushing the log-filtering predicates down to the nodes through the `search_log` interface of the gRPC Diagnosis Service. The filtered logs will be eventually merged by time. +To search in the current log, users need to log in to multiple machines for retrieval respectively, and there is no easy way to sort the retrieval results of multiple machines by time. This proposal creates a new `cluster_log` system table to provide full-link logs, thereby simplifying the way to troubleshoot problems through logs and improving efficiency. This is achieved by pushing the log-filtering predicates down to the nodes through the `search_log` interface of the gRPC Diagnosis Service. The filtered logs will be eventually merged by time. 
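As an editor's aside on the merge step just described: each node already returns its filtered logs in time order, so the coordinator only needs a k-way merge keyed on the timestamp. The sketch below is a minimal illustration of that idea, not code from this patch; `LogItem` and the channel-based per-node streams are assumed types invented for the example.

```go
package logmerge

import "container/heap"

// LogItem is a hypothetical merged-log record; the proposal's real gRPC
// message carries type/address/level/content fields instead.
type LogItem struct {
	TimeMs  int64 // Unix timestamp in milliseconds, as in the predicate definition
	Node    string
	Content string
}

type entry struct {
	item LogItem
	src  int // index of the node stream the item came from
}

type mergeHeap []entry

func (h mergeHeap) Len() int            { return len(h) }
func (h mergeHeap) Less(i, j int) bool  { return h[i].item.TimeMs < h[j].item.TimeMs }
func (h mergeHeap) Swap(i, j int)       { h[i], h[j] = h[j], h[i] }
func (h *mergeHeap) Push(x interface{}) { *h = append(*h, x.(entry)) }
func (h *mergeHeap) Pop() interface{} {
	old := *h
	n := len(old)
	e := old[n-1]
	*h = old[:n-1]
	return e
}

// MergeByTime drains per-node streams that are already sorted by time and
// emits one globally time-ordered slice, mirroring the "merged by time" step.
func MergeByTime(streams []<-chan LogItem) []LogItem {
	h := &mergeHeap{}
	for i, s := range streams {
		if item, ok := <-s; ok {
			heap.Push(h, entry{item, i})
		}
	}
	var out []LogItem
	for h.Len() > 0 {
		e := heap.Pop(h).(entry)
		out = append(out, e.item)
		if item, ok := <-streams[e.src]; ok {
			heap.Push(h, entry{item, e.src})
		}
	}
	return out
}
```

Because every per-node stream is sorted, the heap never holds more than one item per node, so the coordinator's memory stays bounded even when the `limit` predicate allows large results.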
The following example shows the expected results of this proposal: @@ -607,7 +607,7 @@ The following example shows the expected results of this proposal: mysql> use information_schema; Database changed -mysql> desc tidb_cluster_log; +mysql> desc cluster_log; +---------+-------------+------+------+---------+-------+ | Field | Type | Null | Key | Default | Extra | +---------+-------------+------+------+---------+-------+ @@ -619,7 +619,7 @@ mysql> desc tidb_cluster_log; +---------+-------------+------+------+---------+-------+ 5 rows in set (0.00 sec) -mysql> select * from tidb_cluster_log where content like '%412134239937495042%'; -- Query the full link log related to TSO 412134239937495042 +mysql> select * from cluster_log where content like '%412134239937495042%'; -- Query the full link log related to TSO 412134239937495042 +------+--------------------------------+-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ | TYPE | ADDRESS | LEVEL | CONTENT | +------+------------------------+-------+-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ @@ -658,9 +658,9 @@ mysql> select * from tidb_cluster_log where content like '%412134239937495042%'; +------+--------------------------------+-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ 31 rows in set (0.01 sec) -mysql> select * from tidb_cluster_log where type='pd' and content like '%scheduler%'; -- Query scheduler logs of PD +mysql> select * from cluster_log where type='pd' and content like '%scheduler%'; -- Query scheduler logs of PD -mysql> select * from tidb_cluster_log where type='tidb' and content like '%ddl%'; -- Query DDL logs of TiDB +mysql> select * from cluster_log where type='tidb' and content like '%ddl%'; -- Query DDL logs of TiDB ``` ### Cluster diagnostics @@ -700,4 +700,4 @@ Therefore, implementing a diagnostic system that supports hot rule loading is ne - Advantages: easy to write, load and execute - Disadvantages: need to run on the machine where the MySQL client is installed -This proposal temporarily adopts the third option to write diagnostic rules using Shell. 
There is no intrusion into TiDB, and it also provides scalability for subsequent implementations of better solutions.
\ No newline at end of file
+This proposal temporarily adopts the third option to write diagnostic rules using Shell. There is no intrusion into TiDB, and it also provides scalability for subsequent implementations of better solutions.
diff --git a/docs/tidb_http_api.md b/docs/tidb_http_api.md
index ebe9d7cb104c6..6a6ddd8252574 100644
--- a/docs/tidb_http_api.md
+++ b/docs/tidb_http_api.md
@@ -418,4 +418,24 @@ timezone.*
```shell
curl http://{TiDBIP}:10080/binlog/recover
- ```
\ No newline at end of file
+ ```
+
+ Return value:
+
+ * If the request times out, status code: 400, message: `timeout`
+ * If it returns normally, status code: 200, message example:
+ ```text
+ {
+   "Skipped": false,
+   "SkippedCommitterCounter": 0
+ }
+ ```
+ `Skipped`: false indicates that the current binlog is not in the skipped state; otherwise, it is in the skipped state.
+ `SkippedCommitterCounter`: represents how many transactions are currently being committed in the skipped state. By default, the API returns after waiting until all skipped-binlog transactions are committed. If this value is greater than 0, you need to wait until they are committed.
+
+ Param:
+
+ * op=nowait: return once the binlog status is recovered; do not wait until the skipped-binlog transactions are committed.
+ * op=reset: reset `SkippedCommitterCounter` to 0 to avoid the problem that `SkippedCommitterCounter` is not cleared in some unusual cases.
+ * op=status: get the current status of binlog recovery.
+ * seconds={num}: specify the request timeout in seconds. If not specified, the default is 1800 seconds.
\ No newline at end of file
diff --git a/domain/domain.go b/domain/domain.go
index 0373111eb1c71..207a5f7e6fd51 100644
--- a/domain/domain.go
+++ b/domain/domain.go
@@ -45,6 +45,7 @@ import (
"github.com/pingcap/tidb/statistics/handle"
"github.com/pingcap/tidb/store/tikv"
"github.com/pingcap/tidb/util"
+ "github.com/pingcap/tidb/util/domainutil"
"github.com/pingcap/tidb/util/expensivequery"
"github.com/pingcap/tidb/util/logutil"
"github.com/pingcap/tidb/util/sqlexec"
@@ -207,6 +208,10 @@ func (do *Domain) fetchSchemasWithTables(schemas []*model.DBInfo, m *meta.Meta,
continue
}
infoschema.ConvertCharsetCollateToLowerCaseIfNeed(tbl)
+ // Check whether the table is in repair mode.
+ if domainutil.RepairInfo.InRepairMode() && domainutil.RepairInfo.CheckAndFetchRepairedTable(di, tbl) { + continue + } di.Tables = append(di.Tables, tbl) } } diff --git a/domain/domain_test.go b/domain/domain_test.go index 9e6e6d9fddace..3b3644b860bd1 100644 --- a/domain/domain_test.go +++ b/domain/domain_test.go @@ -317,23 +317,33 @@ func (*testSuite) TestT(c *C) { res := dom.ShowSlowQuery(&ast.ShowSlow{Tp: ast.ShowSlowTop, Count: 2}) c.Assert(res, HasLen, 2) - c.Assert(*res[0], Equals, SlowQueryInfo{SQL: "bbb", Duration: 3 * time.Second}) - c.Assert(*res[1], Equals, SlowQueryInfo{SQL: "ccc", Duration: 2 * time.Second}) + c.Assert(res[0].SQL, Equals, "bbb") + c.Assert(res[0].Duration, Equals, 3*time.Second) + c.Assert(res[1].SQL, Equals, "ccc") + c.Assert(res[1].Duration, Equals, 2*time.Second) res = dom.ShowSlowQuery(&ast.ShowSlow{Tp: ast.ShowSlowTop, Count: 2, Kind: ast.ShowSlowKindInternal}) c.Assert(res, HasLen, 1) - c.Assert(*res[0], Equals, SlowQueryInfo{SQL: "aaa", Duration: time.Second, Internal: true}) + c.Assert(res[0].SQL, Equals, "aaa") + c.Assert(res[0].Duration, Equals, time.Second) + c.Assert(res[0].Internal, Equals, true) res = dom.ShowSlowQuery(&ast.ShowSlow{Tp: ast.ShowSlowTop, Count: 4, Kind: ast.ShowSlowKindAll}) c.Assert(res, HasLen, 3) - c.Assert(*res[0], Equals, SlowQueryInfo{SQL: "bbb", Duration: 3 * time.Second}) - c.Assert(*res[1], Equals, SlowQueryInfo{SQL: "ccc", Duration: 2 * time.Second}) - c.Assert(*res[2], Equals, SlowQueryInfo{SQL: "aaa", Duration: time.Second, Internal: true}) + c.Assert(res[0].SQL, Equals, "bbb") + c.Assert(res[0].Duration, Equals, 3*time.Second) + c.Assert(res[1].SQL, Equals, "ccc") + c.Assert(res[1].Duration, Equals, 2*time.Second) + c.Assert(res[2].SQL, Equals, "aaa") + c.Assert(res[2].Duration, Equals, time.Second) + c.Assert(res[2].Internal, Equals, true) res = dom.ShowSlowQuery(&ast.ShowSlow{Tp: ast.ShowSlowRecent, Count: 2}) c.Assert(res, HasLen, 2) - c.Assert(*res[0], Equals, SlowQueryInfo{SQL: "ccc", Duration: 2 * time.Second}) - c.Assert(*res[1], Equals, SlowQueryInfo{SQL: "bbb", Duration: 3 * time.Second}) + c.Assert(res[0].SQL, Equals, "ccc") + c.Assert(res[0].Duration, Equals, 2*time.Second) + c.Assert(res[1].SQL, Equals, "bbb") + c.Assert(res[1].Duration, Equals, 3*time.Second) metrics.PanicCounter.Reset() // Since the stats lease is 0 now, so create a new ticker will panic. 
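On the `fetchSchemasWithTables` hunk in domain/domain.go above: while the server runs in repair mode, tables on the repair list are skipped during schema load, so ordinary sessions cannot see them until the repair finishes. Below is a minimal sketch of such a filter, assuming the repair list is a set keyed by lower-cased `db.table`; the real bookkeeping lives in `util/domainutil` and is not shown in this patch.

```go
package sketch

import "strings"

// repairInfo is an illustrative stand-in for domainutil.RepairInfo.
type repairInfo struct {
	inRepairMode   bool
	repairedTables map[string]struct{} // keys look like "db.table", lower-cased (assumed format)
}

// InRepairMode reports whether the server was started in repair mode.
func (r *repairInfo) InRepairMode() bool { return r.inRepairMode }

// shouldHide reports whether a table must be dropped from the loaded schema
// because it still waits to be repaired.
func (r *repairInfo) shouldHide(db, tbl string) bool {
	if !r.InRepairMode() {
		return false
	}
	key := strings.ToLower(db) + "." + strings.ToLower(tbl)
	_, ok := r.repairedTables[key]
	return ok
}
```

The design consequence, as the onRepairTable comment earlier notes, is that other TiDB servers may need a restart to observe the bumped schema version once a table leaves the repair list.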
diff --git a/domain/schema_validator_test.go b/domain/schema_validator_test.go index 08b6aae000e20..7ddc97654aef2 100644 --- a/domain/schema_validator_test.go +++ b/domain/schema_validator_test.go @@ -128,6 +128,7 @@ func serverFunc(lease time.Duration, requireLease chan leaseGrantItem, oracleCh var version int64 leaseTS := uint64(time.Now().UnixNano()) ticker := time.NewTicker(lease) + defer ticker.Stop() for { select { case now := <-ticker.C: diff --git a/domain/topn_slow_query_test.go b/domain/topn_slow_query_test.go index dbf7ea0a8c6fb..583d82651e9c7 100644 --- a/domain/topn_slow_query_test.go +++ b/domain/topn_slow_query_test.go @@ -116,14 +116,14 @@ func (t *testTopNSlowQuerySuite) TestQueue(c *C) { q.Append(&SlowQueryInfo{SQL: "ccc"}) query := q.recent.Query(1) - c.Assert(*query[0], Equals, SlowQueryInfo{SQL: "ccc"}) + c.Assert(query[0].SQL, Equals, "ccc") query = q.recent.Query(2) - c.Assert(*query[0], Equals, SlowQueryInfo{SQL: "ccc"}) - c.Assert(*query[1], Equals, SlowQueryInfo{SQL: "bbb"}) + c.Assert(query[0].SQL, Equals, "ccc") + c.Assert(query[1].SQL, Equals, "bbb") query = q.recent.Query(6) - c.Assert(*query[0], Equals, SlowQueryInfo{SQL: "ccc"}) - c.Assert(*query[1], Equals, SlowQueryInfo{SQL: "bbb"}) - c.Assert(*query[2], Equals, SlowQueryInfo{SQL: "aaa"}) + c.Assert(query[0].SQL, Equals, "ccc") + c.Assert(query[1].SQL, Equals, "bbb") + c.Assert(query[2].SQL, Equals, "aaa") q.Append(&SlowQueryInfo{SQL: "ddd"}) q.Append(&SlowQueryInfo{SQL: "eee"}) @@ -131,13 +131,13 @@ func (t *testTopNSlowQuerySuite) TestQueue(c *C) { q.Append(&SlowQueryInfo{SQL: "ggg"}) query = q.recent.Query(3) - c.Assert(*query[0], Equals, SlowQueryInfo{SQL: "ggg"}) - c.Assert(*query[1], Equals, SlowQueryInfo{SQL: "fff"}) - c.Assert(*query[2], Equals, SlowQueryInfo{SQL: "eee"}) + c.Assert(query[0].SQL, Equals, "ggg") + c.Assert(query[1].SQL, Equals, "fff") + c.Assert(query[2].SQL, Equals, "eee") query = q.recent.Query(6) - c.Assert(*query[0], Equals, SlowQueryInfo{SQL: "ggg"}) - c.Assert(*query[1], Equals, SlowQueryInfo{SQL: "fff"}) - c.Assert(*query[2], Equals, SlowQueryInfo{SQL: "eee"}) - c.Assert(*query[3], Equals, SlowQueryInfo{SQL: "ddd"}) - c.Assert(*query[4], Equals, SlowQueryInfo{SQL: "ccc"}) + c.Assert(query[0].SQL, Equals, "ggg") + c.Assert(query[1].SQL, Equals, "fff") + c.Assert(query[2].SQL, Equals, "eee") + c.Assert(query[3].SQL, Equals, "ddd") + c.Assert(query[4].SQL, Equals, "ccc") } diff --git a/executor/adapter.go b/executor/adapter.go index e8e027122d2f6..4e6b47d3d84d6 100644 --- a/executor/adapter.go +++ b/executor/adapter.go @@ -254,12 +254,7 @@ func (a *ExecStmt) IsReadOnly(vars *variable.SessionVars) bool { // RebuildPlan rebuilds current execute statement plan. // It returns the current information schema version that 'a' is using. func (a *ExecStmt) RebuildPlan(ctx context.Context) (int64, error) { - startTime := time.Now() - defer func() { - a.Ctx.GetSessionVars().DurationCompile = time.Since(startTime) - }() - - is := GetInfoSchema(a.Ctx) + is := infoschema.GetInfoSchema(a.Ctx) a.InfoSchema = is if err := plannercore.Preprocess(a.Ctx, a.StmtNode, is, plannercore.InTxnRetry); err != nil { return 0, err @@ -619,9 +614,6 @@ func (a *ExecStmt) handlePessimisticLockError(ctx context.Context, err error) (E // Rollback the statement change before retry it. 
a.Ctx.StmtRollback()
a.Ctx.GetSessionVars().StmtCtx.ResetForRetry()
- a.Ctx.GetSessionVars().StartTime = time.Now()
- a.Ctx.GetSessionVars().DurationCompile = time.Duration(0)
- a.Ctx.GetSessionVars().DurationParse = time.Duration(0)
if err = e.Open(ctx); err != nil {
return nil, err
@@ -752,7 +744,7 @@ func (a *ExecStmt) LogSlowQuery(txnTS uint64, succ bool, hasMoreResults bool) {
sessVars := a.Ctx.GetSessionVars()
level := log.GetLevel()
cfg := config.GetGlobalConfig()
- costTime := time.Since(a.Ctx.GetSessionVars().StartTime)
+ costTime := time.Since(sessVars.StartTime) + sessVars.DurationParse
threshold := time.Duration(atomic.LoadUint64(&cfg.Log.SlowThreshold)) * time.Millisecond
if costTime < threshold && level > zapcore.DebugLevel {
return
@@ -761,10 +753,10 @@
var tableIDs, indexNames string
if len(sessVars.StmtCtx.TableIDs) > 0 {
- tableIDs = strings.Replace(fmt.Sprintf("%v", a.Ctx.GetSessionVars().StmtCtx.TableIDs), " ", ",", -1)
+ tableIDs = strings.Replace(fmt.Sprintf("%v", sessVars.StmtCtx.TableIDs), " ", ",", -1)
}
if len(sessVars.StmtCtx.IndexNames) > 0 {
- indexNames = strings.Replace(fmt.Sprintf("%v", a.Ctx.GetSessionVars().StmtCtx.IndexNames), " ", ",", -1)
+ indexNames = strings.Replace(fmt.Sprintf("%v", sessVars.StmtCtx.IndexNames), " ", ",", -1)
}
execDetail := sessVars.StmtCtx.GetExecDetails()
copTaskInfo := sessVars.StmtCtx.CopTasksDetails()
@@ -776,8 +768,8 @@
SQL: sql.String(),
Digest: digest,
TimeTotal: costTime,
- TimeParse: a.Ctx.GetSessionVars().DurationParse,
- TimeCompile: a.Ctx.GetSessionVars().DurationCompile,
+ TimeParse: sessVars.DurationParse,
+ TimeCompile: sessVars.DurationCompile,
IndexNames: indexNames,
StatsInfos: statsInfos,
CopTasks: copTaskInfo,
@@ -805,7 +797,7 @@
domain.GetDomain(a.Ctx).LogSlowQuery(&domain.SlowQueryInfo{
SQL: sql.String(),
Digest: digest,
- Start: a.Ctx.GetSessionVars().StartTime,
+ Start: sessVars.StartTime,
Duration: costTime,
Detail: sessVars.StmtCtx.GetExecDetails(),
Succ: succ,
@@ -826,23 +818,7 @@ func getPlanTree(p plannercore.Plan) string {
if atomic.LoadUint32(&cfg.Log.RecordPlanInSlowLog) == 0 {
return ""
}
- var selectPlan plannercore.PhysicalPlan
- if physicalPlan, ok := p.(plannercore.PhysicalPlan); ok {
- selectPlan = physicalPlan
- } else {
- switch x := p.(type) {
- case *plannercore.Delete:
- selectPlan = x.SelectPlan
- case *plannercore.Update:
- selectPlan = x.SelectPlan
- case *plannercore.Insert:
- selectPlan = x.SelectPlan
- }
- }
- if selectPlan == nil {
- return ""
- }
- planTree := plannercore.EncodePlan(selectPlan)
+ planTree := plannercore.EncodePlan(p)
if len(planTree) == 0 {
return planTree
}
diff --git a/executor/admin.go b/executor/admin.go
index 0f58ae187c8c5..73ce92dad6444 100644
--- a/executor/admin.go
+++ b/executor/admin.go
@@ -108,9 +108,14 @@ func (e *CheckIndexRangeExec) Open(ctx context.Context) error {
return err
}
sc := e.ctx.GetSessionVars().StmtCtx
+ txn, err := e.ctx.Txn(true)
+ if err != nil {
+ return err
+ }
var builder distsql.RequestBuilder
kvReq, err := builder.SetIndexRanges(sc, e.table.ID, e.index.ID, ranger.FullRange()).
SetDAGRequest(dagPB).
+ SetStartTS(txn.StartTS()).
SetKeepOrder(true).
SetFromSessionVars(e.ctx.GetSessionVars()).
Build() @@ -128,11 +133,6 @@ func (e *CheckIndexRangeExec) Open(ctx context.Context) error { func (e *CheckIndexRangeExec) buildDAGPB() (*tipb.DAGRequest, error) { dagReq := &tipb.DAGRequest{} - txn, err := e.ctx.Txn(true) - if err != nil { - return nil, err - } - dagReq.StartTs = txn.StartTS() dagReq.TimeZoneName, dagReq.TimeZoneOffset = timeutil.Zone(e.ctx.GetSessionVars().Location()) sc := e.ctx.GetSessionVars().StmtCtx dagReq.Flags = sc.PushDownFlags() @@ -142,7 +142,7 @@ func (e *CheckIndexRangeExec) buildDAGPB() (*tipb.DAGRequest, error) { execPB := e.constructIndexScanPB() dagReq.Executors = append(dagReq.Executors, execPB) - err = plannercore.SetPBColumnsDefaultValue(e.ctx, dagReq.Executors[0].IdxScan.Columns, e.cols) + err := plannercore.SetPBColumnsDefaultValue(e.ctx, dagReq.Executors[0].IdxScan.Columns, e.cols) if err != nil { return nil, err } @@ -231,7 +231,6 @@ func (e *RecoverIndexExec) constructLimitPB(count uint64) *tipb.Executor { func (e *RecoverIndexExec) buildDAGPB(txn kv.Transaction, limitCnt uint64) (*tipb.DAGRequest, error) { dagReq := &tipb.DAGRequest{} - dagReq.StartTs = txn.StartTS() dagReq.TimeZoneName, dagReq.TimeZoneOffset = timeutil.Zone(e.ctx.GetSessionVars().Location()) sc := e.ctx.GetSessionVars().StmtCtx dagReq.Flags = sc.PushDownFlags() @@ -264,6 +263,7 @@ func (e *RecoverIndexExec) buildTableScan(ctx context.Context, txn kv.Transactio var builder distsql.RequestBuilder kvReq, err := builder.SetTableRanges(tblInfo.ID, ranges, nil). SetDAGRequest(dagPB). + SetStartTS(txn.StartTS()). SetKeepOrder(true). SetFromSessionVars(e.ctx.GetSessionVars()). Build() @@ -631,6 +631,7 @@ func (e *CleanupIndexExec) buildIndexScan(ctx context.Context, txn kv.Transactio ranges := ranger.FullRange() kvReq, err := builder.SetIndexRanges(sc, e.table.Meta().ID, e.index.Meta().ID, ranges). SetDAGRequest(dagPB). + SetStartTS(txn.StartTS()). SetKeepOrder(true). SetFromSessionVars(e.ctx.GetSessionVars()). Build() @@ -668,7 +669,6 @@ func (e *CleanupIndexExec) Open(ctx context.Context) error { func (e *CleanupIndexExec) buildIdxDAGPB(txn kv.Transaction) (*tipb.DAGRequest, error) { dagReq := &tipb.DAGRequest{} - dagReq.StartTs = txn.StartTS() dagReq.TimeZoneName, dagReq.TimeZoneOffset = timeutil.Zone(e.ctx.GetSessionVars().Location()) sc := e.ctx.GetSessionVars().StmtCtx dagReq.Flags = sc.PushDownFlags() diff --git a/executor/aggregate.go b/executor/aggregate.go index 574ee516c33cf..b6595ba8f01ae 100644 --- a/executor/aggregate.go +++ b/executor/aggregate.go @@ -947,64 +947,6 @@ func (e *StreamAggExec) appendResult2Chunk(chk *chunk.Chunk) error { return nil } -type groupChecker struct { - StmtCtx *stmtctx.StatementContext - GroupByItems []expression.Expression - curGroupKey []types.Datum - tmpGroupKey []types.Datum -} - -func newGroupChecker(stmtCtx *stmtctx.StatementContext, items []expression.Expression) *groupChecker { - return &groupChecker{ - StmtCtx: stmtCtx, - GroupByItems: items, - } -} - -// meetNewGroup returns a value that represents if the new group is different from last group. -// TODO: Since all the group by items are only a column reference, guaranteed by building projection below aggregation, we can directly compare data in a chunk. 
-func (e *groupChecker) meetNewGroup(row chunk.Row) (bool, error) { - if len(e.GroupByItems) == 0 { - return false, nil - } - e.tmpGroupKey = e.tmpGroupKey[:0] - matched, firstGroup := true, false - if len(e.curGroupKey) == 0 { - matched, firstGroup = false, true - } - for i, item := range e.GroupByItems { - v, err := item.Eval(row) - if err != nil { - return false, err - } - if matched { - c, err := v.CompareDatum(e.StmtCtx, &e.curGroupKey[i]) - if err != nil { - return false, err - } - matched = c == 0 - } - e.tmpGroupKey = append(e.tmpGroupKey, v) - } - if matched { - return false, nil - } - e.curGroupKey = e.curGroupKey[:0] - for _, v := range e.tmpGroupKey { - e.curGroupKey = append(e.curGroupKey, *((&v).Copy())) - } - return !firstGroup, nil -} - -func (e *groupChecker) reset() { - if e.curGroupKey != nil { - e.curGroupKey = e.curGroupKey[:0] - } - if e.tmpGroupKey != nil { - e.tmpGroupKey = e.tmpGroupKey[:0] - } -} - // vecGroupChecker is used to split a given chunk according to the `group by` expression in a vectorized manner // It is usually used for streamAgg type vecGroupChecker struct { @@ -1034,7 +976,6 @@ type vecGroupChecker struct { } func newVecGroupChecker(ctx sessionctx.Context, items []expression.Expression) *vecGroupChecker { - return &vecGroupChecker{ ctx: ctx, GroupByItems: items, diff --git a/executor/analyze.go b/executor/analyze.go index 1aa62d1baaabb..f6145495578f3 100755 --- a/executor/analyze.go +++ b/executor/analyze.go @@ -34,6 +34,7 @@ import ( "github.com/pingcap/parser/mysql" "github.com/pingcap/tidb/distsql" "github.com/pingcap/tidb/domain" + "github.com/pingcap/tidb/infoschema" "github.com/pingcap/tidb/kv" "github.com/pingcap/tidb/metrics" "github.com/pingcap/tidb/sessionctx" @@ -125,7 +126,7 @@ func (e *AnalyzeExec) Next(ctx context.Context, req *chunk.Chunk) error { if err != nil { return err } - return statsHandle.Update(GetInfoSchema(e.ctx)) + return statsHandle.Update(infoschema.GetInfoSchema(e.ctx)) } func getBuildStatsConcurrency(ctx sessionctx.Context) (int, error) { @@ -259,6 +260,7 @@ func (e *AnalyzeIndexExec) fetchAnalyzeResult(ranges []*ranger.Range, isNullRang var builder distsql.RequestBuilder kvReq, err := builder.SetIndexRanges(e.ctx.GetSessionVars().StmtCtx, e.physicalTableID, e.idxInfo.ID, ranges). SetAnalyzeRequest(e.analyzePB). + SetStartTS(math.MaxUint64). SetKeepOrder(true). SetConcurrency(e.concurrency). Build() @@ -429,6 +431,7 @@ func (e *AnalyzeColumnsExec) buildResp(ranges []*ranger.Range) (distsql.SelectRe // correct `correlation` of columns. kvReq, err := builder.SetTableRanges(e.physicalTableID, ranges, nil). SetAnalyzeRequest(e.analyzePB). + SetStartTS(math.MaxUint64). SetKeepOrder(true). SetConcurrency(e.concurrency). 
Build() diff --git a/executor/analyze_test.go b/executor/analyze_test.go index 16b54bafee52d..c1a6a6965e75e 100644 --- a/executor/analyze_test.go +++ b/executor/analyze_test.go @@ -27,6 +27,7 @@ import ( "github.com/pingcap/parser/mysql" "github.com/pingcap/tidb/domain" "github.com/pingcap/tidb/executor" + "github.com/pingcap/tidb/infoschema" "github.com/pingcap/tidb/kv" "github.com/pingcap/tidb/session" "github.com/pingcap/tidb/sessionctx" @@ -62,7 +63,7 @@ PARTITION BY RANGE ( a ) ( } tk.MustExec("analyze table t") - is := executor.GetInfoSchema(tk.Se.(sessionctx.Context)) + is := infoschema.GetInfoSchema(tk.Se.(sessionctx.Context)) table, err := is.TableByName(model.NewCIStr("test"), model.NewCIStr("t")) c.Assert(err, IsNil) pi := table.Meta().GetPartitionInfo() @@ -89,7 +90,7 @@ PARTITION BY RANGE ( a ) ( tk.MustExec(fmt.Sprintf(`insert into t values (%d, %d, "hello")`, i, i)) } tk.MustExec("alter table t analyze partition p0") - is = executor.GetInfoSchema(tk.Se.(sessionctx.Context)) + is = infoschema.GetInfoSchema(tk.Se.(sessionctx.Context)) table, err = is.TableByName(model.NewCIStr("test"), model.NewCIStr("t")) c.Assert(err, IsNil) pi = table.Meta().GetPartitionInfo() @@ -139,7 +140,7 @@ func (s *testSuite1) TestAnalyzeParameters(c *C) { tk.MustExec("set @@tidb_enable_fast_analyze = 1") tk.MustExec("analyze table t with 30 samples") - is := executor.GetInfoSchema(tk.Se.(sessionctx.Context)) + is := infoschema.GetInfoSchema(tk.Se.(sessionctx.Context)) table, err := is.TableByName(model.NewCIStr("test"), model.NewCIStr("t")) c.Assert(err, IsNil) tableInfo := table.Meta() @@ -170,7 +171,7 @@ func (s *testSuite1) TestAnalyzeTooLongColumns(c *C) { tk.MustExec(fmt.Sprintf("insert into t values ('%s')", value)) tk.MustExec("analyze table t") - is := executor.GetInfoSchema(tk.Se.(sessionctx.Context)) + is := infoschema.GetInfoSchema(tk.Se.(sessionctx.Context)) table, err := is.TableByName(model.NewCIStr("test"), model.NewCIStr("t")) c.Assert(err, IsNil) tableInfo := table.Meta() @@ -186,11 +187,13 @@ func (s *testFastAnalyze) TestAnalyzeFastSample(c *C) { mockstore.WithCluster(cluster), ) c.Assert(err, IsNil) + defer store.Close() var dom *domain.Domain session.DisableStats4Test() session.SetSchemaLease(0) dom, err = session.BootstrapSession(store) c.Assert(err, IsNil) + defer dom.Close() tk := testkit.NewTestKit(c, store) executor.RandSeed = 123 @@ -279,11 +282,13 @@ func (s *testFastAnalyze) TestFastAnalyze(c *C) { mockstore.WithCluster(cluster), ) c.Assert(err, IsNil) + defer store.Close() var dom *domain.Domain session.DisableStats4Test() session.SetSchemaLease(0) dom, err = session.BootstrapSession(store) c.Assert(err, IsNil) + defer dom.Close() tk := testkit.NewTestKit(c, store) executor.RandSeed = 123 @@ -307,7 +312,7 @@ func (s *testFastAnalyze) TestFastAnalyze(c *C) { } tk.MustExec("analyze table t with 5 buckets, 6 samples") - is := executor.GetInfoSchema(tk.Se.(sessionctx.Context)) + is := infoschema.GetInfoSchema(tk.Se.(sessionctx.Context)) table, err := is.TableByName(model.NewCIStr("test"), model.NewCIStr("t")) c.Assert(err, IsNil) tableInfo := table.Meta() @@ -453,8 +458,10 @@ func (s *testFastAnalyze) TestFastAnalyzeRetryRowCount(c *C) { mockstore.WithMVCCStore(mvccStore), ) c.Assert(err, IsNil) + defer store.Close() dom, err := session.BootstrapSession(store) c.Assert(err, IsNil) + defer dom.Close() tk := testkit.NewTestKit(c, store) tk.MustExec("use test") diff --git a/executor/benchmark_test.go b/executor/benchmark_test.go index a120c2b6d1850..3260ce2cac7b0 100644 --- 
a/executor/benchmark_test.go +++ b/executor/benchmark_test.go @@ -33,6 +33,7 @@ import ( "github.com/pingcap/tidb/sessionctx/variable" "github.com/pingcap/tidb/types" "github.com/pingcap/tidb/util/chunk" + "github.com/pingcap/tidb/util/disk" "github.com/pingcap/tidb/util/memory" "github.com/pingcap/tidb/util/mock" "github.com/pingcap/tidb/util/stringutil" @@ -558,6 +559,7 @@ func defaultHashJoinTestCase(cols []*types.FieldType, joinType core.JoinType, us ctx.GetSessionVars().InitChunkSize = variable.DefInitChunkSize ctx.GetSessionVars().MaxChunkSize = variable.DefMaxChunkSize ctx.GetSessionVars().StmtCtx.MemTracker = memory.NewTracker(nil, -1) + ctx.GetSessionVars().StmtCtx.DiskTracker = disk.NewTracker(nil, -1) ctx.GetSessionVars().IndexLookupJoinConcurrency = 4 tc := &hashJoinTestCase{rows: 100000, concurrency: 4, ctx: ctx, keyIdx: []int{0, 1}} tc.cols = cols @@ -603,7 +605,9 @@ func prepare4HashJoin(testCase *hashJoinTestCase, innerExec, outerExec Executor) } t := memory.NewTracker(stringutil.StringerStr("root of prepare4HashJoin"), memLimit) t.SetActionOnExceed(nil) + t2 := disk.NewTracker(stringutil.StringerStr("root of prepare4HashJoin"), -1) e.ctx.GetSessionVars().StmtCtx.MemTracker = t + e.ctx.GetSessionVars().StmtCtx.DiskTracker = t2 return e } @@ -865,6 +869,7 @@ func defaultIndexJoinTestCase() *indexJoinTestCase { ctx.GetSessionVars().MaxChunkSize = variable.DefMaxChunkSize ctx.GetSessionVars().SnapshotTS = 1 ctx.GetSessionVars().StmtCtx.MemTracker = memory.NewTracker(nil, -1) + ctx.GetSessionVars().StmtCtx.DiskTracker = disk.NewTracker(nil, -1) tc := &indexJoinTestCase{ outerRows: 100000, innerRows: variable.DefMaxChunkSize * 100, diff --git a/executor/bind.go b/executor/bind.go index ef160bd868f70..b2ef276342827 100644 --- a/executor/bind.go +++ b/executor/bind.go @@ -20,6 +20,7 @@ import ( "github.com/pingcap/parser/ast" "github.com/pingcap/tidb/bindinfo" "github.com/pingcap/tidb/domain" + "github.com/pingcap/tidb/infoschema" plannercore "github.com/pingcap/tidb/planner/core" "github.com/pingcap/tidb/util/chunk" ) @@ -72,9 +73,9 @@ func (e *SQLBindExec) dropSQLBind() error { } if !e.isGlobal { handle := e.ctx.Value(bindinfo.SessionBindInfoKeyType).(*bindinfo.SessionHandle) - return handle.DropBindRecord(e.ctx, GetInfoSchema(e.ctx), record) + return handle.DropBindRecord(e.ctx, infoschema.GetInfoSchema(e.ctx), record) } - return domain.GetDomain(e.ctx).BindHandle().DropBindRecord(e.ctx, GetInfoSchema(e.ctx), record) + return domain.GetDomain(e.ctx).BindHandle().DropBindRecord(e.ctx, infoschema.GetInfoSchema(e.ctx), record) } func (e *SQLBindExec) createSQLBind() error { @@ -91,9 +92,9 @@ func (e *SQLBindExec) createSQLBind() error { } if !e.isGlobal { handle := e.ctx.Value(bindinfo.SessionBindInfoKeyType).(*bindinfo.SessionHandle) - return handle.AddBindRecord(e.ctx, GetInfoSchema(e.ctx), record) + return handle.AddBindRecord(e.ctx, infoschema.GetInfoSchema(e.ctx), record) } - return domain.GetDomain(e.ctx).BindHandle().AddBindRecord(e.ctx, GetInfoSchema(e.ctx), record) + return domain.GetDomain(e.ctx).BindHandle().AddBindRecord(e.ctx, infoschema.GetInfoSchema(e.ctx), record) } func (e *SQLBindExec) flushBindings() error { diff --git a/executor/builder.go b/executor/builder.go index f748fddd82041..48d5c21e40a83 100644 --- a/executor/builder.go +++ b/executor/builder.go @@ -1245,9 +1245,9 @@ func (b *executorBuilder) buildMemTable(v *plannercore.PhysicalMemTable) Executo e := &TableScanExec{ baseExecutor: newBaseExecutor(b.ctx, v.Schema(), v.ExplainID()), t: tb, - columns: 
v.Table.Columns, + columns: v.Columns, seekHandle: math.MinInt64, - isVirtualTable: tb.Type() == table.VirtualTable, + isVirtualTable: !tb.Type().IsNormalTable(), } return e } @@ -1452,7 +1452,6 @@ func (b *executorBuilder) buildAnalyzeIndexPushdown(task plannercore.AnalyzeInde concurrency: b.ctx.GetSessionVars().IndexSerialScanConcurrency, analyzePB: &tipb.AnalyzeReq{ Tp: tipb.AnalyzeType_TypeIndex, - StartTs: math.MaxUint64, Flags: sc.PushDownFlags(), TimeZoneOffset: offset, }, @@ -1520,7 +1519,6 @@ func (b *executorBuilder) buildAnalyzeColumnsPushdown(task plannercore.AnalyzeCo concurrency: b.ctx.GetSessionVars().DistSQLScanConcurrency, analyzePB: &tipb.AnalyzeReq{ Tp: tipb.AnalyzeType_TypeColumn, - StartTs: math.MaxUint64, Flags: sc.PushDownFlags(), TimeZoneOffset: offset, }, @@ -1698,10 +1696,6 @@ func constructDistExec(sctx sessionctx.Context, plans []plannercore.PhysicalPlan func (b *executorBuilder) constructDAGReq(plans []plannercore.PhysicalPlan) (dagReq *tipb.DAGRequest, streaming bool, err error) { dagReq = &tipb.DAGRequest{} - dagReq.StartTs, err = b.getStartTS() - if err != nil { - return nil, false, err - } dagReq.TimeZoneName, dagReq.TimeZoneOffset = timeutil.Zone(b.ctx.GetSessionVars().Location()) sc := b.ctx.GetSessionVars().StmtCtx dagReq.Flags = sc.PushDownFlags() @@ -1935,9 +1929,14 @@ func buildNoRangeTableReader(b *executorBuilder, v *plannercore.PhysicalTableRea pt := tbl.(table.PartitionedTable) tbl = pt.GetPartition(physicalTableID) } + startTS, err := b.getStartTS() + if err != nil { + return nil, err + } e := &TableReaderExecutor{ baseExecutor: newBaseExecutor(b.ctx, v.Schema(), v.ExplainID()), dagPB: dagReq, + startTS: startTS, table: tbl, keepOrder: ts.KeepOrder, desc: ts.Desc, @@ -1997,9 +1996,14 @@ func buildNoRangeIndexReader(b *executorBuilder, v *plannercore.PhysicalIndexRea } else { physicalTableID = is.Table.ID } + startTS, err := b.getStartTS() + if err != nil { + return nil, err + } e := &IndexReaderExecutor{ baseExecutor: newBaseExecutor(b.ctx, v.Schema(), v.ExplainID()), dagPB: dagReq, + startTS: startTS, physicalTableID: physicalTableID, table: tbl, index: is.Index, @@ -2068,9 +2072,14 @@ func buildNoRangeIndexLookUpReader(b *executorBuilder, v *plannercore.PhysicalIn pt := tbl.(table.PartitionedTable) tbl = pt.GetPartition(physicalTableID) } + startTS, err := b.getStartTS() + if err != nil { + return nil, err + } e := &IndexLookUpExecutor{ baseExecutor: newBaseExecutor(b.ctx, v.Schema(), v.ExplainID()), dagPB: indexReq, + startTS: startTS, table: tbl, index: is.Index, keepOrder: is.KeepOrder, @@ -2199,11 +2208,16 @@ func (builder *dataReaderBuilder) buildTableReaderFromHandles(ctx context.Contex colExec := true e.dagPB.CollectExecutionSummaries = &colExec } + startTS, err := builder.getStartTS() + if err != nil { + return nil, err + } sort.Sort(sortutil.Int64Slice(handles)) var b distsql.RequestBuilder kvReq, err := b.SetTableHandles(getPhysicalTableID(e.table), handles). SetDAGRequest(e.dagPB). + SetStartTS(startTS). SetDesc(e.desc). SetKeepOrder(e.keepOrder). SetStreaming(e.streaming). 
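The common thread in the reader changes above: the snapshot timestamp no longer rides inside the marshaled `tipb` payload (`dag.StartTs`, `ana.StartTs`, and `checksum.StartTs` were all dropped) but is set once on `kv.Request` via the new `RequestBuilder.SetStartTS`. Here is a minimal sketch of a caller under that convention; `buildTableScanRequest` is a hypothetical wrapper for illustration, not a function from this patch.

```go
package sketch

import (
	"github.com/pingcap/tidb/distsql"
	"github.com/pingcap/tidb/kv"
	"github.com/pingcap/tidb/sessionctx"
	"github.com/pingcap/tidb/util/ranger"
	"github.com/pingcap/tipb/go-tipb"
)

// buildTableScanRequest shows the post-change pattern: the caller obtains
// startTS (for example from executorBuilder.getStartTS) and threads it
// through the builder instead of stamping it into the DAG request.
func buildTableScanRequest(sctx sessionctx.Context, tid int64, ranges []*ranger.Range,
	dagPB *tipb.DAGRequest, startTS uint64) (*kv.Request, error) {
	var builder distsql.RequestBuilder
	return builder.SetTableRanges(tid, ranges, nil).
		SetDAGRequest(dagPB).
		SetStartTS(startTS). // previously dagReq.StartTs, set inside SetDAGRequest
		SetKeepOrder(true).
		SetFromSessionVars(sctx.GetSessionVars()).
		Build()
}
```

Keeping the timestamp on `kv.Request` means a marshaled DAG can be reused across snapshots, and it is what lets the analyze paths above pass `math.MaxUint64` explicitly through `SetStartTS`.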
@@ -2384,7 +2398,7 @@ func (b *executorBuilder) buildWindow(v *plannercore.PhysicalWindow) *WindowExec } return &WindowExec{baseExecutor: base, processor: processor, - groupChecker: newGroupChecker(b.ctx.GetSessionVars().StmtCtx, groupByItems), + groupChecker: newVecGroupChecker(b.ctx, groupByItems), numWindowFuncs: len(v.WindowFuncDescs), } } diff --git a/executor/checksum.go b/executor/checksum.go index c84579fe85ee8..5ff1b07841902 100644 --- a/executor/checksum.go +++ b/executor/checksum.go @@ -229,7 +229,6 @@ func (c *checksumContext) appendRequest(ctx sessionctx.Context, tableID int64, r func (c *checksumContext) buildTableRequest(ctx sessionctx.Context, tableID int64) (*kv.Request, error) { checksum := &tipb.ChecksumRequest{ - StartTs: c.StartTs, ScanOn: tipb.ChecksumScanOn_Table, Algorithm: tipb.ChecksumAlgorithm_Crc64_Xor, } @@ -239,13 +238,13 @@ func (c *checksumContext) buildTableRequest(ctx sessionctx.Context, tableID int6 var builder distsql.RequestBuilder return builder.SetTableRanges(tableID, ranges, nil). SetChecksumRequest(checksum). + SetStartTS(c.StartTs). SetConcurrency(ctx.GetSessionVars().DistSQLScanConcurrency). Build() } func (c *checksumContext) buildIndexRequest(ctx sessionctx.Context, tableID int64, indexInfo *model.IndexInfo) (*kv.Request, error) { checksum := &tipb.ChecksumRequest{ - StartTs: c.StartTs, ScanOn: tipb.ChecksumScanOn_Index, Algorithm: tipb.ChecksumAlgorithm_Crc64_Xor, } @@ -255,6 +254,7 @@ func (c *checksumContext) buildIndexRequest(ctx sessionctx.Context, tableID int6 var builder distsql.RequestBuilder return builder.SetIndexRanges(ctx.GetSessionVars().StmtCtx, tableID, indexInfo.ID, ranges). SetChecksumRequest(checksum). + SetStartTS(c.StartTs). SetConcurrency(ctx.GetSessionVars().DistSQLScanConcurrency). 
Build() } diff --git a/executor/chunk_size_control_test.go b/executor/chunk_size_control_test.go index 57e16372b0b3d..5c4f2990901af 100644 --- a/executor/chunk_size_control_test.go +++ b/executor/chunk_size_control_test.go @@ -121,6 +121,7 @@ type testChunkSizeControlSuite struct { } func (s *testChunkSizeControlSuite) SetUpSuite(c *C) { + c.Skip("not stable because coprocessor may result in goroutine leak") tableSQLs := map[string]string{} tableSQLs["Limit&TableScan"] = "create table t (a int, primary key (a))" tableSQLs["Limit&IndexScan"] = "create table t (a int, index idx_a(a))" @@ -161,7 +162,6 @@ func (s *testChunkSizeControlSuite) getKit(name string) ( } func (s *testChunkSizeControlSuite) TestLimitAndTableScan(c *C) { - c.Skip("not stable because coprocessor may result in goroutine leak") _, dom, tk, client, cluster := s.getKit("Limit&TableScan") defer client.Close() tbl, err := dom.InfoSchema().TableByName(model.NewCIStr("test"), model.NewCIStr("t")) @@ -193,7 +193,6 @@ func (s *testChunkSizeControlSuite) TestLimitAndTableScan(c *C) { } func (s *testChunkSizeControlSuite) TestLimitAndIndexScan(c *C) { - c.Skip("not stable because coprocessor may result in goroutine leak") _, dom, tk, client, cluster := s.getKit("Limit&IndexScan") defer client.Close() tbl, err := dom.InfoSchema().TableByName(model.NewCIStr("test"), model.NewCIStr("t")) diff --git a/executor/compiler.go b/executor/compiler.go index e55d7d04b5945..b7077e066d8ec 100644 --- a/executor/compiler.go +++ b/executor/compiler.go @@ -25,8 +25,6 @@ import ( "github.com/pingcap/tidb/planner" plannercore "github.com/pingcap/tidb/planner/core" "github.com/pingcap/tidb/sessionctx" - "github.com/pingcap/tidb/util/logutil" - "go.uber.org/zap" ) var ( @@ -55,7 +53,7 @@ func (c *Compiler) Compile(ctx context.Context, stmtNode ast.StmtNode) (*ExecStm ctx = opentracing.ContextWithSpan(ctx, span1) } - infoSchema := GetInfoSchema(c.Ctx) + infoSchema := infoschema.GetInfoSchema(c.Ctx) if err := plannercore.Preprocess(c.Ctx, stmtNode, infoSchema); err != nil { return nil, err } @@ -347,17 +345,3 @@ func GetStmtLabel(stmtNode ast.StmtNode) string { } return "other" } - -// GetInfoSchema gets TxnCtx InfoSchema if snapshot schema is not set, -// Otherwise, snapshot schema is returned. -func GetInfoSchema(ctx sessionctx.Context) infoschema.InfoSchema { - sessVar := ctx.GetSessionVars() - var is infoschema.InfoSchema - if snap := sessVar.SnapshotInfoschema; snap != nil { - is = snap.(infoschema.InfoSchema) - logutil.BgLogger().Info("use snapshot schema", zap.Uint64("conn", sessVar.ConnectionID), zap.Int64("schemaVersion", is.SchemaMetaVersion())) - } else { - is = sessVar.TxnCtx.InfoSchema.(infoschema.InfoSchema) - } - return is -} diff --git a/executor/coprocessor.go b/executor/coprocessor.go new file mode 100644 index 0000000000000..3c95ddf55158d --- /dev/null +++ b/executor/coprocessor.go @@ -0,0 +1,183 @@ +// Copyright 2019 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. 
+
+package executor
+
+import (
+ "context"
+
+ "github.com/gogo/protobuf/proto"
+ "github.com/pingcap/errors"
+ "github.com/pingcap/kvproto/pkg/coprocessor"
+ "github.com/pingcap/tidb/infoschema"
+ "github.com/pingcap/tidb/kv"
+ "github.com/pingcap/tidb/planner/core"
+ "github.com/pingcap/tidb/sessionctx"
+ "github.com/pingcap/tidb/types"
+ "github.com/pingcap/tidb/util/chunk"
+ "github.com/pingcap/tidb/util/codec"
+ "github.com/pingcap/tidb/util/timeutil"
+ "github.com/pingcap/tipb/go-tipb"
+)
+
+// CoprocessorDAGHandler is used to handle coprocessor DAG requests.
+type CoprocessorDAGHandler struct {
+ sctx sessionctx.Context
+ resp *coprocessor.Response
+ selResp *tipb.SelectResponse
+ dagReq *tipb.DAGRequest
+}
+
+// NewCoprocessorDAGHandler creates a new CoprocessorDAGHandler.
+func NewCoprocessorDAGHandler(sctx sessionctx.Context) *CoprocessorDAGHandler {
+ return &CoprocessorDAGHandler{
+ sctx: sctx,
+ resp: &coprocessor.Response{},
+ selResp: &tipb.SelectResponse{},
+ }
+}
+
+// HandleRequest handles the coprocessor request.
+func (h *CoprocessorDAGHandler) HandleRequest(ctx context.Context, req *coprocessor.Request) *coprocessor.Response {
+ e, err := h.buildDAGExecutor(req)
+ if err != nil {
+ return h.buildResponse(err)
+ }
+
+ err = e.Open(ctx)
+ if err != nil {
+ return h.buildResponse(err)
+ }
+
+ chk := newFirstChunk(e)
+ tps := e.base().retFieldTypes
+ for {
+ chk.Reset()
+ err = Next(ctx, e, chk)
+ if err != nil {
+ break
+ }
+ if chk.NumRows() == 0 {
+ break
+ }
+ err = h.appendChunk(chk, tps)
+ if err != nil {
+ break
+ }
+ }
+ return h.buildResponse(err)
+}
+
+func (h *CoprocessorDAGHandler) buildDAGExecutor(req *coprocessor.Request) (Executor, error) {
+ if req.GetTp() != kv.ReqTypeDAG {
+ return nil, errors.Errorf("unsupported request type %d", req.GetTp())
+ }
+ dagReq := new(tipb.DAGRequest)
+ err := proto.Unmarshal(req.Data, dagReq)
+ if err != nil {
+ return nil, errors.Trace(err)
+ }
+
+ stmtCtx := h.sctx.GetSessionVars().StmtCtx
+ stmtCtx.SetFlagsFromPBFlag(dagReq.Flags)
+ stmtCtx.TimeZone, err = timeutil.ConstructTimeZone(dagReq.TimeZoneName, int(dagReq.TimeZoneOffset))
+ if err != nil {
+ return nil, errors.Trace(err)
+ }
+ h.dagReq = dagReq
+ is := h.sctx.GetSessionVars().TxnCtx.InfoSchema.(infoschema.InfoSchema)
+ // Build physical plan.
+ bp := core.NewPBPlanBuilder(h.sctx, is)
+ plan, err := bp.Build(dagReq.Executors)
+ if err != nil {
+ return nil, errors.Trace(err)
+ }
+ // Build executor.
+ b := newExecutorBuilder(h.sctx, is) + return b.build(plan), nil +} + +func (h *CoprocessorDAGHandler) appendChunk(chk *chunk.Chunk, tps []*types.FieldType) error { + var err error + switch h.dagReq.EncodeType { + case tipb.EncodeType_TypeDefault: + err = h.encodeDefault(chk, tps) + case tipb.EncodeType_TypeChunk: + err = h.encodeChunk(chk, tps) + default: + return errors.Errorf("unknown DAG encode type: %v", h.dagReq.EncodeType) + } + return err +} + +func (h *CoprocessorDAGHandler) buildResponse(err error) *coprocessor.Response { + if err != nil { + h.resp.OtherError = err.Error() + return h.resp + } + h.selResp.EncodeType = h.dagReq.EncodeType + data, err := proto.Marshal(h.selResp) + if err != nil { + h.resp.OtherError = err.Error() + return h.resp + } + h.resp.Data = data + return h.resp +} + +func (h *CoprocessorDAGHandler) encodeChunk(chk *chunk.Chunk, colTypes []*types.FieldType) error { + colOrdinal := h.dagReq.OutputOffsets + chunks := h.selResp.Chunks + respColTypes := make([]*types.FieldType, 0, len(colOrdinal)) + for _, ordinal := range colOrdinal { + respColTypes = append(respColTypes, colTypes[ordinal]) + } + encoder := chunk.NewCodec(respColTypes) + chunks = append(chunks, tipb.Chunk{}) + cur := &chunks[len(chunks)-1] + cur.RowsData = append(cur.RowsData, encoder.Encode(chk)...) + h.selResp.Chunks = chunks + return nil +} + +func (h *CoprocessorDAGHandler) encodeDefault(chk *chunk.Chunk, tps []*types.FieldType) error { + colOrdinal := h.dagReq.OutputOffsets + chunks := h.selResp.Chunks + stmtCtx := h.sctx.GetSessionVars().StmtCtx + requestedRow := make([]byte, 0) + for i := 0; i < chk.NumRows(); i++ { + requestedRow = requestedRow[:0] + row := chk.GetRow(i) + for _, ordinal := range colOrdinal { + data, err := codec.EncodeValue(stmtCtx, nil, row.GetDatum(int(ordinal), tps[ordinal])) + if err != nil { + return err + } + requestedRow = append(requestedRow, data...) + } + chunks = h.appendRow(chunks, requestedRow, i) + } + h.selResp.Chunks = chunks + return nil +} + +const rowsPerChunk = 64 + +func (h *CoprocessorDAGHandler) appendRow(chunks []tipb.Chunk, data []byte, rowCnt int) []tipb.Chunk { + if rowCnt%rowsPerChunk == 0 { + chunks = append(chunks, tipb.Chunk{}) + } + cur := &chunks[len(chunks)-1] + cur.RowsData = append(cur.RowsData, data...) 
+ return chunks +} diff --git a/executor/ddl.go b/executor/ddl.go index e7756c2d9c030..f897c060d90d4 100644 --- a/executor/ddl.go +++ b/executor/ddl.go @@ -16,6 +16,7 @@ package executor import ( "context" "fmt" + "strconv" "strings" "github.com/pingcap/errors" @@ -98,6 +99,8 @@ func (e *DDLExec) Next(ctx context.Context, req *chunk.Chunk) (err error) { err = e.executeDropTableOrView(x) case *ast.RecoverTableStmt: err = e.executeRecoverTable(x) + case *ast.FlashBackTableStmt: + err = e.executeFlashbackTable(x) case *ast.RenameTableStmt: err = e.executeRenameTable(x) case *ast.TruncateTableStmt: @@ -108,6 +111,8 @@ func (e *DDLExec) Next(ctx context.Context, req *chunk.Chunk) (err error) { err = e.executeUnlockTables(x) case *ast.CleanupTableLockStmt: err = e.executeCleanupTableLock(x) + case *ast.RepairTableStmt: + err = e.executeRepairTable(x) } if err != nil { @@ -328,23 +333,32 @@ func (e *DDLExec) executeRecoverTable(s *ast.RecoverTableStmt) error { if s.JobID != 0 { job, tblInfo, err = e.getRecoverTableByJobID(s, t, dom) } else { - job, tblInfo, err = e.getRecoverTableByTableName(s, t, dom) + job, tblInfo, err = e.getRecoverTableByTableName(s.Table, "") } if err != nil { return err } + autoID, err := e.getTableAutoIDFromSnapshot(job) + if err != nil { + return err + } + // Call DDL RecoverTable. + err = domain.GetDomain(e.ctx).DDL().RecoverTable(e.ctx, tblInfo, job.SchemaID, autoID, job.ID, job.StartTS) + return err +} + +func (e *DDLExec) getTableAutoIDFromSnapshot(job *model.Job) (int64, error) { // Get table original autoID before table drop. + dom := domain.GetDomain(e.ctx) m, err := dom.GetSnapshotMeta(job.StartTS) if err != nil { - return err + return 0, err } autoID, err := m.GetAutoTableID(job.SchemaID, job.TableID) if err != nil { - return errors.Errorf("recover table_id: %d, get original autoID from snapshot meta err: %s", job.TableID, err.Error()) + return 0, errors.Errorf("recover table_id: %d, get original autoID from snapshot meta err: %s", job.TableID, err.Error()) } - // Call DDL RecoverTable - err = domain.GetDomain(e.ctx).DDL().RecoverTable(e.ctx, tblInfo, job.SchemaID, autoID, job.ID, job.StartTS) - return err + return autoID, nil } func (e *DDLExec) getRecoverTableByJobID(s *ast.RecoverTableStmt, t *meta.Meta, dom *domain.Domain) (*model.Job, *model.TableInfo, error) { @@ -381,7 +395,12 @@ func (e *DDLExec) getRecoverTableByJobID(s *ast.RecoverTableStmt, t *meta.Meta, return job, table.Meta(), nil } -func (e *DDLExec) getRecoverTableByTableName(s *ast.RecoverTableStmt, t *meta.Meta, dom *domain.Domain) (*model.Job, *model.TableInfo, error) { +func (e *DDLExec) getRecoverTableByTableName(tableName *ast.TableName, ts string) (*model.Job, *model.TableInfo, error) { + txn, err := e.ctx.Txn(true) + if err != nil { + return nil, nil, err + } + t := meta.NewMeta(txn) jobs, err := t.GetAllHistoryDDLJobs() if err != nil { return nil, nil, err @@ -392,17 +411,18 @@ func (e *DDLExec) getRecoverTableByTableName(s *ast.RecoverTableStmt, t *meta.Me if err != nil { return nil, nil, err } - schemaName := s.Table.Schema.L + schemaName := tableName.Schema.L if schemaName == "" { schemaName = e.ctx.GetSessionVars().CurrentDB } if schemaName == "" { return nil, nil, errors.Trace(core.ErrNoDB) } + dom := domain.GetDomain(e.ctx) // TODO: only search recent `e.JobNum` DDL jobs. 
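// The loop below scans the history jobs newest-first, keeping only drop/truncate jobs for the wanted table; when flashback supplies a timestamp, the job's start TS must match it exactly.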
for i := len(jobs) - 1; i > 0; i-- { job = jobs[i] - if job.Type != model.ActionDropTable { + if job.Type != model.ActionDropTable && job.Type != model.ActionTruncateTable { continue } // Check GC safe point for getting snapshot infoSchema. @@ -410,6 +430,9 @@ func (e *DDLExec) getRecoverTableByTableName(s *ast.RecoverTableStmt, t *meta.Me if err != nil { return nil, nil, err } + if len(ts) != 0 && ts != model.TSConvert2Time(job.StartTS).String() { + continue + } // Get the snapshot infoSchema before drop table. snapInfo, err := dom.GetSnapshotInfoSchema(job.StartTS) if err != nil { @@ -423,7 +446,7 @@ func (e *DDLExec) getRecoverTableByTableName(s *ast.RecoverTableStmt, t *meta.Me fmt.Sprintf("(Table ID %d)", job.TableID), ) } - if table.Meta().Name.L == s.Table.Name.L { + if table.Meta().Name.L == tableName.Name.L { schema, ok := dom.InfoSchema().SchemaByID(job.SchemaID) if !ok { return nil, nil, infoschema.ErrDatabaseNotExists.GenWithStackByArgs( @@ -437,11 +460,39 @@ func (e *DDLExec) getRecoverTableByTableName(s *ast.RecoverTableStmt, t *meta.Me } } if tblInfo == nil { - return nil, nil, errors.Errorf("Can't found drop table: %v in ddl history jobs", s.Table.Name) + return nil, nil, errors.Errorf("Can't find dropped table: %v in ddl history jobs", tableName.Name) } return job, tblInfo, nil } +func (e *DDLExec) executeFlashbackTable(s *ast.FlashBackTableStmt) error { + ts := s.Timestamp.GetString() + if len(ts) == 0 { + return errors.Errorf("The timestamp in flashback statement should be consistent with the drop/truncate DDL start time") + } + job, tblInfo, err := e.getRecoverTableByTableName(s.Table, ts) + if err != nil { + return err + } + if len(s.NewName) != 0 { + tblInfo.Name = model.NewCIStr(s.NewName) + } + // Check that the table ID does not already exist. + is := domain.GetDomain(e.ctx).InfoSchema() + _, ok := is.TableByID(tblInfo.ID) + if ok { + return infoschema.ErrTableExists.GenWithStackByArgs("tableID:" + strconv.FormatInt(tblInfo.ID, 10)) + } + + autoID, err := e.getTableAutoIDFromSnapshot(job) + if err != nil { + return err + } + // Call DDL RecoverTable. + err = domain.GetDomain(e.ctx).DDL().RecoverTable(e.ctx, tblInfo, job.SchemaID, autoID, job.ID, job.StartTS) + return err +} + func (e *DDLExec) executeLockTables(s *ast.LockTablesStmt) error { if !config.TableLockEnabled() { return nil @@ -458,6 +509,9 @@ func (e *DDLExec) executeUnlockTables(s *ast.UnlockTablesStmt) error { } func (e *DDLExec) executeCleanupTableLock(s *ast.CleanupTableLockStmt) error { - err := domain.GetDomain(e.ctx).DDL().CleanupTableLock(e.ctx, s.Tables) - return err + return domain.GetDomain(e.ctx).DDL().CleanupTableLock(e.ctx, s.Tables) +} + +func (e *DDLExec) executeRepairTable(s *ast.RepairTableStmt) error { + return domain.GetDomain(e.ctx).DDL().RepairTable(e.ctx, s.Table, s.CreateStmt) } diff --git a/executor/distsql.go b/executor/distsql.go index b57403c4e3900..82e71a9b7c199 100644 --- a/executor/distsql.go +++ b/executor/distsql.go @@ -213,6 +213,7 @@ type IndexReaderExecutor struct { // kvRanges are only used for union scan. kvRanges []kv.KeyRange dagPB *tipb.DAGRequest + startTS uint64 // result returns one or more distsql.PartialResult and each PartialResult is returned by one region. result distsql.SelectResult @@ -292,6 +293,7 @@ func (e *IndexReaderExecutor) open(ctx context.Context, kvRanges []kv.KeyRange) var builder distsql.RequestBuilder kvReq, err := builder.SetKeyRanges(kvRanges). SetDAGRequest(e.dagPB). + SetStartTS(e.startTS). SetDesc(e.desc). SetKeepOrder(e.keepOrder).
SetStreaming(e.streaming). @@ -321,6 +323,7 @@ type IndexLookUpExecutor struct { desc bool ranges []*ranger.Range dagPB *tipb.DAGRequest + startTS uint64 // handleIdx is the index of handle, which is only used for case of keeping order. handleIdx int tableRequest *tipb.DAGRequest @@ -438,6 +441,7 @@ func (e *IndexLookUpExecutor) startIndexWorker(ctx context.Context, kvRanges []k var builder distsql.RequestBuilder kvReq, err := builder.SetKeyRanges(kvRanges). SetDAGRequest(e.dagPB). + SetStartTS(e.startTS). SetDesc(e.desc). SetKeepOrder(e.keepOrder). SetStreaming(e.indexStreaming). @@ -528,6 +532,7 @@ func (e *IndexLookUpExecutor) buildTableReader(ctx context.Context, handles []in baseExecutor: newBaseExecutor(e.ctx, e.schema, stringutil.MemoizeStr(func() string { return e.id.String() + "_tableReader" })), table: e.table, dagPB: e.tableRequest, + startTS: e.startTS, columns: e.columns, streaming: e.tableStreaming, feedback: statistics.NewQueryFeedback(0, nil, 0, false), diff --git a/executor/errors.go b/executor/errors.go index a48152f0acdfe..cac87c2716872 100644 --- a/executor/errors.go +++ b/executor/errors.go @@ -18,26 +18,15 @@ import ( "github.com/pingcap/parser/terror" ) -// Error codes that are not mapping to mysql error codes. -const ( - codeUnknownPlan = iota - codePrepareMulti - codePrepareDDL - codeResultIsEmpty - codeErrBuildExec - codeBatchInsertFail - codeGetStartTS -) - // Error instances. var ( - ErrGetStartTS = terror.ClassExecutor.New(codeGetStartTS, "Can not get start ts") - ErrUnknownPlan = terror.ClassExecutor.New(codeUnknownPlan, "Unknown plan") - ErrPrepareMulti = terror.ClassExecutor.New(codePrepareMulti, "Can not prepare multiple statements") - ErrPrepareDDL = terror.ClassExecutor.New(codePrepareDDL, "Can not prepare DDL statements with parameters") - ErrResultIsEmpty = terror.ClassExecutor.New(codeResultIsEmpty, "result is empty") - ErrBuildExecutor = terror.ClassExecutor.New(codeErrBuildExec, "Failed to build executor") - ErrBatchInsertFail = terror.ClassExecutor.New(codeBatchInsertFail, "Batch insert failed, please clean the table and try again.") + ErrGetStartTS = terror.ClassExecutor.New(mysql.ErrGetStartTS, mysql.MySQLErrName[mysql.ErrGetStartTS]) + ErrUnknownPlan = terror.ClassExecutor.New(mysql.ErrUnknownPlan, mysql.MySQLErrName[mysql.ErrUnknownPlan]) + ErrPrepareMulti = terror.ClassExecutor.New(mysql.ErrPrepareMulti, mysql.MySQLErrName[mysql.ErrPrepareMulti]) + ErrPrepareDDL = terror.ClassExecutor.New(mysql.ErrPrepareDDL, mysql.MySQLErrName[mysql.ErrPrepareDDL]) + ErrResultIsEmpty = terror.ClassExecutor.New(mysql.ErrResultIsEmpty, mysql.MySQLErrName[mysql.ErrResultIsEmpty]) + ErrBuildExecutor = terror.ClassExecutor.New(mysql.ErrBuildExecutor, mysql.MySQLErrName[mysql.ErrBuildExecutor]) + ErrBatchInsertFail = terror.ClassExecutor.New(mysql.ErrBatchInsertFail, mysql.MySQLErrName[mysql.ErrBatchInsertFail]) ErrCantCreateUserWithGrant = terror.ClassExecutor.New(mysql.ErrCantCreateUserWithGrant, mysql.MySQLErrName[mysql.ErrCantCreateUserWithGrant]) ErrPasswordNoMatch = terror.ClassExecutor.New(mysql.ErrPasswordNoMatch, mysql.MySQLErrName[mysql.ErrPasswordNoMatch]) @@ -58,9 +47,17 @@ var ( func init() { // Map error codes to mysql error codes. 
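// The executor errors above now carry mysql error codes themselves, so each of the new entries below maps a code to itself.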
tableMySQLErrCodes := map[terror.ErrCode]uint16{ + mysql.ErrGetStartTS: mysql.ErrGetStartTS, + mysql.ErrUnknownPlan: mysql.ErrUnknownPlan, + mysql.ErrPrepareMulti: mysql.ErrPrepareMulti, + mysql.ErrPrepareDDL: mysql.ErrPrepareDDL, + mysql.ErrResultIsEmpty: mysql.ErrResultIsEmpty, + mysql.ErrBuildExecutor: mysql.ErrBuildExecutor, + mysql.ErrBatchInsertFail: mysql.ErrBatchInsertFail, + + mysql.ErrCantCreateUserWithGrant: mysql.ErrCantCreateUserWithGrant, mysql.ErrPasswordNoMatch: mysql.ErrPasswordNoMatch, mysql.ErrCannotUser: mysql.ErrCannotUser, - mysql.ErrWrongValueCountOnRow: mysql.ErrWrongValueCountOnRow, mysql.ErrPasswordFormat: mysql.ErrPasswordFormat, mysql.ErrCantChangeTxCharacteristics: mysql.ErrCantChangeTxCharacteristics, mysql.ErrPsManyParam: mysql.ErrPsManyParam, @@ -69,8 +66,10 @@ func init() { mysql.ErrTableaccessDenied: mysql.ErrTableaccessDenied, mysql.ErrBadDB: mysql.ErrBadDB, mysql.ErrWrongObject: mysql.ErrWrongObject, + mysql.ErrRoleNotGranted: mysql.ErrRoleNotGranted, mysql.ErrLockDeadlock: mysql.ErrLockDeadlock, mysql.ErrQueryInterrupted: mysql.ErrQueryInterrupted, + mysql.ErrWrongValueCountOnRow: mysql.ErrWrongValueCountOnRow, } terror.ErrClassToMySQLCodes[terror.ClassExecutor] = tableMySQLErrCodes } diff --git a/executor/executor.go b/executor/executor.go index ff4acd00a981d..d11969fa58e61 100644 --- a/executor/executor.go +++ b/executor/executor.go @@ -46,6 +46,7 @@ import ( "github.com/pingcap/tidb/util" "github.com/pingcap/tidb/util/admin" "github.com/pingcap/tidb/util/chunk" + "github.com/pingcap/tidb/util/disk" "github.com/pingcap/tidb/util/execdetails" "github.com/pingcap/tidb/util/logutil" "github.com/pingcap/tidb/util/memory" @@ -1493,9 +1494,10 @@ func ResetContextOfStmt(ctx sessionctx.Context, s ast.StmtNode) (err error) { memQuota = stmtHints.MemQuotaQuery } sc := &stmtctx.StatementContext{ - StmtHints: stmtHints, - TimeZone: vars.Location(), - MemTracker: memory.NewTracker(stringutil.MemoizeStr(s.Text), memQuota), + StmtHints: stmtHints, + TimeZone: vars.Location(), + MemTracker: memory.NewTracker(stringutil.MemoizeStr(s.Text), memQuota), + DiskTracker: disk.NewTracker(stringutil.MemoizeStr(s.Text), -1), } switch config.GetGlobalConfig().OOMAction { case config.OOMActionCancel: @@ -1616,14 +1618,8 @@ func ResetContextOfStmt(ctx sessionctx.Context, s ast.StmtNode) (err error) { sc.PrevAffectedRows = -1 } errCount, warnCount := vars.StmtCtx.NumErrorWarnings() - err = vars.SetSystemVar("warning_count", warnCount) - if err != nil { - return err - } - err = vars.SetSystemVar("error_count", errCount) - if err != nil { - return err - } + vars.SysErrorCount = errCount + vars.SysWarningCount = warnCount vars.StmtCtx = sc for _, warn := range hintWarns { vars.StmtCtx.AppendWarning(warn) diff --git a/executor/executor_required_rows_test.go b/executor/executor_required_rows_test.go index 90df8e91bdcb9..e3896f4d66ddd 100644 --- a/executor/executor_required_rows_test.go +++ b/executor/executor_required_rows_test.go @@ -31,6 +31,7 @@ import ( "github.com/pingcap/tidb/sessionctx/variable" "github.com/pingcap/tidb/types" "github.com/pingcap/tidb/util/chunk" + "github.com/pingcap/tidb/util/disk" "github.com/pingcap/tidb/util/memory" "github.com/pingcap/tidb/util/mock" ) @@ -206,6 +207,7 @@ func defaultCtx() sessionctx.Context { ctx.GetSessionVars().MaxChunkSize = variable.DefMaxChunkSize ctx.GetSessionVars().MemQuotaSort = variable.DefTiDBMemQuotaSort ctx.GetSessionVars().StmtCtx.MemTracker = memory.NewTracker(nil, ctx.GetSessionVars().MemQuotaQuery) + 
ctx.GetSessionVars().StmtCtx.DiskTracker = disk.NewTracker(nil, -1) ctx.GetSessionVars().SnapshotTS = uint64(1) return ctx } diff --git a/executor/executor_test.go b/executor/executor_test.go index 7fc12e6c08b7b..8fb7d74f28a1d 100644 --- a/executor/executor_test.go +++ b/executor/executor_test.go @@ -115,7 +115,7 @@ var _ = Suite(&testUpdateSuite{}) var _ = Suite(&testOOMSuite{}) var _ = Suite(&testPointGetSuite{}) var _ = Suite(&testBatchPointGetSuite{}) -var _ = Suite(&testRecoverTable{}) +var _ = SerialSuites(&testRecoverTable{}) var _ = Suite(&testFlushSuite{}) type testSuite struct{ *baseTestSuite } @@ -2089,7 +2089,7 @@ func (s *testSuiteP2) TestIsPointGet(c *C) { "select * from help_topic where help_topic_id=1": true, "select * from help_topic where help_category_id=1": false, } - infoSchema := executor.GetInfoSchema(ctx) + infoSchema := infoschema.GetInfoSchema(ctx) for sqlStr, result := range tests { stmtNode, err := s.ParseOneStmt(sqlStr, "", "") @@ -4672,6 +4672,131 @@ func (s *testRecoverTable) TestRecoverTable(c *C) { c.Assert(gcEnable, Equals, false) } +func (s *testRecoverTable) TestFlashbackTable(c *C) { + c.Assert(failpoint.Enable("github.com/pingcap/tidb/meta/autoid/mockAutoIDChange", `return(true)`), IsNil) + defer func() { + c.Assert(failpoint.Disable("github.com/pingcap/tidb/meta/autoid/mockAutoIDChange"), IsNil) + }() + tk := testkit.NewTestKit(c, s.store) + tk.MustExec("create database if not exists test_flashback") + tk.MustExec("use test_flashback") + tk.MustExec("drop table if exists t_flashback") + tk.MustExec("create table t_flashback (a int);") + defer func(originGC bool) { + if originGC { + ddl.EmulatorGCEnable() + } else { + ddl.EmulatorGCDisable() + } + }(ddl.IsEmulatorGCEnable()) + + // Disable emulator GC. + // Otherwise the emulator GC will delete the table record as soon as possible after the drop table DDL executes. + ddl.EmulatorGCDisable() + gcTimeFormat := "20060102-15:04:05 -0700 MST" + timeBeforeDrop := time.Now().Add(0 - time.Duration(48*60*60*time.Second)).Format(gcTimeFormat) + safePointSQL := `INSERT HIGH_PRIORITY INTO mysql.tidb VALUES ('tikv_gc_safe_point', '%[1]s', '') + ON DUPLICATE KEY + UPDATE variable_value = '%[1]s'` + // Clear GC variables first. + tk.MustExec("delete from mysql.tidb where variable_name in ( 'tikv_gc_safe_point','tikv_gc_enable' )") + // Set the GC safe point. + tk.MustExec(fmt.Sprintf(safePointSQL, timeBeforeDrop)) + // Enable GC. + err := gcutil.EnableGC(tk.Se) + c.Assert(err, IsNil) + + tk.MustExec("insert into t_flashback values (1),(2),(3)") + tk.MustExec("drop table t_flashback") + + // Test flashback table with a wrong timestamp. + _, err = tk.Exec(fmt.Sprintf("flashback table t_flashback until timestamp '%v'", time.Now().String())) + c.Assert(err.Error(), Equals, "Can't find dropped table: t_flashback in ddl history jobs") + + // Test that flashback fails when a new table with the same name already exists. + ts := getDDLJobStartTime(tk, "test_flashback", "t_flashback") + // Flashback should fail while a new table with the same name exists. + tk.MustExec("create table t_flashback (a int);") + _, err = tk.Exec(fmt.Sprintf("flashback table t_flashback until timestamp '%v'", ts)) + c.Assert(err.Error(), Equals, infoschema.ErrTableExists.GenWithStackByArgs("t_flashback").Error()) + + // Drop the new table with the same name, then flashback the old table. + tk.MustExec("drop table t_flashback") + + // Test for flashback table.
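+ // Syntax exercised below (grammar added by this change): + // FLASHBACK TABLE t UNTIL TIMESTAMP '<drop/truncate DDL start time>' [TO new_table]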
+ tk.MustExec(fmt.Sprintf("flashback table t_flashback until timestamp '%v'", ts)) + // Check flashback table meta and data record. + tk.MustQuery("select * from t_flashback;").Check(testkit.Rows("1", "2", "3")) + // Check flashback table autoID. + tk.MustExec("insert into t_flashback values (4),(5),(6)") + tk.MustQuery("select * from t_flashback;").Check(testkit.Rows("1", "2", "3", "4", "5", "6")) + // Check rebase auto id. + tk.MustQuery("select a,_tidb_rowid from t_flashback;").Check(testkit.Rows("1 1", "2 2", "3 3", "4 5001", "5 5002", "6 5003")) + + // Test flashback to a new table. + tk.MustExec("drop table t_flashback") + ts = getDDLJobStartTime(tk, "test_flashback", "t_flashback") + tk.MustExec("create table t_flashback (a int);") + tk.MustExec(fmt.Sprintf("flashback table t_flashback until timestamp '%v' to t_flashback2", ts)) + // Check flashback table meta and data record. + tk.MustQuery("select * from t_flashback2;").Check(testkit.Rows("1", "2", "3", "4", "5", "6")) + // Check flashback table autoID. + tk.MustExec("insert into t_flashback2 values (7),(8),(9)") + tk.MustQuery("select * from t_flashback2;").Check(testkit.Rows("1", "2", "3", "4", "5", "6", "7", "8", "9")) + // Check rebase auto id. + tk.MustQuery("select a,_tidb_rowid from t_flashback2;").Check(testkit.Rows("1 1", "2 2", "3 3", "4 5001", "5 5002", "6 5003", "7 10001", "8 10002", "9 10003")) + + // Test flashing back the same table multiple times. + _, err = tk.Exec(fmt.Sprintf("flashback table t_flashback until timestamp '%v' to t_flashback4", ts)) + c.Assert(infoschema.ErrTableExists.Equal(err), IsTrue) + + // Test flashback of a truncated table to a new table. + tk.MustExec("truncate table t_flashback2") + ts = getDDLJobStartTime(tk, "test_flashback", "t_flashback2") + tk.MustExec(fmt.Sprintf("flashback table t_flashback2 until timestamp '%v' to t_flashback3", ts)) + // Check flashback table meta and data record. + tk.MustQuery("select * from t_flashback3;").Check(testkit.Rows("1", "2", "3", "4", "5", "6", "7", "8", "9")) + // Check flashback table autoID. + tk.MustExec("insert into t_flashback3 values (10),(11)") + tk.MustQuery("select * from t_flashback3;").Check(testkit.Rows("1", "2", "3", "4", "5", "6", "7", "8", "9", "10", "11")) + // Check rebase auto id. + tk.MustQuery("select a,_tidb_rowid from t_flashback3;").Check(testkit.Rows("1 1", "2 2", "3 3", "4 5001", "5 5002", "6 5003", "7 10001", "8 10002", "9 10003", "10 15001", "11 15002")) + + // Test flashback of a dropped partition table. + tk.MustExec("drop table if exists t_p_flashback") + tk.MustExec("create table t_p_flashback (a int) partition by hash(a) partitions 4;") + tk.MustExec("insert into t_p_flashback values (1),(2),(3)") + tk.MustExec("drop table t_p_flashback") + ts = getDDLJobStartTime(tk, "test_flashback", "t_p_flashback") + tk.MustExec(fmt.Sprintf("flashback table t_p_flashback until timestamp '%v'", ts)) + // Check flashback table meta and data record. + tk.MustQuery("select * from t_p_flashback order by a;").Check(testkit.Rows("1", "2", "3")) + // Check flashback table autoID. + tk.MustExec("insert into t_p_flashback values (4),(5)") + tk.MustQuery("select a,_tidb_rowid from t_p_flashback order by a;").Check(testkit.Rows("1 1", "2 2", "3 3", "4 5001", "5 5002")) + + // Test flashback of a truncated partition table.
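+ // After truncate the original table still exists under the same name, so the restored data must go to a new table via the TO clause.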
+ tk.MustExec("truncate table t_p_flashback") + ts = getDDLJobStartTime(tk, "test_flashback", "t_p_flashback") + tk.MustExec(fmt.Sprintf("flashback table t_p_flashback until timestamp '%v' to t_p_flashback1", ts)) + // Check flashback table meta and data record. + tk.MustQuery("select * from t_p_flashback1 order by a;").Check(testkit.Rows("1", "2", "3", "4", "5")) + // Check flashback table autoID. + tk.MustExec("insert into t_p_flashback1 values (6)") + tk.MustQuery("select a,_tidb_rowid from t_p_flashback1 order by a;").Check(testkit.Rows("1 1", "2 2", "3 3", "4 5001", "5 5002", "6 10001")) +} + +func getDDLJobStartTime(tk *testkit.TestKit, dbName, tblName string) string { + re := tk.MustQuery("admin show ddl jobs 100") + rows := re.Rows() + for _, row := range rows { + if row[1] == dbName && row[2] == tblName && (row[3] == "drop table" || row[3] == "truncate table") { + return row[8].(string) + } + } + return "" +} + func (s *testSuiteP2) TestPointGetPreparedPlan(c *C) { tk1 := testkit.NewTestKit(c, s.store) tk1.MustExec("drop database if exists ps_text") diff --git a/executor/hash_table.go b/executor/hash_table.go index 77e11576004c3..c453e781efd21 100644 --- a/executor/hash_table.go +++ b/executor/hash_table.go @@ -28,6 +28,7 @@ import ( "github.com/pingcap/tidb/util/disk" "github.com/pingcap/tidb/util/logutil" "github.com/pingcap/tidb/util/memory" + "github.com/pingcap/tidb/util/stringutil" "go.uber.org/zap" ) @@ -88,7 +89,8 @@ type hashRowContainer struct { // memTracker is the reference of records.GetMemTracker(). // records would be set to nil for garbage collection when spilling is activated // so we need this reference. - memTracker *memory.Tracker + memTracker *memory.Tracker + diskTracker *disk.Tracker // records stores the chunks in memory. records *chunk.List @@ -122,9 +124,10 @@ func newHashRowContainer(sCtx sessionctx.Context, estCount int, hCtx *hashContex sc: sCtx.GetSessionVars().StmtCtx, hCtx: hCtx, - hashTable: newRowHashMap(estCount), - memTracker: initList.GetMemTracker(), - records: initList, + hashTable: newRowHashMap(estCount), + memTracker: initList.GetMemTracker(), + diskTracker: disk.NewTracker(stringutil.StringerStr("hashRowContainer"), -1), + records: initList, } return c @@ -174,6 +177,7 @@ func (c *hashRowContainer) matchJoinKey(buildRow, probeRow chunk.Row, probeHCtx func (c *hashRowContainer) spillToDisk() (err error) { N := c.records.NumChunks() c.recordsInDisk = chunk.NewListInDisk(c.hCtx.allTypes) + c.recordsInDisk.GetDiskTracker().AttachTo(c.diskTracker) for i := 0; i < N; i++ { chk := c.records.GetChunk(i) err = c.recordsInDisk.Add(chk) @@ -271,7 +275,7 @@ func (c *hashRowContainer) Close() error { func (c *hashRowContainer) GetMemTracker() *memory.Tracker { return c.memTracker } // GetDiskTracker returns the underlying disk usage tracker in hashRowContainer. -func (c *hashRowContainer) GetDiskTracker() *disk.Tracker { return c.recordsInDisk.GetDiskTracker() } +func (c *hashRowContainer) GetDiskTracker() *disk.Tracker { return c.diskTracker } // ActionSpill returns a memory.ActionOnExceed for spilling over to disk. 
func (c *hashRowContainer) ActionSpill() memory.ActionOnExceed { diff --git a/executor/insert_common.go b/executor/insert_common.go index b9dca684c639a..69e4001359190 100644 --- a/executor/insert_common.go +++ b/executor/insert_common.go @@ -393,7 +393,7 @@ func insertRowsFromSelect(ctx context.Context, base insertCommon) error { } for innerChunkRow := iter.Begin(); innerChunkRow != iter.End(); innerChunkRow = iter.Next() { - innerRow := types.CloneRow(innerChunkRow.GetDatumRow(fields)) + innerRow := innerChunkRow.GetDatumRow(fields) e.rowCount++ row, err := e.getRow(ctx, innerRow) if err != nil { @@ -410,8 +410,14 @@ func insertRowsFromSelect(ctx context.Context, base insertCommon) error { } } } + + err = base.exec(ctx, rows) + if err != nil { + return err + } + rows = rows[:0] } - return base.exec(ctx, rows) + return nil } func (e *InsertValues) doBatchInsert(ctx context.Context) error { diff --git a/executor/join.go b/executor/join.go index d9d7c921dd226..8e6c95f339b2d 100644 --- a/executor/join.go +++ b/executor/join.go @@ -28,6 +28,7 @@ import ( "github.com/pingcap/tidb/util/bitmap" "github.com/pingcap/tidb/util/chunk" "github.com/pingcap/tidb/util/codec" + "github.com/pingcap/tidb/util/disk" "github.com/pingcap/tidb/util/memory" "github.com/pingcap/tidb/util/stringutil" ) @@ -70,6 +71,7 @@ type HashJoinExec struct { joinResultCh chan *hashjoinWorkerResult memTracker *memory.Tracker // track memory usage. + diskTracker *disk.Tracker // track disk usage. prepared bool isOuterJoin bool @@ -145,6 +147,9 @@ func (e *HashJoinExec) Open(ctx context.Context) error { e.memTracker = memory.NewTracker(e.id, -1) e.memTracker.AttachTo(e.ctx.GetSessionVars().StmtCtx.MemTracker) + e.diskTracker = disk.NewTracker(e.id, -1) + e.diskTracker.AttachTo(e.ctx.GetSessionVars().StmtCtx.DiskTracker) + e.closeCh = make(chan struct{}) e.finished.Store(false) e.joinWorkerWaitGroup = sync.WaitGroup{} @@ -677,6 +682,8 @@ func (e *HashJoinExec) buildHashTableForList(buildSideResultCh <-chan *chunk.Chu e.rowContainer = newHashRowContainer(e.ctx, int(e.buildSideEstCount), hCtx) e.rowContainer.GetMemTracker().AttachTo(e.memTracker) e.rowContainer.GetMemTracker().SetLabel(buildSideResultLabel) + e.rowContainer.GetDiskTracker().AttachTo(e.diskTracker) + e.rowContainer.GetDiskTracker().SetLabel(buildSideResultLabel) if config.GetGlobalConfig().OOMUseTmpStorage { actionSpill := e.rowContainer.ActionSpill() e.ctx.GetSessionVars().StmtCtx.MemTracker.FallbackOldAndSetNewAction(actionSpill) diff --git a/executor/load_stats.go b/executor/load_stats.go index 58a764748341e..83fbb3ad188f7 100644 --- a/executor/load_stats.go +++ b/executor/load_stats.go @@ -19,6 +19,7 @@ import ( "github.com/pingcap/errors" "github.com/pingcap/tidb/domain" + "github.com/pingcap/tidb/infoschema" "github.com/pingcap/tidb/sessionctx" "github.com/pingcap/tidb/statistics/handle" "github.com/pingcap/tidb/util/chunk" @@ -85,5 +86,5 @@ func (e *LoadStatsInfo) Update(data []byte) error { if h == nil { return errors.New("Load Stats: handle is nil") } - return h.LoadStatsFromJSON(GetInfoSchema(e.Ctx), jsonTbl) + return h.LoadStatsFromJSON(infoschema.GetInfoSchema(e.Ctx), jsonTbl) } diff --git a/executor/metrics_test.go b/executor/metrics_test.go index ba6f3cc866738..19000b4faee5d 100644 --- a/executor/metrics_test.go +++ b/executor/metrics_test.go @@ -20,6 +20,7 @@ import ( . 
"github.com/pingcap/check" "github.com/pingcap/parser" "github.com/pingcap/tidb/executor" + "github.com/pingcap/tidb/infoschema" "github.com/pingcap/tidb/planner" plannercore "github.com/pingcap/tidb/planner/core" "github.com/pingcap/tidb/sessionctx" @@ -61,7 +62,7 @@ func (s *testSuite7) TestStmtLabel(c *C) { for _, tt := range tests { stmtNode, err := parser.New().ParseOneStmt(tt.sql, "", "") c.Check(err, IsNil) - is := executor.GetInfoSchema(tk.Se) + is := infoschema.GetInfoSchema(tk.Se) err = plannercore.Preprocess(tk.Se.(sessionctx.Context), stmtNode, is) c.Assert(err, IsNil) _, _, err = planner.Optimize(context.TODO(), tk.Se, stmtNode, is) diff --git a/executor/prepared.go b/executor/prepared.go index dd8152341929f..8c84b82e88333 100644 --- a/executor/prepared.go +++ b/executor/prepared.go @@ -297,7 +297,7 @@ func CompileExecutePreparedStmt(ctx context.Context, sctx sessionctx.Context, return nil, err } execStmt.BinaryArgs = args - is := GetInfoSchema(sctx) + is := infoschema.GetInfoSchema(sctx) execPlan, names, err := planner.Optimize(ctx, sctx, execStmt, is) if err != nil { return nil, err diff --git a/executor/seqtest/seq_executor_test.go b/executor/seqtest/seq_executor_test.go index 72815eb039d87..cf85e0c17167c 100644 --- a/executor/seqtest/seq_executor_test.go +++ b/executor/seqtest/seq_executor_test.go @@ -755,6 +755,43 @@ func (s *seqTestSuite) TestAdminShowNextID(c *C) { r.Check(testkit.Rows("test1 tt id 41")) } +func (s *seqTestSuite) TestNoHistoryWhenDisableRetry(c *C) { + tk := testkit.NewTestKitWithInit(c, s.store) + tk.MustExec("use test") + tk.MustExec("drop table if exists history") + tk.MustExec("create table history (a int)") + tk.MustExec("set @@autocommit = 0") + + // retry_limit = 0 will not add history. + tk.MustExec("set @@tidb_retry_limit = 0") + tk.MustExec("insert history values (1)") + c.Assert(session.GetHistory(tk.Se).Count(), Equals, 0) + + // Disable auto_retry will add history for auto committed only + tk.MustExec("set @@autocommit = 1") + tk.MustExec("set @@tidb_retry_limit = 10") + tk.MustExec("set @@tidb_disable_txn_auto_retry = 1") + c.Assert(failpoint.Enable("github.com/pingcap/tidb/session/keepHistory", `return(true)`), IsNil) + tk.MustExec("insert history values (1)") + c.Assert(session.GetHistory(tk.Se).Count(), Equals, 1) + c.Assert(failpoint.Disable("github.com/pingcap/tidb/session/keepHistory"), IsNil) + tk.MustExec("begin") + tk.MustExec("insert history values (1)") + c.Assert(session.GetHistory(tk.Se).Count(), Equals, 0) + tk.MustExec("commit") + + // Enable auto_retry will add history for both. 
+ tk.MustExec("set @@tidb_disable_txn_auto_retry = 0") + c.Assert(failpoint.Enable("github.com/pingcap/tidb/session/keepHistory", `return(true)`), IsNil) + tk.MustExec("insert history values (1)") + c.Assert(failpoint.Disable("github.com/pingcap/tidb/session/keepHistory"), IsNil) + c.Assert(session.GetHistory(tk.Se).Count(), Equals, 1) + tk.MustExec("begin") + tk.MustExec("insert history values (1)") + c.Assert(session.GetHistory(tk.Se).Count(), Equals, 2) + tk.MustExec("commit") +} + func (s *seqTestSuite) TestPrepareMaxParamCountCheck(c *C) { tk := testkit.NewTestKit(c, s.store) tk.MustExec("use test") diff --git a/executor/show.go b/executor/show.go index 8e63c42bcb5ca..9360173cda129 100644 --- a/executor/show.go +++ b/executor/show.go @@ -39,9 +39,11 @@ import ( "github.com/pingcap/tidb/expression" "github.com/pingcap/tidb/infoschema" "github.com/pingcap/tidb/kv" + "github.com/pingcap/tidb/meta/autoid" plannercore "github.com/pingcap/tidb/planner/core" "github.com/pingcap/tidb/plugin" "github.com/pingcap/tidb/privilege" + "github.com/pingcap/tidb/sessionctx" "github.com/pingcap/tidb/sessionctx/stmtctx" "github.com/pingcap/tidb/sessionctx/variable" "github.com/pingcap/tidb/store/tikv" @@ -183,6 +185,8 @@ func (e *ShowExec) fetchAll(ctx context.Context) error { return nil case ast.ShowRegions: return e.fetchShowTableRegions() + case ast.ShowBuiltins: + return e.fetchShowBuiltins() } return nil } @@ -345,7 +349,7 @@ func (e *ShowExec) fetchShowTableStatus() error { FROM information_schema.tables WHERE table_schema='%s' ORDER BY table_name`, e.DBName) - rows, _, err := e.ctx.(sqlexec.RestrictedSQLExecutor).ExecRestrictedSQL(sql) + rows, _, err := e.ctx.(sqlexec.RestrictedSQLExecutor).ExecRestrictedSQLWithSnapshot(sql) if err != nil { return errors.Trace(err) @@ -632,53 +636,45 @@ func escape(cis model.CIStr, sqlMode mysql.SQLMode) string { return quote + strings.Replace(cis.O, quote, quote+quote, -1) + quote } -func (e *ShowExec) fetchShowCreateTable() error { - tb, err := e.getTable() - if err != nil { - return errors.Trace(err) - } - - sqlMode := e.ctx.GetSessionVars().SQLMode - - // TODO: let the result more like MySQL. - var buf bytes.Buffer - if tb.Meta().IsView() { - e.fetchShowCreateTable4View(tb.Meta(), &buf) - e.appendRow([]interface{}{tb.Meta().Name.O, buf.String(), tb.Meta().Charset, tb.Meta().Collate}) +// ConstructResultOfShowCreateTable constructs the result for show create table. +func ConstructResultOfShowCreateTable(ctx sessionctx.Context, tableInfo *model.TableInfo, allocator autoid.Allocator, buf *bytes.Buffer) (err error) { + if tableInfo.IsView() { + fetchShowCreateTable4View(ctx, tableInfo, buf) return nil } - tblCharset := tb.Meta().Charset + tblCharset := tableInfo.Charset if len(tblCharset) == 0 { tblCharset = mysql.DefaultCharset } - tblCollate := tb.Meta().Collate + tblCollate := tableInfo.Collate // Set default collate if collate is not specified. 
if len(tblCollate) == 0 { tblCollate = getDefaultCollate(tblCharset) } - fmt.Fprintf(&buf, "CREATE TABLE %s (\n", escape(tb.Meta().Name, sqlMode)) - var pkCol *table.Column + sqlMode := ctx.GetSessionVars().SQLMode + fmt.Fprintf(buf, "CREATE TABLE %s (\n", escape(tableInfo.Name, sqlMode)) + var pkCol *model.ColumnInfo var hasAutoIncID bool - for i, col := range tb.Cols() { - fmt.Fprintf(&buf, " %s %s", escape(col.Name, sqlMode), col.GetTypeDesc()) + for i, col := range tableInfo.Cols() { + fmt.Fprintf(buf, " %s %s", escape(col.Name, sqlMode), col.GetTypeDesc()) if col.Charset != "binary" { if col.Charset != tblCharset { - fmt.Fprintf(&buf, " CHARACTER SET %s", col.Charset) + fmt.Fprintf(buf, " CHARACTER SET %s", col.Charset) } if col.Collate != tblCollate { - fmt.Fprintf(&buf, " COLLATE %s", col.Collate) + fmt.Fprintf(buf, " COLLATE %s", col.Collate) } else { defcol, err := charset.GetDefaultCollation(col.Charset) if err == nil && defcol != col.Collate { - fmt.Fprintf(&buf, " COLLATE %s", col.Collate) + fmt.Fprintf(buf, " COLLATE %s", col.Collate) } } } if col.IsGenerated() { // It's a generated column. - fmt.Fprintf(&buf, " GENERATED ALWAYS AS (%s)", col.GeneratedExprString) + fmt.Fprintf(buf, " GENERATED ALWAYS AS (%s)", col.GeneratedExprString) if col.GeneratedStored { buf.WriteString(" STORED") } else { @@ -712,7 +708,7 @@ func (e *ShowExec) fetchShowCreateTable() error { defaultValStr := fmt.Sprintf("%v", defaultValue) // If column is timestamp, and default value is not current_timestamp, should convert the default value to the current session time zone. if col.Tp == mysql.TypeTimestamp && defaultValStr != types.ZeroDatetimeStr { - timeValue, err := table.GetColDefaultValue(e.ctx, col.ToInfo()) + timeValue, err := table.GetColDefaultValue(ctx, col) if err != nil { return errors.Trace(err) } @@ -721,9 +717,9 @@ func (e *ShowExec) fetchShowCreateTable() error { if col.Tp == mysql.TypeBit { defaultValBinaryLiteral := types.BinaryLiteral(defaultValStr) - fmt.Fprintf(&buf, " DEFAULT %s", defaultValBinaryLiteral.ToBitLiteralString(true)) + fmt.Fprintf(buf, " DEFAULT %s", defaultValBinaryLiteral.ToBitLiteralString(true)) } else { - fmt.Fprintf(&buf, " DEFAULT '%s'", format.OutputFormat(defaultValStr)) + fmt.Fprintf(buf, " DEFAULT '%s'", format.OutputFormat(defaultValStr)) } } } @@ -733,12 +729,12 @@ func (e *ShowExec) fetchShowCreateTable() error { } } if len(col.Comment) > 0 { - fmt.Fprintf(&buf, " COMMENT '%s'", format.OutputFormat(col.Comment)) + fmt.Fprintf(buf, " COMMENT '%s'", format.OutputFormat(col.Comment)) } - if i != len(tb.Cols())-1 { + if i != len(tableInfo.Cols())-1 { buf.WriteString(",\n") } - if tb.Meta().PKIsHandle && mysql.HasPriKeyFlag(col.Flag) { + if tableInfo.PKIsHandle && mysql.HasPriKeyFlag(col.Flag) { pkCol = col } } @@ -746,12 +742,12 @@ func (e *ShowExec) fetchShowCreateTable() error { if pkCol != nil { // If PKIsHanle, pk info is not in tb.Indices(). We should handle it here. 
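// (Such a primary key is the row handle itself and has no entry in the index list, so it is printed from the column info.)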
buf.WriteString(",\n") - fmt.Fprintf(&buf, " PRIMARY KEY (%s)", escape(pkCol.Name, sqlMode)) + fmt.Fprintf(buf, " PRIMARY KEY (%s)", escape(pkCol.Name, sqlMode)) } - publicIndices := make([]table.Index, 0, len(tb.Indices())) - for _, idx := range tb.Indices() { - if idx.Meta().State == model.StatePublic { + publicIndices := make([]*model.IndexInfo, 0, len(tableInfo.Indices)) + for _, idx := range tableInfo.Indices { + if idx.State == model.StatePublic { publicIndices = append(publicIndices, idx) } } @@ -759,14 +755,13 @@ func (e *ShowExec) fetchShowCreateTable() error { buf.WriteString(",\n") } - for i, idx := range publicIndices { - idxInfo := idx.Meta() + for i, idxInfo := range publicIndices { if idxInfo.Primary { buf.WriteString(" PRIMARY KEY ") } else if idxInfo.Unique { - fmt.Fprintf(&buf, " UNIQUE KEY %s ", escape(idxInfo.Name, sqlMode)) + fmt.Fprintf(buf, " UNIQUE KEY %s ", escape(idxInfo.Name, sqlMode)) } else { - fmt.Fprintf(&buf, " KEY %s ", escape(idxInfo.Name, sqlMode)) + fmt.Fprintf(buf, " KEY %s ", escape(idxInfo.Name, sqlMode)) } cols := make([]string, 0, len(idxInfo.Columns)) @@ -777,7 +772,7 @@ func (e *ShowExec) fetchShowCreateTable() error { } cols = append(cols, colInfo) } - fmt.Fprintf(&buf, "(%s)", strings.Join(cols, ",")) + fmt.Fprintf(buf, "(%s)", strings.Join(cols, ",")) if i != len(publicIndices)-1 { buf.WriteString(",\n") } @@ -791,40 +786,60 @@ func (e *ShowExec) fetchShowCreateTable() error { if len(tblCollate) == 0 { // If we can not find default collate for the given charset, // do not show the collate part. - fmt.Fprintf(&buf, " DEFAULT CHARSET=%s", tblCharset) + fmt.Fprintf(buf, " DEFAULT CHARSET=%s", tblCharset) } else { - fmt.Fprintf(&buf, " DEFAULT CHARSET=%s COLLATE=%s", tblCharset, tblCollate) + fmt.Fprintf(buf, " DEFAULT CHARSET=%s COLLATE=%s", tblCharset, tblCollate) } // Displayed if the compression typed is set. - if len(tb.Meta().Compression) != 0 { - fmt.Fprintf(&buf, " COMPRESSION='%s'", tb.Meta().Compression) + if len(tableInfo.Compression) != 0 { + fmt.Fprintf(buf, " COMPRESSION='%s'", tableInfo.Compression) } if hasAutoIncID { - autoIncID, err := tb.Allocator(e.ctx).NextGlobalAutoID(tb.Meta().ID) + autoIncID, err := allocator.NextGlobalAutoID(tableInfo.ID) if err != nil { return errors.Trace(err) } - // It's campatible with MySQL. + // It's compatible with MySQL. if autoIncID > 1 { - fmt.Fprintf(&buf, " AUTO_INCREMENT=%d", autoIncID) + fmt.Fprintf(buf, " AUTO_INCREMENT=%d", autoIncID) } } - if tb.Meta().ShardRowIDBits > 0 { - fmt.Fprintf(&buf, "/*!90000 SHARD_ROW_ID_BITS=%d ", tb.Meta().ShardRowIDBits) - if tb.Meta().PreSplitRegions > 0 { - fmt.Fprintf(&buf, "PRE_SPLIT_REGIONS=%d ", tb.Meta().PreSplitRegions) + if tableInfo.ShardRowIDBits > 0 { + fmt.Fprintf(buf, "/*!90000 SHARD_ROW_ID_BITS=%d ", tableInfo.ShardRowIDBits) + if tableInfo.PreSplitRegions > 0 { + fmt.Fprintf(buf, "PRE_SPLIT_REGIONS=%d ", tableInfo.PreSplitRegions) } buf.WriteString("*/") } - if len(tb.Meta().Comment) > 0 { - fmt.Fprintf(&buf, " COMMENT='%s'", format.OutputFormat(tb.Meta().Comment)) + if len(tableInfo.Comment) > 0 { + fmt.Fprintf(buf, " COMMENT='%s'", format.OutputFormat(tableInfo.Comment)) } // add partition info here. 
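// The helper now works on *model.TableInfo directly, so the partition info comes from tableInfo rather than tb.Meta().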
- appendPartitionInfo(tb.Meta().Partition, &buf) + appendPartitionInfo(tableInfo.Partition, buf) + return nil +} + +func (e *ShowExec) fetchShowCreateTable() error { + tb, err := e.getTable() + if err != nil { + return errors.Trace(err) + } + + tableInfo := tb.Meta() + allocator := tb.Allocator(e.ctx) + var buf bytes.Buffer + // TODO: make the result look more like MySQL's. + if err = ConstructResultOfShowCreateTable(e.ctx, tb.Meta(), allocator, &buf); err != nil { + return err + } + if tableInfo.IsView() { + e.appendRow([]interface{}{tableInfo.Name.O, buf.String(), tableInfo.Charset, tableInfo.Collate}) + return nil + } e.appendRow([]interface{}{tb.Meta().Name.O, buf.String()}) return nil @@ -846,13 +861,13 @@ func (e *ShowExec) fetchShowCreateView() error { } var buf bytes.Buffer - e.fetchShowCreateTable4View(tb.Meta(), &buf) + fetchShowCreateTable4View(e.ctx, tb.Meta(), &buf) e.appendRow([]interface{}{tb.Meta().Name.O, buf.String(), tb.Meta().Charset, tb.Meta().Collate}) return nil } -func (e *ShowExec) fetchShowCreateTable4View(tb *model.TableInfo, buf *bytes.Buffer) { - sqlMode := e.ctx.GetSessionVars().SQLMode +func fetchShowCreateTable4View(ctx sessionctx.Context, tb *model.TableInfo, buf *bytes.Buffer) { + sqlMode := ctx.GetSessionVars().SQLMode fmt.Fprintf(buf, "CREATE ALGORITHM=%s ", tb.View.Algorithm.String()) fmt.Fprintf(buf, "DEFINER=%s@%s ", escape(model.NewCIStr(tb.View.Definer.Username), sqlMode), escape(model.NewCIStr(tb.View.Definer.Hostname), sqlMode)) @@ -901,6 +916,20 @@ func appendPartitionInfo(partitionInfo *model.PartitionInfo, buf *bytes.Buffer) buf.WriteString(")") } +// ConstructResultOfShowCreateDatabase constructs the result for show create database. +func ConstructResultOfShowCreateDatabase(ctx sessionctx.Context, dbInfo *model.DBInfo, ifNotExists bool, buf *bytes.Buffer) (err error) { + sqlMode := ctx.GetSessionVars().SQLMode + var ifNotExistsStr string + if ifNotExists { + ifNotExistsStr = "/*!32312 IF NOT EXISTS*/ " + } + fmt.Fprintf(buf, "CREATE DATABASE %s%s", ifNotExistsStr, escape(dbInfo.Name, sqlMode)) + if s := dbInfo.Charset; len(s) > 0 { + fmt.Fprintf(buf, " /*!40100 DEFAULT CHARACTER SET %s */", s) + } + return nil +} + // fetchShowCreateDatabase composes show create database result.
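// It checks schema privileges, resolves the DBInfo, and delegates the formatting to ConstructResultOfShowCreateDatabase above.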
func (e *ShowExec) fetchShowCreateDatabase() error { checker := privilege.GetPrivilegeManager(e.ctx) @@ -909,24 +938,17 @@ func (e *ShowExec) fetchShowCreateDatabase() error { return e.dbAccessDenied() } } - db, ok := e.is.SchemaByName(e.DBName) + dbInfo, ok := e.is.SchemaByName(e.DBName) if !ok { return infoschema.ErrDatabaseNotExists.GenWithStackByArgs(e.DBName.O) } - sqlMode := e.ctx.GetSessionVars().SQLMode - var buf bytes.Buffer - var ifNotExists string - if e.IfNotExists { - ifNotExists = "/*!32312 IF NOT EXISTS*/ " - } - fmt.Fprintf(&buf, "CREATE DATABASE %s%s", ifNotExists, escape(db.Name, sqlMode)) - if s := db.Charset; len(s) > 0 { - fmt.Fprintf(&buf, " /*!40100 DEFAULT CHARACTER SET %s */", s) + err := ConstructResultOfShowCreateDatabase(e.ctx, dbInfo, e.IfNotExists, &buf) + if err != nil { + return err } - - e.appendRow([]interface{}{db.Name.O, buf.String()}) + e.appendRow([]interface{}{dbInfo.Name.O, buf.String()}) return nil } @@ -1204,7 +1226,7 @@ func (e *ShowExec) fetchShowTableRegions() error { if !ok { return nil } - splitStore, ok := store.(kv.SplitableStore) + splitStore, ok := store.(kv.SplittableStore) if !ok { return nil } @@ -1233,21 +1255,21 @@ func (e *ShowExec) fetchShowTableRegions() error { return nil } -func getTableRegions(tb table.Table, tikvStore tikv.Storage, splitStore kv.SplitableStore) ([]regionMeta, error) { +func getTableRegions(tb table.Table, tikvStore tikv.Storage, splitStore kv.SplittableStore) ([]regionMeta, error) { if info := tb.Meta().GetPartitionInfo(); info != nil { return getPartitionTableRegions(info, tb.(table.PartitionedTable), tikvStore, splitStore) } return getPhysicalTableRegions(tb.Meta().ID, tb.Meta(), tikvStore, splitStore, nil) } -func getTableIndexRegions(tb table.Table, indexInfo *model.IndexInfo, tikvStore tikv.Storage, splitStore kv.SplitableStore) ([]regionMeta, error) { +func getTableIndexRegions(tb table.Table, indexInfo *model.IndexInfo, tikvStore tikv.Storage, splitStore kv.SplittableStore) ([]regionMeta, error) { if info := tb.Meta().GetPartitionInfo(); info != nil { return getPartitionIndexRegions(info, tb.(table.PartitionedTable), indexInfo, tikvStore, splitStore) } return getPhysicalIndexRegions(tb.Meta().ID, indexInfo, tikvStore, splitStore, nil) } -func getPartitionTableRegions(info *model.PartitionInfo, tbl table.PartitionedTable, tikvStore tikv.Storage, splitStore kv.SplitableStore) ([]regionMeta, error) { +func getPartitionTableRegions(info *model.PartitionInfo, tbl table.PartitionedTable, tikvStore tikv.Storage, splitStore kv.SplittableStore) ([]regionMeta, error) { regions := make([]regionMeta, 0, len(info.Definitions)) uniqueRegionMap := make(map[uint64]struct{}) for _, def := range info.Definitions { @@ -1263,7 +1285,7 @@ func getPartitionTableRegions(info *model.PartitionInfo, tbl table.PartitionedTa return regions, nil } -func getPartitionIndexRegions(info *model.PartitionInfo, tbl table.PartitionedTable, indexInfo *model.IndexInfo, tikvStore tikv.Storage, splitStore kv.SplitableStore) ([]regionMeta, error) { +func getPartitionIndexRegions(info *model.PartitionInfo, tbl table.PartitionedTable, indexInfo *model.IndexInfo, tikvStore tikv.Storage, splitStore kv.SplittableStore) ([]regionMeta, error) { var regions []regionMeta uniqueRegionMap := make(map[uint64]struct{}) for _, def := range info.Definitions { @@ -1307,3 +1329,10 @@ func (e *ShowExec) fillRegionsToChunk(regions []regionMeta) { e.result.AppendInt64(10, regions[i].approximateKeys) } } + +func (e *ShowExec) fetchShowBuiltins() error { + for _, f 
:= range expression.GetBuiltinList() { + e.appendRow([]interface{}{f}) + } + return nil +} diff --git a/executor/show_stats_test.go b/executor/show_stats_test.go index 152898eb2170d..1aae306c9ef7a 100644 --- a/executor/show_stats_test.go +++ b/executor/show_stats_test.go @@ -14,6 +14,9 @@ package executor_test import ( + "fmt" + "time" + . "github.com/pingcap/check" "github.com/pingcap/tidb/statistics" "github.com/pingcap/tidb/util/testkit" @@ -203,3 +206,28 @@ func (s *testShowStatsSuite) TestShowAnalyzeStatus(c *C) { c.Assert(result.Rows()[1][5], NotNil) c.Assert(result.Rows()[1][6], Equals, "finished") } + +func (s *testShowStatsSuite) TestShowStatusSnapshot(c *C) { + tk := testkit.NewTestKit(c, s.store) + tk.MustExec("drop database if exists test;") + tk.MustExec("create database test;") + tk.MustExec("use test;") + tk.MustExec("create table t (a int);") + + // For mocktikv, the safe point is not initialized, so we insert it manually for the snapshot read to use. + safePointName := "tikv_gc_safe_point" + safePointValue := "20060102-15:04:05 -0700" + safePointComment := "All versions after safe point can be accessed. (DO NOT EDIT)" + updateSafePoint := fmt.Sprintf(`INSERT INTO mysql.tidb VALUES ('%[1]s', '%[2]s', '%[3]s') + ON DUPLICATE KEY + UPDATE variable_value = '%[2]s', comment = '%[3]s'`, safePointName, safePointValue, safePointComment) + tk.MustExec(updateSafePoint) + + snapshotTime := time.Now() + + tk.MustExec("drop table t;") + tk.MustQuery("show table status;").Check(testkit.Rows()) + tk.MustExec("set @@tidb_snapshot = '" + snapshotTime.Format("2006-01-02 15:04:05.999999") + "'") + result := tk.MustQuery("show table status;") + c.Check(result.Rows()[0][0], Matches, "t") +} diff --git a/executor/show_test.go b/executor/show_test.go index 68ff9a164d2c4..4760f109b69b4 100644 --- a/executor/show_test.go +++ b/executor/show_test.go @@ -639,3 +639,13 @@ func (s *testSuite5) TestShowEscape(c *C) { tk.MustExec("rename table \"t`abl\"\"e\" to t") tk.MustExec("set sql_mode=@old_sql_mode") } + +func (s *testSuite5) TestShowBuiltin(c *C) { + tk := testkit.NewTestKit(c, s.store) + res := tk.MustQuery("show builtins;") + c.Assert(res, NotNil) + rows := res.Rows() + c.Assert(262, Equals, len(rows)) + c.Assert("abs", Equals, rows[0][0].(string)) + c.Assert("yearweek", Equals, rows[261][0].(string)) +} diff --git a/executor/simple.go b/executor/simple.go index bafd310d5ffe5..aea375ab54e85 100644 --- a/executor/simple.go +++ b/executor/simple.go @@ -135,7 +135,12 @@ func (e *SimpleExec) Next(ctx context.Context, req *chunk.Chunk) (err error) { } func (e *SimpleExec) setDefaultRoleNone(s *ast.SetDefaultRoleStmt) error { - sqlExecutor := e.ctx.(sqlexec.SQLExecutor) + restrictedCtx, err := e.getSysSession() + if err != nil { + return err + } + defer e.releaseSysSession(restrictedCtx) + sqlExecutor := restrictedCtx.(sqlexec.SQLExecutor) if _, err := sqlExecutor.Execute(context.Background(), "begin"); err != nil { return err } @@ -177,7 +182,13 @@ func (e *SimpleExec) setDefaultRoleRegular(s *ast.SetDefaultRoleStmt) error { return ErrCannotUser.GenWithStackByArgs("SET DEFAULT ROLE", role.String()) } } - sqlExecutor := e.ctx.(sqlexec.SQLExecutor) + + restrictedCtx, err := e.getSysSession() + if err != nil { + return err + } + defer e.releaseSysSession(restrictedCtx) + sqlExecutor := restrictedCtx.(sqlexec.SQLExecutor) if _, err := sqlExecutor.Execute(context.Background(), "begin"); err != nil { return err } @@ -291,7 +302,6 @@ func (e *SimpleExec) setDefaultRoleForCurrentUser(s *ast.SetDefaultRoleStmt) (er return
err } defer e.releaseSysSession(restrictedCtx) - sqlExecutor := restrictedCtx.(sqlexec.SQLExecutor) if _, err := sqlExecutor.Execute(context.Background(), "begin"); err != nil { @@ -554,8 +564,15 @@ func (e *SimpleExec) executeRevokeRole(s *ast.RevokeRoleStmt) error { } } + restrictedCtx, err := e.getSysSession() + if err != nil { + return err + } + defer e.releaseSysSession(restrictedCtx) + sqlExecutor := restrictedCtx.(sqlexec.SQLExecutor) + // begin a transaction to insert role graph edges. - if _, err := e.ctx.(sqlexec.SQLExecutor).Execute(context.Background(), "begin"); err != nil { + if _, err := sqlExecutor.Execute(context.Background(), "begin"); err != nil { return errors.Trace(err) } for _, user := range s.Users { @@ -564,7 +581,7 @@ func (e *SimpleExec) executeRevokeRole(s *ast.RevokeRoleStmt) error { return errors.Trace(err) } if !exists { - if _, err := e.ctx.(sqlexec.SQLExecutor).Execute(context.Background(), "rollback"); err != nil { + if _, err := sqlExecutor.Execute(context.Background(), "rollback"); err != nil { return errors.Trace(err) } return ErrCannotUser.GenWithStackByArgs("REVOKE ROLE", user.String()) @@ -574,22 +591,22 @@ func (e *SimpleExec) executeRevokeRole(s *ast.RevokeRoleStmt) error { role.Hostname = "%" } sql := fmt.Sprintf(`DELETE IGNORE FROM %s.%s WHERE FROM_HOST='%s' and FROM_USER='%s' and TO_HOST='%s' and TO_USER='%s'`, mysql.SystemDB, mysql.RoleEdgeTable, role.Hostname, role.Username, user.Hostname, user.Username) - if _, err := e.ctx.(sqlexec.SQLExecutor).Execute(context.Background(), sql); err != nil { - if _, err := e.ctx.(sqlexec.SQLExecutor).Execute(context.Background(), "rollback"); err != nil { + if _, err := sqlExecutor.Execute(context.Background(), sql); err != nil { + if _, err := sqlExecutor.Execute(context.Background(), "rollback"); err != nil { return errors.Trace(err) } return ErrCannotUser.GenWithStackByArgs("REVOKE ROLE", role.String()) } sql = fmt.Sprintf(`DELETE IGNORE FROM %s.%s WHERE DEFAULT_ROLE_HOST='%s' and DEFAULT_ROLE_USER='%s' and HOST='%s' and USER='%s'`, mysql.SystemDB, mysql.DefaultRoleTable, role.Hostname, role.Username, user.Hostname, user.Username) - if _, err := e.ctx.(sqlexec.SQLExecutor).Execute(context.Background(), sql); err != nil { - if _, err := e.ctx.(sqlexec.SQLExecutor).Execute(context.Background(), "rollback"); err != nil { + if _, err := sqlExecutor.Execute(context.Background(), sql); err != nil { + if _, err := sqlExecutor.Execute(context.Background(), "rollback"); err != nil { return errors.Trace(err) } return ErrCannotUser.GenWithStackByArgs("REVOKE ROLE", role.String()) } } } - if _, err := e.ctx.(sqlexec.SQLExecutor).Execute(context.Background(), "commit"); err != nil { + if _, err := sqlExecutor.Execute(context.Background(), "commit"); err != nil { return err } domain.GetDomain(e.ctx).NotifyUpdatePrivilege(e.ctx) @@ -770,29 +787,36 @@ func (e *SimpleExec) executeGrantRole(s *ast.GrantRoleStmt) error { } } + restrictedCtx, err := e.getSysSession() + if err != nil { + return err + } + defer e.releaseSysSession(restrictedCtx) + sqlExecutor := restrictedCtx.(sqlexec.SQLExecutor) + // begin a transaction to insert role graph edges. 
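// As in executeRevokeRole above, the transaction below now runs on the dedicated system session instead of the user's own session.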
- if _, err := e.ctx.(sqlexec.SQLExecutor).Execute(context.Background(), "begin"); err != nil { + if _, err := sqlExecutor.Execute(context.Background(), "begin"); err != nil { return err } for _, user := range s.Users { for _, role := range s.Roles { sql := fmt.Sprintf(`INSERT IGNORE INTO %s.%s (FROM_HOST, FROM_USER, TO_HOST, TO_USER) VALUES ('%s','%s','%s','%s')`, mysql.SystemDB, mysql.RoleEdgeTable, role.Hostname, role.Username, user.Hostname, user.Username) - if _, err := e.ctx.(sqlexec.SQLExecutor).Execute(context.Background(), sql); err != nil { + if _, err := sqlExecutor.Execute(context.Background(), sql); err != nil { failedUsers = append(failedUsers, user.String()) logutil.BgLogger().Error(fmt.Sprintf("Error occur when executing %s", sql)) - if _, err := e.ctx.(sqlexec.SQLExecutor).Execute(context.Background(), "rollback"); err != nil { + if _, err := sqlExecutor.Execute(context.Background(), "rollback"); err != nil { return err } return ErrCannotUser.GenWithStackByArgs("GRANT ROLE", user.String()) } } } - if _, err := e.ctx.(sqlexec.SQLExecutor).Execute(context.Background(), "commit"); err != nil { + if _, err := sqlExecutor.Execute(context.Background(), "commit"); err != nil { return err } - err := domain.GetDomain(e.ctx).PrivilegeHandle().Update(e.ctx.(sessionctx.Context)) - return err + domain.GetDomain(e.ctx).NotifyUpdatePrivilege(e.ctx) + return nil } func (e *SimpleExec) executeDropUser(s *ast.DropUserStmt) error { @@ -1024,7 +1048,7 @@ func (e *SimpleExec) executeDropStats(s *ast.DropStatsStmt) error { if err != nil { return err } - return h.Update(GetInfoSchema(e.ctx)) + return h.Update(infoschema.GetInfoSchema(e.ctx)) } func (e *SimpleExec) autoNewTxn() bool { diff --git a/executor/simple_test.go b/executor/simple_test.go index f237c494b6284..1fe1fbba14d80 100644 --- a/executor/simple_test.go +++ b/executor/simple_test.go @@ -188,6 +188,31 @@ func (s *testSuite3) TestRole(c *C) { tk.MustExec("SET ROLE NONE") } +func (s *testSuite3) TestRoleAdmin(c *C) { + tk := testkit.NewTestKit(c, s.store) + tk.MustExec("CREATE USER 'testRoleAdmin';") + tk.MustExec("CREATE ROLE 'targetRole';") + + // Create a new session. + se, err := session.CreateSession4Test(s.store) + c.Check(err, IsNil) + defer se.Close() + c.Assert(se.Auth(&auth.UserIdentity{Username: "testRoleAdmin", Hostname: "localhost"}, nil, nil), IsTrue) + + ctx := context.Background() + _, err = se.Execute(ctx, "GRANT `targetRole` TO `testRoleAdmin`;") + c.Assert(err, NotNil) + + tk.MustExec("GRANT SUPER ON *.* TO `testRoleAdmin`;") + _, err = se.Execute(ctx, "GRANT `targetRole` TO `testRoleAdmin`;") + c.Assert(err, IsNil) + _, err = se.Execute(ctx, "REVOKE `targetRole` FROM `testRoleAdmin`;") + c.Assert(err, IsNil) + + tk.MustExec("DROP USER 'testRoleAdmin';") + tk.MustExec("DROP ROLE 'targetRole';") +} + func (s *testSuite3) TestDefaultRole(c *C) { tk := testkit.NewTestKit(c, s.store) diff --git a/executor/split.go b/executor/split.go index 8d2d23884322e..a446f27e3ffa9 100755 --- a/executor/split.go +++ b/executor/split.go @@ -88,7 +88,7 @@ const checkScatterRegionFinishBackOff = 50 // splitIndexRegion is used to split index regions. 
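// It is a no-op when the backing store cannot split regions, i.e. it is not a kv.SplittableStore.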
func (e *SplitIndexRegionExec) splitIndexRegion(ctx context.Context) error { store := e.ctx.GetStore() - s, ok := store.(kv.SplitableStore) + s, ok := store.(kv.SplittableStore) if !ok { return nil } @@ -359,7 +359,7 @@ func (e *SplitTableRegionExec) Next(ctx context.Context, chk *chunk.Chunk) error func (e *SplitTableRegionExec) splitTableRegion(ctx context.Context) error { store := e.ctx.GetStore() - s, ok := store.(kv.SplitableStore) + s, ok := store.(kv.SplittableStore) if !ok { return nil } @@ -387,7 +387,7 @@ func (e *SplitTableRegionExec) splitTableRegion(ctx context.Context) error { return nil } -func waitScatterRegionFinish(ctxWithTimeout context.Context, sctx sessionctx.Context, startTime time.Time, store kv.SplitableStore, regionIDs []uint64, tableName, indexName string) int { +func waitScatterRegionFinish(ctxWithTimeout context.Context, sctx sessionctx.Context, startTime time.Time, store kv.SplittableStore, regionIDs []uint64, tableName, indexName string) int { remainMillisecond := 0 finishScatterNum := 0 for _, regionID := range regionIDs { @@ -580,7 +580,7 @@ type regionMeta struct { approximateKeys int64 } -func getPhysicalTableRegions(physicalTableID int64, tableInfo *model.TableInfo, tikvStore tikv.Storage, s kv.SplitableStore, uniqueRegionMap map[uint64]struct{}) ([]regionMeta, error) { +func getPhysicalTableRegions(physicalTableID int64, tableInfo *model.TableInfo, tikvStore tikv.Storage, s kv.SplittableStore, uniqueRegionMap map[uint64]struct{}) ([]regionMeta, error) { if uniqueRegionMap == nil { uniqueRegionMap = make(map[uint64]struct{}) } @@ -623,7 +623,7 @@ func getPhysicalTableRegions(physicalTableID int64, tableInfo *model.TableInfo, return regions, nil } -func getPhysicalIndexRegions(physicalTableID int64, indexInfo *model.IndexInfo, tikvStore tikv.Storage, s kv.SplitableStore, uniqueRegionMap map[uint64]struct{}) ([]regionMeta, error) { +func getPhysicalIndexRegions(physicalTableID int64, indexInfo *model.IndexInfo, tikvStore tikv.Storage, s kv.SplittableStore, uniqueRegionMap map[uint64]struct{}) ([]regionMeta, error) { if uniqueRegionMap == nil { uniqueRegionMap = make(map[uint64]struct{}) } @@ -648,7 +648,7 @@ func getPhysicalIndexRegions(physicalTableID int64, indexInfo *model.IndexInfo, return indexRegions, nil } -func checkRegionsStatus(store kv.SplitableStore, regions []regionMeta) error { +func checkRegionsStatus(store kv.SplittableStore, regions []regionMeta) error { for i := range regions { scattering, err := store.CheckRegionInScattering(regions[i].region.Id) if err != nil { diff --git a/executor/statement_context_test.go b/executor/statement_context_test.go index 46c73907ec4bb..3bb049c96889c 100644 --- a/executor/statement_context_test.go +++ b/executor/statement_context_test.go @@ -72,7 +72,7 @@ func (s *testSuite1) TestStatementContext(c *C) { tk.MustExec(strictModeSQL) _, err = tk.Exec("insert sc2 values (unhex('4040ffff'))") c.Assert(err, NotNil) - c.Assert(terror.ErrorEqual(err, table.ErrTruncateWrongValue), IsTrue, Commentf("err %v", err)) + c.Assert(terror.ErrorEqual(err, table.ErrTruncatedWrongValueForField), IsTrue, Commentf("err %v", err)) tk.MustExec("set @@tidb_skip_utf8_check = '1'") _, err = tk.Exec("insert sc2 values (unhex('4040ffff'))") @@ -98,10 +98,10 @@ func (s *testSuite1) TestStatementContext(c *C) { tk.MustExec(strictModeSQL) _, err = tk.Exec("insert t1 values (unhex('f09f8c80'))") c.Assert(err, NotNil) - c.Assert(terror.ErrorEqual(err, table.ErrTruncateWrongValue), IsTrue, Commentf("err %v", err)) + 
c.Assert(terror.ErrorEqual(err, table.ErrTruncatedWrongValueForField), IsTrue, Commentf("err %v", err)) _, err = tk.Exec("insert t1 values (unhex('F0A48BAE'))") c.Assert(err, NotNil) - c.Assert(terror.ErrorEqual(err, table.ErrTruncateWrongValue), IsTrue, Commentf("err %v", err)) + c.Assert(terror.ErrorEqual(err, table.ErrTruncatedWrongValueForField), IsTrue, Commentf("err %v", err)) old := config.GetGlobalConfig() conf := *old conf.CheckMb4ValueInUTF8 = false diff --git a/executor/table_reader.go b/executor/table_reader.go index 3b45dd9be5efc..4b7a0559b9451 100644 --- a/executor/table_reader.go +++ b/executor/table_reader.go @@ -61,6 +61,7 @@ type TableReaderExecutor struct { // kvRanges are only use for union scan. kvRanges []kv.KeyRange dagPB *tipb.DAGRequest + startTS uint64 // columns are only required by union scan and virtual column. columns []*model.ColumnInfo @@ -207,6 +208,7 @@ func (e *TableReaderExecutor) buildResp(ctx context.Context, ranges []*ranger.Ra var builder distsql.RequestBuilder kvReq, err := builder.SetTableRanges(getPhysicalTableID(e.table), ranges, e.feedback). SetDAGRequest(e.dagPB). + SetStartTS(e.startTS). SetDesc(e.desc). SetKeepOrder(e.keepOrder). SetStreaming(e.streaming). diff --git a/executor/table_readers_required_rows_test.go b/executor/table_readers_required_rows_test.go index a34b36f80c40d..9e5aab6b53fa1 100644 --- a/executor/table_readers_required_rows_test.go +++ b/executor/table_readers_required_rows_test.go @@ -115,7 +115,7 @@ func mockSelectResult(ctx context.Context, sctx sessionctx.Context, kvReq *kv.Re func buildTableReader(sctx sessionctx.Context) Executor { e := &TableReaderExecutor{ baseExecutor: buildMockBaseExec(sctx), - table: &tables.Table{}, + table: &tables.TableCommon{}, dagPB: buildMockDAGRequest(sctx), selectResultHook: selectResultHook{mockSelectResult}, } diff --git a/executor/window.go b/executor/window.go index 12cfb1797ee81..f99df0afc0dd3 100644 --- a/executor/window.go +++ b/executor/window.go @@ -30,9 +30,9 @@ import ( type WindowExec struct { baseExecutor - groupChecker *groupChecker - // inputIter is the iterator of child chunks - inputIter *chunk.Iterator4Chunk + groupChecker *vecGroupChecker + // childResult stores the child chunk + childResult *chunk.Chunk // executed indicates the child executor is drained or something unexpected happened. 
executed bool // resultChunks stores the chunks to return @@ -74,8 +74,8 @@ func (e *WindowExec) preparedChunkAvailable() bool { func (e *WindowExec) consumeOneGroup(ctx context.Context) error { var groupRows []chunk.Row - for { - eof, err := e.fetchChildIfNecessary(ctx) + if e.groupChecker.isExhausted() { + eof, err := e.fetchChild(ctx) if err != nil { return errors.Trace(err) } @@ -83,17 +83,41 @@ func (e *WindowExec) consumeOneGroup(ctx context.Context) error { e.executed = true return e.consumeGroupRows(groupRows) } - for inputRow := e.inputIter.Current(); inputRow != e.inputIter.End(); inputRow = e.inputIter.Next() { - meetNewGroup, err := e.groupChecker.meetNewGroup(inputRow) - if err != nil { - return errors.Trace(err) - } - if meetNewGroup { - return e.consumeGroupRows(groupRows) + _, err = e.groupChecker.splitIntoGroups(e.childResult) + if err != nil { + return errors.Trace(err) + } + } + begin, end := e.groupChecker.getNextGroup() + for i := begin; i < end; i++ { + groupRows = append(groupRows, e.childResult.GetRow(i)) + } + + for meetLastGroup := end == e.childResult.NumRows(); meetLastGroup; { + meetLastGroup = false + eof, err := e.fetchChild(ctx) + if err != nil { + return errors.Trace(err) + } + if eof { + e.executed = true + return e.consumeGroupRows(groupRows) + } + + isFirstGroupSameAsPrev, err := e.groupChecker.splitIntoGroups(e.childResult) + if err != nil { + return errors.Trace(err) + } + + if isFirstGroupSameAsPrev { + begin, end = e.groupChecker.getNextGroup() + for i := begin; i < end; i++ { + groupRows = append(groupRows, e.childResult.GetRow(i)) } - groupRows = append(groupRows, inputRow) + meetLastGroup = end == e.childResult.NumRows() } } + return e.consumeGroupRows(groupRows) } func (e *WindowExec) consumeGroupRows(groupRows []chunk.Row) (err error) { @@ -125,11 +149,7 @@ func (e *WindowExec) consumeGroupRows(groupRows []chunk.Row) (err error) { return nil } -func (e *WindowExec) fetchChildIfNecessary(ctx context.Context) (EOF bool, err error) { - if e.inputIter != nil && e.inputIter.Current() != e.inputIter.End() { - return false, nil - } - +func (e *WindowExec) fetchChild(ctx context.Context) (EOF bool, err error) { childResult := newFirstChunk(e.children[0]) err = Next(ctx, e.children[0], childResult) if err != nil { @@ -149,8 +169,7 @@ func (e *WindowExec) fetchChildIfNecessary(ctx context.Context) (EOF bool, err e e.resultChunks = append(e.resultChunks, resultChk) e.remainingRowsInChunk = append(e.remainingRowsInChunk, numRows) - e.inputIter = chunk.NewIterator4Chunk(childResult) - e.inputIter.Begin() + e.childResult = childResult return false, nil } diff --git a/expression/aggregation/agg_to_pb.go b/expression/aggregation/agg_to_pb.go index 59d09db237701..9fc89f19fd3e5 100644 --- a/expression/aggregation/agg_to_pb.go +++ b/expression/aggregation/agg_to_pb.go @@ -14,10 +14,12 @@ package aggregation import ( + "github.com/pingcap/errors" "github.com/pingcap/parser/ast" "github.com/pingcap/tidb/expression" "github.com/pingcap/tidb/kv" "github.com/pingcap/tidb/sessionctx/stmtctx" + "github.com/pingcap/tidb/types" "github.com/pingcap/tipb/go-tipb" ) @@ -64,3 +66,47 @@ func AggFuncToPBExpr(sc *stmtctx.StatementContext, client kv.Client, aggFunc *Ag } return &tipb.Expr{Tp: tp, Children: children, FieldType: expression.ToPBFieldType(aggFunc.RetTp)} } + +// PBExprToAggFuncDesc converts pb to aggregate function. 
+func PBExprToAggFuncDesc(sc *stmtctx.StatementContext, aggFunc *tipb.Expr, fieldTps []*types.FieldType) (*AggFuncDesc, error) { + var name string + switch aggFunc.Tp { + case tipb.ExprType_Count: + name = ast.AggFuncCount + case tipb.ExprType_First: + name = ast.AggFuncFirstRow + case tipb.ExprType_GroupConcat: + name = ast.AggFuncGroupConcat + case tipb.ExprType_Max: + name = ast.AggFuncMax + case tipb.ExprType_Min: + name = ast.AggFuncMin + case tipb.ExprType_Sum: + name = ast.AggFuncSum + case tipb.ExprType_Avg: + name = ast.AggFuncAvg + case tipb.ExprType_Agg_BitOr: + name = ast.AggFuncBitOr + case tipb.ExprType_Agg_BitXor: + name = ast.AggFuncBitXor + case tipb.ExprType_Agg_BitAnd: + name = ast.AggFuncBitAnd + default: + return nil, errors.Errorf("unknown aggregation function type: %v", aggFunc.Tp) + } + + args, err := expression.PBToExprs(aggFunc.Children, fieldTps, sc) + if err != nil { + return nil, err + } + base := baseFuncDesc{ + Name: name, + Args: args, + RetTp: expression.FieldTypeFromPB(aggFunc.FieldType), + } + return &AggFuncDesc{ + baseFuncDesc: base, + Mode: Partial1Mode, + HasDistinct: false, + }, nil +} diff --git a/expression/bench_test.go b/expression/bench_test.go index e75ad2a2bab86..d31493ef6c8f0 100644 --- a/expression/bench_test.go +++ b/expression/bench_test.go @@ -320,6 +320,16 @@ func (g *jsonStringGener) gen() interface{} { return j.String() } +type decimalStringGener struct{} + +func (g *decimalStringGener) gen() interface{} { + tempDecimal := new(types.MyDecimal) + if err := tempDecimal.FromFloat64(rand.Float64()); err != nil { + panic(err) + } + return tempDecimal.String() +} + type jsonTimeGener struct{} func (g *jsonTimeGener) gen() interface{} { @@ -602,15 +612,15 @@ func (g *dateTimeStrGener) gen() interface{} { return dataTimeStr } -// timeStrGener is used to generate strings which are time format -type timeStrGener struct { +// dateStrGener is used to generate strings in date format +type dateStrGener struct { Year int Month int Day int NullRation float64 } -func (g *timeStrGener) gen() interface{} { +func (g *dateStrGener) gen() interface{} { if g.NullRation > 1e-6 && rand.Float64() < g.NullRation { return nil } @@ -628,12 +638,12 @@ func (g *dateStrGener) gen() interface{} { return fmt.Sprintf("%d-%d-%d", g.Year, g.Month, g.Day) } -// dateStrGener is used to generate strings which are data format -type dateStrGener struct { +// timeStrGener is used to generate strings in time format +type timeStrGener struct { nullRation float64 } -func (g *dateStrGener) gen() interface{} { +func (g *timeStrGener) gen() interface{} { if g.nullRation > 1e-6 && rand.Float64() < g.nullRation { return nil } @@ -644,6 +654,24 @@ func (g *dateStrGener) gen() interface{} { return fmt.Sprintf("%d:%d:%d", hour, minute, second) } +type dateTimeIntGener struct { + dateTimeGener + nullRation float64 +} + +func (g *dateTimeIntGener) gen() interface{} { + if rand.Float64() < g.nullRation { + return nil + } + + t := g.dateTimeGener.gen().(types.Time) + num, err := t.ToNumber().ToInt() + if err != nil { + panic(err) + } + return num +} + // constStrGener always returns the given string type constStrGener struct { s string @@ -659,6 +687,19 @@ func (g *randDurInt) gen() interface{} { return int64(rand.Intn(types.TimeMaxHour)*10000 + rand.Intn(60)*100 + rand.Intn(60)) } +type randDurReal struct{} + +func (g *randDurReal) gen() interface{} { + return float64(rand.Intn(types.TimeMaxHour)*10000 + rand.Intn(60)*100 + rand.Intn(60)) +} + +type randDurDecimal struct{} +
+func (g *randDurDecimal) gen() interface{} { + d := new(types.MyDecimal) + if err := d.FromFloat64(float64(rand.Intn(types.TimeMaxHour)*10000 + rand.Intn(60)*100 + rand.Intn(60))); err != nil { + panic(err) + } + return d +} + // locationGener is used to generate location for the built-in function GetFormat. type locationGener struct { nullRation float64 @@ -728,6 +769,9 @@ type vecExprBenchCase struct { aesModes string // constants are used to generate constant data for children[i]. constants []*Constant + // chunkSize specifies the chunk size of the children; the maximum is 1024. + // This field is optional and defaults to 1024. + chunkSize int } type vecExprBenchCases map[string][]vecExprBenchCase @@ -741,7 +785,7 @@ func fillColumn(eType types.EvalType, chk *chunk.Chunk, colIdx int, testCase vec } func fillColumnWithGener(eType types.EvalType, chk *chunk.Chunk, colIdx int, gen dataGenerator) { - batchSize := 1024 + batchSize := chk.Capacity() if gen == nil { gen = &defaultGener{0.2, eType} } @@ -819,8 +863,12 @@ func genVecExprBenchCase(ctx sessionctx.Context, funcName string, testCase vecEx fts[i] = eType2FieldType(testCase.childrenTypes[i]) } } + if testCase.chunkSize <= 0 || testCase.chunkSize > 1024 { + testCase.chunkSize = 1024 + } cols := make([]Expression, len(testCase.childrenTypes)) - input = chunk.New(fts, 1024, 1024) + input = chunk.New(fts, testCase.chunkSize, testCase.chunkSize) + input.NumRows() for i, eType := range testCase.childrenTypes { fillColumn(eType, input, i, testCase) if i < len(testCase.constants) && testCase.constants[i] != nil { @@ -835,7 +883,7 @@ func genVecExprBenchCase(ctx sessionctx.Context, funcName string, testCase vecEx panic(err) } - output = chunk.New([]*types.FieldType{eType2FieldType(expr.GetType().EvalType())}, 1024, 1024) + output = chunk.New([]*types.FieldType{eType2FieldType(expr.GetType().EvalType())}, testCase.chunkSize, testCase.chunkSize) return expr, fts, input, output } @@ -956,7 +1004,10 @@ func genVecBuiltinFuncBenchCase(ctx sessionctx.Context, funcName string, testCas } } cols := make([]Expression, childrenNumber) - input = chunk.New(fts, 1024, 1024) + if testCase.chunkSize <= 0 || testCase.chunkSize > 1024 { + testCase.chunkSize = 1024 + } + input = chunk.New(fts, testCase.chunkSize, testCase.chunkSize) for i, eType := range testCase.childrenTypes { fillColumn(eType, input, i, testCase) if i < len(testCase.constants) && testCase.constants[i] != nil { @@ -966,7 +1017,7 @@ func genVecBuiltinFuncBenchCase(ctx sessionctx.Context, funcName string, testCas } } if len(cols) == 0 { - input.SetNumVirtualRows(1024) + input.SetNumVirtualRows(testCase.chunkSize) } var err error @@ -996,7 +1047,7 @@ func genVecBuiltinFuncBenchCase(ctx sessionctx.Context, funcName string, testCas if err != nil { panic(err) } - result = chunk.NewColumn(eType2FieldType(testCase.retEvalType), 1024) + result = chunk.NewColumn(eType2FieldType(testCase.retEvalType), testCase.chunkSize) // Mess up the output to make sure vecEvalXXX to call ResizeXXX/ReserveXXX itself.
result.AppendNull() return baseFunc, fts, input, result @@ -1016,7 +1067,7 @@ func removeTestOptions(args []string) []string { // args contains '-test.timeout=' option for example // excluding it to be able to run all tests for _, arg := range args { - if strings.HasPrefix(arg, "builtin") { + if strings.HasPrefix(arg, "builtin") || IsFunctionSupported(arg) { argList = append(argList, arg) } } @@ -1068,7 +1119,7 @@ func testVectorizedBuiltinFunc(c *C, vecExprCases vecExprBenchCases) { tmp := strings.Split(baseFuncName, ".") baseFuncName = tmp[len(tmp)-1] - if !testAll && testFunc[baseFuncName] != true { + if !testAll && (testFunc[baseFuncName] != true && testFunc[funcName] != true) { continue } // do not forget to implement the vectorized method. @@ -1288,7 +1339,7 @@ func benchmarkVectorizedBuiltinFunc(b *testing.B, vecExprCases vecExprBenchCases tmp := strings.Split(baseFuncName, ".") baseFuncName = tmp[len(tmp)-1] - if !testAll && testFunc[baseFuncName] != true && testFunc[funcName] != true { continue } diff --git a/expression/builtin.go b/expression/builtin.go index 001f91e471597..3b1f6e9169bcb 100644 --- a/expression/builtin.go +++ b/expression/builtin.go @@ -24,6 +24,8 @@ package expression import ( + "sort" + "strings" "sync" "github.com/pingcap/errors" @@ -32,6 +34,7 @@ import ( "github.com/pingcap/parser/mysql" "github.com/pingcap/parser/opcode" "github.com/pingcap/tidb/sessionctx" + "github.com/pingcap/tidb/sessionctx/stmtctx" "github.com/pingcap/tidb/types" "github.com/pingcap/tidb/types/json" "github.com/pingcap/tidb/util/chunk" @@ -48,6 +51,9 @@ type baseBuiltinFunc struct { childrenVectorizedOnce *sync.Once childrenVectorized bool + + childrenReversedOnce *sync.Once + childrenReversed bool } func (b *baseBuiltinFunc) PbCode() tipb.ScalarFuncSig { @@ -74,6 +80,7 @@ func newBaseBuiltinFunc(ctx sessionctx.Context, args []Expression) baseBuiltinFu return baseBuiltinFunc{ bufAllocator: newLocalSliceBuffer(len(args)), childrenVectorizedOnce: new(sync.Once), + childrenReversedOnce: new(sync.Once), args: args, ctx: ctx, @@ -179,6 +186,7 @@ func newBaseBuiltinFuncWithTp(ctx sessionctx.Context, args []Expression, retType return baseBuiltinFunc{ bufAllocator: newLocalSliceBuffer(len(args)), childrenVectorizedOnce: new(sync.Once), + childrenReversedOnce: new(sync.Once), args: args, ctx: ctx, @@ -250,6 +258,27 @@ func (b *baseBuiltinFunc) vectorized() bool { return false } +func (b *baseBuiltinFunc) supportReverseEval() bool { + return false +} + +func (b *baseBuiltinFunc) isChildrenReversed() bool { + b.childrenReversedOnce.Do(func() { + b.childrenReversed = true + for _, arg := range b.args { + if !arg.SupportReverseEval() { + b.childrenReversed = false + break + } + } + }) + return b.childrenReversed +} + +func (b *baseBuiltinFunc) reverseEval(sc *stmtctx.StatementContext, res types.Datum, rType types.RoundingType) (types.Datum, error) { + return types.Datum{}, errors.Errorf("baseBuiltinFunc.reverseEval() should never be called, please contact the TiDB team for help") +} + func (b *baseBuiltinFunc) isChildrenVectorized() bool { b.childrenVectorizedOnce.Do(func() { b.childrenVectorized = true @@ -305,6 +334,7 @@ func (b *baseBuiltinFunc) cloneFrom(from *baseBuiltinFunc) { b.pbCode = from.pbCode b.bufAllocator = newLocalSliceBuffer(len(b.args)) b.childrenVectorizedOnce = new(sync.Once) + b.childrenReversedOnce = new(sync.Once) } func (b *baseBuiltinFunc) Clone() builtinFunc { @@ -372,9 +402,22 @@ type vecBuiltinFunc interface {
vecEvalJSON(input *chunk.Chunk, result *chunk.Column) error } +// reverseBuiltinFunc evaluates the single column value in the function when given a result for the expression. +// For example, if the builtinFunc is builtinArithmeticPlusRealSig(2.3, builtinArithmeticMinusRealSig(Column, 3.4)) +// and the given result is 1.0, then reverseEval should evaluate the column value as 1.0 - 2.3 + 3.4 = 2.1. +type reverseBuiltinFunc interface { + // supportReverseEval checks whether the builtinFunc supports reverse evaluation. + supportReverseEval() bool + // isChildrenReversed checks whether the builtinFunc's children support reverse evaluation. + isChildrenReversed() bool + // reverseEval evaluates the single column value with the given function result. + reverseEval(sc *stmtctx.StatementContext, res types.Datum, rType types.RoundingType) (val types.Datum, err error) +} + // builtinFunc stands for a particular function signature. type builtinFunc interface { vecBuiltinFunc + reverseBuiltinFunc // evalInt evaluates int result of builtinFunc by given row. evalInt(row chunk.Row) (val int64, isNull bool, err error) @@ -734,3 +777,30 @@ func IsFunctionSupported(name string) bool { _, ok := funcs[name] return ok } + +// GetBuiltinList returns a sorted list of builtin function names. +func GetBuiltinList() []string { + res := make([]string, 0, len(funcs)) + notImplementedFunctions := []string{ast.RowFunc} + for funcName := range funcs { + skipFunc := false + // Skip unimplemented functions + for _, notImplFunc := range notImplementedFunctions { + if funcName == notImplFunc { + skipFunc = true + } + } + // Skip literal functions + // (their names are not readable: 'tidb`.(dateliteral, for example) + // See: https://github.com/pingcap/parser/pull/591 + if strings.HasPrefix(funcName, "'tidb`.(") { + skipFunc = true + } + if skipFunc { + continue + } + res = append(res, funcName) + } + sort.Strings(res) + return res +} diff --git a/expression/builtin_cast.go b/expression/builtin_cast.go index ff556cfc09a3d..955c9f1a92364 100644 --- a/expression/builtin_cast.go +++ b/expression/builtin_cast.go @@ -1458,6 +1458,9 @@ func (b *builtinCastDurationAsDecimalSig) evalDecimal(row chunk.Row) (res *types if isNull || err != nil { return res, isNull, err } + if val.Fsp, err = types.CheckFsp(int(val.Fsp)); err != nil { + return res, false, err + } sc := b.ctx.GetSessionVars().StmtCtx res, err = types.ProduceDecWithSpecifiedTp(val.ToNumber(), b.tp, sc) return res, false, err diff --git a/expression/builtin_cast_vec.go b/expression/builtin_cast_vec.go index 16b452415abc4..8add713d6053a 100644 --- a/expression/builtin_cast_vec.go +++ b/expression/builtin_cast_vec.go @@ -881,11 +881,42 @@ func (b *builtinCastStringAsDurationSig) vecEvalDuration(input *chunk.Chunk, res } func (b *builtinCastDurationAsDecimalSig) vectorized() bool { - return false + return true } func (b *builtinCastDurationAsDecimalSig) vecEvalDecimal(input *chunk.Chunk, result *chunk.Column) error { - return errors.Errorf("not implemented") + n := input.NumRows() + buf, err := b.bufAllocator.get(types.ETDuration, n) + if err != nil { + return err + } + defer b.bufAllocator.put(buf) + if err := b.args[0].VecEvalDuration(b.ctx, input, buf); err != nil { + return err + } + result.ResizeDecimal(n, false) + result.MergeNulls(buf) + d64s := result.Decimals() + var duration types.Duration + ds := buf.GoDurations() + sc := b.ctx.GetSessionVars().StmtCtx + fsp := int8(b.args[0].GetType().Decimal) + if fsp, err = types.CheckFsp(int(fsp)); err != nil { + return err + } + for i := 0;
i < n; i++ { + if result.IsNull(i) { + continue + } + duration.Duration = ds[i] + duration.Fsp = fsp + res, err := types.ProduceDecWithSpecifiedTp(duration.ToNumber(), b.tp, sc) + if err != nil { + return err + } + d64s[i] = *res + } + return nil } func (b *builtinCastIntAsDecimalSig) vectorized() bool { @@ -1046,11 +1077,34 @@ func (b *builtinCastJSONAsIntSig) vecEvalInt(input *chunk.Chunk, result *chunk.C } func (b *builtinCastRealAsDurationSig) vectorized() bool { - return false + return true } func (b *builtinCastRealAsDurationSig) vecEvalDuration(input *chunk.Chunk, result *chunk.Column) error { - return errors.Errorf("not implemented") + n := input.NumRows() + buf, err := b.bufAllocator.get(types.ETReal, n) + if err != nil { + return err + } + defer b.bufAllocator.put(buf) + if err := b.args[0].VecEvalReal(b.ctx, input, buf); err != nil { + return err + } + result.ResizeGoDuration(n, false) + result.MergeNulls(buf) + f64s := buf.Float64s() + ds := result.GoDurations() + for i := 0; i < n; i++ { + if result.IsNull(i) { + continue + } + dur, err := types.ParseDuration(b.ctx.GetSessionVars().StmtCtx, strconv.FormatFloat(f64s[i], 'f', -1, 64), int8(b.tp.Decimal)) + if err != nil { + return err + } + ds[i] = dur.Duration + } + return nil } func (b *builtinCastTimeAsDurationSig) vectorized() bool { @@ -1407,11 +1461,43 @@ func (b *builtinCastStringAsRealSig) vecEvalReal(input *chunk.Chunk, result *chu } func (b *builtinCastStringAsDecimalSig) vectorized() bool { - return false + return true } func (b *builtinCastStringAsDecimalSig) vecEvalDecimal(input *chunk.Chunk, result *chunk.Column) error { - return errors.Errorf("not implemented") + if IsBinaryLiteral(b.args[0]) { + return b.args[0].VecEvalDecimal(b.ctx, input, result) + } + n := input.NumRows() + buf, err := b.bufAllocator.get(types.ETString, n) + if err != nil { + return err + } + defer b.bufAllocator.put(buf) + if err = b.args[0].VecEvalString(b.ctx, input, buf); err != nil { + return err + } + result.ResizeDecimal(n, false) + result.MergeNulls(buf) + res := result.Decimals() + stmtCtx := b.ctx.GetSessionVars().StmtCtx + for i := 0; i < n; i++ { + if result.IsNull(i) { + continue + } + dec := new(types.MyDecimal) + if !(b.inUnion && mysql.HasUnsignedFlag(b.tp.Flag) && dec.IsNegative()) { + if err := stmtCtx.HandleTruncate(dec.FromString([]byte(buf.GetString(i)))); err != nil { + return err + } + dec, err := types.ProduceDecWithSpecifiedTp(dec, b.tp, stmtCtx) + if err != nil { + return err + } + res[i] = *dec + } + } + return nil } func (b *builtinCastStringAsTimeSig) vectorized() bool { @@ -1509,11 +1595,44 @@ func (b *builtinCastDecimalAsIntSig) vecEvalInt(input *chunk.Chunk, result *chun } func (b *builtinCastDecimalAsDurationSig) vectorized() bool { - return false + return true } func (b *builtinCastDecimalAsDurationSig) vecEvalDuration(input *chunk.Chunk, result *chunk.Column) error { - return errors.Errorf("not implemented") + n := input.NumRows() + buf, err := b.bufAllocator.get(types.ETDecimal, n) + if err != nil { + return err + } + defer b.bufAllocator.put(buf) + if err := b.args[0].VecEvalDecimal(b.ctx, input, buf); err != nil { + return err + } + + result.ResizeGoDuration(n, false) + result.MergeNulls(buf) + args := buf.Decimals() + ds := result.GoDurations() + for i := 0; i < n; i++ { + if result.IsNull(i) { + continue + } + dur, err := types.ParseDuration(b.ctx.GetSessionVars().StmtCtx, string(args[i].ToString()), int8(b.tp.Decimal)) + if err != nil { + if types.ErrTruncatedWrongVal.Equal(err) { + err = 
b.ctx.GetSessionVars().StmtCtx.HandleTruncate(err) + } + if err != nil { + return err + } + if dur == types.ZeroDuration { + result.SetNull(i, true) + continue + } + } + ds[i] = dur.Duration + } + return nil } func (b *builtinCastStringAsStringSig) vectorized() bool { diff --git a/expression/builtin_cast_vec_test.go b/expression/builtin_cast_vec_test.go index ac40dece6b20d..65a4c46650b32 100644 --- a/expression/builtin_cast_vec_test.go +++ b/expression/builtin_cast_vec_test.go @@ -28,7 +28,9 @@ import ( var vecBuiltinCastCases = map[string][]vecExprBenchCase{ ast.Cast: { {retEvalType: types.ETDecimal, childrenTypes: []types.EvalType{types.ETJson}, geners: []dataGenerator{&decimalJSONGener{}}}, + {retEvalType: types.ETDecimal, childrenTypes: []types.EvalType{types.ETString}, geners: []dataGenerator{&decimalStringGener{}}}, {retEvalType: types.ETDecimal, childrenTypes: []types.EvalType{types.ETReal}}, + {retEvalType: types.ETDecimal, childrenTypes: []types.EvalType{types.ETDuration}}, {retEvalType: types.ETInt, childrenTypes: []types.EvalType{types.ETInt}}, {retEvalType: types.ETInt, childrenTypes: []types.EvalType{types.ETReal}}, {retEvalType: types.ETInt, childrenTypes: []types.EvalType{types.ETDecimal}}, @@ -47,6 +49,8 @@ var vecBuiltinCastCases = map[string][]vecExprBenchCase{ }, {retEvalType: types.ETReal, childrenTypes: []types.EvalType{types.ETInt}}, {retEvalType: types.ETDuration, childrenTypes: []types.EvalType{types.ETInt}, geners: []dataGenerator{new(randDurInt)}}, + {retEvalType: types.ETDuration, childrenTypes: []types.EvalType{types.ETReal}, geners: []dataGenerator{new(randDurReal)}}, + {retEvalType: types.ETDuration, childrenTypes: []types.EvalType{types.ETDecimal}, geners: []dataGenerator{new(randDurDecimal)}}, {retEvalType: types.ETReal, childrenTypes: []types.EvalType{types.ETReal}}, {retEvalType: types.ETReal, childrenTypes: []types.EvalType{types.ETJson}}, {retEvalType: types.ETReal, childrenTypes: []types.EvalType{types.ETDecimal}}, @@ -83,8 +87,8 @@ var vecBuiltinCastCases = map[string][]vecExprBenchCase{ {retEvalType: types.ETDatetime, childrenTypes: []types.EvalType{types.ETString}, geners: []dataGenerator{ &dateTimeStrGener{}, - &timeStrGener{}, &dateStrGener{}, + &timeStrGener{}, }}, {retEvalType: types.ETDatetime, childrenTypes: []types.EvalType{types.ETDuration}}, {retEvalType: types.ETDatetime, childrenTypes: []types.EvalType{types.ETDatetime}}, diff --git a/expression/builtin_compare.go b/expression/builtin_compare.go index b91ab400334f7..d0a430730959e 100644 --- a/expression/builtin_compare.go +++ b/expression/builtin_compare.go @@ -664,7 +664,7 @@ func (b *builtinLeastIntSig) Clone() builtinFunc { } // evalInt evals a builtinLeastIntSig. 
-// See http://dev.mysql.com/doc/refman/5.7/en/comparison-operators.html#functionleast +// See http://dev.mysql.com/doc/refman/5.7/en/comparison-operators.html#function_least func (b *builtinLeastIntSig) evalInt(row chunk.Row) (min int64, isNull bool, err error) { min, isNull, err = b.args[0].EvalInt(b.ctx, row) if isNull || err != nil { diff --git a/expression/builtin_compare_vec.go b/expression/builtin_compare_vec.go index 838837fae399d..8dd927c1501eb 100644 --- a/expression/builtin_compare_vec.go +++ b/expression/builtin_compare_vec.go @@ -14,7 +14,6 @@ package expression import ( - "github.com/pingcap/errors" "github.com/pingcap/parser/mysql" "github.com/pingcap/tidb/types" "github.com/pingcap/tidb/util/chunk" @@ -707,11 +706,69 @@ func (b *builtinGreatestRealSig) vecEvalReal(input *chunk.Chunk, result *chunk.C } func (b *builtinLeastTimeSig) vectorized() bool { - return false + return true } func (b *builtinLeastTimeSig) vecEvalString(input *chunk.Chunk, result *chunk.Column) error { - return errors.Errorf("not implemented") + n := input.NumRows() + dst, err := b.bufAllocator.get(types.ETTimestamp, n) + if err != nil { + return err + } + defer b.bufAllocator.put(dst) + + sc := b.ctx.GetSessionVars().StmtCtx + dst.ResizeTime(n, false) + dstTimes := dst.Times() + for i := 0; i < n; i++ { + dstTimes[i] = types.Time{ + Time: types.MaxDatetime, + Type: mysql.TypeDatetime, + Fsp: types.DefaultFsp, + } + } + var argTime types.Time + + var findInvalidTime []bool = make([]bool, n) + var invalidValue []string = make([]string, n) + + for j := 0; j < len(b.args); j++ { + if err := b.args[j].VecEvalString(b.ctx, input, result); err != nil { + return err + } + dst.MergeNulls(result) + for i := 0; i < n; i++ { + if dst.IsNull(i) { + continue + } + argTime, err = types.ParseDatetime(sc, result.GetString(i)) + if err != nil { + if err = handleInvalidTimeError(b.ctx, err); err != nil { + return err + } else if !findInvalidTime[i] { + invalidValue[i] = result.GetString(i) + findInvalidTime[i] = true + } + continue + } + if argTime.Compare(dstTimes[i]) < 0 { + dstTimes[i] = argTime + } + } + } + result.ReserveString(n) + for i := 0; i < n; i++ { + if findInvalidTime[i] { + result.AppendString(invalidValue[i]) + continue + } + if dst.IsNull(i) { + result.AppendNull() + } else { + result.AppendString(dstTimes[i].String()) + } + } + return nil } func (b *builtinGreatestStringSig) vectorized() bool { diff --git a/expression/builtin_compare_vec_test.go b/expression/builtin_compare_vec_test.go index 62fa726b65910..a1ce86c0dd1e7 100644 --- a/expression/builtin_compare_vec_test.go +++ b/expression/builtin_compare_vec_test.go @@ -134,6 +134,7 @@ var vecBuiltinCompareCases = map[string][]vecExprBenchCase{ {retEvalType: types.ETInt, childrenTypes: []types.EvalType{types.ETInt, types.ETInt, types.ETInt}}, {retEvalType: types.ETReal, childrenTypes: []types.EvalType{types.ETReal, types.ETReal, types.ETReal}}, {retEvalType: types.ETString, childrenTypes: []types.EvalType{types.ETString, types.ETString}}, + {retEvalType: types.ETString, childrenTypes: []types.EvalType{types.ETDatetime, types.ETDatetime, types.ETDatetime}}, {retEvalType: types.ETString, childrenTypes: []types.EvalType{types.ETString, types.ETString, types.ETString}}, }, ast.Interval: { diff --git a/expression/builtin_info_vec.go b/expression/builtin_info_vec.go index 42638fcf76c9f..7b26f448ba023 100644 --- a/expression/builtin_info_vec.go +++ b/expression/builtin_info_vec.go @@ -191,11 +191,22 @@ func (b *builtinTiDBIsDDLOwnerSig) vecEvalInt(input 
*chunk.Chunk, result *chunk. } func (b *builtinFoundRowsSig) vectorized() bool { - return false + return true } func (b *builtinFoundRowsSig) vecEvalInt(input *chunk.Chunk, result *chunk.Column) error { - return errors.Errorf("not implemented") + data := b.ctx.GetSessionVars() + if data == nil { + return errors.Errorf("Missing session variable when eval builtin") + } + lastFoundRows := int64(data.LastFoundRows) + n := input.NumRows() + result.ResizeInt64(n, false) + i64s := result.Int64s() + for i := range i64s { + i64s[i] = lastFoundRows + } + return nil } func (b *builtinBenchmarkSig) vectorized() bool { diff --git a/expression/builtin_info_vec_test.go b/expression/builtin_info_vec_test.go index 017dc75fac36b..bd3d43a8d365c 100644 --- a/expression/builtin_info_vec_test.go +++ b/expression/builtin_info_vec_test.go @@ -53,7 +53,9 @@ var vecBuiltinInfoCases = map[string][]vecExprBenchCase{ ast.CurrentUser: { {retEvalType: types.ETString, childrenTypes: []types.EvalType{}}, }, - ast.FoundRows: {}, + ast.FoundRows: { + {retEvalType: types.ETInt}, + }, ast.Database: { {retEvalType: types.ETString, childrenTypes: []types.EvalType{}}, }, diff --git a/expression/builtin_json.go b/expression/builtin_json.go index aefbe93d7421e..0d7844ec0264f 100644 --- a/expression/builtin_json.go +++ b/expression/builtin_json.go @@ -15,6 +15,7 @@ package expression import ( json2 "encoding/json" + "strconv" "strings" "github.com/pingcap/errors" @@ -182,11 +183,21 @@ func (b *builtinJSONUnquoteSig) Clone() builtinFunc { return newSig } +func (c *jsonUnquoteFunctionClass) verifyArgs(args []Expression) error { + if err := c.baseFunctionClass.verifyArgs(args); err != nil { + return err + } + if evalType := args[0].GetType().EvalType(); evalType != types.ETString && evalType != types.ETJson { + return ErrIncorrectType.GenWithStackByArgs("1", "json_unquote") + } + return nil +} + func (c *jsonUnquoteFunctionClass) getFunction(ctx sessionctx.Context, args []Expression) (builtinFunc, error) { if err := c.verifyArgs(args); err != nil { return nil, err } - bf := newBaseBuiltinFuncWithTp(ctx, args, types.ETString, types.ETJson) + bf := newBaseBuiltinFuncWithTp(ctx, args, types.ETString, types.ETString) bf.tp.Flen = mysql.MaxFieldVarCharLength DisableParseJSONFlag4Expr(args[0]) sig := &builtinJSONUnquoteSig{bf} @@ -194,14 +205,16 @@ func (c *jsonUnquoteFunctionClass) getFunction(ctx sessionctx.Context, args []Ex return sig, nil } -func (b *builtinJSONUnquoteSig) evalString(row chunk.Row) (res string, isNull bool, err error) { - var j json.BinaryJSON - j, isNull, err = b.args[0].EvalJSON(b.ctx, row) +func (b *builtinJSONUnquoteSig) evalString(row chunk.Row) (string, bool, error) { + str, isNull, err := b.args[0].EvalString(b.ctx, row) if isNull || err != nil { return "", isNull, err } - res, err = j.Unquote() - return res, err != nil, err + str, err = json.UnquoteString(str) + if err != nil { + return "", false, err + } + return str, false, nil } type jsonSetFunctionClass struct { @@ -1022,24 +1035,33 @@ func (b *builtinJSONQuoteSig) Clone() builtinFunc { return newSig } +func (c *jsonQuoteFunctionClass) verifyArgs(args []Expression) error { + if err := c.baseFunctionClass.verifyArgs(args); err != nil { + return err + } + if evalType := args[0].GetType().EvalType(); evalType != types.ETString { + return ErrIncorrectType.GenWithStackByArgs("1", "json_quote") + } + return nil +} + func (c *jsonQuoteFunctionClass) getFunction(ctx sessionctx.Context, args []Expression) (builtinFunc, error) { if err := c.verifyArgs(args); err != 
nil { return nil, err } - bf := newBaseBuiltinFuncWithTp(ctx, args, types.ETString, types.ETJson) + bf := newBaseBuiltinFuncWithTp(ctx, args, types.ETString, types.ETString) DisableParseJSONFlag4Expr(args[0]) sig := &builtinJSONQuoteSig{bf} sig.setPbCode(tipb.ScalarFuncSig_JsonQuoteSig) return sig, nil } -func (b *builtinJSONQuoteSig) evalString(row chunk.Row) (res string, isNull bool, err error) { - var j json.BinaryJSON - j, isNull, err = b.args[0].EvalJSON(b.ctx, row) +func (b *builtinJSONQuoteSig) evalString(row chunk.Row) (string, bool, error) { + str, isNull, err := b.args[0].EvalString(b.ctx, row) if isNull || err != nil { return "", isNull, err } - return j.Quote(), false, nil + return strconv.Quote(str), false, nil } type jsonSearchFunctionClass struct { diff --git a/expression/builtin_json_vec.go b/expression/builtin_json_vec.go index 903a15e82cf6e..53db3b56ab779 100644 --- a/expression/builtin_json_vec.go +++ b/expression/builtin_json_vec.go @@ -14,6 +14,8 @@ package expression import ( + "strconv" + "github.com/pingcap/errors" "github.com/pingcap/parser/ast" "github.com/pingcap/tidb/sessionctx" @@ -299,12 +301,12 @@ func (b *builtinJSONQuoteSig) vectorized() bool { func (b *builtinJSONQuoteSig) vecEvalString(input *chunk.Chunk, result *chunk.Column) error { n := input.NumRows() - buf, err := b.bufAllocator.get(types.ETJson, n) + buf, err := b.bufAllocator.get(types.ETString, n) if err != nil { return err } defer b.bufAllocator.put(buf) - if err := b.args[0].VecEvalJSON(b.ctx, input, buf); err != nil { + if err := b.args[0].VecEvalString(b.ctx, input, buf); err != nil { return err } @@ -314,7 +316,7 @@ func (b *builtinJSONQuoteSig) vecEvalString(input *chunk.Chunk, result *chunk.Co result.AppendNull() continue } - result.AppendString(buf.GetJSON(i).Quote()) + result.AppendString(strconv.Quote(buf.GetString(i))) } return nil } @@ -811,12 +813,12 @@ func (b *builtinJSONUnquoteSig) vectorized() bool { func (b *builtinJSONUnquoteSig) vecEvalString(input *chunk.Chunk, result *chunk.Column) error { n := input.NumRows() - buf, err := b.bufAllocator.get(types.ETJson, n) + buf, err := b.bufAllocator.get(types.ETString, n) if err != nil { return err } defer b.bufAllocator.put(buf) - if err := b.args[0].VecEvalJSON(b.ctx, input, buf); err != nil { + if err := b.args[0].VecEvalString(b.ctx, input, buf); err != nil { return err } @@ -826,11 +828,11 @@ func (b *builtinJSONUnquoteSig) vecEvalString(input *chunk.Chunk, result *chunk. 
result.AppendNull() continue } - res, err := buf.GetJSON(i).Unquote() + str, err := json.UnquoteString(buf.GetString(i)) if err != nil { return err } - result.AppendString(res) + result.AppendString(str) } return nil } diff --git a/expression/builtin_json_vec_test.go b/expression/builtin_json_vec_test.go index 9640330abaa2f..5b0ffb4fb5fc1 100644 --- a/expression/builtin_json_vec_test.go +++ b/expression/builtin_json_vec_test.go @@ -102,7 +102,7 @@ var vecBuiltinJSONCases = map[string][]vecExprBenchCase{ {retEvalType: types.ETJson, childrenTypes: []types.EvalType{types.ETJson, types.ETString, types.ETJson, types.ETString, types.ETJson}, geners: []dataGenerator{nil, &constStrGener{"$.aaa"}, nil, &constStrGener{"$.bbb"}, nil}}, }, ast.JSONQuote: { - {retEvalType: types.ETString, childrenTypes: []types.EvalType{types.ETJson}}, + {retEvalType: types.ETString, childrenTypes: []types.EvalType{types.ETString}}, }, } diff --git a/expression/builtin_math.go b/expression/builtin_math.go index 66ece06697950..7f60dfe591153 100644 --- a/expression/builtin_math.go +++ b/expression/builtin_math.go @@ -1770,7 +1770,7 @@ func (c *truncateFunctionClass) getFunction(ctx sessionctx.Context, args []Expre } argTp := args[0].GetType().EvalType() - if argTp == types.ETTimestamp || argTp == types.ETDatetime || argTp == types.ETDuration || argTp == types.ETString { + if argTp.IsStringKind() { argTp = types.ETReal } @@ -1796,6 +1796,8 @@ func (c *truncateFunctionClass) getFunction(ctx sessionctx.Context, args []Expre case types.ETDecimal: sig = &builtinTruncateDecimalSig{bf} sig.setPbCode(tipb.ScalarFuncSig_TruncateDecimal) + default: + return nil, errIncorrectArgs.GenWithStackByArgs("truncate") } return sig, nil diff --git a/expression/builtin_miscellaneous_vec.go b/expression/builtin_miscellaneous_vec.go index 266033fab53a6..3bcf50269495d 100644 --- a/expression/builtin_miscellaneous_vec.go +++ b/expression/builtin_miscellaneous_vec.go @@ -195,11 +195,20 @@ func (b *builtinNameConstDurationSig) vecEvalDuration(input *chunk.Chunk, result } func (b *builtinLockSig) vectorized() bool { - return false + return true } +// See https://dev.mysql.com/doc/refman/5.7/en/miscellaneous-functions.html#function_get-lock +// The lock function does nothing and always returns 1. +// Warning: the get_lock() function is parsed but ignored. func (b *builtinLockSig) vecEvalInt(input *chunk.Chunk, result *chunk.Column) error { - return errors.Errorf("not implemented") + n := input.NumRows() + result.ResizeInt64(n, false) + i64s := result.Int64s() + for i := range i64s { + i64s[i] = 1 + } + return nil } func (b *builtinDurationAnyValueSig) vectorized() bool { @@ -527,9 +536,18 @@ func (b *builtinNameConstRealSig) vecEvalReal(input *chunk.Chunk, result *chunk. } func (b *builtinReleaseLockSig) vectorized() bool { - return false + return true } +// See https://dev.mysql.com/doc/refman/5.7/en/miscellaneous-functions.html#function_release-lock +// The release-lock function does nothing and always returns 1. +// Warning: the release_lock() function is parsed but ignored.
func (b *builtinReleaseLockSig) vecEvalInt(input *chunk.Chunk, result *chunk.Column) error { - return errors.Errorf("not implemented") + n := input.NumRows() + result.ResizeInt64(n, false) + i64s := result.Int64s() + for i := range i64s { + i64s[i] = 1 + } + return nil } diff --git a/expression/builtin_string_vec.go b/expression/builtin_string_vec.go index 3f48e01fee73f..2263b8495054a 100644 --- a/expression/builtin_string_vec.go +++ b/expression/builtin_string_vec.go @@ -1175,11 +1175,62 @@ func (b *builtinLocateBinary2ArgsSig) vecEvalInt(input *chunk.Chunk, result *chu } func (b *builtinLocateBinary3ArgsSig) vectorized() bool { - return false + return true } +// vecEvalInt evals LOCATE(substr,str,pos), case-sensitive. +// See https://dev.mysql.com/doc/refman/5.7/en/string-functions.html#function_locate func (b *builtinLocateBinary3ArgsSig) vecEvalInt(input *chunk.Chunk, result *chunk.Column) error { - return errors.Errorf("not implemented") + n := input.NumRows() + buf0, err := b.bufAllocator.get(types.ETString, n) + if err != nil { + return err + } + defer b.bufAllocator.put(buf0) + if err := b.args[0].VecEvalString(b.ctx, input, buf0); err != nil { + return err + } + buf1, err := b.bufAllocator.get(types.ETString, n) + if err != nil { + return err + } + defer b.bufAllocator.put(buf1) + if err := b.args[1].VecEvalString(b.ctx, input, buf1); err != nil { + return err + } + // store positions in result + if err := b.args[2].VecEvalInt(b.ctx, input, result); err != nil { + return err + } + + result.MergeNulls(buf0, buf1) + i64s := result.Int64s() + for i := 0; i < n; i++ { + if result.IsNull(i) { + continue + } + pos := i64s[i] + // Convert the position argument, which is 1-based, to a 0-based index. + pos-- + subStr := buf0.GetString(i) + str := buf1.GetString(i) + subStrLen := len(subStr) + if pos < 0 || pos > int64(len(str)-subStrLen) { + i64s[i] = 0 + continue + } else if subStrLen == 0 { + i64s[i] = pos + 1 + continue + } + slice := str[pos:] + idx := strings.Index(slice, subStr) + if idx != -1 { + i64s[i] = pos + int64(idx) + 1 + continue + } + i64s[i] = 0 + } + return nil } func (b *builtinExportSet4ArgSig) vectorized() bool { return false } diff --git a/expression/builtin_string_vec_test.go b/expression/builtin_string_vec_test.go index 75a1faaece68e..192328e8aa7f4 100644 --- a/expression/builtin_string_vec_test.go +++ b/expression/builtin_string_vec_test.go @@ -191,6 +191,18 @@ var vecBuiltinStringCases = map[string][]vecExprBenchCase{ childrenFieldTypes: []*types.FieldType{{Tp: mysql.TypeString, Flag: mysql.BinaryFlag, Collate: charset.CollationBin}, {Tp: mysql.TypeString, Flag: mysql.BinaryFlag, Collate: charset.CollationBin}}, geners: []dataGenerator{&selectStringGener{[]string{"01", "10", "001", "110", "0001", "1110"}}, &selectStringGener{[]string{"010010001000010", "101101110111101"}}}, }, + { + retEvalType: types.ETInt, + childrenTypes: []types.EvalType{types.ETString, types.ETString, types.ETInt}, + childrenFieldTypes: []*types.FieldType{{Tp: mysql.TypeString, Flag: mysql.BinaryFlag, Collate: charset.CollationBin}, {Tp: mysql.TypeString, Flag: mysql.BinaryFlag, Collate: charset.CollationBin}, {Tp: mysql.TypeInt24}}, + geners: []dataGenerator{&randLenStrGener{0, 10}, &randLenStrGener{0, 20}, &rangeInt64Gener{-10, 20}}, + }, + { + retEvalType: types.ETInt, + childrenTypes: []types.EvalType{types.ETString, types.ETString, types.ETInt}, + childrenFieldTypes: []*types.FieldType{{Tp: mysql.TypeString, Flag: mysql.BinaryFlag, Collate: charset.CollationBin}, {Tp: mysql.TypeString, Flag:
mysql.BinaryFlag, Collate: charset.CollationBin}, {Tp: mysql.TypeInt24}}, + geners: []dataGenerator{&selectStringGener{[]string{"01", "10", "001", "110", "0001", "1110"}}, &selectStringGener{[]string{"010010001000010", "101101110111101"}}, &rangeInt64Gener{-10, 20}}, + }, }, ast.Hex: { {retEvalType: types.ETString, childrenTypes: []types.EvalType{types.ETString}, geners: []dataGenerator{&randHexStrGener{10, 100}}}, diff --git a/expression/builtin_time.go b/expression/builtin_time.go index 41b339dca9189..56aa5a6b77780 100644 --- a/expression/builtin_time.go +++ b/expression/builtin_time.go @@ -2611,7 +2611,7 @@ type baseDateArithmitical struct { func newDateArighmeticalUtil() baseDateArithmitical { return baseDateArithmitical{ - intervalRegexp: regexp.MustCompile(`[\d]+`), + intervalRegexp: regexp.MustCompile(`-?[\d]+`), } } @@ -2724,15 +2724,11 @@ func (du *baseDateArithmitical) getIntervalFromDecimal(ctx sessionctx.Context, a interval = "-" + interval } case "SECOND": - // Decimal's EvalString is like %f format. - interval, isNull, err = args[1].EvalString(ctx, row) - if isNull || err != nil { - return "", true, err - } + // interval is already like the %f format. default: // YEAR, QUARTER, MONTH, WEEK, DAY, HOUR, MINUTE, MICROSECOND - args[1] = WrapWithCastAsInt(ctx, args[1]) - interval, isNull, err = args[1].EvalString(ctx, row) + castExpr := WrapWithCastAsString(ctx, WrapWithCastAsInt(ctx, args[1])) + interval, isNull, err = castExpr.EvalString(ctx, row) if isNull || err != nil { return "", true, err } @@ -2853,6 +2849,298 @@ func (du *baseDateArithmitical) sub(ctx sessionctx.Context, date types.Time, int return date, false, nil } +func (du *baseDateArithmitical) vecGetDateFromInt(b *baseBuiltinFunc, input *chunk.Chunk, unit string, result *chunk.Column) error { + n := input.NumRows() + buf, err := b.bufAllocator.get(types.ETInt, n) + if err != nil { + return err + } + defer b.bufAllocator.put(buf) + if err := b.args[0].VecEvalInt(b.ctx, input, buf); err != nil { + return err + } + + result.ResizeTime(n, false) + result.MergeNulls(buf) + dates := result.Times() + i64s := buf.Int64s() + sc := b.ctx.GetSessionVars().StmtCtx + isClockUnit := types.IsClockUnit(unit) + for i := 0; i < n; i++ { + if result.IsNull(i) { + continue + } + + date, err := types.ParseTimeFromInt64(sc, i64s[i]) + if err != nil { + err = handleInvalidTimeError(b.ctx, err) + if err != nil { + return err + } + result.SetNull(i, true) + continue + } + + dateTp := mysql.TypeDate + if date.Type == mysql.TypeDatetime || date.Type == mysql.TypeTimestamp || isClockUnit { + dateTp = mysql.TypeDatetime + } + date.Type = dateTp + dates[i] = date + } + return nil +} + +func (du *baseDateArithmitical) vecGetDateFromString(b *baseBuiltinFunc, input *chunk.Chunk, unit string, result *chunk.Column) error { + n := input.NumRows() + buf, err := b.bufAllocator.get(types.ETString, n) + if err != nil { + return err + } + defer b.bufAllocator.put(buf) + if err := b.args[0].VecEvalString(b.ctx, input, buf); err != nil { + return err + } + + result.ResizeTime(n, false) + result.MergeNulls(buf) + dates := result.Times() + sc := b.ctx.GetSessionVars().StmtCtx + isClockUnit := types.IsClockUnit(unit) + for i := 0; i < n; i++ { + if result.IsNull(i) { + continue + } + + dateStr := buf.GetString(i) + dateTp := mysql.TypeDate + if !types.IsDateFormat(dateStr) || isClockUnit { + dateTp = mysql.TypeDatetime + } + + date, err := types.ParseTime(sc, dateStr, dateTp, types.MaxFsp) + if err != nil { + err = handleInvalidTimeError(b.ctx, err) + if err 
!= nil { + return err + } + result.SetNull(i, true) + } else { + dates[i] = date + } + + } + return nil +} + +func (du *baseDateArithmitical) vecGetDateFromDatetime(b *baseBuiltinFunc, input *chunk.Chunk, unit string, result *chunk.Column) error { + n := input.NumRows() + result.ResizeTime(n, false) + if err := b.args[0].VecEvalTime(b.ctx, input, result); err != nil { + return err + } + + dates := result.Times() + isClockUnit := types.IsClockUnit(unit) + for i := 0; i < n; i++ { + if result.IsNull(i) { + continue + } + + dateTp := mysql.TypeDate + if dates[i].Type == mysql.TypeDatetime || dates[i].Type == mysql.TypeTimestamp || isClockUnit { + dateTp = mysql.TypeDatetime + } + dates[i].Type = dateTp + } + return nil +} + +func (du *baseDateArithmitical) vecGetIntervalFromString(b *baseBuiltinFunc, input *chunk.Chunk, unit string, result *chunk.Column) error { + n := input.NumRows() + buf, err := b.bufAllocator.get(types.ETString, n) + if err != nil { + return err + } + defer b.bufAllocator.put(buf) + if err := b.args[1].VecEvalString(b.ctx, input, buf); err != nil { + return err + } + + amendInterval := func(val string) string { + return val + } + if unitLower := strings.ToLower(unit); unitLower == "day" || unitLower == "hour" { + amendInterval = func(val string) string { + if intervalLower := strings.ToLower(val); intervalLower == "true" { + return "1" + } else if intervalLower == "false" { + return "0" + } + return du.intervalRegexp.FindString(val) + } + } + + result.ReserveString(n) + for i := 0; i < n; i++ { + if buf.IsNull(i) { + result.AppendNull() + continue + } + + result.AppendString(amendInterval(buf.GetString(i))) + } + return nil +} + +func (du *baseDateArithmitical) vecGetIntervalFromDecimal(b *baseBuiltinFunc, input *chunk.Chunk, unit string, result *chunk.Column) error { + n := input.NumRows() + buf, err := b.bufAllocator.get(types.ETDecimal, n) + if err != nil { + return err + } + defer b.bufAllocator.put(buf) + if err := b.args[1].VecEvalDecimal(b.ctx, input, buf); err != nil { + return err + } + + isCompoundUnit := false + amendInterval := func(val string, row *chunk.Row) (string, bool, error) { + return val, false, nil + } + switch unitUpper := strings.ToUpper(unit); unitUpper { + case "HOUR_MINUTE", "MINUTE_SECOND", "YEAR_MONTH", "DAY_HOUR", "DAY_MINUTE", + "DAY_SECOND", "DAY_MICROSECOND", "HOUR_MICROSECOND", "HOUR_SECOND", "MINUTE_MICROSECOND", "SECOND_MICROSECOND": + isCompoundUnit = true + switch strings.ToUpper(unit) { + case "HOUR_MINUTE", "MINUTE_SECOND": + amendInterval = func(val string, _ *chunk.Row) (string, bool, error) { + return strings.Replace(val, ".", ":", -1), false, nil + } + case "YEAR_MONTH": + amendInterval = func(val string, _ *chunk.Row) (string, bool, error) { + return strings.Replace(val, ".", "-", -1), false, nil + } + case "DAY_HOUR": + amendInterval = func(val string, _ *chunk.Row) (string, bool, error) { + return strings.Replace(val, ".", " ", -1), false, nil + } + case "DAY_MINUTE": + amendInterval = func(val string, _ *chunk.Row) (string, bool, error) { + return "0 " + strings.Replace(val, ".", ":", -1), false, nil + } + case "DAY_SECOND": + amendInterval = func(val string, _ *chunk.Row) (string, bool, error) { + return "0 00:" + strings.Replace(val, ".", ":", -1), false, nil + } + case "DAY_MICROSECOND": + amendInterval = func(val string, _ *chunk.Row) (string, bool, error) { + return "0 00:00:" + val, false, nil + } + case "HOUR_MICROSECOND": + amendInterval = func(val string, _ *chunk.Row) (string, bool, error) { + return "00:00:" + 
val, false, nil + } + case "HOUR_SECOND": + amendInterval = func(val string, _ *chunk.Row) (string, bool, error) { + return "00:" + strings.Replace(val, ".", ":", -1), false, nil + } + case "MINUTE_MICROSECOND": + amendInterval = func(val string, _ *chunk.Row) (string, bool, error) { + return "00:" + val, false, nil + } + case "SECOND_MICROSECOND": + /* keep interval as original decimal */ + } + case "SECOND": + /* keep interval as original decimal */ + default: + // YEAR, QUARTER, MONTH, WEEK, DAY, HOUR, MINUTE, MICROSECOND + castExpr := WrapWithCastAsString(b.ctx, WrapWithCastAsInt(b.ctx, b.args[1])) + amendInterval = func(_ string, row *chunk.Row) (string, bool, error) { + interval, isNull, err := castExpr.EvalString(b.ctx, *row) + return interval, isNull || err != nil, err + } + } + + result.ReserveString(n) + decs := buf.Decimals() + for i := 0; i < n; i++ { + if buf.IsNull(i) { + result.AppendNull() + continue + } + + interval := decs[i].String() + row := input.GetRow(i) + isNeg := false + if isCompoundUnit && interval != "" && interval[0] == '-' { + isNeg = true + interval = interval[1:] + } + interval, isNull, err := amendInterval(interval, &row) + if err != nil { + return err + } + if isNull { + result.AppendNull() + continue + } + if isCompoundUnit && isNeg { + interval = "-" + interval + } + result.AppendString(interval) + } + return nil +} + +func (du *baseDateArithmitical) vecGetIntervalFromInt(b *baseBuiltinFunc, input *chunk.Chunk, unit string, result *chunk.Column) error { + n := input.NumRows() + buf, err := b.bufAllocator.get(types.ETInt, n) + if err != nil { + return err + } + defer b.bufAllocator.put(buf) + if err := b.args[1].VecEvalInt(b.ctx, input, buf); err != nil { + return err + } + + result.ReserveString(n) + i64s := buf.Int64s() + for i := 0; i < n; i++ { + if buf.IsNull(i) { + result.AppendNull() + } else { + result.AppendString(strconv.FormatInt(i64s[i], 10)) + } + } + return nil +} + +func (du *baseDateArithmitical) vecGetIntervalFromReal(b *baseBuiltinFunc, input *chunk.Chunk, unit string, result *chunk.Column) error { + n := input.NumRows() + buf, err := b.bufAllocator.get(types.ETReal, n) + if err != nil { + return err + } + defer b.bufAllocator.put(buf) + if err := b.args[1].VecEvalReal(b.ctx, input, buf); err != nil { + return err + } + + result.ReserveString(n) + f64s := buf.Float64s() + prec := b.args[1].GetType().Decimal + for i := 0; i < n; i++ { + if buf.IsNull(i) { + result.AppendNull() + } else { + result.AppendString(strconv.FormatFloat(f64s[i], 'f', prec, 64)) + } + } + return nil +} + type addDateFunctionClass struct { baseFunctionClass } diff --git a/expression/builtin_time_vec.go b/expression/builtin_time_vec.go index e0e3d2cae6e2f..1e41a7c99e2d5 100644 --- a/expression/builtin_time_vec.go +++ b/expression/builtin_time_vec.go @@ -17,6 +17,7 @@ import ( "fmt" "math" "strconv" + "strings" "time" "github.com/pingcap/errors" @@ -243,22 +244,6 @@ func (b *builtinExtractDatetimeSig) vecEvalInt(input *chunk.Chunk, result *chunk return nil } -func (b *builtinAddDateIntIntSig) vectorized() bool { - return false -} - -func (b *builtinAddDateIntIntSig) vecEvalTime(input *chunk.Chunk, result *chunk.Column) error { - return errors.Errorf("not implemented") -} - -func (b *builtinAddDateDatetimeDecimalSig) vectorized() bool { - return false -} - -func (b *builtinAddDateDatetimeDecimalSig) vecEvalTime(input *chunk.Chunk, result *chunk.Column) error { - return errors.Errorf("not implemented") -} - func (b *builtinDayNameSig) vectorized() bool { return true } 
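The vecGetIntervalFromDecimal helper added to builtin_time.go above rewrites a decimal interval's textual form before it is parsed, so that for compound units the decimal point separates the unit's fields. The following is a minimal standalone sketch of those rewrite rules, assuming only the Go standard library; amendDecimalInterval is an illustrative name, not a TiDB API:

```go
package main

import (
	"fmt"
	"strings"
)

// amendDecimalInterval mirrors (a subset of) the per-unit rewriting that
// vecGetIntervalFromDecimal applies to a decimal interval before parsing.
// Illustrative sketch only, not TiDB code.
func amendDecimalInterval(unit, val string) string {
	switch strings.ToUpper(unit) {
	case "HOUR_MINUTE", "MINUTE_SECOND":
		return strings.Replace(val, ".", ":", -1)
	case "YEAR_MONTH":
		return strings.Replace(val, ".", "-", -1)
	case "DAY_HOUR":
		return strings.Replace(val, ".", " ", -1)
	case "DAY_MINUTE":
		return "0 " + strings.Replace(val, ".", ":", -1)
	case "DAY_SECOND":
		return "0 00:" + strings.Replace(val, ".", ":", -1)
	default:
		// SECOND and SECOND_MICROSECOND keep the decimal as-is.
		return val
	}
}

func main() {
	fmt.Println(amendDecimalInterval("HOUR_MINUTE", "1.2")) // "1:2" -> 1 hour 2 minutes
	fmt.Println(amendDecimalInterval("YEAR_MONTH", "1.2"))  // "1-2" -> 1 year 2 months
	fmt.Println(amendDecimalInterval("DAY_SECOND", "1.2"))  // "0 00:1:2"
}
```

Under these rules an expression such as DATE_ADD(d, INTERVAL 1.2 HOUR_MINUTE) reads 1.2 as one hour and two minutes rather than as a fraction of an hour.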
@@ -462,14 +447,6 @@ func (b *builtinUTCTimeWithArgSig) vecEvalDuration(input *chunk.Chunk, result *c return nil } -func (b *builtinSubDateIntIntSig) vectorized() bool { - return false -} - -func (b *builtinSubDateIntIntSig) vecEvalTime(input *chunk.Chunk, result *chunk.Column) error { - return errors.Errorf("not implemented") -} - func (b *builtinUnixTimestampCurrentSig) vectorized() bool { return true } @@ -494,14 +471,6 @@ func (b *builtinUnixTimestampCurrentSig) vecEvalInt(input *chunk.Chunk, result * return nil } -func (b *builtinSubDateIntRealSig) vectorized() bool { - return false -} - -func (b *builtinSubDateIntRealSig) vecEvalTime(input *chunk.Chunk, result *chunk.Column) error { - return errors.Errorf("not implemented") -} - func (b *builtinYearWeekWithoutModeSig) vectorized() bool { return true } @@ -544,22 +513,6 @@ func (b *builtinYearWeekWithoutModeSig) vecEvalInt(input *chunk.Chunk, result *c return nil } -func (b *builtinAddDateStringRealSig) vectorized() bool { - return false -} - -func (b *builtinAddDateStringRealSig) vecEvalTime(input *chunk.Chunk, result *chunk.Column) error { - return errors.Errorf("not implemented") -} - -func (b *builtinSubDateStringDecimalSig) vectorized() bool { - return false -} - -func (b *builtinSubDateStringDecimalSig) vecEvalTime(input *chunk.Chunk, result *chunk.Column) error { - return errors.Errorf("not implemented") -} - func (b *builtinPeriodDiffSig) vectorized() bool { return true } @@ -641,30 +594,6 @@ func (b *builtinNowWithArgSig) vecEvalTime(input *chunk.Chunk, result *chunk.Col return nil } -func (b *builtinSubDateStringRealSig) vectorized() bool { - return false -} - -func (b *builtinSubDateStringRealSig) vecEvalTime(input *chunk.Chunk, result *chunk.Column) error { - return errors.Errorf("not implemented") -} - -func (b *builtinSubDateDatetimeIntSig) vectorized() bool { - return false -} - -func (b *builtinSubDateDatetimeIntSig) vecEvalTime(input *chunk.Chunk, result *chunk.Column) error { - return errors.Errorf("not implemented") -} - -func (b *builtinSubDateDurationDecimalSig) vectorized() bool { - return false -} - -func (b *builtinSubDateDurationDecimalSig) vecEvalDuration(input *chunk.Chunk, result *chunk.Column) error { - return errors.Errorf("not implemented") -} - func (b *builtinGetFormatSig) vectorized() bool { return true } @@ -783,22 +712,6 @@ func (b *builtinLastDaySig) vecEvalTime(input *chunk.Chunk, result *chunk.Column return nil } -func (b *builtinAddDateStringDecimalSig) vectorized() bool { - return false -} - -func (b *builtinAddDateStringDecimalSig) vecEvalTime(input *chunk.Chunk, result *chunk.Column) error { - return errors.Errorf("not implemented") -} - -func (b *builtinAddDateDatetimeRealSig) vectorized() bool { - return false -} - -func (b *builtinAddDateDatetimeRealSig) vecEvalTime(input *chunk.Chunk, result *chunk.Column) error { - return errors.Errorf("not implemented") -} - func (b *builtinSubTimeDurationNullSig) vectorized() bool { return false } @@ -861,14 +774,6 @@ func (b *builtinStrToDateDateSig) vecEvalTime(input *chunk.Chunk, result *chunk. 
return nil } -func (b *builtinAddDateStringIntSig) vectorized() bool { - return false -} - -func (b *builtinAddDateStringIntSig) vecEvalTime(input *chunk.Chunk, result *chunk.Column) error { - return errors.Errorf("not implemented") -} - func (b *builtinSysDateWithFspSig) vectorized() bool { return true } @@ -905,22 +810,6 @@ func (b *builtinSysDateWithFspSig) vecEvalTime(input *chunk.Chunk, result *chunk return nil } -func (b *builtinAddDateDurationIntSig) vectorized() bool { - return false -} - -func (b *builtinAddDateDurationIntSig) vecEvalDuration(input *chunk.Chunk, result *chunk.Column) error { - return errors.Errorf("not implemented") -} - -func (b *builtinSubDateIntStringSig) vectorized() bool { - return false -} - -func (b *builtinSubDateIntStringSig) vecEvalTime(input *chunk.Chunk, result *chunk.Column) error { - return errors.Errorf("not implemented") -} - func (b *builtinTidbParseTsoSig) vectorized() bool { return true } @@ -961,14 +850,6 @@ func (b *builtinTidbParseTsoSig) vecEvalTime(input *chunk.Chunk, result *chunk.C return nil } -func (b *builtinAddDateDurationStringSig) vectorized() bool { - return false -} - -func (b *builtinAddDateDurationStringSig) vecEvalDuration(input *chunk.Chunk, result *chunk.Column) error { - return errors.Errorf("not implemented") -} - func (b *builtinSubStringAndDurationSig) vectorized() bool { return false } @@ -1041,14 +922,6 @@ func (b *builtinSubDatetimeAndStringSig) vecEvalTime(input *chunk.Chunk, result return errors.Errorf("not implemented") } -func (b *builtinSubDateStringStringSig) vectorized() bool { - return false -} - -func (b *builtinSubDateStringStringSig) vecEvalTime(input *chunk.Chunk, result *chunk.Column) error { - return errors.Errorf("not implemented") -} - func (b *builtinQuarterSig) vectorized() bool { return true } @@ -1346,19 +1219,17 @@ func (b *builtinNowWithoutArgSig) vecEvalTime(input *chunk.Chunk, result *chunk. 
} func (b *builtinTimestampLiteralSig) vectorized() bool { - return false + return true } func (b *builtinTimestampLiteralSig) vecEvalTime(input *chunk.Chunk, result *chunk.Column) error { - return errors.Errorf("not implemented") -} - -func (b *builtinAddDateIntDecimalSig) vectorized() bool { - return false -} - -func (b *builtinAddDateIntDecimalSig) vecEvalTime(input *chunk.Chunk, result *chunk.Column) error { - return errors.Errorf("not implemented") + n := input.NumRows() + result.ResizeTime(n, false) + times := result.Times() + for i := range times { + times[i] = b.tm + } + return nil } func (b *builtinMakeDateSig) vectorized() bool { @@ -1509,14 +1380,6 @@ func (b *builtinUTCTimestampWithArgSig) vecEvalTime(input *chunk.Chunk, result * return nil } -func (b *builtinAddDateIntRealSig) vectorized() bool { - return false -} - -func (b *builtinAddDateIntRealSig) vecEvalTime(input *chunk.Chunk, result *chunk.Column) error { - return errors.Errorf("not implemented") -} - func (b *builtinSubDurationAndDurationSig) vectorized() bool { return false } @@ -1901,14 +1764,6 @@ func (b *builtinHourSig) vecEvalInt(input *chunk.Chunk, result *chunk.Column) er return nil } -func (b *builtinAddDateDurationRealSig) vectorized() bool { - return false -} - -func (b *builtinAddDateDurationRealSig) vecEvalDuration(input *chunk.Chunk, result *chunk.Column) error { - return errors.Errorf("not implemented") -} - func (b *builtinSecToTimeSig) vectorized() bool { return true } @@ -2058,14 +1913,6 @@ func (b *builtinUTCTimeWithoutArgSig) vecEvalDuration(input *chunk.Chunk, result return nil } -func (b *builtinSubDateIntDecimalSig) vectorized() bool { - return false -} - -func (b *builtinSubDateIntDecimalSig) vecEvalTime(input *chunk.Chunk, result *chunk.Column) error { - return errors.Errorf("not implemented") -} - func (b *builtinDateDiffSig) vectorized() bool { return true } @@ -2141,30 +1988,6 @@ func (b *builtinCurrentDateSig) vecEvalTime(input *chunk.Chunk, result *chunk.Co return nil } -func (b *builtinAddDateStringStringSig) vectorized() bool { - return false -} - -func (b *builtinAddDateStringStringSig) vecEvalTime(input *chunk.Chunk, result *chunk.Column) error { - return errors.Errorf("not implemented") -} - -func (b *builtinAddDateIntStringSig) vectorized() bool { - return false -} - -func (b *builtinAddDateIntStringSig) vecEvalTime(input *chunk.Chunk, result *chunk.Column) error { - return errors.Errorf("not implemented") -} - -func (b *builtinAddDateDatetimeStringSig) vectorized() bool { - return false -} - -func (b *builtinAddDateDatetimeStringSig) vecEvalTime(input *chunk.Chunk, result *chunk.Column) error { - return errors.Errorf("not implemented") -} - func (b *builtinMakeTimeSig) vectorized() bool { return false } @@ -2253,14 +2076,6 @@ func (b *builtinFromUnixTime1ArgSig) vecEvalTime(input *chunk.Chunk, result *chu return nil } -func (b *builtinSubDateDurationIntSig) vectorized() bool { - return false -} - -func (b *builtinSubDateDurationIntSig) vecEvalDuration(input *chunk.Chunk, result *chunk.Column) error { - return errors.Errorf("not implemented") -} - func (b *builtinYearWeekWithModeSig) vectorized() bool { return true } @@ -2382,30 +2197,6 @@ func (b *builtinUnixTimestampIntSig) vecEvalInt(input *chunk.Chunk, result *chun return errors.Errorf("not implemented") } -func (b *builtinAddDateDurationDecimalSig) vectorized() bool { - return false -} - -func (b *builtinAddDateDurationDecimalSig) vecEvalDuration(input *chunk.Chunk, result *chunk.Column) error { - return errors.Errorf("not 
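The `builtinTimestampLiteralSig` conversion above is the simplest possible vectorization: the literal's value (`b.tm`) is fixed at plan time, so the column path only resizes the output and broadcasts one value across all rows. A hedged, standalone sketch of that broadcast shape, using a plain slice in place of `chunk.Column` (requires Go 1.18+ for generics):

```go
package main

import (
	"fmt"
	"time"
)

// broadcast fills an n-row output with one precomputed literal value,
// mirroring how vecEvalTime writes b.tm into every slot of result.Times().
func broadcast[T any](n int, literal T) []T {
	out := make([]T, n) // analogous to result.ResizeTime(n, false)
	for i := range out {
		out[i] = literal
	}
	return out
}

func main() {
	tm := time.Date(2019, 10, 1, 0, 0, 0, 0, time.UTC)
	col := broadcast(3, tm)
	fmt.Println(len(col), col[0]) // 3 rows, all holding the same literal
}
```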
implemented") -} - -func (b *builtinSubDateDatetimeRealSig) vectorized() bool { - return false -} - -func (b *builtinSubDateDatetimeRealSig) vecEvalTime(input *chunk.Chunk, result *chunk.Column) error { - return errors.Errorf("not implemented") -} - -func (b *builtinSubDateDurationRealSig) vectorized() bool { - return false -} - -func (b *builtinSubDateDurationRealSig) vecEvalDuration(input *chunk.Chunk, result *chunk.Column) error { - return errors.Errorf("not implemented") -} - func (b *builtinCurrentTime0ArgSig) vectorized() bool { return true } @@ -2431,27 +2222,51 @@ func (b *builtinCurrentTime0ArgSig) vecEvalDuration(input *chunk.Chunk, result * } func (b *builtinTimeSig) vectorized() bool { - return false + return true } func (b *builtinTimeSig) vecEvalDuration(input *chunk.Chunk, result *chunk.Column) error { - return errors.Errorf("not implemented") -} + n := input.NumRows() + buf, err := b.bufAllocator.get(types.ETString, n) + if err != nil { + return err + } + defer b.bufAllocator.put(buf) + if err := b.args[0].VecEvalString(b.ctx, input, buf); err != nil { + return err + } -func (b *builtinAddDateDatetimeIntSig) vectorized() bool { - return false -} + result.ResizeGoDuration(n, false) + result.MergeNulls(buf) + ds := result.GoDurations() + sc := b.ctx.GetSessionVars().StmtCtx + for i := 0; i < n; i++ { + if result.IsNull(i) { + continue + } -func (b *builtinAddDateDatetimeIntSig) vecEvalTime(input *chunk.Chunk, result *chunk.Column) error { - return errors.Errorf("not implemented") -} + fsp := 0 + expr := buf.GetString(i) + if idx := strings.Index(expr, "."); idx != -1 { + fsp = len(expr) - idx - 1 + } -func (b *builtinSubDateStringIntSig) vectorized() bool { - return false -} + var tmpFsp int8 + if tmpFsp, err = types.CheckFsp(fsp); err != nil { + return err + } + fsp = int(tmpFsp) -func (b *builtinSubDateStringIntSig) vecEvalTime(input *chunk.Chunk, result *chunk.Column) error { - return errors.Errorf("not implemented") + res, err := types.ParseDuration(sc, expr, int8(fsp)) + if types.ErrTruncatedWrongVal.Equal(err) { + err = sc.HandleTruncate(err) + } + if err != nil { + return err + } + ds[i] = res.Duration + } + return nil } func (b *builtinDateLiteralSig) vectorized() bool { @@ -2477,19 +2292,17 @@ func (b *builtinDateLiteralSig) vecEvalTime(input *chunk.Chunk, result *chunk.Co } func (b *builtinTimeLiteralSig) vectorized() bool { - return false + return true } func (b *builtinTimeLiteralSig) vecEvalDuration(input *chunk.Chunk, result *chunk.Column) error { - return errors.Errorf("not implemented") -} - -func (b *builtinSubDateDurationStringSig) vectorized() bool { - return false -} - -func (b *builtinSubDateDurationStringSig) vecEvalDuration(input *chunk.Chunk, result *chunk.Column) error { - return errors.Errorf("not implemented") + n := input.NumRows() + result.ResizeGoDuration(n, false) + d64s := result.GoDurations() + for i := 0; i < n; i++ { + d64s[i] = b.duration.Duration + } + return nil } func (b *builtinSubTimeStringNullSig) vectorized() bool { @@ -2543,22 +2356,6 @@ func (b *builtinMonthNameSig) vectorized() bool { return true } -func (b *builtinSubDateDatetimeStringSig) vectorized() bool { - return false -} - -func (b *builtinSubDateDatetimeStringSig) vecEvalTime(input *chunk.Chunk, result *chunk.Column) error { - return errors.Errorf("not implemented") -} - -func (b *builtinSubDateDatetimeDecimalSig) vectorized() bool { - return false -} - -func (b *builtinSubDateDatetimeDecimalSig) vecEvalTime(input *chunk.Chunk, result *chunk.Column) error { - return 
errors.Errorf("not implemented") -} - func (b *builtinSubDatetimeAndDurationSig) vectorized() bool { return false } diff --git a/expression/builtin_time_vec_generated.go b/expression/builtin_time_vec_generated.go index 2e0efc732568e..9abf7062f9731 100644 --- a/expression/builtin_time_vec_generated.go +++ b/expression/builtin_time_vec_generated.go @@ -1007,3 +1007,1531 @@ func (b *builtinTimeTimeTimeDiffSig) vecEvalDuration(input *chunk.Chunk, result func (b *builtinTimeTimeTimeDiffSig) vectorized() bool { return true } + +func (b *builtinAddDateStringStringSig) vecEvalTime(input *chunk.Chunk, result *chunk.Column) error { + n := input.NumRows() + unit, isNull, err := b.args[2].EvalString(b.ctx, chunk.Row{}) + if err != nil { + return err + } + if isNull { + result.ResizeTime(n, true) + return nil + } + + intervalBuf, err := b.bufAllocator.get(types.ETString, n) + if err != nil { + return err + } + defer b.bufAllocator.put(intervalBuf) + if err := b.vecGetIntervalFromString(&b.baseBuiltinFunc, input, unit, intervalBuf); err != nil { + return err + } + + if err := b.vecGetDateFromString(&b.baseBuiltinFunc, input, unit, result); err != nil { + return err + } + + result.MergeNulls(intervalBuf) + resDates := result.Times() + for i := 0; i < n; i++ { + if result.IsNull(i) { + continue + } + resDate, isNull, err := b.add(b.ctx, resDates[i], intervalBuf.GetString(i), unit) + if err != nil { + return err + } + if isNull { + result.SetNull(i, true) + } else { + resDates[i] = resDate + } + } + return nil +} + +func (b *builtinAddDateStringStringSig) vectorized() bool { + return true +} + +func (b *builtinAddDateStringIntSig) vecEvalTime(input *chunk.Chunk, result *chunk.Column) error { + n := input.NumRows() + unit, isNull, err := b.args[2].EvalString(b.ctx, chunk.Row{}) + if err != nil { + return err + } + if isNull { + result.ResizeTime(n, true) + return nil + } + + intervalBuf, err := b.bufAllocator.get(types.ETString, n) + if err != nil { + return err + } + defer b.bufAllocator.put(intervalBuf) + if err := b.vecGetIntervalFromInt(&b.baseBuiltinFunc, input, unit, intervalBuf); err != nil { + return err + } + + if err := b.vecGetDateFromString(&b.baseBuiltinFunc, input, unit, result); err != nil { + return err + } + + result.MergeNulls(intervalBuf) + resDates := result.Times() + for i := 0; i < n; i++ { + if result.IsNull(i) { + continue + } + resDate, isNull, err := b.add(b.ctx, resDates[i], intervalBuf.GetString(i), unit) + if err != nil { + return err + } + if isNull { + result.SetNull(i, true) + } else { + resDates[i] = resDate + } + } + return nil +} + +func (b *builtinAddDateStringIntSig) vectorized() bool { + return true +} + +func (b *builtinAddDateStringRealSig) vecEvalTime(input *chunk.Chunk, result *chunk.Column) error { + n := input.NumRows() + unit, isNull, err := b.args[2].EvalString(b.ctx, chunk.Row{}) + if err != nil { + return err + } + if isNull { + result.ResizeTime(n, true) + return nil + } + + intervalBuf, err := b.bufAllocator.get(types.ETString, n) + if err != nil { + return err + } + defer b.bufAllocator.put(intervalBuf) + if err := b.vecGetIntervalFromReal(&b.baseBuiltinFunc, input, unit, intervalBuf); err != nil { + return err + } + + if err := b.vecGetDateFromString(&b.baseBuiltinFunc, input, unit, result); err != nil { + return err + } + + result.MergeNulls(intervalBuf) + resDates := result.Times() + for i := 0; i < n; i++ { + if result.IsNull(i) { + continue + } + resDate, isNull, err := b.add(b.ctx, resDates[i], intervalBuf.GetString(i), unit) + if err != nil { + 
return err + } + if isNull { + result.SetNull(i, true) + } else { + resDates[i] = resDate + } + } + return nil +} + +func (b *builtinAddDateStringRealSig) vectorized() bool { + return true +} + +func (b *builtinAddDateStringDecimalSig) vecEvalTime(input *chunk.Chunk, result *chunk.Column) error { + n := input.NumRows() + unit, isNull, err := b.args[2].EvalString(b.ctx, chunk.Row{}) + if err != nil { + return err + } + if isNull { + result.ResizeTime(n, true) + return nil + } + + intervalBuf, err := b.bufAllocator.get(types.ETString, n) + if err != nil { + return err + } + defer b.bufAllocator.put(intervalBuf) + if err := b.vecGetIntervalFromDecimal(&b.baseBuiltinFunc, input, unit, intervalBuf); err != nil { + return err + } + + if err := b.vecGetDateFromString(&b.baseBuiltinFunc, input, unit, result); err != nil { + return err + } + + result.MergeNulls(intervalBuf) + resDates := result.Times() + for i := 0; i < n; i++ { + if result.IsNull(i) { + continue + } + resDate, isNull, err := b.add(b.ctx, resDates[i], intervalBuf.GetString(i), unit) + if err != nil { + return err + } + if isNull { + result.SetNull(i, true) + } else { + resDates[i] = resDate + } + } + return nil +} + +func (b *builtinAddDateStringDecimalSig) vectorized() bool { + return true +} + +func (b *builtinAddDateIntStringSig) vecEvalTime(input *chunk.Chunk, result *chunk.Column) error { + n := input.NumRows() + unit, isNull, err := b.args[2].EvalString(b.ctx, chunk.Row{}) + if err != nil { + return err + } + if isNull { + result.ResizeTime(n, true) + return nil + } + + intervalBuf, err := b.bufAllocator.get(types.ETString, n) + if err != nil { + return err + } + defer b.bufAllocator.put(intervalBuf) + if err := b.vecGetIntervalFromString(&b.baseBuiltinFunc, input, unit, intervalBuf); err != nil { + return err + } + + if err := b.vecGetDateFromInt(&b.baseBuiltinFunc, input, unit, result); err != nil { + return err + } + + result.MergeNulls(intervalBuf) + resDates := result.Times() + for i := 0; i < n; i++ { + if result.IsNull(i) { + continue + } + resDate, isNull, err := b.add(b.ctx, resDates[i], intervalBuf.GetString(i), unit) + if err != nil { + return err + } + if isNull { + result.SetNull(i, true) + } else { + resDates[i] = resDate + } + } + return nil +} + +func (b *builtinAddDateIntStringSig) vectorized() bool { + return true +} + +func (b *builtinAddDateIntIntSig) vecEvalTime(input *chunk.Chunk, result *chunk.Column) error { + n := input.NumRows() + unit, isNull, err := b.args[2].EvalString(b.ctx, chunk.Row{}) + if err != nil { + return err + } + if isNull { + result.ResizeTime(n, true) + return nil + } + + intervalBuf, err := b.bufAllocator.get(types.ETString, n) + if err != nil { + return err + } + defer b.bufAllocator.put(intervalBuf) + if err := b.vecGetIntervalFromInt(&b.baseBuiltinFunc, input, unit, intervalBuf); err != nil { + return err + } + + if err := b.vecGetDateFromInt(&b.baseBuiltinFunc, input, unit, result); err != nil { + return err + } + + result.MergeNulls(intervalBuf) + resDates := result.Times() + for i := 0; i < n; i++ { + if result.IsNull(i) { + continue + } + resDate, isNull, err := b.add(b.ctx, resDates[i], intervalBuf.GetString(i), unit) + if err != nil { + return err + } + if isNull { + result.SetNull(i, true) + } else { + resDates[i] = resDate + } + } + return nil +} + +func (b *builtinAddDateIntIntSig) vectorized() bool { + return true +} + +func (b *builtinAddDateIntRealSig) vecEvalTime(input *chunk.Chunk, result *chunk.Column) error { + n := input.NumRows() + unit, isNull, err := 
b.args[2].EvalString(b.ctx, chunk.Row{}) + if err != nil { + return err + } + if isNull { + result.ResizeTime(n, true) + return nil + } + + intervalBuf, err := b.bufAllocator.get(types.ETString, n) + if err != nil { + return err + } + defer b.bufAllocator.put(intervalBuf) + if err := b.vecGetIntervalFromReal(&b.baseBuiltinFunc, input, unit, intervalBuf); err != nil { + return err + } + + if err := b.vecGetDateFromInt(&b.baseBuiltinFunc, input, unit, result); err != nil { + return err + } + + result.MergeNulls(intervalBuf) + resDates := result.Times() + for i := 0; i < n; i++ { + if result.IsNull(i) { + continue + } + resDate, isNull, err := b.add(b.ctx, resDates[i], intervalBuf.GetString(i), unit) + if err != nil { + return err + } + if isNull { + result.SetNull(i, true) + } else { + resDates[i] = resDate + } + } + return nil +} + +func (b *builtinAddDateIntRealSig) vectorized() bool { + return true +} + +func (b *builtinAddDateIntDecimalSig) vecEvalTime(input *chunk.Chunk, result *chunk.Column) error { + n := input.NumRows() + unit, isNull, err := b.args[2].EvalString(b.ctx, chunk.Row{}) + if err != nil { + return err + } + if isNull { + result.ResizeTime(n, true) + return nil + } + + intervalBuf, err := b.bufAllocator.get(types.ETString, n) + if err != nil { + return err + } + defer b.bufAllocator.put(intervalBuf) + if err := b.vecGetIntervalFromDecimal(&b.baseBuiltinFunc, input, unit, intervalBuf); err != nil { + return err + } + + if err := b.vecGetDateFromInt(&b.baseBuiltinFunc, input, unit, result); err != nil { + return err + } + + result.MergeNulls(intervalBuf) + resDates := result.Times() + for i := 0; i < n; i++ { + if result.IsNull(i) { + continue + } + resDate, isNull, err := b.add(b.ctx, resDates[i], intervalBuf.GetString(i), unit) + if err != nil { + return err + } + if isNull { + result.SetNull(i, true) + } else { + resDates[i] = resDate + } + } + return nil +} + +func (b *builtinAddDateIntDecimalSig) vectorized() bool { + return true +} + +func (b *builtinAddDateDatetimeStringSig) vecEvalTime(input *chunk.Chunk, result *chunk.Column) error { + n := input.NumRows() + unit, isNull, err := b.args[2].EvalString(b.ctx, chunk.Row{}) + if err != nil { + return err + } + if isNull { + result.ResizeTime(n, true) + return nil + } + + intervalBuf, err := b.bufAllocator.get(types.ETString, n) + if err != nil { + return err + } + defer b.bufAllocator.put(intervalBuf) + if err := b.vecGetIntervalFromString(&b.baseBuiltinFunc, input, unit, intervalBuf); err != nil { + return err + } + + if err := b.vecGetDateFromDatetime(&b.baseBuiltinFunc, input, unit, result); err != nil { + return err + } + + result.MergeNulls(intervalBuf) + resDates := result.Times() + for i := 0; i < n; i++ { + if result.IsNull(i) { + continue + } + resDate, isNull, err := b.add(b.ctx, resDates[i], intervalBuf.GetString(i), unit) + if err != nil { + return err + } + if isNull { + result.SetNull(i, true) + } else { + resDates[i] = resDate + } + } + return nil +} + +func (b *builtinAddDateDatetimeStringSig) vectorized() bool { + return true +} + +func (b *builtinAddDateDatetimeIntSig) vecEvalTime(input *chunk.Chunk, result *chunk.Column) error { + n := input.NumRows() + unit, isNull, err := b.args[2].EvalString(b.ctx, chunk.Row{}) + if err != nil { + return err + } + if isNull { + result.ResizeTime(n, true) + return nil + } + + intervalBuf, err := b.bufAllocator.get(types.ETString, n) + if err != nil { + return err + } + defer b.bufAllocator.put(intervalBuf) + if err := b.vecGetIntervalFromInt(&b.baseBuiltinFunc, 
input, unit, intervalBuf); err != nil { + return err + } + + if err := b.vecGetDateFromDatetime(&b.baseBuiltinFunc, input, unit, result); err != nil { + return err + } + + result.MergeNulls(intervalBuf) + resDates := result.Times() + for i := 0; i < n; i++ { + if result.IsNull(i) { + continue + } + resDate, isNull, err := b.add(b.ctx, resDates[i], intervalBuf.GetString(i), unit) + if err != nil { + return err + } + if isNull { + result.SetNull(i, true) + } else { + resDates[i] = resDate + } + } + return nil +} + +func (b *builtinAddDateDatetimeIntSig) vectorized() bool { + return true +} + +func (b *builtinAddDateDatetimeRealSig) vecEvalTime(input *chunk.Chunk, result *chunk.Column) error { + n := input.NumRows() + unit, isNull, err := b.args[2].EvalString(b.ctx, chunk.Row{}) + if err != nil { + return err + } + if isNull { + result.ResizeTime(n, true) + return nil + } + + intervalBuf, err := b.bufAllocator.get(types.ETString, n) + if err != nil { + return err + } + defer b.bufAllocator.put(intervalBuf) + if err := b.vecGetIntervalFromReal(&b.baseBuiltinFunc, input, unit, intervalBuf); err != nil { + return err + } + + if err := b.vecGetDateFromDatetime(&b.baseBuiltinFunc, input, unit, result); err != nil { + return err + } + + result.MergeNulls(intervalBuf) + resDates := result.Times() + for i := 0; i < n; i++ { + if result.IsNull(i) { + continue + } + resDate, isNull, err := b.add(b.ctx, resDates[i], intervalBuf.GetString(i), unit) + if err != nil { + return err + } + if isNull { + result.SetNull(i, true) + } else { + resDates[i] = resDate + } + } + return nil +} + +func (b *builtinAddDateDatetimeRealSig) vectorized() bool { + return true +} + +func (b *builtinAddDateDatetimeDecimalSig) vecEvalTime(input *chunk.Chunk, result *chunk.Column) error { + n := input.NumRows() + unit, isNull, err := b.args[2].EvalString(b.ctx, chunk.Row{}) + if err != nil { + return err + } + if isNull { + result.ResizeTime(n, true) + return nil + } + + intervalBuf, err := b.bufAllocator.get(types.ETString, n) + if err != nil { + return err + } + defer b.bufAllocator.put(intervalBuf) + if err := b.vecGetIntervalFromDecimal(&b.baseBuiltinFunc, input, unit, intervalBuf); err != nil { + return err + } + + if err := b.vecGetDateFromDatetime(&b.baseBuiltinFunc, input, unit, result); err != nil { + return err + } + + result.MergeNulls(intervalBuf) + resDates := result.Times() + for i := 0; i < n; i++ { + if result.IsNull(i) { + continue + } + resDate, isNull, err := b.add(b.ctx, resDates[i], intervalBuf.GetString(i), unit) + if err != nil { + return err + } + if isNull { + result.SetNull(i, true) + } else { + resDates[i] = resDate + } + } + return nil +} + +func (b *builtinAddDateDatetimeDecimalSig) vectorized() bool { + return true +} + +func (b *builtinAddDateDurationStringSig) vecEvalDuration(input *chunk.Chunk, result *chunk.Column) error { + n := input.NumRows() + unit, isNull, err := b.args[2].EvalString(b.ctx, chunk.Row{}) + if err != nil { + return err + } + if isNull { + result.ResizeGoDuration(n, true) + return nil + } + + intervalBuf, err := b.bufAllocator.get(types.ETString, n) + if err != nil { + return err + } + defer b.bufAllocator.put(intervalBuf) + if err := b.vecGetIntervalFromString(&b.baseBuiltinFunc, input, unit, intervalBuf); err != nil { + return err + } + + result.ResizeGoDuration(n, false) + if err := b.args[0].VecEvalDuration(b.ctx, input, result); err != nil { + return err + } + + result.MergeNulls(intervalBuf) + resDurations := result.GoDurations() + iterDuration := types.Duration{Fsp: 
types.MaxFsp} + for i := 0; i < n; i++ { + if result.IsNull(i) { + continue + } + iterDuration.Duration = resDurations[i] + resDuration, isNull, err := b.addDuration(b.ctx, iterDuration, intervalBuf.GetString(i), unit) + if err != nil { + return err + } + if isNull { + result.SetNull(i, true) + } else { + resDurations[i] = resDuration.Duration + } + } + return nil +} + +func (b *builtinAddDateDurationStringSig) vectorized() bool { + return true +} + +func (b *builtinAddDateDurationIntSig) vecEvalDuration(input *chunk.Chunk, result *chunk.Column) error { + n := input.NumRows() + unit, isNull, err := b.args[2].EvalString(b.ctx, chunk.Row{}) + if err != nil { + return err + } + if isNull { + result.ResizeGoDuration(n, true) + return nil + } + + intervalBuf, err := b.bufAllocator.get(types.ETString, n) + if err != nil { + return err + } + defer b.bufAllocator.put(intervalBuf) + if err := b.vecGetIntervalFromInt(&b.baseBuiltinFunc, input, unit, intervalBuf); err != nil { + return err + } + + result.ResizeGoDuration(n, false) + if err := b.args[0].VecEvalDuration(b.ctx, input, result); err != nil { + return err + } + + result.MergeNulls(intervalBuf) + resDurations := result.GoDurations() + iterDuration := types.Duration{Fsp: types.MaxFsp} + for i := 0; i < n; i++ { + if result.IsNull(i) { + continue + } + iterDuration.Duration = resDurations[i] + resDuration, isNull, err := b.addDuration(b.ctx, iterDuration, intervalBuf.GetString(i), unit) + if err != nil { + return err + } + if isNull { + result.SetNull(i, true) + } else { + resDurations[i] = resDuration.Duration + } + } + return nil +} + +func (b *builtinAddDateDurationIntSig) vectorized() bool { + return true +} + +func (b *builtinAddDateDurationRealSig) vecEvalDuration(input *chunk.Chunk, result *chunk.Column) error { + n := input.NumRows() + unit, isNull, err := b.args[2].EvalString(b.ctx, chunk.Row{}) + if err != nil { + return err + } + if isNull { + result.ResizeGoDuration(n, true) + return nil + } + + intervalBuf, err := b.bufAllocator.get(types.ETString, n) + if err != nil { + return err + } + defer b.bufAllocator.put(intervalBuf) + if err := b.vecGetIntervalFromReal(&b.baseBuiltinFunc, input, unit, intervalBuf); err != nil { + return err + } + + result.ResizeGoDuration(n, false) + if err := b.args[0].VecEvalDuration(b.ctx, input, result); err != nil { + return err + } + + result.MergeNulls(intervalBuf) + resDurations := result.GoDurations() + iterDuration := types.Duration{Fsp: types.MaxFsp} + for i := 0; i < n; i++ { + if result.IsNull(i) { + continue + } + iterDuration.Duration = resDurations[i] + resDuration, isNull, err := b.addDuration(b.ctx, iterDuration, intervalBuf.GetString(i), unit) + if err != nil { + return err + } + if isNull { + result.SetNull(i, true) + } else { + resDurations[i] = resDuration.Duration + } + } + return nil +} + +func (b *builtinAddDateDurationRealSig) vectorized() bool { + return true +} + +func (b *builtinAddDateDurationDecimalSig) vecEvalDuration(input *chunk.Chunk, result *chunk.Column) error { + n := input.NumRows() + unit, isNull, err := b.args[2].EvalString(b.ctx, chunk.Row{}) + if err != nil { + return err + } + if isNull { + result.ResizeGoDuration(n, true) + return nil + } + + intervalBuf, err := b.bufAllocator.get(types.ETString, n) + if err != nil { + return err + } + defer b.bufAllocator.put(intervalBuf) + if err := b.vecGetIntervalFromDecimal(&b.baseBuiltinFunc, input, unit, intervalBuf); err != nil { + return err + } + + result.ResizeGoDuration(n, false) + if err := 
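The Duration-typed variants above differ from the date-typed ones in two details: the first operand is evaluated straight into the result column (`b.args[0].VecEvalDuration(..., result)`), so it needs no scratch buffer, and each raw Go duration is wrapped in a reusable `types.Duration{Fsp: types.MaxFsp}` before `b.addDuration` runs. A minimal sketch of that in-place rewrite; `addDur` stands in for `b.addDuration` and handles only SECOND so the example stays self-contained:

```go
package main

import (
	"fmt"
	"time"
)

// addDur is a stand-in for b.addDuration, restricted to the SECOND unit.
func addDur(d time.Duration, interval, unit string) (time.Duration, error) {
	if unit != "SECOND" {
		return d, fmt.Errorf("unit %q not handled in this sketch", unit)
	}
	var secs int64
	if _, err := fmt.Sscanf(interval, "%d", &secs); err != nil {
		return d, err
	}
	return d + time.Duration(secs)*time.Second, nil
}

func main() {
	// durs doubles as both input and output, like result.GoDurations().
	durs := []time.Duration{90 * time.Second, 10 * time.Second}
	intervals := []string{"30", "5"}
	for i := range durs {
		res, err := addDur(durs[i], intervals[i], "SECOND")
		if err != nil {
			panic(err)
		}
		durs[i] = res // write back in place
	}
	fmt.Println(durs) // [2m0s 15s]
}
```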
b.args[0].VecEvalDuration(b.ctx, input, result); err != nil { + return err + } + + result.MergeNulls(intervalBuf) + resDurations := result.GoDurations() + iterDuration := types.Duration{Fsp: types.MaxFsp} + for i := 0; i < n; i++ { + if result.IsNull(i) { + continue + } + iterDuration.Duration = resDurations[i] + resDuration, isNull, err := b.addDuration(b.ctx, iterDuration, intervalBuf.GetString(i), unit) + if err != nil { + return err + } + if isNull { + result.SetNull(i, true) + } else { + resDurations[i] = resDuration.Duration + } + } + return nil +} + +func (b *builtinAddDateDurationDecimalSig) vectorized() bool { + return true +} + +func (b *builtinSubDateStringStringSig) vecEvalTime(input *chunk.Chunk, result *chunk.Column) error { + n := input.NumRows() + unit, isNull, err := b.args[2].EvalString(b.ctx, chunk.Row{}) + if err != nil { + return err + } + if isNull { + result.ResizeTime(n, true) + return nil + } + + intervalBuf, err := b.bufAllocator.get(types.ETString, n) + if err != nil { + return err + } + defer b.bufAllocator.put(intervalBuf) + if err := b.vecGetIntervalFromString(&b.baseBuiltinFunc, input, unit, intervalBuf); err != nil { + return err + } + + if err := b.vecGetDateFromString(&b.baseBuiltinFunc, input, unit, result); err != nil { + return err + } + + result.MergeNulls(intervalBuf) + resDates := result.Times() + for i := 0; i < n; i++ { + if result.IsNull(i) { + continue + } + resDate, isNull, err := b.sub(b.ctx, resDates[i], intervalBuf.GetString(i), unit) + if err != nil { + return err + } + if isNull { + result.SetNull(i, true) + } else { + resDates[i] = resDate + } + } + return nil +} + +func (b *builtinSubDateStringStringSig) vectorized() bool { + return true +} + +func (b *builtinSubDateStringIntSig) vecEvalTime(input *chunk.Chunk, result *chunk.Column) error { + n := input.NumRows() + unit, isNull, err := b.args[2].EvalString(b.ctx, chunk.Row{}) + if err != nil { + return err + } + if isNull { + result.ResizeTime(n, true) + return nil + } + + intervalBuf, err := b.bufAllocator.get(types.ETString, n) + if err != nil { + return err + } + defer b.bufAllocator.put(intervalBuf) + if err := b.vecGetIntervalFromInt(&b.baseBuiltinFunc, input, unit, intervalBuf); err != nil { + return err + } + + if err := b.vecGetDateFromString(&b.baseBuiltinFunc, input, unit, result); err != nil { + return err + } + + result.MergeNulls(intervalBuf) + resDates := result.Times() + for i := 0; i < n; i++ { + if result.IsNull(i) { + continue + } + resDate, isNull, err := b.sub(b.ctx, resDates[i], intervalBuf.GetString(i), unit) + if err != nil { + return err + } + if isNull { + result.SetNull(i, true) + } else { + resDates[i] = resDate + } + } + return nil +} + +func (b *builtinSubDateStringIntSig) vectorized() bool { + return true +} + +func (b *builtinSubDateStringRealSig) vecEvalTime(input *chunk.Chunk, result *chunk.Column) error { + n := input.NumRows() + unit, isNull, err := b.args[2].EvalString(b.ctx, chunk.Row{}) + if err != nil { + return err + } + if isNull { + result.ResizeTime(n, true) + return nil + } + + intervalBuf, err := b.bufAllocator.get(types.ETString, n) + if err != nil { + return err + } + defer b.bufAllocator.put(intervalBuf) + if err := b.vecGetIntervalFromReal(&b.baseBuiltinFunc, input, unit, intervalBuf); err != nil { + return err + } + + if err := b.vecGetDateFromString(&b.baseBuiltinFunc, input, unit, result); err != nil { + return err + } + + result.MergeNulls(intervalBuf) + resDates := result.Times() + for i := 0; i < n; i++ { + if result.IsNull(i) { + 
continue + } + resDate, isNull, err := b.sub(b.ctx, resDates[i], intervalBuf.GetString(i), unit) + if err != nil { + return err + } + if isNull { + result.SetNull(i, true) + } else { + resDates[i] = resDate + } + } + return nil +} + +func (b *builtinSubDateStringRealSig) vectorized() bool { + return true +} + +func (b *builtinSubDateStringDecimalSig) vecEvalTime(input *chunk.Chunk, result *chunk.Column) error { + n := input.NumRows() + unit, isNull, err := b.args[2].EvalString(b.ctx, chunk.Row{}) + if err != nil { + return err + } + if isNull { + result.ResizeTime(n, true) + return nil + } + + intervalBuf, err := b.bufAllocator.get(types.ETString, n) + if err != nil { + return err + } + defer b.bufAllocator.put(intervalBuf) + if err := b.vecGetIntervalFromDecimal(&b.baseBuiltinFunc, input, unit, intervalBuf); err != nil { + return err + } + + if err := b.vecGetDateFromString(&b.baseBuiltinFunc, input, unit, result); err != nil { + return err + } + + result.MergeNulls(intervalBuf) + resDates := result.Times() + for i := 0; i < n; i++ { + if result.IsNull(i) { + continue + } + resDate, isNull, err := b.sub(b.ctx, resDates[i], intervalBuf.GetString(i), unit) + if err != nil { + return err + } + if isNull { + result.SetNull(i, true) + } else { + resDates[i] = resDate + } + } + return nil +} + +func (b *builtinSubDateStringDecimalSig) vectorized() bool { + return true +} + +func (b *builtinSubDateIntStringSig) vecEvalTime(input *chunk.Chunk, result *chunk.Column) error { + n := input.NumRows() + unit, isNull, err := b.args[2].EvalString(b.ctx, chunk.Row{}) + if err != nil { + return err + } + if isNull { + result.ResizeTime(n, true) + return nil + } + + intervalBuf, err := b.bufAllocator.get(types.ETString, n) + if err != nil { + return err + } + defer b.bufAllocator.put(intervalBuf) + if err := b.vecGetIntervalFromString(&b.baseBuiltinFunc, input, unit, intervalBuf); err != nil { + return err + } + + if err := b.vecGetDateFromInt(&b.baseBuiltinFunc, input, unit, result); err != nil { + return err + } + + result.MergeNulls(intervalBuf) + resDates := result.Times() + for i := 0; i < n; i++ { + if result.IsNull(i) { + continue + } + resDate, isNull, err := b.sub(b.ctx, resDates[i], intervalBuf.GetString(i), unit) + if err != nil { + return err + } + if isNull { + result.SetNull(i, true) + } else { + resDates[i] = resDate + } + } + return nil +} + +func (b *builtinSubDateIntStringSig) vectorized() bool { + return true +} + +func (b *builtinSubDateIntIntSig) vecEvalTime(input *chunk.Chunk, result *chunk.Column) error { + n := input.NumRows() + unit, isNull, err := b.args[2].EvalString(b.ctx, chunk.Row{}) + if err != nil { + return err + } + if isNull { + result.ResizeTime(n, true) + return nil + } + + intervalBuf, err := b.bufAllocator.get(types.ETString, n) + if err != nil { + return err + } + defer b.bufAllocator.put(intervalBuf) + if err := b.vecGetIntervalFromInt(&b.baseBuiltinFunc, input, unit, intervalBuf); err != nil { + return err + } + + if err := b.vecGetDateFromInt(&b.baseBuiltinFunc, input, unit, result); err != nil { + return err + } + + result.MergeNulls(intervalBuf) + resDates := result.Times() + for i := 0; i < n; i++ { + if result.IsNull(i) { + continue + } + resDate, isNull, err := b.sub(b.ctx, resDates[i], intervalBuf.GetString(i), unit) + if err != nil { + return err + } + if isNull { + result.SetNull(i, true) + } else { + resDates[i] = resDate + } + } + return nil +} + +func (b *builtinSubDateIntIntSig) vectorized() bool { + return true +} + +func (b *builtinSubDateIntRealSig) 
vecEvalTime(input *chunk.Chunk, result *chunk.Column) error { + n := input.NumRows() + unit, isNull, err := b.args[2].EvalString(b.ctx, chunk.Row{}) + if err != nil { + return err + } + if isNull { + result.ResizeTime(n, true) + return nil + } + + intervalBuf, err := b.bufAllocator.get(types.ETString, n) + if err != nil { + return err + } + defer b.bufAllocator.put(intervalBuf) + if err := b.vecGetIntervalFromReal(&b.baseBuiltinFunc, input, unit, intervalBuf); err != nil { + return err + } + + if err := b.vecGetDateFromInt(&b.baseBuiltinFunc, input, unit, result); err != nil { + return err + } + + result.MergeNulls(intervalBuf) + resDates := result.Times() + for i := 0; i < n; i++ { + if result.IsNull(i) { + continue + } + resDate, isNull, err := b.sub(b.ctx, resDates[i], intervalBuf.GetString(i), unit) + if err != nil { + return err + } + if isNull { + result.SetNull(i, true) + } else { + resDates[i] = resDate + } + } + return nil +} + +func (b *builtinSubDateIntRealSig) vectorized() bool { + return true +} + +func (b *builtinSubDateIntDecimalSig) vecEvalTime(input *chunk.Chunk, result *chunk.Column) error { + n := input.NumRows() + unit, isNull, err := b.args[2].EvalString(b.ctx, chunk.Row{}) + if err != nil { + return err + } + if isNull { + result.ResizeTime(n, true) + return nil + } + + intervalBuf, err := b.bufAllocator.get(types.ETString, n) + if err != nil { + return err + } + defer b.bufAllocator.put(intervalBuf) + if err := b.vecGetIntervalFromDecimal(&b.baseBuiltinFunc, input, unit, intervalBuf); err != nil { + return err + } + + if err := b.vecGetDateFromInt(&b.baseBuiltinFunc, input, unit, result); err != nil { + return err + } + + result.MergeNulls(intervalBuf) + resDates := result.Times() + for i := 0; i < n; i++ { + if result.IsNull(i) { + continue + } + resDate, isNull, err := b.sub(b.ctx, resDates[i], intervalBuf.GetString(i), unit) + if err != nil { + return err + } + if isNull { + result.SetNull(i, true) + } else { + resDates[i] = resDate + } + } + return nil +} + +func (b *builtinSubDateIntDecimalSig) vectorized() bool { + return true +} + +func (b *builtinSubDateDatetimeStringSig) vecEvalTime(input *chunk.Chunk, result *chunk.Column) error { + n := input.NumRows() + unit, isNull, err := b.args[2].EvalString(b.ctx, chunk.Row{}) + if err != nil { + return err + } + if isNull { + result.ResizeTime(n, true) + return nil + } + + intervalBuf, err := b.bufAllocator.get(types.ETString, n) + if err != nil { + return err + } + defer b.bufAllocator.put(intervalBuf) + if err := b.vecGetIntervalFromString(&b.baseBuiltinFunc, input, unit, intervalBuf); err != nil { + return err + } + + if err := b.vecGetDateFromDatetime(&b.baseBuiltinFunc, input, unit, result); err != nil { + return err + } + + result.MergeNulls(intervalBuf) + resDates := result.Times() + for i := 0; i < n; i++ { + if result.IsNull(i) { + continue + } + resDate, isNull, err := b.sub(b.ctx, resDates[i], intervalBuf.GetString(i), unit) + if err != nil { + return err + } + if isNull { + result.SetNull(i, true) + } else { + resDates[i] = resDate + } + } + return nil +} + +func (b *builtinSubDateDatetimeStringSig) vectorized() bool { + return true +} + +func (b *builtinSubDateDatetimeIntSig) vecEvalTime(input *chunk.Chunk, result *chunk.Column) error { + n := input.NumRows() + unit, isNull, err := b.args[2].EvalString(b.ctx, chunk.Row{}) + if err != nil { + return err + } + if isNull { + result.ResizeTime(n, true) + return nil + } + + intervalBuf, err := b.bufAllocator.get(types.ETString, n) + if err != nil { + return 
err + } + defer b.bufAllocator.put(intervalBuf) + if err := b.vecGetIntervalFromInt(&b.baseBuiltinFunc, input, unit, intervalBuf); err != nil { + return err + } + + if err := b.vecGetDateFromDatetime(&b.baseBuiltinFunc, input, unit, result); err != nil { + return err + } + + result.MergeNulls(intervalBuf) + resDates := result.Times() + for i := 0; i < n; i++ { + if result.IsNull(i) { + continue + } + resDate, isNull, err := b.sub(b.ctx, resDates[i], intervalBuf.GetString(i), unit) + if err != nil { + return err + } + if isNull { + result.SetNull(i, true) + } else { + resDates[i] = resDate + } + } + return nil +} + +func (b *builtinSubDateDatetimeIntSig) vectorized() bool { + return true +} + +func (b *builtinSubDateDatetimeRealSig) vecEvalTime(input *chunk.Chunk, result *chunk.Column) error { + n := input.NumRows() + unit, isNull, err := b.args[2].EvalString(b.ctx, chunk.Row{}) + if err != nil { + return err + } + if isNull { + result.ResizeTime(n, true) + return nil + } + + intervalBuf, err := b.bufAllocator.get(types.ETString, n) + if err != nil { + return err + } + defer b.bufAllocator.put(intervalBuf) + if err := b.vecGetIntervalFromReal(&b.baseBuiltinFunc, input, unit, intervalBuf); err != nil { + return err + } + + if err := b.vecGetDateFromDatetime(&b.baseBuiltinFunc, input, unit, result); err != nil { + return err + } + + result.MergeNulls(intervalBuf) + resDates := result.Times() + for i := 0; i < n; i++ { + if result.IsNull(i) { + continue + } + resDate, isNull, err := b.sub(b.ctx, resDates[i], intervalBuf.GetString(i), unit) + if err != nil { + return err + } + if isNull { + result.SetNull(i, true) + } else { + resDates[i] = resDate + } + } + return nil +} + +func (b *builtinSubDateDatetimeRealSig) vectorized() bool { + return true +} + +func (b *builtinSubDateDatetimeDecimalSig) vecEvalTime(input *chunk.Chunk, result *chunk.Column) error { + n := input.NumRows() + unit, isNull, err := b.args[2].EvalString(b.ctx, chunk.Row{}) + if err != nil { + return err + } + if isNull { + result.ResizeTime(n, true) + return nil + } + + intervalBuf, err := b.bufAllocator.get(types.ETString, n) + if err != nil { + return err + } + defer b.bufAllocator.put(intervalBuf) + if err := b.vecGetIntervalFromDecimal(&b.baseBuiltinFunc, input, unit, intervalBuf); err != nil { + return err + } + + if err := b.vecGetDateFromDatetime(&b.baseBuiltinFunc, input, unit, result); err != nil { + return err + } + + result.MergeNulls(intervalBuf) + resDates := result.Times() + for i := 0; i < n; i++ { + if result.IsNull(i) { + continue + } + resDate, isNull, err := b.sub(b.ctx, resDates[i], intervalBuf.GetString(i), unit) + if err != nil { + return err + } + if isNull { + result.SetNull(i, true) + } else { + resDates[i] = resDate + } + } + return nil +} + +func (b *builtinSubDateDatetimeDecimalSig) vectorized() bool { + return true +} + +func (b *builtinSubDateDurationStringSig) vecEvalDuration(input *chunk.Chunk, result *chunk.Column) error { + n := input.NumRows() + unit, isNull, err := b.args[2].EvalString(b.ctx, chunk.Row{}) + if err != nil { + return err + } + if isNull { + result.ResizeGoDuration(n, true) + return nil + } + + intervalBuf, err := b.bufAllocator.get(types.ETString, n) + if err != nil { + return err + } + defer b.bufAllocator.put(intervalBuf) + if err := b.vecGetIntervalFromString(&b.baseBuiltinFunc, input, unit, intervalBuf); err != nil { + return err + } + + result.ResizeGoDuration(n, false) + if err := b.args[0].VecEvalDuration(b.ctx, input, result); err != nil { + return err + } + + 
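The SubDate family that follows is the AddDate template verbatim with `b.add`/`b.addDuration` swapped for `b.sub`/`b.subDuration`; nothing else changes. Hand-written code might fold the pair into one helper parameterized by the operation, roughly as below; the generator presumably keeps them separate because each signature is expanded at code-generation time, trading source size for a monomorphic hot loop:

```go
package main

import (
	"fmt"
	"time"
)

// applyDays is the shared loop body; op selects AddDate or SubDate behavior.
func applyDays(dates []time.Time, days []int, op func(time.Time, int) time.Time) {
	for i := range dates {
		dates[i] = op(dates[i], days[i])
	}
}

func main() {
	add := func(t time.Time, d int) time.Time { return t.AddDate(0, 0, d) }
	sub := func(t time.Time, d int) time.Time { return t.AddDate(0, 0, -d) }

	d := time.Date(2019, 10, 10, 0, 0, 0, 0, time.UTC)
	a, s := []time.Time{d}, []time.Time{d}
	applyDays(a, []int{3}, add)
	applyDays(s, []int{3}, sub)
	// 2019-10-13 and 2019-10-07: mirrored results from the shared body.
	fmt.Println(a[0].Format("2006-01-02"), s[0].Format("2006-01-02"))
}
```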
result.MergeNulls(intervalBuf) + resDurations := result.GoDurations() + iterDuration := types.Duration{Fsp: types.MaxFsp} + for i := 0; i < n; i++ { + if result.IsNull(i) { + continue + } + iterDuration.Duration = resDurations[i] + resDuration, isNull, err := b.subDuration(b.ctx, iterDuration, intervalBuf.GetString(i), unit) + if err != nil { + return err + } + if isNull { + result.SetNull(i, true) + } else { + resDurations[i] = resDuration.Duration + } + } + return nil +} + +func (b *builtinSubDateDurationStringSig) vectorized() bool { + return true +} + +func (b *builtinSubDateDurationIntSig) vecEvalDuration(input *chunk.Chunk, result *chunk.Column) error { + n := input.NumRows() + unit, isNull, err := b.args[2].EvalString(b.ctx, chunk.Row{}) + if err != nil { + return err + } + if isNull { + result.ResizeGoDuration(n, true) + return nil + } + + intervalBuf, err := b.bufAllocator.get(types.ETString, n) + if err != nil { + return err + } + defer b.bufAllocator.put(intervalBuf) + if err := b.vecGetIntervalFromInt(&b.baseBuiltinFunc, input, unit, intervalBuf); err != nil { + return err + } + + result.ResizeGoDuration(n, false) + if err := b.args[0].VecEvalDuration(b.ctx, input, result); err != nil { + return err + } + + result.MergeNulls(intervalBuf) + resDurations := result.GoDurations() + iterDuration := types.Duration{Fsp: types.MaxFsp} + for i := 0; i < n; i++ { + if result.IsNull(i) { + continue + } + iterDuration.Duration = resDurations[i] + resDuration, isNull, err := b.subDuration(b.ctx, iterDuration, intervalBuf.GetString(i), unit) + if err != nil { + return err + } + if isNull { + result.SetNull(i, true) + } else { + resDurations[i] = resDuration.Duration + } + } + return nil +} + +func (b *builtinSubDateDurationIntSig) vectorized() bool { + return true +} + +func (b *builtinSubDateDurationRealSig) vecEvalDuration(input *chunk.Chunk, result *chunk.Column) error { + n := input.NumRows() + unit, isNull, err := b.args[2].EvalString(b.ctx, chunk.Row{}) + if err != nil { + return err + } + if isNull { + result.ResizeGoDuration(n, true) + return nil + } + + intervalBuf, err := b.bufAllocator.get(types.ETString, n) + if err != nil { + return err + } + defer b.bufAllocator.put(intervalBuf) + if err := b.vecGetIntervalFromReal(&b.baseBuiltinFunc, input, unit, intervalBuf); err != nil { + return err + } + + result.ResizeGoDuration(n, false) + if err := b.args[0].VecEvalDuration(b.ctx, input, result); err != nil { + return err + } + + result.MergeNulls(intervalBuf) + resDurations := result.GoDurations() + iterDuration := types.Duration{Fsp: types.MaxFsp} + for i := 0; i < n; i++ { + if result.IsNull(i) { + continue + } + iterDuration.Duration = resDurations[i] + resDuration, isNull, err := b.subDuration(b.ctx, iterDuration, intervalBuf.GetString(i), unit) + if err != nil { + return err + } + if isNull { + result.SetNull(i, true) + } else { + resDurations[i] = resDuration.Duration + } + } + return nil +} + +func (b *builtinSubDateDurationRealSig) vectorized() bool { + return true +} + +func (b *builtinSubDateDurationDecimalSig) vecEvalDuration(input *chunk.Chunk, result *chunk.Column) error { + n := input.NumRows() + unit, isNull, err := b.args[2].EvalString(b.ctx, chunk.Row{}) + if err != nil { + return err + } + if isNull { + result.ResizeGoDuration(n, true) + return nil + } + + intervalBuf, err := b.bufAllocator.get(types.ETString, n) + if err != nil { + return err + } + defer b.bufAllocator.put(intervalBuf) + if err := b.vecGetIntervalFromDecimal(&b.baseBuiltinFunc, input, unit, 
intervalBuf); err != nil { + return err + } + + result.ResizeGoDuration(n, false) + if err := b.args[0].VecEvalDuration(b.ctx, input, result); err != nil { + return err + } + + result.MergeNulls(intervalBuf) + resDurations := result.GoDurations() + iterDuration := types.Duration{Fsp: types.MaxFsp} + for i := 0; i < n; i++ { + if result.IsNull(i) { + continue + } + iterDuration.Duration = resDurations[i] + resDuration, isNull, err := b.subDuration(b.ctx, iterDuration, intervalBuf.GetString(i), unit) + if err != nil { + return err + } + if isNull { + result.SetNull(i, true) + } else { + resDurations[i] = resDuration.Duration + } + } + return nil +} + +func (b *builtinSubDateDurationDecimalSig) vectorized() bool { + return true +} diff --git a/expression/builtin_time_vec_generated_test.go b/expression/builtin_time_vec_generated_test.go index 60dff916aca61..da3fd7e8f41af 100644 --- a/expression/builtin_time_vec_generated_test.go +++ b/expression/builtin_time_vec_generated_test.go @@ -16,6 +16,7 @@ package expression import ( + "math" "testing" . "github.com/pingcap/check" @@ -158,7 +159,7 @@ var vecBuiltinTimeGeneratedCases = map[string][]vecExprBenchCase{ // builtinDurationDurationTimeDiffSig {retEvalType: types.ETDuration, childrenTypes: []types.EvalType{types.ETDuration, types.ETDuration}}, // builtinDurationStringTimeDiffSig - {retEvalType: types.ETDuration, childrenTypes: []types.EvalType{types.ETDuration, types.ETString}, geners: []dataGenerator{nil, &timeStrGener{Year: 2019, Month: 10}}}, + {retEvalType: types.ETDuration, childrenTypes: []types.EvalType{types.ETDuration, types.ETString}, geners: []dataGenerator{nil, &dateStrGener{Year: 2019, Month: 10}}}, {retEvalType: types.ETDuration, childrenTypes: []types.EvalType{types.ETDuration, types.ETString}, geners: []dataGenerator{nil, &dateTimeStrGener{Year: 2019, Month: 10}}}, {retEvalType: types.ETDuration, childrenTypes: []types.EvalType{types.ETDuration, types.ETString}, geners: []dataGenerator{nil, &dateTimeStrGener{Year: 2019, Month: 10, Fsp: 4}}}, // builtinTimeTimeTimeDiffSig @@ -167,21 +168,6459 @@ var vecBuiltinTimeGeneratedCases = map[string][]vecExprBenchCase{ {retEvalType: types.ETDuration, childrenTypes: []types.EvalType{types.ETTimestamp, types.ETTimestamp}, geners: []dataGenerator{&dateTimeGener{Year: 2019, Month: 10}, &dateTimeGener{Year: 2019, Month: 10}}}, {retEvalType: types.ETDuration, childrenTypes: []types.EvalType{types.ETTimestamp, types.ETDatetime}, geners: []dataGenerator{&dateTimeGener{Year: 2019, Month: 10}, &dateTimeGener{Year: 2019, Month: 10}}}, // builtinTimeStringTimeDiffSig - {retEvalType: types.ETDuration, childrenTypes: []types.EvalType{types.ETDatetime, types.ETString}, geners: []dataGenerator{&dateTimeGener{Year: 2019, Month: 10}, &timeStrGener{Year: 2019, Month: 10}}}, + {retEvalType: types.ETDuration, childrenTypes: []types.EvalType{types.ETDatetime, types.ETString}, geners: []dataGenerator{&dateTimeGener{Year: 2019, Month: 10}, &dateStrGener{Year: 2019, Month: 10}}}, {retEvalType: types.ETDuration, childrenTypes: []types.EvalType{types.ETDatetime, types.ETString}, geners: []dataGenerator{&dateTimeGener{Year: 2019, Month: 10}, &dateTimeStrGener{Year: 2019, Month: 10}}}, - {retEvalType: types.ETDuration, childrenTypes: []types.EvalType{types.ETTimestamp, types.ETString}, geners: []dataGenerator{&dateTimeGener{Year: 2019, Month: 10}, &timeStrGener{Year: 2019, Month: 10}}}, + {retEvalType: types.ETDuration, childrenTypes: []types.EvalType{types.ETTimestamp, types.ETString}, geners: 
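In the test diff here, the TimeDiff cases swap `timeStrGener` for `dateStrGener`, so the string operand is now generated as a date ("2019-10-...") rather than a time of day, matching the datetime operands these signatures compare against. The real generators live in TiDB's benchmark harness; a hypothetical generator in the same spirit, with `dateStrSketch` and its fields invented for illustration (including the `NullRation` spelling the harness actually uses):

```go
package main

import (
	"fmt"
	"math/rand"
)

// dateStrSketch mimics a dateStrGener-style data generator: fixed year and
// month, random day, and a probability of emitting NULL for a row.
type dateStrSketch struct {
	Year, Month int
	NullRation  float64 // probability of a NULL row
	rng         *rand.Rand
}

// gen returns a date string, or ok=false when the row should be NULL.
func (g *dateStrSketch) gen() (s string, ok bool) {
	if g.rng.Float64() < g.NullRation {
		return "", false
	}
	return fmt.Sprintf("%04d-%02d-%02d", g.Year, g.Month, 1+g.rng.Intn(28)), true
}

func main() {
	g := &dateStrSketch{Year: 2019, Month: 10, NullRation: 0.2, rng: rand.New(rand.NewSource(1))}
	for i := 0; i < 4; i++ {
		s, ok := g.gen()
		fmt.Println(s, ok)
	}
}
```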
[]dataGenerator{&dateTimeGener{Year: 2019, Month: 10}, &dateStrGener{Year: 2019, Month: 10}}}, {retEvalType: types.ETDuration, childrenTypes: []types.EvalType{types.ETTimestamp, types.ETString}, geners: []dataGenerator{&dateTimeGener{Year: 2019, Month: 10}, &dateTimeStrGener{Year: 2019, Month: 10}}}, // builtinStringDurationTimeDiffSig - {retEvalType: types.ETDuration, childrenTypes: []types.EvalType{types.ETString, types.ETDuration}, geners: []dataGenerator{&timeStrGener{Year: 2019, Month: 10}, nil}}, + {retEvalType: types.ETDuration, childrenTypes: []types.EvalType{types.ETString, types.ETDuration}, geners: []dataGenerator{&dateStrGener{Year: 2019, Month: 10}, nil}}, {retEvalType: types.ETDuration, childrenTypes: []types.EvalType{types.ETString, types.ETDuration}, geners: []dataGenerator{&dateTimeStrGener{Year: 2019, Month: 10}, nil}}, // builtinStringTimeTimeDiffSig - {retEvalType: types.ETDuration, childrenTypes: []types.EvalType{types.ETString, types.ETDatetime}, geners: []dataGenerator{&timeStrGener{Year: 2019, Month: 10}, &dateTimeGener{Year: 2019, Month: 10}}}, + {retEvalType: types.ETDuration, childrenTypes: []types.EvalType{types.ETString, types.ETDatetime}, geners: []dataGenerator{&dateStrGener{Year: 2019, Month: 10}, &dateTimeGener{Year: 2019, Month: 10}}}, {retEvalType: types.ETDuration, childrenTypes: []types.EvalType{types.ETString, types.ETDatetime}, geners: []dataGenerator{&dateTimeStrGener{Year: 2019, Month: 10}, &dateTimeGener{Year: 2019, Month: 10}}}, - {retEvalType: types.ETDuration, childrenTypes: []types.EvalType{types.ETString, types.ETTimestamp}, geners: []dataGenerator{&timeStrGener{Year: 2019, Month: 10}, &dateTimeGener{Year: 2019, Month: 10}}}, + {retEvalType: types.ETDuration, childrenTypes: []types.EvalType{types.ETString, types.ETTimestamp}, geners: []dataGenerator{&dateStrGener{Year: 2019, Month: 10}, &dateTimeGener{Year: 2019, Month: 10}}}, {retEvalType: types.ETDuration, childrenTypes: []types.EvalType{types.ETString, types.ETTimestamp}, geners: []dataGenerator{&dateTimeStrGener{Year: 2019, Month: 10}, &dateTimeGener{Year: 2019, Month: 10}}}, // builtinStringStringTimeDiffSig - {retEvalType: types.ETDuration, childrenTypes: []types.EvalType{types.ETString, types.ETString}, geners: []dataGenerator{&timeStrGener{Year: 2019, Month: 10}, &dateTimeStrGener{Year: 2019, Month: 10}}}, - {retEvalType: types.ETDuration, childrenTypes: []types.EvalType{types.ETString, types.ETString}, geners: []dataGenerator{&dateTimeStrGener{Year: 2019, Month: 10}, &timeStrGener{Year: 2019, Month: 10}}}, + {retEvalType: types.ETDuration, childrenTypes: []types.EvalType{types.ETString, types.ETString}, geners: []dataGenerator{&dateStrGener{Year: 2019, Month: 10}, &dateTimeStrGener{Year: 2019, Month: 10}}}, + {retEvalType: types.ETDuration, childrenTypes: []types.EvalType{types.ETString, types.ETString}, geners: []dataGenerator{&dateTimeStrGener{Year: 2019, Month: 10}, &dateStrGener{Year: 2019, Month: 10}}}, + }, + + ast.AddDate: { + // builtinAddDateStringStringSig + { + retEvalType: types.ETDatetime, + childrenTypes: []types.EvalType{types.ETString, types.ETString, types.ETString}, + geners: []dataGenerator{ + &dateStrGener{NullRation: 0.2}, + &numStrGener{rangeInt64Gener{math.MinInt32 + 1, math.MaxInt32}}, + }, + constants: []*Constant{nil, nil, {Value: types.NewStringDatum("MICROSECOND"), RetType: types.NewFieldType(mysql.TypeString)}}, + chunkSize: 128, + }, + { + retEvalType: types.ETDatetime, + childrenTypes: []types.EvalType{types.ETString, types.ETString, types.ETString}, + 
geners: []dataGenerator{ + &dateStrGener{NullRation: 0.2}, + &numStrGener{rangeInt64Gener{math.MinInt32 + 1, math.MaxInt32}}, + }, + constants: []*Constant{nil, nil, {Value: types.NewStringDatum("SECOND"), RetType: types.NewFieldType(mysql.TypeString)}}, + chunkSize: 128, + }, + { + retEvalType: types.ETDatetime, + childrenTypes: []types.EvalType{types.ETString, types.ETString, types.ETString}, + geners: []dataGenerator{ + &dateStrGener{NullRation: 0.2}, + &numStrGener{rangeInt64Gener{math.MinInt32 + 1, math.MaxInt32}}, + }, + constants: []*Constant{nil, nil, {Value: types.NewStringDatum("MINUTE"), RetType: types.NewFieldType(mysql.TypeString)}}, + chunkSize: 128, + }, + { + retEvalType: types.ETDatetime, + childrenTypes: []types.EvalType{types.ETString, types.ETString, types.ETString}, + geners: []dataGenerator{ + &dateStrGener{NullRation: 0.2}, + &numStrGener{rangeInt64Gener{math.MinInt32 + 1, math.MaxInt32}}, + }, + constants: []*Constant{nil, nil, {Value: types.NewStringDatum("HOUR"), RetType: types.NewFieldType(mysql.TypeString)}}, + chunkSize: 128, + }, + { + retEvalType: types.ETDatetime, + childrenTypes: []types.EvalType{types.ETString, types.ETString, types.ETString}, + geners: []dataGenerator{ + &dateStrGener{NullRation: 0.2}, + &numStrGener{rangeInt64Gener{math.MinInt32 + 1, math.MaxInt32}}, + }, + constants: []*Constant{nil, nil, {Value: types.NewStringDatum("DAY"), RetType: types.NewFieldType(mysql.TypeString)}}, + chunkSize: 128, + }, + { + retEvalType: types.ETDatetime, + childrenTypes: []types.EvalType{types.ETString, types.ETString, types.ETString}, + geners: []dataGenerator{ + &dateStrGener{NullRation: 0.2}, + &numStrGener{rangeInt64Gener{math.MinInt32 + 1, math.MaxInt32}}, + }, + constants: []*Constant{nil, nil, {Value: types.NewStringDatum("WEEK"), RetType: types.NewFieldType(mysql.TypeString)}}, + chunkSize: 128, + }, + { + retEvalType: types.ETDatetime, + childrenTypes: []types.EvalType{types.ETString, types.ETString, types.ETString}, + geners: []dataGenerator{ + &dateStrGener{NullRation: 0.2}, + &numStrGener{rangeInt64Gener{math.MinInt32 + 1, math.MaxInt32}}, + }, + constants: []*Constant{nil, nil, {Value: types.NewStringDatum("MONTH"), RetType: types.NewFieldType(mysql.TypeString)}}, + chunkSize: 128, + }, + { + retEvalType: types.ETDatetime, + childrenTypes: []types.EvalType{types.ETString, types.ETString, types.ETString}, + geners: []dataGenerator{ + &dateStrGener{NullRation: 0.2}, + &numStrGener{rangeInt64Gener{math.MinInt32 + 1, math.MaxInt32}}, + }, + constants: []*Constant{nil, nil, {Value: types.NewStringDatum("QUARTER"), RetType: types.NewFieldType(mysql.TypeString)}}, + chunkSize: 128, + }, + { + retEvalType: types.ETDatetime, + childrenTypes: []types.EvalType{types.ETString, types.ETString, types.ETString}, + geners: []dataGenerator{ + &dateStrGener{NullRation: 0.2}, + &numStrGener{rangeInt64Gener{math.MinInt32 + 1, math.MaxInt32}}, + }, + constants: []*Constant{nil, nil, {Value: types.NewStringDatum("YEAR"), RetType: types.NewFieldType(mysql.TypeString)}}, + chunkSize: 128, + }, + { + retEvalType: types.ETDatetime, + childrenTypes: []types.EvalType{types.ETString, types.ETString, types.ETString}, + geners: []dataGenerator{ + &dateStrGener{NullRation: 0.2}, + &numStrGener{rangeInt64Gener{math.MinInt32 + 1, math.MaxInt32}}, + }, + constants: []*Constant{nil, nil, {Value: types.NewStringDatum("SECOND_MICROSECOND"), RetType: types.NewFieldType(mysql.TypeString)}}, + chunkSize: 128, + }, + { + retEvalType: types.ETDatetime, + childrenTypes: 
[]types.EvalType{types.ETString, types.ETString, types.ETString}, + geners: []dataGenerator{ + &dateStrGener{NullRation: 0.2}, + &numStrGener{rangeInt64Gener{math.MinInt32 + 1, math.MaxInt32}}, + }, + constants: []*Constant{nil, nil, {Value: types.NewStringDatum("MINUTE_MICROSECOND"), RetType: types.NewFieldType(mysql.TypeString)}}, + chunkSize: 128, + }, + { + retEvalType: types.ETDatetime, + childrenTypes: []types.EvalType{types.ETString, types.ETString, types.ETString}, + geners: []dataGenerator{ + &dateStrGener{NullRation: 0.2}, + &numStrGener{rangeInt64Gener{math.MinInt32 + 1, math.MaxInt32}}, + }, + constants: []*Constant{nil, nil, {Value: types.NewStringDatum("MINUTE_SECOND"), RetType: types.NewFieldType(mysql.TypeString)}}, + chunkSize: 128, + }, + { + retEvalType: types.ETDatetime, + childrenTypes: []types.EvalType{types.ETString, types.ETString, types.ETString}, + geners: []dataGenerator{ + &dateStrGener{NullRation: 0.2}, + &numStrGener{rangeInt64Gener{math.MinInt32 + 1, math.MaxInt32}}, + }, + constants: []*Constant{nil, nil, {Value: types.NewStringDatum("HOUR_MICROSECOND"), RetType: types.NewFieldType(mysql.TypeString)}}, + chunkSize: 128, + }, + { + retEvalType: types.ETDatetime, + childrenTypes: []types.EvalType{types.ETString, types.ETString, types.ETString}, + geners: []dataGenerator{ + &dateStrGener{NullRation: 0.2}, + &numStrGener{rangeInt64Gener{math.MinInt32 + 1, math.MaxInt32}}, + }, + constants: []*Constant{nil, nil, {Value: types.NewStringDatum("HOUR_SECOND"), RetType: types.NewFieldType(mysql.TypeString)}}, + chunkSize: 128, + }, + { + retEvalType: types.ETDatetime, + childrenTypes: []types.EvalType{types.ETString, types.ETString, types.ETString}, + geners: []dataGenerator{ + &dateStrGener{NullRation: 0.2}, + &numStrGener{rangeInt64Gener{math.MinInt32 + 1, math.MaxInt32}}, + }, + constants: []*Constant{nil, nil, {Value: types.NewStringDatum("HOUR_MINUTE"), RetType: types.NewFieldType(mysql.TypeString)}}, + chunkSize: 128, + }, + { + retEvalType: types.ETDatetime, + childrenTypes: []types.EvalType{types.ETString, types.ETString, types.ETString}, + geners: []dataGenerator{ + &dateStrGener{NullRation: 0.2}, + &numStrGener{rangeInt64Gener{math.MinInt32 + 1, math.MaxInt32}}, + }, + constants: []*Constant{nil, nil, {Value: types.NewStringDatum("DAY_MICROSECOND"), RetType: types.NewFieldType(mysql.TypeString)}}, + chunkSize: 128, + }, + { + retEvalType: types.ETDatetime, + childrenTypes: []types.EvalType{types.ETString, types.ETString, types.ETString}, + geners: []dataGenerator{ + &dateStrGener{NullRation: 0.2}, + &numStrGener{rangeInt64Gener{math.MinInt32 + 1, math.MaxInt32}}, + }, + constants: []*Constant{nil, nil, {Value: types.NewStringDatum("DAY_SECOND"), RetType: types.NewFieldType(mysql.TypeString)}}, + chunkSize: 128, + }, + { + retEvalType: types.ETDatetime, + childrenTypes: []types.EvalType{types.ETString, types.ETString, types.ETString}, + geners: []dataGenerator{ + &dateStrGener{NullRation: 0.2}, + &numStrGener{rangeInt64Gener{math.MinInt32 + 1, math.MaxInt32}}, + }, + constants: []*Constant{nil, nil, {Value: types.NewStringDatum("DAY_MINUTE"), RetType: types.NewFieldType(mysql.TypeString)}}, + chunkSize: 128, + }, + { + retEvalType: types.ETDatetime, + childrenTypes: []types.EvalType{types.ETString, types.ETString, types.ETString}, + geners: []dataGenerator{ + &dateStrGener{NullRation: 0.2}, + &numStrGener{rangeInt64Gener{math.MinInt32 + 1, math.MaxInt32}}, + }, + constants: []*Constant{nil, nil, {Value: types.NewStringDatum("DAY_HOUR"), RetType: 
types.NewFieldType(mysql.TypeString)}}, + chunkSize: 128, + }, + { + retEvalType: types.ETDatetime, + childrenTypes: []types.EvalType{types.ETString, types.ETString, types.ETString}, + geners: []dataGenerator{ + &dateStrGener{NullRation: 0.2}, + &numStrGener{rangeInt64Gener{math.MinInt32 + 1, math.MaxInt32}}, + }, + constants: []*Constant{nil, nil, {Value: types.NewStringDatum("YEAR_MONTH"), RetType: types.NewFieldType(mysql.TypeString)}}, + chunkSize: 128, + }, + // builtinAddDateStringIntSig + { + retEvalType: types.ETDatetime, + childrenTypes: []types.EvalType{types.ETString, types.ETInt, types.ETString}, + geners: []dataGenerator{ + &dateStrGener{NullRation: 0.2}, + &defaultGener{eType: types.ETInt, nullRation: 0.2}, + }, + constants: []*Constant{nil, nil, {Value: types.NewStringDatum("MICROSECOND"), RetType: types.NewFieldType(mysql.TypeString)}}, + chunkSize: 128, + }, + { + retEvalType: types.ETDatetime, + childrenTypes: []types.EvalType{types.ETString, types.ETInt, types.ETString}, + geners: []dataGenerator{ + &dateStrGener{NullRation: 0.2}, + &defaultGener{eType: types.ETInt, nullRation: 0.2}, + }, + constants: []*Constant{nil, nil, {Value: types.NewStringDatum("SECOND"), RetType: types.NewFieldType(mysql.TypeString)}}, + chunkSize: 128, + }, + { + retEvalType: types.ETDatetime, + childrenTypes: []types.EvalType{types.ETString, types.ETInt, types.ETString}, + geners: []dataGenerator{ + &dateStrGener{NullRation: 0.2}, + &defaultGener{eType: types.ETInt, nullRation: 0.2}, + }, + constants: []*Constant{nil, nil, {Value: types.NewStringDatum("MINUTE"), RetType: types.NewFieldType(mysql.TypeString)}}, + chunkSize: 128, + }, + { + retEvalType: types.ETDatetime, + childrenTypes: []types.EvalType{types.ETString, types.ETInt, types.ETString}, + geners: []dataGenerator{ + &dateStrGener{NullRation: 0.2}, + &defaultGener{eType: types.ETInt, nullRation: 0.2}, + }, + constants: []*Constant{nil, nil, {Value: types.NewStringDatum("HOUR"), RetType: types.NewFieldType(mysql.TypeString)}}, + chunkSize: 128, + }, + { + retEvalType: types.ETDatetime, + childrenTypes: []types.EvalType{types.ETString, types.ETInt, types.ETString}, + geners: []dataGenerator{ + &dateStrGener{NullRation: 0.2}, + &defaultGener{eType: types.ETInt, nullRation: 0.2}, + }, + constants: []*Constant{nil, nil, {Value: types.NewStringDatum("DAY"), RetType: types.NewFieldType(mysql.TypeString)}}, + chunkSize: 128, + }, + { + retEvalType: types.ETDatetime, + childrenTypes: []types.EvalType{types.ETString, types.ETInt, types.ETString}, + geners: []dataGenerator{ + &dateStrGener{NullRation: 0.2}, + &defaultGener{eType: types.ETInt, nullRation: 0.2}, + }, + constants: []*Constant{nil, nil, {Value: types.NewStringDatum("WEEK"), RetType: types.NewFieldType(mysql.TypeString)}}, + chunkSize: 128, + }, + { + retEvalType: types.ETDatetime, + childrenTypes: []types.EvalType{types.ETString, types.ETInt, types.ETString}, + geners: []dataGenerator{ + &dateStrGener{NullRation: 0.2}, + &defaultGener{eType: types.ETInt, nullRation: 0.2}, + }, + constants: []*Constant{nil, nil, {Value: types.NewStringDatum("MONTH"), RetType: types.NewFieldType(mysql.TypeString)}}, + chunkSize: 128, + }, + { + retEvalType: types.ETDatetime, + childrenTypes: []types.EvalType{types.ETString, types.ETInt, types.ETString}, + geners: []dataGenerator{ + &dateStrGener{NullRation: 0.2}, + &defaultGener{eType: types.ETInt, nullRation: 0.2}, + }, + constants: []*Constant{nil, nil, {Value: types.NewStringDatum("QUARTER"), RetType: types.NewFieldType(mysql.TypeString)}}, + 
chunkSize: 128, + }, + { + retEvalType: types.ETDatetime, + childrenTypes: []types.EvalType{types.ETString, types.ETInt, types.ETString}, + geners: []dataGenerator{ + &dateStrGener{NullRation: 0.2}, + &defaultGener{eType: types.ETInt, nullRation: 0.2}, + }, + constants: []*Constant{nil, nil, {Value: types.NewStringDatum("YEAR"), RetType: types.NewFieldType(mysql.TypeString)}}, + chunkSize: 128, + }, + { + retEvalType: types.ETDatetime, + childrenTypes: []types.EvalType{types.ETString, types.ETInt, types.ETString}, + geners: []dataGenerator{ + &dateStrGener{NullRation: 0.2}, + &defaultGener{eType: types.ETInt, nullRation: 0.2}, + }, + constants: []*Constant{nil, nil, {Value: types.NewStringDatum("SECOND_MICROSECOND"), RetType: types.NewFieldType(mysql.TypeString)}}, + chunkSize: 128, + }, + { + retEvalType: types.ETDatetime, + childrenTypes: []types.EvalType{types.ETString, types.ETInt, types.ETString}, + geners: []dataGenerator{ + &dateStrGener{NullRation: 0.2}, + &defaultGener{eType: types.ETInt, nullRation: 0.2}, + }, + constants: []*Constant{nil, nil, {Value: types.NewStringDatum("MINUTE_MICROSECOND"), RetType: types.NewFieldType(mysql.TypeString)}}, + chunkSize: 128, + }, + { + retEvalType: types.ETDatetime, + childrenTypes: []types.EvalType{types.ETString, types.ETInt, types.ETString}, + geners: []dataGenerator{ + &dateStrGener{NullRation: 0.2}, + &defaultGener{eType: types.ETInt, nullRation: 0.2}, + }, + constants: []*Constant{nil, nil, {Value: types.NewStringDatum("MINUTE_SECOND"), RetType: types.NewFieldType(mysql.TypeString)}}, + chunkSize: 128, + }, + { + retEvalType: types.ETDatetime, + childrenTypes: []types.EvalType{types.ETString, types.ETInt, types.ETString}, + geners: []dataGenerator{ + &dateStrGener{NullRation: 0.2}, + &defaultGener{eType: types.ETInt, nullRation: 0.2}, + }, + constants: []*Constant{nil, nil, {Value: types.NewStringDatum("HOUR_MICROSECOND"), RetType: types.NewFieldType(mysql.TypeString)}}, + chunkSize: 128, + }, + { + retEvalType: types.ETDatetime, + childrenTypes: []types.EvalType{types.ETString, types.ETInt, types.ETString}, + geners: []dataGenerator{ + &dateStrGener{NullRation: 0.2}, + &defaultGener{eType: types.ETInt, nullRation: 0.2}, + }, + constants: []*Constant{nil, nil, {Value: types.NewStringDatum("HOUR_SECOND"), RetType: types.NewFieldType(mysql.TypeString)}}, + chunkSize: 128, + }, + { + retEvalType: types.ETDatetime, + childrenTypes: []types.EvalType{types.ETString, types.ETInt, types.ETString}, + geners: []dataGenerator{ + &dateStrGener{NullRation: 0.2}, + &defaultGener{eType: types.ETInt, nullRation: 0.2}, + }, + constants: []*Constant{nil, nil, {Value: types.NewStringDatum("HOUR_MINUTE"), RetType: types.NewFieldType(mysql.TypeString)}}, + chunkSize: 128, + }, + { + retEvalType: types.ETDatetime, + childrenTypes: []types.EvalType{types.ETString, types.ETInt, types.ETString}, + geners: []dataGenerator{ + &dateStrGener{NullRation: 0.2}, + &defaultGener{eType: types.ETInt, nullRation: 0.2}, + }, + constants: []*Constant{nil, nil, {Value: types.NewStringDatum("DAY_MICROSECOND"), RetType: types.NewFieldType(mysql.TypeString)}}, + chunkSize: 128, + }, + { + retEvalType: types.ETDatetime, + childrenTypes: []types.EvalType{types.ETString, types.ETInt, types.ETString}, + geners: []dataGenerator{ + &dateStrGener{NullRation: 0.2}, + &defaultGener{eType: types.ETInt, nullRation: 0.2}, + }, + constants: []*Constant{nil, nil, {Value: types.NewStringDatum("DAY_SECOND"), RetType: types.NewFieldType(mysql.TypeString)}}, + chunkSize: 128, + }, + { + 
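// same template as the cases above: only the constant unit string changes from here on. +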
retEvalType: types.ETDatetime, + childrenTypes: []types.EvalType{types.ETString, types.ETInt, types.ETString}, + geners: []dataGenerator{ + &dateStrGener{NullRation: 0.2}, + &defaultGener{eType: types.ETInt, nullRation: 0.2}, + }, + constants: []*Constant{nil, nil, {Value: types.NewStringDatum("DAY_MINUTE"), RetType: types.NewFieldType(mysql.TypeString)}}, + chunkSize: 128, + }, + { + retEvalType: types.ETDatetime, + childrenTypes: []types.EvalType{types.ETString, types.ETInt, types.ETString}, + geners: []dataGenerator{ + &dateStrGener{NullRation: 0.2}, + &defaultGener{eType: types.ETInt, nullRation: 0.2}, + }, + constants: []*Constant{nil, nil, {Value: types.NewStringDatum("DAY_HOUR"), RetType: types.NewFieldType(mysql.TypeString)}}, + chunkSize: 128, + }, + { + retEvalType: types.ETDatetime, + childrenTypes: []types.EvalType{types.ETString, types.ETInt, types.ETString}, + geners: []dataGenerator{ + &dateStrGener{NullRation: 0.2}, + &defaultGener{eType: types.ETInt, nullRation: 0.2}, + }, + constants: []*Constant{nil, nil, {Value: types.NewStringDatum("YEAR_MONTH"), RetType: types.NewFieldType(mysql.TypeString)}}, + chunkSize: 128, + }, + // builtinAddDateStringRealSig + { + retEvalType: types.ETDatetime, + childrenTypes: []types.EvalType{types.ETString, types.ETReal, types.ETString}, + geners: []dataGenerator{ + &dateStrGener{NullRation: 0.2}, + &defaultGener{eType: types.ETReal, nullRation: 0.2}, + }, + constants: []*Constant{nil, nil, {Value: types.NewStringDatum("MICROSECOND"), RetType: types.NewFieldType(mysql.TypeString)}}, + chunkSize: 128, + }, + { + retEvalType: types.ETDatetime, + childrenTypes: []types.EvalType{types.ETString, types.ETReal, types.ETString}, + geners: []dataGenerator{ + &dateStrGener{NullRation: 0.2}, + &defaultGener{eType: types.ETReal, nullRation: 0.2}, + }, + constants: []*Constant{nil, nil, {Value: types.NewStringDatum("SECOND"), RetType: types.NewFieldType(mysql.TypeString)}}, + chunkSize: 128, + }, + { + retEvalType: types.ETDatetime, + childrenTypes: []types.EvalType{types.ETString, types.ETReal, types.ETString}, + geners: []dataGenerator{ + &dateStrGener{NullRation: 0.2}, + &defaultGener{eType: types.ETReal, nullRation: 0.2}, + }, + constants: []*Constant{nil, nil, {Value: types.NewStringDatum("MINUTE"), RetType: types.NewFieldType(mysql.TypeString)}}, + chunkSize: 128, + }, + { + retEvalType: types.ETDatetime, + childrenTypes: []types.EvalType{types.ETString, types.ETReal, types.ETString}, + geners: []dataGenerator{ + &dateStrGener{NullRation: 0.2}, + &defaultGener{eType: types.ETReal, nullRation: 0.2}, + }, + constants: []*Constant{nil, nil, {Value: types.NewStringDatum("HOUR"), RetType: types.NewFieldType(mysql.TypeString)}}, + chunkSize: 128, + }, + { + retEvalType: types.ETDatetime, + childrenTypes: []types.EvalType{types.ETString, types.ETReal, types.ETString}, + geners: []dataGenerator{ + &dateStrGener{NullRation: 0.2}, + &defaultGener{eType: types.ETReal, nullRation: 0.2}, + }, + constants: []*Constant{nil, nil, {Value: types.NewStringDatum("DAY"), RetType: types.NewFieldType(mysql.TypeString)}}, + chunkSize: 128, + }, + { + retEvalType: types.ETDatetime, + childrenTypes: []types.EvalType{types.ETString, types.ETReal, types.ETString}, + geners: []dataGenerator{ + &dateStrGener{NullRation: 0.2}, + &defaultGener{eType: types.ETReal, nullRation: 0.2}, + }, + constants: []*Constant{nil, nil, {Value: types.NewStringDatum("WEEK"), RetType: types.NewFieldType(mysql.TypeString)}}, + chunkSize: 128, + }, + { + retEvalType: types.ETDatetime, + 
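// retEvalType stays ETDatetime for every addDate signature in this table. +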
childrenTypes: []types.EvalType{types.ETString, types.ETReal, types.ETString}, + geners: []dataGenerator{ + &dateStrGener{NullRation: 0.2}, + &defaultGener{eType: types.ETReal, nullRation: 0.2}, + }, + constants: []*Constant{nil, nil, {Value: types.NewStringDatum("MONTH"), RetType: types.NewFieldType(mysql.TypeString)}}, + chunkSize: 128, + }, + { + retEvalType: types.ETDatetime, + childrenTypes: []types.EvalType{types.ETString, types.ETReal, types.ETString}, + geners: []dataGenerator{ + &dateStrGener{NullRation: 0.2}, + &defaultGener{eType: types.ETReal, nullRation: 0.2}, + }, + constants: []*Constant{nil, nil, {Value: types.NewStringDatum("QUARTER"), RetType: types.NewFieldType(mysql.TypeString)}}, + chunkSize: 128, + }, + { + retEvalType: types.ETDatetime, + childrenTypes: []types.EvalType{types.ETString, types.ETReal, types.ETString}, + geners: []dataGenerator{ + &dateStrGener{NullRation: 0.2}, + &defaultGener{eType: types.ETReal, nullRation: 0.2}, + }, + constants: []*Constant{nil, nil, {Value: types.NewStringDatum("YEAR"), RetType: types.NewFieldType(mysql.TypeString)}}, + chunkSize: 128, + }, + { + retEvalType: types.ETDatetime, + childrenTypes: []types.EvalType{types.ETString, types.ETReal, types.ETString}, + geners: []dataGenerator{ + &dateStrGener{NullRation: 0.2}, + &defaultGener{eType: types.ETReal, nullRation: 0.2}, + }, + constants: []*Constant{nil, nil, {Value: types.NewStringDatum("SECOND_MICROSECOND"), RetType: types.NewFieldType(mysql.TypeString)}}, + chunkSize: 128, + }, + { + retEvalType: types.ETDatetime, + childrenTypes: []types.EvalType{types.ETString, types.ETReal, types.ETString}, + geners: []dataGenerator{ + &dateStrGener{NullRation: 0.2}, + &defaultGener{eType: types.ETReal, nullRation: 0.2}, + }, + constants: []*Constant{nil, nil, {Value: types.NewStringDatum("MINUTE_MICROSECOND"), RetType: types.NewFieldType(mysql.TypeString)}}, + chunkSize: 128, + }, + { + retEvalType: types.ETDatetime, + childrenTypes: []types.EvalType{types.ETString, types.ETReal, types.ETString}, + geners: []dataGenerator{ + &dateStrGener{NullRation: 0.2}, + &defaultGener{eType: types.ETReal, nullRation: 0.2}, + }, + constants: []*Constant{nil, nil, {Value: types.NewStringDatum("MINUTE_SECOND"), RetType: types.NewFieldType(mysql.TypeString)}}, + chunkSize: 128, + }, + { + retEvalType: types.ETDatetime, + childrenTypes: []types.EvalType{types.ETString, types.ETReal, types.ETString}, + geners: []dataGenerator{ + &dateStrGener{NullRation: 0.2}, + &defaultGener{eType: types.ETReal, nullRation: 0.2}, + }, + constants: []*Constant{nil, nil, {Value: types.NewStringDatum("HOUR_MICROSECOND"), RetType: types.NewFieldType(mysql.TypeString)}}, + chunkSize: 128, + }, + { + retEvalType: types.ETDatetime, + childrenTypes: []types.EvalType{types.ETString, types.ETReal, types.ETString}, + geners: []dataGenerator{ + &dateStrGener{NullRation: 0.2}, + &defaultGener{eType: types.ETReal, nullRation: 0.2}, + }, + constants: []*Constant{nil, nil, {Value: types.NewStringDatum("HOUR_SECOND"), RetType: types.NewFieldType(mysql.TypeString)}}, + chunkSize: 128, + }, + { + retEvalType: types.ETDatetime, + childrenTypes: []types.EvalType{types.ETString, types.ETReal, types.ETString}, + geners: []dataGenerator{ + &dateStrGener{NullRation: 0.2}, + &defaultGener{eType: types.ETReal, nullRation: 0.2}, + }, + constants: []*Constant{nil, nil, {Value: types.NewStringDatum("HOUR_MINUTE"), RetType: types.NewFieldType(mysql.TypeString)}}, + chunkSize: 128, + }, + { + retEvalType: types.ETDatetime, + childrenTypes: 
[]types.EvalType{types.ETString, types.ETReal, types.ETString}, + geners: []dataGenerator{ + &dateStrGener{NullRation: 0.2}, + &defaultGener{eType: types.ETReal, nullRation: 0.2}, + }, + constants: []*Constant{nil, nil, {Value: types.NewStringDatum("DAY_MICROSECOND"), RetType: types.NewFieldType(mysql.TypeString)}}, + chunkSize: 128, + }, + { + retEvalType: types.ETDatetime, + childrenTypes: []types.EvalType{types.ETString, types.ETReal, types.ETString}, + geners: []dataGenerator{ + &dateStrGener{NullRation: 0.2}, + &defaultGener{eType: types.ETReal, nullRation: 0.2}, + }, + constants: []*Constant{nil, nil, {Value: types.NewStringDatum("DAY_SECOND"), RetType: types.NewFieldType(mysql.TypeString)}}, + chunkSize: 128, + }, + { + retEvalType: types.ETDatetime, + childrenTypes: []types.EvalType{types.ETString, types.ETReal, types.ETString}, + geners: []dataGenerator{ + &dateStrGener{NullRation: 0.2}, + &defaultGener{eType: types.ETReal, nullRation: 0.2}, + }, + constants: []*Constant{nil, nil, {Value: types.NewStringDatum("DAY_MINUTE"), RetType: types.NewFieldType(mysql.TypeString)}}, + chunkSize: 128, + }, + { + retEvalType: types.ETDatetime, + childrenTypes: []types.EvalType{types.ETString, types.ETReal, types.ETString}, + geners: []dataGenerator{ + &dateStrGener{NullRation: 0.2}, + &defaultGener{eType: types.ETReal, nullRation: 0.2}, + }, + constants: []*Constant{nil, nil, {Value: types.NewStringDatum("DAY_HOUR"), RetType: types.NewFieldType(mysql.TypeString)}}, + chunkSize: 128, + }, + { + retEvalType: types.ETDatetime, + childrenTypes: []types.EvalType{types.ETString, types.ETReal, types.ETString}, + geners: []dataGenerator{ + &dateStrGener{NullRation: 0.2}, + &defaultGener{eType: types.ETReal, nullRation: 0.2}, + }, + constants: []*Constant{nil, nil, {Value: types.NewStringDatum("YEAR_MONTH"), RetType: types.NewFieldType(mysql.TypeString)}}, + chunkSize: 128, + }, + // builtinAddDateStringDecimalSig + { + retEvalType: types.ETDatetime, + childrenTypes: []types.EvalType{types.ETString, types.ETDecimal, types.ETString}, + geners: []dataGenerator{ + &dateStrGener{NullRation: 0.2}, + &defaultGener{eType: types.ETDecimal, nullRation: 0.2}, + }, + constants: []*Constant{nil, nil, {Value: types.NewStringDatum("MICROSECOND"), RetType: types.NewFieldType(mysql.TypeString)}}, + chunkSize: 128, + }, + { + retEvalType: types.ETDatetime, + childrenTypes: []types.EvalType{types.ETString, types.ETDecimal, types.ETString}, + geners: []dataGenerator{ + &dateStrGener{NullRation: 0.2}, + &defaultGener{eType: types.ETDecimal, nullRation: 0.2}, + }, + constants: []*Constant{nil, nil, {Value: types.NewStringDatum("SECOND"), RetType: types.NewFieldType(mysql.TypeString)}}, + chunkSize: 128, + }, + { + retEvalType: types.ETDatetime, + childrenTypes: []types.EvalType{types.ETString, types.ETDecimal, types.ETString}, + geners: []dataGenerator{ + &dateStrGener{NullRation: 0.2}, + &defaultGener{eType: types.ETDecimal, nullRation: 0.2}, + }, + constants: []*Constant{nil, nil, {Value: types.NewStringDatum("MINUTE"), RetType: types.NewFieldType(mysql.TypeString)}}, + chunkSize: 128, + }, + { + retEvalType: types.ETDatetime, + childrenTypes: []types.EvalType{types.ETString, types.ETDecimal, types.ETString}, + geners: []dataGenerator{ + &dateStrGener{NullRation: 0.2}, + &defaultGener{eType: types.ETDecimal, nullRation: 0.2}, + }, + constants: []*Constant{nil, nil, {Value: types.NewStringDatum("HOUR"), RetType: types.NewFieldType(mysql.TypeString)}}, + chunkSize: 128, + }, + { + retEvalType: types.ETDatetime, + 
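// the decimal-interval cases walk the same unit list; only childrenTypes differs. +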
childrenTypes: []types.EvalType{types.ETString, types.ETDecimal, types.ETString}, + geners: []dataGenerator{ + &dateStrGener{NullRation: 0.2}, + &defaultGener{eType: types.ETDecimal, nullRation: 0.2}, + }, + constants: []*Constant{nil, nil, {Value: types.NewStringDatum("DAY"), RetType: types.NewFieldType(mysql.TypeString)}}, + chunkSize: 128, + }, + { + retEvalType: types.ETDatetime, + childrenTypes: []types.EvalType{types.ETString, types.ETDecimal, types.ETString}, + geners: []dataGenerator{ + &dateStrGener{NullRation: 0.2}, + &defaultGener{eType: types.ETDecimal, nullRation: 0.2}, + }, + constants: []*Constant{nil, nil, {Value: types.NewStringDatum("WEEK"), RetType: types.NewFieldType(mysql.TypeString)}}, + chunkSize: 128, + }, + { + retEvalType: types.ETDatetime, + childrenTypes: []types.EvalType{types.ETString, types.ETDecimal, types.ETString}, + geners: []dataGenerator{ + &dateStrGener{NullRation: 0.2}, + &defaultGener{eType: types.ETDecimal, nullRation: 0.2}, + }, + constants: []*Constant{nil, nil, {Value: types.NewStringDatum("MONTH"), RetType: types.NewFieldType(mysql.TypeString)}}, + chunkSize: 128, + }, + { + retEvalType: types.ETDatetime, + childrenTypes: []types.EvalType{types.ETString, types.ETDecimal, types.ETString}, + geners: []dataGenerator{ + &dateStrGener{NullRation: 0.2}, + &defaultGener{eType: types.ETDecimal, nullRation: 0.2}, + }, + constants: []*Constant{nil, nil, {Value: types.NewStringDatum("QUARTER"), RetType: types.NewFieldType(mysql.TypeString)}}, + chunkSize: 128, + }, + { + retEvalType: types.ETDatetime, + childrenTypes: []types.EvalType{types.ETString, types.ETDecimal, types.ETString}, + geners: []dataGenerator{ + &dateStrGener{NullRation: 0.2}, + &defaultGener{eType: types.ETDecimal, nullRation: 0.2}, + }, + constants: []*Constant{nil, nil, {Value: types.NewStringDatum("YEAR"), RetType: types.NewFieldType(mysql.TypeString)}}, + chunkSize: 128, + }, + { + retEvalType: types.ETDatetime, + childrenTypes: []types.EvalType{types.ETString, types.ETDecimal, types.ETString}, + geners: []dataGenerator{ + &dateStrGener{NullRation: 0.2}, + &defaultGener{eType: types.ETDecimal, nullRation: 0.2}, + }, + constants: []*Constant{nil, nil, {Value: types.NewStringDatum("SECOND_MICROSECOND"), RetType: types.NewFieldType(mysql.TypeString)}}, + chunkSize: 128, + }, + { + retEvalType: types.ETDatetime, + childrenTypes: []types.EvalType{types.ETString, types.ETDecimal, types.ETString}, + geners: []dataGenerator{ + &dateStrGener{NullRation: 0.2}, + &defaultGener{eType: types.ETDecimal, nullRation: 0.2}, + }, + constants: []*Constant{nil, nil, {Value: types.NewStringDatum("MINUTE_MICROSECOND"), RetType: types.NewFieldType(mysql.TypeString)}}, + chunkSize: 128, + }, + { + retEvalType: types.ETDatetime, + childrenTypes: []types.EvalType{types.ETString, types.ETDecimal, types.ETString}, + geners: []dataGenerator{ + &dateStrGener{NullRation: 0.2}, + &defaultGener{eType: types.ETDecimal, nullRation: 0.2}, + }, + constants: []*Constant{nil, nil, {Value: types.NewStringDatum("MINUTE_SECOND"), RetType: types.NewFieldType(mysql.TypeString)}}, + chunkSize: 128, + }, + { + retEvalType: types.ETDatetime, + childrenTypes: []types.EvalType{types.ETString, types.ETDecimal, types.ETString}, + geners: []dataGenerator{ + &dateStrGener{NullRation: 0.2}, + &defaultGener{eType: types.ETDecimal, nullRation: 0.2}, + }, + constants: []*Constant{nil, nil, {Value: types.NewStringDatum("HOUR_MICROSECOND"), RetType: types.NewFieldType(mysql.TypeString)}}, + chunkSize: 128, + }, + { + retEvalType: 
types.ETDatetime, + childrenTypes: []types.EvalType{types.ETString, types.ETDecimal, types.ETString}, + geners: []dataGenerator{ + &dateStrGener{NullRation: 0.2}, + &defaultGener{eType: types.ETDecimal, nullRation: 0.2}, + }, + constants: []*Constant{nil, nil, {Value: types.NewStringDatum("HOUR_SECOND"), RetType: types.NewFieldType(mysql.TypeString)}}, + chunkSize: 128, + }, + { + retEvalType: types.ETDatetime, + childrenTypes: []types.EvalType{types.ETString, types.ETDecimal, types.ETString}, + geners: []dataGenerator{ + &dateStrGener{NullRation: 0.2}, + &defaultGener{eType: types.ETDecimal, nullRation: 0.2}, + }, + constants: []*Constant{nil, nil, {Value: types.NewStringDatum("HOUR_MINUTE"), RetType: types.NewFieldType(mysql.TypeString)}}, + chunkSize: 128, + }, + { + retEvalType: types.ETDatetime, + childrenTypes: []types.EvalType{types.ETString, types.ETDecimal, types.ETString}, + geners: []dataGenerator{ + &dateStrGener{NullRation: 0.2}, + &defaultGener{eType: types.ETDecimal, nullRation: 0.2}, + }, + constants: []*Constant{nil, nil, {Value: types.NewStringDatum("DAY_MICROSECOND"), RetType: types.NewFieldType(mysql.TypeString)}}, + chunkSize: 128, + }, + { + retEvalType: types.ETDatetime, + childrenTypes: []types.EvalType{types.ETString, types.ETDecimal, types.ETString}, + geners: []dataGenerator{ + &dateStrGener{NullRation: 0.2}, + &defaultGener{eType: types.ETDecimal, nullRation: 0.2}, + }, + constants: []*Constant{nil, nil, {Value: types.NewStringDatum("DAY_SECOND"), RetType: types.NewFieldType(mysql.TypeString)}}, + chunkSize: 128, + }, + { + retEvalType: types.ETDatetime, + childrenTypes: []types.EvalType{types.ETString, types.ETDecimal, types.ETString}, + geners: []dataGenerator{ + &dateStrGener{NullRation: 0.2}, + &defaultGener{eType: types.ETDecimal, nullRation: 0.2}, + }, + constants: []*Constant{nil, nil, {Value: types.NewStringDatum("DAY_MINUTE"), RetType: types.NewFieldType(mysql.TypeString)}}, + chunkSize: 128, + }, + { + retEvalType: types.ETDatetime, + childrenTypes: []types.EvalType{types.ETString, types.ETDecimal, types.ETString}, + geners: []dataGenerator{ + &dateStrGener{NullRation: 0.2}, + &defaultGener{eType: types.ETDecimal, nullRation: 0.2}, + }, + constants: []*Constant{nil, nil, {Value: types.NewStringDatum("DAY_HOUR"), RetType: types.NewFieldType(mysql.TypeString)}}, + chunkSize: 128, + }, + { + retEvalType: types.ETDatetime, + childrenTypes: []types.EvalType{types.ETString, types.ETDecimal, types.ETString}, + geners: []dataGenerator{ + &dateStrGener{NullRation: 0.2}, + &defaultGener{eType: types.ETDecimal, nullRation: 0.2}, + }, + constants: []*Constant{nil, nil, {Value: types.NewStringDatum("YEAR_MONTH"), RetType: types.NewFieldType(mysql.TypeString)}}, + chunkSize: 128, + }, + // builtinAddDateIntStringSig + { + retEvalType: types.ETDatetime, + childrenTypes: []types.EvalType{types.ETInt, types.ETString, types.ETString}, + geners: []dataGenerator{ + &dateTimeIntGener{nullRation: 0.2}, + &numStrGener{rangeInt64Gener{math.MinInt32 + 1, math.MaxInt32}}, + }, + constants: []*Constant{nil, nil, {Value: types.NewStringDatum("MICROSECOND"), RetType: types.NewFieldType(mysql.TypeString)}}, + chunkSize: 128, + }, + { + retEvalType: types.ETDatetime, + childrenTypes: []types.EvalType{types.ETInt, types.ETString, types.ETString}, + geners: []dataGenerator{ + &dateTimeIntGener{nullRation: 0.2}, + &numStrGener{rangeInt64Gener{math.MinInt32 + 1, math.MaxInt32}}, + }, + constants: []*Constant{nil, nil, {Value: types.NewStringDatum("SECOND"), RetType: 
types.NewFieldType(mysql.TypeString)}}, + chunkSize: 128, + }, + { + retEvalType: types.ETDatetime, + childrenTypes: []types.EvalType{types.ETInt, types.ETString, types.ETString}, + geners: []dataGenerator{ + &dateTimeIntGener{nullRation: 0.2}, + &numStrGener{rangeInt64Gener{math.MinInt32 + 1, math.MaxInt32}}, + }, + constants: []*Constant{nil, nil, {Value: types.NewStringDatum("MINUTE"), RetType: types.NewFieldType(mysql.TypeString)}}, + chunkSize: 128, + }, + { + retEvalType: types.ETDatetime, + childrenTypes: []types.EvalType{types.ETInt, types.ETString, types.ETString}, + geners: []dataGenerator{ + &dateTimeIntGener{nullRation: 0.2}, + &numStrGener{rangeInt64Gener{math.MinInt32 + 1, math.MaxInt32}}, + }, + constants: []*Constant{nil, nil, {Value: types.NewStringDatum("HOUR"), RetType: types.NewFieldType(mysql.TypeString)}}, + chunkSize: 128, + }, + { + retEvalType: types.ETDatetime, + childrenTypes: []types.EvalType{types.ETInt, types.ETString, types.ETString}, + geners: []dataGenerator{ + &dateTimeIntGener{nullRation: 0.2}, + &numStrGener{rangeInt64Gener{math.MinInt32 + 1, math.MaxInt32}}, + }, + constants: []*Constant{nil, nil, {Value: types.NewStringDatum("DAY"), RetType: types.NewFieldType(mysql.TypeString)}}, + chunkSize: 128, + }, + { + retEvalType: types.ETDatetime, + childrenTypes: []types.EvalType{types.ETInt, types.ETString, types.ETString}, + geners: []dataGenerator{ + &dateTimeIntGener{nullRation: 0.2}, + &numStrGener{rangeInt64Gener{math.MinInt32 + 1, math.MaxInt32}}, + }, + constants: []*Constant{nil, nil, {Value: types.NewStringDatum("WEEK"), RetType: types.NewFieldType(mysql.TypeString)}}, + chunkSize: 128, + }, + { + retEvalType: types.ETDatetime, + childrenTypes: []types.EvalType{types.ETInt, types.ETString, types.ETString}, + geners: []dataGenerator{ + &dateTimeIntGener{nullRation: 0.2}, + &numStrGener{rangeInt64Gener{math.MinInt32 + 1, math.MaxInt32}}, + }, + constants: []*Constant{nil, nil, {Value: types.NewStringDatum("MONTH"), RetType: types.NewFieldType(mysql.TypeString)}}, + chunkSize: 128, + }, + { + retEvalType: types.ETDatetime, + childrenTypes: []types.EvalType{types.ETInt, types.ETString, types.ETString}, + geners: []dataGenerator{ + &dateTimeIntGener{nullRation: 0.2}, + &numStrGener{rangeInt64Gener{math.MinInt32 + 1, math.MaxInt32}}, + }, + constants: []*Constant{nil, nil, {Value: types.NewStringDatum("QUARTER"), RetType: types.NewFieldType(mysql.TypeString)}}, + chunkSize: 128, + }, + { + retEvalType: types.ETDatetime, + childrenTypes: []types.EvalType{types.ETInt, types.ETString, types.ETString}, + geners: []dataGenerator{ + &dateTimeIntGener{nullRation: 0.2}, + &numStrGener{rangeInt64Gener{math.MinInt32 + 1, math.MaxInt32}}, + }, + constants: []*Constant{nil, nil, {Value: types.NewStringDatum("YEAR"), RetType: types.NewFieldType(mysql.TypeString)}}, + chunkSize: 128, + }, + { + retEvalType: types.ETDatetime, + childrenTypes: []types.EvalType{types.ETInt, types.ETString, types.ETString}, + geners: []dataGenerator{ + &dateTimeIntGener{nullRation: 0.2}, + &numStrGener{rangeInt64Gener{math.MinInt32 + 1, math.MaxInt32}}, + }, + constants: []*Constant{nil, nil, {Value: types.NewStringDatum("SECOND_MICROSECOND"), RetType: types.NewFieldType(mysql.TypeString)}}, + chunkSize: 128, + }, + { + retEvalType: types.ETDatetime, + childrenTypes: []types.EvalType{types.ETInt, types.ETString, types.ETString}, + geners: []dataGenerator{ + &dateTimeIntGener{nullRation: 0.2}, + &numStrGener{rangeInt64Gener{math.MinInt32 + 1, math.MaxInt32}}, + }, + constants: 
[]*Constant{nil, nil, {Value: types.NewStringDatum("MINUTE_MICROSECOND"), RetType: types.NewFieldType(mysql.TypeString)}}, + chunkSize: 128, + }, + { + retEvalType: types.ETDatetime, + childrenTypes: []types.EvalType{types.ETInt, types.ETString, types.ETString}, + geners: []dataGenerator{ + &dateTimeIntGener{nullRation: 0.2}, + &numStrGener{rangeInt64Gener{math.MinInt32 + 1, math.MaxInt32}}, + }, + constants: []*Constant{nil, nil, {Value: types.NewStringDatum("MINUTE_SECOND"), RetType: types.NewFieldType(mysql.TypeString)}}, + chunkSize: 128, + }, + { + retEvalType: types.ETDatetime, + childrenTypes: []types.EvalType{types.ETInt, types.ETString, types.ETString}, + geners: []dataGenerator{ + &dateTimeIntGener{nullRation: 0.2}, + &numStrGener{rangeInt64Gener{math.MinInt32 + 1, math.MaxInt32}}, + }, + constants: []*Constant{nil, nil, {Value: types.NewStringDatum("HOUR_MICROSECOND"), RetType: types.NewFieldType(mysql.TypeString)}}, + chunkSize: 128, + }, + { + retEvalType: types.ETDatetime, + childrenTypes: []types.EvalType{types.ETInt, types.ETString, types.ETString}, + geners: []dataGenerator{ + &dateTimeIntGener{nullRation: 0.2}, + &numStrGener{rangeInt64Gener{math.MinInt32 + 1, math.MaxInt32}}, + }, + constants: []*Constant{nil, nil, {Value: types.NewStringDatum("HOUR_SECOND"), RetType: types.NewFieldType(mysql.TypeString)}}, + chunkSize: 128, + }, + { + retEvalType: types.ETDatetime, + childrenTypes: []types.EvalType{types.ETInt, types.ETString, types.ETString}, + geners: []dataGenerator{ + &dateTimeIntGener{nullRation: 0.2}, + &numStrGener{rangeInt64Gener{math.MinInt32 + 1, math.MaxInt32}}, + }, + constants: []*Constant{nil, nil, {Value: types.NewStringDatum("HOUR_MINUTE"), RetType: types.NewFieldType(mysql.TypeString)}}, + chunkSize: 128, + }, + { + retEvalType: types.ETDatetime, + childrenTypes: []types.EvalType{types.ETInt, types.ETString, types.ETString}, + geners: []dataGenerator{ + &dateTimeIntGener{nullRation: 0.2}, + &numStrGener{rangeInt64Gener{math.MinInt32 + 1, math.MaxInt32}}, + }, + constants: []*Constant{nil, nil, {Value: types.NewStringDatum("DAY_MICROSECOND"), RetType: types.NewFieldType(mysql.TypeString)}}, + chunkSize: 128, + }, + { + retEvalType: types.ETDatetime, + childrenTypes: []types.EvalType{types.ETInt, types.ETString, types.ETString}, + geners: []dataGenerator{ + &dateTimeIntGener{nullRation: 0.2}, + &numStrGener{rangeInt64Gener{math.MinInt32 + 1, math.MaxInt32}}, + }, + constants: []*Constant{nil, nil, {Value: types.NewStringDatum("DAY_SECOND"), RetType: types.NewFieldType(mysql.TypeString)}}, + chunkSize: 128, + }, + { + retEvalType: types.ETDatetime, + childrenTypes: []types.EvalType{types.ETInt, types.ETString, types.ETString}, + geners: []dataGenerator{ + &dateTimeIntGener{nullRation: 0.2}, + &numStrGener{rangeInt64Gener{math.MinInt32 + 1, math.MaxInt32}}, + }, + constants: []*Constant{nil, nil, {Value: types.NewStringDatum("DAY_MINUTE"), RetType: types.NewFieldType(mysql.TypeString)}}, + chunkSize: 128, + }, + { + retEvalType: types.ETDatetime, + childrenTypes: []types.EvalType{types.ETInt, types.ETString, types.ETString}, + geners: []dataGenerator{ + &dateTimeIntGener{nullRation: 0.2}, + &numStrGener{rangeInt64Gener{math.MinInt32 + 1, math.MaxInt32}}, + }, + constants: []*Constant{nil, nil, {Value: types.NewStringDatum("DAY_HOUR"), RetType: types.NewFieldType(mysql.TypeString)}}, + chunkSize: 128, + }, + { + retEvalType: types.ETDatetime, + childrenTypes: []types.EvalType{types.ETInt, types.ETString, types.ETString}, + geners: []dataGenerator{ + 
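// dateTimeIntGener should produce datetimes packed into integers (presumably YYYYMMDDhhmmss-style) for the ETInt date argument. +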
&dateTimeIntGener{nullRation: 0.2}, + &numStrGener{rangeInt64Gener{math.MinInt32 + 1, math.MaxInt32}}, + }, + constants: []*Constant{nil, nil, {Value: types.NewStringDatum("YEAR_MONTH"), RetType: types.NewFieldType(mysql.TypeString)}}, + chunkSize: 128, + }, + // builtinAddDateIntIntSig + { + retEvalType: types.ETDatetime, + childrenTypes: []types.EvalType{types.ETInt, types.ETInt, types.ETString}, + geners: []dataGenerator{ + &dateTimeIntGener{nullRation: 0.2}, + &defaultGener{eType: types.ETInt, nullRation: 0.2}, + }, + constants: []*Constant{nil, nil, {Value: types.NewStringDatum("MICROSECOND"), RetType: types.NewFieldType(mysql.TypeString)}}, + chunkSize: 128, + }, + { + retEvalType: types.ETDatetime, + childrenTypes: []types.EvalType{types.ETInt, types.ETInt, types.ETString}, + geners: []dataGenerator{ + &dateTimeIntGener{nullRation: 0.2}, + &defaultGener{eType: types.ETInt, nullRation: 0.2}, + }, + constants: []*Constant{nil, nil, {Value: types.NewStringDatum("SECOND"), RetType: types.NewFieldType(mysql.TypeString)}}, + chunkSize: 128, + }, + { + retEvalType: types.ETDatetime, + childrenTypes: []types.EvalType{types.ETInt, types.ETInt, types.ETString}, + geners: []dataGenerator{ + &dateTimeIntGener{nullRation: 0.2}, + &defaultGener{eType: types.ETInt, nullRation: 0.2}, + }, + constants: []*Constant{nil, nil, {Value: types.NewStringDatum("MINUTE"), RetType: types.NewFieldType(mysql.TypeString)}}, + chunkSize: 128, + }, + { + retEvalType: types.ETDatetime, + childrenTypes: []types.EvalType{types.ETInt, types.ETInt, types.ETString}, + geners: []dataGenerator{ + &dateTimeIntGener{nullRation: 0.2}, + &defaultGener{eType: types.ETInt, nullRation: 0.2}, + }, + constants: []*Constant{nil, nil, {Value: types.NewStringDatum("HOUR"), RetType: types.NewFieldType(mysql.TypeString)}}, + chunkSize: 128, + }, + { + retEvalType: types.ETDatetime, + childrenTypes: []types.EvalType{types.ETInt, types.ETInt, types.ETString}, + geners: []dataGenerator{ + &dateTimeIntGener{nullRation: 0.2}, + &defaultGener{eType: types.ETInt, nullRation: 0.2}, + }, + constants: []*Constant{nil, nil, {Value: types.NewStringDatum("DAY"), RetType: types.NewFieldType(mysql.TypeString)}}, + chunkSize: 128, + }, + { + retEvalType: types.ETDatetime, + childrenTypes: []types.EvalType{types.ETInt, types.ETInt, types.ETString}, + geners: []dataGenerator{ + &dateTimeIntGener{nullRation: 0.2}, + &defaultGener{eType: types.ETInt, nullRation: 0.2}, + }, + constants: []*Constant{nil, nil, {Value: types.NewStringDatum("WEEK"), RetType: types.NewFieldType(mysql.TypeString)}}, + chunkSize: 128, + }, + { + retEvalType: types.ETDatetime, + childrenTypes: []types.EvalType{types.ETInt, types.ETInt, types.ETString}, + geners: []dataGenerator{ + &dateTimeIntGener{nullRation: 0.2}, + &defaultGener{eType: types.ETInt, nullRation: 0.2}, + }, + constants: []*Constant{nil, nil, {Value: types.NewStringDatum("MONTH"), RetType: types.NewFieldType(mysql.TypeString)}}, + chunkSize: 128, + }, + { + retEvalType: types.ETDatetime, + childrenTypes: []types.EvalType{types.ETInt, types.ETInt, types.ETString}, + geners: []dataGenerator{ + &dateTimeIntGener{nullRation: 0.2}, + &defaultGener{eType: types.ETInt, nullRation: 0.2}, + }, + constants: []*Constant{nil, nil, {Value: types.NewStringDatum("QUARTER"), RetType: types.NewFieldType(mysql.TypeString)}}, + chunkSize: 128, + }, + { + retEvalType: types.ETDatetime, + childrenTypes: []types.EvalType{types.ETInt, types.ETInt, types.ETString}, + geners: []dataGenerator{ + &dateTimeIntGener{nullRation: 0.2}, + 
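// defaultGener draws random values of its eval type; nullRation: 0.2 should leave roughly 20% of rows NULL. +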
&defaultGener{eType: types.ETInt, nullRation: 0.2}, + }, + constants: []*Constant{nil, nil, {Value: types.NewStringDatum("YEAR"), RetType: types.NewFieldType(mysql.TypeString)}}, + chunkSize: 128, + }, + { + retEvalType: types.ETDatetime, + childrenTypes: []types.EvalType{types.ETInt, types.ETInt, types.ETString}, + geners: []dataGenerator{ + &dateTimeIntGener{nullRation: 0.2}, + &defaultGener{eType: types.ETInt, nullRation: 0.2}, + }, + constants: []*Constant{nil, nil, {Value: types.NewStringDatum("SECOND_MICROSECOND"), RetType: types.NewFieldType(mysql.TypeString)}}, + chunkSize: 128, + }, + { + retEvalType: types.ETDatetime, + childrenTypes: []types.EvalType{types.ETInt, types.ETInt, types.ETString}, + geners: []dataGenerator{ + &dateTimeIntGener{nullRation: 0.2}, + &defaultGener{eType: types.ETInt, nullRation: 0.2}, + }, + constants: []*Constant{nil, nil, {Value: types.NewStringDatum("MINUTE_MICROSECOND"), RetType: types.NewFieldType(mysql.TypeString)}}, + chunkSize: 128, + }, + { + retEvalType: types.ETDatetime, + childrenTypes: []types.EvalType{types.ETInt, types.ETInt, types.ETString}, + geners: []dataGenerator{ + &dateTimeIntGener{nullRation: 0.2}, + &defaultGener{eType: types.ETInt, nullRation: 0.2}, + }, + constants: []*Constant{nil, nil, {Value: types.NewStringDatum("MINUTE_SECOND"), RetType: types.NewFieldType(mysql.TypeString)}}, + chunkSize: 128, + }, + { + retEvalType: types.ETDatetime, + childrenTypes: []types.EvalType{types.ETInt, types.ETInt, types.ETString}, + geners: []dataGenerator{ + &dateTimeIntGener{nullRation: 0.2}, + &defaultGener{eType: types.ETInt, nullRation: 0.2}, + }, + constants: []*Constant{nil, nil, {Value: types.NewStringDatum("HOUR_MICROSECOND"), RetType: types.NewFieldType(mysql.TypeString)}}, + chunkSize: 128, + }, + { + retEvalType: types.ETDatetime, + childrenTypes: []types.EvalType{types.ETInt, types.ETInt, types.ETString}, + geners: []dataGenerator{ + &dateTimeIntGener{nullRation: 0.2}, + &defaultGener{eType: types.ETInt, nullRation: 0.2}, + }, + constants: []*Constant{nil, nil, {Value: types.NewStringDatum("HOUR_SECOND"), RetType: types.NewFieldType(mysql.TypeString)}}, + chunkSize: 128, + }, + { + retEvalType: types.ETDatetime, + childrenTypes: []types.EvalType{types.ETInt, types.ETInt, types.ETString}, + geners: []dataGenerator{ + &dateTimeIntGener{nullRation: 0.2}, + &defaultGener{eType: types.ETInt, nullRation: 0.2}, + }, + constants: []*Constant{nil, nil, {Value: types.NewStringDatum("HOUR_MINUTE"), RetType: types.NewFieldType(mysql.TypeString)}}, + chunkSize: 128, + }, + { + retEvalType: types.ETDatetime, + childrenTypes: []types.EvalType{types.ETInt, types.ETInt, types.ETString}, + geners: []dataGenerator{ + &dateTimeIntGener{nullRation: 0.2}, + &defaultGener{eType: types.ETInt, nullRation: 0.2}, + }, + constants: []*Constant{nil, nil, {Value: types.NewStringDatum("DAY_MICROSECOND"), RetType: types.NewFieldType(mysql.TypeString)}}, + chunkSize: 128, + }, + { + retEvalType: types.ETDatetime, + childrenTypes: []types.EvalType{types.ETInt, types.ETInt, types.ETString}, + geners: []dataGenerator{ + &dateTimeIntGener{nullRation: 0.2}, + &defaultGener{eType: types.ETInt, nullRation: 0.2}, + }, + constants: []*Constant{nil, nil, {Value: types.NewStringDatum("DAY_SECOND"), RetType: types.NewFieldType(mysql.TypeString)}}, + chunkSize: 128, + }, + { + retEvalType: types.ETDatetime, + childrenTypes: []types.EvalType{types.ETInt, types.ETInt, types.ETString}, + geners: []dataGenerator{ + &dateTimeIntGener{nullRation: 0.2}, + &defaultGener{eType: 
types.ETInt, nullRation: 0.2}, + }, + constants: []*Constant{nil, nil, {Value: types.NewStringDatum("DAY_MINUTE"), RetType: types.NewFieldType(mysql.TypeString)}}, + chunkSize: 128, + }, + { + retEvalType: types.ETDatetime, + childrenTypes: []types.EvalType{types.ETInt, types.ETInt, types.ETString}, + geners: []dataGenerator{ + &dateTimeIntGener{nullRation: 0.2}, + &defaultGener{eType: types.ETInt, nullRation: 0.2}, + }, + constants: []*Constant{nil, nil, {Value: types.NewStringDatum("DAY_HOUR"), RetType: types.NewFieldType(mysql.TypeString)}}, + chunkSize: 128, + }, + { + retEvalType: types.ETDatetime, + childrenTypes: []types.EvalType{types.ETInt, types.ETInt, types.ETString}, + geners: []dataGenerator{ + &dateTimeIntGener{nullRation: 0.2}, + &defaultGener{eType: types.ETInt, nullRation: 0.2}, + }, + constants: []*Constant{nil, nil, {Value: types.NewStringDatum("YEAR_MONTH"), RetType: types.NewFieldType(mysql.TypeString)}}, + chunkSize: 128, + }, + // builtinAddDateIntRealSig + { + retEvalType: types.ETDatetime, + childrenTypes: []types.EvalType{types.ETInt, types.ETReal, types.ETString}, + geners: []dataGenerator{ + &dateTimeIntGener{nullRation: 0.2}, + &defaultGener{eType: types.ETReal, nullRation: 0.2}, + }, + constants: []*Constant{nil, nil, {Value: types.NewStringDatum("MICROSECOND"), RetType: types.NewFieldType(mysql.TypeString)}}, + chunkSize: 128, + }, + { + retEvalType: types.ETDatetime, + childrenTypes: []types.EvalType{types.ETInt, types.ETReal, types.ETString}, + geners: []dataGenerator{ + &dateTimeIntGener{nullRation: 0.2}, + &defaultGener{eType: types.ETReal, nullRation: 0.2}, + }, + constants: []*Constant{nil, nil, {Value: types.NewStringDatum("SECOND"), RetType: types.NewFieldType(mysql.TypeString)}}, + chunkSize: 128, + }, + { + retEvalType: types.ETDatetime, + childrenTypes: []types.EvalType{types.ETInt, types.ETReal, types.ETString}, + geners: []dataGenerator{ + &dateTimeIntGener{nullRation: 0.2}, + &defaultGener{eType: types.ETReal, nullRation: 0.2}, + }, + constants: []*Constant{nil, nil, {Value: types.NewStringDatum("MINUTE"), RetType: types.NewFieldType(mysql.TypeString)}}, + chunkSize: 128, + }, + { + retEvalType: types.ETDatetime, + childrenTypes: []types.EvalType{types.ETInt, types.ETReal, types.ETString}, + geners: []dataGenerator{ + &dateTimeIntGener{nullRation: 0.2}, + &defaultGener{eType: types.ETReal, nullRation: 0.2}, + }, + constants: []*Constant{nil, nil, {Value: types.NewStringDatum("HOUR"), RetType: types.NewFieldType(mysql.TypeString)}}, + chunkSize: 128, + }, + { + retEvalType: types.ETDatetime, + childrenTypes: []types.EvalType{types.ETInt, types.ETReal, types.ETString}, + geners: []dataGenerator{ + &dateTimeIntGener{nullRation: 0.2}, + &defaultGener{eType: types.ETReal, nullRation: 0.2}, + }, + constants: []*Constant{nil, nil, {Value: types.NewStringDatum("DAY"), RetType: types.NewFieldType(mysql.TypeString)}}, + chunkSize: 128, + }, + { + retEvalType: types.ETDatetime, + childrenTypes: []types.EvalType{types.ETInt, types.ETReal, types.ETString}, + geners: []dataGenerator{ + &dateTimeIntGener{nullRation: 0.2}, + &defaultGener{eType: types.ETReal, nullRation: 0.2}, + }, + constants: []*Constant{nil, nil, {Value: types.NewStringDatum("WEEK"), RetType: types.NewFieldType(mysql.TypeString)}}, + chunkSize: 128, + }, + { + retEvalType: types.ETDatetime, + childrenTypes: []types.EvalType{types.ETInt, types.ETReal, types.ETString}, + geners: []dataGenerator{ + &dateTimeIntGener{nullRation: 0.2}, + &defaultGener{eType: types.ETReal, nullRation: 0.2}, + }, + 
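// the two nil constants keep the date and interval as column inputs; only the unit is pinned. +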
constants: []*Constant{nil, nil, {Value: types.NewStringDatum("MONTH"), RetType: types.NewFieldType(mysql.TypeString)}}, + chunkSize: 128, + }, + { + retEvalType: types.ETDatetime, + childrenTypes: []types.EvalType{types.ETInt, types.ETReal, types.ETString}, + geners: []dataGenerator{ + &dateTimeIntGener{nullRation: 0.2}, + &defaultGener{eType: types.ETReal, nullRation: 0.2}, + }, + constants: []*Constant{nil, nil, {Value: types.NewStringDatum("QUARTER"), RetType: types.NewFieldType(mysql.TypeString)}}, + chunkSize: 128, + }, + { + retEvalType: types.ETDatetime, + childrenTypes: []types.EvalType{types.ETInt, types.ETReal, types.ETString}, + geners: []dataGenerator{ + &dateTimeIntGener{nullRation: 0.2}, + &defaultGener{eType: types.ETReal, nullRation: 0.2}, + }, + constants: []*Constant{nil, nil, {Value: types.NewStringDatum("YEAR"), RetType: types.NewFieldType(mysql.TypeString)}}, + chunkSize: 128, + }, + { + retEvalType: types.ETDatetime, + childrenTypes: []types.EvalType{types.ETInt, types.ETReal, types.ETString}, + geners: []dataGenerator{ + &dateTimeIntGener{nullRation: 0.2}, + &defaultGener{eType: types.ETReal, nullRation: 0.2}, + }, + constants: []*Constant{nil, nil, {Value: types.NewStringDatum("SECOND_MICROSECOND"), RetType: types.NewFieldType(mysql.TypeString)}}, + chunkSize: 128, + }, + { + retEvalType: types.ETDatetime, + childrenTypes: []types.EvalType{types.ETInt, types.ETReal, types.ETString}, + geners: []dataGenerator{ + &dateTimeIntGener{nullRation: 0.2}, + &defaultGener{eType: types.ETReal, nullRation: 0.2}, + }, + constants: []*Constant{nil, nil, {Value: types.NewStringDatum("MINUTE_MICROSECOND"), RetType: types.NewFieldType(mysql.TypeString)}}, + chunkSize: 128, + }, + { + retEvalType: types.ETDatetime, + childrenTypes: []types.EvalType{types.ETInt, types.ETReal, types.ETString}, + geners: []dataGenerator{ + &dateTimeIntGener{nullRation: 0.2}, + &defaultGener{eType: types.ETReal, nullRation: 0.2}, + }, + constants: []*Constant{nil, nil, {Value: types.NewStringDatum("MINUTE_SECOND"), RetType: types.NewFieldType(mysql.TypeString)}}, + chunkSize: 128, + }, + { + retEvalType: types.ETDatetime, + childrenTypes: []types.EvalType{types.ETInt, types.ETReal, types.ETString}, + geners: []dataGenerator{ + &dateTimeIntGener{nullRation: 0.2}, + &defaultGener{eType: types.ETReal, nullRation: 0.2}, + }, + constants: []*Constant{nil, nil, {Value: types.NewStringDatum("HOUR_MICROSECOND"), RetType: types.NewFieldType(mysql.TypeString)}}, + chunkSize: 128, + }, + { + retEvalType: types.ETDatetime, + childrenTypes: []types.EvalType{types.ETInt, types.ETReal, types.ETString}, + geners: []dataGenerator{ + &dateTimeIntGener{nullRation: 0.2}, + &defaultGener{eType: types.ETReal, nullRation: 0.2}, + }, + constants: []*Constant{nil, nil, {Value: types.NewStringDatum("HOUR_SECOND"), RetType: types.NewFieldType(mysql.TypeString)}}, + chunkSize: 128, + }, + { + retEvalType: types.ETDatetime, + childrenTypes: []types.EvalType{types.ETInt, types.ETReal, types.ETString}, + geners: []dataGenerator{ + &dateTimeIntGener{nullRation: 0.2}, + &defaultGener{eType: types.ETReal, nullRation: 0.2}, + }, + constants: []*Constant{nil, nil, {Value: types.NewStringDatum("HOUR_MINUTE"), RetType: types.NewFieldType(mysql.TypeString)}}, + chunkSize: 128, + }, + { + retEvalType: types.ETDatetime, + childrenTypes: []types.EvalType{types.ETInt, types.ETReal, types.ETString}, + geners: []dataGenerator{ + &dateTimeIntGener{nullRation: 0.2}, + &defaultGener{eType: types.ETReal, nullRation: 0.2}, + }, + constants: 
[]*Constant{nil, nil, {Value: types.NewStringDatum("DAY_MICROSECOND"), RetType: types.NewFieldType(mysql.TypeString)}}, + chunkSize: 128, + }, + { + retEvalType: types.ETDatetime, + childrenTypes: []types.EvalType{types.ETInt, types.ETReal, types.ETString}, + geners: []dataGenerator{ + &dateTimeIntGener{nullRation: 0.2}, + &defaultGener{eType: types.ETReal, nullRation: 0.2}, + }, + constants: []*Constant{nil, nil, {Value: types.NewStringDatum("DAY_SECOND"), RetType: types.NewFieldType(mysql.TypeString)}}, + chunkSize: 128, + }, + { + retEvalType: types.ETDatetime, + childrenTypes: []types.EvalType{types.ETInt, types.ETReal, types.ETString}, + geners: []dataGenerator{ + &dateTimeIntGener{nullRation: 0.2}, + &defaultGener{eType: types.ETReal, nullRation: 0.2}, + }, + constants: []*Constant{nil, nil, {Value: types.NewStringDatum("DAY_MINUTE"), RetType: types.NewFieldType(mysql.TypeString)}}, + chunkSize: 128, + }, + { + retEvalType: types.ETDatetime, + childrenTypes: []types.EvalType{types.ETInt, types.ETReal, types.ETString}, + geners: []dataGenerator{ + &dateTimeIntGener{nullRation: 0.2}, + &defaultGener{eType: types.ETReal, nullRation: 0.2}, + }, + constants: []*Constant{nil, nil, {Value: types.NewStringDatum("DAY_HOUR"), RetType: types.NewFieldType(mysql.TypeString)}}, + chunkSize: 128, + }, + { + retEvalType: types.ETDatetime, + childrenTypes: []types.EvalType{types.ETInt, types.ETReal, types.ETString}, + geners: []dataGenerator{ + &dateTimeIntGener{nullRation: 0.2}, + &defaultGener{eType: types.ETReal, nullRation: 0.2}, + }, + constants: []*Constant{nil, nil, {Value: types.NewStringDatum("YEAR_MONTH"), RetType: types.NewFieldType(mysql.TypeString)}}, + chunkSize: 128, + }, + // builtinAddDateIntDecimalSig + { + retEvalType: types.ETDatetime, + childrenTypes: []types.EvalType{types.ETInt, types.ETDecimal, types.ETString}, + geners: []dataGenerator{ + &dateTimeIntGener{nullRation: 0.2}, + &defaultGener{eType: types.ETDecimal, nullRation: 0.2}, + }, + constants: []*Constant{nil, nil, {Value: types.NewStringDatum("MICROSECOND"), RetType: types.NewFieldType(mysql.TypeString)}}, + chunkSize: 128, + }, + { + retEvalType: types.ETDatetime, + childrenTypes: []types.EvalType{types.ETInt, types.ETDecimal, types.ETString}, + geners: []dataGenerator{ + &dateTimeIntGener{nullRation: 0.2}, + &defaultGener{eType: types.ETDecimal, nullRation: 0.2}, + }, + constants: []*Constant{nil, nil, {Value: types.NewStringDatum("SECOND"), RetType: types.NewFieldType(mysql.TypeString)}}, + chunkSize: 128, + }, + { + retEvalType: types.ETDatetime, + childrenTypes: []types.EvalType{types.ETInt, types.ETDecimal, types.ETString}, + geners: []dataGenerator{ + &dateTimeIntGener{nullRation: 0.2}, + &defaultGener{eType: types.ETDecimal, nullRation: 0.2}, + }, + constants: []*Constant{nil, nil, {Value: types.NewStringDatum("MINUTE"), RetType: types.NewFieldType(mysql.TypeString)}}, + chunkSize: 128, + }, + { + retEvalType: types.ETDatetime, + childrenTypes: []types.EvalType{types.ETInt, types.ETDecimal, types.ETString}, + geners: []dataGenerator{ + &dateTimeIntGener{nullRation: 0.2}, + &defaultGener{eType: types.ETDecimal, nullRation: 0.2}, + }, + constants: []*Constant{nil, nil, {Value: types.NewStringDatum("HOUR"), RetType: types.NewFieldType(mysql.TypeString)}}, + chunkSize: 128, + }, + { + retEvalType: types.ETDatetime, + childrenTypes: []types.EvalType{types.ETInt, types.ETDecimal, types.ETString}, + geners: []dataGenerator{ + &dateTimeIntGener{nullRation: 0.2}, + &defaultGener{eType: types.ETDecimal, nullRation: 0.2}, 
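+ // no generator is needed for the third child, since the unit constant below overrides it.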
+ }, + constants: []*Constant{nil, nil, {Value: types.NewStringDatum("DAY"), RetType: types.NewFieldType(mysql.TypeString)}}, + chunkSize: 128, + }, + { + retEvalType: types.ETDatetime, + childrenTypes: []types.EvalType{types.ETInt, types.ETDecimal, types.ETString}, + geners: []dataGenerator{ + &dateTimeIntGener{nullRation: 0.2}, + &defaultGener{eType: types.ETDecimal, nullRation: 0.2}, + }, + constants: []*Constant{nil, nil, {Value: types.NewStringDatum("WEEK"), RetType: types.NewFieldType(mysql.TypeString)}}, + chunkSize: 128, + }, + { + retEvalType: types.ETDatetime, + childrenTypes: []types.EvalType{types.ETInt, types.ETDecimal, types.ETString}, + geners: []dataGenerator{ + &dateTimeIntGener{nullRation: 0.2}, + &defaultGener{eType: types.ETDecimal, nullRation: 0.2}, + }, + constants: []*Constant{nil, nil, {Value: types.NewStringDatum("MONTH"), RetType: types.NewFieldType(mysql.TypeString)}}, + chunkSize: 128, + }, + { + retEvalType: types.ETDatetime, + childrenTypes: []types.EvalType{types.ETInt, types.ETDecimal, types.ETString}, + geners: []dataGenerator{ + &dateTimeIntGener{nullRation: 0.2}, + &defaultGener{eType: types.ETDecimal, nullRation: 0.2}, + }, + constants: []*Constant{nil, nil, {Value: types.NewStringDatum("QUARTER"), RetType: types.NewFieldType(mysql.TypeString)}}, + chunkSize: 128, + }, + { + retEvalType: types.ETDatetime, + childrenTypes: []types.EvalType{types.ETInt, types.ETDecimal, types.ETString}, + geners: []dataGenerator{ + &dateTimeIntGener{nullRation: 0.2}, + &defaultGener{eType: types.ETDecimal, nullRation: 0.2}, + }, + constants: []*Constant{nil, nil, {Value: types.NewStringDatum("YEAR"), RetType: types.NewFieldType(mysql.TypeString)}}, + chunkSize: 128, + }, + { + retEvalType: types.ETDatetime, + childrenTypes: []types.EvalType{types.ETInt, types.ETDecimal, types.ETString}, + geners: []dataGenerator{ + &dateTimeIntGener{nullRation: 0.2}, + &defaultGener{eType: types.ETDecimal, nullRation: 0.2}, + }, + constants: []*Constant{nil, nil, {Value: types.NewStringDatum("SECOND_MICROSECOND"), RetType: types.NewFieldType(mysql.TypeString)}}, + chunkSize: 128, + }, + { + retEvalType: types.ETDatetime, + childrenTypes: []types.EvalType{types.ETInt, types.ETDecimal, types.ETString}, + geners: []dataGenerator{ + &dateTimeIntGener{nullRation: 0.2}, + &defaultGener{eType: types.ETDecimal, nullRation: 0.2}, + }, + constants: []*Constant{nil, nil, {Value: types.NewStringDatum("MINUTE_MICROSECOND"), RetType: types.NewFieldType(mysql.TypeString)}}, + chunkSize: 128, + }, + { + retEvalType: types.ETDatetime, + childrenTypes: []types.EvalType{types.ETInt, types.ETDecimal, types.ETString}, + geners: []dataGenerator{ + &dateTimeIntGener{nullRation: 0.2}, + &defaultGener{eType: types.ETDecimal, nullRation: 0.2}, + }, + constants: []*Constant{nil, nil, {Value: types.NewStringDatum("MINUTE_SECOND"), RetType: types.NewFieldType(mysql.TypeString)}}, + chunkSize: 128, + }, + { + retEvalType: types.ETDatetime, + childrenTypes: []types.EvalType{types.ETInt, types.ETDecimal, types.ETString}, + geners: []dataGenerator{ + &dateTimeIntGener{nullRation: 0.2}, + &defaultGener{eType: types.ETDecimal, nullRation: 0.2}, + }, + constants: []*Constant{nil, nil, {Value: types.NewStringDatum("HOUR_MICROSECOND"), RetType: types.NewFieldType(mysql.TypeString)}}, + chunkSize: 128, + }, + { + retEvalType: types.ETDatetime, + childrenTypes: []types.EvalType{types.ETInt, types.ETDecimal, types.ETString}, + geners: []dataGenerator{ + &dateTimeIntGener{nullRation: 0.2}, + &defaultGener{eType: types.ETDecimal, 
nullRation: 0.2}, + }, + constants: []*Constant{nil, nil, {Value: types.NewStringDatum("HOUR_SECOND"), RetType: types.NewFieldType(mysql.TypeString)}}, + chunkSize: 128, + }, + { + retEvalType: types.ETDatetime, + childrenTypes: []types.EvalType{types.ETInt, types.ETDecimal, types.ETString}, + geners: []dataGenerator{ + &dateTimeIntGener{nullRation: 0.2}, + &defaultGener{eType: types.ETDecimal, nullRation: 0.2}, + }, + constants: []*Constant{nil, nil, {Value: types.NewStringDatum("HOUR_MINUTE"), RetType: types.NewFieldType(mysql.TypeString)}}, + chunkSize: 128, + }, + { + retEvalType: types.ETDatetime, + childrenTypes: []types.EvalType{types.ETInt, types.ETDecimal, types.ETString}, + geners: []dataGenerator{ + &dateTimeIntGener{nullRation: 0.2}, + &defaultGener{eType: types.ETDecimal, nullRation: 0.2}, + }, + constants: []*Constant{nil, nil, {Value: types.NewStringDatum("DAY_MICROSECOND"), RetType: types.NewFieldType(mysql.TypeString)}}, + chunkSize: 128, + }, + { + retEvalType: types.ETDatetime, + childrenTypes: []types.EvalType{types.ETInt, types.ETDecimal, types.ETString}, + geners: []dataGenerator{ + &dateTimeIntGener{nullRation: 0.2}, + &defaultGener{eType: types.ETDecimal, nullRation: 0.2}, + }, + constants: []*Constant{nil, nil, {Value: types.NewStringDatum("DAY_SECOND"), RetType: types.NewFieldType(mysql.TypeString)}}, + chunkSize: 128, + }, + { + retEvalType: types.ETDatetime, + childrenTypes: []types.EvalType{types.ETInt, types.ETDecimal, types.ETString}, + geners: []dataGenerator{ + &dateTimeIntGener{nullRation: 0.2}, + &defaultGener{eType: types.ETDecimal, nullRation: 0.2}, + }, + constants: []*Constant{nil, nil, {Value: types.NewStringDatum("DAY_MINUTE"), RetType: types.NewFieldType(mysql.TypeString)}}, + chunkSize: 128, + }, + { + retEvalType: types.ETDatetime, + childrenTypes: []types.EvalType{types.ETInt, types.ETDecimal, types.ETString}, + geners: []dataGenerator{ + &dateTimeIntGener{nullRation: 0.2}, + &defaultGener{eType: types.ETDecimal, nullRation: 0.2}, + }, + constants: []*Constant{nil, nil, {Value: types.NewStringDatum("DAY_HOUR"), RetType: types.NewFieldType(mysql.TypeString)}}, + chunkSize: 128, + }, + { + retEvalType: types.ETDatetime, + childrenTypes: []types.EvalType{types.ETInt, types.ETDecimal, types.ETString}, + geners: []dataGenerator{ + &dateTimeIntGener{nullRation: 0.2}, + &defaultGener{eType: types.ETDecimal, nullRation: 0.2}, + }, + constants: []*Constant{nil, nil, {Value: types.NewStringDatum("YEAR_MONTH"), RetType: types.NewFieldType(mysql.TypeString)}}, + chunkSize: 128, + }, + // builtinAddDateDatetimeStringSig + { + retEvalType: types.ETDatetime, + childrenTypes: []types.EvalType{types.ETDatetime, types.ETString, types.ETString}, + geners: []dataGenerator{ + &defaultGener{eType: types.ETDatetime, nullRation: 0.2}, + &numStrGener{rangeInt64Gener{math.MinInt32 + 1, math.MaxInt32}}, + }, + constants: []*Constant{nil, nil, {Value: types.NewStringDatum("MICROSECOND"), RetType: types.NewFieldType(mysql.TypeString)}}, + chunkSize: 128, + }, + { + retEvalType: types.ETDatetime, + childrenTypes: []types.EvalType{types.ETDatetime, types.ETString, types.ETString}, + geners: []dataGenerator{ + &defaultGener{eType: types.ETDatetime, nullRation: 0.2}, + &numStrGener{rangeInt64Gener{math.MinInt32 + 1, math.MaxInt32}}, + }, + constants: []*Constant{nil, nil, {Value: types.NewStringDatum("SECOND"), RetType: types.NewFieldType(mysql.TypeString)}}, + chunkSize: 128, + }, + { + retEvalType: types.ETDatetime, + childrenTypes: []types.EvalType{types.ETDatetime, 
types.ETString, types.ETString}, + geners: []dataGenerator{ + &defaultGener{eType: types.ETDatetime, nullRation: 0.2}, + &numStrGener{rangeInt64Gener{math.MinInt32 + 1, math.MaxInt32}}, + }, + constants: []*Constant{nil, nil, {Value: types.NewStringDatum("MINUTE"), RetType: types.NewFieldType(mysql.TypeString)}}, + chunkSize: 128, + }, + { + retEvalType: types.ETDatetime, + childrenTypes: []types.EvalType{types.ETDatetime, types.ETString, types.ETString}, + geners: []dataGenerator{ + &defaultGener{eType: types.ETDatetime, nullRation: 0.2}, + &numStrGener{rangeInt64Gener{math.MinInt32 + 1, math.MaxInt32}}, + }, + constants: []*Constant{nil, nil, {Value: types.NewStringDatum("HOUR"), RetType: types.NewFieldType(mysql.TypeString)}}, + chunkSize: 128, + }, + { + retEvalType: types.ETDatetime, + childrenTypes: []types.EvalType{types.ETDatetime, types.ETString, types.ETString}, + geners: []dataGenerator{ + &defaultGener{eType: types.ETDatetime, nullRation: 0.2}, + &numStrGener{rangeInt64Gener{math.MinInt32 + 1, math.MaxInt32}}, + }, + constants: []*Constant{nil, nil, {Value: types.NewStringDatum("DAY"), RetType: types.NewFieldType(mysql.TypeString)}}, + chunkSize: 128, + }, + { + retEvalType: types.ETDatetime, + childrenTypes: []types.EvalType{types.ETDatetime, types.ETString, types.ETString}, + geners: []dataGenerator{ + &defaultGener{eType: types.ETDatetime, nullRation: 0.2}, + &numStrGener{rangeInt64Gener{math.MinInt32 + 1, math.MaxInt32}}, + }, + constants: []*Constant{nil, nil, {Value: types.NewStringDatum("WEEK"), RetType: types.NewFieldType(mysql.TypeString)}}, + chunkSize: 128, + }, + { + retEvalType: types.ETDatetime, + childrenTypes: []types.EvalType{types.ETDatetime, types.ETString, types.ETString}, + geners: []dataGenerator{ + &defaultGener{eType: types.ETDatetime, nullRation: 0.2}, + &numStrGener{rangeInt64Gener{math.MinInt32 + 1, math.MaxInt32}}, + }, + constants: []*Constant{nil, nil, {Value: types.NewStringDatum("MONTH"), RetType: types.NewFieldType(mysql.TypeString)}}, + chunkSize: 128, + }, + { + retEvalType: types.ETDatetime, + childrenTypes: []types.EvalType{types.ETDatetime, types.ETString, types.ETString}, + geners: []dataGenerator{ + &defaultGener{eType: types.ETDatetime, nullRation: 0.2}, + &numStrGener{rangeInt64Gener{math.MinInt32 + 1, math.MaxInt32}}, + }, + constants: []*Constant{nil, nil, {Value: types.NewStringDatum("QUARTER"), RetType: types.NewFieldType(mysql.TypeString)}}, + chunkSize: 128, + }, + { + retEvalType: types.ETDatetime, + childrenTypes: []types.EvalType{types.ETDatetime, types.ETString, types.ETString}, + geners: []dataGenerator{ + &defaultGener{eType: types.ETDatetime, nullRation: 0.2}, + &numStrGener{rangeInt64Gener{math.MinInt32 + 1, math.MaxInt32}}, + }, + constants: []*Constant{nil, nil, {Value: types.NewStringDatum("YEAR"), RetType: types.NewFieldType(mysql.TypeString)}}, + chunkSize: 128, + }, + { + retEvalType: types.ETDatetime, + childrenTypes: []types.EvalType{types.ETDatetime, types.ETString, types.ETString}, + geners: []dataGenerator{ + &defaultGener{eType: types.ETDatetime, nullRation: 0.2}, + &numStrGener{rangeInt64Gener{math.MinInt32 + 1, math.MaxInt32}}, + }, + constants: []*Constant{nil, nil, {Value: types.NewStringDatum("SECOND_MICROSECOND"), RetType: types.NewFieldType(mysql.TypeString)}}, + chunkSize: 128, + }, + { + retEvalType: types.ETDatetime, + childrenTypes: []types.EvalType{types.ETDatetime, types.ETString, types.ETString}, + geners: []dataGenerator{ + &defaultGener{eType: types.ETDatetime, nullRation: 0.2}, + 
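// here the date argument is generated as a native datetime rather than parsed from a string. +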
&numStrGener{rangeInt64Gener{math.MinInt32 + 1, math.MaxInt32}}, + }, + constants: []*Constant{nil, nil, {Value: types.NewStringDatum("MINUTE_MICROSECOND"), RetType: types.NewFieldType(mysql.TypeString)}}, + chunkSize: 128, + }, + { + retEvalType: types.ETDatetime, + childrenTypes: []types.EvalType{types.ETDatetime, types.ETString, types.ETString}, + geners: []dataGenerator{ + &defaultGener{eType: types.ETDatetime, nullRation: 0.2}, + &numStrGener{rangeInt64Gener{math.MinInt32 + 1, math.MaxInt32}}, + }, + constants: []*Constant{nil, nil, {Value: types.NewStringDatum("MINUTE_SECOND"), RetType: types.NewFieldType(mysql.TypeString)}}, + chunkSize: 128, + }, + { + retEvalType: types.ETDatetime, + childrenTypes: []types.EvalType{types.ETDatetime, types.ETString, types.ETString}, + geners: []dataGenerator{ + &defaultGener{eType: types.ETDatetime, nullRation: 0.2}, + &numStrGener{rangeInt64Gener{math.MinInt32 + 1, math.MaxInt32}}, + }, + constants: []*Constant{nil, nil, {Value: types.NewStringDatum("HOUR_MICROSECOND"), RetType: types.NewFieldType(mysql.TypeString)}}, + chunkSize: 128, + }, + { + retEvalType: types.ETDatetime, + childrenTypes: []types.EvalType{types.ETDatetime, types.ETString, types.ETString}, + geners: []dataGenerator{ + &defaultGener{eType: types.ETDatetime, nullRation: 0.2}, + &numStrGener{rangeInt64Gener{math.MinInt32 + 1, math.MaxInt32}}, + }, + constants: []*Constant{nil, nil, {Value: types.NewStringDatum("HOUR_SECOND"), RetType: types.NewFieldType(mysql.TypeString)}}, + chunkSize: 128, + }, + { + retEvalType: types.ETDatetime, + childrenTypes: []types.EvalType{types.ETDatetime, types.ETString, types.ETString}, + geners: []dataGenerator{ + &defaultGener{eType: types.ETDatetime, nullRation: 0.2}, + &numStrGener{rangeInt64Gener{math.MinInt32 + 1, math.MaxInt32}}, + }, + constants: []*Constant{nil, nil, {Value: types.NewStringDatum("HOUR_MINUTE"), RetType: types.NewFieldType(mysql.TypeString)}}, + chunkSize: 128, + }, + { + retEvalType: types.ETDatetime, + childrenTypes: []types.EvalType{types.ETDatetime, types.ETString, types.ETString}, + geners: []dataGenerator{ + &defaultGener{eType: types.ETDatetime, nullRation: 0.2}, + &numStrGener{rangeInt64Gener{math.MinInt32 + 1, math.MaxInt32}}, + }, + constants: []*Constant{nil, nil, {Value: types.NewStringDatum("DAY_MICROSECOND"), RetType: types.NewFieldType(mysql.TypeString)}}, + chunkSize: 128, + }, + { + retEvalType: types.ETDatetime, + childrenTypes: []types.EvalType{types.ETDatetime, types.ETString, types.ETString}, + geners: []dataGenerator{ + &defaultGener{eType: types.ETDatetime, nullRation: 0.2}, + &numStrGener{rangeInt64Gener{math.MinInt32 + 1, math.MaxInt32}}, + }, + constants: []*Constant{nil, nil, {Value: types.NewStringDatum("DAY_SECOND"), RetType: types.NewFieldType(mysql.TypeString)}}, + chunkSize: 128, + }, + { + retEvalType: types.ETDatetime, + childrenTypes: []types.EvalType{types.ETDatetime, types.ETString, types.ETString}, + geners: []dataGenerator{ + &defaultGener{eType: types.ETDatetime, nullRation: 0.2}, + &numStrGener{rangeInt64Gener{math.MinInt32 + 1, math.MaxInt32}}, + }, + constants: []*Constant{nil, nil, {Value: types.NewStringDatum("DAY_MINUTE"), RetType: types.NewFieldType(mysql.TypeString)}}, + chunkSize: 128, + }, + { + retEvalType: types.ETDatetime, + childrenTypes: []types.EvalType{types.ETDatetime, types.ETString, types.ETString}, + geners: []dataGenerator{ + &defaultGener{eType: types.ETDatetime, nullRation: 0.2}, + &numStrGener{rangeInt64Gener{math.MinInt32 + 1, math.MaxInt32}}, + }, + constants: 
+		// builtinAddDateDatetimeIntSig
+		{
+			retEvalType: types.ETDatetime,
+			childrenTypes: []types.EvalType{types.ETDatetime, types.ETInt, types.ETString},
+			geners: []dataGenerator{
+				&defaultGener{eType: types.ETDatetime, nullRation: 0.2},
+				&defaultGener{eType: types.ETInt, nullRation: 0.2},
+			},
+			constants: []*Constant{nil, nil, {Value: types.NewStringDatum("MICROSECOND"), RetType: types.NewFieldType(mysql.TypeString)}},
+			chunkSize: 128,
+		},
+		{
+			retEvalType: types.ETDatetime,
+			childrenTypes: []types.EvalType{types.ETDatetime, types.ETInt, types.ETString},
+			geners: []dataGenerator{
+				&defaultGener{eType: types.ETDatetime, nullRation: 0.2},
+				&defaultGener{eType: types.ETInt, nullRation: 0.2},
+			},
+			constants: []*Constant{nil, nil, {Value: types.NewStringDatum("SECOND"), RetType: types.NewFieldType(mysql.TypeString)}},
+			chunkSize: 128,
+		},
+		{
+			retEvalType: types.ETDatetime,
+			childrenTypes: []types.EvalType{types.ETDatetime, types.ETInt, types.ETString},
+			geners: []dataGenerator{
+				&defaultGener{eType: types.ETDatetime, nullRation: 0.2},
+				&defaultGener{eType: types.ETInt, nullRation: 0.2},
+			},
+			constants: []*Constant{nil, nil, {Value: types.NewStringDatum("MINUTE"), RetType: types.NewFieldType(mysql.TypeString)}},
+			chunkSize: 128,
+		},
+		{
+			retEvalType: types.ETDatetime,
+			childrenTypes: []types.EvalType{types.ETDatetime, types.ETInt, types.ETString},
+			geners: []dataGenerator{
+				&defaultGener{eType: types.ETDatetime, nullRation: 0.2},
+				&defaultGener{eType: types.ETInt, nullRation: 0.2},
+			},
+			constants: []*Constant{nil, nil, {Value: types.NewStringDatum("HOUR"), RetType: types.NewFieldType(mysql.TypeString)}},
+			chunkSize: 128,
+		},
+		{
+			retEvalType: types.ETDatetime,
+			childrenTypes: []types.EvalType{types.ETDatetime, types.ETInt, types.ETString},
+			geners: []dataGenerator{
+				&defaultGener{eType: types.ETDatetime, nullRation: 0.2},
+				&defaultGener{eType: types.ETInt, nullRation: 0.2},
+			},
+			constants: []*Constant{nil, nil, {Value: types.NewStringDatum("DAY"), RetType: types.NewFieldType(mysql.TypeString)}},
+			chunkSize: 128,
+		},
+		{
+			retEvalType: types.ETDatetime,
+			childrenTypes: []types.EvalType{types.ETDatetime, types.ETInt, types.ETString},
+			geners: []dataGenerator{
+				&defaultGener{eType: types.ETDatetime, nullRation: 0.2},
+				&defaultGener{eType: types.ETInt, nullRation: 0.2},
+			},
+			constants: []*Constant{nil, nil, {Value: types.NewStringDatum("WEEK"), RetType: types.NewFieldType(mysql.TypeString)}},
+			chunkSize: 128,
+		},
+		{
+			retEvalType: types.ETDatetime,
+			childrenTypes: []types.EvalType{types.ETDatetime, types.ETInt, types.ETString},
+			geners: []dataGenerator{
+				&defaultGener{eType: types.ETDatetime, nullRation: 0.2},
+				&defaultGener{eType: types.ETInt, nullRation: 0.2},
+			},
+			constants: []*Constant{nil, nil, {Value: types.NewStringDatum("MONTH"), RetType: types.NewFieldType(mysql.TypeString)}},
+			chunkSize: 128,
+		},
+		{
+			retEvalType: types.ETDatetime,
+			childrenTypes: []types.EvalType{types.ETDatetime, types.ETInt, types.ETString},
+			geners: []dataGenerator{
+				&defaultGener{eType: types.ETDatetime, nullRation: 0.2},
+				&defaultGener{eType: types.ETInt, nullRation: 0.2},
+			},
+			constants: []*Constant{nil, nil, {Value: types.NewStringDatum("QUARTER"), RetType: types.NewFieldType(mysql.TypeString)}},
+			chunkSize: 128,
+		},
+		{
+			retEvalType: types.ETDatetime,
+			childrenTypes: []types.EvalType{types.ETDatetime, types.ETInt, types.ETString},
+			geners: []dataGenerator{
+				&defaultGener{eType: types.ETDatetime, nullRation: 0.2},
+				&defaultGener{eType: types.ETInt, nullRation: 0.2},
+			},
+			constants: []*Constant{nil, nil, {Value: types.NewStringDatum("YEAR"), RetType: types.NewFieldType(mysql.TypeString)}},
+			chunkSize: 128,
+		},
+		{
+			retEvalType: types.ETDatetime,
+			childrenTypes: []types.EvalType{types.ETDatetime, types.ETInt, types.ETString},
+			geners: []dataGenerator{
+				&defaultGener{eType: types.ETDatetime, nullRation: 0.2},
+				&defaultGener{eType: types.ETInt, nullRation: 0.2},
+			},
+			constants: []*Constant{nil, nil, {Value: types.NewStringDatum("SECOND_MICROSECOND"), RetType: types.NewFieldType(mysql.TypeString)}},
+			chunkSize: 128,
+		},
+		{
+			retEvalType: types.ETDatetime,
+			childrenTypes: []types.EvalType{types.ETDatetime, types.ETInt, types.ETString},
+			geners: []dataGenerator{
+				&defaultGener{eType: types.ETDatetime, nullRation: 0.2},
+				&defaultGener{eType: types.ETInt, nullRation: 0.2},
+			},
+			constants: []*Constant{nil, nil, {Value: types.NewStringDatum("MINUTE_MICROSECOND"), RetType: types.NewFieldType(mysql.TypeString)}},
+			chunkSize: 128,
+		},
+		{
+			retEvalType: types.ETDatetime,
+			childrenTypes: []types.EvalType{types.ETDatetime, types.ETInt, types.ETString},
+			geners: []dataGenerator{
+				&defaultGener{eType: types.ETDatetime, nullRation: 0.2},
+				&defaultGener{eType: types.ETInt, nullRation: 0.2},
+			},
+			constants: []*Constant{nil, nil, {Value: types.NewStringDatum("MINUTE_SECOND"), RetType: types.NewFieldType(mysql.TypeString)}},
+			chunkSize: 128,
+		},
+		{
+			retEvalType: types.ETDatetime,
+			childrenTypes: []types.EvalType{types.ETDatetime, types.ETInt, types.ETString},
+			geners: []dataGenerator{
+				&defaultGener{eType: types.ETDatetime, nullRation: 0.2},
+				&defaultGener{eType: types.ETInt, nullRation: 0.2},
+			},
+			constants: []*Constant{nil, nil, {Value: types.NewStringDatum("HOUR_MICROSECOND"), RetType: types.NewFieldType(mysql.TypeString)}},
+			chunkSize: 128,
+		},
+		{
+			retEvalType: types.ETDatetime,
+			childrenTypes: []types.EvalType{types.ETDatetime, types.ETInt, types.ETString},
+			geners: []dataGenerator{
+				&defaultGener{eType: types.ETDatetime, nullRation: 0.2},
+				&defaultGener{eType: types.ETInt, nullRation: 0.2},
+			},
+			constants: []*Constant{nil, nil, {Value: types.NewStringDatum("HOUR_SECOND"), RetType: types.NewFieldType(mysql.TypeString)}},
+			chunkSize: 128,
+		},
+		{
+			retEvalType: types.ETDatetime,
+			childrenTypes: []types.EvalType{types.ETDatetime, types.ETInt, types.ETString},
+			geners: []dataGenerator{
+				&defaultGener{eType: types.ETDatetime, nullRation: 0.2},
+				&defaultGener{eType: types.ETInt, nullRation: 0.2},
+			},
+			constants: []*Constant{nil, nil, {Value: types.NewStringDatum("HOUR_MINUTE"), RetType: types.NewFieldType(mysql.TypeString)}},
+			chunkSize: 128,
+		},
+		{
+			retEvalType: types.ETDatetime,
+			childrenTypes: []types.EvalType{types.ETDatetime, types.ETInt, types.ETString},
+			geners: []dataGenerator{
+				&defaultGener{eType: types.ETDatetime, nullRation: 0.2},
+				&defaultGener{eType: types.ETInt, nullRation: 0.2},
+			},
+			constants: []*Constant{nil, nil, {Value: types.NewStringDatum("DAY_MICROSECOND"), RetType: types.NewFieldType(mysql.TypeString)}},
+			chunkSize: 128,
+		},
+		{
+			retEvalType: types.ETDatetime,
+			childrenTypes: []types.EvalType{types.ETDatetime, types.ETInt, types.ETString},
+			geners: []dataGenerator{
+				&defaultGener{eType: types.ETDatetime, nullRation: 0.2},
+				&defaultGener{eType: types.ETInt, nullRation: 0.2},
+			},
+			constants: []*Constant{nil, nil, {Value: types.NewStringDatum("DAY_SECOND"), RetType: types.NewFieldType(mysql.TypeString)}},
+			chunkSize: 128,
+		},
+		{
+			retEvalType: types.ETDatetime,
+			childrenTypes: []types.EvalType{types.ETDatetime, types.ETInt, types.ETString},
+			geners: []dataGenerator{
+				&defaultGener{eType: types.ETDatetime, nullRation: 0.2},
+				&defaultGener{eType: types.ETInt, nullRation: 0.2},
+			},
+			constants: []*Constant{nil, nil, {Value: types.NewStringDatum("DAY_MINUTE"), RetType: types.NewFieldType(mysql.TypeString)}},
+			chunkSize: 128,
+		},
+		{
+			retEvalType: types.ETDatetime,
+			childrenTypes: []types.EvalType{types.ETDatetime, types.ETInt, types.ETString},
+			geners: []dataGenerator{
+				&defaultGener{eType: types.ETDatetime, nullRation: 0.2},
+				&defaultGener{eType: types.ETInt, nullRation: 0.2},
+			},
+			constants: []*Constant{nil, nil, {Value: types.NewStringDatum("DAY_HOUR"), RetType: types.NewFieldType(mysql.TypeString)}},
+			chunkSize: 128,
+		},
+		{
+			retEvalType: types.ETDatetime,
+			childrenTypes: []types.EvalType{types.ETDatetime, types.ETInt, types.ETString},
+			geners: []dataGenerator{
+				&defaultGener{eType: types.ETDatetime, nullRation: 0.2},
+				&defaultGener{eType: types.ETInt, nullRation: 0.2},
+			},
+			constants: []*Constant{nil, nil, {Value: types.NewStringDatum("YEAR_MONTH"), RetType: types.NewFieldType(mysql.TypeString)}},
+			chunkSize: 128,
+		},
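+		// Note (added commentary): the int, real, and decimal groups below
+		// repeat the same twenty units; only the interval argument's eval type
+		// changes, so each group dispatches to a different
+		// builtinAddDateDatetime*Sig.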
+		// builtinAddDateDatetimeRealSig
+		{
+			retEvalType: types.ETDatetime,
+			childrenTypes: []types.EvalType{types.ETDatetime, types.ETReal, types.ETString},
+			geners: []dataGenerator{
+				&defaultGener{eType: types.ETDatetime, nullRation: 0.2},
+				&defaultGener{eType: types.ETReal, nullRation: 0.2},
+			},
+			constants: []*Constant{nil, nil, {Value: types.NewStringDatum("MICROSECOND"), RetType: types.NewFieldType(mysql.TypeString)}},
+			chunkSize: 128,
+		},
+		{
+			retEvalType: types.ETDatetime,
+			childrenTypes: []types.EvalType{types.ETDatetime, types.ETReal, types.ETString},
+			geners: []dataGenerator{
+				&defaultGener{eType: types.ETDatetime, nullRation: 0.2},
+				&defaultGener{eType: types.ETReal, nullRation: 0.2},
+			},
+			constants: []*Constant{nil, nil, {Value: types.NewStringDatum("SECOND"), RetType: types.NewFieldType(mysql.TypeString)}},
+			chunkSize: 128,
+		},
+		{
+			retEvalType: types.ETDatetime,
+			childrenTypes: []types.EvalType{types.ETDatetime, types.ETReal, types.ETString},
+			geners: []dataGenerator{
+				&defaultGener{eType: types.ETDatetime, nullRation: 0.2},
+				&defaultGener{eType: types.ETReal, nullRation: 0.2},
+			},
+			constants: []*Constant{nil, nil, {Value: types.NewStringDatum("MINUTE"), RetType: types.NewFieldType(mysql.TypeString)}},
+			chunkSize: 128,
+		},
+		{
+			retEvalType: types.ETDatetime,
+			childrenTypes: []types.EvalType{types.ETDatetime, types.ETReal, types.ETString},
+			geners: []dataGenerator{
+				&defaultGener{eType: types.ETDatetime, nullRation: 0.2},
+				&defaultGener{eType: types.ETReal, nullRation: 0.2},
+			},
+			constants: []*Constant{nil, nil, {Value: types.NewStringDatum("HOUR"), RetType: types.NewFieldType(mysql.TypeString)}},
+			chunkSize: 128,
+		},
+		{
+			retEvalType: types.ETDatetime,
+			childrenTypes: []types.EvalType{types.ETDatetime, types.ETReal, types.ETString},
+			geners: []dataGenerator{
+				&defaultGener{eType: types.ETDatetime, nullRation: 0.2},
+				&defaultGener{eType: types.ETReal, nullRation: 0.2},
+			},
+			constants: []*Constant{nil, nil, {Value: types.NewStringDatum("DAY"), RetType: types.NewFieldType(mysql.TypeString)}},
+			chunkSize: 128,
+		},
+		{
+			retEvalType: types.ETDatetime,
+			childrenTypes: []types.EvalType{types.ETDatetime, types.ETReal, types.ETString},
+			geners: []dataGenerator{
+				&defaultGener{eType: types.ETDatetime, nullRation: 0.2},
+				&defaultGener{eType: types.ETReal, nullRation: 0.2},
+			},
+			constants: []*Constant{nil, nil, {Value: types.NewStringDatum("WEEK"), RetType: types.NewFieldType(mysql.TypeString)}},
+			chunkSize: 128,
+		},
+		{
+			retEvalType: types.ETDatetime,
+			childrenTypes: []types.EvalType{types.ETDatetime, types.ETReal, types.ETString},
+			geners: []dataGenerator{
+				&defaultGener{eType: types.ETDatetime, nullRation: 0.2},
+				&defaultGener{eType: types.ETReal, nullRation: 0.2},
+			},
+			constants: []*Constant{nil, nil, {Value: types.NewStringDatum("MONTH"), RetType: types.NewFieldType(mysql.TypeString)}},
+			chunkSize: 128,
+		},
+		{
+			retEvalType: types.ETDatetime,
+			childrenTypes: []types.EvalType{types.ETDatetime, types.ETReal, types.ETString},
+			geners: []dataGenerator{
+				&defaultGener{eType: types.ETDatetime, nullRation: 0.2},
+				&defaultGener{eType: types.ETReal, nullRation: 0.2},
+			},
+			constants: []*Constant{nil, nil, {Value: types.NewStringDatum("QUARTER"), RetType: types.NewFieldType(mysql.TypeString)}},
+			chunkSize: 128,
+		},
+		{
+			retEvalType: types.ETDatetime,
+			childrenTypes: []types.EvalType{types.ETDatetime, types.ETReal, types.ETString},
+			geners: []dataGenerator{
+				&defaultGener{eType: types.ETDatetime, nullRation: 0.2},
+				&defaultGener{eType: types.ETReal, nullRation: 0.2},
+			},
+			constants: []*Constant{nil, nil, {Value: types.NewStringDatum("YEAR"), RetType: types.NewFieldType(mysql.TypeString)}},
+			chunkSize: 128,
+		},
+		{
+			retEvalType: types.ETDatetime,
+			childrenTypes: []types.EvalType{types.ETDatetime, types.ETReal, types.ETString},
+			geners: []dataGenerator{
+				&defaultGener{eType: types.ETDatetime, nullRation: 0.2},
+				&defaultGener{eType: types.ETReal, nullRation: 0.2},
+			},
+			constants: []*Constant{nil, nil, {Value: types.NewStringDatum("SECOND_MICROSECOND"), RetType: types.NewFieldType(mysql.TypeString)}},
+			chunkSize: 128,
+		},
+		{
+			retEvalType: types.ETDatetime,
+			childrenTypes: []types.EvalType{types.ETDatetime, types.ETReal, types.ETString},
+			geners: []dataGenerator{
+				&defaultGener{eType: types.ETDatetime, nullRation: 0.2},
+				&defaultGener{eType: types.ETReal, nullRation: 0.2},
+			},
+			constants: []*Constant{nil, nil, {Value: types.NewStringDatum("MINUTE_MICROSECOND"), RetType: types.NewFieldType(mysql.TypeString)}},
+			chunkSize: 128,
+		},
+		{
+			retEvalType: types.ETDatetime,
+			childrenTypes: []types.EvalType{types.ETDatetime, types.ETReal, types.ETString},
+			geners: []dataGenerator{
+				&defaultGener{eType: types.ETDatetime, nullRation: 0.2},
+				&defaultGener{eType: types.ETReal, nullRation: 0.2},
+			},
+			constants: []*Constant{nil, nil, {Value: types.NewStringDatum("MINUTE_SECOND"), RetType: types.NewFieldType(mysql.TypeString)}},
+			chunkSize: 128,
+		},
+		{
+			retEvalType: types.ETDatetime,
+			childrenTypes: []types.EvalType{types.ETDatetime, types.ETReal, types.ETString},
+			geners: []dataGenerator{
+				&defaultGener{eType: types.ETDatetime, nullRation: 0.2},
+				&defaultGener{eType: types.ETReal, nullRation: 0.2},
+			},
+			constants: []*Constant{nil, nil, {Value: types.NewStringDatum("HOUR_MICROSECOND"), RetType: types.NewFieldType(mysql.TypeString)}},
+			chunkSize: 128,
+		},
+		{
+			retEvalType: types.ETDatetime,
+			childrenTypes: []types.EvalType{types.ETDatetime, types.ETReal, types.ETString},
+			geners: []dataGenerator{
+				&defaultGener{eType: types.ETDatetime, nullRation: 0.2},
+				&defaultGener{eType: types.ETReal, nullRation: 0.2},
+			},
+			constants: []*Constant{nil, nil, {Value: types.NewStringDatum("HOUR_SECOND"), RetType: types.NewFieldType(mysql.TypeString)}},
+			chunkSize: 128,
+		},
+		{
+			retEvalType: types.ETDatetime,
+			childrenTypes: []types.EvalType{types.ETDatetime, types.ETReal, types.ETString},
+			geners: []dataGenerator{
+				&defaultGener{eType: types.ETDatetime, nullRation: 0.2},
+				&defaultGener{eType: types.ETReal, nullRation: 0.2},
+			},
+			constants: []*Constant{nil, nil, {Value: types.NewStringDatum("HOUR_MINUTE"), RetType: types.NewFieldType(mysql.TypeString)}},
+			chunkSize: 128,
+		},
+		{
+			retEvalType: types.ETDatetime,
+			childrenTypes: []types.EvalType{types.ETDatetime, types.ETReal, types.ETString},
+			geners: []dataGenerator{
+				&defaultGener{eType: types.ETDatetime, nullRation: 0.2},
+				&defaultGener{eType: types.ETReal, nullRation: 0.2},
+			},
+			constants: []*Constant{nil, nil, {Value: types.NewStringDatum("DAY_MICROSECOND"), RetType: types.NewFieldType(mysql.TypeString)}},
+			chunkSize: 128,
+		},
+		{
+			retEvalType: types.ETDatetime,
+			childrenTypes: []types.EvalType{types.ETDatetime, types.ETReal, types.ETString},
+			geners: []dataGenerator{
+				&defaultGener{eType: types.ETDatetime, nullRation: 0.2},
+				&defaultGener{eType: types.ETReal, nullRation: 0.2},
+			},
+			constants: []*Constant{nil, nil, {Value: types.NewStringDatum("DAY_SECOND"), RetType: types.NewFieldType(mysql.TypeString)}},
+			chunkSize: 128,
+		},
+		{
+			retEvalType: types.ETDatetime,
+			childrenTypes: []types.EvalType{types.ETDatetime, types.ETReal, types.ETString},
+			geners: []dataGenerator{
+				&defaultGener{eType: types.ETDatetime, nullRation: 0.2},
+				&defaultGener{eType: types.ETReal, nullRation: 0.2},
+			},
+			constants: []*Constant{nil, nil, {Value: types.NewStringDatum("DAY_MINUTE"), RetType: types.NewFieldType(mysql.TypeString)}},
+			chunkSize: 128,
+		},
+		{
+			retEvalType: types.ETDatetime,
+			childrenTypes: []types.EvalType{types.ETDatetime, types.ETReal, types.ETString},
+			geners: []dataGenerator{
+				&defaultGener{eType: types.ETDatetime, nullRation: 0.2},
+				&defaultGener{eType: types.ETReal, nullRation: 0.2},
+			},
+			constants: []*Constant{nil, nil, {Value: types.NewStringDatum("DAY_HOUR"), RetType: types.NewFieldType(mysql.TypeString)}},
+			chunkSize: 128,
+		},
+		{
+			retEvalType: types.ETDatetime,
+			childrenTypes: []types.EvalType{types.ETDatetime, types.ETReal, types.ETString},
+			geners: []dataGenerator{
+				&defaultGener{eType: types.ETDatetime, nullRation: 0.2},
+				&defaultGener{eType: types.ETReal, nullRation: 0.2},
+			},
+			constants: []*Constant{nil, nil, {Value: types.NewStringDatum("YEAR_MONTH"), RetType: types.NewFieldType(mysql.TypeString)}},
+			chunkSize: 128,
+		},
+		// builtinAddDateDatetimeDecimalSig
+		{
+			retEvalType: types.ETDatetime,
+			childrenTypes: []types.EvalType{types.ETDatetime, types.ETDecimal, types.ETString},
+			geners: []dataGenerator{
+				&defaultGener{eType: types.ETDatetime, nullRation: 0.2},
+				&defaultGener{eType: types.ETDecimal, nullRation: 0.2},
+			},
+			constants: []*Constant{nil, nil, {Value: types.NewStringDatum("MICROSECOND"), RetType: types.NewFieldType(mysql.TypeString)}},
+			chunkSize: 128,
+		},
+		{
+			retEvalType: types.ETDatetime,
+			childrenTypes: []types.EvalType{types.ETDatetime, types.ETDecimal, types.ETString},
+			geners: []dataGenerator{
+				&defaultGener{eType: types.ETDatetime, nullRation: 0.2},
+				&defaultGener{eType: types.ETDecimal, nullRation: 0.2},
+			},
+			constants: []*Constant{nil, nil, {Value: types.NewStringDatum("SECOND"), RetType: types.NewFieldType(mysql.TypeString)}},
+			chunkSize: 128,
+		},
+		{
+			retEvalType: types.ETDatetime,
+			childrenTypes: []types.EvalType{types.ETDatetime, types.ETDecimal, types.ETString},
+			geners: []dataGenerator{
+				&defaultGener{eType: types.ETDatetime, nullRation: 0.2},
+				&defaultGener{eType: types.ETDecimal, nullRation: 0.2},
+			},
+			constants: []*Constant{nil, nil, {Value: types.NewStringDatum("MINUTE"), RetType: types.NewFieldType(mysql.TypeString)}},
+			chunkSize: 128,
+		},
+		{
+			retEvalType: types.ETDatetime,
+			childrenTypes: []types.EvalType{types.ETDatetime, types.ETDecimal, types.ETString},
+			geners: []dataGenerator{
+				&defaultGener{eType: types.ETDatetime, nullRation: 0.2},
+				&defaultGener{eType: types.ETDecimal, nullRation: 0.2},
+			},
+			constants: []*Constant{nil, nil, {Value: types.NewStringDatum("HOUR"), RetType: types.NewFieldType(mysql.TypeString)}},
+			chunkSize: 128,
+		},
+		{
+			retEvalType: types.ETDatetime,
+			childrenTypes: []types.EvalType{types.ETDatetime, types.ETDecimal, types.ETString},
+			geners: []dataGenerator{
+				&defaultGener{eType: types.ETDatetime, nullRation: 0.2},
+				&defaultGener{eType: types.ETDecimal, nullRation: 0.2},
+			},
+			constants: []*Constant{nil, nil, {Value: types.NewStringDatum("DAY"), RetType: types.NewFieldType(mysql.TypeString)}},
+			chunkSize: 128,
+		},
+		{
+			retEvalType: types.ETDatetime,
+			childrenTypes: []types.EvalType{types.ETDatetime, types.ETDecimal, types.ETString},
+			geners: []dataGenerator{
+				&defaultGener{eType: types.ETDatetime, nullRation: 0.2},
+				&defaultGener{eType: types.ETDecimal, nullRation: 0.2},
+			},
+			constants: []*Constant{nil, nil, {Value: types.NewStringDatum("WEEK"), RetType: types.NewFieldType(mysql.TypeString)}},
+			chunkSize: 128,
+		},
+		{
+			retEvalType: types.ETDatetime,
+			childrenTypes: []types.EvalType{types.ETDatetime, types.ETDecimal, types.ETString},
+			geners: []dataGenerator{
+				&defaultGener{eType: types.ETDatetime, nullRation: 0.2},
+				&defaultGener{eType: types.ETDecimal, nullRation: 0.2},
+			},
+			constants: []*Constant{nil, nil, {Value: types.NewStringDatum("MONTH"), RetType: types.NewFieldType(mysql.TypeString)}},
+			chunkSize: 128,
+		},
+		{
+			retEvalType: types.ETDatetime,
+			childrenTypes: []types.EvalType{types.ETDatetime, types.ETDecimal, types.ETString},
+			geners: []dataGenerator{
+				&defaultGener{eType: types.ETDatetime, nullRation: 0.2},
+				&defaultGener{eType: types.ETDecimal, nullRation: 0.2},
+			},
+			constants: []*Constant{nil, nil, {Value: types.NewStringDatum("QUARTER"), RetType: types.NewFieldType(mysql.TypeString)}},
+			chunkSize: 128,
+		},
+		{
+			retEvalType: types.ETDatetime,
+			childrenTypes: []types.EvalType{types.ETDatetime, types.ETDecimal, types.ETString},
+			geners: []dataGenerator{
+				&defaultGener{eType: types.ETDatetime, nullRation: 0.2},
+				&defaultGener{eType: types.ETDecimal, nullRation: 0.2},
+			},
+			constants: []*Constant{nil, nil, {Value: types.NewStringDatum("YEAR"), RetType: types.NewFieldType(mysql.TypeString)}},
+			chunkSize: 128,
+		},
+		{
+			retEvalType: types.ETDatetime,
+			childrenTypes: []types.EvalType{types.ETDatetime, types.ETDecimal, types.ETString},
+			geners: []dataGenerator{
+				&defaultGener{eType: types.ETDatetime, nullRation: 0.2},
+				&defaultGener{eType: types.ETDecimal, nullRation: 0.2},
+			},
+			constants: []*Constant{nil, nil, {Value: types.NewStringDatum("SECOND_MICROSECOND"), RetType: types.NewFieldType(mysql.TypeString)}},
+			chunkSize: 128,
+		},
+		{
+			retEvalType: types.ETDatetime,
+			childrenTypes: []types.EvalType{types.ETDatetime, types.ETDecimal, types.ETString},
+			geners: []dataGenerator{
+				&defaultGener{eType: types.ETDatetime, nullRation: 0.2},
+				&defaultGener{eType: types.ETDecimal, nullRation: 0.2},
+			},
+			constants: []*Constant{nil, nil, {Value: types.NewStringDatum("MINUTE_MICROSECOND"), RetType: types.NewFieldType(mysql.TypeString)}},
+			chunkSize: 128,
+		},
+		{
+			retEvalType: types.ETDatetime,
+			childrenTypes: []types.EvalType{types.ETDatetime, types.ETDecimal, types.ETString},
+			geners: []dataGenerator{
+				&defaultGener{eType: types.ETDatetime, nullRation: 0.2},
+				&defaultGener{eType: types.ETDecimal, nullRation: 0.2},
+			},
+			constants: []*Constant{nil, nil, {Value: types.NewStringDatum("MINUTE_SECOND"), RetType: types.NewFieldType(mysql.TypeString)}},
+			chunkSize: 128,
+		},
+		{
+			retEvalType: types.ETDatetime,
+			childrenTypes: []types.EvalType{types.ETDatetime, types.ETDecimal, types.ETString},
+			geners: []dataGenerator{
+				&defaultGener{eType: types.ETDatetime, nullRation: 0.2},
+				&defaultGener{eType: types.ETDecimal, nullRation: 0.2},
+			},
+			constants: []*Constant{nil, nil, {Value: types.NewStringDatum("HOUR_MICROSECOND"), RetType: types.NewFieldType(mysql.TypeString)}},
+			chunkSize: 128,
+		},
+		{
+			retEvalType: types.ETDatetime,
+			childrenTypes: []types.EvalType{types.ETDatetime, types.ETDecimal, types.ETString},
+			geners: []dataGenerator{
+				&defaultGener{eType: types.ETDatetime, nullRation: 0.2},
+				&defaultGener{eType: types.ETDecimal, nullRation: 0.2},
+			},
+			constants: []*Constant{nil, nil, {Value: types.NewStringDatum("HOUR_SECOND"), RetType: types.NewFieldType(mysql.TypeString)}},
+			chunkSize: 128,
+		},
+		{
+			retEvalType: types.ETDatetime,
+			childrenTypes: []types.EvalType{types.ETDatetime, types.ETDecimal, types.ETString},
+			geners: []dataGenerator{
+				&defaultGener{eType: types.ETDatetime, nullRation: 0.2},
+				&defaultGener{eType: types.ETDecimal, nullRation: 0.2},
+			},
+			constants: []*Constant{nil, nil, {Value: types.NewStringDatum("HOUR_MINUTE"), RetType: types.NewFieldType(mysql.TypeString)}},
+			chunkSize: 128,
+		},
+		{
+			retEvalType: types.ETDatetime,
+			childrenTypes: []types.EvalType{types.ETDatetime, types.ETDecimal, types.ETString},
+			geners: []dataGenerator{
+				&defaultGener{eType: types.ETDatetime, nullRation: 0.2},
+				&defaultGener{eType: types.ETDecimal, nullRation: 0.2},
+			},
+			constants: []*Constant{nil, nil, {Value: types.NewStringDatum("DAY_MICROSECOND"), RetType: types.NewFieldType(mysql.TypeString)}},
+			chunkSize: 128,
+		},
+		{
+			retEvalType: types.ETDatetime,
+			childrenTypes: []types.EvalType{types.ETDatetime, types.ETDecimal, types.ETString},
+			geners: []dataGenerator{
+				&defaultGener{eType: types.ETDatetime, nullRation: 0.2},
+				&defaultGener{eType: types.ETDecimal, nullRation: 0.2},
+			},
+			constants: []*Constant{nil, nil, {Value: types.NewStringDatum("DAY_SECOND"), RetType: types.NewFieldType(mysql.TypeString)}},
+			chunkSize: 128,
+		},
+		{
+			retEvalType: types.ETDatetime,
+			childrenTypes: []types.EvalType{types.ETDatetime, types.ETDecimal, types.ETString},
+			geners: []dataGenerator{
+				&defaultGener{eType: types.ETDatetime, nullRation: 0.2},
+				&defaultGener{eType: types.ETDecimal, nullRation: 0.2},
+			},
+			constants: []*Constant{nil, nil, {Value: types.NewStringDatum("DAY_MINUTE"), RetType: types.NewFieldType(mysql.TypeString)}},
+			chunkSize: 128,
+		},
+		{
+			retEvalType: types.ETDatetime,
+			childrenTypes: []types.EvalType{types.ETDatetime, types.ETDecimal, types.ETString},
+			geners: []dataGenerator{
+				&defaultGener{eType: types.ETDatetime, nullRation: 0.2},
+				&defaultGener{eType: types.ETDecimal, nullRation: 0.2},
+			},
+			constants: []*Constant{nil, nil, {Value: types.NewStringDatum("DAY_HOUR"), RetType: types.NewFieldType(mysql.TypeString)}},
+			chunkSize: 128,
+		},
+		{
+			retEvalType: types.ETDatetime,
+			childrenTypes: []types.EvalType{types.ETDatetime, types.ETDecimal, types.ETString},
+			geners: []dataGenerator{
+				&defaultGener{eType: types.ETDatetime, nullRation: 0.2},
+				&defaultGener{eType: types.ETDecimal, nullRation: 0.2},
+			},
+			constants: []*Constant{nil, nil, {Value: types.NewStringDatum("YEAR_MONTH"), RetType: types.NewFieldType(mysql.TypeString)}},
+			chunkSize: 128,
+		},
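+		// Note (added commentary): the remaining ADDDATE groups take a
+		// duration first argument, so retEvalType switches to types.ETDuration
+		// and the cases cover the builtinAddDateDuration*Sig family with the
+		// same set of units.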
types.NewStringDatum("DAY_MINUTE"), RetType: types.NewFieldType(mysql.TypeString)}}, + chunkSize: 128, + }, + { + retEvalType: types.ETDatetime, + childrenTypes: []types.EvalType{types.ETDatetime, types.ETDecimal, types.ETString}, + geners: []dataGenerator{ + &defaultGener{eType: types.ETDatetime, nullRation: 0.2}, + &defaultGener{eType: types.ETDecimal, nullRation: 0.2}, + }, + constants: []*Constant{nil, nil, {Value: types.NewStringDatum("DAY_HOUR"), RetType: types.NewFieldType(mysql.TypeString)}}, + chunkSize: 128, + }, + { + retEvalType: types.ETDatetime, + childrenTypes: []types.EvalType{types.ETDatetime, types.ETDecimal, types.ETString}, + geners: []dataGenerator{ + &defaultGener{eType: types.ETDatetime, nullRation: 0.2}, + &defaultGener{eType: types.ETDecimal, nullRation: 0.2}, + }, + constants: []*Constant{nil, nil, {Value: types.NewStringDatum("YEAR_MONTH"), RetType: types.NewFieldType(mysql.TypeString)}}, + chunkSize: 128, + }, + // builtinAddDateDurationStringSig + { + retEvalType: types.ETDuration, + childrenTypes: []types.EvalType{types.ETDuration, types.ETString, types.ETString}, + geners: []dataGenerator{ + &defaultGener{eType: types.ETDuration, nullRation: 0.2}, + &numStrGener{rangeInt64Gener{math.MinInt32 + 1, math.MaxInt32}}, + }, + constants: []*Constant{nil, nil, {Value: types.NewStringDatum("MICROSECOND"), RetType: types.NewFieldType(mysql.TypeString)}}, + chunkSize: 128, + }, + { + retEvalType: types.ETDuration, + childrenTypes: []types.EvalType{types.ETDuration, types.ETString, types.ETString}, + geners: []dataGenerator{ + &defaultGener{eType: types.ETDuration, nullRation: 0.2}, + &numStrGener{rangeInt64Gener{math.MinInt32 + 1, math.MaxInt32}}, + }, + constants: []*Constant{nil, nil, {Value: types.NewStringDatum("SECOND"), RetType: types.NewFieldType(mysql.TypeString)}}, + chunkSize: 128, + }, + { + retEvalType: types.ETDuration, + childrenTypes: []types.EvalType{types.ETDuration, types.ETString, types.ETString}, + geners: []dataGenerator{ + &defaultGener{eType: types.ETDuration, nullRation: 0.2}, + &numStrGener{rangeInt64Gener{math.MinInt32 + 1, math.MaxInt32}}, + }, + constants: []*Constant{nil, nil, {Value: types.NewStringDatum("MINUTE"), RetType: types.NewFieldType(mysql.TypeString)}}, + chunkSize: 128, + }, + { + retEvalType: types.ETDuration, + childrenTypes: []types.EvalType{types.ETDuration, types.ETString, types.ETString}, + geners: []dataGenerator{ + &defaultGener{eType: types.ETDuration, nullRation: 0.2}, + &numStrGener{rangeInt64Gener{math.MinInt32 + 1, math.MaxInt32}}, + }, + constants: []*Constant{nil, nil, {Value: types.NewStringDatum("HOUR"), RetType: types.NewFieldType(mysql.TypeString)}}, + chunkSize: 128, + }, + { + retEvalType: types.ETDuration, + childrenTypes: []types.EvalType{types.ETDuration, types.ETString, types.ETString}, + geners: []dataGenerator{ + &defaultGener{eType: types.ETDuration, nullRation: 0.2}, + &numStrGener{rangeInt64Gener{math.MinInt32 + 1, math.MaxInt32}}, + }, + constants: []*Constant{nil, nil, {Value: types.NewStringDatum("DAY"), RetType: types.NewFieldType(mysql.TypeString)}}, + chunkSize: 128, + }, + { + retEvalType: types.ETDuration, + childrenTypes: []types.EvalType{types.ETDuration, types.ETString, types.ETString}, + geners: []dataGenerator{ + &defaultGener{eType: types.ETDuration, nullRation: 0.2}, + &numStrGener{rangeInt64Gener{math.MinInt32 + 1, math.MaxInt32}}, + }, + constants: []*Constant{nil, nil, {Value: types.NewStringDatum("WEEK"), RetType: types.NewFieldType(mysql.TypeString)}}, + chunkSize: 128, + }, + { + 
retEvalType: types.ETDuration, + childrenTypes: []types.EvalType{types.ETDuration, types.ETString, types.ETString}, + geners: []dataGenerator{ + &defaultGener{eType: types.ETDuration, nullRation: 0.2}, + &numStrGener{rangeInt64Gener{math.MinInt32 + 1, math.MaxInt32}}, + }, + constants: []*Constant{nil, nil, {Value: types.NewStringDatum("MONTH"), RetType: types.NewFieldType(mysql.TypeString)}}, + chunkSize: 128, + }, + { + retEvalType: types.ETDuration, + childrenTypes: []types.EvalType{types.ETDuration, types.ETString, types.ETString}, + geners: []dataGenerator{ + &defaultGener{eType: types.ETDuration, nullRation: 0.2}, + &numStrGener{rangeInt64Gener{math.MinInt32 + 1, math.MaxInt32}}, + }, + constants: []*Constant{nil, nil, {Value: types.NewStringDatum("QUARTER"), RetType: types.NewFieldType(mysql.TypeString)}}, + chunkSize: 128, + }, + { + retEvalType: types.ETDuration, + childrenTypes: []types.EvalType{types.ETDuration, types.ETString, types.ETString}, + geners: []dataGenerator{ + &defaultGener{eType: types.ETDuration, nullRation: 0.2}, + &numStrGener{rangeInt64Gener{math.MinInt32 + 1, math.MaxInt32}}, + }, + constants: []*Constant{nil, nil, {Value: types.NewStringDatum("YEAR"), RetType: types.NewFieldType(mysql.TypeString)}}, + chunkSize: 128, + }, + { + retEvalType: types.ETDuration, + childrenTypes: []types.EvalType{types.ETDuration, types.ETString, types.ETString}, + geners: []dataGenerator{ + &defaultGener{eType: types.ETDuration, nullRation: 0.2}, + &numStrGener{rangeInt64Gener{math.MinInt32 + 1, math.MaxInt32}}, + }, + constants: []*Constant{nil, nil, {Value: types.NewStringDatum("SECOND_MICROSECOND"), RetType: types.NewFieldType(mysql.TypeString)}}, + chunkSize: 128, + }, + { + retEvalType: types.ETDuration, + childrenTypes: []types.EvalType{types.ETDuration, types.ETString, types.ETString}, + geners: []dataGenerator{ + &defaultGener{eType: types.ETDuration, nullRation: 0.2}, + &numStrGener{rangeInt64Gener{math.MinInt32 + 1, math.MaxInt32}}, + }, + constants: []*Constant{nil, nil, {Value: types.NewStringDatum("MINUTE_MICROSECOND"), RetType: types.NewFieldType(mysql.TypeString)}}, + chunkSize: 128, + }, + { + retEvalType: types.ETDuration, + childrenTypes: []types.EvalType{types.ETDuration, types.ETString, types.ETString}, + geners: []dataGenerator{ + &defaultGener{eType: types.ETDuration, nullRation: 0.2}, + &numStrGener{rangeInt64Gener{math.MinInt32 + 1, math.MaxInt32}}, + }, + constants: []*Constant{nil, nil, {Value: types.NewStringDatum("MINUTE_SECOND"), RetType: types.NewFieldType(mysql.TypeString)}}, + chunkSize: 128, + }, + { + retEvalType: types.ETDuration, + childrenTypes: []types.EvalType{types.ETDuration, types.ETString, types.ETString}, + geners: []dataGenerator{ + &defaultGener{eType: types.ETDuration, nullRation: 0.2}, + &numStrGener{rangeInt64Gener{math.MinInt32 + 1, math.MaxInt32}}, + }, + constants: []*Constant{nil, nil, {Value: types.NewStringDatum("HOUR_MICROSECOND"), RetType: types.NewFieldType(mysql.TypeString)}}, + chunkSize: 128, + }, + { + retEvalType: types.ETDuration, + childrenTypes: []types.EvalType{types.ETDuration, types.ETString, types.ETString}, + geners: []dataGenerator{ + &defaultGener{eType: types.ETDuration, nullRation: 0.2}, + &numStrGener{rangeInt64Gener{math.MinInt32 + 1, math.MaxInt32}}, + }, + constants: []*Constant{nil, nil, {Value: types.NewStringDatum("HOUR_SECOND"), RetType: types.NewFieldType(mysql.TypeString)}}, + chunkSize: 128, + }, + { + retEvalType: types.ETDuration, + childrenTypes: []types.EvalType{types.ETDuration, 
+		// builtinAddDateDurationIntSig
+		{
+			retEvalType: types.ETDuration,
+			childrenTypes: []types.EvalType{types.ETDuration, types.ETInt, types.ETString},
+			geners: []dataGenerator{
+				&defaultGener{eType: types.ETDuration, nullRation: 0.2},
+				&defaultGener{eType: types.ETInt, nullRation: 0.2},
+			},
+			constants: []*Constant{nil, nil, {Value: types.NewStringDatum("MICROSECOND"), RetType: types.NewFieldType(mysql.TypeString)}},
+			chunkSize: 128,
+		},
+		{
+			retEvalType: types.ETDuration,
+			childrenTypes: []types.EvalType{types.ETDuration, types.ETInt, types.ETString},
+			geners: []dataGenerator{
+				&defaultGener{eType: types.ETDuration, nullRation: 0.2},
+				&defaultGener{eType: types.ETInt, nullRation: 0.2},
+			},
+			constants: []*Constant{nil, nil, {Value: types.NewStringDatum("SECOND"), RetType: types.NewFieldType(mysql.TypeString)}},
+			chunkSize: 128,
+		},
+		{
+			retEvalType: types.ETDuration,
+			childrenTypes: []types.EvalType{types.ETDuration, types.ETInt, types.ETString},
+			geners: []dataGenerator{
+				&defaultGener{eType: types.ETDuration, nullRation: 0.2},
+				&defaultGener{eType: types.ETInt, nullRation: 0.2},
+			},
+			constants: []*Constant{nil, nil, {Value: types.NewStringDatum("MINUTE"), RetType: types.NewFieldType(mysql.TypeString)}},
+			chunkSize: 128,
+		},
+		{
+			retEvalType: types.ETDuration,
+			childrenTypes: []types.EvalType{types.ETDuration, types.ETInt, types.ETString},
+			geners: []dataGenerator{
+				&defaultGener{eType: types.ETDuration, nullRation: 0.2},
+				&defaultGener{eType: types.ETInt, nullRation: 0.2},
+			},
+			constants: []*Constant{nil, nil, {Value: types.NewStringDatum("HOUR"), RetType: types.NewFieldType(mysql.TypeString)}},
+			chunkSize: 128,
+		},
+		{
+			retEvalType: types.ETDuration,
+			childrenTypes: []types.EvalType{types.ETDuration, types.ETInt, types.ETString},
+			geners: []dataGenerator{
+				&defaultGener{eType: types.ETDuration, nullRation: 0.2},
+				&defaultGener{eType: types.ETInt, nullRation: 0.2},
+			},
+			constants: []*Constant{nil, nil, {Value: types.NewStringDatum("DAY"), RetType: types.NewFieldType(mysql.TypeString)}},
+			chunkSize: 128,
+		},
+		{
+			retEvalType: types.ETDuration,
+			childrenTypes: []types.EvalType{types.ETDuration, types.ETInt, types.ETString},
+			geners: []dataGenerator{
+				&defaultGener{eType: types.ETDuration, nullRation: 0.2},
+				&defaultGener{eType: types.ETInt, nullRation: 0.2},
+			},
+			constants: []*Constant{nil, nil, {Value: types.NewStringDatum("WEEK"), RetType: types.NewFieldType(mysql.TypeString)}},
+			chunkSize: 128,
+		},
+		{
+			retEvalType: types.ETDuration,
+			childrenTypes: []types.EvalType{types.ETDuration, types.ETInt, types.ETString},
+			geners: []dataGenerator{
+				&defaultGener{eType: types.ETDuration, nullRation: 0.2},
+				&defaultGener{eType: types.ETInt, nullRation: 0.2},
+			},
+			constants: []*Constant{nil, nil, {Value: types.NewStringDatum("MONTH"), RetType: types.NewFieldType(mysql.TypeString)}},
+			chunkSize: 128,
+		},
+		{
+			retEvalType: types.ETDuration,
+			childrenTypes: []types.EvalType{types.ETDuration, types.ETInt, types.ETString},
+			geners: []dataGenerator{
+				&defaultGener{eType: types.ETDuration, nullRation: 0.2},
+				&defaultGener{eType: types.ETInt, nullRation: 0.2},
+			},
+			constants: []*Constant{nil, nil, {Value: types.NewStringDatum("QUARTER"), RetType: types.NewFieldType(mysql.TypeString)}},
+			chunkSize: 128,
+		},
+		{
+			retEvalType: types.ETDuration,
+			childrenTypes: []types.EvalType{types.ETDuration, types.ETInt, types.ETString},
+			geners: []dataGenerator{
+				&defaultGener{eType: types.ETDuration, nullRation: 0.2},
+				&defaultGener{eType: types.ETInt, nullRation: 0.2},
+			},
+			constants: []*Constant{nil, nil, {Value: types.NewStringDatum("YEAR"), RetType: types.NewFieldType(mysql.TypeString)}},
+			chunkSize: 128,
+		},
+		{
+			retEvalType: types.ETDuration,
+			childrenTypes: []types.EvalType{types.ETDuration, types.ETInt, types.ETString},
+			geners: []dataGenerator{
+				&defaultGener{eType: types.ETDuration, nullRation: 0.2},
+				&defaultGener{eType: types.ETInt, nullRation: 0.2},
+			},
+			constants: []*Constant{nil, nil, {Value: types.NewStringDatum("SECOND_MICROSECOND"), RetType: types.NewFieldType(mysql.TypeString)}},
+			chunkSize: 128,
+		},
+		{
+			retEvalType: types.ETDuration,
+			childrenTypes: []types.EvalType{types.ETDuration, types.ETInt, types.ETString},
+			geners: []dataGenerator{
+				&defaultGener{eType: types.ETDuration, nullRation: 0.2},
+				&defaultGener{eType: types.ETInt, nullRation: 0.2},
+			},
+			constants: []*Constant{nil, nil, {Value: types.NewStringDatum("MINUTE_MICROSECOND"), RetType: types.NewFieldType(mysql.TypeString)}},
+			chunkSize: 128,
+		},
+		{
+			retEvalType: types.ETDuration,
+			childrenTypes: []types.EvalType{types.ETDuration, types.ETInt, types.ETString},
+			geners: []dataGenerator{
+				&defaultGener{eType: types.ETDuration, nullRation: 0.2},
+				&defaultGener{eType: types.ETInt, nullRation: 0.2},
+			},
+			constants: []*Constant{nil, nil, {Value: types.NewStringDatum("MINUTE_SECOND"), RetType: types.NewFieldType(mysql.TypeString)}},
+			chunkSize: 128,
+		},
+		{
+			retEvalType: types.ETDuration,
+			childrenTypes: []types.EvalType{types.ETDuration, types.ETInt, types.ETString},
+			geners: []dataGenerator{
+				&defaultGener{eType: types.ETDuration, nullRation: 0.2},
+				&defaultGener{eType: types.ETInt, nullRation: 0.2},
+			},
+			constants: []*Constant{nil, nil, {Value: types.NewStringDatum("HOUR_MICROSECOND"), RetType: types.NewFieldType(mysql.TypeString)}},
+			chunkSize: 128,
+		},
+		{
+			retEvalType: types.ETDuration,
+			childrenTypes: []types.EvalType{types.ETDuration, types.ETInt, types.ETString},
+			geners: []dataGenerator{
+				&defaultGener{eType: types.ETDuration, nullRation: 0.2},
+				&defaultGener{eType: types.ETInt, nullRation: 0.2},
+			},
+			constants: []*Constant{nil, nil, {Value: types.NewStringDatum("HOUR_SECOND"), RetType: types.NewFieldType(mysql.TypeString)}},
+			chunkSize: 128,
+		},
+		{
+			retEvalType: types.ETDuration,
+			childrenTypes: []types.EvalType{types.ETDuration, types.ETInt, types.ETString},
+			geners: []dataGenerator{
+				&defaultGener{eType: types.ETDuration, nullRation: 0.2},
+				&defaultGener{eType: types.ETInt, nullRation: 0.2},
+			},
+			constants: []*Constant{nil, nil, {Value: types.NewStringDatum("HOUR_MINUTE"), RetType: types.NewFieldType(mysql.TypeString)}},
+			chunkSize: 128,
+		},
+		{
+			retEvalType: types.ETDuration,
+			childrenTypes: []types.EvalType{types.ETDuration, types.ETInt, types.ETString},
+			geners: []dataGenerator{
+				&defaultGener{eType: types.ETDuration, nullRation: 0.2},
+				&defaultGener{eType: types.ETInt, nullRation: 0.2},
+			},
+			constants: []*Constant{nil, nil, {Value: types.NewStringDatum("DAY_MICROSECOND"), RetType: types.NewFieldType(mysql.TypeString)}},
+			chunkSize: 128,
+		},
+		{
+			retEvalType: types.ETDuration,
+			childrenTypes: []types.EvalType{types.ETDuration, types.ETInt, types.ETString},
+			geners: []dataGenerator{
+				&defaultGener{eType: types.ETDuration, nullRation: 0.2},
+				&defaultGener{eType: types.ETInt, nullRation: 0.2},
+			},
+			constants: []*Constant{nil, nil, {Value: types.NewStringDatum("DAY_SECOND"), RetType: types.NewFieldType(mysql.TypeString)}},
+			chunkSize: 128,
+		},
+		{
+			retEvalType: types.ETDuration,
+			childrenTypes: []types.EvalType{types.ETDuration, types.ETInt, types.ETString},
+			geners: []dataGenerator{
+				&defaultGener{eType: types.ETDuration, nullRation: 0.2},
+				&defaultGener{eType: types.ETInt, nullRation: 0.2},
+			},
+			constants: []*Constant{nil, nil, {Value: types.NewStringDatum("DAY_MINUTE"), RetType: types.NewFieldType(mysql.TypeString)}},
+			chunkSize: 128,
+		},
+		{
+			retEvalType: types.ETDuration,
+			childrenTypes: []types.EvalType{types.ETDuration, types.ETInt, types.ETString},
+			geners: []dataGenerator{
+				&defaultGener{eType: types.ETDuration, nullRation: 0.2},
+				&defaultGener{eType: types.ETInt, nullRation: 0.2},
+			},
+			constants: []*Constant{nil, nil, {Value: types.NewStringDatum("DAY_HOUR"), RetType: types.NewFieldType(mysql.TypeString)}},
+			chunkSize: 128,
+		},
+		{
+			retEvalType: types.ETDuration,
+			childrenTypes: []types.EvalType{types.ETDuration, types.ETInt, types.ETString},
+			geners: []dataGenerator{
+				&defaultGener{eType: types.ETDuration, nullRation: 0.2},
+				&defaultGener{eType: types.ETInt, nullRation: 0.2},
+			},
+			constants: []*Constant{nil, nil, {Value: types.NewStringDatum("YEAR_MONTH"), RetType: types.NewFieldType(mysql.TypeString)}},
+			chunkSize: 128,
+		},
+		// builtinAddDateDurationRealSig
+		{
+			retEvalType: types.ETDuration,
+			childrenTypes: []types.EvalType{types.ETDuration, types.ETReal, types.ETString},
+			geners: []dataGenerator{
+				&defaultGener{eType: types.ETDuration, nullRation: 0.2},
+				&defaultGener{eType: types.ETReal, nullRation: 0.2},
+			},
+			constants: []*Constant{nil, nil, {Value: types.NewStringDatum("MICROSECOND"), RetType: types.NewFieldType(mysql.TypeString)}},
+			chunkSize: 128,
+		},
+		{
+			retEvalType: types.ETDuration,
+			childrenTypes: []types.EvalType{types.ETDuration, types.ETReal, types.ETString},
+			geners: []dataGenerator{
+				&defaultGener{eType: types.ETDuration, nullRation: 0.2},
+				&defaultGener{eType: types.ETReal, nullRation: 0.2},
+			},
+			constants: []*Constant{nil, nil, {Value: types.NewStringDatum("SECOND"), RetType: types.NewFieldType(mysql.TypeString)}},
+			chunkSize: 128,
+		},
+		{
+			retEvalType: types.ETDuration,
+			childrenTypes: []types.EvalType{types.ETDuration, types.ETReal, types.ETString},
+			geners: []dataGenerator{
+				&defaultGener{eType: types.ETDuration, nullRation: 0.2},
+				&defaultGener{eType: types.ETReal, nullRation: 0.2},
+			},
+			constants: []*Constant{nil, nil, {Value: types.NewStringDatum("MINUTE"), RetType: types.NewFieldType(mysql.TypeString)}},
+			chunkSize: 128,
+		},
+		{
+			retEvalType: types.ETDuration,
+			childrenTypes: []types.EvalType{types.ETDuration, types.ETReal, types.ETString},
+			geners: []dataGenerator{
+				&defaultGener{eType: types.ETDuration, nullRation: 0.2},
+				&defaultGener{eType: types.ETReal, nullRation: 0.2},
+			},
+			constants: []*Constant{nil, nil, {Value: types.NewStringDatum("HOUR"), RetType: types.NewFieldType(mysql.TypeString)}},
+			chunkSize: 128,
+		},
+		{
+			retEvalType: types.ETDuration,
+			childrenTypes: []types.EvalType{types.ETDuration, types.ETReal, types.ETString},
+			geners: []dataGenerator{
+				&defaultGener{eType: types.ETDuration, nullRation: 0.2},
+				&defaultGener{eType: types.ETReal, nullRation: 0.2},
+			},
+			constants: []*Constant{nil, nil, {Value: types.NewStringDatum("DAY"), RetType: types.NewFieldType(mysql.TypeString)}},
+			chunkSize: 128,
+		},
+		{
+			retEvalType: types.ETDuration,
+			childrenTypes: []types.EvalType{types.ETDuration, types.ETReal, types.ETString},
+			geners: []dataGenerator{
+				&defaultGener{eType: types.ETDuration, nullRation: 0.2},
+				&defaultGener{eType: types.ETReal, nullRation: 0.2},
+			},
+			constants: []*Constant{nil, nil, {Value: types.NewStringDatum("WEEK"), RetType: types.NewFieldType(mysql.TypeString)}},
+			chunkSize: 128,
+		},
+		{
+			retEvalType: types.ETDuration,
+			childrenTypes: []types.EvalType{types.ETDuration, types.ETReal, types.ETString},
+			geners: []dataGenerator{
+				&defaultGener{eType: types.ETDuration, nullRation: 0.2},
+				&defaultGener{eType: types.ETReal, nullRation: 0.2},
+			},
+			constants: []*Constant{nil, nil, {Value: types.NewStringDatum("MONTH"), RetType: types.NewFieldType(mysql.TypeString)}},
+			chunkSize: 128,
+		},
+		{
+			retEvalType: types.ETDuration,
+			childrenTypes: []types.EvalType{types.ETDuration, types.ETReal, types.ETString},
+			geners: []dataGenerator{
+				&defaultGener{eType: types.ETDuration, nullRation: 0.2},
+				&defaultGener{eType: types.ETReal, nullRation: 0.2},
+			},
+			constants: []*Constant{nil, nil, {Value: types.NewStringDatum("QUARTER"), RetType: types.NewFieldType(mysql.TypeString)}},
+			chunkSize: 128,
+		},
+		{
+			retEvalType: types.ETDuration,
+			childrenTypes: []types.EvalType{types.ETDuration, types.ETReal, types.ETString},
+			geners: []dataGenerator{
+				&defaultGener{eType: types.ETDuration, nullRation: 0.2},
+				&defaultGener{eType: types.ETReal, nullRation: 0.2},
+			},
+			constants: []*Constant{nil, nil, {Value: types.NewStringDatum("YEAR"), RetType: types.NewFieldType(mysql.TypeString)}},
+			chunkSize: 128,
+		},
+		{
+			retEvalType: types.ETDuration,
+			childrenTypes: []types.EvalType{types.ETDuration, types.ETReal, types.ETString},
+			geners: []dataGenerator{
+				&defaultGener{eType: types.ETDuration, nullRation: 0.2},
+				&defaultGener{eType: types.ETReal, nullRation: 0.2},
+			},
+			constants: []*Constant{nil, nil, {Value: types.NewStringDatum("SECOND_MICROSECOND"), RetType: types.NewFieldType(mysql.TypeString)}},
+			chunkSize: 128,
+		},
+		{
+			retEvalType: types.ETDuration,
+			childrenTypes: []types.EvalType{types.ETDuration, types.ETReal, types.ETString},
+			geners: []dataGenerator{
+				&defaultGener{eType: types.ETDuration, nullRation: 0.2},
+				&defaultGener{eType: types.ETReal, nullRation: 0.2},
+			},
+			constants: []*Constant{nil, nil, {Value: types.NewStringDatum("MINUTE_MICROSECOND"), RetType: types.NewFieldType(mysql.TypeString)}},
+			chunkSize: 128,
+		},
+		{
+			retEvalType: types.ETDuration,
+			childrenTypes: []types.EvalType{types.ETDuration, types.ETReal, types.ETString},
+			geners: []dataGenerator{
+				&defaultGener{eType: types.ETDuration, nullRation: 0.2},
+				&defaultGener{eType: types.ETReal, nullRation: 0.2},
+			},
+			constants: []*Constant{nil, nil, {Value: types.NewStringDatum("MINUTE_SECOND"), RetType: types.NewFieldType(mysql.TypeString)}},
+			chunkSize: 128,
+		},
+		{
+			retEvalType: types.ETDuration,
+			childrenTypes: []types.EvalType{types.ETDuration, types.ETReal, types.ETString},
+			geners: []dataGenerator{
+				&defaultGener{eType: types.ETDuration, nullRation: 0.2},
+				&defaultGener{eType: types.ETReal, nullRation: 0.2},
+			},
+			constants: []*Constant{nil, nil, {Value: types.NewStringDatum("HOUR_MICROSECOND"), RetType: types.NewFieldType(mysql.TypeString)}},
+			chunkSize: 128,
+		},
+		{
+			retEvalType: types.ETDuration,
+			childrenTypes: []types.EvalType{types.ETDuration, types.ETReal, types.ETString},
+			geners: []dataGenerator{
+				&defaultGener{eType: types.ETDuration, nullRation: 0.2},
+				&defaultGener{eType: types.ETReal, nullRation: 0.2},
+			},
+			constants: []*Constant{nil, nil, {Value: types.NewStringDatum("HOUR_SECOND"), RetType: types.NewFieldType(mysql.TypeString)}},
+			chunkSize: 128,
+		},
+		{
+			retEvalType: types.ETDuration,
+			childrenTypes: []types.EvalType{types.ETDuration, types.ETReal, types.ETString},
+			geners: []dataGenerator{
+				&defaultGener{eType: types.ETDuration, nullRation: 0.2},
+				&defaultGener{eType: types.ETReal, nullRation: 0.2},
+			},
+			constants: []*Constant{nil, nil, {Value: types.NewStringDatum("HOUR_MINUTE"), RetType: types.NewFieldType(mysql.TypeString)}},
+			chunkSize: 128,
+		},
+		{
+			retEvalType: types.ETDuration,
+			childrenTypes: []types.EvalType{types.ETDuration, types.ETReal, types.ETString},
+			geners: []dataGenerator{
+				&defaultGener{eType: types.ETDuration, nullRation: 0.2},
+				&defaultGener{eType: types.ETReal, nullRation: 0.2},
+			},
+			constants: []*Constant{nil, nil, {Value: types.NewStringDatum("DAY_MICROSECOND"), RetType: types.NewFieldType(mysql.TypeString)}},
+			chunkSize: 128,
+		},
+		{
+			retEvalType: types.ETDuration,
+			childrenTypes: []types.EvalType{types.ETDuration, types.ETReal, types.ETString},
+			geners: []dataGenerator{
+				&defaultGener{eType: types.ETDuration, nullRation: 0.2},
+				&defaultGener{eType: types.ETReal, nullRation: 0.2},
+			},
+			constants: []*Constant{nil, nil, {Value: types.NewStringDatum("DAY_SECOND"), RetType: types.NewFieldType(mysql.TypeString)}},
+			chunkSize: 128,
+		},
+		{
+			retEvalType: types.ETDuration,
+			childrenTypes: []types.EvalType{types.ETDuration, types.ETReal, types.ETString},
+			geners: []dataGenerator{
+				&defaultGener{eType: types.ETDuration, nullRation: 0.2},
+				&defaultGener{eType: types.ETReal, nullRation: 0.2},
+			},
+			constants: []*Constant{nil, nil, {Value: types.NewStringDatum("DAY_MINUTE"), RetType: types.NewFieldType(mysql.TypeString)}},
+			chunkSize: 128,
+		},
+		{
+			retEvalType: types.ETDuration,
+			childrenTypes: []types.EvalType{types.ETDuration, types.ETReal, types.ETString},
+			geners: []dataGenerator{
+				&defaultGener{eType: types.ETDuration, nullRation: 0.2},
+				&defaultGener{eType: types.ETReal, nullRation: 0.2},
+			},
+			constants: []*Constant{nil, nil, {Value: types.NewStringDatum("DAY_HOUR"), RetType: types.NewFieldType(mysql.TypeString)}},
+			chunkSize: 128,
+		},
+		{
+			retEvalType: types.ETDuration,
+			childrenTypes: []types.EvalType{types.ETDuration, types.ETReal, types.ETString},
+			geners: []dataGenerator{
+				&defaultGener{eType: types.ETDuration, nullRation: 0.2},
+				&defaultGener{eType: types.ETReal, nullRation: 0.2},
+			},
+			constants: []*Constant{nil, nil, {Value: types.NewStringDatum("YEAR_MONTH"), RetType: types.NewFieldType(mysql.TypeString)}},
+			chunkSize: 128,
+		},
+		// builtinAddDateDurationDecimalSig
+		{
+			retEvalType: types.ETDuration,
+			childrenTypes: []types.EvalType{types.ETDuration, types.ETDecimal, types.ETString},
+			geners: []dataGenerator{
+				&defaultGener{eType: types.ETDuration, nullRation: 0.2},
+				&defaultGener{eType: types.ETDecimal, nullRation: 0.2},
+			},
+			constants: []*Constant{nil, nil, {Value: types.NewStringDatum("MICROSECOND"), RetType: types.NewFieldType(mysql.TypeString)}},
+			chunkSize: 128,
+		},
+		{
+			retEvalType: types.ETDuration,
+			childrenTypes: []types.EvalType{types.ETDuration, types.ETDecimal, types.ETString},
+			geners: []dataGenerator{
+				&defaultGener{eType: types.ETDuration, nullRation: 0.2},
+				&defaultGener{eType: types.ETDecimal, nullRation: 0.2},
+			},
+			constants: []*Constant{nil, nil, {Value: types.NewStringDatum("SECOND"), RetType: types.NewFieldType(mysql.TypeString)}},
+			chunkSize: 128,
+		},
+		{
+			retEvalType: types.ETDuration,
+			childrenTypes: []types.EvalType{types.ETDuration, types.ETDecimal, types.ETString},
+			geners: []dataGenerator{
+				&defaultGener{eType: types.ETDuration, nullRation: 0.2},
+				&defaultGener{eType: types.ETDecimal, nullRation: 0.2},
+			},
+			constants: []*Constant{nil, nil, {Value: types.NewStringDatum("MINUTE"), RetType: types.NewFieldType(mysql.TypeString)}},
+			chunkSize: 128,
+		},
+		{
+			retEvalType: types.ETDuration,
+			childrenTypes: []types.EvalType{types.ETDuration, types.ETDecimal, types.ETString},
+			geners: []dataGenerator{
+				&defaultGener{eType: types.ETDuration, nullRation: 0.2},
+				&defaultGener{eType: types.ETDecimal, nullRation: 0.2},
+			},
+			constants: []*Constant{nil, nil, {Value: types.NewStringDatum("HOUR"), RetType: types.NewFieldType(mysql.TypeString)}},
+			chunkSize: 128,
+		},
+		{
+			retEvalType: types.ETDuration,
+			childrenTypes: []types.EvalType{types.ETDuration, types.ETDecimal, types.ETString},
+			geners: []dataGenerator{
+				&defaultGener{eType: types.ETDuration, nullRation: 0.2},
+				&defaultGener{eType: types.ETDecimal, nullRation: 0.2},
+			},
+			constants: []*Constant{nil, nil, {Value: types.NewStringDatum("DAY"), RetType: types.NewFieldType(mysql.TypeString)}},
+			chunkSize: 128,
+		},
+		{
+			retEvalType: types.ETDuration,
+			childrenTypes: []types.EvalType{types.ETDuration, types.ETDecimal, types.ETString},
+			geners: []dataGenerator{
+				&defaultGener{eType: types.ETDuration, nullRation: 0.2},
+				&defaultGener{eType: types.ETDecimal, nullRation: 0.2},
+			},
+			constants: []*Constant{nil, nil, {Value: types.NewStringDatum("WEEK"), RetType: types.NewFieldType(mysql.TypeString)}},
+			chunkSize: 128,
+		},
+		{
+			retEvalType: types.ETDuration,
+			childrenTypes: []types.EvalType{types.ETDuration, types.ETDecimal, types.ETString},
+			geners: []dataGenerator{
+				&defaultGener{eType: types.ETDuration, nullRation: 0.2},
+				&defaultGener{eType: types.ETDecimal, nullRation: 0.2},
+			},
+			constants: []*Constant{nil, nil, {Value: types.NewStringDatum("MONTH"), RetType: types.NewFieldType(mysql.TypeString)}},
+			chunkSize: 128,
+		},
+		{
+			retEvalType: types.ETDuration,
+			childrenTypes: []types.EvalType{types.ETDuration, types.ETDecimal, types.ETString},
+			geners: []dataGenerator{
+				&defaultGener{eType: types.ETDuration, nullRation: 0.2},
+				&defaultGener{eType: types.ETDecimal, nullRation: 0.2},
+			},
+			constants: []*Constant{nil, nil, {Value: types.NewStringDatum("QUARTER"), RetType: types.NewFieldType(mysql.TypeString)}},
+			chunkSize: 128,
+		},
+		{
+			retEvalType: types.ETDuration,
+			childrenTypes: []types.EvalType{types.ETDuration, types.ETDecimal, types.ETString},
+			geners: []dataGenerator{
+				&defaultGener{eType: types.ETDuration, nullRation: 0.2},
+				&defaultGener{eType: types.ETDecimal, nullRation: 0.2},
+			},
+			constants: []*Constant{nil, nil, {Value: types.NewStringDatum("YEAR"), RetType: types.NewFieldType(mysql.TypeString)}},
+			chunkSize: 128,
+		},
+		{
+			retEvalType: types.ETDuration,
+			childrenTypes: []types.EvalType{types.ETDuration, types.ETDecimal, types.ETString},
+			geners: []dataGenerator{
+				&defaultGener{eType: types.ETDuration, nullRation: 0.2},
+				&defaultGener{eType: types.ETDecimal, nullRation: 0.2},
+			},
+			constants: []*Constant{nil, nil, {Value: types.NewStringDatum("SECOND_MICROSECOND"), RetType: types.NewFieldType(mysql.TypeString)}},
+			chunkSize: 128,
+		},
+		{
+			retEvalType: types.ETDuration,
+			childrenTypes: []types.EvalType{types.ETDuration, types.ETDecimal, types.ETString},
+			geners: []dataGenerator{
+				&defaultGener{eType: types.ETDuration, nullRation: 0.2},
+				&defaultGener{eType: types.ETDecimal, nullRation: 0.2},
+			},
+			constants: []*Constant{nil, nil, {Value: types.NewStringDatum("MINUTE_MICROSECOND"), RetType: types.NewFieldType(mysql.TypeString)}},
+			chunkSize: 128,
+		},
+		{
+			retEvalType: types.ETDuration,
+			childrenTypes: []types.EvalType{types.ETDuration, types.ETDecimal, types.ETString},
+			geners: []dataGenerator{
+				&defaultGener{eType: types.ETDuration, nullRation: 0.2},
+				&defaultGener{eType: types.ETDecimal, nullRation: 0.2},
+			},
+			constants: []*Constant{nil, nil, {Value: types.NewStringDatum("MINUTE_SECOND"), RetType: types.NewFieldType(mysql.TypeString)}},
+			chunkSize: 128,
+		},
+		{
+			retEvalType: types.ETDuration,
+			childrenTypes: []types.EvalType{types.ETDuration, types.ETDecimal, types.ETString},
+			geners: []dataGenerator{
+				&defaultGener{eType: types.ETDuration, nullRation: 0.2},
+				&defaultGener{eType: types.ETDecimal, nullRation: 0.2},
+			},
+			constants: []*Constant{nil, nil, {Value: types.NewStringDatum("HOUR_MICROSECOND"), RetType: types.NewFieldType(mysql.TypeString)}},
+			chunkSize: 128,
+		},
+		{
+			retEvalType: types.ETDuration,
+			childrenTypes: []types.EvalType{types.ETDuration, types.ETDecimal, types.ETString},
+			geners: []dataGenerator{
+				&defaultGener{eType: types.ETDuration, nullRation: 0.2},
+				&defaultGener{eType: types.ETDecimal, nullRation: 0.2},
+			},
+			constants: []*Constant{nil, nil, {Value: types.NewStringDatum("HOUR_SECOND"), RetType: types.NewFieldType(mysql.TypeString)}},
+			chunkSize: 128,
+		},
+		{
+			retEvalType: types.ETDuration,
+			childrenTypes: []types.EvalType{types.ETDuration, types.ETDecimal, types.ETString},
+			geners: []dataGenerator{
+				&defaultGener{eType: types.ETDuration, nullRation: 0.2},
+				&defaultGener{eType: types.ETDecimal, nullRation: 0.2},
+			},
+			constants: []*Constant{nil, nil, {Value: types.NewStringDatum("HOUR_MINUTE"), RetType: types.NewFieldType(mysql.TypeString)}},
+			chunkSize: 128,
+		},
+		{
+			retEvalType: types.ETDuration,
+			childrenTypes: []types.EvalType{types.ETDuration, types.ETDecimal, types.ETString},
+			geners: []dataGenerator{
+				&defaultGener{eType: types.ETDuration, nullRation: 0.2},
+				&defaultGener{eType: types.ETDecimal, nullRation: 0.2},
+			},
+			constants: []*Constant{nil, nil, {Value: types.NewStringDatum("DAY_MICROSECOND"), RetType: types.NewFieldType(mysql.TypeString)}},
+			chunkSize: 128,
+		},
+		{
+			retEvalType: types.ETDuration,
+			childrenTypes: []types.EvalType{types.ETDuration, types.ETDecimal, types.ETString},
+			geners: []dataGenerator{
+				&defaultGener{eType: types.ETDuration, nullRation: 0.2},
+				&defaultGener{eType: types.ETDecimal, nullRation: 0.2},
+			},
+			constants: []*Constant{nil, nil, {Value: types.NewStringDatum("DAY_SECOND"), RetType: types.NewFieldType(mysql.TypeString)}},
+			chunkSize: 128,
+		},
+		{
+			retEvalType: types.ETDuration,
+			childrenTypes: []types.EvalType{types.ETDuration, types.ETDecimal, types.ETString},
+			geners: []dataGenerator{
+				&defaultGener{eType: types.ETDuration, nullRation: 0.2},
+				&defaultGener{eType: types.ETDecimal, nullRation: 0.2},
+			},
+			constants: []*Constant{nil, nil, {Value: types.NewStringDatum("DAY_MINUTE"), RetType: types.NewFieldType(mysql.TypeString)}},
+			chunkSize: 128,
+		},
+		{
+			retEvalType: types.ETDuration,
+			childrenTypes: []types.EvalType{types.ETDuration, types.ETDecimal, types.ETString},
+			geners: []dataGenerator{
+				&defaultGener{eType: types.ETDuration, nullRation: 0.2},
+				&defaultGener{eType: types.ETDecimal, nullRation: 0.2},
+			},
+			constants: []*Constant{nil, nil, {Value: types.NewStringDatum("DAY_HOUR"), RetType: types.NewFieldType(mysql.TypeString)}},
+			chunkSize: 128,
+		},
+		{
+			retEvalType: types.ETDuration,
+			childrenTypes: []types.EvalType{types.ETDuration, types.ETDecimal, types.ETString},
+			geners: []dataGenerator{
+				&defaultGener{eType: types.ETDuration, nullRation: 0.2},
+				&defaultGener{eType: types.ETDecimal, nullRation: 0.2},
+			},
+			constants: []*Constant{nil, nil, {Value: types.NewStringDatum("YEAR_MONTH"), RetType: types.NewFieldType(mysql.TypeString)}},
+			chunkSize: 128,
+		},
+	},
+
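+	// Note (added commentary): ast.SubDate mirrors the ast.AddDate table
+	// above, exercising the builtinSubDate*Sig family through the same
+	// case shape.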
types.ETDecimal, types.ETString}, + geners: []dataGenerator{ + &defaultGener{eType: types.ETDuration, nullRation: 0.2}, + &defaultGener{eType: types.ETDecimal, nullRation: 0.2}, + }, + constants: []*Constant{nil, nil, {Value: types.NewStringDatum("HOUR_SECOND"), RetType: types.NewFieldType(mysql.TypeString)}}, + chunkSize: 128, + }, + { + retEvalType: types.ETDuration, + childrenTypes: []types.EvalType{types.ETDuration, types.ETDecimal, types.ETString}, + geners: []dataGenerator{ + &defaultGener{eType: types.ETDuration, nullRation: 0.2}, + &defaultGener{eType: types.ETDecimal, nullRation: 0.2}, + }, + constants: []*Constant{nil, nil, {Value: types.NewStringDatum("HOUR_MINUTE"), RetType: types.NewFieldType(mysql.TypeString)}}, + chunkSize: 128, + }, + { + retEvalType: types.ETDuration, + childrenTypes: []types.EvalType{types.ETDuration, types.ETDecimal, types.ETString}, + geners: []dataGenerator{ + &defaultGener{eType: types.ETDuration, nullRation: 0.2}, + &defaultGener{eType: types.ETDecimal, nullRation: 0.2}, + }, + constants: []*Constant{nil, nil, {Value: types.NewStringDatum("DAY_MICROSECOND"), RetType: types.NewFieldType(mysql.TypeString)}}, + chunkSize: 128, + }, + { + retEvalType: types.ETDuration, + childrenTypes: []types.EvalType{types.ETDuration, types.ETDecimal, types.ETString}, + geners: []dataGenerator{ + &defaultGener{eType: types.ETDuration, nullRation: 0.2}, + &defaultGener{eType: types.ETDecimal, nullRation: 0.2}, + }, + constants: []*Constant{nil, nil, {Value: types.NewStringDatum("DAY_SECOND"), RetType: types.NewFieldType(mysql.TypeString)}}, + chunkSize: 128, + }, + { + retEvalType: types.ETDuration, + childrenTypes: []types.EvalType{types.ETDuration, types.ETDecimal, types.ETString}, + geners: []dataGenerator{ + &defaultGener{eType: types.ETDuration, nullRation: 0.2}, + &defaultGener{eType: types.ETDecimal, nullRation: 0.2}, + }, + constants: []*Constant{nil, nil, {Value: types.NewStringDatum("DAY_MINUTE"), RetType: types.NewFieldType(mysql.TypeString)}}, + chunkSize: 128, + }, + { + retEvalType: types.ETDuration, + childrenTypes: []types.EvalType{types.ETDuration, types.ETDecimal, types.ETString}, + geners: []dataGenerator{ + &defaultGener{eType: types.ETDuration, nullRation: 0.2}, + &defaultGener{eType: types.ETDecimal, nullRation: 0.2}, + }, + constants: []*Constant{nil, nil, {Value: types.NewStringDatum("DAY_HOUR"), RetType: types.NewFieldType(mysql.TypeString)}}, + chunkSize: 128, + }, + { + retEvalType: types.ETDuration, + childrenTypes: []types.EvalType{types.ETDuration, types.ETDecimal, types.ETString}, + geners: []dataGenerator{ + &defaultGener{eType: types.ETDuration, nullRation: 0.2}, + &defaultGener{eType: types.ETDecimal, nullRation: 0.2}, + }, + constants: []*Constant{nil, nil, {Value: types.NewStringDatum("YEAR_MONTH"), RetType: types.NewFieldType(mysql.TypeString)}}, + chunkSize: 128, + }, + }, + + ast.SubDate: { + // builtinSubDateStringStringSig + { + retEvalType: types.ETDatetime, + childrenTypes: []types.EvalType{types.ETString, types.ETString, types.ETString}, + geners: []dataGenerator{ + &dateStrGener{NullRation: 0.2}, + &numStrGener{rangeInt64Gener{math.MinInt32 + 1, math.MaxInt32}}, + }, + constants: []*Constant{nil, nil, {Value: types.NewStringDatum("MICROSECOND"), RetType: types.NewFieldType(mysql.TypeString)}}, + chunkSize: 128, + }, + { + retEvalType: types.ETDatetime, + childrenTypes: []types.EvalType{types.ETString, types.ETString, types.ETString}, + geners: []dataGenerator{ + &dateStrGener{NullRation: 0.2}, + 
+				&numStrGener{rangeInt64Gener{math.MinInt32 + 1, math.MaxInt32}},
+			},
+			constants: []*Constant{nil, nil, {Value: types.NewStringDatum("SECOND"), RetType: types.NewFieldType(mysql.TypeString)}},
+			chunkSize: 128,
+		},
+		{
+			retEvalType: types.ETDatetime,
+			childrenTypes: []types.EvalType{types.ETString, types.ETString, types.ETString},
+			geners: []dataGenerator{
+				&dateStrGener{NullRation: 0.2},
+				&numStrGener{rangeInt64Gener{math.MinInt32 + 1, math.MaxInt32}},
+			},
+			constants: []*Constant{nil, nil, {Value: types.NewStringDatum("MINUTE"), RetType: types.NewFieldType(mysql.TypeString)}},
+			chunkSize: 128,
+		},
+		{
+			retEvalType: types.ETDatetime,
+			childrenTypes: []types.EvalType{types.ETString, types.ETString, types.ETString},
+			geners: []dataGenerator{
+				&dateStrGener{NullRation: 0.2},
+				&numStrGener{rangeInt64Gener{math.MinInt32 + 1, math.MaxInt32}},
+			},
+			constants: []*Constant{nil, nil, {Value: types.NewStringDatum("HOUR"), RetType: types.NewFieldType(mysql.TypeString)}},
+			chunkSize: 128,
+		},
+		{
+			retEvalType: types.ETDatetime,
+			childrenTypes: []types.EvalType{types.ETString, types.ETString, types.ETString},
+			geners: []dataGenerator{
+				&dateStrGener{NullRation: 0.2},
+				&numStrGener{rangeInt64Gener{math.MinInt32 + 1, math.MaxInt32}},
+			},
+			constants: []*Constant{nil, nil, {Value: types.NewStringDatum("DAY"), RetType: types.NewFieldType(mysql.TypeString)}},
+			chunkSize: 128,
+		},
+		{
+			retEvalType: types.ETDatetime,
+			childrenTypes: []types.EvalType{types.ETString, types.ETString, types.ETString},
+			geners: []dataGenerator{
+				&dateStrGener{NullRation: 0.2},
+				&numStrGener{rangeInt64Gener{math.MinInt32 + 1, math.MaxInt32}},
+			},
+			constants: []*Constant{nil, nil, {Value: types.NewStringDatum("WEEK"), RetType: types.NewFieldType(mysql.TypeString)}},
+			chunkSize: 128,
+		},
+		{
+			retEvalType: types.ETDatetime,
+			childrenTypes: []types.EvalType{types.ETString, types.ETString, types.ETString},
+			geners: []dataGenerator{
+				&dateStrGener{NullRation: 0.2},
+				&numStrGener{rangeInt64Gener{math.MinInt32 + 1, math.MaxInt32}},
+			},
+			constants: []*Constant{nil, nil, {Value: types.NewStringDatum("MONTH"), RetType: types.NewFieldType(mysql.TypeString)}},
+			chunkSize: 128,
+		},
+		{
+			retEvalType: types.ETDatetime,
+			childrenTypes: []types.EvalType{types.ETString, types.ETString, types.ETString},
+			geners: []dataGenerator{
+				&dateStrGener{NullRation: 0.2},
+				&numStrGener{rangeInt64Gener{math.MinInt32 + 1, math.MaxInt32}},
+			},
+			constants: []*Constant{nil, nil, {Value: types.NewStringDatum("QUARTER"), RetType: types.NewFieldType(mysql.TypeString)}},
+			chunkSize: 128,
+		},
+		{
+			retEvalType: types.ETDatetime,
+			childrenTypes: []types.EvalType{types.ETString, types.ETString, types.ETString},
+			geners: []dataGenerator{
+				&dateStrGener{NullRation: 0.2},
+				&numStrGener{rangeInt64Gener{math.MinInt32 + 1, math.MaxInt32}},
+			},
+			constants: []*Constant{nil, nil, {Value: types.NewStringDatum("YEAR"), RetType: types.NewFieldType(mysql.TypeString)}},
+			chunkSize: 128,
+		},
+		{
+			retEvalType: types.ETDatetime,
+			childrenTypes: []types.EvalType{types.ETString, types.ETString, types.ETString},
+			geners: []dataGenerator{
+				&dateStrGener{NullRation: 0.2},
+				&numStrGener{rangeInt64Gener{math.MinInt32 + 1, math.MaxInt32}},
+			},
+			constants: []*Constant{nil, nil, {Value: types.NewStringDatum("SECOND_MICROSECOND"), RetType: types.NewFieldType(mysql.TypeString)}},
+			chunkSize: 128,
+		},
+		{
+			retEvalType: types.ETDatetime,
+			childrenTypes: []types.EvalType{types.ETString, types.ETString, types.ETString},
+			geners: []dataGenerator{
+				&dateStrGener{NullRation: 0.2},
+				&numStrGener{rangeInt64Gener{math.MinInt32 + 1, math.MaxInt32}},
+			},
+			constants: []*Constant{nil, nil, {Value: types.NewStringDatum("MINUTE_MICROSECOND"), RetType: types.NewFieldType(mysql.TypeString)}},
+			chunkSize: 128,
+		},
+		{
+			retEvalType: types.ETDatetime,
+			childrenTypes: []types.EvalType{types.ETString, types.ETString, types.ETString},
+			geners: []dataGenerator{
+				&dateStrGener{NullRation: 0.2},
+				&numStrGener{rangeInt64Gener{math.MinInt32 + 1, math.MaxInt32}},
+			},
+			constants: []*Constant{nil, nil, {Value: types.NewStringDatum("MINUTE_SECOND"), RetType: types.NewFieldType(mysql.TypeString)}},
+			chunkSize: 128,
+		},
+		{
+			retEvalType: types.ETDatetime,
+			childrenTypes: []types.EvalType{types.ETString, types.ETString, types.ETString},
+			geners: []dataGenerator{
+				&dateStrGener{NullRation: 0.2},
+				&numStrGener{rangeInt64Gener{math.MinInt32 + 1, math.MaxInt32}},
+			},
+			constants: []*Constant{nil, nil, {Value: types.NewStringDatum("HOUR_MICROSECOND"), RetType: types.NewFieldType(mysql.TypeString)}},
+			chunkSize: 128,
+		},
+		{
+			retEvalType: types.ETDatetime,
+			childrenTypes: []types.EvalType{types.ETString, types.ETString, types.ETString},
+			geners: []dataGenerator{
+				&dateStrGener{NullRation: 0.2},
+				&numStrGener{rangeInt64Gener{math.MinInt32 + 1, math.MaxInt32}},
+			},
+			constants: []*Constant{nil, nil, {Value: types.NewStringDatum("HOUR_SECOND"), RetType: types.NewFieldType(mysql.TypeString)}},
+			chunkSize: 128,
+		},
+		{
+			retEvalType: types.ETDatetime,
+			childrenTypes: []types.EvalType{types.ETString, types.ETString, types.ETString},
+			geners: []dataGenerator{
+				&dateStrGener{NullRation: 0.2},
+				&numStrGener{rangeInt64Gener{math.MinInt32 + 1, math.MaxInt32}},
+			},
+			constants: []*Constant{nil, nil, {Value: types.NewStringDatum("HOUR_MINUTE"), RetType: types.NewFieldType(mysql.TypeString)}},
+			chunkSize: 128,
+		},
+		{
+			retEvalType: types.ETDatetime,
+			childrenTypes: []types.EvalType{types.ETString, types.ETString, types.ETString},
+			geners: []dataGenerator{
+				&dateStrGener{NullRation: 0.2},
+				&numStrGener{rangeInt64Gener{math.MinInt32 + 1, math.MaxInt32}},
+			},
+			constants: []*Constant{nil, nil, {Value: types.NewStringDatum("DAY_MICROSECOND"), RetType: types.NewFieldType(mysql.TypeString)}},
+			chunkSize: 128,
+		},
+		{
+			retEvalType: types.ETDatetime,
+			childrenTypes: []types.EvalType{types.ETString, types.ETString, types.ETString},
+			geners: []dataGenerator{
+				&dateStrGener{NullRation: 0.2},
+				&numStrGener{rangeInt64Gener{math.MinInt32 + 1, math.MaxInt32}},
+			},
+			constants: []*Constant{nil, nil, {Value: types.NewStringDatum("DAY_SECOND"), RetType: types.NewFieldType(mysql.TypeString)}},
+			chunkSize: 128,
+		},
+		{
+			retEvalType: types.ETDatetime,
+			childrenTypes: []types.EvalType{types.ETString, types.ETString, types.ETString},
+			geners: []dataGenerator{
+				&dateStrGener{NullRation: 0.2},
+				&numStrGener{rangeInt64Gener{math.MinInt32 + 1, math.MaxInt32}},
+			},
+			constants: []*Constant{nil, nil, {Value: types.NewStringDatum("DAY_MINUTE"), RetType: types.NewFieldType(mysql.TypeString)}},
+			chunkSize: 128,
+		},
+		{
+			retEvalType: types.ETDatetime,
+			childrenTypes: []types.EvalType{types.ETString, types.ETString, types.ETString},
+			geners: []dataGenerator{
+				&dateStrGener{NullRation: 0.2},
+				&numStrGener{rangeInt64Gener{math.MinInt32 + 1, math.MaxInt32}},
+			},
+			constants: []*Constant{nil, nil, {Value: types.NewStringDatum("DAY_HOUR"), RetType: types.NewFieldType(mysql.TypeString)}},
+			chunkSize: 128,
+		},
+		{
+			retEvalType: types.ETDatetime,
+			childrenTypes: []types.EvalType{types.ETString, types.ETString, types.ETString},
+			geners: []dataGenerator{
+				&dateStrGener{NullRation: 0.2},
+				&numStrGener{rangeInt64Gener{math.MinInt32 + 1, math.MaxInt32}},
+			},
+			constants: []*Constant{nil, nil, {Value: types.NewStringDatum("YEAR_MONTH"), RetType: types.NewFieldType(mysql.TypeString)}},
+			chunkSize: 128,
+		},
+		// builtinSubDateStringIntSig
+		{
+			retEvalType: types.ETDatetime,
+			childrenTypes: []types.EvalType{types.ETString, types.ETInt, types.ETString},
+			geners: []dataGenerator{
+				&dateStrGener{NullRation: 0.2},
+				&defaultGener{eType: types.ETInt, nullRation: 0.2},
+			},
+			constants: []*Constant{nil, nil, {Value: types.NewStringDatum("MICROSECOND"), RetType: types.NewFieldType(mysql.TypeString)}},
+			chunkSize: 128,
+		},
+		{
+			retEvalType: types.ETDatetime,
+			childrenTypes: []types.EvalType{types.ETString, types.ETInt, types.ETString},
+			geners: []dataGenerator{
+				&dateStrGener{NullRation: 0.2},
+				&defaultGener{eType: types.ETInt, nullRation: 0.2},
+			},
+			constants: []*Constant{nil, nil, {Value: types.NewStringDatum("SECOND"), RetType: types.NewFieldType(mysql.TypeString)}},
+			chunkSize: 128,
+		},
+		{
+			retEvalType: types.ETDatetime,
+			childrenTypes: []types.EvalType{types.ETString, types.ETInt, types.ETString},
+			geners: []dataGenerator{
+				&dateStrGener{NullRation: 0.2},
+				&defaultGener{eType: types.ETInt, nullRation: 0.2},
+			},
+			constants: []*Constant{nil, nil, {Value: types.NewStringDatum("MINUTE"), RetType: types.NewFieldType(mysql.TypeString)}},
+			chunkSize: 128,
+		},
+		{
+			retEvalType: types.ETDatetime,
+			childrenTypes: []types.EvalType{types.ETString, types.ETInt, types.ETString},
+			geners: []dataGenerator{
+				&dateStrGener{NullRation: 0.2},
+				&defaultGener{eType: types.ETInt, nullRation: 0.2},
+			},
+			constants: []*Constant{nil, nil, {Value: types.NewStringDatum("HOUR"), RetType: types.NewFieldType(mysql.TypeString)}},
+			chunkSize: 128,
+		},
+		{
+			retEvalType: types.ETDatetime,
+			childrenTypes: []types.EvalType{types.ETString, types.ETInt, types.ETString},
+			geners: []dataGenerator{
+				&dateStrGener{NullRation: 0.2},
+				&defaultGener{eType: types.ETInt, nullRation: 0.2},
+			},
+			constants: []*Constant{nil, nil, {Value: types.NewStringDatum("DAY"), RetType: types.NewFieldType(mysql.TypeString)}},
+			chunkSize: 128,
+		},
+		{
+			retEvalType: types.ETDatetime,
+			childrenTypes: []types.EvalType{types.ETString, types.ETInt, types.ETString},
+			geners: []dataGenerator{
+				&dateStrGener{NullRation: 0.2},
+				&defaultGener{eType: types.ETInt, nullRation: 0.2},
+			},
+			constants: []*Constant{nil, nil, {Value: types.NewStringDatum("WEEK"), RetType: types.NewFieldType(mysql.TypeString)}},
+			chunkSize: 128,
+		},
+		{
+			retEvalType: types.ETDatetime,
+			childrenTypes: []types.EvalType{types.ETString, types.ETInt, types.ETString},
+			geners: []dataGenerator{
+				&dateStrGener{NullRation: 0.2},
+				&defaultGener{eType: types.ETInt, nullRation: 0.2},
+			},
+			constants: []*Constant{nil, nil, {Value: types.NewStringDatum("MONTH"), RetType: types.NewFieldType(mysql.TypeString)}},
+			chunkSize: 128,
+		},
+		{
+			retEvalType: types.ETDatetime,
+			childrenTypes: []types.EvalType{types.ETString, types.ETInt, types.ETString},
+			geners: []dataGenerator{
+				&dateStrGener{NullRation: 0.2},
+				&defaultGener{eType: types.ETInt, nullRation: 0.2},
+			},
+			constants: []*Constant{nil, nil, {Value: types.NewStringDatum("QUARTER"), RetType: types.NewFieldType(mysql.TypeString)}},
+			chunkSize: 128,
+		},
+		{
+			retEvalType: types.ETDatetime,
+			childrenTypes: []types.EvalType{types.ETString, types.ETInt, types.ETString},
+			geners: []dataGenerator{
+				&dateStrGener{NullRation: 0.2},
+				&defaultGener{eType: types.ETInt, nullRation: 0.2},
+			},
+			constants: []*Constant{nil, nil, {Value: types.NewStringDatum("YEAR"), RetType: types.NewFieldType(mysql.TypeString)}},
+			chunkSize: 128,
+		},
+		{
+			retEvalType: types.ETDatetime,
+			childrenTypes: []types.EvalType{types.ETString, types.ETInt, types.ETString},
+			geners: []dataGenerator{
+				&dateStrGener{NullRation: 0.2},
+				&defaultGener{eType: types.ETInt, nullRation: 0.2},
+			},
+			constants: []*Constant{nil, nil, {Value: types.NewStringDatum("SECOND_MICROSECOND"), RetType: types.NewFieldType(mysql.TypeString)}},
+			chunkSize: 128,
+		},
+		{
+			retEvalType: types.ETDatetime,
+			childrenTypes: []types.EvalType{types.ETString, types.ETInt, types.ETString},
+			geners: []dataGenerator{
+				&dateStrGener{NullRation: 0.2},
+				&defaultGener{eType: types.ETInt, nullRation: 0.2},
+			},
+			constants: []*Constant{nil, nil, {Value: types.NewStringDatum("MINUTE_MICROSECOND"), RetType: types.NewFieldType(mysql.TypeString)}},
+			chunkSize: 128,
+		},
+		{
+			retEvalType: types.ETDatetime,
+			childrenTypes: []types.EvalType{types.ETString, types.ETInt, types.ETString},
+			geners: []dataGenerator{
+				&dateStrGener{NullRation: 0.2},
+				&defaultGener{eType: types.ETInt, nullRation: 0.2},
+			},
+			constants: []*Constant{nil, nil, {Value: types.NewStringDatum("MINUTE_SECOND"), RetType: types.NewFieldType(mysql.TypeString)}},
+			chunkSize: 128,
+		},
+		{
+			retEvalType: types.ETDatetime,
+			childrenTypes: []types.EvalType{types.ETString, types.ETInt, types.ETString},
+			geners: []dataGenerator{
+				&dateStrGener{NullRation: 0.2},
+				&defaultGener{eType: types.ETInt, nullRation: 0.2},
+			},
+			constants: []*Constant{nil, nil, {Value: types.NewStringDatum("HOUR_MICROSECOND"), RetType: types.NewFieldType(mysql.TypeString)}},
+			chunkSize: 128,
+		},
+		{
+			retEvalType: types.ETDatetime,
+			childrenTypes: []types.EvalType{types.ETString, types.ETInt, types.ETString},
+			geners: []dataGenerator{
+				&dateStrGener{NullRation: 0.2},
+				&defaultGener{eType: types.ETInt, nullRation: 0.2},
+			},
+			constants: []*Constant{nil, nil, {Value: types.NewStringDatum("HOUR_SECOND"), RetType: types.NewFieldType(mysql.TypeString)}},
+			chunkSize: 128,
+		},
+		{
+			retEvalType: types.ETDatetime,
+			childrenTypes: []types.EvalType{types.ETString, types.ETInt, types.ETString},
+			geners: []dataGenerator{
+				&dateStrGener{NullRation: 0.2},
+				&defaultGener{eType: types.ETInt, nullRation: 0.2},
+			},
+			constants: []*Constant{nil, nil, {Value: types.NewStringDatum("HOUR_MINUTE"), RetType: types.NewFieldType(mysql.TypeString)}},
+			chunkSize: 128,
+		},
+		{
+			retEvalType: types.ETDatetime,
+			childrenTypes: []types.EvalType{types.ETString, types.ETInt, types.ETString},
+			geners: []dataGenerator{
+				&dateStrGener{NullRation: 0.2},
+				&defaultGener{eType: types.ETInt, nullRation: 0.2},
+			},
+			constants: []*Constant{nil, nil, {Value: types.NewStringDatum("DAY_MICROSECOND"), RetType: types.NewFieldType(mysql.TypeString)}},
+			chunkSize: 128,
+		},
+		{
+			retEvalType: types.ETDatetime,
+			childrenTypes: []types.EvalType{types.ETString, types.ETInt, types.ETString},
+			geners: []dataGenerator{
+				&dateStrGener{NullRation: 0.2},
+				&defaultGener{eType: types.ETInt, nullRation: 0.2},
+			},
+			constants: []*Constant{nil, nil, {Value: types.NewStringDatum("DAY_SECOND"), RetType: types.NewFieldType(mysql.TypeString)}},
+			chunkSize: 128,
+		},
+		{
+			retEvalType: types.ETDatetime,
+			childrenTypes: []types.EvalType{types.ETString, types.ETInt, types.ETString},
+			geners: []dataGenerator{
+				&dateStrGener{NullRation: 0.2},
+				&defaultGener{eType: types.ETInt, nullRation: 0.2},
+			},
+			constants: []*Constant{nil, nil, {Value: types.NewStringDatum("DAY_MINUTE"), RetType: types.NewFieldType(mysql.TypeString)}},
+			chunkSize: 128,
+		},
+		{
+			retEvalType: types.ETDatetime,
+			childrenTypes: []types.EvalType{types.ETString, types.ETInt, types.ETString},
+			geners: []dataGenerator{
+				&dateStrGener{NullRation: 0.2},
+				&defaultGener{eType: types.ETInt, nullRation: 0.2},
+			},
+			constants: []*Constant{nil, nil, {Value: types.NewStringDatum("DAY_HOUR"), RetType: types.NewFieldType(mysql.TypeString)}},
+			chunkSize: 128,
+		},
+		{
+			retEvalType: types.ETDatetime,
+			childrenTypes: []types.EvalType{types.ETString, types.ETInt, types.ETString},
+			geners: []dataGenerator{
+				&dateStrGener{NullRation: 0.2},
+				&defaultGener{eType: types.ETInt, nullRation: 0.2},
+			},
+			constants: []*Constant{nil, nil, {Value: types.NewStringDatum("YEAR_MONTH"), RetType: types.NewFieldType(mysql.TypeString)}},
+			chunkSize: 128,
+		},
+		// builtinSubDateStringRealSig
+		{
+			retEvalType: types.ETDatetime,
+			childrenTypes: []types.EvalType{types.ETString, types.ETReal, types.ETString},
+			geners: []dataGenerator{
+				&dateStrGener{NullRation: 0.2},
+				&defaultGener{eType: types.ETReal, nullRation: 0.2},
+			},
+			constants: []*Constant{nil, nil, {Value: types.NewStringDatum("MICROSECOND"), RetType: types.NewFieldType(mysql.TypeString)}},
+			chunkSize: 128,
+		},
+		{
+			retEvalType: types.ETDatetime,
+			childrenTypes: []types.EvalType{types.ETString, types.ETReal, types.ETString},
+			geners: []dataGenerator{
+				&dateStrGener{NullRation: 0.2},
+				&defaultGener{eType: types.ETReal, nullRation: 0.2},
+			},
+			constants: []*Constant{nil, nil, {Value: types.NewStringDatum("SECOND"), RetType: types.NewFieldType(mysql.TypeString)}},
+			chunkSize: 128,
+		},
+		{
+			retEvalType: types.ETDatetime,
+			childrenTypes: []types.EvalType{types.ETString, types.ETReal, types.ETString},
+			geners: []dataGenerator{
+				&dateStrGener{NullRation: 0.2},
+				&defaultGener{eType: types.ETReal, nullRation: 0.2},
+			},
+			constants: []*Constant{nil, nil, {Value: types.NewStringDatum("MINUTE"), RetType: types.NewFieldType(mysql.TypeString)}},
+			chunkSize: 128,
+		},
+		{
+			retEvalType: types.ETDatetime,
+			childrenTypes: []types.EvalType{types.ETString, types.ETReal, types.ETString},
+			geners: []dataGenerator{
+				&dateStrGener{NullRation: 0.2},
+				&defaultGener{eType: types.ETReal, nullRation: 0.2},
+			},
+			constants: []*Constant{nil, nil, {Value: types.NewStringDatum("HOUR"), RetType: types.NewFieldType(mysql.TypeString)}},
+			chunkSize: 128,
+		},
+		{
+			retEvalType: types.ETDatetime,
+			childrenTypes: []types.EvalType{types.ETString, types.ETReal, types.ETString},
+			geners: []dataGenerator{
+				&dateStrGener{NullRation: 0.2},
+				&defaultGener{eType: types.ETReal, nullRation: 0.2},
+			},
+			constants: []*Constant{nil, nil, {Value: types.NewStringDatum("DAY"), RetType: types.NewFieldType(mysql.TypeString)}},
+			chunkSize: 128,
+		},
+		{
+			retEvalType: types.ETDatetime,
+			childrenTypes: []types.EvalType{types.ETString, types.ETReal, types.ETString},
+			geners: []dataGenerator{
+				&dateStrGener{NullRation: 0.2},
+				&defaultGener{eType: types.ETReal, nullRation: 0.2},
+			},
+			constants: []*Constant{nil, nil, {Value: types.NewStringDatum("WEEK"), RetType: types.NewFieldType(mysql.TypeString)}},
+			chunkSize: 128,
+		},
+		{
+			retEvalType: types.ETDatetime,
+			childrenTypes: []types.EvalType{types.ETString, types.ETReal, types.ETString},
+			geners: []dataGenerator{
+				&dateStrGener{NullRation: 0.2},
+				&defaultGener{eType: types.ETReal, nullRation: 0.2},
+			},
+			constants: []*Constant{nil, nil, {Value: types.NewStringDatum("MONTH"), RetType: types.NewFieldType(mysql.TypeString)}},
+			chunkSize: 128,
+		},
+		{
+			retEvalType: types.ETDatetime,
+			childrenTypes: []types.EvalType{types.ETString, types.ETReal, types.ETString},
+			geners: []dataGenerator{
+				&dateStrGener{NullRation: 0.2},
+				&defaultGener{eType: types.ETReal, nullRation: 0.2},
+			},
+			constants: []*Constant{nil, nil, {Value: types.NewStringDatum("QUARTER"), RetType: types.NewFieldType(mysql.TypeString)}},
+			chunkSize: 128,
+		},
+		{
+			retEvalType: types.ETDatetime,
+			childrenTypes: []types.EvalType{types.ETString, types.ETReal, types.ETString},
+			geners: []dataGenerator{
+				&dateStrGener{NullRation: 0.2},
+				&defaultGener{eType: types.ETReal, nullRation: 0.2},
+			},
+			constants: []*Constant{nil, nil, {Value: types.NewStringDatum("YEAR"), RetType: types.NewFieldType(mysql.TypeString)}},
+			chunkSize: 128,
+		},
+		{
+			retEvalType: types.ETDatetime,
+			childrenTypes: []types.EvalType{types.ETString, types.ETReal, types.ETString},
+			geners: []dataGenerator{
+				&dateStrGener{NullRation: 0.2},
+				&defaultGener{eType: types.ETReal, nullRation: 0.2},
+			},
+			constants: []*Constant{nil, nil, {Value: types.NewStringDatum("SECOND_MICROSECOND"), RetType: types.NewFieldType(mysql.TypeString)}},
+			chunkSize: 128,
+		},
+		{
+			retEvalType: types.ETDatetime,
+			childrenTypes: []types.EvalType{types.ETString, types.ETReal, types.ETString},
+			geners: []dataGenerator{
+				&dateStrGener{NullRation: 0.2},
+				&defaultGener{eType: types.ETReal, nullRation: 0.2},
+			},
+			constants: []*Constant{nil, nil, {Value: types.NewStringDatum("MINUTE_MICROSECOND"), RetType: types.NewFieldType(mysql.TypeString)}},
+			chunkSize: 128,
+		},
+		{
+			retEvalType: types.ETDatetime,
+			childrenTypes: []types.EvalType{types.ETString, types.ETReal, types.ETString},
+			geners: []dataGenerator{
+				&dateStrGener{NullRation: 0.2},
+				&defaultGener{eType: types.ETReal, nullRation: 0.2},
+			},
+			constants: []*Constant{nil, nil, {Value: types.NewStringDatum("MINUTE_SECOND"), RetType: types.NewFieldType(mysql.TypeString)}},
+			chunkSize: 128,
+		},
+		{
+			retEvalType: types.ETDatetime,
+			childrenTypes: []types.EvalType{types.ETString, types.ETReal, types.ETString},
+			geners: []dataGenerator{
+				&dateStrGener{NullRation: 0.2},
+				&defaultGener{eType: types.ETReal, nullRation: 0.2},
+			},
+			constants: []*Constant{nil, nil, {Value: types.NewStringDatum("HOUR_MICROSECOND"), RetType: types.NewFieldType(mysql.TypeString)}},
+			chunkSize: 128,
+		},
+		{
+			retEvalType: types.ETDatetime,
+			childrenTypes: []types.EvalType{types.ETString, types.ETReal, types.ETString},
+			geners: []dataGenerator{
+				&dateStrGener{NullRation: 0.2},
+				&defaultGener{eType: types.ETReal, nullRation: 0.2},
+			},
+			constants: []*Constant{nil, nil, {Value: types.NewStringDatum("HOUR_SECOND"), RetType: types.NewFieldType(mysql.TypeString)}},
+			chunkSize: 128,
+		},
+		{
+			retEvalType: types.ETDatetime,
+			childrenTypes: []types.EvalType{types.ETString, types.ETReal, types.ETString},
+			geners: []dataGenerator{
+				&dateStrGener{NullRation: 0.2},
+				&defaultGener{eType: types.ETReal, nullRation: 0.2},
+			},
+			constants: []*Constant{nil, nil, {Value: types.NewStringDatum("HOUR_MINUTE"), RetType: types.NewFieldType(mysql.TypeString)}},
+			chunkSize: 128,
+		},
+		{
+			retEvalType: types.ETDatetime,
+			childrenTypes: []types.EvalType{types.ETString, types.ETReal, types.ETString},
+			geners: []dataGenerator{
+				&dateStrGener{NullRation: 0.2},
+				&defaultGener{eType: types.ETReal, nullRation: 0.2},
+			},
+			constants: []*Constant{nil, nil, {Value: types.NewStringDatum("DAY_MICROSECOND"), RetType: types.NewFieldType(mysql.TypeString)}},
+			chunkSize: 128,
+		},
+		{
+			retEvalType: types.ETDatetime,
+			childrenTypes: []types.EvalType{types.ETString, types.ETReal, types.ETString},
+			geners: []dataGenerator{
+				&dateStrGener{NullRation: 0.2},
+				&defaultGener{eType: types.ETReal, nullRation: 0.2},
+			},
+			constants: []*Constant{nil, nil, {Value: types.NewStringDatum("DAY_SECOND"), RetType: types.NewFieldType(mysql.TypeString)}},
+			chunkSize: 128,
+		},
+		{
+			retEvalType: types.ETDatetime,
+			childrenTypes: []types.EvalType{types.ETString, types.ETReal, types.ETString},
+			geners: []dataGenerator{
+				&dateStrGener{NullRation: 0.2},
+				&defaultGener{eType: types.ETReal, nullRation: 0.2},
+			},
+			constants: []*Constant{nil, nil, {Value: types.NewStringDatum("DAY_MINUTE"), RetType: types.NewFieldType(mysql.TypeString)}},
+			chunkSize: 128,
+		},
+		{
+			retEvalType: types.ETDatetime,
+			childrenTypes: []types.EvalType{types.ETString, types.ETReal, types.ETString},
+			geners: []dataGenerator{
+				&dateStrGener{NullRation: 0.2},
+				&defaultGener{eType: types.ETReal, nullRation: 0.2},
+			},
+			constants: []*Constant{nil, nil, {Value: types.NewStringDatum("DAY_HOUR"), RetType: types.NewFieldType(mysql.TypeString)}},
+			chunkSize: 128,
+		},
+		{
+			retEvalType: types.ETDatetime,
+			childrenTypes: []types.EvalType{types.ETString, types.ETReal, types.ETString},
+			geners: []dataGenerator{
+				&dateStrGener{NullRation: 0.2},
+				&defaultGener{eType: types.ETReal, nullRation: 0.2},
+			},
+			constants: []*Constant{nil, nil, {Value: types.NewStringDatum("YEAR_MONTH"), RetType: types.NewFieldType(mysql.TypeString)}},
+			chunkSize: 128,
+		},
+		// builtinSubDateStringDecimalSig
+		{
+			retEvalType: types.ETDatetime,
+			childrenTypes: []types.EvalType{types.ETString, types.ETDecimal, types.ETString},
+			geners: []dataGenerator{
+				&dateStrGener{NullRation: 0.2},
+				&defaultGener{eType: types.ETDecimal, nullRation: 0.2},
+			},
+			constants: []*Constant{nil, nil, {Value: types.NewStringDatum("MICROSECOND"), RetType: types.NewFieldType(mysql.TypeString)}},
+			chunkSize: 128,
+		},
+		{
+			retEvalType: types.ETDatetime,
+			childrenTypes: []types.EvalType{types.ETString, types.ETDecimal, types.ETString},
+			geners: []dataGenerator{
+				&dateStrGener{NullRation: 0.2},
+				&defaultGener{eType: types.ETDecimal, nullRation: 0.2},
+			},
+			constants: []*Constant{nil, nil, {Value: types.NewStringDatum("SECOND"), RetType: types.NewFieldType(mysql.TypeString)}},
+			chunkSize: 128,
+		},
+		{
+			retEvalType: types.ETDatetime,
+			childrenTypes: []types.EvalType{types.ETString, types.ETDecimal, types.ETString},
+			geners: []dataGenerator{
+				&dateStrGener{NullRation: 0.2},
+				&defaultGener{eType: types.ETDecimal, nullRation: 0.2},
+			},
+			constants: []*Constant{nil, nil, {Value: types.NewStringDatum("MINUTE"), RetType: types.NewFieldType(mysql.TypeString)}},
+			chunkSize: 128,
+		},
+		{
+			retEvalType: types.ETDatetime,
+			childrenTypes: []types.EvalType{types.ETString, types.ETDecimal, types.ETString},
+			geners: []dataGenerator{
+				&dateStrGener{NullRation: 0.2},
+				&defaultGener{eType: types.ETDecimal, nullRation: 0.2},
+			},
+			constants: []*Constant{nil, nil, {Value: types.NewStringDatum("HOUR"), RetType: types.NewFieldType(mysql.TypeString)}},
+			chunkSize: 128,
+		},
+		{
+			retEvalType: types.ETDatetime,
+			childrenTypes: []types.EvalType{types.ETString, types.ETDecimal, types.ETString},
+			geners: []dataGenerator{
+				&dateStrGener{NullRation: 0.2},
+				&defaultGener{eType: types.ETDecimal, nullRation: 0.2},
+			},
+			constants: []*Constant{nil, nil, {Value: types.NewStringDatum("DAY"), RetType: types.NewFieldType(mysql.TypeString)}},
+			chunkSize: 128,
+		},
+		{
+			retEvalType: types.ETDatetime,
+			childrenTypes: []types.EvalType{types.ETString, types.ETDecimal, types.ETString},
+			geners: []dataGenerator{
+				&dateStrGener{NullRation: 0.2},
+				&defaultGener{eType: types.ETDecimal, nullRation: 0.2},
+			},
+			constants: []*Constant{nil, nil, {Value: types.NewStringDatum("WEEK"), RetType: types.NewFieldType(mysql.TypeString)}},
+			chunkSize: 128,
+		},
+		{
+			retEvalType: types.ETDatetime,
+			childrenTypes: []types.EvalType{types.ETString, types.ETDecimal, types.ETString},
+			geners: []dataGenerator{
+				&dateStrGener{NullRation: 0.2},
+				&defaultGener{eType: types.ETDecimal, nullRation: 0.2},
+			},
+			constants: []*Constant{nil, nil, {Value: types.NewStringDatum("MONTH"), RetType: types.NewFieldType(mysql.TypeString)}},
+			chunkSize: 128,
+		},
+		{
+			retEvalType: types.ETDatetime,
+			childrenTypes: []types.EvalType{types.ETString, types.ETDecimal, types.ETString},
+			geners: []dataGenerator{
+				&dateStrGener{NullRation: 0.2},
+				&defaultGener{eType: types.ETDecimal, nullRation: 0.2},
+			},
+			constants: []*Constant{nil, nil, {Value: types.NewStringDatum("QUARTER"), RetType: types.NewFieldType(mysql.TypeString)}},
+			chunkSize: 128,
+		},
+		{
+			retEvalType: types.ETDatetime,
+			childrenTypes: []types.EvalType{types.ETString, types.ETDecimal, types.ETString},
+			geners: []dataGenerator{
+				&dateStrGener{NullRation: 0.2},
+				&defaultGener{eType: types.ETDecimal, nullRation: 0.2},
+			},
+			constants: []*Constant{nil, nil, {Value: types.NewStringDatum("YEAR"), RetType: types.NewFieldType(mysql.TypeString)}},
+			chunkSize: 128,
+		},
+		{
+			retEvalType: types.ETDatetime,
+			childrenTypes: []types.EvalType{types.ETString, types.ETDecimal, types.ETString},
+			geners: []dataGenerator{
+				&dateStrGener{NullRation: 0.2},
+				&defaultGener{eType: types.ETDecimal, nullRation: 0.2},
+			},
+			constants: []*Constant{nil, nil, {Value: types.NewStringDatum("SECOND_MICROSECOND"), RetType: types.NewFieldType(mysql.TypeString)}},
+			chunkSize: 128,
+		},
+		{
+			retEvalType: types.ETDatetime,
+			childrenTypes: []types.EvalType{types.ETString, types.ETDecimal, types.ETString},
+			geners: []dataGenerator{
+				&dateStrGener{NullRation: 0.2},
+				&defaultGener{eType: types.ETDecimal, nullRation: 0.2},
+			},
+			constants: []*Constant{nil, nil, {Value: types.NewStringDatum("MINUTE_MICROSECOND"), RetType: types.NewFieldType(mysql.TypeString)}},
+			chunkSize: 128,
+		},
+		{
+			retEvalType: types.ETDatetime,
+			childrenTypes: []types.EvalType{types.ETString, types.ETDecimal, types.ETString},
+			geners: []dataGenerator{
+				&dateStrGener{NullRation: 0.2},
+				&defaultGener{eType: types.ETDecimal, nullRation: 0.2},
+			},
+			constants: []*Constant{nil, nil, {Value: types.NewStringDatum("MINUTE_SECOND"), RetType: types.NewFieldType(mysql.TypeString)}},
+			chunkSize: 128,
+		},
+		{
+			retEvalType: types.ETDatetime,
+			childrenTypes: []types.EvalType{types.ETString, types.ETDecimal, types.ETString},
+			geners: []dataGenerator{
+				&dateStrGener{NullRation: 0.2},
+				&defaultGener{eType: types.ETDecimal, nullRation: 0.2},
+			},
+			constants: []*Constant{nil, nil, {Value: types.NewStringDatum("HOUR_MICROSECOND"), RetType: types.NewFieldType(mysql.TypeString)}},
+			chunkSize: 128,
+		},
+		{
+			retEvalType: types.ETDatetime,
+			childrenTypes: []types.EvalType{types.ETString, types.ETDecimal, types.ETString},
+			geners: []dataGenerator{
+				&dateStrGener{NullRation: 0.2},
+				&defaultGener{eType: types.ETDecimal, nullRation: 0.2},
+			},
+			constants: []*Constant{nil, nil, {Value: types.NewStringDatum("HOUR_SECOND"), RetType: types.NewFieldType(mysql.TypeString)}},
+			chunkSize: 128,
+		},
+		{
+			retEvalType: types.ETDatetime,
+			childrenTypes: []types.EvalType{types.ETString, types.ETDecimal, types.ETString},
+			geners: []dataGenerator{
+				&dateStrGener{NullRation: 0.2},
+				&defaultGener{eType: types.ETDecimal, nullRation: 0.2},
+			},
+			constants: []*Constant{nil, nil, {Value: types.NewStringDatum("HOUR_MINUTE"), RetType: types.NewFieldType(mysql.TypeString)}},
+			chunkSize: 128,
+		},
+		{
+			retEvalType: types.ETDatetime,
+			childrenTypes: []types.EvalType{types.ETString, types.ETDecimal, types.ETString},
+			geners: []dataGenerator{
+				&dateStrGener{NullRation: 0.2},
+				&defaultGener{eType: types.ETDecimal, nullRation: 0.2},
+			},
+			constants: []*Constant{nil, nil, {Value: types.NewStringDatum("DAY_MICROSECOND"), RetType: types.NewFieldType(mysql.TypeString)}},
+			chunkSize: 128,
+		},
+		{
+			retEvalType: types.ETDatetime,
+			childrenTypes: []types.EvalType{types.ETString, types.ETDecimal, types.ETString},
+			geners: []dataGenerator{
+				&dateStrGener{NullRation: 0.2},
+				&defaultGener{eType: types.ETDecimal, nullRation: 0.2},
+			},
+			constants: []*Constant{nil, nil, {Value: types.NewStringDatum("DAY_SECOND"), RetType: types.NewFieldType(mysql.TypeString)}},
+			chunkSize: 128,
+		},
+		{
+			retEvalType: types.ETDatetime,
+			childrenTypes: []types.EvalType{types.ETString, types.ETDecimal, types.ETString},
+			geners: []dataGenerator{
+				&dateStrGener{NullRation: 0.2},
+				&defaultGener{eType: types.ETDecimal, nullRation: 0.2},
+			},
+			constants: []*Constant{nil, nil, {Value: types.NewStringDatum("DAY_MINUTE"), RetType: types.NewFieldType(mysql.TypeString)}},
+			chunkSize: 128,
+		},
+		{
+			retEvalType: types.ETDatetime,
+			childrenTypes: []types.EvalType{types.ETString, types.ETDecimal, types.ETString},
+			geners: []dataGenerator{
+				&dateStrGener{NullRation: 0.2},
+				&defaultGener{eType: types.ETDecimal, nullRation: 0.2},
+			},
+			constants: []*Constant{nil, nil, {Value: types.NewStringDatum("DAY_HOUR"), RetType: types.NewFieldType(mysql.TypeString)}},
+			chunkSize: 128,
+		},
+		{
+			retEvalType: types.ETDatetime,
+			childrenTypes: []types.EvalType{types.ETString, types.ETDecimal, types.ETString},
+			geners: []dataGenerator{
+				&dateStrGener{NullRation: 0.2},
+				&defaultGener{eType: types.ETDecimal, nullRation: 0.2},
+			},
+			constants: []*Constant{nil, nil, {Value: types.NewStringDatum("YEAR_MONTH"), RetType: types.NewFieldType(mysql.TypeString)}},
+			chunkSize: 128,
+		},
+		// builtinSubDateIntStringSig
+		{
+			retEvalType: types.ETDatetime,
+			childrenTypes: []types.EvalType{types.ETInt, types.ETString, types.ETString},
+			geners: []dataGenerator{
+				&dateTimeIntGener{nullRation: 0.2},
+				&numStrGener{rangeInt64Gener{math.MinInt32 + 1, math.MaxInt32}},
+			},
+			constants: []*Constant{nil, nil, {Value: types.NewStringDatum("MICROSECOND"), RetType: types.NewFieldType(mysql.TypeString)}},
+			chunkSize: 128,
+		},
+		{
+			retEvalType: types.ETDatetime,
+			childrenTypes: []types.EvalType{types.ETInt, types.ETString, types.ETString},
+			geners: []dataGenerator{
+				&dateTimeIntGener{nullRation: 0.2},
+				&numStrGener{rangeInt64Gener{math.MinInt32 + 1, math.MaxInt32}},
+			},
+			constants: []*Constant{nil, nil, {Value: types.NewStringDatum("SECOND"), RetType: types.NewFieldType(mysql.TypeString)}},
+			chunkSize: 128,
+		},
+		{
+			retEvalType: types.ETDatetime,
+			childrenTypes: []types.EvalType{types.ETInt, types.ETString, types.ETString},
+			geners: []dataGenerator{
+				&dateTimeIntGener{nullRation: 0.2},
+				&numStrGener{rangeInt64Gener{math.MinInt32 + 1, math.MaxInt32}},
+			},
+			constants: []*Constant{nil, nil, {Value: types.NewStringDatum("MINUTE"), RetType: types.NewFieldType(mysql.TypeString)}},
+			chunkSize: 128,
+		},
+		{
+			retEvalType: types.ETDatetime,
+			childrenTypes: []types.EvalType{types.ETInt, types.ETString, types.ETString},
+			geners: []dataGenerator{
+				&dateTimeIntGener{nullRation: 0.2},
+				&numStrGener{rangeInt64Gener{math.MinInt32 + 1, math.MaxInt32}},
+			},
+			constants: []*Constant{nil, nil, {Value: types.NewStringDatum("HOUR"), RetType: types.NewFieldType(mysql.TypeString)}},
+			chunkSize: 128,
+		},
+		{
+			retEvalType: types.ETDatetime,
+			childrenTypes: []types.EvalType{types.ETInt, types.ETString, types.ETString},
+			geners: []dataGenerator{
+				&dateTimeIntGener{nullRation: 0.2},
+				&numStrGener{rangeInt64Gener{math.MinInt32 + 1, math.MaxInt32}},
+			},
+			constants: []*Constant{nil, nil, {Value: types.NewStringDatum("DAY"), RetType: types.NewFieldType(mysql.TypeString)}},
+			chunkSize: 128,
+		},
+		{
+			retEvalType: types.ETDatetime,
+			childrenTypes: []types.EvalType{types.ETInt, types.ETString, types.ETString},
+			geners: []dataGenerator{
+				&dateTimeIntGener{nullRation: 0.2},
+				&numStrGener{rangeInt64Gener{math.MinInt32 + 1, math.MaxInt32}},
+			},
+			constants: []*Constant{nil, nil, {Value: types.NewStringDatum("WEEK"), RetType: types.NewFieldType(mysql.TypeString)}},
+			chunkSize: 128,
+		},
+		{
+			retEvalType: types.ETDatetime,
+			childrenTypes: []types.EvalType{types.ETInt, types.ETString, types.ETString},
+			geners: []dataGenerator{
+				&dateTimeIntGener{nullRation: 0.2},
+				&numStrGener{rangeInt64Gener{math.MinInt32 + 1, math.MaxInt32}},
+			},
+			constants: []*Constant{nil, nil, {Value: types.NewStringDatum("MONTH"), RetType: types.NewFieldType(mysql.TypeString)}},
+			chunkSize: 128,
+		},
+		{
+			retEvalType: types.ETDatetime,
+			childrenTypes: []types.EvalType{types.ETInt, types.ETString, types.ETString},
+			geners: []dataGenerator{
+				&dateTimeIntGener{nullRation: 0.2},
+				&numStrGener{rangeInt64Gener{math.MinInt32 + 1, math.MaxInt32}},
+			},
+			constants: []*Constant{nil, nil, {Value: types.NewStringDatum("QUARTER"), RetType: types.NewFieldType(mysql.TypeString)}},
+			chunkSize: 128,
+		},
+		{
+			retEvalType: types.ETDatetime,
+			childrenTypes: []types.EvalType{types.ETInt, types.ETString, types.ETString},
+			geners: []dataGenerator{
+				&dateTimeIntGener{nullRation: 0.2},
+				&numStrGener{rangeInt64Gener{math.MinInt32 + 1, math.MaxInt32}},
+			},
+			constants: []*Constant{nil, nil, {Value: types.NewStringDatum("YEAR"), RetType: types.NewFieldType(mysql.TypeString)}},
+			chunkSize: 128,
+		},
+		{
+			retEvalType: types.ETDatetime,
+			childrenTypes: []types.EvalType{types.ETInt, types.ETString, types.ETString},
+			geners: []dataGenerator{
+				&dateTimeIntGener{nullRation: 0.2},
+				&numStrGener{rangeInt64Gener{math.MinInt32 + 1, math.MaxInt32}},
+			},
+			constants: []*Constant{nil, nil, {Value: types.NewStringDatum("SECOND_MICROSECOND"), RetType: types.NewFieldType(mysql.TypeString)}},
+			chunkSize: 128,
+		},
+		{
+			retEvalType: types.ETDatetime,
+			childrenTypes: []types.EvalType{types.ETInt, types.ETString, types.ETString},
+			geners: []dataGenerator{
+				&dateTimeIntGener{nullRation: 0.2},
+				&numStrGener{rangeInt64Gener{math.MinInt32 + 1, math.MaxInt32}},
+			},
+			constants: []*Constant{nil, nil, {Value: types.NewStringDatum("MINUTE_MICROSECOND"), RetType: types.NewFieldType(mysql.TypeString)}},
+			chunkSize: 128,
+		},
+		{
+			retEvalType: types.ETDatetime,
+			childrenTypes: []types.EvalType{types.ETInt, types.ETString, types.ETString},
+			geners: []dataGenerator{
+				&dateTimeIntGener{nullRation: 0.2},
+				&numStrGener{rangeInt64Gener{math.MinInt32 + 1, math.MaxInt32}},
+			},
+			constants: []*Constant{nil, nil, {Value: types.NewStringDatum("MINUTE_SECOND"), RetType: types.NewFieldType(mysql.TypeString)}},
+			chunkSize: 128,
+		},
+		{
+			retEvalType: types.ETDatetime,
+			childrenTypes: []types.EvalType{types.ETInt, types.ETString, types.ETString},
+			geners: []dataGenerator{
+				&dateTimeIntGener{nullRation: 0.2},
+				&numStrGener{rangeInt64Gener{math.MinInt32 + 1, math.MaxInt32}},
+			},
+			constants: []*Constant{nil, nil, {Value: types.NewStringDatum("HOUR_MICROSECOND"), RetType: types.NewFieldType(mysql.TypeString)}},
+			chunkSize: 128,
+		},
+		{
+			retEvalType: types.ETDatetime,
+			childrenTypes: []types.EvalType{types.ETInt, types.ETString, types.ETString},
+			geners: []dataGenerator{
+				&dateTimeIntGener{nullRation: 0.2},
+				&numStrGener{rangeInt64Gener{math.MinInt32 + 1, math.MaxInt32}},
+			},
+			constants: []*Constant{nil, nil, {Value: types.NewStringDatum("HOUR_SECOND"), RetType: types.NewFieldType(mysql.TypeString)}},
+			chunkSize: 128,
+		},
+		{
+			retEvalType: types.ETDatetime,
+			childrenTypes: []types.EvalType{types.ETInt, types.ETString, types.ETString},
+			geners: []dataGenerator{
+				&dateTimeIntGener{nullRation: 0.2},
+				&numStrGener{rangeInt64Gener{math.MinInt32 + 1, math.MaxInt32}},
+			},
+			constants: []*Constant{nil, nil, {Value: types.NewStringDatum("HOUR_MINUTE"), RetType: types.NewFieldType(mysql.TypeString)}},
+			chunkSize: 128,
+		},
+		{
+			retEvalType: types.ETDatetime,
+			childrenTypes: []types.EvalType{types.ETInt, types.ETString, types.ETString},
+			geners: []dataGenerator{
+				&dateTimeIntGener{nullRation: 0.2},
+				&numStrGener{rangeInt64Gener{math.MinInt32 + 1, math.MaxInt32}},
+			},
+			constants: []*Constant{nil, nil, {Value: types.NewStringDatum("DAY_MICROSECOND"), RetType: types.NewFieldType(mysql.TypeString)}},
+			chunkSize: 128,
+		},
+		{
+			retEvalType: types.ETDatetime,
+			childrenTypes: []types.EvalType{types.ETInt, types.ETString, types.ETString},
+			geners: []dataGenerator{
+				&dateTimeIntGener{nullRation: 0.2},
+				&numStrGener{rangeInt64Gener{math.MinInt32 + 1, math.MaxInt32}},
+			},
+			constants: []*Constant{nil, nil, {Value: types.NewStringDatum("DAY_SECOND"), RetType: types.NewFieldType(mysql.TypeString)}},
+			chunkSize: 128,
+		},
+		{
+			retEvalType: types.ETDatetime,
+			childrenTypes: []types.EvalType{types.ETInt, types.ETString, types.ETString},
+			geners: []dataGenerator{
+				&dateTimeIntGener{nullRation: 0.2},
+				&numStrGener{rangeInt64Gener{math.MinInt32 + 1, math.MaxInt32}},
+			},
+			constants: []*Constant{nil, nil, {Value: types.NewStringDatum("DAY_MINUTE"), RetType: types.NewFieldType(mysql.TypeString)}},
+			chunkSize: 128,
+		},
+		{
+			retEvalType: types.ETDatetime,
+			childrenTypes: []types.EvalType{types.ETInt, types.ETString, types.ETString},
+			geners: []dataGenerator{
+				&dateTimeIntGener{nullRation: 0.2},
+				&numStrGener{rangeInt64Gener{math.MinInt32 + 1, math.MaxInt32}},
+			},
+			constants: []*Constant{nil, nil, {Value: types.NewStringDatum("DAY_HOUR"), RetType: types.NewFieldType(mysql.TypeString)}},
+			chunkSize: 128,
+		},
+		{
+			retEvalType: types.ETDatetime,
+			childrenTypes: []types.EvalType{types.ETInt, types.ETString, types.ETString},
+			geners: []dataGenerator{
+				&dateTimeIntGener{nullRation: 0.2},
+				&numStrGener{rangeInt64Gener{math.MinInt32 + 1, math.MaxInt32}},
+			},
+			constants: []*Constant{nil, nil, {Value: types.NewStringDatum("YEAR_MONTH"), RetType: types.NewFieldType(mysql.TypeString)}},
+			chunkSize: 128,
+		},
+		// builtinSubDateIntIntSig
+		{
+			retEvalType: types.ETDatetime,
+			childrenTypes: []types.EvalType{types.ETInt, types.ETInt, types.ETString},
+			geners: []dataGenerator{
+				&dateTimeIntGener{nullRation: 0.2},
+				&defaultGener{eType: types.ETInt, nullRation: 0.2},
+			},
+			constants: []*Constant{nil, nil, {Value: types.NewStringDatum("MICROSECOND"), RetType: types.NewFieldType(mysql.TypeString)}},
+			chunkSize: 128,
+		},
+		{
+			retEvalType: types.ETDatetime,
+			childrenTypes: []types.EvalType{types.ETInt, types.ETInt, types.ETString},
+			geners: []dataGenerator{
+				&dateTimeIntGener{nullRation: 0.2},
+				&defaultGener{eType: types.ETInt, nullRation: 0.2},
+			},
+			constants: []*Constant{nil, nil, {Value: types.NewStringDatum("SECOND"), RetType: types.NewFieldType(mysql.TypeString)}},
+			chunkSize: 128,
+		},
+		{
+			retEvalType: types.ETDatetime,
+			childrenTypes: []types.EvalType{types.ETInt, types.ETInt, types.ETString},
+			geners: []dataGenerator{
+				&dateTimeIntGener{nullRation: 0.2},
+				&defaultGener{eType: types.ETInt, nullRation: 0.2},
+			},
+			constants: []*Constant{nil, nil, {Value: types.NewStringDatum("MINUTE"), RetType: types.NewFieldType(mysql.TypeString)}},
+			chunkSize: 128,
+		},
+		{
+			retEvalType: types.ETDatetime,
+			childrenTypes: []types.EvalType{types.ETInt, types.ETInt, types.ETString},
+			geners: []dataGenerator{
+				&dateTimeIntGener{nullRation: 0.2},
+				&defaultGener{eType: types.ETInt, nullRation: 0.2},
+			},
+			constants: []*Constant{nil, nil, {Value: types.NewStringDatum("HOUR"), RetType: types.NewFieldType(mysql.TypeString)}},
+			chunkSize: 128,
+		},
+		{
+			retEvalType: types.ETDatetime,
+			childrenTypes: []types.EvalType{types.ETInt, types.ETInt, types.ETString},
+			geners: []dataGenerator{
+				&dateTimeIntGener{nullRation: 0.2},
+				&defaultGener{eType: types.ETInt, nullRation: 0.2},
+			},
+			constants: []*Constant{nil, nil, {Value: types.NewStringDatum("DAY"), RetType: types.NewFieldType(mysql.TypeString)}},
+			chunkSize: 128,
+		},
+		{
+			retEvalType: types.ETDatetime,
+			childrenTypes: []types.EvalType{types.ETInt, types.ETInt, types.ETString},
+			geners: []dataGenerator{
+				&dateTimeIntGener{nullRation: 0.2},
+				&defaultGener{eType: types.ETInt, nullRation: 0.2},
+			},
+			constants: []*Constant{nil, nil, {Value: types.NewStringDatum("WEEK"), RetType: types.NewFieldType(mysql.TypeString)}},
+			chunkSize: 128,
+		},
+		{
+			retEvalType: types.ETDatetime,
+			childrenTypes: []types.EvalType{types.ETInt, types.ETInt, types.ETString},
+			geners: []dataGenerator{
+				&dateTimeIntGener{nullRation: 0.2},
+				&defaultGener{eType: types.ETInt, nullRation: 0.2},
+			},
+			constants: []*Constant{nil, nil, {Value: types.NewStringDatum("MONTH"), RetType: types.NewFieldType(mysql.TypeString)}},
+			chunkSize: 128,
+		},
+		{
+			retEvalType: types.ETDatetime,
+			childrenTypes: []types.EvalType{types.ETInt, types.ETInt, types.ETString},
+			geners: []dataGenerator{
+				&dateTimeIntGener{nullRation: 0.2},
+				&defaultGener{eType: types.ETInt, nullRation: 0.2},
+			},
+			constants: []*Constant{nil, nil, {Value: types.NewStringDatum("QUARTER"), RetType: types.NewFieldType(mysql.TypeString)}},
+			chunkSize: 128,
+		},
+		{
+			retEvalType: types.ETDatetime,
+			childrenTypes: []types.EvalType{types.ETInt, types.ETInt, types.ETString},
+			geners: []dataGenerator{
+				&dateTimeIntGener{nullRation: 0.2},
+				&defaultGener{eType: types.ETInt, nullRation: 0.2},
+			},
types.NewStringDatum("YEAR"), RetType: types.NewFieldType(mysql.TypeString)}}, + chunkSize: 128, + }, + { + retEvalType: types.ETDatetime, + childrenTypes: []types.EvalType{types.ETInt, types.ETInt, types.ETString}, + geners: []dataGenerator{ + &dateTimeIntGener{nullRation: 0.2}, + &defaultGener{eType: types.ETInt, nullRation: 0.2}, + }, + constants: []*Constant{nil, nil, {Value: types.NewStringDatum("SECOND_MICROSECOND"), RetType: types.NewFieldType(mysql.TypeString)}}, + chunkSize: 128, + }, + { + retEvalType: types.ETDatetime, + childrenTypes: []types.EvalType{types.ETInt, types.ETInt, types.ETString}, + geners: []dataGenerator{ + &dateTimeIntGener{nullRation: 0.2}, + &defaultGener{eType: types.ETInt, nullRation: 0.2}, + }, + constants: []*Constant{nil, nil, {Value: types.NewStringDatum("MINUTE_MICROSECOND"), RetType: types.NewFieldType(mysql.TypeString)}}, + chunkSize: 128, + }, + { + retEvalType: types.ETDatetime, + childrenTypes: []types.EvalType{types.ETInt, types.ETInt, types.ETString}, + geners: []dataGenerator{ + &dateTimeIntGener{nullRation: 0.2}, + &defaultGener{eType: types.ETInt, nullRation: 0.2}, + }, + constants: []*Constant{nil, nil, {Value: types.NewStringDatum("MINUTE_SECOND"), RetType: types.NewFieldType(mysql.TypeString)}}, + chunkSize: 128, + }, + { + retEvalType: types.ETDatetime, + childrenTypes: []types.EvalType{types.ETInt, types.ETInt, types.ETString}, + geners: []dataGenerator{ + &dateTimeIntGener{nullRation: 0.2}, + &defaultGener{eType: types.ETInt, nullRation: 0.2}, + }, + constants: []*Constant{nil, nil, {Value: types.NewStringDatum("HOUR_MICROSECOND"), RetType: types.NewFieldType(mysql.TypeString)}}, + chunkSize: 128, + }, + { + retEvalType: types.ETDatetime, + childrenTypes: []types.EvalType{types.ETInt, types.ETInt, types.ETString}, + geners: []dataGenerator{ + &dateTimeIntGener{nullRation: 0.2}, + &defaultGener{eType: types.ETInt, nullRation: 0.2}, + }, + constants: []*Constant{nil, nil, {Value: types.NewStringDatum("HOUR_SECOND"), RetType: types.NewFieldType(mysql.TypeString)}}, + chunkSize: 128, + }, + { + retEvalType: types.ETDatetime, + childrenTypes: []types.EvalType{types.ETInt, types.ETInt, types.ETString}, + geners: []dataGenerator{ + &dateTimeIntGener{nullRation: 0.2}, + &defaultGener{eType: types.ETInt, nullRation: 0.2}, + }, + constants: []*Constant{nil, nil, {Value: types.NewStringDatum("HOUR_MINUTE"), RetType: types.NewFieldType(mysql.TypeString)}}, + chunkSize: 128, + }, + { + retEvalType: types.ETDatetime, + childrenTypes: []types.EvalType{types.ETInt, types.ETInt, types.ETString}, + geners: []dataGenerator{ + &dateTimeIntGener{nullRation: 0.2}, + &defaultGener{eType: types.ETInt, nullRation: 0.2}, + }, + constants: []*Constant{nil, nil, {Value: types.NewStringDatum("DAY_MICROSECOND"), RetType: types.NewFieldType(mysql.TypeString)}}, + chunkSize: 128, + }, + { + retEvalType: types.ETDatetime, + childrenTypes: []types.EvalType{types.ETInt, types.ETInt, types.ETString}, + geners: []dataGenerator{ + &dateTimeIntGener{nullRation: 0.2}, + &defaultGener{eType: types.ETInt, nullRation: 0.2}, + }, + constants: []*Constant{nil, nil, {Value: types.NewStringDatum("DAY_SECOND"), RetType: types.NewFieldType(mysql.TypeString)}}, + chunkSize: 128, + }, + { + retEvalType: types.ETDatetime, + childrenTypes: []types.EvalType{types.ETInt, types.ETInt, types.ETString}, + geners: []dataGenerator{ + &dateTimeIntGener{nullRation: 0.2}, + &defaultGener{eType: types.ETInt, nullRation: 0.2}, + }, + constants: []*Constant{nil, nil, {Value: 
types.NewStringDatum("DAY_MINUTE"), RetType: types.NewFieldType(mysql.TypeString)}}, + chunkSize: 128, + }, + { + retEvalType: types.ETDatetime, + childrenTypes: []types.EvalType{types.ETInt, types.ETInt, types.ETString}, + geners: []dataGenerator{ + &dateTimeIntGener{nullRation: 0.2}, + &defaultGener{eType: types.ETInt, nullRation: 0.2}, + }, + constants: []*Constant{nil, nil, {Value: types.NewStringDatum("DAY_HOUR"), RetType: types.NewFieldType(mysql.TypeString)}}, + chunkSize: 128, + }, + { + retEvalType: types.ETDatetime, + childrenTypes: []types.EvalType{types.ETInt, types.ETInt, types.ETString}, + geners: []dataGenerator{ + &dateTimeIntGener{nullRation: 0.2}, + &defaultGener{eType: types.ETInt, nullRation: 0.2}, + }, + constants: []*Constant{nil, nil, {Value: types.NewStringDatum("YEAR_MONTH"), RetType: types.NewFieldType(mysql.TypeString)}}, + chunkSize: 128, + }, + // builtinSubDateIntRealSig + { + retEvalType: types.ETDatetime, + childrenTypes: []types.EvalType{types.ETInt, types.ETReal, types.ETString}, + geners: []dataGenerator{ + &dateTimeIntGener{nullRation: 0.2}, + &defaultGener{eType: types.ETReal, nullRation: 0.2}, + }, + constants: []*Constant{nil, nil, {Value: types.NewStringDatum("MICROSECOND"), RetType: types.NewFieldType(mysql.TypeString)}}, + chunkSize: 128, + }, + { + retEvalType: types.ETDatetime, + childrenTypes: []types.EvalType{types.ETInt, types.ETReal, types.ETString}, + geners: []dataGenerator{ + &dateTimeIntGener{nullRation: 0.2}, + &defaultGener{eType: types.ETReal, nullRation: 0.2}, + }, + constants: []*Constant{nil, nil, {Value: types.NewStringDatum("SECOND"), RetType: types.NewFieldType(mysql.TypeString)}}, + chunkSize: 128, + }, + { + retEvalType: types.ETDatetime, + childrenTypes: []types.EvalType{types.ETInt, types.ETReal, types.ETString}, + geners: []dataGenerator{ + &dateTimeIntGener{nullRation: 0.2}, + &defaultGener{eType: types.ETReal, nullRation: 0.2}, + }, + constants: []*Constant{nil, nil, {Value: types.NewStringDatum("MINUTE"), RetType: types.NewFieldType(mysql.TypeString)}}, + chunkSize: 128, + }, + { + retEvalType: types.ETDatetime, + childrenTypes: []types.EvalType{types.ETInt, types.ETReal, types.ETString}, + geners: []dataGenerator{ + &dateTimeIntGener{nullRation: 0.2}, + &defaultGener{eType: types.ETReal, nullRation: 0.2}, + }, + constants: []*Constant{nil, nil, {Value: types.NewStringDatum("HOUR"), RetType: types.NewFieldType(mysql.TypeString)}}, + chunkSize: 128, + }, + { + retEvalType: types.ETDatetime, + childrenTypes: []types.EvalType{types.ETInt, types.ETReal, types.ETString}, + geners: []dataGenerator{ + &dateTimeIntGener{nullRation: 0.2}, + &defaultGener{eType: types.ETReal, nullRation: 0.2}, + }, + constants: []*Constant{nil, nil, {Value: types.NewStringDatum("DAY"), RetType: types.NewFieldType(mysql.TypeString)}}, + chunkSize: 128, + }, + { + retEvalType: types.ETDatetime, + childrenTypes: []types.EvalType{types.ETInt, types.ETReal, types.ETString}, + geners: []dataGenerator{ + &dateTimeIntGener{nullRation: 0.2}, + &defaultGener{eType: types.ETReal, nullRation: 0.2}, + }, + constants: []*Constant{nil, nil, {Value: types.NewStringDatum("WEEK"), RetType: types.NewFieldType(mysql.TypeString)}}, + chunkSize: 128, + }, + { + retEvalType: types.ETDatetime, + childrenTypes: []types.EvalType{types.ETInt, types.ETReal, types.ETString}, + geners: []dataGenerator{ + &dateTimeIntGener{nullRation: 0.2}, + &defaultGener{eType: types.ETReal, nullRation: 0.2}, + }, + constants: []*Constant{nil, nil, {Value: types.NewStringDatum("MONTH"), 
RetType: types.NewFieldType(mysql.TypeString)}}, + chunkSize: 128, + }, + { + retEvalType: types.ETDatetime, + childrenTypes: []types.EvalType{types.ETInt, types.ETReal, types.ETString}, + geners: []dataGenerator{ + &dateTimeIntGener{nullRation: 0.2}, + &defaultGener{eType: types.ETReal, nullRation: 0.2}, + }, + constants: []*Constant{nil, nil, {Value: types.NewStringDatum("QUARTER"), RetType: types.NewFieldType(mysql.TypeString)}}, + chunkSize: 128, + }, + { + retEvalType: types.ETDatetime, + childrenTypes: []types.EvalType{types.ETInt, types.ETReal, types.ETString}, + geners: []dataGenerator{ + &dateTimeIntGener{nullRation: 0.2}, + &defaultGener{eType: types.ETReal, nullRation: 0.2}, + }, + constants: []*Constant{nil, nil, {Value: types.NewStringDatum("YEAR"), RetType: types.NewFieldType(mysql.TypeString)}}, + chunkSize: 128, + }, + { + retEvalType: types.ETDatetime, + childrenTypes: []types.EvalType{types.ETInt, types.ETReal, types.ETString}, + geners: []dataGenerator{ + &dateTimeIntGener{nullRation: 0.2}, + &defaultGener{eType: types.ETReal, nullRation: 0.2}, + }, + constants: []*Constant{nil, nil, {Value: types.NewStringDatum("SECOND_MICROSECOND"), RetType: types.NewFieldType(mysql.TypeString)}}, + chunkSize: 128, + }, + { + retEvalType: types.ETDatetime, + childrenTypes: []types.EvalType{types.ETInt, types.ETReal, types.ETString}, + geners: []dataGenerator{ + &dateTimeIntGener{nullRation: 0.2}, + &defaultGener{eType: types.ETReal, nullRation: 0.2}, + }, + constants: []*Constant{nil, nil, {Value: types.NewStringDatum("MINUTE_MICROSECOND"), RetType: types.NewFieldType(mysql.TypeString)}}, + chunkSize: 128, + }, + { + retEvalType: types.ETDatetime, + childrenTypes: []types.EvalType{types.ETInt, types.ETReal, types.ETString}, + geners: []dataGenerator{ + &dateTimeIntGener{nullRation: 0.2}, + &defaultGener{eType: types.ETReal, nullRation: 0.2}, + }, + constants: []*Constant{nil, nil, {Value: types.NewStringDatum("MINUTE_SECOND"), RetType: types.NewFieldType(mysql.TypeString)}}, + chunkSize: 128, + }, + { + retEvalType: types.ETDatetime, + childrenTypes: []types.EvalType{types.ETInt, types.ETReal, types.ETString}, + geners: []dataGenerator{ + &dateTimeIntGener{nullRation: 0.2}, + &defaultGener{eType: types.ETReal, nullRation: 0.2}, + }, + constants: []*Constant{nil, nil, {Value: types.NewStringDatum("HOUR_MICROSECOND"), RetType: types.NewFieldType(mysql.TypeString)}}, + chunkSize: 128, + }, + { + retEvalType: types.ETDatetime, + childrenTypes: []types.EvalType{types.ETInt, types.ETReal, types.ETString}, + geners: []dataGenerator{ + &dateTimeIntGener{nullRation: 0.2}, + &defaultGener{eType: types.ETReal, nullRation: 0.2}, + }, + constants: []*Constant{nil, nil, {Value: types.NewStringDatum("HOUR_SECOND"), RetType: types.NewFieldType(mysql.TypeString)}}, + chunkSize: 128, + }, + { + retEvalType: types.ETDatetime, + childrenTypes: []types.EvalType{types.ETInt, types.ETReal, types.ETString}, + geners: []dataGenerator{ + &dateTimeIntGener{nullRation: 0.2}, + &defaultGener{eType: types.ETReal, nullRation: 0.2}, + }, + constants: []*Constant{nil, nil, {Value: types.NewStringDatum("HOUR_MINUTE"), RetType: types.NewFieldType(mysql.TypeString)}}, + chunkSize: 128, + }, + { + retEvalType: types.ETDatetime, + childrenTypes: []types.EvalType{types.ETInt, types.ETReal, types.ETString}, + geners: []dataGenerator{ + &dateTimeIntGener{nullRation: 0.2}, + &defaultGener{eType: types.ETReal, nullRation: 0.2}, + }, + constants: []*Constant{nil, nil, {Value: types.NewStringDatum("DAY_MICROSECOND"), RetType: 
types.NewFieldType(mysql.TypeString)}}, + chunkSize: 128, + }, + { + retEvalType: types.ETDatetime, + childrenTypes: []types.EvalType{types.ETInt, types.ETReal, types.ETString}, + geners: []dataGenerator{ + &dateTimeIntGener{nullRation: 0.2}, + &defaultGener{eType: types.ETReal, nullRation: 0.2}, + }, + constants: []*Constant{nil, nil, {Value: types.NewStringDatum("DAY_SECOND"), RetType: types.NewFieldType(mysql.TypeString)}}, + chunkSize: 128, + }, + { + retEvalType: types.ETDatetime, + childrenTypes: []types.EvalType{types.ETInt, types.ETReal, types.ETString}, + geners: []dataGenerator{ + &dateTimeIntGener{nullRation: 0.2}, + &defaultGener{eType: types.ETReal, nullRation: 0.2}, + }, + constants: []*Constant{nil, nil, {Value: types.NewStringDatum("DAY_MINUTE"), RetType: types.NewFieldType(mysql.TypeString)}}, + chunkSize: 128, + }, + { + retEvalType: types.ETDatetime, + childrenTypes: []types.EvalType{types.ETInt, types.ETReal, types.ETString}, + geners: []dataGenerator{ + &dateTimeIntGener{nullRation: 0.2}, + &defaultGener{eType: types.ETReal, nullRation: 0.2}, + }, + constants: []*Constant{nil, nil, {Value: types.NewStringDatum("DAY_HOUR"), RetType: types.NewFieldType(mysql.TypeString)}}, + chunkSize: 128, + }, + { + retEvalType: types.ETDatetime, + childrenTypes: []types.EvalType{types.ETInt, types.ETReal, types.ETString}, + geners: []dataGenerator{ + &dateTimeIntGener{nullRation: 0.2}, + &defaultGener{eType: types.ETReal, nullRation: 0.2}, + }, + constants: []*Constant{nil, nil, {Value: types.NewStringDatum("YEAR_MONTH"), RetType: types.NewFieldType(mysql.TypeString)}}, + chunkSize: 128, + }, + // builtinSubDateIntDecimalSig + { + retEvalType: types.ETDatetime, + childrenTypes: []types.EvalType{types.ETInt, types.ETDecimal, types.ETString}, + geners: []dataGenerator{ + &dateTimeIntGener{nullRation: 0.2}, + &defaultGener{eType: types.ETDecimal, nullRation: 0.2}, + }, + constants: []*Constant{nil, nil, {Value: types.NewStringDatum("MICROSECOND"), RetType: types.NewFieldType(mysql.TypeString)}}, + chunkSize: 128, + }, + { + retEvalType: types.ETDatetime, + childrenTypes: []types.EvalType{types.ETInt, types.ETDecimal, types.ETString}, + geners: []dataGenerator{ + &dateTimeIntGener{nullRation: 0.2}, + &defaultGener{eType: types.ETDecimal, nullRation: 0.2}, + }, + constants: []*Constant{nil, nil, {Value: types.NewStringDatum("SECOND"), RetType: types.NewFieldType(mysql.TypeString)}}, + chunkSize: 128, + }, + { + retEvalType: types.ETDatetime, + childrenTypes: []types.EvalType{types.ETInt, types.ETDecimal, types.ETString}, + geners: []dataGenerator{ + &dateTimeIntGener{nullRation: 0.2}, + &defaultGener{eType: types.ETDecimal, nullRation: 0.2}, + }, + constants: []*Constant{nil, nil, {Value: types.NewStringDatum("MINUTE"), RetType: types.NewFieldType(mysql.TypeString)}}, + chunkSize: 128, + }, + { + retEvalType: types.ETDatetime, + childrenTypes: []types.EvalType{types.ETInt, types.ETDecimal, types.ETString}, + geners: []dataGenerator{ + &dateTimeIntGener{nullRation: 0.2}, + &defaultGener{eType: types.ETDecimal, nullRation: 0.2}, + }, + constants: []*Constant{nil, nil, {Value: types.NewStringDatum("HOUR"), RetType: types.NewFieldType(mysql.TypeString)}}, + chunkSize: 128, + }, + { + retEvalType: types.ETDatetime, + childrenTypes: []types.EvalType{types.ETInt, types.ETDecimal, types.ETString}, + geners: []dataGenerator{ + &dateTimeIntGener{nullRation: 0.2}, + &defaultGener{eType: types.ETDecimal, nullRation: 0.2}, + }, + constants: []*Constant{nil, nil, {Value: types.NewStringDatum("DAY"), 
RetType: types.NewFieldType(mysql.TypeString)}}, + chunkSize: 128, + }, + { + retEvalType: types.ETDatetime, + childrenTypes: []types.EvalType{types.ETInt, types.ETDecimal, types.ETString}, + geners: []dataGenerator{ + &dateTimeIntGener{nullRation: 0.2}, + &defaultGener{eType: types.ETDecimal, nullRation: 0.2}, + }, + constants: []*Constant{nil, nil, {Value: types.NewStringDatum("WEEK"), RetType: types.NewFieldType(mysql.TypeString)}}, + chunkSize: 128, + }, + { + retEvalType: types.ETDatetime, + childrenTypes: []types.EvalType{types.ETInt, types.ETDecimal, types.ETString}, + geners: []dataGenerator{ + &dateTimeIntGener{nullRation: 0.2}, + &defaultGener{eType: types.ETDecimal, nullRation: 0.2}, + }, + constants: []*Constant{nil, nil, {Value: types.NewStringDatum("MONTH"), RetType: types.NewFieldType(mysql.TypeString)}}, + chunkSize: 128, + }, + { + retEvalType: types.ETDatetime, + childrenTypes: []types.EvalType{types.ETInt, types.ETDecimal, types.ETString}, + geners: []dataGenerator{ + &dateTimeIntGener{nullRation: 0.2}, + &defaultGener{eType: types.ETDecimal, nullRation: 0.2}, + }, + constants: []*Constant{nil, nil, {Value: types.NewStringDatum("QUARTER"), RetType: types.NewFieldType(mysql.TypeString)}}, + chunkSize: 128, + }, + { + retEvalType: types.ETDatetime, + childrenTypes: []types.EvalType{types.ETInt, types.ETDecimal, types.ETString}, + geners: []dataGenerator{ + &dateTimeIntGener{nullRation: 0.2}, + &defaultGener{eType: types.ETDecimal, nullRation: 0.2}, + }, + constants: []*Constant{nil, nil, {Value: types.NewStringDatum("YEAR"), RetType: types.NewFieldType(mysql.TypeString)}}, + chunkSize: 128, + }, + { + retEvalType: types.ETDatetime, + childrenTypes: []types.EvalType{types.ETInt, types.ETDecimal, types.ETString}, + geners: []dataGenerator{ + &dateTimeIntGener{nullRation: 0.2}, + &defaultGener{eType: types.ETDecimal, nullRation: 0.2}, + }, + constants: []*Constant{nil, nil, {Value: types.NewStringDatum("SECOND_MICROSECOND"), RetType: types.NewFieldType(mysql.TypeString)}}, + chunkSize: 128, + }, + { + retEvalType: types.ETDatetime, + childrenTypes: []types.EvalType{types.ETInt, types.ETDecimal, types.ETString}, + geners: []dataGenerator{ + &dateTimeIntGener{nullRation: 0.2}, + &defaultGener{eType: types.ETDecimal, nullRation: 0.2}, + }, + constants: []*Constant{nil, nil, {Value: types.NewStringDatum("MINUTE_MICROSECOND"), RetType: types.NewFieldType(mysql.TypeString)}}, + chunkSize: 128, + }, + { + retEvalType: types.ETDatetime, + childrenTypes: []types.EvalType{types.ETInt, types.ETDecimal, types.ETString}, + geners: []dataGenerator{ + &dateTimeIntGener{nullRation: 0.2}, + &defaultGener{eType: types.ETDecimal, nullRation: 0.2}, + }, + constants: []*Constant{nil, nil, {Value: types.NewStringDatum("MINUTE_SECOND"), RetType: types.NewFieldType(mysql.TypeString)}}, + chunkSize: 128, + }, + { + retEvalType: types.ETDatetime, + childrenTypes: []types.EvalType{types.ETInt, types.ETDecimal, types.ETString}, + geners: []dataGenerator{ + &dateTimeIntGener{nullRation: 0.2}, + &defaultGener{eType: types.ETDecimal, nullRation: 0.2}, + }, + constants: []*Constant{nil, nil, {Value: types.NewStringDatum("HOUR_MICROSECOND"), RetType: types.NewFieldType(mysql.TypeString)}}, + chunkSize: 128, + }, + { + retEvalType: types.ETDatetime, + childrenTypes: []types.EvalType{types.ETInt, types.ETDecimal, types.ETString}, + geners: []dataGenerator{ + &dateTimeIntGener{nullRation: 0.2}, + &defaultGener{eType: types.ETDecimal, nullRation: 0.2}, + }, + constants: []*Constant{nil, nil, {Value: 
types.NewStringDatum("HOUR_SECOND"), RetType: types.NewFieldType(mysql.TypeString)}}, + chunkSize: 128, + }, + { + retEvalType: types.ETDatetime, + childrenTypes: []types.EvalType{types.ETInt, types.ETDecimal, types.ETString}, + geners: []dataGenerator{ + &dateTimeIntGener{nullRation: 0.2}, + &defaultGener{eType: types.ETDecimal, nullRation: 0.2}, + }, + constants: []*Constant{nil, nil, {Value: types.NewStringDatum("HOUR_MINUTE"), RetType: types.NewFieldType(mysql.TypeString)}}, + chunkSize: 128, + }, + { + retEvalType: types.ETDatetime, + childrenTypes: []types.EvalType{types.ETInt, types.ETDecimal, types.ETString}, + geners: []dataGenerator{ + &dateTimeIntGener{nullRation: 0.2}, + &defaultGener{eType: types.ETDecimal, nullRation: 0.2}, + }, + constants: []*Constant{nil, nil, {Value: types.NewStringDatum("DAY_MICROSECOND"), RetType: types.NewFieldType(mysql.TypeString)}}, + chunkSize: 128, + }, + { + retEvalType: types.ETDatetime, + childrenTypes: []types.EvalType{types.ETInt, types.ETDecimal, types.ETString}, + geners: []dataGenerator{ + &dateTimeIntGener{nullRation: 0.2}, + &defaultGener{eType: types.ETDecimal, nullRation: 0.2}, + }, + constants: []*Constant{nil, nil, {Value: types.NewStringDatum("DAY_SECOND"), RetType: types.NewFieldType(mysql.TypeString)}}, + chunkSize: 128, + }, + { + retEvalType: types.ETDatetime, + childrenTypes: []types.EvalType{types.ETInt, types.ETDecimal, types.ETString}, + geners: []dataGenerator{ + &dateTimeIntGener{nullRation: 0.2}, + &defaultGener{eType: types.ETDecimal, nullRation: 0.2}, + }, + constants: []*Constant{nil, nil, {Value: types.NewStringDatum("DAY_MINUTE"), RetType: types.NewFieldType(mysql.TypeString)}}, + chunkSize: 128, + }, + { + retEvalType: types.ETDatetime, + childrenTypes: []types.EvalType{types.ETInt, types.ETDecimal, types.ETString}, + geners: []dataGenerator{ + &dateTimeIntGener{nullRation: 0.2}, + &defaultGener{eType: types.ETDecimal, nullRation: 0.2}, + }, + constants: []*Constant{nil, nil, {Value: types.NewStringDatum("DAY_HOUR"), RetType: types.NewFieldType(mysql.TypeString)}}, + chunkSize: 128, + }, + { + retEvalType: types.ETDatetime, + childrenTypes: []types.EvalType{types.ETInt, types.ETDecimal, types.ETString}, + geners: []dataGenerator{ + &dateTimeIntGener{nullRation: 0.2}, + &defaultGener{eType: types.ETDecimal, nullRation: 0.2}, + }, + constants: []*Constant{nil, nil, {Value: types.NewStringDatum("YEAR_MONTH"), RetType: types.NewFieldType(mysql.TypeString)}}, + chunkSize: 128, + }, + // builtinSubDateDatetimeStringSig + { + retEvalType: types.ETDatetime, + childrenTypes: []types.EvalType{types.ETDatetime, types.ETString, types.ETString}, + geners: []dataGenerator{ + &defaultGener{eType: types.ETDatetime, nullRation: 0.2}, + &numStrGener{rangeInt64Gener{math.MinInt32 + 1, math.MaxInt32}}, + }, + constants: []*Constant{nil, nil, {Value: types.NewStringDatum("MICROSECOND"), RetType: types.NewFieldType(mysql.TypeString)}}, + chunkSize: 128, + }, + { + retEvalType: types.ETDatetime, + childrenTypes: []types.EvalType{types.ETDatetime, types.ETString, types.ETString}, + geners: []dataGenerator{ + &defaultGener{eType: types.ETDatetime, nullRation: 0.2}, + &numStrGener{rangeInt64Gener{math.MinInt32 + 1, math.MaxInt32}}, + }, + constants: []*Constant{nil, nil, {Value: types.NewStringDatum("SECOND"), RetType: types.NewFieldType(mysql.TypeString)}}, + chunkSize: 128, + }, + { + retEvalType: types.ETDatetime, + childrenTypes: []types.EvalType{types.ETDatetime, types.ETString, types.ETString}, + geners: []dataGenerator{ + 
&defaultGener{eType: types.ETDatetime, nullRation: 0.2}, + &numStrGener{rangeInt64Gener{math.MinInt32 + 1, math.MaxInt32}}, + }, + constants: []*Constant{nil, nil, {Value: types.NewStringDatum("MINUTE"), RetType: types.NewFieldType(mysql.TypeString)}}, + chunkSize: 128, + }, + { + retEvalType: types.ETDatetime, + childrenTypes: []types.EvalType{types.ETDatetime, types.ETString, types.ETString}, + geners: []dataGenerator{ + &defaultGener{eType: types.ETDatetime, nullRation: 0.2}, + &numStrGener{rangeInt64Gener{math.MinInt32 + 1, math.MaxInt32}}, + }, + constants: []*Constant{nil, nil, {Value: types.NewStringDatum("HOUR"), RetType: types.NewFieldType(mysql.TypeString)}}, + chunkSize: 128, + }, + { + retEvalType: types.ETDatetime, + childrenTypes: []types.EvalType{types.ETDatetime, types.ETString, types.ETString}, + geners: []dataGenerator{ + &defaultGener{eType: types.ETDatetime, nullRation: 0.2}, + &numStrGener{rangeInt64Gener{math.MinInt32 + 1, math.MaxInt32}}, + }, + constants: []*Constant{nil, nil, {Value: types.NewStringDatum("DAY"), RetType: types.NewFieldType(mysql.TypeString)}}, + chunkSize: 128, + }, + { + retEvalType: types.ETDatetime, + childrenTypes: []types.EvalType{types.ETDatetime, types.ETString, types.ETString}, + geners: []dataGenerator{ + &defaultGener{eType: types.ETDatetime, nullRation: 0.2}, + &numStrGener{rangeInt64Gener{math.MinInt32 + 1, math.MaxInt32}}, + }, + constants: []*Constant{nil, nil, {Value: types.NewStringDatum("WEEK"), RetType: types.NewFieldType(mysql.TypeString)}}, + chunkSize: 128, + }, + { + retEvalType: types.ETDatetime, + childrenTypes: []types.EvalType{types.ETDatetime, types.ETString, types.ETString}, + geners: []dataGenerator{ + &defaultGener{eType: types.ETDatetime, nullRation: 0.2}, + &numStrGener{rangeInt64Gener{math.MinInt32 + 1, math.MaxInt32}}, + }, + constants: []*Constant{nil, nil, {Value: types.NewStringDatum("MONTH"), RetType: types.NewFieldType(mysql.TypeString)}}, + chunkSize: 128, + }, + { + retEvalType: types.ETDatetime, + childrenTypes: []types.EvalType{types.ETDatetime, types.ETString, types.ETString}, + geners: []dataGenerator{ + &defaultGener{eType: types.ETDatetime, nullRation: 0.2}, + &numStrGener{rangeInt64Gener{math.MinInt32 + 1, math.MaxInt32}}, + }, + constants: []*Constant{nil, nil, {Value: types.NewStringDatum("QUARTER"), RetType: types.NewFieldType(mysql.TypeString)}}, + chunkSize: 128, + }, + { + retEvalType: types.ETDatetime, + childrenTypes: []types.EvalType{types.ETDatetime, types.ETString, types.ETString}, + geners: []dataGenerator{ + &defaultGener{eType: types.ETDatetime, nullRation: 0.2}, + &numStrGener{rangeInt64Gener{math.MinInt32 + 1, math.MaxInt32}}, + }, + constants: []*Constant{nil, nil, {Value: types.NewStringDatum("YEAR"), RetType: types.NewFieldType(mysql.TypeString)}}, + chunkSize: 128, + }, + { + retEvalType: types.ETDatetime, + childrenTypes: []types.EvalType{types.ETDatetime, types.ETString, types.ETString}, + geners: []dataGenerator{ + &defaultGener{eType: types.ETDatetime, nullRation: 0.2}, + &numStrGener{rangeInt64Gener{math.MinInt32 + 1, math.MaxInt32}}, + }, + constants: []*Constant{nil, nil, {Value: types.NewStringDatum("SECOND_MICROSECOND"), RetType: types.NewFieldType(mysql.TypeString)}}, + chunkSize: 128, + }, + { + retEvalType: types.ETDatetime, + childrenTypes: []types.EvalType{types.ETDatetime, types.ETString, types.ETString}, + geners: []dataGenerator{ + &defaultGener{eType: types.ETDatetime, nullRation: 0.2}, + &numStrGener{rangeInt64Gener{math.MinInt32 + 1, math.MaxInt32}}, + }, + 
constants: []*Constant{nil, nil, {Value: types.NewStringDatum("MINUTE_MICROSECOND"), RetType: types.NewFieldType(mysql.TypeString)}}, + chunkSize: 128, + }, + { + retEvalType: types.ETDatetime, + childrenTypes: []types.EvalType{types.ETDatetime, types.ETString, types.ETString}, + geners: []dataGenerator{ + &defaultGener{eType: types.ETDatetime, nullRation: 0.2}, + &numStrGener{rangeInt64Gener{math.MinInt32 + 1, math.MaxInt32}}, + }, + constants: []*Constant{nil, nil, {Value: types.NewStringDatum("MINUTE_SECOND"), RetType: types.NewFieldType(mysql.TypeString)}}, + chunkSize: 128, + }, + { + retEvalType: types.ETDatetime, + childrenTypes: []types.EvalType{types.ETDatetime, types.ETString, types.ETString}, + geners: []dataGenerator{ + &defaultGener{eType: types.ETDatetime, nullRation: 0.2}, + &numStrGener{rangeInt64Gener{math.MinInt32 + 1, math.MaxInt32}}, + }, + constants: []*Constant{nil, nil, {Value: types.NewStringDatum("HOUR_MICROSECOND"), RetType: types.NewFieldType(mysql.TypeString)}}, + chunkSize: 128, + }, + { + retEvalType: types.ETDatetime, + childrenTypes: []types.EvalType{types.ETDatetime, types.ETString, types.ETString}, + geners: []dataGenerator{ + &defaultGener{eType: types.ETDatetime, nullRation: 0.2}, + &numStrGener{rangeInt64Gener{math.MinInt32 + 1, math.MaxInt32}}, + }, + constants: []*Constant{nil, nil, {Value: types.NewStringDatum("HOUR_SECOND"), RetType: types.NewFieldType(mysql.TypeString)}}, + chunkSize: 128, + }, + { + retEvalType: types.ETDatetime, + childrenTypes: []types.EvalType{types.ETDatetime, types.ETString, types.ETString}, + geners: []dataGenerator{ + &defaultGener{eType: types.ETDatetime, nullRation: 0.2}, + &numStrGener{rangeInt64Gener{math.MinInt32 + 1, math.MaxInt32}}, + }, + constants: []*Constant{nil, nil, {Value: types.NewStringDatum("HOUR_MINUTE"), RetType: types.NewFieldType(mysql.TypeString)}}, + chunkSize: 128, + }, + { + retEvalType: types.ETDatetime, + childrenTypes: []types.EvalType{types.ETDatetime, types.ETString, types.ETString}, + geners: []dataGenerator{ + &defaultGener{eType: types.ETDatetime, nullRation: 0.2}, + &numStrGener{rangeInt64Gener{math.MinInt32 + 1, math.MaxInt32}}, + }, + constants: []*Constant{nil, nil, {Value: types.NewStringDatum("DAY_MICROSECOND"), RetType: types.NewFieldType(mysql.TypeString)}}, + chunkSize: 128, + }, + { + retEvalType: types.ETDatetime, + childrenTypes: []types.EvalType{types.ETDatetime, types.ETString, types.ETString}, + geners: []dataGenerator{ + &defaultGener{eType: types.ETDatetime, nullRation: 0.2}, + &numStrGener{rangeInt64Gener{math.MinInt32 + 1, math.MaxInt32}}, + }, + constants: []*Constant{nil, nil, {Value: types.NewStringDatum("DAY_SECOND"), RetType: types.NewFieldType(mysql.TypeString)}}, + chunkSize: 128, + }, + { + retEvalType: types.ETDatetime, + childrenTypes: []types.EvalType{types.ETDatetime, types.ETString, types.ETString}, + geners: []dataGenerator{ + &defaultGener{eType: types.ETDatetime, nullRation: 0.2}, + &numStrGener{rangeInt64Gener{math.MinInt32 + 1, math.MaxInt32}}, + }, + constants: []*Constant{nil, nil, {Value: types.NewStringDatum("DAY_MINUTE"), RetType: types.NewFieldType(mysql.TypeString)}}, + chunkSize: 128, + }, + { + retEvalType: types.ETDatetime, + childrenTypes: []types.EvalType{types.ETDatetime, types.ETString, types.ETString}, + geners: []dataGenerator{ + &defaultGener{eType: types.ETDatetime, nullRation: 0.2}, + &numStrGener{rangeInt64Gener{math.MinInt32 + 1, math.MaxInt32}}, + }, + constants: []*Constant{nil, nil, {Value: types.NewStringDatum("DAY_HOUR"), 
RetType: types.NewFieldType(mysql.TypeString)}}, + chunkSize: 128, + }, + { + retEvalType: types.ETDatetime, + childrenTypes: []types.EvalType{types.ETDatetime, types.ETString, types.ETString}, + geners: []dataGenerator{ + &defaultGener{eType: types.ETDatetime, nullRation: 0.2}, + &numStrGener{rangeInt64Gener{math.MinInt32 + 1, math.MaxInt32}}, + }, + constants: []*Constant{nil, nil, {Value: types.NewStringDatum("YEAR_MONTH"), RetType: types.NewFieldType(mysql.TypeString)}}, + chunkSize: 128, + }, + // builtinSubDateDatetimeIntSig + { + retEvalType: types.ETDatetime, + childrenTypes: []types.EvalType{types.ETDatetime, types.ETInt, types.ETString}, + geners: []dataGenerator{ + &defaultGener{eType: types.ETDatetime, nullRation: 0.2}, + &defaultGener{eType: types.ETInt, nullRation: 0.2}, + }, + constants: []*Constant{nil, nil, {Value: types.NewStringDatum("MICROSECOND"), RetType: types.NewFieldType(mysql.TypeString)}}, + chunkSize: 128, + }, + { + retEvalType: types.ETDatetime, + childrenTypes: []types.EvalType{types.ETDatetime, types.ETInt, types.ETString}, + geners: []dataGenerator{ + &defaultGener{eType: types.ETDatetime, nullRation: 0.2}, + &defaultGener{eType: types.ETInt, nullRation: 0.2}, + }, + constants: []*Constant{nil, nil, {Value: types.NewStringDatum("SECOND"), RetType: types.NewFieldType(mysql.TypeString)}}, + chunkSize: 128, + }, + { + retEvalType: types.ETDatetime, + childrenTypes: []types.EvalType{types.ETDatetime, types.ETInt, types.ETString}, + geners: []dataGenerator{ + &defaultGener{eType: types.ETDatetime, nullRation: 0.2}, + &defaultGener{eType: types.ETInt, nullRation: 0.2}, + }, + constants: []*Constant{nil, nil, {Value: types.NewStringDatum("MINUTE"), RetType: types.NewFieldType(mysql.TypeString)}}, + chunkSize: 128, + }, + { + retEvalType: types.ETDatetime, + childrenTypes: []types.EvalType{types.ETDatetime, types.ETInt, types.ETString}, + geners: []dataGenerator{ + &defaultGener{eType: types.ETDatetime, nullRation: 0.2}, + &defaultGener{eType: types.ETInt, nullRation: 0.2}, + }, + constants: []*Constant{nil, nil, {Value: types.NewStringDatum("HOUR"), RetType: types.NewFieldType(mysql.TypeString)}}, + chunkSize: 128, + }, + { + retEvalType: types.ETDatetime, + childrenTypes: []types.EvalType{types.ETDatetime, types.ETInt, types.ETString}, + geners: []dataGenerator{ + &defaultGener{eType: types.ETDatetime, nullRation: 0.2}, + &defaultGener{eType: types.ETInt, nullRation: 0.2}, + }, + constants: []*Constant{nil, nil, {Value: types.NewStringDatum("DAY"), RetType: types.NewFieldType(mysql.TypeString)}}, + chunkSize: 128, + }, + { + retEvalType: types.ETDatetime, + childrenTypes: []types.EvalType{types.ETDatetime, types.ETInt, types.ETString}, + geners: []dataGenerator{ + &defaultGener{eType: types.ETDatetime, nullRation: 0.2}, + &defaultGener{eType: types.ETInt, nullRation: 0.2}, + }, + constants: []*Constant{nil, nil, {Value: types.NewStringDatum("WEEK"), RetType: types.NewFieldType(mysql.TypeString)}}, + chunkSize: 128, + }, + { + retEvalType: types.ETDatetime, + childrenTypes: []types.EvalType{types.ETDatetime, types.ETInt, types.ETString}, + geners: []dataGenerator{ + &defaultGener{eType: types.ETDatetime, nullRation: 0.2}, + &defaultGener{eType: types.ETInt, nullRation: 0.2}, + }, + constants: []*Constant{nil, nil, {Value: types.NewStringDatum("MONTH"), RetType: types.NewFieldType(mysql.TypeString)}}, + chunkSize: 128, + }, + { + retEvalType: types.ETDatetime, + childrenTypes: []types.EvalType{types.ETDatetime, types.ETInt, types.ETString}, + geners: 
[]dataGenerator{ + &defaultGener{eType: types.ETDatetime, nullRation: 0.2}, + &defaultGener{eType: types.ETInt, nullRation: 0.2}, + }, + constants: []*Constant{nil, nil, {Value: types.NewStringDatum("QUARTER"), RetType: types.NewFieldType(mysql.TypeString)}}, + chunkSize: 128, + }, + { + retEvalType: types.ETDatetime, + childrenTypes: []types.EvalType{types.ETDatetime, types.ETInt, types.ETString}, + geners: []dataGenerator{ + &defaultGener{eType: types.ETDatetime, nullRation: 0.2}, + &defaultGener{eType: types.ETInt, nullRation: 0.2}, + }, + constants: []*Constant{nil, nil, {Value: types.NewStringDatum("YEAR"), RetType: types.NewFieldType(mysql.TypeString)}}, + chunkSize: 128, + }, + { + retEvalType: types.ETDatetime, + childrenTypes: []types.EvalType{types.ETDatetime, types.ETInt, types.ETString}, + geners: []dataGenerator{ + &defaultGener{eType: types.ETDatetime, nullRation: 0.2}, + &defaultGener{eType: types.ETInt, nullRation: 0.2}, + }, + constants: []*Constant{nil, nil, {Value: types.NewStringDatum("SECOND_MICROSECOND"), RetType: types.NewFieldType(mysql.TypeString)}}, + chunkSize: 128, + }, + { + retEvalType: types.ETDatetime, + childrenTypes: []types.EvalType{types.ETDatetime, types.ETInt, types.ETString}, + geners: []dataGenerator{ + &defaultGener{eType: types.ETDatetime, nullRation: 0.2}, + &defaultGener{eType: types.ETInt, nullRation: 0.2}, + }, + constants: []*Constant{nil, nil, {Value: types.NewStringDatum("MINUTE_MICROSECOND"), RetType: types.NewFieldType(mysql.TypeString)}}, + chunkSize: 128, + }, + { + retEvalType: types.ETDatetime, + childrenTypes: []types.EvalType{types.ETDatetime, types.ETInt, types.ETString}, + geners: []dataGenerator{ + &defaultGener{eType: types.ETDatetime, nullRation: 0.2}, + &defaultGener{eType: types.ETInt, nullRation: 0.2}, + }, + constants: []*Constant{nil, nil, {Value: types.NewStringDatum("MINUTE_SECOND"), RetType: types.NewFieldType(mysql.TypeString)}}, + chunkSize: 128, + }, + { + retEvalType: types.ETDatetime, + childrenTypes: []types.EvalType{types.ETDatetime, types.ETInt, types.ETString}, + geners: []dataGenerator{ + &defaultGener{eType: types.ETDatetime, nullRation: 0.2}, + &defaultGener{eType: types.ETInt, nullRation: 0.2}, + }, + constants: []*Constant{nil, nil, {Value: types.NewStringDatum("HOUR_MICROSECOND"), RetType: types.NewFieldType(mysql.TypeString)}}, + chunkSize: 128, + }, + { + retEvalType: types.ETDatetime, + childrenTypes: []types.EvalType{types.ETDatetime, types.ETInt, types.ETString}, + geners: []dataGenerator{ + &defaultGener{eType: types.ETDatetime, nullRation: 0.2}, + &defaultGener{eType: types.ETInt, nullRation: 0.2}, + }, + constants: []*Constant{nil, nil, {Value: types.NewStringDatum("HOUR_SECOND"), RetType: types.NewFieldType(mysql.TypeString)}}, + chunkSize: 128, + }, + { + retEvalType: types.ETDatetime, + childrenTypes: []types.EvalType{types.ETDatetime, types.ETInt, types.ETString}, + geners: []dataGenerator{ + &defaultGener{eType: types.ETDatetime, nullRation: 0.2}, + &defaultGener{eType: types.ETInt, nullRation: 0.2}, + }, + constants: []*Constant{nil, nil, {Value: types.NewStringDatum("HOUR_MINUTE"), RetType: types.NewFieldType(mysql.TypeString)}}, + chunkSize: 128, + }, + { + retEvalType: types.ETDatetime, + childrenTypes: []types.EvalType{types.ETDatetime, types.ETInt, types.ETString}, + geners: []dataGenerator{ + &defaultGener{eType: types.ETDatetime, nullRation: 0.2}, + &defaultGener{eType: types.ETInt, nullRation: 0.2}, + }, + constants: []*Constant{nil, nil, {Value: 
types.NewStringDatum("DAY_MICROSECOND"), RetType: types.NewFieldType(mysql.TypeString)}}, + chunkSize: 128, + }, + { + retEvalType: types.ETDatetime, + childrenTypes: []types.EvalType{types.ETDatetime, types.ETInt, types.ETString}, + geners: []dataGenerator{ + &defaultGener{eType: types.ETDatetime, nullRation: 0.2}, + &defaultGener{eType: types.ETInt, nullRation: 0.2}, + }, + constants: []*Constant{nil, nil, {Value: types.NewStringDatum("DAY_SECOND"), RetType: types.NewFieldType(mysql.TypeString)}}, + chunkSize: 128, + }, + { + retEvalType: types.ETDatetime, + childrenTypes: []types.EvalType{types.ETDatetime, types.ETInt, types.ETString}, + geners: []dataGenerator{ + &defaultGener{eType: types.ETDatetime, nullRation: 0.2}, + &defaultGener{eType: types.ETInt, nullRation: 0.2}, + }, + constants: []*Constant{nil, nil, {Value: types.NewStringDatum("DAY_MINUTE"), RetType: types.NewFieldType(mysql.TypeString)}}, + chunkSize: 128, + }, + { + retEvalType: types.ETDatetime, + childrenTypes: []types.EvalType{types.ETDatetime, types.ETInt, types.ETString}, + geners: []dataGenerator{ + &defaultGener{eType: types.ETDatetime, nullRation: 0.2}, + &defaultGener{eType: types.ETInt, nullRation: 0.2}, + }, + constants: []*Constant{nil, nil, {Value: types.NewStringDatum("DAY_HOUR"), RetType: types.NewFieldType(mysql.TypeString)}}, + chunkSize: 128, + }, + { + retEvalType: types.ETDatetime, + childrenTypes: []types.EvalType{types.ETDatetime, types.ETInt, types.ETString}, + geners: []dataGenerator{ + &defaultGener{eType: types.ETDatetime, nullRation: 0.2}, + &defaultGener{eType: types.ETInt, nullRation: 0.2}, + }, + constants: []*Constant{nil, nil, {Value: types.NewStringDatum("YEAR_MONTH"), RetType: types.NewFieldType(mysql.TypeString)}}, + chunkSize: 128, + }, + // builtinSubDateDatetimeRealSig + { + retEvalType: types.ETDatetime, + childrenTypes: []types.EvalType{types.ETDatetime, types.ETReal, types.ETString}, + geners: []dataGenerator{ + &defaultGener{eType: types.ETDatetime, nullRation: 0.2}, + &defaultGener{eType: types.ETReal, nullRation: 0.2}, + }, + constants: []*Constant{nil, nil, {Value: types.NewStringDatum("MICROSECOND"), RetType: types.NewFieldType(mysql.TypeString)}}, + chunkSize: 128, + }, + { + retEvalType: types.ETDatetime, + childrenTypes: []types.EvalType{types.ETDatetime, types.ETReal, types.ETString}, + geners: []dataGenerator{ + &defaultGener{eType: types.ETDatetime, nullRation: 0.2}, + &defaultGener{eType: types.ETReal, nullRation: 0.2}, + }, + constants: []*Constant{nil, nil, {Value: types.NewStringDatum("SECOND"), RetType: types.NewFieldType(mysql.TypeString)}}, + chunkSize: 128, + }, + { + retEvalType: types.ETDatetime, + childrenTypes: []types.EvalType{types.ETDatetime, types.ETReal, types.ETString}, + geners: []dataGenerator{ + &defaultGener{eType: types.ETDatetime, nullRation: 0.2}, + &defaultGener{eType: types.ETReal, nullRation: 0.2}, + }, + constants: []*Constant{nil, nil, {Value: types.NewStringDatum("MINUTE"), RetType: types.NewFieldType(mysql.TypeString)}}, + chunkSize: 128, + }, + { + retEvalType: types.ETDatetime, + childrenTypes: []types.EvalType{types.ETDatetime, types.ETReal, types.ETString}, + geners: []dataGenerator{ + &defaultGener{eType: types.ETDatetime, nullRation: 0.2}, + &defaultGener{eType: types.ETReal, nullRation: 0.2}, + }, + constants: []*Constant{nil, nil, {Value: types.NewStringDatum("HOUR"), RetType: types.NewFieldType(mysql.TypeString)}}, + chunkSize: 128, + }, + { + retEvalType: types.ETDatetime, + childrenTypes: []types.EvalType{types.ETDatetime, 
types.ETReal, types.ETString}, + geners: []dataGenerator{ + &defaultGener{eType: types.ETDatetime, nullRation: 0.2}, + &defaultGener{eType: types.ETReal, nullRation: 0.2}, + }, + constants: []*Constant{nil, nil, {Value: types.NewStringDatum("DAY"), RetType: types.NewFieldType(mysql.TypeString)}}, + chunkSize: 128, + }, + { + retEvalType: types.ETDatetime, + childrenTypes: []types.EvalType{types.ETDatetime, types.ETReal, types.ETString}, + geners: []dataGenerator{ + &defaultGener{eType: types.ETDatetime, nullRation: 0.2}, + &defaultGener{eType: types.ETReal, nullRation: 0.2}, + }, + constants: []*Constant{nil, nil, {Value: types.NewStringDatum("WEEK"), RetType: types.NewFieldType(mysql.TypeString)}}, + chunkSize: 128, + }, + { + retEvalType: types.ETDatetime, + childrenTypes: []types.EvalType{types.ETDatetime, types.ETReal, types.ETString}, + geners: []dataGenerator{ + &defaultGener{eType: types.ETDatetime, nullRation: 0.2}, + &defaultGener{eType: types.ETReal, nullRation: 0.2}, + }, + constants: []*Constant{nil, nil, {Value: types.NewStringDatum("MONTH"), RetType: types.NewFieldType(mysql.TypeString)}}, + chunkSize: 128, + }, + { + retEvalType: types.ETDatetime, + childrenTypes: []types.EvalType{types.ETDatetime, types.ETReal, types.ETString}, + geners: []dataGenerator{ + &defaultGener{eType: types.ETDatetime, nullRation: 0.2}, + &defaultGener{eType: types.ETReal, nullRation: 0.2}, + }, + constants: []*Constant{nil, nil, {Value: types.NewStringDatum("QUARTER"), RetType: types.NewFieldType(mysql.TypeString)}}, + chunkSize: 128, + }, + { + retEvalType: types.ETDatetime, + childrenTypes: []types.EvalType{types.ETDatetime, types.ETReal, types.ETString}, + geners: []dataGenerator{ + &defaultGener{eType: types.ETDatetime, nullRation: 0.2}, + &defaultGener{eType: types.ETReal, nullRation: 0.2}, + }, + constants: []*Constant{nil, nil, {Value: types.NewStringDatum("YEAR"), RetType: types.NewFieldType(mysql.TypeString)}}, + chunkSize: 128, + }, + { + retEvalType: types.ETDatetime, + childrenTypes: []types.EvalType{types.ETDatetime, types.ETReal, types.ETString}, + geners: []dataGenerator{ + &defaultGener{eType: types.ETDatetime, nullRation: 0.2}, + &defaultGener{eType: types.ETReal, nullRation: 0.2}, + }, + constants: []*Constant{nil, nil, {Value: types.NewStringDatum("SECOND_MICROSECOND"), RetType: types.NewFieldType(mysql.TypeString)}}, + chunkSize: 128, + }, + { + retEvalType: types.ETDatetime, + childrenTypes: []types.EvalType{types.ETDatetime, types.ETReal, types.ETString}, + geners: []dataGenerator{ + &defaultGener{eType: types.ETDatetime, nullRation: 0.2}, + &defaultGener{eType: types.ETReal, nullRation: 0.2}, + }, + constants: []*Constant{nil, nil, {Value: types.NewStringDatum("MINUTE_MICROSECOND"), RetType: types.NewFieldType(mysql.TypeString)}}, + chunkSize: 128, + }, + { + retEvalType: types.ETDatetime, + childrenTypes: []types.EvalType{types.ETDatetime, types.ETReal, types.ETString}, + geners: []dataGenerator{ + &defaultGener{eType: types.ETDatetime, nullRation: 0.2}, + &defaultGener{eType: types.ETReal, nullRation: 0.2}, + }, + constants: []*Constant{nil, nil, {Value: types.NewStringDatum("MINUTE_SECOND"), RetType: types.NewFieldType(mysql.TypeString)}}, + chunkSize: 128, + }, + { + retEvalType: types.ETDatetime, + childrenTypes: []types.EvalType{types.ETDatetime, types.ETReal, types.ETString}, + geners: []dataGenerator{ + &defaultGener{eType: types.ETDatetime, nullRation: 0.2}, + &defaultGener{eType: types.ETReal, nullRation: 0.2}, + }, + constants: []*Constant{nil, nil, {Value: 
types.NewStringDatum("HOUR_MICROSECOND"), RetType: types.NewFieldType(mysql.TypeString)}}, + chunkSize: 128, + }, + { + retEvalType: types.ETDatetime, + childrenTypes: []types.EvalType{types.ETDatetime, types.ETReal, types.ETString}, + geners: []dataGenerator{ + &defaultGener{eType: types.ETDatetime, nullRation: 0.2}, + &defaultGener{eType: types.ETReal, nullRation: 0.2}, + }, + constants: []*Constant{nil, nil, {Value: types.NewStringDatum("HOUR_SECOND"), RetType: types.NewFieldType(mysql.TypeString)}}, + chunkSize: 128, + }, + { + retEvalType: types.ETDatetime, + childrenTypes: []types.EvalType{types.ETDatetime, types.ETReal, types.ETString}, + geners: []dataGenerator{ + &defaultGener{eType: types.ETDatetime, nullRation: 0.2}, + &defaultGener{eType: types.ETReal, nullRation: 0.2}, + }, + constants: []*Constant{nil, nil, {Value: types.NewStringDatum("HOUR_MINUTE"), RetType: types.NewFieldType(mysql.TypeString)}}, + chunkSize: 128, + }, + { + retEvalType: types.ETDatetime, + childrenTypes: []types.EvalType{types.ETDatetime, types.ETReal, types.ETString}, + geners: []dataGenerator{ + &defaultGener{eType: types.ETDatetime, nullRation: 0.2}, + &defaultGener{eType: types.ETReal, nullRation: 0.2}, + }, + constants: []*Constant{nil, nil, {Value: types.NewStringDatum("DAY_MICROSECOND"), RetType: types.NewFieldType(mysql.TypeString)}}, + chunkSize: 128, + }, + { + retEvalType: types.ETDatetime, + childrenTypes: []types.EvalType{types.ETDatetime, types.ETReal, types.ETString}, + geners: []dataGenerator{ + &defaultGener{eType: types.ETDatetime, nullRation: 0.2}, + &defaultGener{eType: types.ETReal, nullRation: 0.2}, + }, + constants: []*Constant{nil, nil, {Value: types.NewStringDatum("DAY_SECOND"), RetType: types.NewFieldType(mysql.TypeString)}}, + chunkSize: 128, + }, + { + retEvalType: types.ETDatetime, + childrenTypes: []types.EvalType{types.ETDatetime, types.ETReal, types.ETString}, + geners: []dataGenerator{ + &defaultGener{eType: types.ETDatetime, nullRation: 0.2}, + &defaultGener{eType: types.ETReal, nullRation: 0.2}, + }, + constants: []*Constant{nil, nil, {Value: types.NewStringDatum("DAY_MINUTE"), RetType: types.NewFieldType(mysql.TypeString)}}, + chunkSize: 128, + }, + { + retEvalType: types.ETDatetime, + childrenTypes: []types.EvalType{types.ETDatetime, types.ETReal, types.ETString}, + geners: []dataGenerator{ + &defaultGener{eType: types.ETDatetime, nullRation: 0.2}, + &defaultGener{eType: types.ETReal, nullRation: 0.2}, + }, + constants: []*Constant{nil, nil, {Value: types.NewStringDatum("DAY_HOUR"), RetType: types.NewFieldType(mysql.TypeString)}}, + chunkSize: 128, + }, + { + retEvalType: types.ETDatetime, + childrenTypes: []types.EvalType{types.ETDatetime, types.ETReal, types.ETString}, + geners: []dataGenerator{ + &defaultGener{eType: types.ETDatetime, nullRation: 0.2}, + &defaultGener{eType: types.ETReal, nullRation: 0.2}, + }, + constants: []*Constant{nil, nil, {Value: types.NewStringDatum("YEAR_MONTH"), RetType: types.NewFieldType(mysql.TypeString)}}, + chunkSize: 128, + }, + // builtinSubDateDatetimeDecimalSig + { + retEvalType: types.ETDatetime, + childrenTypes: []types.EvalType{types.ETDatetime, types.ETDecimal, types.ETString}, + geners: []dataGenerator{ + &defaultGener{eType: types.ETDatetime, nullRation: 0.2}, + &defaultGener{eType: types.ETDecimal, nullRation: 0.2}, + }, + constants: []*Constant{nil, nil, {Value: types.NewStringDatum("MICROSECOND"), RetType: types.NewFieldType(mysql.TypeString)}}, + chunkSize: 128, + }, + { + retEvalType: types.ETDatetime, + childrenTypes: 
[]types.EvalType{types.ETDatetime, types.ETDecimal, types.ETString}, + geners: []dataGenerator{ + &defaultGener{eType: types.ETDatetime, nullRation: 0.2}, + &defaultGener{eType: types.ETDecimal, nullRation: 0.2}, + }, + constants: []*Constant{nil, nil, {Value: types.NewStringDatum("SECOND"), RetType: types.NewFieldType(mysql.TypeString)}}, + chunkSize: 128, + }, + { + retEvalType: types.ETDatetime, + childrenTypes: []types.EvalType{types.ETDatetime, types.ETDecimal, types.ETString}, + geners: []dataGenerator{ + &defaultGener{eType: types.ETDatetime, nullRation: 0.2}, + &defaultGener{eType: types.ETDecimal, nullRation: 0.2}, + }, + constants: []*Constant{nil, nil, {Value: types.NewStringDatum("MINUTE"), RetType: types.NewFieldType(mysql.TypeString)}}, + chunkSize: 128, + }, + { + retEvalType: types.ETDatetime, + childrenTypes: []types.EvalType{types.ETDatetime, types.ETDecimal, types.ETString}, + geners: []dataGenerator{ + &defaultGener{eType: types.ETDatetime, nullRation: 0.2}, + &defaultGener{eType: types.ETDecimal, nullRation: 0.2}, + }, + constants: []*Constant{nil, nil, {Value: types.NewStringDatum("HOUR"), RetType: types.NewFieldType(mysql.TypeString)}}, + chunkSize: 128, + }, + { + retEvalType: types.ETDatetime, + childrenTypes: []types.EvalType{types.ETDatetime, types.ETDecimal, types.ETString}, + geners: []dataGenerator{ + &defaultGener{eType: types.ETDatetime, nullRation: 0.2}, + &defaultGener{eType: types.ETDecimal, nullRation: 0.2}, + }, + constants: []*Constant{nil, nil, {Value: types.NewStringDatum("DAY"), RetType: types.NewFieldType(mysql.TypeString)}}, + chunkSize: 128, + }, + { + retEvalType: types.ETDatetime, + childrenTypes: []types.EvalType{types.ETDatetime, types.ETDecimal, types.ETString}, + geners: []dataGenerator{ + &defaultGener{eType: types.ETDatetime, nullRation: 0.2}, + &defaultGener{eType: types.ETDecimal, nullRation: 0.2}, + }, + constants: []*Constant{nil, nil, {Value: types.NewStringDatum("WEEK"), RetType: types.NewFieldType(mysql.TypeString)}}, + chunkSize: 128, + }, + { + retEvalType: types.ETDatetime, + childrenTypes: []types.EvalType{types.ETDatetime, types.ETDecimal, types.ETString}, + geners: []dataGenerator{ + &defaultGener{eType: types.ETDatetime, nullRation: 0.2}, + &defaultGener{eType: types.ETDecimal, nullRation: 0.2}, + }, + constants: []*Constant{nil, nil, {Value: types.NewStringDatum("MONTH"), RetType: types.NewFieldType(mysql.TypeString)}}, + chunkSize: 128, + }, + { + retEvalType: types.ETDatetime, + childrenTypes: []types.EvalType{types.ETDatetime, types.ETDecimal, types.ETString}, + geners: []dataGenerator{ + &defaultGener{eType: types.ETDatetime, nullRation: 0.2}, + &defaultGener{eType: types.ETDecimal, nullRation: 0.2}, + }, + constants: []*Constant{nil, nil, {Value: types.NewStringDatum("QUARTER"), RetType: types.NewFieldType(mysql.TypeString)}}, + chunkSize: 128, + }, + { + retEvalType: types.ETDatetime, + childrenTypes: []types.EvalType{types.ETDatetime, types.ETDecimal, types.ETString}, + geners: []dataGenerator{ + &defaultGener{eType: types.ETDatetime, nullRation: 0.2}, + &defaultGener{eType: types.ETDecimal, nullRation: 0.2}, + }, + constants: []*Constant{nil, nil, {Value: types.NewStringDatum("YEAR"), RetType: types.NewFieldType(mysql.TypeString)}}, + chunkSize: 128, + }, + { + retEvalType: types.ETDatetime, + childrenTypes: []types.EvalType{types.ETDatetime, types.ETDecimal, types.ETString}, + geners: []dataGenerator{ + &defaultGener{eType: types.ETDatetime, nullRation: 0.2}, + &defaultGener{eType: types.ETDecimal, nullRation: 
0.2}, + }, + constants: []*Constant{nil, nil, {Value: types.NewStringDatum("SECOND_MICROSECOND"), RetType: types.NewFieldType(mysql.TypeString)}}, + chunkSize: 128, + }, + { + retEvalType: types.ETDatetime, + childrenTypes: []types.EvalType{types.ETDatetime, types.ETDecimal, types.ETString}, + geners: []dataGenerator{ + &defaultGener{eType: types.ETDatetime, nullRation: 0.2}, + &defaultGener{eType: types.ETDecimal, nullRation: 0.2}, + }, + constants: []*Constant{nil, nil, {Value: types.NewStringDatum("MINUTE_MICROSECOND"), RetType: types.NewFieldType(mysql.TypeString)}}, + chunkSize: 128, + }, + { + retEvalType: types.ETDatetime, + childrenTypes: []types.EvalType{types.ETDatetime, types.ETDecimal, types.ETString}, + geners: []dataGenerator{ + &defaultGener{eType: types.ETDatetime, nullRation: 0.2}, + &defaultGener{eType: types.ETDecimal, nullRation: 0.2}, + }, + constants: []*Constant{nil, nil, {Value: types.NewStringDatum("MINUTE_SECOND"), RetType: types.NewFieldType(mysql.TypeString)}}, + chunkSize: 128, + }, + { + retEvalType: types.ETDatetime, + childrenTypes: []types.EvalType{types.ETDatetime, types.ETDecimal, types.ETString}, + geners: []dataGenerator{ + &defaultGener{eType: types.ETDatetime, nullRation: 0.2}, + &defaultGener{eType: types.ETDecimal, nullRation: 0.2}, + }, + constants: []*Constant{nil, nil, {Value: types.NewStringDatum("HOUR_MICROSECOND"), RetType: types.NewFieldType(mysql.TypeString)}}, + chunkSize: 128, + }, + { + retEvalType: types.ETDatetime, + childrenTypes: []types.EvalType{types.ETDatetime, types.ETDecimal, types.ETString}, + geners: []dataGenerator{ + &defaultGener{eType: types.ETDatetime, nullRation: 0.2}, + &defaultGener{eType: types.ETDecimal, nullRation: 0.2}, + }, + constants: []*Constant{nil, nil, {Value: types.NewStringDatum("HOUR_SECOND"), RetType: types.NewFieldType(mysql.TypeString)}}, + chunkSize: 128, + }, + { + retEvalType: types.ETDatetime, + childrenTypes: []types.EvalType{types.ETDatetime, types.ETDecimal, types.ETString}, + geners: []dataGenerator{ + &defaultGener{eType: types.ETDatetime, nullRation: 0.2}, + &defaultGener{eType: types.ETDecimal, nullRation: 0.2}, + }, + constants: []*Constant{nil, nil, {Value: types.NewStringDatum("HOUR_MINUTE"), RetType: types.NewFieldType(mysql.TypeString)}}, + chunkSize: 128, + }, + { + retEvalType: types.ETDatetime, + childrenTypes: []types.EvalType{types.ETDatetime, types.ETDecimal, types.ETString}, + geners: []dataGenerator{ + &defaultGener{eType: types.ETDatetime, nullRation: 0.2}, + &defaultGener{eType: types.ETDecimal, nullRation: 0.2}, + }, + constants: []*Constant{nil, nil, {Value: types.NewStringDatum("DAY_MICROSECOND"), RetType: types.NewFieldType(mysql.TypeString)}}, + chunkSize: 128, + }, + { + retEvalType: types.ETDatetime, + childrenTypes: []types.EvalType{types.ETDatetime, types.ETDecimal, types.ETString}, + geners: []dataGenerator{ + &defaultGener{eType: types.ETDatetime, nullRation: 0.2}, + &defaultGener{eType: types.ETDecimal, nullRation: 0.2}, + }, + constants: []*Constant{nil, nil, {Value: types.NewStringDatum("DAY_SECOND"), RetType: types.NewFieldType(mysql.TypeString)}}, + chunkSize: 128, + }, + { + retEvalType: types.ETDatetime, + childrenTypes: []types.EvalType{types.ETDatetime, types.ETDecimal, types.ETString}, + geners: []dataGenerator{ + &defaultGener{eType: types.ETDatetime, nullRation: 0.2}, + &defaultGener{eType: types.ETDecimal, nullRation: 0.2}, + }, + constants: []*Constant{nil, nil, {Value: types.NewStringDatum("DAY_MINUTE"), RetType: types.NewFieldType(mysql.TypeString)}}, 
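+ // every case in this table evaluates 128-row chunks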
+ chunkSize: 128, + }, + { + retEvalType: types.ETDatetime, + childrenTypes: []types.EvalType{types.ETDatetime, types.ETDecimal, types.ETString}, + geners: []dataGenerator{ + &defaultGener{eType: types.ETDatetime, nullRation: 0.2}, + &defaultGener{eType: types.ETDecimal, nullRation: 0.2}, + }, + constants: []*Constant{nil, nil, {Value: types.NewStringDatum("DAY_HOUR"), RetType: types.NewFieldType(mysql.TypeString)}}, + chunkSize: 128, + }, + { + retEvalType: types.ETDatetime, + childrenTypes: []types.EvalType{types.ETDatetime, types.ETDecimal, types.ETString}, + geners: []dataGenerator{ + &defaultGener{eType: types.ETDatetime, nullRation: 0.2}, + &defaultGener{eType: types.ETDecimal, nullRation: 0.2}, + }, + constants: []*Constant{nil, nil, {Value: types.NewStringDatum("YEAR_MONTH"), RetType: types.NewFieldType(mysql.TypeString)}}, + chunkSize: 128, + }, + // builtinSubDateDurationStringSig + { + retEvalType: types.ETDuration, + childrenTypes: []types.EvalType{types.ETDuration, types.ETString, types.ETString}, + geners: []dataGenerator{ + &defaultGener{eType: types.ETDuration, nullRation: 0.2}, + &numStrGener{rangeInt64Gener{math.MinInt32 + 1, math.MaxInt32}}, + }, + constants: []*Constant{nil, nil, {Value: types.NewStringDatum("MICROSECOND"), RetType: types.NewFieldType(mysql.TypeString)}}, + chunkSize: 128, + }, + { + retEvalType: types.ETDuration, + childrenTypes: []types.EvalType{types.ETDuration, types.ETString, types.ETString}, + geners: []dataGenerator{ + &defaultGener{eType: types.ETDuration, nullRation: 0.2}, + &numStrGener{rangeInt64Gener{math.MinInt32 + 1, math.MaxInt32}}, + }, + constants: []*Constant{nil, nil, {Value: types.NewStringDatum("SECOND"), RetType: types.NewFieldType(mysql.TypeString)}}, + chunkSize: 128, + }, + { + retEvalType: types.ETDuration, + childrenTypes: []types.EvalType{types.ETDuration, types.ETString, types.ETString}, + geners: []dataGenerator{ + &defaultGener{eType: types.ETDuration, nullRation: 0.2}, + &numStrGener{rangeInt64Gener{math.MinInt32 + 1, math.MaxInt32}}, + }, + constants: []*Constant{nil, nil, {Value: types.NewStringDatum("MINUTE"), RetType: types.NewFieldType(mysql.TypeString)}}, + chunkSize: 128, + }, + { + retEvalType: types.ETDuration, + childrenTypes: []types.EvalType{types.ETDuration, types.ETString, types.ETString}, + geners: []dataGenerator{ + &defaultGener{eType: types.ETDuration, nullRation: 0.2}, + &numStrGener{rangeInt64Gener{math.MinInt32 + 1, math.MaxInt32}}, + }, + constants: []*Constant{nil, nil, {Value: types.NewStringDatum("HOUR"), RetType: types.NewFieldType(mysql.TypeString)}}, + chunkSize: 128, + }, + { + retEvalType: types.ETDuration, + childrenTypes: []types.EvalType{types.ETDuration, types.ETString, types.ETString}, + geners: []dataGenerator{ + &defaultGener{eType: types.ETDuration, nullRation: 0.2}, + &numStrGener{rangeInt64Gener{math.MinInt32 + 1, math.MaxInt32}}, + }, + constants: []*Constant{nil, nil, {Value: types.NewStringDatum("DAY"), RetType: types.NewFieldType(mysql.TypeString)}}, + chunkSize: 128, + }, + { + retEvalType: types.ETDuration, + childrenTypes: []types.EvalType{types.ETDuration, types.ETString, types.ETString}, + geners: []dataGenerator{ + &defaultGener{eType: types.ETDuration, nullRation: 0.2}, + &numStrGener{rangeInt64Gener{math.MinInt32 + 1, math.MaxInt32}}, + }, + constants: []*Constant{nil, nil, {Value: types.NewStringDatum("WEEK"), RetType: types.NewFieldType(mysql.TypeString)}}, + chunkSize: 128, + }, + { + retEvalType: types.ETDuration, + childrenTypes: []types.EvalType{types.ETDuration, 
types.ETString, types.ETString}, + geners: []dataGenerator{ + &defaultGener{eType: types.ETDuration, nullRation: 0.2}, + &numStrGener{rangeInt64Gener{math.MinInt32 + 1, math.MaxInt32}}, + }, + constants: []*Constant{nil, nil, {Value: types.NewStringDatum("MONTH"), RetType: types.NewFieldType(mysql.TypeString)}}, + chunkSize: 128, + }, + { + retEvalType: types.ETDuration, + childrenTypes: []types.EvalType{types.ETDuration, types.ETString, types.ETString}, + geners: []dataGenerator{ + &defaultGener{eType: types.ETDuration, nullRation: 0.2}, + &numStrGener{rangeInt64Gener{math.MinInt32 + 1, math.MaxInt32}}, + }, + constants: []*Constant{nil, nil, {Value: types.NewStringDatum("QUARTER"), RetType: types.NewFieldType(mysql.TypeString)}}, + chunkSize: 128, + }, + { + retEvalType: types.ETDuration, + childrenTypes: []types.EvalType{types.ETDuration, types.ETString, types.ETString}, + geners: []dataGenerator{ + &defaultGener{eType: types.ETDuration, nullRation: 0.2}, + &numStrGener{rangeInt64Gener{math.MinInt32 + 1, math.MaxInt32}}, + }, + constants: []*Constant{nil, nil, {Value: types.NewStringDatum("YEAR"), RetType: types.NewFieldType(mysql.TypeString)}}, + chunkSize: 128, + }, + { + retEvalType: types.ETDuration, + childrenTypes: []types.EvalType{types.ETDuration, types.ETString, types.ETString}, + geners: []dataGenerator{ + &defaultGener{eType: types.ETDuration, nullRation: 0.2}, + &numStrGener{rangeInt64Gener{math.MinInt32 + 1, math.MaxInt32}}, + }, + constants: []*Constant{nil, nil, {Value: types.NewStringDatum("SECOND_MICROSECOND"), RetType: types.NewFieldType(mysql.TypeString)}}, + chunkSize: 128, + }, + { + retEvalType: types.ETDuration, + childrenTypes: []types.EvalType{types.ETDuration, types.ETString, types.ETString}, + geners: []dataGenerator{ + &defaultGener{eType: types.ETDuration, nullRation: 0.2}, + &numStrGener{rangeInt64Gener{math.MinInt32 + 1, math.MaxInt32}}, + }, + constants: []*Constant{nil, nil, {Value: types.NewStringDatum("MINUTE_MICROSECOND"), RetType: types.NewFieldType(mysql.TypeString)}}, + chunkSize: 128, + }, + { + retEvalType: types.ETDuration, + childrenTypes: []types.EvalType{types.ETDuration, types.ETString, types.ETString}, + geners: []dataGenerator{ + &defaultGener{eType: types.ETDuration, nullRation: 0.2}, + &numStrGener{rangeInt64Gener{math.MinInt32 + 1, math.MaxInt32}}, + }, + constants: []*Constant{nil, nil, {Value: types.NewStringDatum("MINUTE_SECOND"), RetType: types.NewFieldType(mysql.TypeString)}}, + chunkSize: 128, + }, + { + retEvalType: types.ETDuration, + childrenTypes: []types.EvalType{types.ETDuration, types.ETString, types.ETString}, + geners: []dataGenerator{ + &defaultGener{eType: types.ETDuration, nullRation: 0.2}, + &numStrGener{rangeInt64Gener{math.MinInt32 + 1, math.MaxInt32}}, + }, + constants: []*Constant{nil, nil, {Value: types.NewStringDatum("HOUR_MICROSECOND"), RetType: types.NewFieldType(mysql.TypeString)}}, + chunkSize: 128, + }, + { + retEvalType: types.ETDuration, + childrenTypes: []types.EvalType{types.ETDuration, types.ETString, types.ETString}, + geners: []dataGenerator{ + &defaultGener{eType: types.ETDuration, nullRation: 0.2}, + &numStrGener{rangeInt64Gener{math.MinInt32 + 1, math.MaxInt32}}, + }, + constants: []*Constant{nil, nil, {Value: types.NewStringDatum("HOUR_SECOND"), RetType: types.NewFieldType(mysql.TypeString)}}, + chunkSize: 128, + }, + { + retEvalType: types.ETDuration, + childrenTypes: []types.EvalType{types.ETDuration, types.ETString, types.ETString}, + geners: []dataGenerator{ + &defaultGener{eType: 
types.ETDuration, nullRation: 0.2}, + &numStrGener{rangeInt64Gener{math.MinInt32 + 1, math.MaxInt32}}, + }, + constants: []*Constant{nil, nil, {Value: types.NewStringDatum("HOUR_MINUTE"), RetType: types.NewFieldType(mysql.TypeString)}}, + chunkSize: 128, + }, + { + retEvalType: types.ETDuration, + childrenTypes: []types.EvalType{types.ETDuration, types.ETString, types.ETString}, + geners: []dataGenerator{ + &defaultGener{eType: types.ETDuration, nullRation: 0.2}, + &numStrGener{rangeInt64Gener{math.MinInt32 + 1, math.MaxInt32}}, + }, + constants: []*Constant{nil, nil, {Value: types.NewStringDatum("DAY_MICROSECOND"), RetType: types.NewFieldType(mysql.TypeString)}}, + chunkSize: 128, + }, + { + retEvalType: types.ETDuration, + childrenTypes: []types.EvalType{types.ETDuration, types.ETString, types.ETString}, + geners: []dataGenerator{ + &defaultGener{eType: types.ETDuration, nullRation: 0.2}, + &numStrGener{rangeInt64Gener{math.MinInt32 + 1, math.MaxInt32}}, + }, + constants: []*Constant{nil, nil, {Value: types.NewStringDatum("DAY_SECOND"), RetType: types.NewFieldType(mysql.TypeString)}}, + chunkSize: 128, + }, + { + retEvalType: types.ETDuration, + childrenTypes: []types.EvalType{types.ETDuration, types.ETString, types.ETString}, + geners: []dataGenerator{ + &defaultGener{eType: types.ETDuration, nullRation: 0.2}, + &numStrGener{rangeInt64Gener{math.MinInt32 + 1, math.MaxInt32}}, + }, + constants: []*Constant{nil, nil, {Value: types.NewStringDatum("DAY_MINUTE"), RetType: types.NewFieldType(mysql.TypeString)}}, + chunkSize: 128, + }, + { + retEvalType: types.ETDuration, + childrenTypes: []types.EvalType{types.ETDuration, types.ETString, types.ETString}, + geners: []dataGenerator{ + &defaultGener{eType: types.ETDuration, nullRation: 0.2}, + &numStrGener{rangeInt64Gener{math.MinInt32 + 1, math.MaxInt32}}, + }, + constants: []*Constant{nil, nil, {Value: types.NewStringDatum("DAY_HOUR"), RetType: types.NewFieldType(mysql.TypeString)}}, + chunkSize: 128, + }, + { + retEvalType: types.ETDuration, + childrenTypes: []types.EvalType{types.ETDuration, types.ETString, types.ETString}, + geners: []dataGenerator{ + &defaultGener{eType: types.ETDuration, nullRation: 0.2}, + &numStrGener{rangeInt64Gener{math.MinInt32 + 1, math.MaxInt32}}, + }, + constants: []*Constant{nil, nil, {Value: types.NewStringDatum("YEAR_MONTH"), RetType: types.NewFieldType(mysql.TypeString)}}, + chunkSize: 128, + }, + // builtinSubDateDurationIntSig + { + retEvalType: types.ETDuration, + childrenTypes: []types.EvalType{types.ETDuration, types.ETInt, types.ETString}, + geners: []dataGenerator{ + &defaultGener{eType: types.ETDuration, nullRation: 0.2}, + &defaultGener{eType: types.ETInt, nullRation: 0.2}, + }, + constants: []*Constant{nil, nil, {Value: types.NewStringDatum("MICROSECOND"), RetType: types.NewFieldType(mysql.TypeString)}}, + chunkSize: 128, + }, + { + retEvalType: types.ETDuration, + childrenTypes: []types.EvalType{types.ETDuration, types.ETInt, types.ETString}, + geners: []dataGenerator{ + &defaultGener{eType: types.ETDuration, nullRation: 0.2}, + &defaultGener{eType: types.ETInt, nullRation: 0.2}, + }, + constants: []*Constant{nil, nil, {Value: types.NewStringDatum("SECOND"), RetType: types.NewFieldType(mysql.TypeString)}}, + chunkSize: 128, + }, + { + retEvalType: types.ETDuration, + childrenTypes: []types.EvalType{types.ETDuration, types.ETInt, types.ETString}, + geners: []dataGenerator{ + &defaultGener{eType: types.ETDuration, nullRation: 0.2}, + &defaultGener{eType: types.ETInt, nullRation: 0.2}, + }, + 
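// both generated columns use the same ~20% NULL ratio +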
constants: []*Constant{nil, nil, {Value: types.NewStringDatum("MINUTE"), RetType: types.NewFieldType(mysql.TypeString)}}, + chunkSize: 128, + }, + { + retEvalType: types.ETDuration, + childrenTypes: []types.EvalType{types.ETDuration, types.ETInt, types.ETString}, + geners: []dataGenerator{ + &defaultGener{eType: types.ETDuration, nullRation: 0.2}, + &defaultGener{eType: types.ETInt, nullRation: 0.2}, + }, + constants: []*Constant{nil, nil, {Value: types.NewStringDatum("HOUR"), RetType: types.NewFieldType(mysql.TypeString)}}, + chunkSize: 128, + }, + { + retEvalType: types.ETDuration, + childrenTypes: []types.EvalType{types.ETDuration, types.ETInt, types.ETString}, + geners: []dataGenerator{ + &defaultGener{eType: types.ETDuration, nullRation: 0.2}, + &defaultGener{eType: types.ETInt, nullRation: 0.2}, + }, + constants: []*Constant{nil, nil, {Value: types.NewStringDatum("DAY"), RetType: types.NewFieldType(mysql.TypeString)}}, + chunkSize: 128, + }, + { + retEvalType: types.ETDuration, + childrenTypes: []types.EvalType{types.ETDuration, types.ETInt, types.ETString}, + geners: []dataGenerator{ + &defaultGener{eType: types.ETDuration, nullRation: 0.2}, + &defaultGener{eType: types.ETInt, nullRation: 0.2}, + }, + constants: []*Constant{nil, nil, {Value: types.NewStringDatum("WEEK"), RetType: types.NewFieldType(mysql.TypeString)}}, + chunkSize: 128, + }, + { + retEvalType: types.ETDuration, + childrenTypes: []types.EvalType{types.ETDuration, types.ETInt, types.ETString}, + geners: []dataGenerator{ + &defaultGener{eType: types.ETDuration, nullRation: 0.2}, + &defaultGener{eType: types.ETInt, nullRation: 0.2}, + }, + constants: []*Constant{nil, nil, {Value: types.NewStringDatum("MONTH"), RetType: types.NewFieldType(mysql.TypeString)}}, + chunkSize: 128, + }, + { + retEvalType: types.ETDuration, + childrenTypes: []types.EvalType{types.ETDuration, types.ETInt, types.ETString}, + geners: []dataGenerator{ + &defaultGener{eType: types.ETDuration, nullRation: 0.2}, + &defaultGener{eType: types.ETInt, nullRation: 0.2}, + }, + constants: []*Constant{nil, nil, {Value: types.NewStringDatum("QUARTER"), RetType: types.NewFieldType(mysql.TypeString)}}, + chunkSize: 128, + }, + { + retEvalType: types.ETDuration, + childrenTypes: []types.EvalType{types.ETDuration, types.ETInt, types.ETString}, + geners: []dataGenerator{ + &defaultGener{eType: types.ETDuration, nullRation: 0.2}, + &defaultGener{eType: types.ETInt, nullRation: 0.2}, + }, + constants: []*Constant{nil, nil, {Value: types.NewStringDatum("YEAR"), RetType: types.NewFieldType(mysql.TypeString)}}, + chunkSize: 128, + }, + { + retEvalType: types.ETDuration, + childrenTypes: []types.EvalType{types.ETDuration, types.ETInt, types.ETString}, + geners: []dataGenerator{ + &defaultGener{eType: types.ETDuration, nullRation: 0.2}, + &defaultGener{eType: types.ETInt, nullRation: 0.2}, + }, + constants: []*Constant{nil, nil, {Value: types.NewStringDatum("SECOND_MICROSECOND"), RetType: types.NewFieldType(mysql.TypeString)}}, + chunkSize: 128, + }, + { + retEvalType: types.ETDuration, + childrenTypes: []types.EvalType{types.ETDuration, types.ETInt, types.ETString}, + geners: []dataGenerator{ + &defaultGener{eType: types.ETDuration, nullRation: 0.2}, + &defaultGener{eType: types.ETInt, nullRation: 0.2}, + }, + constants: []*Constant{nil, nil, {Value: types.NewStringDatum("MINUTE_MICROSECOND"), RetType: types.NewFieldType(mysql.TypeString)}}, + chunkSize: 128, + }, + { + retEvalType: types.ETDuration, + childrenTypes: []types.EvalType{types.ETDuration, types.ETInt, 
types.ETString}, + geners: []dataGenerator{ + &defaultGener{eType: types.ETDuration, nullRation: 0.2}, + &defaultGener{eType: types.ETInt, nullRation: 0.2}, + }, + constants: []*Constant{nil, nil, {Value: types.NewStringDatum("MINUTE_SECOND"), RetType: types.NewFieldType(mysql.TypeString)}}, + chunkSize: 128, + }, + { + retEvalType: types.ETDuration, + childrenTypes: []types.EvalType{types.ETDuration, types.ETInt, types.ETString}, + geners: []dataGenerator{ + &defaultGener{eType: types.ETDuration, nullRation: 0.2}, + &defaultGener{eType: types.ETInt, nullRation: 0.2}, + }, + constants: []*Constant{nil, nil, {Value: types.NewStringDatum("HOUR_MICROSECOND"), RetType: types.NewFieldType(mysql.TypeString)}}, + chunkSize: 128, + }, + { + retEvalType: types.ETDuration, + childrenTypes: []types.EvalType{types.ETDuration, types.ETInt, types.ETString}, + geners: []dataGenerator{ + &defaultGener{eType: types.ETDuration, nullRation: 0.2}, + &defaultGener{eType: types.ETInt, nullRation: 0.2}, + }, + constants: []*Constant{nil, nil, {Value: types.NewStringDatum("HOUR_SECOND"), RetType: types.NewFieldType(mysql.TypeString)}}, + chunkSize: 128, + }, + { + retEvalType: types.ETDuration, + childrenTypes: []types.EvalType{types.ETDuration, types.ETInt, types.ETString}, + geners: []dataGenerator{ + &defaultGener{eType: types.ETDuration, nullRation: 0.2}, + &defaultGener{eType: types.ETInt, nullRation: 0.2}, + }, + constants: []*Constant{nil, nil, {Value: types.NewStringDatum("HOUR_MINUTE"), RetType: types.NewFieldType(mysql.TypeString)}}, + chunkSize: 128, + }, + { + retEvalType: types.ETDuration, + childrenTypes: []types.EvalType{types.ETDuration, types.ETInt, types.ETString}, + geners: []dataGenerator{ + &defaultGener{eType: types.ETDuration, nullRation: 0.2}, + &defaultGener{eType: types.ETInt, nullRation: 0.2}, + }, + constants: []*Constant{nil, nil, {Value: types.NewStringDatum("DAY_MICROSECOND"), RetType: types.NewFieldType(mysql.TypeString)}}, + chunkSize: 128, + }, + { + retEvalType: types.ETDuration, + childrenTypes: []types.EvalType{types.ETDuration, types.ETInt, types.ETString}, + geners: []dataGenerator{ + &defaultGener{eType: types.ETDuration, nullRation: 0.2}, + &defaultGener{eType: types.ETInt, nullRation: 0.2}, + }, + constants: []*Constant{nil, nil, {Value: types.NewStringDatum("DAY_SECOND"), RetType: types.NewFieldType(mysql.TypeString)}}, + chunkSize: 128, + }, + { + retEvalType: types.ETDuration, + childrenTypes: []types.EvalType{types.ETDuration, types.ETInt, types.ETString}, + geners: []dataGenerator{ + &defaultGener{eType: types.ETDuration, nullRation: 0.2}, + &defaultGener{eType: types.ETInt, nullRation: 0.2}, + }, + constants: []*Constant{nil, nil, {Value: types.NewStringDatum("DAY_MINUTE"), RetType: types.NewFieldType(mysql.TypeString)}}, + chunkSize: 128, + }, + { + retEvalType: types.ETDuration, + childrenTypes: []types.EvalType{types.ETDuration, types.ETInt, types.ETString}, + geners: []dataGenerator{ + &defaultGener{eType: types.ETDuration, nullRation: 0.2}, + &defaultGener{eType: types.ETInt, nullRation: 0.2}, + }, + constants: []*Constant{nil, nil, {Value: types.NewStringDatum("DAY_HOUR"), RetType: types.NewFieldType(mysql.TypeString)}}, + chunkSize: 128, + }, + { + retEvalType: types.ETDuration, + childrenTypes: []types.EvalType{types.ETDuration, types.ETInt, types.ETString}, + geners: []dataGenerator{ + &defaultGener{eType: types.ETDuration, nullRation: 0.2}, + &defaultGener{eType: types.ETInt, nullRation: 0.2}, + }, + constants: []*Constant{nil, nil, {Value: 
types.NewStringDatum("YEAR_MONTH"), RetType: types.NewFieldType(mysql.TypeString)}}, + chunkSize: 128, + }, + // builtinSubDateDurationRealSig + { + retEvalType: types.ETDuration, + childrenTypes: []types.EvalType{types.ETDuration, types.ETReal, types.ETString}, + geners: []dataGenerator{ + &defaultGener{eType: types.ETDuration, nullRation: 0.2}, + &defaultGener{eType: types.ETReal, nullRation: 0.2}, + }, + constants: []*Constant{nil, nil, {Value: types.NewStringDatum("MICROSECOND"), RetType: types.NewFieldType(mysql.TypeString)}}, + chunkSize: 128, + }, + { + retEvalType: types.ETDuration, + childrenTypes: []types.EvalType{types.ETDuration, types.ETReal, types.ETString}, + geners: []dataGenerator{ + &defaultGener{eType: types.ETDuration, nullRation: 0.2}, + &defaultGener{eType: types.ETReal, nullRation: 0.2}, + }, + constants: []*Constant{nil, nil, {Value: types.NewStringDatum("SECOND"), RetType: types.NewFieldType(mysql.TypeString)}}, + chunkSize: 128, + }, + { + retEvalType: types.ETDuration, + childrenTypes: []types.EvalType{types.ETDuration, types.ETReal, types.ETString}, + geners: []dataGenerator{ + &defaultGener{eType: types.ETDuration, nullRation: 0.2}, + &defaultGener{eType: types.ETReal, nullRation: 0.2}, + }, + constants: []*Constant{nil, nil, {Value: types.NewStringDatum("MINUTE"), RetType: types.NewFieldType(mysql.TypeString)}}, + chunkSize: 128, + }, + { + retEvalType: types.ETDuration, + childrenTypes: []types.EvalType{types.ETDuration, types.ETReal, types.ETString}, + geners: []dataGenerator{ + &defaultGener{eType: types.ETDuration, nullRation: 0.2}, + &defaultGener{eType: types.ETReal, nullRation: 0.2}, + }, + constants: []*Constant{nil, nil, {Value: types.NewStringDatum("HOUR"), RetType: types.NewFieldType(mysql.TypeString)}}, + chunkSize: 128, + }, + { + retEvalType: types.ETDuration, + childrenTypes: []types.EvalType{types.ETDuration, types.ETReal, types.ETString}, + geners: []dataGenerator{ + &defaultGener{eType: types.ETDuration, nullRation: 0.2}, + &defaultGener{eType: types.ETReal, nullRation: 0.2}, + }, + constants: []*Constant{nil, nil, {Value: types.NewStringDatum("DAY"), RetType: types.NewFieldType(mysql.TypeString)}}, + chunkSize: 128, + }, + { + retEvalType: types.ETDuration, + childrenTypes: []types.EvalType{types.ETDuration, types.ETReal, types.ETString}, + geners: []dataGenerator{ + &defaultGener{eType: types.ETDuration, nullRation: 0.2}, + &defaultGener{eType: types.ETReal, nullRation: 0.2}, + }, + constants: []*Constant{nil, nil, {Value: types.NewStringDatum("WEEK"), RetType: types.NewFieldType(mysql.TypeString)}}, + chunkSize: 128, + }, + { + retEvalType: types.ETDuration, + childrenTypes: []types.EvalType{types.ETDuration, types.ETReal, types.ETString}, + geners: []dataGenerator{ + &defaultGener{eType: types.ETDuration, nullRation: 0.2}, + &defaultGener{eType: types.ETReal, nullRation: 0.2}, + }, + constants: []*Constant{nil, nil, {Value: types.NewStringDatum("MONTH"), RetType: types.NewFieldType(mysql.TypeString)}}, + chunkSize: 128, + }, + { + retEvalType: types.ETDuration, + childrenTypes: []types.EvalType{types.ETDuration, types.ETReal, types.ETString}, + geners: []dataGenerator{ + &defaultGener{eType: types.ETDuration, nullRation: 0.2}, + &defaultGener{eType: types.ETReal, nullRation: 0.2}, + }, + constants: []*Constant{nil, nil, {Value: types.NewStringDatum("QUARTER"), RetType: types.NewFieldType(mysql.TypeString)}}, + chunkSize: 128, + }, + { + retEvalType: types.ETDuration, + childrenTypes: []types.EvalType{types.ETDuration, types.ETReal, 
types.ETString}, + geners: []dataGenerator{ + &defaultGener{eType: types.ETDuration, nullRation: 0.2}, + &defaultGener{eType: types.ETReal, nullRation: 0.2}, + }, + constants: []*Constant{nil, nil, {Value: types.NewStringDatum("YEAR"), RetType: types.NewFieldType(mysql.TypeString)}}, + chunkSize: 128, + }, + { + retEvalType: types.ETDuration, + childrenTypes: []types.EvalType{types.ETDuration, types.ETReal, types.ETString}, + geners: []dataGenerator{ + &defaultGener{eType: types.ETDuration, nullRation: 0.2}, + &defaultGener{eType: types.ETReal, nullRation: 0.2}, + }, + constants: []*Constant{nil, nil, {Value: types.NewStringDatum("SECOND_MICROSECOND"), RetType: types.NewFieldType(mysql.TypeString)}}, + chunkSize: 128, + }, + { + retEvalType: types.ETDuration, + childrenTypes: []types.EvalType{types.ETDuration, types.ETReal, types.ETString}, + geners: []dataGenerator{ + &defaultGener{eType: types.ETDuration, nullRation: 0.2}, + &defaultGener{eType: types.ETReal, nullRation: 0.2}, + }, + constants: []*Constant{nil, nil, {Value: types.NewStringDatum("MINUTE_MICROSECOND"), RetType: types.NewFieldType(mysql.TypeString)}}, + chunkSize: 128, + }, + { + retEvalType: types.ETDuration, + childrenTypes: []types.EvalType{types.ETDuration, types.ETReal, types.ETString}, + geners: []dataGenerator{ + &defaultGener{eType: types.ETDuration, nullRation: 0.2}, + &defaultGener{eType: types.ETReal, nullRation: 0.2}, + }, + constants: []*Constant{nil, nil, {Value: types.NewStringDatum("MINUTE_SECOND"), RetType: types.NewFieldType(mysql.TypeString)}}, + chunkSize: 128, + }, + { + retEvalType: types.ETDuration, + childrenTypes: []types.EvalType{types.ETDuration, types.ETReal, types.ETString}, + geners: []dataGenerator{ + &defaultGener{eType: types.ETDuration, nullRation: 0.2}, + &defaultGener{eType: types.ETReal, nullRation: 0.2}, + }, + constants: []*Constant{nil, nil, {Value: types.NewStringDatum("HOUR_MICROSECOND"), RetType: types.NewFieldType(mysql.TypeString)}}, + chunkSize: 128, + }, + { + retEvalType: types.ETDuration, + childrenTypes: []types.EvalType{types.ETDuration, types.ETReal, types.ETString}, + geners: []dataGenerator{ + &defaultGener{eType: types.ETDuration, nullRation: 0.2}, + &defaultGener{eType: types.ETReal, nullRation: 0.2}, + }, + constants: []*Constant{nil, nil, {Value: types.NewStringDatum("HOUR_SECOND"), RetType: types.NewFieldType(mysql.TypeString)}}, + chunkSize: 128, + }, + { + retEvalType: types.ETDuration, + childrenTypes: []types.EvalType{types.ETDuration, types.ETReal, types.ETString}, + geners: []dataGenerator{ + &defaultGener{eType: types.ETDuration, nullRation: 0.2}, + &defaultGener{eType: types.ETReal, nullRation: 0.2}, + }, + constants: []*Constant{nil, nil, {Value: types.NewStringDatum("HOUR_MINUTE"), RetType: types.NewFieldType(mysql.TypeString)}}, + chunkSize: 128, + }, + { + retEvalType: types.ETDuration, + childrenTypes: []types.EvalType{types.ETDuration, types.ETReal, types.ETString}, + geners: []dataGenerator{ + &defaultGener{eType: types.ETDuration, nullRation: 0.2}, + &defaultGener{eType: types.ETReal, nullRation: 0.2}, + }, + constants: []*Constant{nil, nil, {Value: types.NewStringDatum("DAY_MICROSECOND"), RetType: types.NewFieldType(mysql.TypeString)}}, + chunkSize: 128, + }, + { + retEvalType: types.ETDuration, + childrenTypes: []types.EvalType{types.ETDuration, types.ETReal, types.ETString}, + geners: []dataGenerator{ + &defaultGener{eType: types.ETDuration, nullRation: 0.2}, + &defaultGener{eType: types.ETReal, nullRation: 0.2}, + }, + constants: []*Constant{nil, 
nil, {Value: types.NewStringDatum("DAY_SECOND"), RetType: types.NewFieldType(mysql.TypeString)}}, + chunkSize: 128, + }, + { + retEvalType: types.ETDuration, + childrenTypes: []types.EvalType{types.ETDuration, types.ETReal, types.ETString}, + geners: []dataGenerator{ + &defaultGener{eType: types.ETDuration, nullRation: 0.2}, + &defaultGener{eType: types.ETReal, nullRation: 0.2}, + }, + constants: []*Constant{nil, nil, {Value: types.NewStringDatum("DAY_MINUTE"), RetType: types.NewFieldType(mysql.TypeString)}}, + chunkSize: 128, + }, + { + retEvalType: types.ETDuration, + childrenTypes: []types.EvalType{types.ETDuration, types.ETReal, types.ETString}, + geners: []dataGenerator{ + &defaultGener{eType: types.ETDuration, nullRation: 0.2}, + &defaultGener{eType: types.ETReal, nullRation: 0.2}, + }, + constants: []*Constant{nil, nil, {Value: types.NewStringDatum("DAY_HOUR"), RetType: types.NewFieldType(mysql.TypeString)}}, + chunkSize: 128, + }, + { + retEvalType: types.ETDuration, + childrenTypes: []types.EvalType{types.ETDuration, types.ETReal, types.ETString}, + geners: []dataGenerator{ + &defaultGener{eType: types.ETDuration, nullRation: 0.2}, + &defaultGener{eType: types.ETReal, nullRation: 0.2}, + }, + constants: []*Constant{nil, nil, {Value: types.NewStringDatum("YEAR_MONTH"), RetType: types.NewFieldType(mysql.TypeString)}}, + chunkSize: 128, + }, + // builtinSubDateDurationDecimalSig + { + retEvalType: types.ETDuration, + childrenTypes: []types.EvalType{types.ETDuration, types.ETDecimal, types.ETString}, + geners: []dataGenerator{ + &defaultGener{eType: types.ETDuration, nullRation: 0.2}, + &defaultGener{eType: types.ETDecimal, nullRation: 0.2}, + }, + constants: []*Constant{nil, nil, {Value: types.NewStringDatum("MICROSECOND"), RetType: types.NewFieldType(mysql.TypeString)}}, + chunkSize: 128, + }, + { + retEvalType: types.ETDuration, + childrenTypes: []types.EvalType{types.ETDuration, types.ETDecimal, types.ETString}, + geners: []dataGenerator{ + &defaultGener{eType: types.ETDuration, nullRation: 0.2}, + &defaultGener{eType: types.ETDecimal, nullRation: 0.2}, + }, + constants: []*Constant{nil, nil, {Value: types.NewStringDatum("SECOND"), RetType: types.NewFieldType(mysql.TypeString)}}, + chunkSize: 128, + }, + { + retEvalType: types.ETDuration, + childrenTypes: []types.EvalType{types.ETDuration, types.ETDecimal, types.ETString}, + geners: []dataGenerator{ + &defaultGener{eType: types.ETDuration, nullRation: 0.2}, + &defaultGener{eType: types.ETDecimal, nullRation: 0.2}, + }, + constants: []*Constant{nil, nil, {Value: types.NewStringDatum("MINUTE"), RetType: types.NewFieldType(mysql.TypeString)}}, + chunkSize: 128, + }, + { + retEvalType: types.ETDuration, + childrenTypes: []types.EvalType{types.ETDuration, types.ETDecimal, types.ETString}, + geners: []dataGenerator{ + &defaultGener{eType: types.ETDuration, nullRation: 0.2}, + &defaultGener{eType: types.ETDecimal, nullRation: 0.2}, + }, + constants: []*Constant{nil, nil, {Value: types.NewStringDatum("HOUR"), RetType: types.NewFieldType(mysql.TypeString)}}, + chunkSize: 128, + }, + { + retEvalType: types.ETDuration, + childrenTypes: []types.EvalType{types.ETDuration, types.ETDecimal, types.ETString}, + geners: []dataGenerator{ + &defaultGener{eType: types.ETDuration, nullRation: 0.2}, + &defaultGener{eType: types.ETDecimal, nullRation: 0.2}, + }, + constants: []*Constant{nil, nil, {Value: types.NewStringDatum("DAY"), RetType: types.NewFieldType(mysql.TypeString)}}, + chunkSize: 128, + }, + { + retEvalType: types.ETDuration, + 
childrenTypes: []types.EvalType{types.ETDuration, types.ETDecimal, types.ETString}, + geners: []dataGenerator{ + &defaultGener{eType: types.ETDuration, nullRation: 0.2}, + &defaultGener{eType: types.ETDecimal, nullRation: 0.2}, + }, + constants: []*Constant{nil, nil, {Value: types.NewStringDatum("WEEK"), RetType: types.NewFieldType(mysql.TypeString)}}, + chunkSize: 128, + }, + { + retEvalType: types.ETDuration, + childrenTypes: []types.EvalType{types.ETDuration, types.ETDecimal, types.ETString}, + geners: []dataGenerator{ + &defaultGener{eType: types.ETDuration, nullRation: 0.2}, + &defaultGener{eType: types.ETDecimal, nullRation: 0.2}, + }, + constants: []*Constant{nil, nil, {Value: types.NewStringDatum("MONTH"), RetType: types.NewFieldType(mysql.TypeString)}}, + chunkSize: 128, + }, + { + retEvalType: types.ETDuration, + childrenTypes: []types.EvalType{types.ETDuration, types.ETDecimal, types.ETString}, + geners: []dataGenerator{ + &defaultGener{eType: types.ETDuration, nullRation: 0.2}, + &defaultGener{eType: types.ETDecimal, nullRation: 0.2}, + }, + constants: []*Constant{nil, nil, {Value: types.NewStringDatum("QUARTER"), RetType: types.NewFieldType(mysql.TypeString)}}, + chunkSize: 128, + }, + { + retEvalType: types.ETDuration, + childrenTypes: []types.EvalType{types.ETDuration, types.ETDecimal, types.ETString}, + geners: []dataGenerator{ + &defaultGener{eType: types.ETDuration, nullRation: 0.2}, + &defaultGener{eType: types.ETDecimal, nullRation: 0.2}, + }, + constants: []*Constant{nil, nil, {Value: types.NewStringDatum("YEAR"), RetType: types.NewFieldType(mysql.TypeString)}}, + chunkSize: 128, + }, + { + retEvalType: types.ETDuration, + childrenTypes: []types.EvalType{types.ETDuration, types.ETDecimal, types.ETString}, + geners: []dataGenerator{ + &defaultGener{eType: types.ETDuration, nullRation: 0.2}, + &defaultGener{eType: types.ETDecimal, nullRation: 0.2}, + }, + constants: []*Constant{nil, nil, {Value: types.NewStringDatum("SECOND_MICROSECOND"), RetType: types.NewFieldType(mysql.TypeString)}}, + chunkSize: 128, + }, + { + retEvalType: types.ETDuration, + childrenTypes: []types.EvalType{types.ETDuration, types.ETDecimal, types.ETString}, + geners: []dataGenerator{ + &defaultGener{eType: types.ETDuration, nullRation: 0.2}, + &defaultGener{eType: types.ETDecimal, nullRation: 0.2}, + }, + constants: []*Constant{nil, nil, {Value: types.NewStringDatum("MINUTE_MICROSECOND"), RetType: types.NewFieldType(mysql.TypeString)}}, + chunkSize: 128, + }, + { + retEvalType: types.ETDuration, + childrenTypes: []types.EvalType{types.ETDuration, types.ETDecimal, types.ETString}, + geners: []dataGenerator{ + &defaultGener{eType: types.ETDuration, nullRation: 0.2}, + &defaultGener{eType: types.ETDecimal, nullRation: 0.2}, + }, + constants: []*Constant{nil, nil, {Value: types.NewStringDatum("MINUTE_SECOND"), RetType: types.NewFieldType(mysql.TypeString)}}, + chunkSize: 128, + }, + { + retEvalType: types.ETDuration, + childrenTypes: []types.EvalType{types.ETDuration, types.ETDecimal, types.ETString}, + geners: []dataGenerator{ + &defaultGener{eType: types.ETDuration, nullRation: 0.2}, + &defaultGener{eType: types.ETDecimal, nullRation: 0.2}, + }, + constants: []*Constant{nil, nil, {Value: types.NewStringDatum("HOUR_MICROSECOND"), RetType: types.NewFieldType(mysql.TypeString)}}, + chunkSize: 128, + }, + { + retEvalType: types.ETDuration, + childrenTypes: []types.EvalType{types.ETDuration, types.ETDecimal, types.ETString}, + geners: []dataGenerator{ + &defaultGener{eType: types.ETDuration, nullRation: 
0.2}, + &defaultGener{eType: types.ETDecimal, nullRation: 0.2}, + }, + constants: []*Constant{nil, nil, {Value: types.NewStringDatum("HOUR_SECOND"), RetType: types.NewFieldType(mysql.TypeString)}}, + chunkSize: 128, + }, + { + retEvalType: types.ETDuration, + childrenTypes: []types.EvalType{types.ETDuration, types.ETDecimal, types.ETString}, + geners: []dataGenerator{ + &defaultGener{eType: types.ETDuration, nullRation: 0.2}, + &defaultGener{eType: types.ETDecimal, nullRation: 0.2}, + }, + constants: []*Constant{nil, nil, {Value: types.NewStringDatum("HOUR_MINUTE"), RetType: types.NewFieldType(mysql.TypeString)}}, + chunkSize: 128, + }, + { + retEvalType: types.ETDuration, + childrenTypes: []types.EvalType{types.ETDuration, types.ETDecimal, types.ETString}, + geners: []dataGenerator{ + &defaultGener{eType: types.ETDuration, nullRation: 0.2}, + &defaultGener{eType: types.ETDecimal, nullRation: 0.2}, + }, + constants: []*Constant{nil, nil, {Value: types.NewStringDatum("DAY_MICROSECOND"), RetType: types.NewFieldType(mysql.TypeString)}}, + chunkSize: 128, + }, + { + retEvalType: types.ETDuration, + childrenTypes: []types.EvalType{types.ETDuration, types.ETDecimal, types.ETString}, + geners: []dataGenerator{ + &defaultGener{eType: types.ETDuration, nullRation: 0.2}, + &defaultGener{eType: types.ETDecimal, nullRation: 0.2}, + }, + constants: []*Constant{nil, nil, {Value: types.NewStringDatum("DAY_SECOND"), RetType: types.NewFieldType(mysql.TypeString)}}, + chunkSize: 128, + }, + { + retEvalType: types.ETDuration, + childrenTypes: []types.EvalType{types.ETDuration, types.ETDecimal, types.ETString}, + geners: []dataGenerator{ + &defaultGener{eType: types.ETDuration, nullRation: 0.2}, + &defaultGener{eType: types.ETDecimal, nullRation: 0.2}, + }, + constants: []*Constant{nil, nil, {Value: types.NewStringDatum("DAY_MINUTE"), RetType: types.NewFieldType(mysql.TypeString)}}, + chunkSize: 128, + }, + { + retEvalType: types.ETDuration, + childrenTypes: []types.EvalType{types.ETDuration, types.ETDecimal, types.ETString}, + geners: []dataGenerator{ + &defaultGener{eType: types.ETDuration, nullRation: 0.2}, + &defaultGener{eType: types.ETDecimal, nullRation: 0.2}, + }, + constants: []*Constant{nil, nil, {Value: types.NewStringDatum("DAY_HOUR"), RetType: types.NewFieldType(mysql.TypeString)}}, + chunkSize: 128, + }, + { + retEvalType: types.ETDuration, + childrenTypes: []types.EvalType{types.ETDuration, types.ETDecimal, types.ETString}, + geners: []dataGenerator{ + &defaultGener{eType: types.ETDuration, nullRation: 0.2}, + &defaultGener{eType: types.ETDecimal, nullRation: 0.2}, + }, + constants: []*Constant{nil, nil, {Value: types.NewStringDatum("YEAR_MONTH"), RetType: types.NewFieldType(mysql.TypeString)}}, + chunkSize: 128, + }, }, } diff --git a/expression/builtin_time_vec_test.go b/expression/builtin_time_vec_test.go index c401aef017b12..6e0b505c4686e 100644 --- a/expression/builtin_time_vec_test.go +++ b/expression/builtin_time_vec_test.go @@ -78,6 +78,12 @@ var vecBuiltinTimeCases = map[string][]vecExprBenchCase{ constants: []*Constant{{Value: types.NewStringDatum("2019-11-11"), RetType: types.NewFieldType(mysql.TypeString)}}, }, }, + ast.TimeLiteral: { + {retEvalType: types.ETDuration, childrenTypes: []types.EvalType{types.ETString}, + constants: []*Constant{ + {Value: types.NewStringDatum("838:59:59"), RetType: types.NewFieldType(mysql.TypeString)}}, + }, + }, ast.DateDiff: { {retEvalType: types.ETInt, childrenTypes: []types.EvalType{types.ETDatetime, types.ETDatetime}}, }, @@ -117,6 +123,9 @@ var 
vecBuiltinTimeCases = map[string][]vecExprBenchCase{ {retEvalType: types.ETDuration}, {retEvalType: types.ETDuration, childrenTypes: []types.EvalType{types.ETInt}, geners: []dataGenerator{&rangeInt64Gener{0, 7}}}, // fsp must be in the range 0 to 6. }, + ast.Time: { + {retEvalType: types.ETDuration, childrenTypes: []types.EvalType{types.ETString}, geners: []dataGenerator{new(dateTimeStrGener)}}, + }, ast.CurrentDate: { {retEvalType: types.ETDatetime}, }, @@ -144,22 +153,28 @@ var vecBuiltinTimeCases = map[string][]vecExprBenchCase{ ast.SecToTime: { {retEvalType: types.ETDuration, childrenTypes: []types.EvalType{types.ETReal}}, }, - ast.TimestampAdd: { - { - retEvalType: types.ETString, - childrenTypes: []types.EvalType{types.ETString, types.ETInt, types.ETDatetime}, - geners: []dataGenerator{&unitStrGener{}, nil, nil}, - }, - }, + // This test case may fail due to the issue: https://github.com/pingcap/tidb/issues/13638. + // We remove this case to stabilize CI, and will reopen this when we fix the issue above. + //ast.TimestampAdd: { + // { + // retEvalType: types.ETString, + // childrenTypes: []types.EvalType{types.ETString, types.ETInt, types.ETDatetime}, + // geners: []dataGenerator{&unitStrGener{}, nil, nil}, + // }, + //}, ast.TimestampDiff: { { retEvalType: types.ETInt, childrenTypes: []types.EvalType{types.ETString, types.ETDatetime, types.ETDatetime}, geners: []dataGenerator{&unitStrGener{}, nil, nil}}, }, - ast.TimestampLiteral: {}, - ast.SubDate: {}, - ast.AddDate: {}, + ast.TimestampLiteral: { + {retEvalType: types.ETTimestamp, childrenTypes: []types.EvalType{types.ETString}, + constants: []*Constant{{Value: types.NewStringDatum("2019-12-04 00:00:00"), RetType: types.NewFieldType(mysql.TypeString)}}, + }, + }, + ast.SubDate: {}, + ast.AddDate: {}, ast.SubTime: { { retEvalType: types.ETString, @@ -171,8 +186,8 @@ var vecBuiltinTimeCases = map[string][]vecExprBenchCase{ Flag: mysql.BinaryFlag, }}, geners: []dataGenerator{ - &timeStrGener{}, - &timeStrGener{}, + &dateStrGener{}, + &dateStrGener{}, }, }, // builtinSubTimeStringNullSig @@ -215,15 +230,15 @@ var vecBuiltinTimeCases = map[string][]vecExprBenchCase{ }, ast.Timestamp: { {retEvalType: types.ETDatetime, childrenTypes: []types.EvalType{types.ETString}, geners: []dataGenerator{new(dateTimeStrGener)}}, - {retEvalType: types.ETDatetime, childrenTypes: []types.EvalType{types.ETString}, geners: []dataGenerator{new(timeStrGener)}}, {retEvalType: types.ETDatetime, childrenTypes: []types.EvalType{types.ETString}, geners: []dataGenerator{new(dateStrGener)}}, + {retEvalType: types.ETDatetime, childrenTypes: []types.EvalType{types.ETString}, geners: []dataGenerator{new(timeStrGener)}}, {retEvalType: types.ETDatetime, childrenTypes: []types.EvalType{types.ETString}}, {retEvalType: types.ETDatetime, childrenTypes: []types.EvalType{types.ETString, types.ETString}, - geners: []dataGenerator{new(dateTimeStrGener), new(dateStrGener)}}, + geners: []dataGenerator{new(dateTimeStrGener), new(timeStrGener)}}, {retEvalType: types.ETDatetime, childrenTypes: []types.EvalType{types.ETString, types.ETString}, geners: []dataGenerator{new(dateTimeStrGener), nil}}, {retEvalType: types.ETDatetime, childrenTypes: []types.EvalType{types.ETString, types.ETString}, - geners: []dataGenerator{nil, new(dateStrGener)}}, + geners: []dataGenerator{nil, new(timeStrGener)}}, }, ast.MonthName: { {retEvalType: types.ETString, childrenTypes: []types.EvalType{types.ETDatetime}}, @@ -273,31 +288,31 @@ var vecBuiltinTimeCases = map[string][]vecExprBenchCase{ { 
retEvalType: types.ETDatetime, childrenTypes: []types.EvalType{types.ETString, types.ETString}, - geners: []dataGenerator{&timeStrGener{}, &constStrGener{"%y-%m-%d"}}, + geners: []dataGenerator{&dateStrGener{}, &constStrGener{"%y-%m-%d"}}, }, { retEvalType: types.ETDatetime, childrenTypes: []types.EvalType{types.ETString, types.ETString}, - geners: []dataGenerator{&timeStrGener{NullRation: 0.3}, nil}, + geners: []dataGenerator{&dateStrGener{NullRation: 0.3}, nil}, constants: []*Constant{nil, {Value: types.NewDatum("%Y-%m-%d"), RetType: types.NewFieldType(mysql.TypeString)}}, }, { retEvalType: types.ETDatetime, childrenTypes: []types.EvalType{types.ETString, types.ETString}, - geners: []dataGenerator{&timeStrGener{}, nil}, + geners: []dataGenerator{&dateStrGener{}, nil}, // "%y%m%d" is wrong format, STR_TO_DATE should be failed for all rows constants: []*Constant{nil, {Value: types.NewDatum("%y%m%d"), RetType: types.NewFieldType(mysql.TypeString)}}, }, { retEvalType: types.ETDuration, childrenTypes: []types.EvalType{types.ETString, types.ETString}, - geners: []dataGenerator{&dateStrGener{nullRation: 0.3}, nil}, + geners: []dataGenerator{&timeStrGener{nullRation: 0.3}, nil}, constants: []*Constant{nil, {Value: types.NewDatum("%H:%i:%s"), RetType: types.NewFieldType(mysql.TypeString)}}, }, { retEvalType: types.ETDuration, childrenTypes: []types.EvalType{types.ETString, types.ETString}, - geners: []dataGenerator{&dateStrGener{nullRation: 0.3}, nil}, + geners: []dataGenerator{&timeStrGener{nullRation: 0.3}, nil}, // "%H%i%s" is wrong format, STR_TO_DATE should be failed for all rows constants: []*Constant{nil, {Value: types.NewDatum("%H%i%s"), RetType: types.NewFieldType(mysql.TypeString)}}, }, diff --git a/expression/column.go b/expression/column.go index 8b0fa3f96d3b8..98601053c328d 100644 --- a/expression/column.go +++ b/expression/column.go @@ -581,3 +581,18 @@ idLoop: func (col *Column) EvalVirtualColumn(row chunk.Row) (types.Datum, error) { return col.VirtualExpr.Eval(row) } + +// SupportReverseEval checks whether the Column supports reverse evaluation. +func (col *Column) SupportReverseEval() bool { + switch col.RetType.Tp { + case mysql.TypeShort, mysql.TypeLong, mysql.TypeLonglong, + mysql.TypeFloat, mysql.TypeDouble, mysql.TypeNewDecimal: + return true + } + return false +} + +// ReverseEval evaluates the single column value from the given function result. +func (col *Column) ReverseEval(sc *stmtctx.StatementContext, res types.Datum, rType types.RoundingType) (val types.Datum, err error) { + return types.ChangeReverseResultByUpperLowerBound(sc, col.RetType, res, rType) +} diff --git a/expression/constant.go b/expression/constant.go index be33d1f17ce41..5104b564072fc 100644 --- a/expression/constant.go +++ b/expression/constant.go @@ -448,3 +448,16 @@ func (c *Constant) Vectorized() bool { } return true } + +// SupportReverseEval checks whether the Constant supports reverse evaluation. +func (c *Constant) SupportReverseEval() bool { + if c.DeferredExpr != nil { + return c.DeferredExpr.SupportReverseEval() + } + return true +} + +// ReverseEval returns the constant's value for any given function result.
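+// An illustrative sketch of what that means (assuming the types.Ceiling
+// rounding constant and a statement context sc are in scope):
+//
+//	c := &Constant{Value: types.NewIntDatum(3), RetType: types.NewFieldType(mysql.TypeLonglong)}
+//	v, _ := c.ReverseEval(sc, types.NewIntDatum(100), types.Ceiling)
+//	// v is 3: a constant ignores both the target result and the rounding direction.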
+func (c *Constant) ReverseEval(sc *stmtctx.StatementContext, res types.Datum, rType types.RoundingType) (val types.Datum, err error) { + return c.Value, nil +} diff --git a/expression/distsql_builtin.go b/expression/distsql_builtin.go index 3e7665374e81b..7c929db4f578b 100644 --- a/expression/distsql_builtin.go +++ b/expression/distsql_builtin.go @@ -1038,6 +1038,22 @@ func newDistSQLFunctionBySig(sc *stmtctx.StatementContext, sigCode tipb.ScalarFu }, nil } +// PBToExprs converts pb structures to expressions. +func PBToExprs(pbExprs []*tipb.Expr, fieldTps []*types.FieldType, sc *stmtctx.StatementContext) ([]Expression, error) { + exprs := make([]Expression, 0, len(pbExprs)) + for _, expr := range pbExprs { + e, err := PBToExpr(expr, fieldTps, sc) + if err != nil { + return nil, errors.Trace(err) + } + if e == nil { + return nil, errors.Errorf("pb to expression failed, pb expression is %v", expr) + } + exprs = append(exprs, e) + } + return exprs, nil +} + // PBToExpr converts pb structure to expression. func PBToExpr(expr *tipb.Expr, tps []*types.FieldType, sc *stmtctx.StatementContext) (Expression, error) { switch expr.Tp { diff --git a/expression/errors.go b/expression/errors.go index 10a719d54230a..5db1acbd0b494 100644 --- a/expression/errors.go +++ b/expression/errors.go @@ -29,6 +29,7 @@ var ( ErrOperandColumns = terror.ClassExpression.New(mysql.ErrOperandColumns, mysql.MySQLErrName[mysql.ErrOperandColumns]) ErrCutValueGroupConcat = terror.ClassExpression.New(mysql.ErrCutValueGroupConcat, mysql.MySQLErrName[mysql.ErrCutValueGroupConcat]) ErrFunctionsNoopImpl = terror.ClassExpression.New(mysql.ErrNotSupportedYet, "function %s has only noop implementation in tidb now, use tidb_enable_noop_functions to enable these functions") + ErrIncorrectType = terror.ClassExpression.New(mysql.ErrIncorrectType, mysql.MySQLErrName[mysql.ErrIncorrectType]) // All the un-exported errors are defined here: errFunctionNotExists = terror.ClassExpression.New(mysql.ErrSpDoesNotExist, mysql.MySQLErrName[mysql.ErrSpDoesNotExist]) @@ -51,6 +52,7 @@ func init() { mysql.ErrWrongParamcountToNativeFct: mysql.ErrWrongParamcountToNativeFct, mysql.ErrDivisionByZero: mysql.ErrDivisionByZero, mysql.ErrSpDoesNotExist: mysql.ErrSpDoesNotExist, + mysql.ErrNotSupportedYet: mysql.ErrNotSupportedYet, mysql.ErrZlibZData: mysql.ErrZlibZData, mysql.ErrZlibZBuf: mysql.ErrZlibZBuf, mysql.ErrWrongArguments: mysql.ErrWrongArguments, @@ -66,6 +68,7 @@ func init() { mysql.ErrUnknownLocale: mysql.ErrUnknownLocale, mysql.ErrBadField: mysql.ErrBadField, mysql.ErrNonUniq: mysql.ErrNonUniq, + mysql.ErrIncorrectType: mysql.ErrIncorrectType, } terror.ErrClassToMySQLCodes[terror.ClassExpression] = expressionMySQLErrCodes } diff --git a/expression/explain.go b/expression/explain.go index 8250a260b6605..b1994a426ba57 100644 --- a/expression/explain.go +++ b/expression/explain.go @@ -25,10 +25,18 @@ import ( // ExplainInfo implements the Expression interface. 
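+// Note: ExplainNormalizedInfo (added below) walks the same tree but masks
+// constants, so a predicate rendered as eq(test.t.a, 1) by ExplainInfo comes
+// out as eq(test.t.a, ?) in the normalized form used for plan digests
+// (illustrative output, following Constant.ExplainNormalizedInfo returning "?").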
func (expr *ScalarFunction) ExplainInfo() string { + return expr.explainInfo(false) +} + +func (expr *ScalarFunction) explainInfo(normalized bool) string { var buffer bytes.Buffer fmt.Fprintf(&buffer, "%s(", expr.FuncName.L) for i, arg := range expr.GetArgs() { - buffer.WriteString(arg.ExplainInfo()) + if normalized { + buffer.WriteString(arg.ExplainNormalizedInfo()) + } else { + buffer.WriteString(arg.ExplainInfo()) + } if i+1 < len(expr.GetArgs()) { buffer.WriteString(", ") } @@ -37,11 +45,21 @@ func (expr *ScalarFunction) ExplainInfo() string { return buffer.String() } +// ExplainNormalizedInfo implements the Expression interface. +func (expr *ScalarFunction) ExplainNormalizedInfo() string { + return expr.explainInfo(true) +} + // ExplainInfo implements the Expression interface. func (col *Column) ExplainInfo() string { return col.String() } +// ExplainNormalizedInfo implements the Expression interface. +func (col *Column) ExplainNormalizedInfo() string { + return col.ExplainInfo() +} + // ExplainInfo implements the Expression interface. func (expr *Constant) ExplainInfo() string { dt, err := expr.Eval(chunk.Row{}) @@ -51,6 +69,11 @@ func (expr *Constant) ExplainInfo() string { return expr.format(dt) } +// ExplainNormalizedInfo implements the Expression interface. +func (expr *Constant) ExplainNormalizedInfo() string { + return "?" +} + func (expr *Constant) format(dt types.Datum) string { switch dt.Kind() { case types.KindNull: @@ -83,10 +106,18 @@ func ExplainExpressionList(exprs []Expression, schema *Schema) string { // In some scenarios, the expr's order may not be stable when executing multiple times. // So we add a sort to make its explain result stable. func SortedExplainExpressionList(exprs []Expression) []byte { + return sortedExplainExpressionList(exprs, false) +} + +func sortedExplainExpressionList(exprs []Expression, normalized bool) []byte { buffer := bytes.NewBufferString("") exprInfos := make([]string, 0, len(exprs)) for _, expr := range exprs { - exprInfos = append(exprInfos, expr.ExplainInfo()) + if normalized { + exprInfos = append(exprInfos, expr.ExplainNormalizedInfo()) + } else { + exprInfos = append(exprInfos, expr.ExplainInfo()) + } } sort.Strings(exprInfos) for i, info := range exprInfos { @@ -98,6 +129,20 @@ func SortedExplainExpressionList(exprs []Expression) []byte { return buffer.Bytes() } +// SortedExplainNormalizedExpressionList is like SortedExplainExpressionList, but is used for generating normalized information. +func SortedExplainNormalizedExpressionList(exprs []Expression) []byte { + return sortedExplainExpressionList(exprs, true) +} + +// SortedExplainNormalizedScalarFuncList is like SortedExplainExpressionList, but is used for generating normalized information. +func SortedExplainNormalizedScalarFuncList(exprs []*ScalarFunction) []byte { + expressions := make([]Expression, len(exprs)) + for i := range exprs { + expressions[i] = exprs[i] + } + return sortedExplainExpressionList(expressions, true) +} + // ExplainColumnList generates explain information for a list of columns. func ExplainColumnList(cols []*Column) []byte { buffer := bytes.NewBufferString("") diff --git a/expression/expr_to_pb.go b/expression/expr_to_pb.go index 515b2f5f13795..87c33019d43c8 100644 --- a/expression/expr_to_pb.go +++ b/expression/expr_to_pb.go @@ -186,6 +186,18 @@ func ToPBFieldType(ft *types.FieldType) *tipb.FieldType { } } +// FieldTypeFromPB converts *tipb.FieldType to *types.FieldType.
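+// A round-trip sketch (illustrative; ToPBFieldType is the existing forward
+// direction shown above):
+//
+//	ft := types.NewFieldType(mysql.TypeLonglong)
+//	back := FieldTypeFromPB(ToPBFieldType(ft))
+//	// back.Tp == ft.Tp; the collation is mapped through collationToProto and
+//	// protoToCollation, each falling back to the default for unknown values.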
+func FieldTypeFromPB(ft *tipb.FieldType) *types.FieldType { + return &types.FieldType{ + Tp: byte(ft.Tp), + Flag: uint(ft.Flag), + Flen: int(ft.Flen), + Decimal: int(ft.Decimal), + Charset: ft.Charset, + Collate: protoToCollation(ft.Collate), + } +} + func collationToProto(c string) int32 { v, ok := mysql.CollationNames[c] if ok { @@ -194,6 +206,14 @@ func collationToProto(c string) int32 { return int32(mysql.DefaultCollationID) } +func protoToCollation(c int32) string { + v, ok := mysql.Collations[uint8(c)] + if ok { + return v + } + return mysql.DefaultCollationName +} + func (pc PbConverter) columnToPBExpr(column *Column) *tipb.Expr { if !pc.client.IsRequestTypeSupported(kv.ReqTypeSelect, int64(tipb.ExprType_ColumnRef)) { return nil diff --git a/expression/expression.go b/expression/expression.go index bfdc2741e273b..2bb31b995c9f5 100644 --- a/expression/expression.go +++ b/expression/expression.go @@ -68,11 +68,21 @@ type VecExpr interface { VecEvalJSON(ctx sessionctx.Context, input *chunk.Chunk, result *chunk.Column) error } +// ReverseExpr contains all reversed evaluation methods. +type ReverseExpr interface { + // SupportReverseEval checks whether the expression supports reverse evaluation. + SupportReverseEval() bool + + // ReverseEval evaluates the single column value from the given function result. + ReverseEval(sc *stmtctx.StatementContext, res types.Datum, rType types.RoundingType) (val types.Datum, err error) +} + // Expression represents all scalar expression in SQL. type Expression interface { fmt.Stringer goJSON.Marshaler VecExpr + ReverseExpr // Eval evaluates an expression through a row. Eval(row chunk.Row) (types.Datum, error) @@ -131,6 +141,9 @@ type Expression interface { // ExplainInfo returns operator information to be explained. ExplainInfo() string + // ExplainNormalizedInfo returns operator normalized information for generating digest. + ExplainNormalizedInfo() string + // HashCode creates the hashcode for expression which can be used to identify itself from other expression. // It generated as the following: // Constant: ConstantFlag+encoded value diff --git a/expression/generator/time_vec.go b/expression/generator/time_vec.go index cd03ec9dbd8b7..80199d4e9fe14 100644 --- a/expression/generator/time_vec.go +++ b/expression/generator/time_vec.go @@ -85,7 +85,7 @@ import ( if err != nil { return err } - } + } {{ end }} {{ range . }} @@ -234,7 +234,7 @@ func (b *{{.SigName}}) vecEval{{ .Output.TypeName }}(input *chunk.Chunk, result // commit result {{ if .Output.Fixed }} - resultSlice[i] = output + resultSlice[i] = output {{ else }} result.Append{{ .Output.TypeNameInColumn }}(output) {{ end }} @@ -308,27 +308,27 @@ func (b *{{.SigName}}) vecEvalDuration(input *chunk.Chunk, result *chunk.Column) {{- template "ArgsVecEval" .
}} result.MergeNulls(buf0, buf1) {{- end }} - {{- if .TypeA.Fixed }} + {{- if .TypeA.Fixed }} arg0 := buf0.{{.TypeA.TypeNameInColumn}}s() {{- end }} - {{- if .TypeB.Fixed }} + {{- if .TypeB.Fixed }} arg1 := buf1.{{.TypeB.TypeNameInColumn}}s() {{- end }} - {{- if (or $AIsDuration $BIsDuration) }} + {{- if (or $AIsDuration $BIsDuration) }} var ( lhs types.Duration rhs types.Duration ) {{- end }} - {{- if or (or $AIsString $BIsString) (and $AIsTime $BIsTime) }} + {{- if or (or $AIsString $BIsString) (and $AIsTime $BIsTime) }} stmtCtx := b.ctx.GetSessionVars().StmtCtx {{- end }} for i:=0; i")) + tk.MustQuery("select json_unquote(NULL);").Check(testkit.Rows("")) + + tk.MustQuery("select json_quote('abc');").Check(testkit.Rows(`"abc"`)) + tk.MustQuery(`select json_quote(convert('"abc"' using ascii));`).Check(testkit.Rows(`"\"abc\""`)) + tk.MustQuery(`select json_quote(convert('"abc"' using latin1));`).Check(testkit.Rows(`"\"abc\""`)) + tk.MustQuery(`select json_quote(convert('"abc"' using utf8));`).Check(testkit.Rows(`"\"abc\""`)) + tk.MustQuery(`select json_quote(convert('"abc"' using utf8mb4));`).Check(testkit.Rows(`"\"abc\""`)) + + tk.MustQuery("select json_unquote('abc');").Check(testkit.Rows("abc")) + tk.MustQuery(`select json_unquote('"abc"');`).Check(testkit.Rows("abc")) + tk.MustQuery(`select json_unquote(convert('"abc"' using ascii));`).Check(testkit.Rows("abc")) + tk.MustQuery(`select json_unquote(convert('"abc"' using latin1));`).Check(testkit.Rows("abc")) + tk.MustQuery(`select json_unquote(convert('"abc"' using utf8));`).Check(testkit.Rows("abc")) + tk.MustQuery(`select json_unquote(convert('"abc"' using utf8mb4));`).Check(testkit.Rows("abc")) + + tk.MustQuery(`select json_quote('"');`).Check(testkit.Rows(`"\""`)) + tk.MustQuery(`select json_unquote('"');`).Check(testkit.Rows(`"`)) + + tk.MustQuery(`select json_unquote('""');`).Check(testkit.Rows(``)) + tk.MustQuery(`select char_length(json_unquote('""'));`).Check(testkit.Rows(`0`)) + tk.MustQuery(`select json_unquote('"" ');`).Check(testkit.Rows(`"" `)) + tk.MustQuery(`select json_unquote(cast(json_quote('abc') as json));`).Check(testkit.Rows("abc")) + + tk.MustQuery(`select json_unquote(cast('{"abc": "foo"}' as json));`).Check(testkit.Rows(`{"abc": "foo"}`)) + tk.MustQuery(`select json_unquote(json_extract(cast('{"abc": "foo"}' as json), '$.abc'));`).Check(testkit.Rows("foo")) + tk.MustQuery(`select json_unquote('["a", "b", "c"]');`).Check(testkit.Rows(`["a", "b", "c"]`)) + tk.MustQuery(`select json_unquote(cast('["a", "b", "c"]' as json));`).Check(testkit.Rows(`["a", "b", "c"]`)) + tk.MustQuery(`select json_quote(convert(X'e68891' using utf8));`).Check(testkit.Rows(`"我"`)) + tk.MustQuery(`select json_quote(convert(X'e68891' using utf8mb4));`).Check(testkit.Rows(`"我"`)) + tk.MustQuery(`select cast(json_quote(convert(X'e68891' using utf8)) as json);`).Check(testkit.Rows(`"我"`)) + tk.MustQuery(`select json_unquote(convert(X'e68891' using utf8));`).Check(testkit.Rows("我")) + + tk.MustQuery(`select json_quote(json_quote(json_quote('abc')));`).Check(testkit.Rows(`"\"\\\"abc\\\"\""`)) + tk.MustQuery(`select json_unquote(json_unquote(json_unquote(json_quote(json_quote(json_quote('abc'))))));`).Check(testkit.Rows("abc")) + + tk.MustGetErrCode("select json_quote(123)", mysql.ErrIncorrectType) + tk.MustGetErrCode("select json_quote(-100)", mysql.ErrIncorrectType) + tk.MustGetErrCode("select json_quote(123.123)", mysql.ErrIncorrectType) + tk.MustGetErrCode("select json_quote(-100.000)", mysql.ErrIncorrectType) + tk.MustGetErrCode(`select 
json_quote(true);`, mysql.ErrIncorrectType) + tk.MustGetErrCode(`select json_quote(false);`, mysql.ErrIncorrectType) + tk.MustGetErrCode(`select json_quote(cast("{}" as JSON));`, mysql.ErrIncorrectType) + tk.MustGetErrCode(`select json_quote(cast("[]" as JSON));`, mysql.ErrIncorrectType) + tk.MustGetErrCode(`select json_quote(cast("2015-07-29" as date));`, mysql.ErrIncorrectType) + tk.MustGetErrCode(`select json_quote(cast("12:18:29.000000" as time));`, mysql.ErrIncorrectType) + tk.MustGetErrCode(`select json_quote(cast("2015-07-29 12:18:29.000000" as datetime));`, mysql.ErrIncorrectType) + + tk.MustGetErrCode("select json_unquote(123)", mysql.ErrIncorrectType) + tk.MustGetErrCode("select json_unquote(-100)", mysql.ErrIncorrectType) + tk.MustGetErrCode("select json_unquote(123.123)", mysql.ErrIncorrectType) + tk.MustGetErrCode("select json_unquote(-100.000)", mysql.ErrIncorrectType) + tk.MustGetErrCode(`select json_unquote(true);`, mysql.ErrIncorrectType) + tk.MustGetErrCode(`select json_unquote(false);`, mysql.ErrIncorrectType) + tk.MustGetErrCode(`select json_unquote(cast("2015-07-29" as date));`, mysql.ErrIncorrectType) + tk.MustGetErrCode(`select json_unquote(cast("12:18:29.000000" as time));`, mysql.ErrIncorrectType) + tk.MustGetErrCode(`select json_unquote(cast("2015-07-29 12:18:29.000000" as datetime));`, mysql.ErrIncorrectType) r = tk.MustQuery(`select json_extract(a, '$.a[1]'), json_extract(b, '$.b') from table_json`) r.Check(testkit.Rows("\"2\" true", " ")) diff --git a/expression/scalar_function.go b/expression/scalar_function.go index 29a5933741b03..a274215f0c88b 100755 --- a/expression/scalar_function.go +++ b/expression/scalar_function.go @@ -86,6 +86,21 @@ func (sf *ScalarFunction) Vectorized() bool { return sf.Function.vectorized() && sf.Function.isChildrenVectorized() } +// SupportReverseEval returns whether this expression supports reversed evaluation. +func (sf *ScalarFunction) SupportReverseEval() bool { + switch sf.RetType.Tp { + case mysql.TypeShort, mysql.TypeLong, mysql.TypeLonglong, + mysql.TypeFloat, mysql.TypeDouble, mysql.TypeNewDecimal: + return sf.Function.supportReverseEval() && sf.Function.isChildrenReversed() + } + return false +} + +// ReverseEval evaluates the single column value from the given function result. +func (sf *ScalarFunction) ReverseEval(sc *stmtctx.StatementContext, res types.Datum, rType types.RoundingType) (val types.Datum, err error) { + return sf.Function.reverseEval(sc, res, rType) +} + // GetCtx gets the context of function.
func (sf *ScalarFunction) GetCtx() sessionctx.Context { return sf.Function.getCtx() diff --git a/expression/util_test.go b/expression/util_test.go index ec71299ec451f..4abda426a4414 100644 --- a/expression/util_test.go +++ b/expression/util_test.go @@ -501,6 +501,9 @@ func (m *MockExpr) EvalJSON(ctx sessionctx.Context, row chunk.Row) (val json.Bin } return json.BinaryJSON{}, m.i == nil, m.err } +func (m *MockExpr) ReverseEval(sc *stmtctx.StatementContext, res types.Datum, rType types.RoundingType) (val types.Datum, err error) { + return types.Datum{}, m.err +} func (m *MockExpr) GetType() *types.FieldType { return m.t } func (m *MockExpr) Clone() Expression { return nil } func (m *MockExpr) Equal(ctx sessionctx.Context, e Expression) bool { return false } @@ -510,5 +513,7 @@ func (m *MockExpr) Decorrelate(schema *Schema) Expression { return m func (m *MockExpr) ResolveIndices(schema *Schema) (Expression, error) { return m, nil } func (m *MockExpr) resolveIndices(schema *Schema) error { return nil } func (m *MockExpr) ExplainInfo() string { return "" } +func (m *MockExpr) ExplainNormalizedInfo() string { return "" } func (m *MockExpr) HashCode(sc *stmtctx.StatementContext) []byte { return nil } func (m *MockExpr) Vectorized() bool { return false } +func (m *MockExpr) SupportReverseEval() bool { return false } diff --git a/go.mod b/go.mod index 58ab75dc12000..a55aec22a7686 100644 --- a/go.mod +++ b/go.mod @@ -34,13 +34,13 @@ require ( github.com/pingcap/failpoint v0.0.0-20190512135322-30cc7431d99c github.com/pingcap/fn v0.0.0-20191016082858-07623b84a47d github.com/pingcap/goleveldb v0.0.0-20171020122428-b9ff6c35079e - github.com/pingcap/kvproto v0.0.0-20191121022655-4c654046831d + github.com/pingcap/kvproto v0.0.0-20191202044712-32be31591b03 github.com/pingcap/log v0.0.0-20191012051959-b742a5d432e9 - github.com/pingcap/parser v0.0.0-20191120165920-d5c49d11cc64 + github.com/pingcap/parser v0.0.0-20191204131342-259c92691fa4 github.com/pingcap/pd v1.1.0-beta.0.20190923032047-5c648dc365e0 github.com/pingcap/sysutil v0.0.0-20191126040022-986c5b3ed9a3 github.com/pingcap/tidb-tools v3.0.6-0.20191106033616-90632dda3863+incompatible - github.com/pingcap/tipb v0.0.0-20191127084114-0820b784842f + github.com/pingcap/tipb v0.0.0-20191203131953-a35f738b4796 github.com/prometheus/client_golang v1.0.0 github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4 github.com/remyoudompheng/bigfft v0.0.0-20190512091148-babf20351dd7 // indirect @@ -60,9 +60,9 @@ require ( go.uber.org/atomic v1.5.0 go.uber.org/automaxprocs v1.2.0 go.uber.org/zap v1.12.0 - golang.org/x/crypto v0.0.0-20191029031824-8986dd9e96cf // indirect + golang.org/x/crypto v0.0.0-20191122220453-ac88ee75c92c // indirect golang.org/x/net v0.0.0-20190909003024-a7b16738d86b - golang.org/x/sys v0.0.0-20191029155521-f43be2a4598c + golang.org/x/sys v0.0.0-20191128015809-6d18c012aee9 golang.org/x/text v0.3.2 golang.org/x/time v0.0.0-20190308202827-9d24e82272b4 // indirect golang.org/x/tools v0.0.0-20191107010934-f79515f33823 diff --git a/go.sum b/go.sum index 0cb5c7a16dba9..c9ce17b96fc53 100644 --- a/go.sum +++ b/go.sum @@ -181,21 +181,22 @@ github.com/pingcap/fn v0.0.0-20191016082858-07623b84a47d/go.mod h1:fMRU1BA1y+r89 github.com/pingcap/goleveldb v0.0.0-20171020122428-b9ff6c35079e h1:P73/4dPCL96rGrobssy1nVy2VaVpNCuLpCbr+FEaTA8= github.com/pingcap/goleveldb v0.0.0-20171020122428-b9ff6c35079e/go.mod h1:O17XtbryoCJhkKGbT62+L2OlrniwqiGLSqrmdHCMzZw= github.com/pingcap/kvproto v0.0.0-20190822090350-11ea838aedf7/go.mod 
h1:QMdbTAXCHzzygQzqcG9uVUgU2fKeSN1GmfMiykdSzzY= -github.com/pingcap/kvproto v0.0.0-20191121022655-4c654046831d h1:aH7ZFzWEyBgUtG/YlLOU7pIx++PqtXlRT7zpHcEf2Rg= github.com/pingcap/kvproto v0.0.0-20191121022655-4c654046831d/go.mod h1:WWLmULLO7l8IOcQG+t+ItJ3fEcrL5FxF0Wu+HrMy26w= +github.com/pingcap/kvproto v0.0.0-20191202044712-32be31591b03 h1:IyJl+qesVPf3UfFFmKtX69y1K5KC8uXlot3U0QgH7V4= +github.com/pingcap/kvproto v0.0.0-20191202044712-32be31591b03/go.mod h1:WWLmULLO7l8IOcQG+t+ItJ3fEcrL5FxF0Wu+HrMy26w= github.com/pingcap/log v0.0.0-20190715063458-479153f07ebd/go.mod h1:WpHUKhNZ18v116SvGrmjkA9CBhYmuUTKL+p8JC9ANEw= github.com/pingcap/log v0.0.0-20191012051959-b742a5d432e9 h1:AJD9pZYm72vMgPcQDww9rkZ1DnWfl0pXV3BOWlkYIjA= github.com/pingcap/log v0.0.0-20191012051959-b742a5d432e9/go.mod h1:4rbK1p9ILyIfb6hU7OG2CiWSqMXnp3JMbiaVJ6mvoY8= -github.com/pingcap/parser v0.0.0-20191120165920-d5c49d11cc64 h1:jpLGhi9hEp6Px9NDKkSjpcWuBdkgSCTxGMlrw9bWipQ= -github.com/pingcap/parser v0.0.0-20191120165920-d5c49d11cc64/go.mod h1:1FNvfp9+J0wvc4kl8eGNh7Rqrxveg15jJoWo/a0uHwA= +github.com/pingcap/parser v0.0.0-20191204131342-259c92691fa4 h1:RdccMHB2TG4FexODGlLPSZe/H+MgHJVVptPMJP+9AcA= +github.com/pingcap/parser v0.0.0-20191204131342-259c92691fa4/go.mod h1:1FNvfp9+J0wvc4kl8eGNh7Rqrxveg15jJoWo/a0uHwA= github.com/pingcap/pd v1.1.0-beta.0.20190923032047-5c648dc365e0 h1:GIEq+wZfrl2bcJxpuSrEH4H7/nlf5YdmpS+dU9lNIt8= github.com/pingcap/pd v1.1.0-beta.0.20190923032047-5c648dc365e0/go.mod h1:G/6rJpnYwM0LKMec2rI82/5Kg6GaZMvlfB+e6/tvYmI= github.com/pingcap/sysutil v0.0.0-20191126040022-986c5b3ed9a3 h1:HCNif3lukL83gNC2EBAoh2Qbz36+2p0bm0LjgnNfl1s= github.com/pingcap/sysutil v0.0.0-20191126040022-986c5b3ed9a3/go.mod h1:Futrrmuw98pEsbEmoPsjw8aKLCmixwHEmT2rF+AsXGw= github.com/pingcap/tidb-tools v3.0.6-0.20191106033616-90632dda3863+incompatible h1:H1jg0aDWz2SLRh3hNBo2HFtnuHtudIUvBumU7syRkic= github.com/pingcap/tidb-tools v3.0.6-0.20191106033616-90632dda3863+incompatible/go.mod h1:XGdcy9+yqlDSEMTpOXnwf3hiTeqrV6MN/u1se9N8yIM= -github.com/pingcap/tipb v0.0.0-20191127084114-0820b784842f h1:ywyH6JKJIf+gEYg2kRlU8SaPMryHgu5SoXKtPdkeZPU= -github.com/pingcap/tipb v0.0.0-20191127084114-0820b784842f/go.mod h1:RtkHW8WbcNxj8lsbzjaILci01CtYnYbIkQhjyZWrWVI= +github.com/pingcap/tipb v0.0.0-20191203131953-a35f738b4796 h1:VNxsATjjGSXYbLXYdwJMj4ah5oxkMbKtOg/kaoXUX64= +github.com/pingcap/tipb v0.0.0-20191203131953-a35f738b4796/go.mod h1:RtkHW8WbcNxj8lsbzjaILci01CtYnYbIkQhjyZWrWVI= github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= @@ -300,8 +301,8 @@ golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnf golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190909091759-094676da4a83/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20191029031824-8986dd9e96cf h1:fnPsqIDRbCSgumaMCRpoIoF2s4qxv0xSSS0BVZUE/ss= -golang.org/x/crypto v0.0.0-20191029031824-8986dd9e96cf/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20191122220453-ac88ee75c92c h1:/nJuwDLoL/zrqY6gf57vxC+Pi+pZ8bfhpPkicO5H7W4= +golang.org/x/crypto v0.0.0-20191122220453-ac88ee75c92c/go.mod 
h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= @@ -336,8 +337,8 @@ golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5h golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190909082730-f460065e899a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191029155521-f43be2a4598c h1:S/FtSvpNLtFBgjTqcKsRpsa6aVsI6iztaz1bQd9BJwE= -golang.org/x/sys v0.0.0-20191029155521-f43be2a4598c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191128015809-6d18c012aee9 h1:ZBzSG/7F4eNKz2L3GE9o300RX0Az1Bw5HF7PDraD+qU= +golang.org/x/sys v0.0.0-20191128015809-6d18c012aee9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= diff --git a/hooks/pre-commit b/hooks/pre-commit index 8bc330c9af495..c7d8d2536c360 100755 --- a/hooks/pre-commit +++ b/hooks/pre-commit @@ -1,4 +1,4 @@ -#!/bin/bash +#!/usr/bin/env bash # This file modified from k8s # https://github.com/kubernetes/kubernetes/blob/master/hooks/pre-commit # Now It's removed, The Reason is https://github.com/kubernetes/community/issues/729 diff --git a/infoschema/builder.go b/infoschema/builder.go index f089ceff730cb..0075696fa3c2a 100644 --- a/infoschema/builder.go +++ b/infoschema/builder.go @@ -19,6 +19,7 @@ import ( "strings" "github.com/pingcap/errors" + "github.com/pingcap/failpoint" "github.com/pingcap/parser/charset" "github.com/pingcap/parser/model" "github.com/pingcap/tidb/config" @@ -26,6 +27,7 @@ import ( "github.com/pingcap/tidb/meta/autoid" "github.com/pingcap/tidb/table" "github.com/pingcap/tidb/table/tables" + "github.com/pingcap/tidb/util/domainutil" ) // Builder builds a new InfoSchema. @@ -46,7 +48,6 @@ func (b *Builder) ApplyDiff(m *meta.Meta, diff *model.SchemaDiff) ([]int64, erro } else if diff.Type == model.ActionModifySchemaCharsetAndCollate { return nil, b.applyModifySchemaCharsetAndCollate(m, diff) } - roDBInfo, ok := b.is.SchemaByID(diff.SchemaID) if !ok { return nil, ErrDatabaseNotExists.GenWithStackByArgs( @@ -56,7 +57,7 @@ func (b *Builder) ApplyDiff(m *meta.Meta, diff *model.SchemaDiff) ([]int64, erro var oldTableID, newTableID int64 tblIDs := make([]int64, 0, 2) switch diff.Type { - case model.ActionCreateTable, model.ActionRecoverTable: + case model.ActionCreateTable, model.ActionRecoverTable, model.ActionRepairTable: newTableID = diff.TableID tblIDs = append(tblIDs, newTableID) case model.ActionDropTable, model.ActionDropView: @@ -95,7 +96,7 @@ func (b *Builder) ApplyDiff(m *meta.Meta, diff *model.SchemaDiff) ([]int64, erro } if tableIDIsValid(newTableID) { // All types except DropTableOrView. 
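+// The schema-diff action type is now threaded into applyCreateTable so that
+// repair mode can intercept tables fetched during reload (see the failpoint
+// below). In tests the hook can be armed with something like (sketch; the
+// failpoint path is assumed to follow the usual package-path convention):
+//
+//	failpoint.Enable("github.com/pingcap/tidb/infoschema/repairFetchCreateTable", "return(true)")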
- err := b.applyCreateTable(m, dbInfo, newTableID, alloc) + err := b.applyCreateTable(m, dbInfo, newTableID, alloc, diff.Type) if err != nil { return nil, errors.Trace(err) } @@ -179,7 +180,7 @@ func (b *Builder) copySortedTablesBucket(bucketIdx int) { b.is.sortedTablesBuckets[bucketIdx] = newSortedTables } -func (b *Builder) applyCreateTable(m *meta.Meta, dbInfo *model.DBInfo, tableID int64, alloc autoid.Allocator) error { +func (b *Builder) applyCreateTable(m *meta.Meta, dbInfo *model.DBInfo, tableID int64, alloc autoid.Allocator, tp model.ActionType) error { tblInfo, err := m.GetTable(dbInfo.ID, tableID) if err != nil { return errors.Trace(err) @@ -192,6 +193,16 @@ func (b *Builder) applyCreateTable(m *meta.Meta, dbInfo *model.DBInfo, tableID i fmt.Sprintf("(Table ID %d)", tableID), ) } + // This failpoint checks whether the fetched tblInfo should be added to repairInfo. + // It is typically used in repair table tests to load a mock `bad` tableInfo into repairInfo. + failpoint.Inject("repairFetchCreateTable", func(val failpoint.Value) { + if val.(bool) { + if domainutil.RepairInfo.InRepairMode() && tp != model.ActionRepairTable && domainutil.RepairInfo.CheckAndFetchRepairedTable(dbInfo, tblInfo) { + failpoint.Return(nil) + } + } + }) + ConvertCharsetCollateToLowerCaseIfNeed(tblInfo) ConvertOldVersionUTF8ToUTF8MB4IfNeed(tblInfo) diff --git a/infoschema/cluster.go b/infoschema/cluster.go new file mode 100644 index 0000000000000..98ca62fcacd5c --- /dev/null +++ b/infoschema/cluster.go @@ -0,0 +1,98 @@ +// Copyright 2019 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package infoschema + +import ( + "strconv" + "strings" + + "github.com/pingcap/errors" + "github.com/pingcap/parser/mysql" + "github.com/pingcap/tidb/domain/infosync" + "github.com/pingcap/tidb/sessionctx" + "github.com/pingcap/tidb/types" + "github.com/pingcap/tidb/util" +) + +// Cluster table list. Note: +// 1. the table names should be upper case. +// 2. clusterTableName should equal "CLUSTER_" + memTableTableName. +const ( + clusterTableSlowLog = "CLUSTER_SLOW_QUERY" + clusterTableProcesslist = "CLUSTER_PROCESSLIST" +) + +// memTableToClusterTables maps a memory table name to its cluster table name. +var memTableToClusterTables = map[string]string{ + tableSlowLog: clusterTableSlowLog, + tableProcesslist: clusterTableProcesslist, +} + +func init() { + var addrCol = columnInfo{"ADDRESS", mysql.TypeVarchar, 64, 0, nil, nil} + for memTableName, clusterMemTableName := range memTableToClusterTables { + memTableCols := tableNameToColumns[memTableName] + if len(memTableCols) == 0 { + continue + } + cols := make([]columnInfo, 0, len(memTableCols)+1) + cols = append(cols, memTableCols...) + cols = append(cols, addrCol) + tableNameToColumns[clusterMemTableName] = cols + } +} + +// isClusterTableByName checks whether the given table is a cluster memory table.
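+// Both names are compared case-insensitively, for example (illustrative):
+//
+//	isClusterTableByName("information_schema", "cluster_slow_query") // true
+//	isClusterTableByName("test", "CLUSTER_SLOW_QUERY")               // false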
+func isClusterTableByName(dbName, tableName string) bool { + dbName = strings.ToUpper(dbName) + switch dbName { + case util.InformationSchemaName, util.PerformanceSchemaName: + break + default: + return false + } + tableName = strings.ToUpper(tableName) + for _, name := range memTableToClusterTables { + name = strings.ToUpper(name) + if name == tableName { + return true + } + } + return false +} + +func getClusterMemTableRows(ctx sessionctx.Context, tableName string) (rows [][]types.Datum, err error) { + tableName = strings.ToUpper(tableName) + switch tableName { + case clusterTableSlowLog: + rows, err = dataForSlowLog(ctx) + case clusterTableProcesslist: + rows = dataForProcesslist(ctx) + default: + err = errors.Errorf("unknown cluster table: %v", tableName) + } + if err != nil { + return nil, err + } + return appendHostInfoToRows(rows), nil +} + +func appendHostInfoToRows(rows [][]types.Datum) [][]types.Datum { + serverInfo := infosync.GetServerInfo() + addr := serverInfo.IP + ":" + strconv.FormatUint(uint64(serverInfo.StatusPort), 10) + for i := range rows { + rows[i] = append(rows[i], types.NewStringDatum(addr)) + } + return rows +} diff --git a/infoschema/infoschema.go b/infoschema/infoschema.go index c51c1507113ce..80392ac99026a 100644 --- a/infoschema/infoschema.go +++ b/infoschema/infoschema.go @@ -14,6 +14,7 @@ package infoschema import ( + "fmt" "sort" "sync/atomic" @@ -22,8 +23,11 @@ import ( "github.com/pingcap/parser/terror" "github.com/pingcap/tidb/kv" "github.com/pingcap/tidb/meta/autoid" + "github.com/pingcap/tidb/sessionctx" "github.com/pingcap/tidb/table" "github.com/pingcap/tidb/util" + "github.com/pingcap/tidb/util/logutil" + "go.uber.org/zap" ) var ( @@ -348,14 +352,18 @@ func init() { terror.ErrClassToMySQLCodes[terror.ClassSchema] = schemaMySQLErrCodes // Initialize the information shema database and register the driver to `drivers` - dbID := autoid.GenLocalSchemaID() + dbID := autoid.InformationSchemaDBID infoSchemaTables := make([]*model.TableInfo, 0, len(tableNameToColumns)) for name, cols := range tableNameToColumns { tableInfo := buildTableMeta(name, cols) infoSchemaTables = append(infoSchemaTables, tableInfo) - tableInfo.ID = autoid.GenLocalSchemaID() - for _, c := range tableInfo.Columns { - c.ID = autoid.GenLocalSchemaID() + var ok bool + tableInfo.ID, ok = tableIDMap[tableInfo.Name.O] + if !ok { + panic(fmt.Sprintf("get information_schema table id failed, unknown system table `%v`", tableInfo.Name.O)) + } + for i, c := range tableInfo.Columns { + c.ID = int64(i) + 1 } } infoSchemaDB := &model.DBInfo{ @@ -387,3 +395,17 @@ func HasAutoIncrementColumn(tbInfo *model.TableInfo) (bool, string) { } return false, "" } + +// GetInfoSchema gets the TxnCtx InfoSchema if the snapshot schema is not set; +// otherwise, the snapshot schema is returned.
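+// A typical call site looks like this (sketch, assuming callers hold a
+// sessionctx.Context named ctx):
+//
+//	is := infoschema.GetInfoSchema(ctx)
+//	tbl, err := is.TableByName(model.NewCIStr("test"), model.NewCIStr("t"))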
+func GetInfoSchema(ctx sessionctx.Context) InfoSchema { + sessVar := ctx.GetSessionVars() + var is InfoSchema + if snap := sessVar.SnapshotInfoschema; snap != nil { + is = snap.(InfoSchema) + logutil.BgLogger().Info("use snapshot schema", zap.Uint64("conn", sessVar.ConnectionID), zap.Int64("schemaVersion", is.SchemaMetaVersion())) + } else { + is = sessVar.TxnCtx.InfoSchema.(InfoSchema) + } + return is +} diff --git a/infoschema/perfschema/const.go b/infoschema/perfschema/const.go index 6f8809ef1ba5c..532892c5fcdac 100644 --- a/infoschema/perfschema/const.go +++ b/infoschema/perfschema/const.go @@ -43,24 +43,30 @@ var perfSchemaTables = []string{ tableTiDBProfileCPU, tableTiDBProfileMemory, tableTiDBProfileMutex, - tableTiDBAllocsProfile, + tableTiDBProfileAllocs, tableTiDBProfileBlock, tableTiDBProfileGoroutines, tableTiKVProfileCPU, + tablePDProfileCPU, + tablePDProfileMemory, + tablePDProfileMutex, + tablePDProfileAllocs, + tablePDProfileBlock, + tablePDProfileGoroutines, } // tableGlobalStatus contains the column name definitions for table global_status, same as MySQL. -const tableGlobalStatus = "CREATE TABLE performance_schema.global_status(" + +const tableGlobalStatus = "CREATE TABLE performance_schema." + tableNameGlobalStatus + " (" + "VARIABLE_NAME VARCHAR(64) not null," + "VARIABLE_VALUE VARCHAR(1024));" // tableSessionStatus contains the column name definitions for table session_status, same as MySQL. -const tableSessionStatus = "CREATE TABLE performance_schema.session_status(" + +const tableSessionStatus = "CREATE TABLE performance_schema." + tableNameSessionStatus + " (" + "VARIABLE_NAME VARCHAR(64) not null," + "VARIABLE_VALUE VARCHAR(1024));" // tableSetupActors contains the column name definitions for table setup_actors, same as MySQL. -const tableSetupActors = "CREATE TABLE if not exists performance_schema.setup_actors (" + +const tableSetupActors = "CREATE TABLE if not exists performance_schema." + tableNameSetupActors + " (" + "HOST CHAR(60) NOT NULL DEFAULT '%'," + "USER CHAR(32) NOT NULL DEFAULT '%'," + "ROLE CHAR(16) NOT NULL DEFAULT '%'," + @@ -68,7 +74,7 @@ const tableSetupActors = "CREATE TABLE if not exists performance_schema.setup_ac "HISTORY ENUM('YES','NO') NOT NULL DEFAULT 'YES');" // tableSetupObjects contains the column name definitions for table setup_objects, same as MySQL. -const tableSetupObjects = "CREATE TABLE if not exists performance_schema.setup_objects (" + +const tableSetupObjects = "CREATE TABLE if not exists performance_schema." + tableNameSetupObjects + " (" + "OBJECT_TYPE ENUM('EVENT','FUNCTION','TABLE') NOT NULL DEFAULT 'TABLE'," + "OBJECT_SCHEMA VARCHAR(64) DEFAULT '%'," + "OBJECT_NAME VARCHAR(64) NOT NULL DEFAULT '%'," + @@ -76,18 +82,18 @@ const tableSetupObjects = "CREATE TABLE if not exists performance_schema.setup_o "TIMED ENUM('YES','NO') NOT NULL DEFAULT 'YES');" // tableSetupInstruments contains the column name definitions for table setup_instruments, same as MySQL. -const tableSetupInstruments = "CREATE TABLE if not exists performance_schema.setup_instruments (" + +const tableSetupInstruments = "CREATE TABLE if not exists performance_schema." + tableNameSetupInstruments + " (" + "NAME VARCHAR(128) NOT NULL," + "ENABLED ENUM('YES','NO') NOT NULL," + "TIMED ENUM('YES','NO') NOT NULL);" // tableSetupConsumers contains the column name definitions for table setup_consumers, same as MySQL. 
-const tableSetupConsumers = "CREATE TABLE if not exists performance_schema.setup_consumers (" + +const tableSetupConsumers = "CREATE TABLE if not exists performance_schema." + tableNameSetupConsumers + " (" + "NAME VARCHAR(64) NOT NULL," + "ENABLED ENUM('YES','NO') NOT NULL);" // tableStmtsCurrent contains the column name definitions for table events_statements_current, same as MySQL. -const tableStmtsCurrent = "CREATE TABLE if not exists performance_schema.events_statements_current (" + +const tableStmtsCurrent = "CREATE TABLE if not exists performance_schema." + tableNameEventsStatementsCurrent + " (" + "THREAD_ID BIGINT(20) UNSIGNED NOT NULL," + "EVENT_ID BIGINT(20) UNSIGNED NOT NULL," + "END_EVENT_ID BIGINT(20) UNSIGNED," + @@ -131,7 +137,7 @@ const tableStmtsCurrent = "CREATE TABLE if not exists performance_schema.events_ "NESTING_EVENT_LEVEL INT(11));" // tableStmtsHistory contains the column name definitions for table events_statements_history, same as MySQL. -const tableStmtsHistory = "CREATE TABLE if not exists performance_schema.events_statements_history (" + +const tableStmtsHistory = "CREATE TABLE if not exists performance_schema." + tableNameEventsStatementsHistory + " (" + "THREAD_ID BIGINT(20) UNSIGNED NOT NULL," + "EVENT_ID BIGINT(20) UNSIGNED NOT NULL," + "END_EVENT_ID BIGINT(20) UNSIGNED," + @@ -175,7 +181,7 @@ const tableStmtsHistory = "CREATE TABLE if not exists performance_schema.events_ "NESTING_EVENT_LEVEL INT(11));" // tableStmtsHistoryLong contains the column name definitions for table events_statements_history_long, same as MySQL. -const tableStmtsHistoryLong = "CREATE TABLE if not exists performance_schema.events_statements_history_long (" + +const tableStmtsHistoryLong = "CREATE TABLE if not exists performance_schema." + tableNameEventsStatementsHistoryLong + " (" + "THREAD_ID BIGINT(20) UNSIGNED NOT NULL," + "EVENT_ID BIGINT(20) UNSIGNED NOT NULL," + "END_EVENT_ID BIGINT(20) UNSIGNED," + @@ -219,7 +225,7 @@ const tableStmtsHistoryLong = "CREATE TABLE if not exists performance_schema.eve "NESTING_EVENT_LEVEL INT(11));" // tablePreparedStmtsInstances contains the column name definitions for table prepared_statements_instances, same as MySQL. -const tablePreparedStmtsInstances = "CREATE TABLE if not exists performance_schema.prepared_statements_instances (" + +const tablePreparedStmtsInstances = "CREATE TABLE if not exists performance_schema." + tableNamePreparedStatementsInstances + " (" + "OBJECT_INSTANCE_BEGIN BIGINT(20) UNSIGNED NOT NULL," + "STATEMENT_ID BIGINT(20) UNSIGNED NOT NULL," + "STATEMENT_NAME VARCHAR(64)," + @@ -257,7 +263,7 @@ const tablePreparedStmtsInstances = "CREATE TABLE if not exists performance_sche "SUM_NO_GOOD_INDEX_USED BIGINT(20) UNSIGNED NOT NULL);" // tableTransCurrent contains the column name definitions for table events_transactions_current, same as MySQL. -const tableTransCurrent = "CREATE TABLE if not exists performance_schema.events_transactions_current (" + +const tableTransCurrent = "CREATE TABLE if not exists performance_schema." + tableNameEventsTransactionsCurrent + " (" + "THREAD_ID BIGINT(20) UNSIGNED NOT NULL," + "EVENT_ID BIGINT(20) UNSIGNED NOT NULL," + "END_EVENT_ID BIGINT(20) UNSIGNED," + @@ -285,7 +291,7 @@ const tableTransCurrent = "CREATE TABLE if not exists performance_schema.events_ // tableTransHistory contains the column name definitions for table events_transactions_history, same as MySQL. 
// -const tableTransHistory = "CREATE TABLE if not exists performance_schema.events_transactions_history (" + +const tableTransHistory = "CREATE TABLE if not exists performance_schema." + tableNameEventsTransactionsHistory + " (" + "THREAD_ID BIGINT(20) UNSIGNED NOT NULL," + "EVENT_ID BIGINT(20) UNSIGNED NOT NULL," + "END_EVENT_ID BIGINT(20) UNSIGNED," + @@ -312,7 +318,7 @@ const tableTransHistory = "CREATE TABLE if not exists performance_schema.events_ "NESTING_EVENT_TYPE ENUM('TRANSACTION','STATEMENT','STAGE'));" // tableTransHistoryLong contains the column name definitions for table events_transactions_history_long, same as MySQL. -const tableTransHistoryLong = "CREATE TABLE if not exists performance_schema.events_transactions_history_long (" + +const tableTransHistoryLong = "CREATE TABLE if not exists performance_schema." + tableNameEventsTransactionsHistoryLong + " (" + "THREAD_ID BIGINT(20) UNSIGNED NOT NULL," + "EVENT_ID BIGINT(20) UNSIGNED NOT NULL," + "END_EVENT_ID BIGINT(20) UNSIGNED," + @@ -339,7 +345,7 @@ const tableTransHistoryLong = "CREATE TABLE if not exists performance_schema.eve "NESTING_EVENT_TYPE ENUM('TRANSACTION','STATEMENT','STAGE'));" // tableStagesCurrent contains the column name definitions for table events_stages_current, same as MySQL. -const tableStagesCurrent = "CREATE TABLE if not exists performance_schema.events_stages_current (" + +const tableStagesCurrent = "CREATE TABLE if not exists performance_schema." + tableNameEventsStagesCurrent + " (" + "THREAD_ID BIGINT(20) UNSIGNED NOT NULL," + "EVENT_ID BIGINT(20) UNSIGNED NOT NULL," + "END_EVENT_ID BIGINT(20) UNSIGNED," + @@ -354,7 +360,7 @@ const tableStagesCurrent = "CREATE TABLE if not exists performance_schema.events "NESTING_EVENT_TYPE ENUM('TRANSACTION','STATEMENT','STAGE'));" // tableStagesHistory contains the column name definitions for table events_stages_history, same as MySQL. -const tableStagesHistory = "CREATE TABLE if not exists performance_schema.events_stages_history (" + +const tableStagesHistory = "CREATE TABLE if not exists performance_schema." + tableNameEventsStagesHistory + " (" + "THREAD_ID BIGINT(20) UNSIGNED NOT NULL," + "EVENT_ID BIGINT(20) UNSIGNED NOT NULL," + "END_EVENT_ID BIGINT(20) UNSIGNED," + @@ -369,7 +375,7 @@ const tableStagesHistory = "CREATE TABLE if not exists performance_schema.events "NESTING_EVENT_TYPE ENUM('TRANSACTION','STATEMENT','STAGE'));" // tableStagesHistoryLong contains the column name definitions for table events_stages_history_long, same as MySQL. -const tableStagesHistoryLong = "CREATE TABLE if not exists performance_schema.events_stages_history_long (" + +const tableStagesHistoryLong = "CREATE TABLE if not exists performance_schema." + tableNameEventsStagesHistoryLong + " (" + "THREAD_ID BIGINT(20) UNSIGNED NOT NULL," + "EVENT_ID BIGINT(20) UNSIGNED NOT NULL," + "END_EVENT_ID BIGINT(20) UNSIGNED," + @@ -385,7 +391,7 @@ const tableStagesHistoryLong = "CREATE TABLE if not exists performance_schema.ev // tableEventsStatementsSummaryByDigest contains the column name definitions for table // events_statements_summary_by_digest, same as MySQL. 
-const tableEventsStatementsSummaryByDigest = "CREATE TABLE if not exists events_statements_summary_by_digest (" + +const tableEventsStatementsSummaryByDigest = "CREATE TABLE if not exists " + tableNameEventsStatementsSummaryByDigest + " (" + "SUMMARY_BEGIN_TIME TIMESTAMP(6) NOT NULL," + "STMT_TYPE VARCHAR(64) NOT NULL," + "SCHEMA_NAME VARCHAR(64) DEFAULT NULL," + @@ -475,8 +481,8 @@ const tableTiDBProfileMutex = "CREATE TABLE IF NOT EXISTS " + tableNameTiDBProfi "DEPTH INT(8) NOT NULL," + "FILE VARCHAR(512) NOT NULL);" -// tableTiDBAllocsProfile contains the columns name definitions for table tidb_profile_allocs -const tableTiDBAllocsProfile = "CREATE TABLE IF NOT EXISTS " + tableNameTiDBProfileAllocs + " (" + +// tableTiDBProfileAllocs contains the columns name definitions for table tidb_profile_allocs +const tableTiDBProfileAllocs = "CREATE TABLE IF NOT EXISTS " + tableNameTiDBProfileAllocs + " (" + "FUNCTION VARCHAR(512) NOT NULL," + "PERCENT_ABS VARCHAR(8) NOT NULL," + "PERCENT_REL VARCHAR(8) NOT NULL," + @@ -498,9 +504,9 @@ const tableTiDBProfileGoroutines = "CREATE TABLE IF NOT EXISTS " + tableNameTiDB "FUNCTION VARCHAR(512) NOT NULL," + "ID INT(8) NOT NULL," + "STATE VARCHAR(16) NOT NULL," + - "LOCATION VARCHAR(512));" + "LOCATION VARCHAR(512) NOT NULL);" - // tableTiKVProfileCPU contains the columns name definitions for table tikv_profile_cpu +// tableTiKVProfileCPU contains the columns name definitions for table tikv_profile_cpu const tableTiKVProfileCPU = "CREATE TABLE IF NOT EXISTS " + tableNameTiKVProfileCPU + " (" + "ADDRESS VARCHAR(64) NOT NULL," + "FUNCTION VARCHAR(512) NOT NULL," + @@ -509,3 +515,61 @@ const tableTiKVProfileCPU = "CREATE TABLE IF NOT EXISTS " + tableNameTiKVProfile "ROOT_CHILD INT(8) NOT NULL," + "DEPTH INT(8) NOT NULL," + "FILE VARCHAR(512) NOT NULL);" + +// tablePDProfileCPU contains the columns name definitions for table pd_profile_cpu +const tablePDProfileCPU = "CREATE TABLE IF NOT EXISTS " + tableNamePDProfileCPU + " (" + + "ADDRESS VARCHAR(64) NOT NULL," + + "FUNCTION VARCHAR(512) NOT NULL," + + "PERCENT_ABS VARCHAR(8) NOT NULL," + + "PERCENT_REL VARCHAR(8) NOT NULL," + + "ROOT_CHILD INT(8) NOT NULL," + + "DEPTH INT(8) NOT NULL," + + "FILE VARCHAR(512) NOT NULL);" + +// tablePDProfileMemory contains the columns name definitions for table pd_profile_memory +const tablePDProfileMemory = "CREATE TABLE IF NOT EXISTS " + tableNamePDProfileMemory + " (" + + "ADDRESS VARCHAR(64) NOT NULL," + + "FUNCTION VARCHAR(512) NOT NULL," + + "PERCENT_ABS VARCHAR(8) NOT NULL," + + "PERCENT_REL VARCHAR(8) NOT NULL," + + "ROOT_CHILD INT(8) NOT NULL," + + "DEPTH INT(8) NOT NULL," + + "FILE VARCHAR(512) NOT NULL);" + +// tablePDProfileMutex contains the columns name definitions for table pd_profile_mutex +const tablePDProfileMutex = "CREATE TABLE IF NOT EXISTS " + tableNamePDProfileMutex + " (" + + "ADDRESS VARCHAR(64) NOT NULL," + + "FUNCTION VARCHAR(512) NOT NULL," + + "PERCENT_ABS VARCHAR(8) NOT NULL," + + "PERCENT_REL VARCHAR(8) NOT NULL," + + "ROOT_CHILD INT(8) NOT NULL," + + "DEPTH INT(8) NOT NULL," + + "FILE VARCHAR(512) NOT NULL);" + +// tablePDProfileAllocs contains the columns name definitions for table pd_profile_allocs +const tablePDProfileAllocs = "CREATE TABLE IF NOT EXISTS " + tableNamePDProfileAllocs + " (" + + "ADDRESS VARCHAR(64) NOT NULL," + + "FUNCTION VARCHAR(512) NOT NULL," + + "PERCENT_ABS VARCHAR(8) NOT NULL," + + "PERCENT_REL VARCHAR(8) NOT NULL," + + "ROOT_CHILD INT(8) NOT NULL," + + "DEPTH INT(8) NOT NULL," + + "FILE VARCHAR(512) NOT NULL);"
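Editorial note: all of the PD profile tables above reuse the seven-column layout of the TiKV profile table, so the repeated DDL strings could be generated rather than spelled out. A minimal sketch of such a generator, assuming only the column layout visible above (the helper name is hypothetical and not part of this patch):

	// buildRemoteProfileTableSQL would produce the CREATE TABLE statement shared
	// by the remote-profile tables: one row per node of the rendered profile tree.
	func buildRemoteProfileTableSQL(name string) string {
		return "CREATE TABLE IF NOT EXISTS " + name + " (" +
			"ADDRESS VARCHAR(64) NOT NULL," +
			"FUNCTION VARCHAR(512) NOT NULL," +
			"PERCENT_ABS VARCHAR(8) NOT NULL," +
			"PERCENT_REL VARCHAR(8) NOT NULL," +
			"ROOT_CHILD INT(8) NOT NULL," +
			"DEPTH INT(8) NOT NULL," +
			"FILE VARCHAR(512) NOT NULL);"
	}

The patch keeps the constants written out instead, which keeps every table definition greppable and lets individual tables diverge; pd_profile_goroutines below already uses a different column set.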
+ +// tablePDProfileBlock contains the columns name definitions for table pd_profile_block +const tablePDProfileBlock = "CREATE TABLE IF NOT EXISTS " + tableNamePDProfileBlock + " (" + + "ADDRESS VARCHAR(64) NOT NULL," + + "FUNCTION VARCHAR(512) NOT NULL," + + "PERCENT_ABS VARCHAR(8) NOT NULL," + + "PERCENT_REL VARCHAR(8) NOT NULL," + + "ROOT_CHILD INT(8) NOT NULL," + + "DEPTH INT(8) NOT NULL," + + "FILE VARCHAR(512) NOT NULL);" + +// tablePDProfileGoroutines contains the columns name definitions for table pd_profile_goroutines +const tablePDProfileGoroutines = "CREATE TABLE IF NOT EXISTS " + tableNamePDProfileGoroutines + " (" + + "ADDRESS VARCHAR(64) NOT NULL," + + "FUNCTION VARCHAR(512) NOT NULL," + + "ID INT(8) NOT NULL," + + "STATE VARCHAR(16) NOT NULL," + + "LOCATION VARCHAR(512) NOT NULL);" diff --git a/infoschema/perfschema/init.go b/infoschema/perfschema/init.go index 335556cf8ca5d..1cf549244ea3a 100644 --- a/infoschema/perfschema/init.go +++ b/infoschema/perfschema/init.go @@ -14,6 +14,7 @@ package perfschema import ( + "fmt" "sync" "github.com/pingcap/parser" @@ -39,8 +40,7 @@ func Init() { initOnce := func() { p := parser.New() tbls := make([]*model.TableInfo, 0) - dbID := autoid.GenLocalSchemaID() - + dbID := autoid.PerformanceSchemaDBID for _, sql := range perfSchemaTables { stmt, err := p.ParseOneStmt(sql, "", "") if err != nil { @@ -51,9 +51,13 @@ func Init() { panic(err) } tbls = append(tbls, meta) - meta.ID = autoid.GenLocalSchemaID() - for _, c := range meta.Columns { - c.ID = autoid.GenLocalSchemaID() + var ok bool + meta.ID, ok = tableIDMap[meta.Name.O] + if !ok { + panic(fmt.Sprintf("get performance_schema table id failed, unknown system table `%v`", meta.Name.O)) + } + for i, c := range meta.Columns { + c.ID = int64(i) + 1 } } dbInfo := &model.DBInfo{ diff --git a/infoschema/perfschema/tables.go b/infoschema/perfschema/tables.go index 718da2748c1b4..e9bc013f58487 100644 --- a/infoschema/perfschema/tables.go +++ b/infoschema/perfschema/tables.go @@ -37,6 +37,22 @@ import ( ) const ( + tableNameGlobalStatus = "global_status" + tableNameSessionStatus = "session_status" + tableNameSetupActors = "setup_actors" + tableNameSetupObjects = "setup_objects" + tableNameSetupInstruments = "setup_instruments" + tableNameSetupConsumers = "setup_consumers" + tableNameEventsStatementsCurrent = "events_statements_current" + tableNameEventsStatementsHistory = "events_statements_history" + tableNameEventsStatementsHistoryLong = "events_statements_history_long" + tableNamePreparedStatementsInstances = "prepared_statements_instances" + tableNameEventsTransactionsCurrent = "events_transactions_current" + tableNameEventsTransactionsHistory = "events_transactions_history" + tableNameEventsTransactionsHistoryLong = "events_transactions_history_long" + tableNameEventsStagesCurrent = "events_stages_current" + tableNameEventsStagesHistory = "events_stages_history" + tableNameEventsStagesHistoryLong = "events_stages_history_long" tableNameEventsStatementsSummaryByDigest = "events_statements_summary_by_digest" tableNameTiDBProfileCPU = "tidb_profile_cpu" tableNameTiDBProfileMemory = "tidb_profile_memory" @@ -45,8 +61,47 @@ const ( tableNameTiDBProfileBlock = "tidb_profile_block" tableNameTiDBProfileGoroutines = "tidb_profile_goroutines" tableNameTiKVProfileCPU = "tikv_profile_cpu" + tableNamePDProfileCPU = "pd_profile_cpu" + tableNamePDProfileMemory = "pd_profile_memory" + tableNamePDProfileMutex = "pd_profile_mutex" + tableNamePDProfileAllocs = "pd_profile_allocs" + tableNamePDProfileBlock = 
"pd_profile_block" + tableNamePDProfileGoroutines = "pd_profile_goroutines" ) +var tableIDMap = map[string]int64{ + tableNameGlobalStatus: autoid.PerformanceSchemaDBID + 1, + tableNameSessionStatus: autoid.PerformanceSchemaDBID + 2, + tableNameSetupActors: autoid.PerformanceSchemaDBID + 3, + tableNameSetupObjects: autoid.PerformanceSchemaDBID + 4, + tableNameSetupInstruments: autoid.PerformanceSchemaDBID + 5, + tableNameSetupConsumers: autoid.PerformanceSchemaDBID + 6, + tableNameEventsStatementsCurrent: autoid.PerformanceSchemaDBID + 7, + tableNameEventsStatementsHistory: autoid.PerformanceSchemaDBID + 8, + tableNameEventsStatementsHistoryLong: autoid.PerformanceSchemaDBID + 9, + tableNamePreparedStatementsInstances: autoid.PerformanceSchemaDBID + 10, + tableNameEventsTransactionsCurrent: autoid.PerformanceSchemaDBID + 11, + tableNameEventsTransactionsHistory: autoid.PerformanceSchemaDBID + 12, + tableNameEventsTransactionsHistoryLong: autoid.PerformanceSchemaDBID + 13, + tableNameEventsStagesCurrent: autoid.PerformanceSchemaDBID + 14, + tableNameEventsStagesHistory: autoid.PerformanceSchemaDBID + 15, + tableNameEventsStagesHistoryLong: autoid.PerformanceSchemaDBID + 16, + tableNameEventsStatementsSummaryByDigest: autoid.PerformanceSchemaDBID + 17, + tableNameTiDBProfileCPU: autoid.PerformanceSchemaDBID + 18, + tableNameTiDBProfileMemory: autoid.PerformanceSchemaDBID + 19, + tableNameTiDBProfileMutex: autoid.PerformanceSchemaDBID + 20, + tableNameTiDBProfileAllocs: autoid.PerformanceSchemaDBID + 21, + tableNameTiDBProfileBlock: autoid.PerformanceSchemaDBID + 22, + tableNameTiDBProfileGoroutines: autoid.PerformanceSchemaDBID + 23, + tableNameTiKVProfileCPU: autoid.PerformanceSchemaDBID + 24, + tableNamePDProfileCPU: autoid.PerformanceSchemaDBID + 25, + tableNamePDProfileMemory: autoid.PerformanceSchemaDBID + 26, + tableNamePDProfileMutex: autoid.PerformanceSchemaDBID + 27, + tableNamePDProfileAllocs: autoid.PerformanceSchemaDBID + 28, + tableNamePDProfileBlock: autoid.PerformanceSchemaDBID + 29, + tableNamePDProfileGoroutines: autoid.PerformanceSchemaDBID + 30, +} + // perfSchemaTable stands for the fake table all its data is in the memory. 
type perfSchemaTable struct { infoschema.VirtualTable @@ -120,9 +175,23 @@ func (vt *perfSchemaTable) getRows(ctx sessionctx.Context, cols []*table.Column) case tableNameTiDBProfileBlock: fullRows, err = (&profile.Collector{}).ProfileGraph("block") case tableNameTiDBProfileGoroutines: - fullRows, err = (&profile.Collector{}).Goroutines() + fullRows, err = (&profile.Collector{}).ProfileGraph("goroutine") case tableNameTiKVProfileCPU: - fullRows, err = dataForTiKVProfileCPU(ctx) + interval := fmt.Sprintf("%d", profile.CPUProfileInterval/time.Second) + fullRows, err = dataForRemoteProfile(ctx, "tikv", "/debug/pprof/profile?seconds="+interval, false) + case tableNamePDProfileCPU: + interval := fmt.Sprintf("%d", profile.CPUProfileInterval/time.Second) + fullRows, err = dataForRemoteProfile(ctx, "pd", "/pd/api/v1/debug/pprof/profile?seconds="+interval, false) + case tableNamePDProfileMemory: + fullRows, err = dataForRemoteProfile(ctx, "pd", "/pd/api/v1/debug/pprof/heap", false) + case tableNamePDProfileMutex: + fullRows, err = dataForRemoteProfile(ctx, "pd", "/pd/api/v1/debug/pprof/mutex", false) + case tableNamePDProfileAllocs: + fullRows, err = dataForRemoteProfile(ctx, "pd", "/pd/api/v1/debug/pprof/allocs", false) + case tableNamePDProfileBlock: + fullRows, err = dataForRemoteProfile(ctx, "pd", "/pd/api/v1/debug/pprof/block", false) + case tableNamePDProfileGoroutines: + fullRows, err = dataForRemoteProfile(ctx, "pd", "/pd/api/v1/debug/pprof/goroutine?debug=2", true) } if err != nil { return @@ -163,15 +232,29 @@ func (vt *perfSchemaTable) IterRecords(ctx sessionctx.Context, startKey kv.Key, return nil } -func dataForTiKVProfileCPU(ctx sessionctx.Context) ([][]types.Datum, error) { - servers, err := infoschema.GetTiKVServerInfo(ctx) - failpoint.Inject("mockTiKVNodeStatusAddress", func(val failpoint.Value) { +func dataForRemoteProfile(ctx sessionctx.Context, nodeType, uri string, isGoroutine bool) ([][]types.Datum, error) { + var ( + servers []infoschema.ServerInfo + err error + ) + switch nodeType { + case "tikv": + servers, err = infoschema.GetTiKVServerInfo(ctx) + case "pd": + servers, err = infoschema.GetPDServerInfo(ctx) + default: + return nil, errors.Errorf("remote profiling is not supported for the %s component", nodeType) + } + failpoint.Inject("mockRemoteNodeStatusAddress", func(val failpoint.Value) { // The cluster topology is injected by the `failpoint` expression and // there are no extra checks for it
(let the test fail if the expression is invalid). if s := val.(string); len(s) > 0 { servers = servers[:0] for _, server := range strings.Split(s, ";") { parts := strings.Split(server, ",") + if parts[0] != nodeType { + continue + } servers = append(servers, infoschema.ServerInfo{ ServerType: parts[0], Address: parts[1], @@ -205,13 +288,15 @@ func dataForTiKVProfileCPU(ctx sessionctx.Context) ([][]types.Datum, error) { go func(address string) { util.WithRecovery(func() { defer wg.Done() - interval := int(profile.CPUProfileInterval / time.Second) - url := fmt.Sprintf("http://%s/debug/pprof/profile?seconds=%d", statusAddr, interval) + url := fmt.Sprintf("http://%s%s", statusAddr, uri) req, err := http.NewRequest(http.MethodGet, url, nil) if err != nil { ch <- result{err: errors.Trace(err)} return } + // Let a PD follower handle the request itself instead of proxying it to the leader + req.Header.Add("PD-Allow-follower-handle", "true") + // TiKV outputs the svg format by default, so ask for protobuf explicitly req.Header.Add("Content-Type", "application/protobuf") resp, err := http.DefaultClient.Do(req) if err != nil { @@ -222,11 +307,16 @@ func dataForTiKVProfileCPU(ctx sessionctx.Context) ([][]types.Datum, error) { terror.Log(resp.Body.Close()) }() if resp.StatusCode != http.StatusOK { - ch <- result{err: errors.Errorf("request %s failed: %s", statusAddr, resp.Status)} + ch <- result{err: errors.Errorf("request %s failed: %s", url, resp.Status)} return } collector := profile.Collector{} - rows, err := collector.ProfileReaderToDatums(resp.Body) + var rows [][]types.Datum + if isGoroutine { + rows, err = collector.ParseGoroutines(resp.Body) + } else { + rows, err = collector.ProfileReaderToDatums(resp.Body) + } if err != nil { ch <- result{err: errors.Trace(err)} return diff --git a/infoschema/perfschema/tables_test.go b/infoschema/perfschema/tables_test.go index fb3e7566ff169..a052e19586707 100644 --- a/infoschema/perfschema/tables_test.go +++ b/infoschema/perfschema/tables_test.go @@ -21,6 +21,7 @@ import ( "os" "path/filepath" "runtime" + "runtime/pprof" "strings" "testing" @@ -209,28 +210,40 @@ func (s *testTableSuite) TestTiKVProfileCPU(c *C) { mockServer := httptest.NewServer(router) mockAddr := strings.TrimPrefix(mockServer.URL, "http://") - // mock tikv profile - router.HandleFunc("/debug/pprof/profile", func(w http.ResponseWriter, _ *http.Request) { - file, err := os.Open(filepath.Join(currentSourceDir(), "testdata/tikv.cpu.profile")) - if err != nil { - http.Error(w, err.Error(), http.StatusBadRequest) - return + copyHandler := func(filename string) http.HandlerFunc { + return func(w http.ResponseWriter, _ *http.Request) { + file, err := os.Open(filepath.Join(currentSourceDir(), filename)) + if err != nil { + http.Error(w, err.Error(), http.StatusBadRequest) + return + } + defer func() { terror.Log(file.Close()) }() + _, err = io.Copy(w, file) + terror.Log(err) + } - } - defer func() { terror.Log(file.Close()) }() - _, err = io.Copy(w, file) - terror.Log(err) - }) + } + // mock tikv profile + router.HandleFunc("/debug/pprof/profile", copyHandler("testdata/tikv.cpu.profile")) // failpoint setting - fpExpr := strings.Join([]string{"tikv", mockAddr, mockAddr}, ",") - fpName := "github.com/pingcap/tidb/infoschema/perfschema/mockTiKVNodeStatusAddress" + servers := []string{ + strings.Join([]string{"tikv", mockAddr, mockAddr}, ","), + strings.Join([]string{"pd", mockAddr, mockAddr}, ","), + } + fpExpr := strings.Join(servers, ";") + fpName := "github.com/pingcap/tidb/infoschema/perfschema/mockRemoteNodeStatusAddress" c.Assert(failpoint.Enable(fpName, fmt.Sprintf(`return("%s")`, 
fpExpr)), IsNil) defer func() { c.Assert(failpoint.Disable(fpName), IsNil) }() tk := testkit.NewTestKit(c, s.store) tk.MustExec("use performance_schema") - tk.MustQuery("select function, percent_abs, percent_rel from tikv_profile_cpu where depth < 3").Check(testkit.Rows( + result := tk.MustQuery("select function, percent_abs, percent_rel from tikv_profile_cpu where depth < 3") + + warnings := tk.Se.GetSessionVars().StmtCtx.GetWarnings() + c.Assert(len(warnings), Equals, 0, Commentf("expect no warnings, but found: %+v", warnings)) + + result.Check(testkit.Rows( "root 100% 100%", "├─tikv::server::load_statistics::linux::ThreadLoadStatistics::record::h59facb8d680e7794 75.00% 75.00%", "│ └─procinfo::pid::stat::stat_task::h69e1aa2c331aebb6 75.00% 100%", @@ -256,4 +269,61 @@ func (s *testTableSuite) TestTiKVProfileCPU(c *C) { "│ └─core::iter::range::>::next::hdb23ceb766e7a91f 0.89% 100%", "└─::next::he129c78b3deb639d 0.89% 0.89%", " └─Unknown 0.89% 100%")) + + // We can use the profile of the current process to mock the PD profile, because PD + retrieves profiles in the same way as TiDB. The purpose of this test case is to make + sure that every profile HTTP API has been accessed. + accessed := map[string]struct{}{} + handlerFactory := func(name string, debug ...int) func(w http.ResponseWriter, _ *http.Request) { + debugLevel := 0 + if len(debug) > 0 { + debugLevel = debug[0] + } + return func(w http.ResponseWriter, _ *http.Request) { + profile := pprof.Lookup(name) + if profile == nil { + http.Error(w, fmt.Sprintf("profile %s not found", name), http.StatusBadRequest) + return + } + if err := profile.WriteTo(w, debugLevel); err != nil { + http.Error(w, err.Error(), http.StatusBadRequest) + return + } + accessed[name] = struct{}{} + } + } + + // mock PD profile + router.HandleFunc("/pd/api/v1/debug/pprof/profile", copyHandler("../../util/profile/testdata/test.pprof")) + router.HandleFunc("/pd/api/v1/debug/pprof/heap", handlerFactory("heap")) + router.HandleFunc("/pd/api/v1/debug/pprof/mutex", handlerFactory("mutex")) + router.HandleFunc("/pd/api/v1/debug/pprof/allocs", handlerFactory("allocs")) + router.HandleFunc("/pd/api/v1/debug/pprof/block", handlerFactory("block")) + router.HandleFunc("/pd/api/v1/debug/pprof/goroutine", handlerFactory("goroutine", 2)) + + tk.MustQuery("select * from pd_profile_cpu where depth < 3") + warnings = tk.Se.GetSessionVars().StmtCtx.GetWarnings() + c.Assert(len(warnings), Equals, 0, Commentf("expect no warnings, but found: %+v", warnings)) + + tk.MustQuery("select * from pd_profile_memory where depth < 3") + warnings = tk.Se.GetSessionVars().StmtCtx.GetWarnings() + c.Assert(len(warnings), Equals, 0, Commentf("expect no warnings, but found: %+v", warnings)) + + tk.MustQuery("select * from pd_profile_mutex where depth < 3") + warnings = tk.Se.GetSessionVars().StmtCtx.GetWarnings() + c.Assert(len(warnings), Equals, 0, Commentf("expect no warnings, but found: %+v", warnings)) + + tk.MustQuery("select * from pd_profile_allocs where depth < 3") + warnings = tk.Se.GetSessionVars().StmtCtx.GetWarnings() + c.Assert(len(warnings), Equals, 0, Commentf("expect no warnings, but found: %+v", warnings)) + + tk.MustQuery("select * from pd_profile_block where depth < 3") + warnings = tk.Se.GetSessionVars().StmtCtx.GetWarnings() + c.Assert(len(warnings), Equals, 0, Commentf("expect no warnings, but found: %+v", warnings)) + + tk.MustQuery("select * from pd_profile_goroutines") + warnings = tk.Se.GetSessionVars().StmtCtx.GetWarnings() + c.Assert(len(warnings), Equals, 0, 
Commentf("expect no warnings, but found: %+v", warnings)) + + c.Assert(len(accessed), Equals, 5, Commentf("expect all HTTP API had been accessed, but found: %v", accessed)) } diff --git a/infoschema/tables.go b/infoschema/tables.go index eedda6bd585b6..b4a06264ccfc8 100644 --- a/infoschema/tables.go +++ b/infoschema/tables.go @@ -98,12 +98,63 @@ const ( tableTiKVRegionStatus = "TIKV_REGION_STATUS" tableTiKVRegionPeers = "TIKV_REGION_PEERS" tableTiDBServersInfo = "TIDB_SERVERS_INFO" - tableTiDBClusterInfo = "TIDB_CLUSTER_INFO" - tableTiDBClusterConfig = "TIDB_CLUSTER_CONFIG" - tableTiDBClusterLoad = "TIDB_CLUSTER_LOAD" - tableTiFlashReplica = "TIFLASH_REPLICA" + tableTiDBClusterInfo = "CLUSTER_INFO" + // TableTiDBClusterConfig is the string constant of cluster configuration memory table + TableTiDBClusterConfig = "CLUSTER_CONFIG" + tableTiDBClusterLoad = "CLUSTER_LOAD" + tableTiFlashReplica = "TIFLASH_REPLICA" ) +var tableIDMap = map[string]int64{ + tableSchemata: autoid.InformationSchemaDBID + 1, + tableTables: autoid.InformationSchemaDBID + 2, + tableColumns: autoid.InformationSchemaDBID + 3, + tableColumnStatistics: autoid.InformationSchemaDBID + 4, + tableStatistics: autoid.InformationSchemaDBID + 5, + tableCharacterSets: autoid.InformationSchemaDBID + 6, + tableCollations: autoid.InformationSchemaDBID + 7, + tableFiles: autoid.InformationSchemaDBID + 8, + catalogVal: autoid.InformationSchemaDBID + 9, + tableProfiling: autoid.InformationSchemaDBID + 10, + tablePartitions: autoid.InformationSchemaDBID + 11, + tableKeyColumm: autoid.InformationSchemaDBID + 12, + tableReferConst: autoid.InformationSchemaDBID + 13, + tableSessionVar: autoid.InformationSchemaDBID + 14, + tablePlugins: autoid.InformationSchemaDBID + 15, + tableConstraints: autoid.InformationSchemaDBID + 16, + tableTriggers: autoid.InformationSchemaDBID + 17, + tableUserPrivileges: autoid.InformationSchemaDBID + 18, + tableSchemaPrivileges: autoid.InformationSchemaDBID + 19, + tableTablePrivileges: autoid.InformationSchemaDBID + 20, + tableColumnPrivileges: autoid.InformationSchemaDBID + 21, + tableEngines: autoid.InformationSchemaDBID + 22, + tableViews: autoid.InformationSchemaDBID + 23, + tableRoutines: autoid.InformationSchemaDBID + 24, + tableParameters: autoid.InformationSchemaDBID + 25, + tableEvents: autoid.InformationSchemaDBID + 26, + tableGlobalStatus: autoid.InformationSchemaDBID + 27, + tableGlobalVariables: autoid.InformationSchemaDBID + 28, + tableSessionStatus: autoid.InformationSchemaDBID + 29, + tableOptimizerTrace: autoid.InformationSchemaDBID + 30, + tableTableSpaces: autoid.InformationSchemaDBID + 31, + tableCollationCharacterSetApplicability: autoid.InformationSchemaDBID + 32, + tableProcesslist: autoid.InformationSchemaDBID + 33, + tableTiDBIndexes: autoid.InformationSchemaDBID + 34, + tableSlowLog: autoid.InformationSchemaDBID + 35, + tableTiDBHotRegions: autoid.InformationSchemaDBID + 36, + tableTiKVStoreStatus: autoid.InformationSchemaDBID + 37, + tableAnalyzeStatus: autoid.InformationSchemaDBID + 38, + tableTiKVRegionStatus: autoid.InformationSchemaDBID + 39, + tableTiKVRegionPeers: autoid.InformationSchemaDBID + 40, + tableTiDBServersInfo: autoid.InformationSchemaDBID + 41, + tableTiDBClusterInfo: autoid.InformationSchemaDBID + 42, + TableTiDBClusterConfig: autoid.InformationSchemaDBID + 43, + tableTiDBClusterLoad: autoid.InformationSchemaDBID + 44, + tableTiFlashReplica: autoid.InformationSchemaDBID + 45, + clusterTableSlowLog: autoid.InformationSchemaDBID + 46, + clusterTableProcesslist: 
autoid.InformationSchemaDBID + 47, +} + type columnInfo struct { name string tp byte @@ -983,7 +1034,7 @@ func dataForProcesslist(ctx sessionctx.Context) [][]types.Datum { for _, pi := range pl { // If you have the PROCESS privilege, you can see all threads. // Otherwise, you can see only your own threads. - if !hasProcessPriv && pi.User != loginUser.Username { + if !hasProcessPriv && loginUser != nil && pi.User != loginUser.Username { continue } @@ -1561,10 +1612,15 @@ func dataForColumnsInTable(schema *model.DBInfo, tbl *model.TableInfo) [][]types return rows } -func dataForStatistics(schemas []*model.DBInfo) [][]types.Datum { +func dataForStatistics(ctx sessionctx.Context, schemas []*model.DBInfo) [][]types.Datum { + checker := privilege.GetPrivilegeManager(ctx) var rows [][]types.Datum for _, schema := range schemas { for _, table := range schema.Tables { + if checker != nil && !checker.RequestVerification(ctx.GetSessionVars().ActiveRoles, schema.Name.L, table.Name.L, "", mysql.AllPrivMask) { + continue + } + rs := dataForStatisticsInTable(schema, table) rows = append(rows, rs...) } @@ -2328,7 +2384,7 @@ var tableNameToColumns = map[string][]columnInfo{ tableTiKVRegionPeers: tableTiKVRegionPeersCols, tableTiDBServersInfo: tableTiDBServersInfoCols, tableTiDBClusterInfo: tableTiDBClusterInfoCols, - tableTiDBClusterConfig: tableTiDBClusterConfigCols, + TableTiDBClusterConfig: tableTiDBClusterConfigCols, tableTiDBClusterLoad: tableTiDBClusterLoadCols, tableTiFlashReplica: tableTableTiFlashReplicaCols, } @@ -2338,12 +2394,17 @@ func createInfoSchemaTable(_ autoid.Allocator, meta *model.TableInfo) (table.Tab for i, col := range meta.Columns { columns[i] = table.ToColumn(col) } - return &infoschemaTable{meta: meta, cols: columns}, nil + tp := table.VirtualTable + if isClusterTableByName(util.InformationSchemaName, meta.Name.L) { + tp = table.ClusterTable + } + return &infoschemaTable{meta: meta, cols: columns, tp: tp}, nil } type infoschemaTable struct { meta *model.TableInfo cols []*table.Column + tp table.Type } // schemasSorter implements the sort.Interface interface, sorts DBInfo by name. @@ -2362,7 +2423,7 @@ func (s schemasSorter) Less(i, j int) bool { } func (it *infoschemaTable) getRows(ctx sessionctx.Context, cols []*table.Column) (fullRows [][]types.Datum, err error) { - is := ctx.GetSessionVars().TxnCtx.InfoSchema.(InfoSchema) + is := GetInfoSchema(ctx) dbs := is.AllSchemas() sort.Sort(schemasSorter(dbs)) switch it.meta.Name.O { @@ -2375,7 +2436,7 @@ func (it *infoschemaTable) getRows(ctx sessionctx.Context, cols []*table.Column) case tableColumns: fullRows = dataForColumns(ctx, dbs) case tableStatistics: - fullRows = dataForStatistics(dbs) + fullRows = dataForStatistics(ctx, dbs) case tableCharacterSets: fullRows = dataForCharacterSets() case tableCollations: @@ -2432,12 +2493,15 @@ func (it *infoschemaTable) getRows(ctx sessionctx.Context, cols []*table.Column) fullRows, err = dataForServersInfo() case tableTiDBClusterInfo: fullRows, err = dataForTiDBClusterInfo(ctx) - case tableTiDBClusterConfig: + case TableTiDBClusterConfig: fullRows, err = dataForClusterConfig(ctx) case tableTiDBClusterLoad: fullRows, err = dataForClusterLoadInfo(ctx) case tableTiFlashReplica: fullRows = dataForTableTiFlashReplica(dbs) + // Data for cluster memory table. 
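// (Editorial note: getClusterMemTableRows is expected to gather these rows from
// the TiDB nodes of the cluster; TestSelectClusterTable below exercises it end to
// end through CLUSTER_SLOW_QUERY and CLUSTER_PROCESSLIST.)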
+ case clusterTableSlowLog, clusterTableProcesslist: + fullRows, err = getClusterMemTableRows(ctx, it.meta.Name.O) } if err != nil { return nil, err } @@ -2585,7 +2649,7 @@ func (it *infoschemaTable) Seek(ctx sessionctx.Context, h int64) (int64, bool, e // Type implements table.Table Type interface. func (it *infoschemaTable) Type() table.Type { - return table.VirtualTable + return it.tp } // VirtualTable is a dummy table.Table implementation. diff --git a/infoschema/tables_test.go b/infoschema/tables_test.go index 4b1a447848d64..00759547c5345 100644 --- a/infoschema/tables_test.go +++ b/infoschema/tables_test.go @@ -37,6 +37,7 @@ import ( "github.com/pingcap/tidb/domain/infosync" "github.com/pingcap/tidb/infoschema" "github.com/pingcap/tidb/kv" + "github.com/pingcap/tidb/meta/autoid" "github.com/pingcap/tidb/server" "github.com/pingcap/tidb/session" "github.com/pingcap/tidb/statistics" @@ -72,9 +73,9 @@ func (s *testTableSuite) SetUpSuite(c *C) { } func (s *testTableSuite) TearDownSuite(c *C) { - defer testleak.AfterTest(c)() s.dom.Close() s.store.Close() + testleak.AfterTest(c)() } type testClusterTableSuite struct { @@ -86,14 +87,22 @@ type testClusterTableSuite struct { func (s *testClusterTableSuite) SetUpSuite(c *C) { s.testTableSuite.SetUpSuite(c) - s.rpcserver = setUpRPCService(c, "0.0.0.0:10080") + s.rpcserver = s.setUpRPCService(c, "0.0.0.0:10080") s.httpServer, s.mockAddr = setUpMockPDHTTPSercer() } -func setUpRPCService(c *C, addr string) *grpc.Server { +func (s *testClusterTableSuite) setUpRPCService(c *C, addr string) *grpc.Server { lis, err := net.Listen("tcp", addr) c.Assert(err, IsNil) - srv := server.NewRPCServer(config.GetGlobalConfig().Security) + // Fix issue 9836 + sm := &mockSessionManager{make(map[uint64]*util.ProcessInfo, 1)} + sm.processInfoMap[1] = &util.ProcessInfo{ + ID: 1, + User: "root", + Host: "127.0.0.1", + Command: mysql.ComQuery, + } + srv := server.NewRPCServer(config.GetGlobalConfig().Security, s.dom, sm) go func() { err = srv.Serve(lis) c.Assert(err, IsNil) @@ -158,7 +167,6 @@ func setUpMockPDHTTPSercer() (*httptest.Server, string) { } func (s *testClusterTableSuite) TearDownSuite(c *C) { - s.testTableSuite.TearDownSuite(c) if s.rpcserver != nil { s.rpcserver.Stop() s.rpcserver = nil @@ -166,6 +174,7 @@ func (s *testClusterTableSuite) TearDownSuite(c *C) { if s.httpServer != nil { s.httpServer.Close() } + s.testTableSuite.TearDownSuite(c) } func (s *testTableSuite) TestInfoschemaFieldValue(c *C) { @@ -440,7 +449,50 @@ func (s *testTableSuite) TestSomeTables(c *C) { testkit.Rows("def mysql tbl def mysql stats_meta table_id 1 ")) tk.MustQuery("select * from information_schema.STATISTICS where TABLE_NAME='columns_priv' and COLUMN_NAME='Host';").Check( testkit.Rows("def mysql columns_priv 0 mysql PRIMARY 1 Host A BTREE ")) - tk.MustQuery("select * from information_schema.USER_PRIVILEGES where PRIVILEGE_TYPE='Select';").Check(testkit.Rows("'root'@'%' def Select YES")) + + // Test the privileges of a new user on information_schema + tk.MustExec("create user tester1") + tk1 := testkit.NewTestKit(c, s.store) + tk1.MustExec("use information_schema") + c.Assert(tk1.Se.Auth(&auth.UserIdentity{ + Username: "tester1", + Hostname: "127.0.0.1", + }, nil, nil), IsTrue) + tk1.MustQuery("select * from information_schema.STATISTICS;").Check([][]interface{}{}) + + // Test the privileges of a user who has some privileges on information_schema + tk.MustExec("create user tester2") + tk.MustExec("CREATE ROLE r_columns_priv;") + tk.MustExec("GRANT ALL PRIVILEGES ON mysql.columns_priv TO r_columns_priv;") + tk.MustExec("GRANT r_columns_priv TO tester2;") + tk2 := testkit.NewTestKit(c, s.store) + tk2.MustExec("use information_schema") + c.Assert(tk2.Se.Auth(&auth.UserIdentity{ + Username: "tester2", + Hostname: "127.0.0.1", + }, nil, nil), IsTrue) + tk2.MustExec("set role r_columns_priv") + tk2.MustQuery("select * from information_schema.STATISTICS where TABLE_NAME='columns_priv' and COLUMN_NAME='Host';").Check( + testkit.Rows("def mysql columns_priv 0 mysql PRIMARY 1 Host A BTREE ")) + tk2.MustQuery("select * from information_schema.STATISTICS where TABLE_NAME='tables_priv' and COLUMN_NAME='Host';").Check( + [][]interface{}{}) + + // Test the privileges of a user who has all privileges on information_schema + tk.MustExec("create user tester3") + tk.MustExec("CREATE ROLE r_all_priv;") + tk.MustExec("GRANT ALL PRIVILEGES ON mysql.* TO r_all_priv;") + tk.MustExec("GRANT r_all_priv TO tester3;") + tk3 := testkit.NewTestKit(c, s.store) + tk3.MustExec("use information_schema") + c.Assert(tk3.Se.Auth(&auth.UserIdentity{ + Username: "tester3", + Hostname: "127.0.0.1", + }, nil, nil), IsTrue) + tk3.MustExec("set role r_all_priv") + tk3.MustQuery("select * from information_schema.STATISTICS where TABLE_NAME='columns_priv' and COLUMN_NAME='Host';").Check( + testkit.Rows("def mysql columns_priv 0 mysql PRIMARY 1 Host A BTREE ")) + tk3.MustQuery("select * from information_schema.STATISTICS where TABLE_NAME='tables_priv' and COLUMN_NAME='Host';").Check( + testkit.Rows("def mysql tables_priv 0 mysql PRIMARY 1 Host A BTREE ")) sm := &mockSessionManager{make(map[uint64]*util.ProcessInfo, 2)} sm.processInfoMap[1] = &util.ProcessInfo{ @@ -559,6 +611,35 @@ func (s *testTableSuite) TestTableIDAndIndexID(c *C) { tk.MustQuery("select * from information_schema.tidb_indexes where table_schema = 'test' and table_name = 't'").Check(testkit.Rows("test t 0 PRIMARY 1 a 0", "test t 1 k1 1 b 1")) } +func prepareSlowLogfile(c *C, slowLogFileName string) { + f, err := os.OpenFile(slowLogFileName, os.O_CREATE|os.O_WRONLY, 0644) + c.Assert(err, IsNil) + _, err = f.Write([]byte(`# Time: 2019-02-12T19:33:56.571953+08:00 +# Txn_start_ts: 406315658548871171 +# User: root@127.0.0.1 +# Conn_ID: 6 +# Query_time: 4.895492 +# Parse_time: 0.4 +# Compile_time: 0.2 +# Request_count: 1 Prewrite_time: 0.19 Commit_time: 0.01 Commit_backoff_time: 0.18 Backoff_types: [txnLock] Resolve_lock_time: 0.03 Write_keys: 15 Write_size: 480 Prewrite_region: 1 Txn_retry: 8 +# Process_time: 0.161 Request_count: 1 Total_keys: 100001 Process_keys: 100000 +# Wait_time: 0.101 +# Backoff_time: 0.092 +# DB: test +# Is_internal: false +# Digest: 42a1c8aae6f133e934d4bf0147491709a8812ea05ff8819ec522780fe657b772 +# Stats: t1:1,t2:2 +# Cop_proc_avg: 0.1 Cop_proc_p90: 0.2 Cop_proc_max: 0.03 Cop_proc_addr: 127.0.0.1:20160 +# Cop_wait_avg: 0.05 Cop_wait_p90: 0.6 Cop_wait_max: 0.8 Cop_wait_addr: 0.0.0.0:20160 +# Mem_max: 70724 +# Succ: true +# Plan: abcd +# Prev_stmt: update t set i = 2; +select * from t_slim;`)) + c.Assert(f.Sync(), IsNil) + c.Assert(err, IsNil) +} + func (s *testTableSuite) TestTableRowIDShardingInfo(c *C) { tk := testkit.NewTestKit(c, s.store) tk.MustExec("DROP DATABASE IF EXISTS `sharding_info_test_db`") @@ -605,33 +686,8 @@ func (s *testTableSuite) TestSlowQuery(c *C) { tk := testkit.NewTestKit(c, s.store) // Prepare slow log file. 
slowLogFileName := "tidb_slow.log" - f, err := os.OpenFile(slowLogFileName, os.O_CREATE|os.O_WRONLY, 0644) - c.Assert(err, IsNil) + prepareSlowLogfile(c, slowLogFileName) defer os.Remove(slowLogFileName) - _, err = f.Write([]byte(`# Time: 2019-02-12T19:33:56.571953+08:00 -# Txn_start_ts: 406315658548871171 -# User: root@127.0.0.1 -# Conn_ID: 6 -# Query_time: 4.895492 -# Parse_time: 0.4 -# Compile_time: 0.2 -# Request_count: 1 Prewrite_time: 0.19 Commit_time: 0.01 Commit_backoff_time: 0.18 Backoff_types: [txnLock] Resolve_lock_time: 0.03 Write_keys: 15 Write_size: 480 Prewrite_region: 1 Txn_retry: 8 -# Process_time: 0.161 Request_count: 1 Total_keys: 100001 Process_keys: 100000 -# Wait_time: 0.101 -# Backoff_time: 0.092 -# DB: test -# Is_internal: false -# Digest: 42a1c8aae6f133e934d4bf0147491709a8812ea05ff8819ec522780fe657b772 -# Stats: t1:1,t2:2 -# Cop_proc_avg: 0.1 Cop_proc_p90: 0.2 Cop_proc_max: 0.03 Cop_proc_addr: 127.0.0.1:20160 -# Cop_wait_avg: 0.05 Cop_wait_p90: 0.6 Cop_wait_max: 0.8 Cop_wait_addr: 0.0.0.0:20160 -# Mem_max: 70724 -# Succ: true -# Plan: abcd -# Prev_stmt: update t set i = 2; -select * from t_slim;`)) - c.Assert(f.Sync(), IsNil) - c.Assert(err, IsNil) tk.MustExec(fmt.Sprintf("set @@tidb_slow_query_file='%v'", slowLogFileName)) tk.MustExec("set time_zone = '+08:00';") @@ -643,6 +699,9 @@ select * from t_slim;`)) re.Check(testutil.RowsWithSep("|", "2019-02-12 11:33:56.571953|406315658548871171|root|127.0.0.1|6|4.895492|0.4|0.2|0.19|0.01|0|0.18|[txnLock]|0.03|0|15|480|1|8|0.161|0.101|0.092|1|100001|100000|test||0|42a1c8aae6f133e934d4bf0147491709a8812ea05ff8819ec522780fe657b772|t1:1,t2:2|0.1|0.2|0.03|127.0.0.1:20160|0.05|0.6|0.8|0.0.0.0:20160|70724|1|abcd|update t set i = 2;|select * from t_slim;")) // Test for long query. + f, err := os.OpenFile(slowLogFileName, os.O_CREATE|os.O_WRONLY, 0644) + c.Assert(err, IsNil) + defer f.Close() _, err = f.Write([]byte(` # Time: 2019-02-13T19:33:56.571953+08:00 `)) @@ -726,7 +785,7 @@ func (s *mockStore) StartGCWorker() error { panic("not implemented") } func (s *testClusterTableSuite) TestTiDBClusterInfo(c *C) { tk := testkit.NewTestKit(c, s.store) - err := tk.QueryToErr("select * from information_schema.tidb_cluster_info") + err := tk.QueryToErr("select * from information_schema.cluster_info") c.Assert(err, NotNil) mockAddr := s.mockAddr store := &mockStore{ @@ -734,7 +793,7 @@ func (s *testClusterTableSuite) TestTiDBClusterInfo(c *C) { mockAddr, } tk = testkit.NewTestKit(c, store) - tk.MustQuery("select * from information_schema.tidb_cluster_info").Check(testkit.Rows( + tk.MustQuery("select * from information_schema.cluster_info").Check(testkit.Rows( "tidb :4000 :10080 5.7.25-TiDB-None None", "pd "+mockAddr+" "+mockAddr+" 4.0.0-alpha mock-pd-githash", "tikv 127.0.0.1:20160 "+mockAddr+" 4.0.0-alpha mock-tikv-githash", @@ -748,7 +807,7 @@ func (s *testClusterTableSuite) TestTiDBClusterInfo(c *C) { fpExpr := `return("` + strings.Join(instances, ";") + `")` c.Assert(failpoint.Enable("github.com/pingcap/tidb/infoschema/mockClusterInfo", fpExpr), IsNil) defer func() { c.Assert(failpoint.Disable("github.com/pingcap/tidb/infoschema/mockClusterInfo"), IsNil) }() - tk.MustQuery("select * from information_schema.tidb_cluster_config").Check(testkit.Rows( + tk.MustQuery("select * from information_schema.cluster_config").Check(testkit.Rows( "pd 127.0.0.1:11080 key1 value1", "pd 127.0.0.1:11080 key2.nest1 n-value1", "pd 127.0.0.1:11080 key2.nest2 n-value2", @@ -771,7 +830,7 @@ func (s *testClusterTableSuite) TestTiDBClusterInfo(c *C) { "tikv 
127.0.0.1:11080 key3.nest1 n-value1", "tikv 127.0.0.1:11080 key3.nest2 n-value2", )) - tk.MustQuery("select TYPE, `KEY`, VALUE from information_schema.tidb_cluster_config where `key`='key3.key4.nest4' order by type").Check(testkit.Rows( + tk.MustQuery("select TYPE, `KEY`, VALUE from information_schema.cluster_config where `key`='key3.key4.nest4' order by type").Check(testkit.Rows( "pd key3.key4.nest4 n-value5", "tidb key3.key4.nest4 n-value5", "tikv key3.key4.nest4 n-value5", @@ -822,7 +881,7 @@ func (s *testClusterTableSuite) TestForClusterServerInfo(c *C) { c.Assert(failpoint.Enable("github.com/pingcap/tidb/infoschema/mockClusterInfo", fpExpr), IsNil) defer func() { c.Assert(failpoint.Disable("github.com/pingcap/tidb/infoschema/mockClusterInfo"), IsNil) }() - re := tk.MustQuery("select * from information_schema.TIDB_CLUSTER_LOAD;") + re := tk.MustQuery("select * from information_schema.CLUSTER_LOAD;") rows := re.Rows() c.Assert(len(rows), Greater, 0) @@ -852,3 +911,45 @@ func (s *testClusterTableSuite) TestForClusterServerInfo(c *C) { c.Assert(len(addrMap), Equals, 0) c.Assert(len(nameMap), Equals, 0) } + +func (s *testTableSuite) TestSystemSchemaID(c *C) { + uniqueIDMap := make(map[int64]string) + s.checkSystemSchemaTableID(c, "information_schema", autoid.SystemSchemaIDFlag|1, 1, 10000, uniqueIDMap) + s.checkSystemSchemaTableID(c, "performance_schema", autoid.SystemSchemaIDFlag|10000, 10000, 20000, uniqueIDMap) +} + +func (s *testTableSuite) checkSystemSchemaTableID(c *C, dbName string, dbID, start, end int64, uniqueIDMap map[int64]string) { + is := s.dom.InfoSchema() + c.Assert(is, NotNil) + db, ok := is.SchemaByName(model.NewCIStr(dbName)) + c.Assert(ok, IsTrue) + c.Assert(db.ID, Equals, dbID) + // Test for information_schema table id. + tables := is.SchemaTables(model.NewCIStr(dbName)) + c.Assert(len(tables), Greater, 0) + for _, tbl := range tables { + tid := tbl.Meta().ID + comment := Commentf("table name is %v", tbl.Meta().Name) + c.Assert(tid&autoid.SystemSchemaIDFlag, Greater, int64(0), comment) + c.Assert(tid&^autoid.SystemSchemaIDFlag, Greater, start, comment) + c.Assert(tid&^autoid.SystemSchemaIDFlag, Less, end, comment) + name, ok := uniqueIDMap[tid] + c.Assert(ok, IsFalse, Commentf("schema id of %v is duplicate with %v, both is %v", name, tbl.Meta().Name, tid)) + uniqueIDMap[tid] = tbl.Meta().Name.O + } +} + +func (s *testClusterTableSuite) TestSelectClusterTable(c *C) { + tk := testkit.NewTestKit(c, s.store) + slowLogFileName := "tidb-slow.log" + prepareSlowLogfile(c, slowLogFileName) + defer os.Remove(slowLogFileName) + tk.MustExec("use information_schema") + tk.MustQuery("select count(*) from `CLUSTER_SLOW_QUERY`").Check(testkit.Rows("1")) + tk.MustQuery("select count(*) from `CLUSTER_PROCESSLIST`").Check(testkit.Rows("1")) + tk.MustQuery("select * from `CLUSTER_PROCESSLIST`").Check(testkit.Rows("1 root 127.0.0.1 Query 9223372036 0 0 :10080")) + tk.MustQuery("select query_time, conn_id from `CLUSTER_SLOW_QUERY` order by time limit 1").Check(testkit.Rows("4.895492 6")) + tk.MustQuery("select count(*) from `CLUSTER_SLOW_QUERY` group by digest").Check(testkit.Rows("1")) + tk.MustQuery("select digest, count(*) from `CLUSTER_SLOW_QUERY` group by digest").Check(testkit.Rows("42a1c8aae6f133e934d4bf0147491709a8812ea05ff8819ec522780fe657b772 1")) + tk.MustQuery("select count(*) from `CLUSTER_SLOW_QUERY` where time > now() group by digest").Check(testkit.Rows()) +} diff --git a/kv/kv.go b/kv/kv.go index 6e4060cf91ca4..137064a1d4fc0 100644 --- a/kv/kv.go +++ b/kv/kv.go @@ -226,12 
+226,16 @@ const ( TiKV StoreType = iota // TiFlash means the type of a store is TiFlash. TiFlash + // TiDB means the type of a store is TiDB. + TiDB ) // Name returns the name of store type. func (t StoreType) Name() string { if t == TiFlash { return "tiflash" + } else if t == TiDB { + return "tidb" } return "tikv" } @@ -356,8 +360,8 @@ type Iterator interface { Close() } -// SplitableStore is the kv store which supports split regions. -type SplitableStore interface { +// SplittableStore is the kv store which supports split regions. +type SplittableStore interface { SplitRegions(ctx context.Context, splitKey [][]byte, scatter bool) (regionID []uint64, err error) WaitScatterRegionFinish(regionID uint64, backOff int) error CheckRegionInScattering(regionID uint64) (bool, error) diff --git a/meta/autoid/autoid.go b/meta/autoid/autoid.go index 0b65c84595e13..5e7084c322aca 100755 --- a/meta/autoid/autoid.go +++ b/meta/autoid/autoid.go @@ -17,7 +17,6 @@ import ( "context", "math", "sync" - "sync/atomic" "time" "github.com/cznic/mathutil" @@ -31,6 +30,19 @@ import ( "go.uber.org/zap" ) +// Attention: +// For reading cluster TiDB memory tables, the system schema/table IDs need to be the same on every TiDB node. +// Once a system schema/table ID has been allocated, it can't be changed any more. +// Changing a system schema/table ID may cause compatibility problems. +const ( + // SystemSchemaIDFlag marks system schema/table IDs by setting the highest bit of a positive int64; it's exported for tests. + SystemSchemaIDFlag = 1 << 62 + // InformationSchemaDBID is the information_schema schema ID; it's exported for tests. + InformationSchemaDBID int64 = SystemSchemaIDFlag | 1 + // PerformanceSchemaDBID is the performance_schema schema ID; it's exported for tests. + PerformanceSchemaDBID int64 = SystemSchemaIDFlag | 10000 +) + const ( minStep = 30000 maxStep = 2000000 @@ -250,16 +262,9 @@ func NewAllocator(store kv.Storage, dbID int64, isUnsigned bool) Allocator { } } -//codeInvalidTableID is the code of autoid error. +// codeInvalidTableID is the code of autoid error. const codeInvalidTableID terror.ErrCode = 1 -var localSchemaID = int64(math.MaxInt64) - -// GenLocalSchemaID generates a local schema ID. -func GenLocalSchemaID() int64 { - return atomic.AddInt64(&localSchemaID, -1) -} - // Alloc implements autoid.Allocator Alloc interface. 
func (alloc *allocator) Alloc(tableID int64, n uint64) (int64, int64, error) { if tableID == 0 { diff --git a/planner/cascades/enforcer_rules_test.go b/planner/cascades/enforcer_rules_test.go index 6aa337dc5ed11..94cc96bd31991 100644 --- a/planner/cascades/enforcer_rules_test.go +++ b/planner/cascades/enforcer_rules_test.go @@ -22,7 +22,7 @@ import ( func (s *testCascadesSuite) TestGetEnforcerRules(c *C) { prop := &property.PhysicalProperty{} - group := memo.NewGroupWithSchema(nil, nil) + group := memo.NewGroupWithSchema(nil, expression.NewSchema()) enforcers := GetEnforcerRules(group, prop) c.Assert(enforcers, IsNil) col := &expression.Column{} @@ -37,7 +37,7 @@ func (s *testCascadesSuite) TestGetEnforcerRules(c *C) { func (s *testCascadesSuite) TestNewProperties(c *C) { prop := &property.PhysicalProperty{} col := &expression.Column{} - group := memo.NewGroupWithSchema(nil, nil) + group := memo.NewGroupWithSchema(nil, expression.NewSchema()) prop.Items = append(prop.Items, property.Item{Col: col}) enforcers := GetEnforcerRules(group, prop) orderEnforcer, _ := enforcers[0].(*OrderEnforcer) diff --git a/planner/cascades/implementation_rules.go b/planner/cascades/implementation_rules.go index 0af8b67bc8b72..b54e12f2f60f8 100644 --- a/planner/cascades/implementation_rules.go +++ b/planner/cascades/implementation_rules.go @@ -42,8 +42,11 @@ var defaultImplementationMap = map[memo.Operand][]ImplementationRule{ memo.OperandTableScan: { &ImplTableScan{}, }, - memo.OperandTableGather: { - &ImplTableGather{}, + memo.OperandIndexScan: { + &ImplIndexScan{}, + }, + memo.OperandTiKVSingleGather: { + &ImplTiKVSingleReadGather{}, }, memo.OperandShow: { &ImplShow{}, @@ -68,6 +71,9 @@ var defaultImplementationMap = map[memo.Operand][]ImplementationRule{ &ImplHashJoinBuildLeft{}, &ImplHashJoinBuildRight{}, }, + memo.OperandUnionAll: { + &ImplUnionAll{}, + }, } // ImplTableDual implements LogicalTableDual as PhysicalTableDual. @@ -117,21 +123,26 @@ func (r *ImplProjection) OnImplement(expr *memo.GroupExpr, reqProp *property.Phy return impl.NewProjectionImpl(proj), nil } -// ImplTableGather implements TableGather as PhysicalTableReader. -type ImplTableGather struct { +// ImplTiKVSingleReadGather implements TiKVSingleGather +// as PhysicalTableReader or PhysicalIndexReader. +type ImplTiKVSingleReadGather struct { } // Match implements ImplementationRule Match interface. -func (r *ImplTableGather) Match(expr *memo.GroupExpr, prop *property.PhysicalProperty) (matched bool) { +func (r *ImplTiKVSingleReadGather) Match(expr *memo.GroupExpr, prop *property.PhysicalProperty) (matched bool) { return true } // OnImplement implements ImplementationRule OnImplement interface. 
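// Editorial note: as the new body below shows, TiKVSingleGather is implemented as
// a PhysicalIndexReader when it gathers an index scan (IsIndexGather) and as a
// PhysicalTableReader otherwise; in both cases the child's stats are scaled by the
// expected row count of the required property.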
-func (r *ImplTableGather) OnImplement(expr *memo.GroupExpr, reqProp *property.PhysicalProperty) (memo.Implementation, error) { +func (r *ImplTiKVSingleReadGather) OnImplement(expr *memo.GroupExpr, reqProp *property.PhysicalProperty) (memo.Implementation, error) { logicProp := expr.Group.Prop - tg := expr.ExprNode.(*plannercore.TableGather) - reader := tg.GetPhysicalReader(logicProp.Schema, logicProp.Stats.ScaleByExpectCnt(reqProp.ExpectedCnt), reqProp) - return impl.NewTableReaderImpl(reader, tg.Source.TblColHists), nil + sg := expr.ExprNode.(*plannercore.TiKVSingleGather) + if sg.IsIndexGather { + reader := sg.GetPhysicalIndexReader(logicProp.Schema, logicProp.Stats.ScaleByExpectCnt(reqProp.ExpectedCnt), reqProp) + return impl.NewIndexReaderImpl(reader, sg.Source.TblColHists), nil + } + reader := sg.GetPhysicalTableReader(logicProp.Schema, logicProp.Stats.ScaleByExpectCnt(reqProp.ExpectedCnt), reqProp) + return impl.NewTableReaderImpl(reader, sg.Source.TblColHists), nil } // ImplTableScan implements TableScan as PhysicalTableScan. @@ -140,14 +151,14 @@ type ImplTableScan struct { // Match implements ImplementationRule Match interface. func (r *ImplTableScan) Match(expr *memo.GroupExpr, prop *property.PhysicalProperty) (matched bool) { - ts := expr.ExprNode.(*plannercore.TableScan) + ts := expr.ExprNode.(*plannercore.LogicalTableScan) return prop.IsEmpty() || (len(prop.Items) == 1 && ts.Handle != nil && prop.Items[0].Col.Equal(nil, ts.Handle)) } // OnImplement implements ImplementationRule OnImplement interface. func (r *ImplTableScan) OnImplement(expr *memo.GroupExpr, reqProp *property.PhysicalProperty) (memo.Implementation, error) { logicProp := expr.Group.Prop - logicalScan := expr.ExprNode.(*plannercore.TableScan) + logicalScan := expr.ExprNode.(*plannercore.LogicalTableScan) ts := logicalScan.GetPhysicalScan(logicProp.Schema, logicProp.Stats.ScaleByExpectCnt(reqProp.ExpectedCnt)) if !reqProp.IsEmpty() { ts.KeepOrder = true @@ -157,6 +168,29 @@ func (r *ImplTableScan) OnImplement(expr *memo.GroupExpr, reqProp *property.Phys return impl.NewTableScanImpl(ts, tblCols, tblColHists), nil } +// ImplIndexScan implements IndexScan as PhysicalIndexScan. +type ImplIndexScan struct { +} + +// Match implements ImplementationRule Match interface. +func (r *ImplIndexScan) Match(expr *memo.GroupExpr, prop *property.PhysicalProperty) (matched bool) { + is := expr.ExprNode.(*plannercore.LogicalIndexScan) + return is.MatchIndexProp(prop) +} + +// OnImplement implements ImplementationRule OnImplement interface. +func (r *ImplIndexScan) OnImplement(expr *memo.GroupExpr, reqProp *property.PhysicalProperty) (memo.Implementation, error) { + logicalScan := expr.ExprNode.(*plannercore.LogicalIndexScan) + is := logicalScan.GetPhysicalIndexScan(expr.Group.Prop.Schema, expr.Group.Prop.Stats.ScaleByExpectCnt(reqProp.ExpectedCnt)) + if !reqProp.IsEmpty() { + is.KeepOrder = true + if reqProp.Items[0].Desc { + is.Desc = true + } + } + return impl.NewIndexScanImpl(is, logicalScan.Source.TblColHists), nil +} + // ImplShow is the implementation rule which implements LogicalShow to // PhysicalShow. type ImplShow struct { @@ -407,3 +441,29 @@ func (r *ImplHashJoinBuildRight) OnImplement(expr *memo.GroupExpr, reqProp *prop return nil, nil } } + +// ImplUnionAll implements LogicalUnionAll to PhysicalUnionAll. +type ImplUnionAll struct { +} + +// Match implements ImplementationRule Match interface. 
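// Editorial note: a UnionAll cannot promise any row order, so the rule below only
// matches the empty physical property; an ordered requirement is satisfied by an
// order enforcer placed above the union, which is exactly the Sort over Union
// shape in the updated integration_suite_out.json ("... union all ... order by aa").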
+func (r *ImplUnionAll) Match(expr *memo.GroupExpr, prop *property.PhysicalProperty) (matched bool) { + return prop.IsEmpty() +} + +// OnImplement implements ImplementationRule OnImplement interface. +func (r *ImplUnionAll) OnImplement(expr *memo.GroupExpr, reqProp *property.PhysicalProperty) (memo.Implementation, error) { + logicalUnion := expr.ExprNode.(*plannercore.LogicalUnionAll) + chReqProps := make([]*property.PhysicalProperty, len(expr.Children)) + for i := range expr.Children { + chReqProps[i] = &property.PhysicalProperty{ExpectedCnt: reqProp.ExpectedCnt} + } + physicalUnion := plannercore.PhysicalUnionAll{}.Init( + logicalUnion.SCtx(), + expr.Group.Prop.Stats.ScaleByExpectCnt(reqProp.ExpectedCnt), + logicalUnion.SelectBlockOffset(), + chReqProps..., + ) + physicalUnion.SetSchema(expr.Group.Prop.Schema) + return impl.NewUnionAllImpl(physicalUnion), nil +} diff --git a/planner/cascades/integration_test.go b/planner/cascades/integration_test.go index 564db6ef20da9..8e29d97544001 100644 --- a/planner/cascades/integration_test.go +++ b/planner/cascades/integration_test.go @@ -88,6 +88,30 @@ func (s *testIntegrationSuite) TestPKIsHandleRangeScan(c *C) { } } +func (s *testIntegrationSuite) TestIndexScan(c *C) { + tk := testkit.NewTestKitWithInit(c, s.store) + tk.MustExec("drop table if exists t") + tk.MustExec("create table t(a int primary key, b int, c int, d int, index idx_b(b), index idx_c_b(c, b))") + tk.MustExec("insert into t values(1,2,3,100),(4,5,6,200),(7,8,9,300)") + tk.MustExec("set session tidb_enable_cascades_planner = 1") + var input []string + var output []struct { + SQL string + Plan []string + Result []string + } + s.testData.GetTestCases(c, &input, &output) + for i, sql := range input { + s.testData.OnRecord(func() { + output[i].SQL = sql + output[i].Plan = s.testData.ConvertRowsToStrings(tk.MustQuery("explain " + sql).Rows()) + output[i].Result = s.testData.ConvertRowsToStrings(tk.MustQuery(sql).Rows()) + }) + tk.MustQuery("explain " + sql).Check(testkit.Rows(output[i].Plan...)) + tk.MustQuery(sql).Check(testkit.Rows(output[i].Result...)) + } +} + func (s *testIntegrationSuite) TestBasicShow(c *C) { tk := testkit.NewTestKitWithInit(c, s.store) tk.MustExec("drop table if exists t") diff --git a/planner/cascades/optimize.go b/planner/cascades/optimize.go index f4a318b2efb0c..c4ddff045b230 100644 --- a/planner/cascades/optimize.go +++ b/planner/cascades/optimize.go @@ -107,7 +107,7 @@ func (opt *Optimizer) FindBestPlan(sctx sessionctx.Context, logical plannercore. if err != nil { return nil, 0, err } - rootGroup := convert2Group(logical) + rootGroup := memo.Convert2Group(logical) err = opt.onPhaseExploration(sctx, rootGroup) if err != nil { return nil, 0, err @@ -120,32 +120,11 @@ func (opt *Optimizer) FindBestPlan(sctx sessionctx.Context, logical plannercore. return p, cost, err } -// convert2GroupExpr converts a logical plan to a GroupExpr. -func convert2GroupExpr(node plannercore.LogicalPlan) *memo.GroupExpr { - e := memo.NewGroupExpr(node) - e.Children = make([]*memo.Group, 0, len(node.Children())) - for _, child := range node.Children() { - childGroup := convert2Group(child) - e.Children = append(e.Children, childGroup) - } - return e -} - -// convert2Group converts a logical plan to a Group. -func convert2Group(node plannercore.LogicalPlan) *memo.Group { - e := convert2GroupExpr(node) - g := memo.NewGroupWithSchema(e, node.Schema()) - // Stats property for `Group` would be computed after exploration phase. 
- return g -} - func (opt *Optimizer) onPhasePreprocessing(sctx sessionctx.Context, plan plannercore.LogicalPlan) (plannercore.LogicalPlan, error) { err := plan.PruneColumns(plan.Schema().Columns) if err != nil { return nil, err } - // TODO: Build key info when convert LogicalPlan to GroupExpr. - plan.BuildKeyInfo() return plan, nil } diff --git a/planner/cascades/optimize_test.go b/planner/cascades/optimize_test.go index 9bc08fdf82418..73d5d6ae33365 100644 --- a/planner/cascades/optimize_test.go +++ b/planner/cascades/optimize_test.go @@ -23,6 +23,7 @@ import ( "github.com/pingcap/parser/model" "github.com/pingcap/tidb/infoschema" plannercore "github.com/pingcap/tidb/planner/core" + "github.com/pingcap/tidb/planner/memo" "github.com/pingcap/tidb/planner/property" "github.com/pingcap/tidb/sessionctx" "github.com/pingcap/tidb/util/testleak" @@ -61,7 +62,7 @@ func (s *testCascadesSuite) TestImplGroupZeroCost(c *C) { c.Assert(err, IsNil) logic, ok := p.(plannercore.LogicalPlan) c.Assert(ok, IsTrue) - rootGroup := convert2Group(logic) + rootGroup := memo.Convert2Group(logic) prop := &property.PhysicalProperty{ ExpectedCnt: math.MaxFloat64, } @@ -77,7 +78,7 @@ func (s *testCascadesSuite) TestInitGroupSchema(c *C) { c.Assert(err, IsNil) logic, ok := p.(plannercore.LogicalPlan) c.Assert(ok, IsTrue) - g := convert2Group(logic) + g := memo.Convert2Group(logic) c.Assert(g, NotNil) c.Assert(g.Prop, NotNil) c.Assert(g.Prop.Schema.Len(), Equals, 1) @@ -91,7 +92,7 @@ func (s *testCascadesSuite) TestFillGroupStats(c *C) { c.Assert(err, IsNil) logic, ok := p.(plannercore.LogicalPlan) c.Assert(ok, IsTrue) - rootGroup := convert2Group(logic) + rootGroup := memo.Convert2Group(logic) err = s.optimizer.fillGroupStats(rootGroup) c.Assert(err, IsNil) c.Assert(rootGroup.Prop.Stats, NotNil) diff --git a/planner/cascades/stringer_test.go b/planner/cascades/stringer_test.go index da5e2ec62a562..ae961832d605c 100644 --- a/planner/cascades/stringer_test.go +++ b/planner/cascades/stringer_test.go @@ -53,7 +53,7 @@ func (s *testStringerSuite) TearDownSuite(c *C) { func (s *testStringerSuite) TestGroupStringer(c *C) { s.optimizer.ResetTransformationRules(map[memo.Operand][]Transformation{ memo.OperandSelection: { - NewRulePushSelDownTableGather(), + NewRulePushSelDownTiKVSingleGather(), NewRulePushSelDownTableScan(), }, memo.OperandDataSource: { @@ -78,9 +78,10 @@ func (s *testStringerSuite) TestGroupStringer(c *C) { c.Assert(ok, IsTrue) logic, err = s.optimizer.onPhasePreprocessing(s.sctx, logic) c.Assert(err, IsNil) - group := convert2Group(logic) + group := memo.Convert2Group(logic) err = s.optimizer.onPhaseExploration(s.sctx, group) c.Assert(err, IsNil) + group.BuildKeyInfo() s.testData.OnRecord(func() { output[i].SQL = sql output[i].Result = ToString(group) diff --git a/planner/cascades/testdata/integration_suite_in.json b/planner/cascades/testdata/integration_suite_in.json index 95191bf9a231e..e4d537155c0c1 100644 --- a/planner/cascades/testdata/integration_suite_in.json +++ b/planner/cascades/testdata/integration_suite_in.json @@ -37,7 +37,17 @@ "select a from t limit 2", "select a from t limit 1 offset 2", "select b from t order by b limit 3", - "select a from t order by a limit 1 offset 2" + "select a from t order by a limit 1 offset 2", + "select * from ((select a as aa from t t1) union all (select b as aa from t t2)) as t3 order by aa" + ] + }, + { + "name": "TestIndexScan", + "cases": [ + "select b from t", + "select a from t order by b", + "select c from t", + "select a from t order by c" ] }, { diff --git 
a/planner/cascades/testdata/integration_suite_out.json b/planner/cascades/testdata/integration_suite_out.json index 44d97bc7460d6..e9bcfcc5bdf8f 100644 --- a/planner/cascades/testdata/integration_suite_out.json +++ b/planner/cascades/testdata/integration_suite_out.json @@ -38,10 +38,9 @@ { "SQL": "select a from t where a * 3 + 1 > 9 and a < 5", "Plan": [ - "Projection_9 4.00 root test.t.a", - "└─TableReader_10 4.00 root data:Selection_11", - " └─Selection_11 4.00 cop[tikv] gt(plus(mul(test.t.a, 3), 1), 9)", - " └─TableScan_12 5.00 cop[tikv] table:t, range:[-inf,5), keep order:false, stats:pseudo" + "TableReader_9 4.00 root data:Selection_10", + "└─Selection_10 4.00 cop[tikv] gt(plus(mul(test.t.a, 3), 1), 9)", + " └─TableScan_11 5.00 cop[tikv] table:t, range:[-inf,5), keep order:false, stats:pseudo" ], "Result": [ "3" @@ -70,9 +69,8 @@ { "SQL": "select a from t order by a", "Plan": [ - "Projection_7 10000.00 root test.t.a", - "└─TableReader_8 10000.00 root data:TableScan_9", - " └─TableScan_9 10000.00 cop[tikv] table:t, range:[-inf,+inf], keep order:true, stats:pseudo" + "TableReader_7 10000.00 root data:TableScan_8", + "└─TableScan_8 10000.00 cop[tikv] table:t, range:[-inf,+inf], keep order:true, stats:pseudo" ], "Result": [ "1", @@ -84,10 +82,9 @@ { "SQL": "select b from t order by b", "Plan": [ - "Projection_7 10000.00 root test.t.b", - "└─Sort_12 10000.00 root test.t.b:asc", - " └─TableReader_10 10000.00 root data:TableScan_11", - " └─TableScan_11 10000.00 cop[tikv] table:t, range:[-inf,+inf], keep order:false, stats:pseudo" + "Sort_11 10000.00 root test.t.b:asc", + "└─TableReader_9 10000.00 root data:TableScan_10", + " └─TableScan_10 10000.00 cop[tikv] table:t, range:[-inf,+inf], keep order:false, stats:pseudo" ], "Result": [ "11", @@ -140,11 +137,10 @@ { "SQL": "select sum(a) from t", "Plan": [ - "Projection_8 1.00 root Column#3", - "└─HashAgg_13 1.00 root funcs:sum(Column#4)->Column#3", - " └─TableReader_14 1.00 root data:HashAgg_15", - " └─HashAgg_15 1.00 cop[tikv] funcs:sum(test.t.a)->Column#4", - " └─TableScan_11 10000.00 cop[tikv] table:t, range:[-inf,+inf], keep order:false, stats:pseudo" + "HashAgg_12 1.00 root funcs:sum(Column#4)->Column#3", + "└─TableReader_13 1.00 root data:HashAgg_14", + " └─HashAgg_14 1.00 cop[tikv] funcs:sum(test.t.a)->Column#4", + " └─TableScan_10 10000.00 cop[tikv] table:t, range:[-inf,+inf], keep order:false, stats:pseudo" ], "Result": [ "10" @@ -153,11 +149,10 @@ { "SQL": "select max(a), min(b) from t", "Plan": [ - "Projection_8 1.00 root Column#3, Column#4", - "└─HashAgg_12 1.00 root funcs:max(Column#5)->Column#3, funcs:min(Column#6)->Column#4", - " └─TableReader_13 1.00 root data:HashAgg_14", - " └─HashAgg_14 1.00 cop[tikv] funcs:max(test.t.a)->Column#5, funcs:min(test.t.b)->Column#6", - " └─TableScan_11 10000.00 cop[tikv] table:t, range:[-inf,+inf], keep order:false, stats:pseudo" + "HashAgg_11 1.00 root funcs:max(Column#5)->Column#3, funcs:min(Column#6)->Column#4", + "└─TableReader_12 1.00 root data:HashAgg_13", + " └─HashAgg_13 1.00 cop[tikv] funcs:max(test.t.a)->Column#5, funcs:min(test.t.b)->Column#6", + " └─TableScan_10 10000.00 cop[tikv] table:t, range:[-inf,+inf], keep order:false, stats:pseudo" ], "Result": [ "4 11" @@ -218,11 +213,10 @@ { "SQL": "select max(a+b) from t", "Plan": [ - "Projection_8 1.00 root Column#3", - "└─HashAgg_13 1.00 root funcs:max(Column#4)->Column#3", - " └─TableReader_14 1.00 root data:HashAgg_15", - " └─HashAgg_15 1.00 cop[tikv] funcs:max(plus(test.t.a, test.t.b))->Column#4", - " └─TableScan_11 10000.00 cop[tikv] 
table:t, range:[-inf,+inf], keep order:false, stats:pseudo" + "HashAgg_12 1.00 root funcs:max(Column#4)->Column#3", + "└─TableReader_13 1.00 root data:HashAgg_14", + " └─HashAgg_14 1.00 cop[tikv] funcs:max(plus(test.t.a, test.t.b))->Column#4", + " └─TableScan_10 10000.00 cop[tikv] table:t, range:[-inf,+inf], keep order:false, stats:pseudo" ], "Result": [ "48" @@ -232,12 +226,11 @@ "SQL": "select sum(a) from t group by a, a+b order by a", "Plan": [ "Projection_10 8000.00 root Column#3", - "└─Projection_12 8000.00 root Column#3, test.t.a", - " └─Sort_21 8000.00 root test.t.a:asc", - " └─HashAgg_14 8000.00 root group by:Column#10, Column#11, funcs:sum(Column#8)->Column#3, funcs:firstrow(Column#9)->test.t.a", - " └─Projection_17 10000.00 root cast(test.t.a)->Column#8, test.t.a, test.t.a, plus(test.t.a, test.t.b)->Column#11", - " └─TableReader_15 10000.00 root data:TableScan_16", - " └─TableScan_16 10000.00 cop[tikv] table:t, range:[-inf,+inf], keep order:false, stats:pseudo" + "└─Sort_20 8000.00 root test.t.a:asc", + " └─HashAgg_13 8000.00 root group by:Column#10, Column#11, funcs:sum(Column#8)->Column#3, funcs:firstrow(Column#9)->test.t.a", + " └─Projection_16 10000.00 root cast(test.t.a)->Column#8, test.t.a, test.t.a, plus(test.t.a, test.t.b)->Column#11", + " └─TableReader_14 10000.00 root data:TableScan_15", + " └─TableScan_15 10000.00 cop[tikv] table:t, range:[-inf,+inf], keep order:false, stats:pseudo" ], "Result": [ "1", @@ -255,9 +248,8 @@ "SQL": "select a from t limit 2", "Plan": [ "Limit_6 2.00 root offset:0, count:2", - "└─Projection_7 2.00 root test.t.a", - " └─TableReader_8 2.00 root data:TableScan_9", - " └─TableScan_9 2.00 cop[tikv] table:t, range:[-inf,+inf], keep order:false, stats:pseudo" + "└─TableReader_7 2.00 root data:TableScan_8", + " └─TableScan_8 2.00 cop[tikv] table:t, range:[-inf,+inf], keep order:false, stats:pseudo" ], "Result": [ "1", @@ -268,9 +260,8 @@ "SQL": "select a from t limit 1 offset 2", "Plan": [ "Limit_6 1.00 root offset:2, count:1", - "└─Projection_7 3.00 root test.t.a", - " └─TableReader_8 3.00 root data:TableScan_9", - " └─TableScan_9 3.00 cop[tikv] table:t, range:[-inf,+inf], keep order:false, stats:pseudo" + "└─TableReader_7 3.00 root data:TableScan_8", + " └─TableScan_8 3.00 cop[tikv] table:t, range:[-inf,+inf], keep order:false, stats:pseudo" ], "Result": [ "3" @@ -280,9 +271,8 @@ "SQL": "select b from t order by b limit 3", "Plan": [ "TopN_8 3.00 root test.t.b:asc, offset:0, count:3", - "└─Projection_10 10000.00 root test.t.b", - " └─TableReader_11 10000.00 root data:TableScan_12", - " └─TableScan_12 10000.00 cop[tikv] table:t, range:[-inf,+inf], keep order:false, stats:pseudo" + "└─TableReader_10 10000.00 root data:TableScan_11", + " └─TableScan_11 10000.00 cop[tikv] table:t, range:[-inf,+inf], keep order:false, stats:pseudo" ], "Result": [ "11", @@ -294,13 +284,90 @@ "SQL": "select a from t order by a limit 1 offset 2", "Plan": [ "Limit_9 1.00 root offset:2, count:1", - "└─Projection_13 3.00 root test.t.a", - " └─TableReader_14 3.00 root data:TableScan_15", - " └─TableScan_15 3.00 cop[tikv] table:t, range:[-inf,+inf], keep order:true, stats:pseudo" + "└─TableReader_12 3.00 root data:TableScan_13", + " └─TableScan_13 3.00 cop[tikv] table:t, range:[-inf,+inf], keep order:true, stats:pseudo" ], "Result": [ "3" ] + }, + { + "SQL": "select * from ((select a as aa from t t1) union all (select b as aa from t t2)) as t3 order by aa", + "Plan": [ + "Sort_23 20000.00 root Column#5:asc", + "└─Union_16 20000.00 root ", + " ├─Projection_17 10000.00 root 
test.t.a", + " │ └─TableReader_18 10000.00 root data:TableScan_19", + " │ └─TableScan_19 10000.00 cop[tikv] table:t1, range:[-inf,+inf], keep order:false, stats:pseudo", + " └─Projection_20 10000.00 root test.t.b", + " └─TableReader_21 10000.00 root data:TableScan_22", + " └─TableScan_22 10000.00 cop[tikv] table:t2, range:[-inf,+inf], keep order:false, stats:pseudo" + ], + "Result": [ + "1", + "2", + "3", + "4", + "11", + "22", + "33", + "44" + ] + } + ] + }, + { + "Name": "TestIndexScan", + "Cases": [ + { + "SQL": "select b from t", + "Plan": [ + "IndexReader_11 10000.00 root index:IndexScan_12", + "└─IndexScan_12 10000.00 cop[tikv] table:t, index:b, range:[NULL,+inf], keep order:false, stats:pseudo" + ], + "Result": [ + "2", + "5", + "8" + ] + }, + { + "SQL": "select a from t order by b", + "Plan": [ + "Projection_11 10000.00 root test.t.a", + "└─IndexReader_14 10000.00 root index:IndexScan_15", + " └─IndexScan_15 10000.00 cop[tikv] table:t, index:b, range:[NULL,+inf], keep order:true, stats:pseudo" + ], + "Result": [ + "1", + "4", + "7" + ] + }, + { + "SQL": "select c from t", + "Plan": [ + "IndexReader_9 10000.00 root index:IndexScan_10", + "└─IndexScan_10 10000.00 cop[tikv] table:t, index:c, b, range:[NULL,+inf], keep order:false, stats:pseudo" + ], + "Result": [ + "3", + "6", + "9" + ] + }, + { + "SQL": "select a from t order by c", + "Plan": [ + "Projection_9 10000.00 root test.t.a", + "└─IndexReader_12 10000.00 root index:IndexScan_13", + " └─IndexScan_13 10000.00 cop[tikv] table:t, index:c, b, range:[NULL,+inf], keep order:true, stats:pseudo" + ], + "Result": [ + "1", + "4", + "7" + ] } ] }, diff --git a/planner/cascades/testdata/stringer_suite_out.json b/planner/cascades/testdata/stringer_suite_out.json index 1e6da38cd12e9..9daac9bdc98d5 100644 --- a/planner/cascades/testdata/stringer_suite_out.json +++ b/planner/cascades/testdata/stringer_suite_out.json @@ -8,7 +8,7 @@ "Group#0 Schema:[test.t.b]", " Projection_3 input:[Group#1], test.t.b", "Group#1 Schema:[test.t.a,test.t.b], UniqueKey:[test.t.a]", - " TableGather_5 input:[Group#2]", + " TiKVSingleGather_5 input:[Group#2], table:t", "Group#2 Schema:[test.t.a,test.t.b], UniqueKey:[test.t.a]", " Selection_8 input:[Group#3], lt(test.t.b, 1)", "Group#3 Schema:[test.t.a,test.t.b], UniqueKey:[test.t.a]", @@ -34,13 +34,31 @@ "Group#2 Schema:[test.t.a,test.t.b,test.t.a]", " Join_3 input:[Group#3,Group#4], inner join", "Group#3 Schema:[test.t.a,test.t.b], UniqueKey:[test.t.a]", - " TableGather_7 input:[Group#5]", + " TiKVSingleGather_7 input:[Group#5], table:t1", "Group#5 Schema:[test.t.a,test.t.b], UniqueKey:[test.t.a]", " TableScan_6 table:t1, pk col:test.t.a", "Group#4 Schema:[test.t.a], UniqueKey:[test.t.a]", - " TableGather_9 input:[Group#6]", + " TiKVSingleGather_9 input:[Group#6], table:t2", + " TiKVSingleGather_21 input:[Group#7], table:t2, index:e_d_c_str_prefix", + " TiKVSingleGather_19 input:[Group#8], table:t2, index:c_d_e_str", + " TiKVSingleGather_17 input:[Group#9], table:t2, index:f_g", + " TiKVSingleGather_15 input:[Group#10], table:t2, index:g", + " TiKVSingleGather_13 input:[Group#11], table:t2, index:f", + " TiKVSingleGather_11 input:[Group#12], table:t2, index:c_d_e", "Group#6 Schema:[test.t.a], UniqueKey:[test.t.a]", - " TableScan_8 table:t2, pk col:test.t.a" + " TableScan_8 table:t2, pk col:test.t.a", + "Group#7 Schema:[test.t.a]", + " IndexScan_20 table:t2, index:e_str, d_str, c_str", + "Group#8 Schema:[test.t.a]", + " IndexScan_18 table:t2, index:c_str, d_str, e_str", + "Group#9 Schema:[test.t.a]", + " 
IndexScan_16 table:t2, index:f, g", + "Group#10 Schema:[test.t.a]", + " IndexScan_14 table:t2, index:g", + "Group#11 Schema:[test.t.a]", + " IndexScan_12 table:t2, index:f", + "Group#12 Schema:[test.t.a]", + " IndexScan_10 table:t2, index:c, d, e" ] }, { @@ -51,7 +69,7 @@ "Group#1 Schema:[Column#13,Column#14]", " Aggregation_3 input:[Group#2], group by:test.t.d, funcs:max(test.t.b), sum(test.t.a)", "Group#2 Schema:[test.t.a,test.t.b,test.t.c,test.t.d], UniqueKey:[test.t.a]", - " TableGather_6 input:[Group#3]", + " TiKVSingleGather_6 input:[Group#3], table:t", "Group#3 Schema:[test.t.a,test.t.b,test.t.c,test.t.d], UniqueKey:[test.t.a]", " Selection_7 input:[Group#4], gt(test.t.c, 10)", "Group#4 Schema:[test.t.a,test.t.b,test.t.c,test.t.d], UniqueKey:[test.t.a]", @@ -66,7 +84,7 @@ "Group#1 Schema:[Column#13]", " Aggregation_3 input:[Group#2], funcs:avg(test.t.b)", "Group#2 Schema:[test.t.b]", - " TableGather_6 input:[Group#3]", + " TiKVSingleGather_6 input:[Group#3], table:t", "Group#3 Schema:[test.t.b]", " Selection_7 input:[Group#4], gt(test.t.b, 10)", "Group#4 Schema:[test.t.b]", @@ -83,15 +101,45 @@ "Group#2 Schema:[test.t.a,test.t.b], UniqueKey:[test.t.a]", " Apply_6 input:[Group#3,Group#4], semi join", "Group#3 Schema:[test.t.a,test.t.b], UniqueKey:[test.t.a]", - " TableGather_9 input:[Group#5]", + " TiKVSingleGather_9 input:[Group#5], table:t1", "Group#5 Schema:[test.t.a,test.t.b], UniqueKey:[test.t.a]", " TableScan_8 table:t1, pk col:test.t.a", "Group#4 Schema:[test.t.a], UniqueKey:[test.t.a]", - " TableGather_11 input:[Group#6]", + " TiKVSingleGather_11 input:[Group#6], table:t2", + " TiKVSingleGather_13 input:[Group#7], table:t2, index:c_d_e", + " TiKVSingleGather_15 input:[Group#8], table:t2, index:f", + " TiKVSingleGather_17 input:[Group#9], table:t2, index:g", + " TiKVSingleGather_19 input:[Group#10], table:t2, index:f_g", + " TiKVSingleGather_21 input:[Group#11], table:t2, index:c_d_e_str", + " TiKVSingleGather_23 input:[Group#12], table:t2, index:e_d_c_str_prefix", "Group#6 Schema:[test.t.a], UniqueKey:[test.t.a]", - " Selection_12 input:[Group#7], lt(test.t.a, test.t.b)", - "Group#7 Schema:[test.t.a], UniqueKey:[test.t.a]", - " TableScan_10 table:t2, pk col:test.t.a" + " Selection_24 input:[Group#13], lt(test.t.a, test.t.b)", + "Group#13 Schema:[test.t.a], UniqueKey:[test.t.a]", + " TableScan_10 table:t2, pk col:test.t.a", + "Group#7 Schema:[test.t.a]", + " Selection_30 input:[Group#14], lt(test.t.a, test.t.b)", + "Group#14 Schema:[test.t.a]", + " IndexScan_12 table:t2, index:c, d, e", + "Group#8 Schema:[test.t.a]", + " Selection_29 input:[Group#15], lt(test.t.a, test.t.b)", + "Group#15 Schema:[test.t.a]", + " IndexScan_14 table:t2, index:f", + "Group#9 Schema:[test.t.a]", + " Selection_28 input:[Group#16], lt(test.t.a, test.t.b)", + "Group#16 Schema:[test.t.a]", + " IndexScan_16 table:t2, index:g", + "Group#10 Schema:[test.t.a]", + " Selection_27 input:[Group#17], lt(test.t.a, test.t.b)", + "Group#17 Schema:[test.t.a]", + " IndexScan_18 table:t2, index:f, g", + "Group#11 Schema:[test.t.a]", + " Selection_26 input:[Group#18], lt(test.t.a, test.t.b)", + "Group#18 Schema:[test.t.a]", + " IndexScan_20 table:t2, index:c_str, d_str, e_str", + "Group#12 Schema:[test.t.a]", + " Selection_25 input:[Group#19], lt(test.t.a, test.t.b)", + "Group#19 Schema:[test.t.a]", + " IndexScan_22 table:t2, index:e_str, d_str, c_str" ] }, { @@ -106,13 +154,31 @@ "Group#3 Schema:[test.t.a,test.t.b]", " Join_3 input:[Group#4,Group#5], inner join", "Group#4 Schema:[test.t.a], UniqueKey:[test.t.a]", - " 
TableGather_8 input:[Group#6]", + " TiKVSingleGather_8 input:[Group#6], table:t1", + " TiKVSingleGather_20 input:[Group#7], table:t1, index:e_d_c_str_prefix", + " TiKVSingleGather_18 input:[Group#8], table:t1, index:c_d_e_str", + " TiKVSingleGather_16 input:[Group#9], table:t1, index:f_g", + " TiKVSingleGather_14 input:[Group#10], table:t1, index:g", + " TiKVSingleGather_12 input:[Group#11], table:t1, index:f", + " TiKVSingleGather_10 input:[Group#12], table:t1, index:c_d_e", "Group#6 Schema:[test.t.a], UniqueKey:[test.t.a]", " TableScan_7 table:t1, pk col:test.t.a", + "Group#7 Schema:[test.t.a]", + " IndexScan_19 table:t1, index:e_str, d_str, c_str", + "Group#8 Schema:[test.t.a]", + " IndexScan_17 table:t1, index:c_str, d_str, e_str", + "Group#9 Schema:[test.t.a]", + " IndexScan_15 table:t1, index:f, g", + "Group#10 Schema:[test.t.a]", + " IndexScan_13 table:t1, index:g", + "Group#11 Schema:[test.t.a]", + " IndexScan_11 table:t1, index:f", + "Group#12 Schema:[test.t.a]", + " IndexScan_9 table:t1, index:c, d, e", "Group#5 Schema:[test.t.b]", - " TableGather_10 input:[Group#7]", - "Group#7 Schema:[test.t.b]", - " TableScan_9 table:t2" + " TiKVSingleGather_22 input:[Group#13], table:t2", + "Group#13 Schema:[test.t.b]", + " TableScan_21 table:t2" ] }, { @@ -123,9 +189,39 @@ "Group#1 Schema:[test.t.a], UniqueKey:[test.t.a]", " Projection_3 input:[Group#2], test.t.a", "Group#2 Schema:[test.t.a], UniqueKey:[test.t.a]", - " TableGather_6 input:[Group#3]", + " TiKVSingleGather_6 input:[Group#3], table:t", + " TiKVSingleGather_8 input:[Group#4], table:t, index:c_d_e", + " TiKVSingleGather_10 input:[Group#5], table:t, index:f", + " TiKVSingleGather_12 input:[Group#6], table:t, index:g", + " TiKVSingleGather_14 input:[Group#7], table:t, index:f_g", + " TiKVSingleGather_16 input:[Group#8], table:t, index:c_d_e_str", + " TiKVSingleGather_18 input:[Group#9], table:t, index:e_d_c_str_prefix", "Group#3 Schema:[test.t.a], UniqueKey:[test.t.a]", - " TableScan_8 table:t, pk col:test.t.a, cond:[gt(test.t.a, 10)]" + " TableScan_26 table:t, pk col:test.t.a, cond:[gt(test.t.a, 10)]", + "Group#4 Schema:[test.t.a]", + " Selection_25 input:[Group#10], gt(test.t.a, 10)", + "Group#10 Schema:[test.t.a]", + " IndexScan_7 table:t, index:c, d, e", + "Group#5 Schema:[test.t.a]", + " Selection_24 input:[Group#11], gt(test.t.a, 10)", + "Group#11 Schema:[test.t.a]", + " IndexScan_9 table:t, index:f", + "Group#6 Schema:[test.t.a]", + " Selection_23 input:[Group#12], gt(test.t.a, 10)", + "Group#12 Schema:[test.t.a]", + " IndexScan_11 table:t, index:g", + "Group#7 Schema:[test.t.a]", + " Selection_22 input:[Group#13], gt(test.t.a, 10)", + "Group#13 Schema:[test.t.a]", + " IndexScan_13 table:t, index:f, g", + "Group#8 Schema:[test.t.a]", + " Selection_21 input:[Group#14], gt(test.t.a, 10)", + "Group#14 Schema:[test.t.a]", + " IndexScan_15 table:t, index:c_str, d_str, e_str", + "Group#9 Schema:[test.t.a]", + " Selection_20 input:[Group#15], gt(test.t.a, 10)", + "Group#15 Schema:[test.t.a]", + " IndexScan_17 table:t, index:e_str, d_str, c_str" ] }, { @@ -138,7 +234,7 @@ "Group#2 Schema:[test.t.a,test.t.c], UniqueKey:[test.t.a]", " Projection_3 input:[Group#3], test.t.a, test.t.c", "Group#3 Schema:[test.t.a,test.t.b,test.t.c], UniqueKey:[test.t.a]", - " TableGather_7 input:[Group#4]", + " TiKVSingleGather_7 input:[Group#4], table:t", "Group#4 Schema:[test.t.a,test.t.b,test.t.c], UniqueKey:[test.t.a]", " Selection_8 input:[Group#5], gt(test.t.b, 1)", "Group#5 Schema:[test.t.a,test.t.b,test.t.c], UniqueKey:[test.t.a]", @@ -157,19 
+253,37 @@ "Group#4 Schema:[Column#26]", " Aggregation_5 input:[Group#5], funcs:avg(test.t.a)", "Group#5 Schema:[test.t.a], UniqueKey:[test.t.a]", - " TableGather_11 input:[Group#6]", + " TiKVSingleGather_11 input:[Group#6], table:t", + " TiKVSingleGather_23 input:[Group#7], table:t, index:e_d_c_str_prefix", + " TiKVSingleGather_21 input:[Group#8], table:t, index:c_d_e_str", + " TiKVSingleGather_19 input:[Group#9], table:t, index:f_g", + " TiKVSingleGather_17 input:[Group#10], table:t, index:g", + " TiKVSingleGather_15 input:[Group#11], table:t, index:f", + " TiKVSingleGather_13 input:[Group#12], table:t, index:c_d_e", "Group#6 Schema:[test.t.a], UniqueKey:[test.t.a]", " TableScan_10 table:t, pk col:test.t.a", + "Group#7 Schema:[test.t.a]", + " IndexScan_22 table:t, index:e_str, d_str, c_str", + "Group#8 Schema:[test.t.a]", + " IndexScan_20 table:t, index:c_str, d_str, e_str", + "Group#9 Schema:[test.t.a]", + " IndexScan_18 table:t, index:f, g", + "Group#10 Schema:[test.t.a]", + " IndexScan_16 table:t, index:g", + "Group#11 Schema:[test.t.a]", + " IndexScan_14 table:t, index:f", + "Group#12 Schema:[test.t.a]", + " IndexScan_12 table:t, index:c, d, e", "Group#2 Schema:[Column#27]", - " Projection_9 input:[Group#7], Column#13", - "Group#7 Schema:[Column#13]", - " Projection_3 input:[Group#8], Column#13", - "Group#8 Schema:[Column#13]", - " Aggregation_2 input:[Group#9], funcs:avg(test.t.b)", - "Group#9 Schema:[test.t.b]", - " TableGather_13 input:[Group#10]", - "Group#10 Schema:[test.t.b]", - " TableScan_12 table:t" + " Projection_9 input:[Group#13], Column#13", + "Group#13 Schema:[Column#13]", + " Projection_3 input:[Group#14], Column#13", + "Group#14 Schema:[Column#13]", + " Aggregation_2 input:[Group#15], funcs:avg(test.t.b)", + "Group#15 Schema:[test.t.b]", + " TiKVSingleGather_25 input:[Group#16], table:t", + "Group#16 Schema:[test.t.b]", + " TableScan_24 table:t" ] } ] diff --git a/planner/cascades/testdata/transformation_rules_suite_in.json b/planner/cascades/testdata/transformation_rules_suite_in.json index 57a9ccab2b128..f24323ba30f94 100644 --- a/planner/cascades/testdata/transformation_rules_suite_in.json +++ b/planner/cascades/testdata/transformation_rules_suite_in.json @@ -5,8 +5,8 @@ "select a, b from (select a, b from t as t1 order by a) as t2 where t2.b > 10", "select a, b from (select a, b from t as t1 order by a) as t2 where t2.a > 10", "select a, b from (select a, b, a+b as a_b from t as t1) as t2 where a_b > 10 and b = 1", - "select a, @i:=@i+1 as ii from (select a, @i:=0 from t as t1) as t2 where @i < 10", - "select a, @i:=@i+1 as ii from (select a, @i:=0 from t as t1) as t2 where @i < 10 and a > 10", + "select b, @i:=@i+1 as ii from (select b, @i:=0 from t as t1) as t2 where @i < 10", + "select b, @i:=@i+1 as ii from (select a, b, @i:=0 from t as t1) as t2 where @i < 10 and a > 10", "select a, max(b) from t group by a having a > 1", "select a, avg(b) from t group by a having a > 1 and max(b) > 10", "select t1.a, t1.b, t2.b from t t1, t t2 where t1.a = t2.a and t2.b = t1.b and t1.a > 10 and t2.b > 10 and t1.a > t2.b", @@ -16,9 +16,9 @@ { "name": "TestAggPushDownGather", "cases": [ - "select a, sum(b) from t group by a", - "select a, sum(b) from t group by a+c, a", - "select a, sum(b) from t group by sin(a)+sin(c)" + "select b, sum(a) from t group by b", + "select b, sum(a) from t group by c, b", + "select b, sum(a) from t group by sin(b)+sin(c), b" ] }, { @@ -27,5 +27,13 @@ "select b from t order by a limit 2", "select a+b from t order by a limit 1 offset 2" ] + }, + { + 
"name": "TestProjectionElimination", + "cases": [ + "select a, b from (select a, b from t) as t2", + "select a+b from (select a, b from t) as t2", + "select a from (select floor(a) as a from t) as t2" + ] } ] diff --git a/planner/cascades/testdata/transformation_rules_suite_out.json b/planner/cascades/testdata/transformation_rules_suite_out.json index 1f21428831d40..a90296c2b81ce 100644 --- a/planner/cascades/testdata/transformation_rules_suite_out.json +++ b/planner/cascades/testdata/transformation_rules_suite_out.json @@ -5,107 +5,107 @@ { "SQL": "select a, b from (select a, b from t as t1 order by a) as t2 where t2.b > 10", "Result": [ - "Group#0 Schema:[test.t.a,test.t.b], UniqueKey:[test.t.a]", + "Group#0 Schema:[test.t.a,test.t.b]", " Projection_5 input:[Group#1], test.t.a, test.t.b", - "Group#1 Schema:[test.t.a,test.t.b], UniqueKey:[test.t.a]", + "Group#1 Schema:[test.t.a,test.t.b]", " Sort_3 input:[Group#2], test.t.a:asc", - "Group#2 Schema:[test.t.a,test.t.b], UniqueKey:[test.t.a]", + "Group#2 Schema:[test.t.a,test.t.b]", " Projection_2 input:[Group#3], test.t.a, test.t.b", - "Group#3 Schema:[test.t.a,test.t.b], UniqueKey:[test.t.a]", - " TableGather_7 input:[Group#4]", - "Group#4 Schema:[test.t.a,test.t.b], UniqueKey:[test.t.a]", + "Group#3 Schema:[test.t.a,test.t.b]", + " TiKVSingleGather_7 input:[Group#4], table:t1", + "Group#4 Schema:[test.t.a,test.t.b]", " Selection_9 input:[Group#5], gt(test.t.b, 10)", - "Group#5 Schema:[test.t.a,test.t.b], UniqueKey:[test.t.a]", + "Group#5 Schema:[test.t.a,test.t.b]", " TableScan_6 table:t1, pk col:test.t.a" ] }, { "SQL": "select a, b from (select a, b from t as t1 order by a) as t2 where t2.a > 10", "Result": [ - "Group#0 Schema:[test.t.a,test.t.b], UniqueKey:[test.t.a]", + "Group#0 Schema:[test.t.a,test.t.b]", " Projection_5 input:[Group#1], test.t.a, test.t.b", - "Group#1 Schema:[test.t.a,test.t.b], UniqueKey:[test.t.a]", + "Group#1 Schema:[test.t.a,test.t.b]", " Sort_3 input:[Group#2], test.t.a:asc", - "Group#2 Schema:[test.t.a,test.t.b], UniqueKey:[test.t.a]", + "Group#2 Schema:[test.t.a,test.t.b]", " Projection_2 input:[Group#3], test.t.a, test.t.b", - "Group#3 Schema:[test.t.a,test.t.b], UniqueKey:[test.t.a]", - " TableGather_7 input:[Group#4]", - "Group#4 Schema:[test.t.a,test.t.b], UniqueKey:[test.t.a]", + "Group#3 Schema:[test.t.a,test.t.b]", + " TiKVSingleGather_7 input:[Group#4], table:t1", + "Group#4 Schema:[test.t.a,test.t.b]", " TableScan_10 table:t1, pk col:test.t.a, cond:[gt(test.t.a, 10)]" ] }, { "SQL": "select a, b from (select a, b, a+b as a_b from t as t1) as t2 where a_b > 10 and b = 1", "Result": [ - "Group#0 Schema:[test.t.a,test.t.b], UniqueKey:[test.t.a]", + "Group#0 Schema:[test.t.a,test.t.b]", " Projection_4 input:[Group#1], test.t.a, test.t.b", - "Group#1 Schema:[test.t.a,test.t.b,Column#13], UniqueKey:[test.t.a]", + "Group#1 Schema:[test.t.a,test.t.b,Column#13]", " Projection_2 input:[Group#2], test.t.a, test.t.b, plus(test.t.a, test.t.b)->Column#13", - "Group#2 Schema:[test.t.a,test.t.b], UniqueKey:[test.t.a]", - " TableGather_6 input:[Group#3]", - "Group#3 Schema:[test.t.a,test.t.b], UniqueKey:[test.t.a]", + "Group#2 Schema:[test.t.a,test.t.b]", + " TiKVSingleGather_6 input:[Group#3], table:t1", + "Group#3 Schema:[test.t.a,test.t.b]", " Selection_8 input:[Group#4], eq(test.t.b, 1), gt(plus(test.t.a, test.t.b), 10)", - "Group#4 Schema:[test.t.a,test.t.b], UniqueKey:[test.t.a]", + "Group#4 Schema:[test.t.a,test.t.b]", " TableScan_5 table:t1, pk col:test.t.a" ] }, { - "SQL": "select a, @i:=@i+1 as ii from 
(select a, @i:=0 from t as t1) as t2 where @i < 10", + "SQL": "select b, @i:=@i+1 as ii from (select b, @i:=0 from t as t1) as t2 where @i < 10", "Result": [ - "Group#0 Schema:[test.t.a,Column#14], UniqueKey:[test.t.a]", - " Projection_4 input:[Group#1], test.t.a, setvar(i, cast(plus(cast(getvar(i)), 1)))->Column#14", - "Group#1 Schema:[test.t.a,Column#13], UniqueKey:[test.t.a]", + "Group#0 Schema:[test.t.b,Column#14]", + " Projection_4 input:[Group#1], test.t.b, setvar(i, cast(plus(cast(getvar(i)), 1)))->Column#14", + "Group#1 Schema:[test.t.b,Column#13]", " Selection_3 input:[Group#2], lt(cast(getvar(\"i\")), 10)", - "Group#2 Schema:[test.t.a,Column#13], UniqueKey:[test.t.a]", - " Projection_2 input:[Group#3], test.t.a, setvar(i, 0)->Column#13", - "Group#3 Schema:[test.t.a], UniqueKey:[test.t.a]", - " TableGather_6 input:[Group#4]", - "Group#4 Schema:[test.t.a], UniqueKey:[test.t.a]", - " TableScan_5 table:t1, pk col:test.t.a" + "Group#2 Schema:[test.t.b,Column#13]", + " Projection_2 input:[Group#3], test.t.b, setvar(i, 0)->Column#13", + "Group#3 Schema:[test.t.b]", + " TiKVSingleGather_6 input:[Group#4], table:t1", + "Group#4 Schema:[test.t.b]", + " TableScan_5 table:t1" ] }, { - "SQL": "select a, @i:=@i+1 as ii from (select a, @i:=0 from t as t1) as t2 where @i < 10 and a > 10", + "SQL": "select b, @i:=@i+1 as ii from (select a, b, @i:=0 from t as t1) as t2 where @i < 10 and a > 10", "Result": [ - "Group#0 Schema:[test.t.a,Column#14], UniqueKey:[test.t.a]", - " Projection_4 input:[Group#1], test.t.a, setvar(i, cast(plus(cast(getvar(i)), 1)))->Column#14", - "Group#1 Schema:[test.t.a,Column#13], UniqueKey:[test.t.a]", + "Group#0 Schema:[test.t.b,Column#14]", + " Projection_4 input:[Group#1], test.t.b, setvar(i, cast(plus(cast(getvar(i)), 1)))->Column#14", + "Group#1 Schema:[test.t.a,test.t.b,Column#13]", " Selection_8 input:[Group#2], lt(cast(getvar(\"i\")), 10)", - "Group#2 Schema:[test.t.a,Column#13], UniqueKey:[test.t.a]", - " Projection_2 input:[Group#3], test.t.a, setvar(i, 0)->Column#13", - "Group#3 Schema:[test.t.a], UniqueKey:[test.t.a]", - " TableGather_6 input:[Group#4]", - "Group#4 Schema:[test.t.a], UniqueKey:[test.t.a]", + "Group#2 Schema:[test.t.a,test.t.b,Column#13]", + " Projection_2 input:[Group#3], test.t.a, test.t.b, setvar(i, 0)->Column#13", + "Group#3 Schema:[test.t.a,test.t.b]", + " TiKVSingleGather_6 input:[Group#4], table:t1", + "Group#4 Schema:[test.t.a,test.t.b]", " TableScan_10 table:t1, pk col:test.t.a, cond:[gt(test.t.a, 10)]" ] }, { "SQL": "select a, max(b) from t group by a having a > 1", "Result": [ - "Group#0 Schema:[test.t.a,Column#13], UniqueKey:[test.t.a,test.t.a]", + "Group#0 Schema:[test.t.a,Column#13]", " Projection_3 input:[Group#1], test.t.a, Column#13", - "Group#1 Schema:[Column#13,test.t.a], UniqueKey:[test.t.a,test.t.a]", + "Group#1 Schema:[Column#13,test.t.a]", " Aggregation_2 input:[Group#2], group by:test.t.a, funcs:max(test.t.b), firstrow(test.t.a)", - "Group#2 Schema:[test.t.a,test.t.b], UniqueKey:[test.t.a]", - " TableGather_6 input:[Group#3]", - "Group#3 Schema:[test.t.a,test.t.b], UniqueKey:[test.t.a]", + "Group#2 Schema:[test.t.a,test.t.b]", + " TiKVSingleGather_6 input:[Group#3], table:t", + "Group#3 Schema:[test.t.a,test.t.b]", " TableScan_10 table:t, pk col:test.t.a, cond:[gt(test.t.a, 1)]" ] }, { "SQL": "select a, avg(b) from t group by a having a > 1 and max(b) > 10", "Result": [ - "Group#0 Schema:[test.t.a,Column#16], UniqueKey:[test.t.a,test.t.a]", + "Group#0 Schema:[test.t.a,Column#16]", " Projection_5 input:[Group#1], 
test.t.a, Column#13", - "Group#1 Schema:[test.t.a,Column#13,Column#14], UniqueKey:[test.t.a,test.t.a]", + "Group#1 Schema:[test.t.a,Column#13,Column#14]", " Projection_3 input:[Group#2], test.t.a, Column#13, Column#14", - "Group#2 Schema:[Column#13,Column#14,test.t.a], UniqueKey:[test.t.a,test.t.a]", + "Group#2 Schema:[Column#13,Column#14,test.t.a]", " Selection_10 input:[Group#3], gt(Column#14, 10)", - "Group#3 Schema:[Column#13,Column#14,test.t.a], UniqueKey:[test.t.a,test.t.a]", + "Group#3 Schema:[Column#13,Column#14,test.t.a]", " Aggregation_2 input:[Group#4], group by:test.t.a, funcs:avg(test.t.b), max(test.t.b), firstrow(test.t.a)", - "Group#4 Schema:[test.t.a,test.t.b], UniqueKey:[test.t.a]", - " TableGather_7 input:[Group#5]", - "Group#5 Schema:[test.t.a,test.t.b], UniqueKey:[test.t.a]", + "Group#4 Schema:[test.t.a,test.t.b]", + " TiKVSingleGather_7 input:[Group#5], table:t", + "Group#5 Schema:[test.t.a,test.t.b]", " TableScan_12 table:t, pk col:test.t.a, cond:[gt(test.t.a, 1)]" ] }, @@ -116,17 +116,17 @@ " Projection_5 input:[Group#1], test.t.a, test.t.b, test.t.b", "Group#1 Schema:[test.t.a,test.t.b,test.t.a,test.t.b]", " Join_3 input:[Group#2,Group#3], inner join, equal:[eq(test.t.a, test.t.a) eq(test.t.b, test.t.b)], other cond:gt(test.t.a, test.t.b)", - "Group#2 Schema:[test.t.a,test.t.b], UniqueKey:[test.t.a]", - " TableGather_7 input:[Group#4]", - "Group#4 Schema:[test.t.a,test.t.b], UniqueKey:[test.t.a]", + "Group#2 Schema:[test.t.a,test.t.b]", + " TiKVSingleGather_7 input:[Group#4], table:t1", + "Group#4 Schema:[test.t.a,test.t.b]", " Selection_14 input:[Group#5], gt(test.t.a, test.t.b), gt(test.t.b, 10)", - "Group#5 Schema:[test.t.a,test.t.b], UniqueKey:[test.t.a]", + "Group#5 Schema:[test.t.a,test.t.b]", " TableScan_13 table:t1, pk col:test.t.a, cond:[gt(test.t.a, 10)]", - "Group#3 Schema:[test.t.a,test.t.b], UniqueKey:[test.t.a]", - " TableGather_9 input:[Group#6]", - "Group#6 Schema:[test.t.a,test.t.b], UniqueKey:[test.t.a]", + "Group#3 Schema:[test.t.a,test.t.b]", + " TiKVSingleGather_9 input:[Group#6], table:t2", + "Group#6 Schema:[test.t.a,test.t.b]", " Selection_17 input:[Group#7], gt(test.t.a, test.t.b), gt(test.t.b, 10)", - "Group#7 Schema:[test.t.a,test.t.b], UniqueKey:[test.t.a]", + "Group#7 Schema:[test.t.a,test.t.b]", " TableScan_16 table:t2, pk col:test.t.a, cond:[gt(test.t.a, 10)]" ] }, @@ -136,7 +136,7 @@ "Group#0 Schema:[test.t.a,test.t.b]", " Projection_5 input:[Group#1], test.t.a, test.t.b", "Group#1 Schema:[test.t.a,test.t.b,test.t.a]", - " TableDual_10 rowcount:0" + " TableDual_22 rowcount:0" ] } ] @@ -145,50 +145,50 @@ "Name": "TestAggPushDownGather", "Cases": [ { - "SQL": "select a, sum(b) from t group by a", + "SQL": "select b, sum(a) from t group by b", "Result": [ - "Group#0 Schema:[test.t.a,Column#13], UniqueKey:[test.t.a,test.t.a]", - " Projection_3 input:[Group#1], test.t.a, Column#13", - "Group#1 Schema:[Column#13,test.t.a], UniqueKey:[test.t.a,test.t.a]", - " Aggregation_2 input:[Group#2], group by:test.t.a, funcs:sum(test.t.b), firstrow(test.t.a)", - " Aggregation_7 input:[Group#3], group by:test.t.a, funcs:sum(Column#14), firstrow(test.t.a)", + "Group#0 Schema:[test.t.b,Column#13], UniqueKey:[test.t.b]", + " Projection_3 input:[Group#1], test.t.b, Column#13", + "Group#1 Schema:[Column#13,test.t.b], UniqueKey:[test.t.b]", + " Aggregation_2 input:[Group#2], group by:test.t.b, funcs:sum(test.t.a), firstrow(test.t.b)", + " Aggregation_7 input:[Group#3], group by:test.t.b, funcs:sum(Column#14), firstrow(test.t.b)", "Group#2 
Schema:[test.t.a,test.t.b], UniqueKey:[test.t.a]", - " TableGather_5 input:[Group#4]", + " TiKVSingleGather_5 input:[Group#4], table:t", "Group#4 Schema:[test.t.a,test.t.b], UniqueKey:[test.t.a]", " TableScan_4 table:t, pk col:test.t.a", - "Group#3 Schema:[Column#14,test.t.a]", - " TableGather_5 input:[Group#5]", - "Group#5 Schema:[Column#14,test.t.a]", - " Aggregation_6 input:[Group#4], group by:test.t.a, funcs:sum(test.t.b)" + "Group#3 Schema:[Column#14,test.t.b]", + " TiKVSingleGather_5 input:[Group#5], table:t", + "Group#5 Schema:[Column#14,test.t.b]", + " Aggregation_6 input:[Group#4], group by:test.t.b, funcs:sum(test.t.a)" ] }, { - "SQL": "select a, sum(b) from t group by a+c, a", + "SQL": "select b, sum(a) from t group by c, b", "Result": [ - "Group#0 Schema:[test.t.a,Column#13], UniqueKey:[test.t.a]", - " Projection_3 input:[Group#1], test.t.a, Column#13", - "Group#1 Schema:[Column#13,test.t.a], UniqueKey:[test.t.a]", - " Aggregation_2 input:[Group#2], group by:plus(test.t.a, test.t.c), test.t.a, funcs:sum(test.t.b), firstrow(test.t.a)", - " Aggregation_7 input:[Group#3], group by:Column#16, test.t.a, funcs:sum(Column#14), firstrow(test.t.a)", + "Group#0 Schema:[test.t.b,Column#13]", + " Projection_3 input:[Group#1], test.t.b, Column#13", + "Group#1 Schema:[Column#13,test.t.b]", + " Aggregation_2 input:[Group#2], group by:test.t.b, test.t.c, funcs:sum(test.t.a), firstrow(test.t.b)", + " Aggregation_7 input:[Group#3], group by:test.t.b, test.t.c, funcs:sum(Column#14), firstrow(test.t.b)", "Group#2 Schema:[test.t.a,test.t.b,test.t.c], UniqueKey:[test.t.a]", - " TableGather_5 input:[Group#4]", + " TiKVSingleGather_5 input:[Group#4], table:t", "Group#4 Schema:[test.t.a,test.t.b,test.t.c], UniqueKey:[test.t.a]", " TableScan_4 table:t, pk col:test.t.a", - "Group#3 Schema:[Column#14,Column#16,test.t.a]", - " TableGather_5 input:[Group#5]", - "Group#5 Schema:[Column#14,Column#16,test.t.a]", - " Aggregation_6 input:[Group#4], group by:plus(test.t.a, test.t.c), test.t.a, funcs:sum(test.t.b)" + "Group#3 Schema:[Column#14,test.t.c,test.t.b]", + " TiKVSingleGather_5 input:[Group#5], table:t", + "Group#5 Schema:[Column#14,test.t.c,test.t.b]", + " Aggregation_6 input:[Group#4], group by:test.t.b, test.t.c, funcs:sum(test.t.a)" ] }, { - "SQL": "select a, sum(b) from t group by sin(a)+sin(c)", + "SQL": "select b, sum(a) from t group by sin(b)+sin(c), b", "Result": [ - "Group#0 Schema:[test.t.a,Column#13], UniqueKey:[test.t.a]", - " Projection_3 input:[Group#1], test.t.a, Column#13", - "Group#1 Schema:[Column#13,test.t.a], UniqueKey:[test.t.a]", - " Aggregation_2 input:[Group#2], group by:plus(sin(cast(test.t.a)), sin(cast(test.t.c))), funcs:sum(test.t.b), firstrow(test.t.a)", + "Group#0 Schema:[test.t.b,Column#13]", + " Projection_3 input:[Group#1], test.t.b, Column#13", + "Group#1 Schema:[Column#13,test.t.b]", + " Aggregation_2 input:[Group#2], group by:plus(sin(cast(test.t.b)), sin(cast(test.t.c))), test.t.b, funcs:sum(test.t.a), firstrow(test.t.b)", "Group#2 Schema:[test.t.a,test.t.b,test.t.c], UniqueKey:[test.t.a]", - " TableGather_5 input:[Group#3]", + " TiKVSingleGather_5 input:[Group#3], table:t", "Group#3 Schema:[test.t.a,test.t.b,test.t.c], UniqueKey:[test.t.a]", " TableScan_4 table:t, pk col:test.t.a" ] @@ -203,13 +203,13 @@ "Result": [ "Group#0 Schema:[test.t.b]", " Projection_5 input:[Group#1], test.t.b", - "Group#1 Schema:[test.t.b,test.t.a], UniqueKey:[test.t.a]", + "Group#1 Schema:[test.t.b,test.t.a]", " TopN_8 input:[Group#2], test.t.a:asc, offset:0, count:2", - "Group#2 
Schema:[test.t.b,test.t.a], UniqueKey:[test.t.a]", + "Group#2 Schema:[test.t.b,test.t.a]", " Projection_2 input:[Group#3], test.t.b, test.t.a", - "Group#3 Schema:[test.t.a,test.t.b], UniqueKey:[test.t.a]", - " TableGather_7 input:[Group#4]", - "Group#4 Schema:[test.t.a,test.t.b], UniqueKey:[test.t.a]", + "Group#3 Schema:[test.t.a,test.t.b]", + " TiKVSingleGather_7 input:[Group#4], table:t", + "Group#4 Schema:[test.t.a,test.t.b]", " TableScan_6 table:t, pk col:test.t.a" ] }, @@ -218,16 +218,46 @@ "Result": [ "Group#0 Schema:[Column#14]", " Projection_5 input:[Group#1], Column#13", - "Group#1 Schema:[Column#13,test.t.a], UniqueKey:[test.t.a]", + "Group#1 Schema:[Column#13,test.t.a]", " TopN_8 input:[Group#2], test.t.a:asc, offset:2, count:1", - "Group#2 Schema:[Column#13,test.t.a], UniqueKey:[test.t.a]", + "Group#2 Schema:[Column#13,test.t.a]", " Projection_2 input:[Group#3], plus(test.t.a, test.t.b)->Column#13, test.t.a", - "Group#3 Schema:[test.t.a,test.t.b], UniqueKey:[test.t.a]", - " TableGather_7 input:[Group#4]", - "Group#4 Schema:[test.t.a,test.t.b], UniqueKey:[test.t.a]", + "Group#3 Schema:[test.t.a,test.t.b]", + " TiKVSingleGather_7 input:[Group#4], table:t", + "Group#4 Schema:[test.t.a,test.t.b]", " TableScan_6 table:t, pk col:test.t.a" ] } ] + }, + { + "Name": "TestProjectionElimination", + "Cases": [ + { + "SQL": "select a, b from (select a, b from t) as t2", + "Result": [ + "Group#0 Schema:[test.t.a,test.t.b]", + " TableScan_1 table:t" + ] + }, + { + "SQL": "select a+b from (select a, b from t) as t2", + "Result": [ + "Group#0 Schema:[Column#13]", + " Projection_3 input:[Group#1], plus(test.t.a, test.t.b)->Column#13", + "Group#1 Schema:[test.t.a,test.t.b]", + " TableScan_1 table:t" + ] + }, + { + "SQL": "select a from (select floor(a) as a from t) as t2", + "Result": [ + "Group#0 Schema:[Column#13]", + " Projection_2 input:[Group#1], floor(test.t.a)->Column#13", + "Group#1 Schema:[test.t.a]", + " TableScan_1 table:t" + ] + } + ] } ] diff --git a/planner/cascades/transformation_rules.go b/planner/cascades/transformation_rules.go index c6fd460ccd61e..80447ee24102b 100644 --- a/planner/cascades/transformation_rules.go +++ b/planner/cascades/transformation_rules.go @@ -45,7 +45,7 @@ type Transformation interface { var defaultTransformationMap = map[memo.Operand][]Transformation{ memo.OperandSelection: { NewRulePushSelDownTableScan(), - NewRulePushSelDownTableGather(), + NewRulePushSelDownTiKVSingleGather(), NewRulePushSelDownSort(), NewRulePushSelDownProjection(), NewRulePushSelDownAggregation(), @@ -60,6 +60,9 @@ var defaultTransformationMap = map[memo.Operand][]Transformation{ memo.OperandLimit: { NewRuleTransformLimitToTopN(), }, + memo.OperandProjection: { + NewRuleEliminateProjection(), + }, } type baseRule struct { @@ -101,7 +104,7 @@ func NewRulePushSelDownTableScan() Transformation { // the key ranges of the `ts` operator. 
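The rule documented above detaches the predicates that constrain the handle (primary-key) column and folds them into the scan itself, leaving everything else in a Selection. A minimal runnable sketch of that split follows; the cond type and splitAccessConds name are hypothetical stand-ins for TiDB's expression and ranger machinery, not the real API.

package main

import "fmt"

// cond is a stand-in for expression.Expression: the column a predicate
// constrains, plus its printable form.
type cond struct {
	col string
	str string
}

// splitAccessConds mirrors the idea behind PushSelDownTableScan: predicates
// on the handle column become access conditions of the scan (they shrink its
// key ranges), while the rest stay in a Selection above it.
func splitAccessConds(conds []cond, handleCol string) (access, remained []cond) {
	for _, c := range conds {
		if c.col == handleCol {
			access = append(access, c)
		} else {
			remained = append(remained, c)
		}
	}
	return access, remained
}

func main() {
	conds := []cond{{"a", "gt(test.t.a, 10)"}, {"b", "gt(test.t.b, 1)"}}
	access, remained := splitAccessConds(conds, "a")
	// access corresponds to `TableScan ... cond:[gt(test.t.a, 10)]` in the
	// expected outputs earlier in this patch; remained stays in a Selection.
	fmt.Println(access, remained)
}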
func (r *PushSelDownTableScan) OnTransform(old *memo.ExprIter) (newExprs []*memo.GroupExpr, eraseOld bool, eraseAll bool, err error) { sel := old.GetExpr().ExprNode.(*plannercore.LogicalSelection) - ts := old.Children[0].GetExpr().ExprNode.(*plannercore.TableScan) + ts := old.Children[0].GetExpr().ExprNode.(*plannercore.LogicalTableScan) if ts.Handle == nil { return nil, false, false, nil } @@ -109,7 +112,7 @@ func (r *PushSelDownTableScan) OnTransform(old *memo.ExprIter) (newExprs []*memo if accesses == nil { return nil, false, false, nil } - newTblScan := plannercore.TableScan{ + newTblScan := plannercore.LogicalTableScan{ Source: ts.Source, Handle: ts.Handle, AccessConds: ts.AccessConds.Shallow(), @@ -129,19 +132,19 @@ func (r *PushSelDownTableScan) OnTransform(old *memo.ExprIter) (newExprs []*memo return []*memo.GroupExpr{selExpr}, true, false, nil } -// PushSelDownTableGather pushes the selection down to child of TableGather. -type PushSelDownTableGather struct { +// PushSelDownTiKVSingleGather pushes the selection down to child of TiKVSingleGather. +type PushSelDownTiKVSingleGather struct { baseRule } -// NewRulePushSelDownTableGather creates a new Transformation PushSelDownTableGather. -// The pattern of this rule is `Selection -> TableGather -> Any`. -func NewRulePushSelDownTableGather() Transformation { +// NewRulePushSelDownTiKVSingleGather creates a new Transformation PushSelDownTiKVSingleGather. +// The pattern of this rule is `Selection -> TiKVSingleGather -> Any`. +func NewRulePushSelDownTiKVSingleGather() Transformation { any := memo.NewPattern(memo.OperandAny, memo.EngineTiKVOrTiFlash) - tg := memo.BuildPattern(memo.OperandTableGather, memo.EngineTiDBOnly, any) + tg := memo.BuildPattern(memo.OperandTiKVSingleGather, memo.EngineTiDBOnly, any) p := memo.BuildPattern(memo.OperandSelection, memo.EngineTiDBOnly, tg) - rule := &PushSelDownTableGather{} + rule := &PushSelDownTiKVSingleGather{} rule.pattern = p return rule } @@ -151,12 +154,12 @@ func NewRulePushSelDownTableGather() Transformation { // It transforms `oldSel -> oldTg -> any` to one of the following new exprs: // 1. `newTg -> pushedSel -> any` // 2. `remainedSel -> newTg -> pushedSel -> any` -func (r *PushSelDownTableGather) OnTransform(old *memo.ExprIter) (newExprs []*memo.GroupExpr, eraseOld bool, eraseAll bool, err error) { +func (r *PushSelDownTiKVSingleGather) OnTransform(old *memo.ExprIter) (newExprs []*memo.GroupExpr, eraseOld bool, eraseAll bool, err error) { sel := old.GetExpr().ExprNode.(*plannercore.LogicalSelection) - tg := old.Children[0].GetExpr().ExprNode.(*plannercore.TableGather) + sg := old.Children[0].GetExpr().ExprNode.(*plannercore.TiKVSingleGather) childGroup := old.Children[0].Children[0].Group var pushed, remained []expression.Expression - sctx := tg.SCtx() + sctx := sg.SCtx() _, pushed, remained = expression.ExpressionsToPB(sctx.GetSessionVars().StmtCtx, sel.Conditions, sctx.GetClient()) if len(pushed) == 0 { return nil, false, false, nil @@ -165,12 +168,12 @@ func (r *PushSelDownTableGather) OnTransform(old *memo.ExprIter) (newExprs []*me pushedSelExpr := memo.NewGroupExpr(pushedSel) pushedSelExpr.Children = append(pushedSelExpr.Children, childGroup) pushedSelGroup := memo.NewGroupWithSchema(pushedSelExpr, childGroup.Prop.Schema).SetEngineType(childGroup.EngineType) - // The field content of TableGather would not be modified currently, so we + // The field content of TiKVSingleGather would not be modified currently, so we // just reference the same tg instead of making a copy of it. 
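The two output shapes listed in the OnTransform comment above depend only on whether any condition fails the coprocessor check. A toy sketch of that split; canPushToCop is a hypothetical stand-in for the expression.ExpressionsToPB call above.

package main

import "fmt"

// partition splits a selection's conditions into the part pushed below the
// gather (evaluable by the TiKV coprocessor) and the remainder kept in TiDB.
func partition(conds []string, canPushToCop func(string) bool) (pushed, remained []string) {
	for _, c := range conds {
		if canPushToCop(c) {
			pushed = append(pushed, c)
		} else {
			remained = append(remained, c)
		}
	}
	return pushed, remained
}

func main() {
	// User-variable functions, for example, cannot run in the coprocessor.
	conds := []string{"gt(test.t.b, 10)", `lt(cast(getvar("i")), 10)`}
	pushed, remained := partition(conds, func(c string) bool {
		return c == "gt(test.t.b, 10)"
	})
	fmt.Println(pushed, remained)
	// len(remained) == 0 => `newTg -> pushedSel -> any`
	// len(remained) > 0  => `remainedSel -> newTg -> pushedSel -> any`
}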
// - // TODO: if we save pushed filters later in TableGather, in order to do partition - // pruning or skyline pruning, we need to make a copy of the TableGather here. - tblGatherExpr := memo.NewGroupExpr(tg) + // TODO: if we save pushed filters later in TiKVSingleGather, in order to do partition + // pruning or skyline pruning, we need to make a copy of the TiKVSingleGather here. + tblGatherExpr := memo.NewGroupExpr(sg) tblGatherExpr.Children = append(tblGatherExpr.Children, pushedSelGroup) if len(remained) == 0 { // `oldSel -> oldTg -> any` is transformed to `newTg -> pushedSel -> any`. @@ -202,7 +205,7 @@ func (r *EnumeratePaths) OnTransform(old *memo.ExprIter) (newExprs []*memo.Group ds := old.GetExpr().ExprNode.(*plannercore.DataSource) gathers := ds.Convert2Gathers() for _, gather := range gathers { - expr := convert2GroupExpr(gather) + expr := memo.Convert2GroupExpr(gather) expr.Children[0].SetEngineType(memo.EngineTiKV) newExprs = append(newExprs, expr) } @@ -210,19 +213,19 @@ func (r *EnumeratePaths) OnTransform(old *memo.ExprIter) (newExprs []*memo.Group } // PushAggDownGather splits Aggregation to two stages, final and partial1, -// and pushed the partial Aggregation down to the child of TableGather. +// and pushes the partial Aggregation down to the child of TiKVSingleGather. type PushAggDownGather struct { baseRule } // NewRulePushAggDownGather creates a new Transformation PushAggDownGather. -// The pattern of this rule is: `Aggregation -> TableGather`. +// The pattern of this rule is: `Aggregation -> TiKVSingleGather`. func NewRulePushAggDownGather() Transformation { rule := &PushAggDownGather{} rule.pattern = memo.BuildPattern( memo.OperandAggregation, memo.EngineTiDBOnly, - memo.NewPattern(memo.OperandTableGather, memo.EngineTiDBOnly), + memo.NewPattern(memo.OperandTiKVSingleGather, memo.EngineTiDBOnly), ) return rule } @@ -248,7 +251,7 @@ func (r *PushAggDownGather) Match(expr *memo.ExprIter) bool { func (r *PushAggDownGather) OnTransform(old *memo.ExprIter) (newExprs []*memo.GroupExpr, eraseOld bool, eraseAll bool, err error) { agg := old.GetExpr().ExprNode.(*plannercore.LogicalAggregation) aggSchema := old.GetExpr().Group.Prop.Schema - gather := old.Children[0].GetExpr().ExprNode.(*plannercore.TableGather) + gather := old.Children[0].GetExpr().ExprNode.(*plannercore.TiKVSingleGather) childGroup := old.Children[0].GetExpr().Children[0] // The old Aggregation should stay unchanged for other transformation. // So we build a new LogicalAggregation for the partialAgg.
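How the partial/final split pays off can be pictured without any planner types. A small runnable sketch, with plain maps standing in for TiDB's aggregation executors and slices standing in for TiKV regions; the function names are illustrative only.

package main

import "fmt"

type row struct{ a, b int }

// partialSum is the pushed-down stage: each region computes
// `sum(a) group by b` over its own rows only.
func partialSum(region []row) map[int]int {
	m := make(map[int]int)
	for _, r := range region {
		m[r.b] += r.a
	}
	return m
}

// finalSum is the stage kept in TiDB: it merges the partial results,
// i.e. `sum(Column#14) group by b` in the expected outputs earlier
// in this patch.
func finalSum(partials []map[int]int) map[int]int {
	m := make(map[int]int)
	for _, p := range partials {
		for b, s := range p {
			m[b] += s // a sum of partial sums equals the overall sum
		}
	}
	return m
}

func main() {
	region1 := []row{{a: 1, b: 2}, {a: 4, b: 5}}
	region2 := []row{{a: 7, b: 8}, {a: 3, b: 2}}
	fmt.Println(finalSum([]map[int]int{partialSum(region1), partialSum(region2)}))
	// map[2:4 5:4 8:7]
}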
@@ -356,6 +359,7 @@ func NewRulePushSelDownProjection() Transformation { func (r *PushSelDownProjection) OnTransform(old *memo.ExprIter) (newExprs []*memo.GroupExpr, eraseOld bool, eraseAll bool, err error) { sel := old.GetExpr().ExprNode.(*plannercore.LogicalSelection) proj := old.Children[0].GetExpr().ExprNode.(*plannercore.LogicalProjection) + projSchema := old.Children[0].Prop.Schema childGroup := old.Children[0].GetExpr().Children[0] for _, expr := range proj.Exprs { if expression.HasAssignSetVarFunc(expr) { @@ -366,7 +370,7 @@ func (r *PushSelDownProjection) OnTransform(old *memo.ExprIter) (newExprs []*mem canNotBePushed := make([]expression.Expression, 0, len(sel.Conditions)) for _, cond := range sel.Conditions { if !expression.HasGetSetVarFunc(cond) { - canBePushed = append(canBePushed, expression.ColumnSubstitute(cond, proj.Schema(), proj.Exprs)) + canBePushed = append(canBePushed, expression.ColumnSubstitute(cond, projSchema, proj.Exprs)) } else { canNotBePushed = append(canNotBePushed, cond) } @@ -383,7 +387,7 @@ func (r *PushSelDownProjection) OnTransform(old *memo.ExprIter) (newExprs []*mem if len(canNotBePushed) == 0 { return []*memo.GroupExpr{newProjExpr}, true, false, nil } - newProjGroup := memo.NewGroupWithSchema(newProjExpr, proj.Schema()) + newProjGroup := memo.NewGroupWithSchema(newProjExpr, projSchema) newTopSel := plannercore.LogicalSelection{Conditions: canNotBePushed}.Init(sel.SCtx(), sel.SelectBlockOffset()) newTopSelExpr := memo.NewGroupExpr(newTopSel) newTopSelExpr.SetChildren(newProjGroup) @@ -413,6 +417,7 @@ func NewRulePushSelDownAggregation() Transformation { func (r *PushSelDownAggregation) OnTransform(old *memo.ExprIter) (newExprs []*memo.GroupExpr, eraseOld bool, eraseAll bool, err error) { sel := old.GetExpr().ExprNode.(*plannercore.LogicalSelection) agg := old.Children[0].GetExpr().ExprNode.(*plannercore.LogicalAggregation) + aggSchema := old.Children[0].Prop.Schema var pushedExprs []expression.Expression var remainedExprs []expression.Expression exprsOriginal := make([]expression.Expression, 0, len(agg.AggFuncs)) @@ -439,7 +444,7 @@ func (r *PushSelDownAggregation) OnTransform(old *memo.ExprIter) (newExprs []*me } if canPush { // TODO: Don't substitute since they should be the same column. - newCond := expression.ColumnSubstitute(cond, agg.Schema(), exprsOriginal) + newCond := expression.ColumnSubstitute(cond, aggSchema, exprsOriginal) pushedExprs = append(pushedExprs, newCond) } else { remainedExprs = append(remainedExprs, cond) @@ -466,7 +471,7 @@ func (r *PushSelDownAggregation) OnTransform(old *memo.ExprIter) (newExprs []*me return []*memo.GroupExpr{aggGroupExpr}, true, false, nil } - aggGroup := memo.NewGroupWithSchema(aggGroupExpr, agg.Schema()) + aggGroup := memo.NewGroupWithSchema(aggGroupExpr, aggSchema) remainedSel := plannercore.LogicalSelection{Conditions: remainedExprs}.Init(sctx, sel.SelectBlockOffset()) remainedGroupExpr := memo.NewGroupExpr(remainedSel) remainedGroupExpr.SetChildren(aggGroup) @@ -589,3 +594,43 @@ func (r *PushSelDownJoin) OnTransform(old *memo.ExprIter) (newExprs []*memo.Grou newJoinExpr.SetChildren(leftGroup, rightGroup) return []*memo.GroupExpr{newJoinExpr}, true, false, nil } + +// EliminateProjection eliminates the projection. +type EliminateProjection struct { + baseRule +} + +// NewRuleEliminateProjection creates a new Transformation EliminateProjection. +// The pattern of this rule is `Projection -> Any`. 
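The rule defined below only fires when the projection is a pure pass-through. A simplified sketch of its schema comparison, with plain strings standing in for *expression.Column and string equality standing in for col.Equal:

package main

import "fmt"

// isPassThrough reports whether a projection outputs exactly its child's
// columns, in the same order; only then can the projection be eliminated.
func isPassThrough(projCols, childCols []string) bool {
	if len(projCols) != len(childCols) {
		return false
	}
	for i := range projCols {
		if projCols[i] != childCols[i] {
			return false
		}
	}
	return true
}

func main() {
	// `select a, b from (select a, b from t) as t2`: redundant projection.
	fmt.Println(isPassThrough([]string{"test.t.a", "test.t.b"}, []string{"test.t.a", "test.t.b"})) // true
	// `select a from (select floor(a) as a from t) as t2`: Column#13 is
	// computed from test.t.a, so the projection must stay.
	fmt.Println(isPassThrough([]string{"Column#13"}, []string{"test.t.a"})) // false
}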
+func NewRuleEliminateProjection() Transformation { + rule := &EliminateProjection{} + rule.pattern = memo.BuildPattern( + memo.OperandProjection, + memo.EngineTiDBOnly, + memo.NewPattern(memo.OperandAny, memo.EngineTiDBOnly), + ) + return rule +} + +// OnTransform implements Transformation interface. +// This rule tries to eliminate the projection whose output columns are the same as its child's. +func (r *EliminateProjection) OnTransform(old *memo.ExprIter) (newExprs []*memo.GroupExpr, eraseOld bool, eraseAll bool, err error) { + child := old.Children[0] + if child.Group.Prop.Schema.Len() != old.GetExpr().Group.Prop.Schema.Len() { + return nil, false, false, nil + } + + oldCols := old.GetExpr().Group.Prop.Schema.Columns + for i, col := range child.Group.Prop.Schema.Columns { + if !col.Equal(nil, oldCols[i]) { + return nil, false, false, nil + } + } + + // Promote the child group's expressions. + finalGroupExprs := make([]*memo.GroupExpr, 0, child.Group.Equivalents.Len()) + for elem := child.Group.Equivalents.Front(); elem != nil; elem = elem.Next() { + finalGroupExprs = append(finalGroupExprs, elem.Value.(*memo.GroupExpr)) + } + return finalGroupExprs, true, false, nil +} diff --git a/planner/cascades/transformation_rules_test.go b/planner/cascades/transformation_rules_test.go index f72f1f9bbefc3..d0ac612e4076a 100644 --- a/planner/cascades/transformation_rules_test.go +++ b/planner/cascades/transformation_rules_test.go @@ -63,7 +63,7 @@ func testGroupToString(input []string, output []struct { c.Assert(ok, IsTrue) logic, err = s.optimizer.onPhasePreprocessing(s.sctx, logic) c.Assert(err, IsNil) - group := convert2Group(logic) + group := memo.Convert2Group(logic) err = s.optimizer.onPhaseExploration(s.sctx, group) c.Assert(err, IsNil) s.testData.OnRecord(func() { @@ -92,14 +92,33 @@ func (s *testTransformationRuleSuite) TestAggPushDownGather(c *C) { Result []string } s.testData.GetTestCases(c, &input, &output) - testGroupToString(input, output, s, c) + for i, sql := range input { + stmt, err := s.ParseOneStmt(sql, "", "") + c.Assert(err, IsNil) + p, _, err := plannercore.BuildLogicalPlan(context.Background(), s.sctx, stmt, s.is) + c.Assert(err, IsNil) + logic, ok := p.(plannercore.LogicalPlan) + c.Assert(ok, IsTrue) + logic, err = s.optimizer.onPhasePreprocessing(s.sctx, logic) + c.Assert(err, IsNil) + group := memo.Convert2Group(logic) + err = s.optimizer.onPhaseExploration(s.sctx, group) + c.Assert(err, IsNil) + // BuildKeyInfo here to test the KeyInfo for partialAgg.
+ group.BuildKeyInfo() + s.testData.OnRecord(func() { + output[i].SQL = sql + output[i].Result = ToString(group) + }) + c.Assert(ToString(group), DeepEquals, output[i].Result) + } } func (s *testTransformationRuleSuite) TestPredicatePushDown(c *C) { s.optimizer.ResetTransformationRules(map[memo.Operand][]Transformation{ memo.OperandSelection: { NewRulePushSelDownTableScan(), - NewRulePushSelDownTableGather(), + NewRulePushSelDownTiKVSingleGather(), NewRulePushSelDownSort(), NewRulePushSelDownProjection(), NewRulePushSelDownAggregation(), @@ -138,3 +157,21 @@ func (s *testTransformationRuleSuite) TestTopNRules(c *C) { s.testData.GetTestCases(c, &input, &output) testGroupToString(input, output, s, c) } + +func (s *testTransformationRuleSuite) TestProjectionElimination(c *C) { + s.optimizer.ResetTransformationRules(map[memo.Operand][]Transformation{ + memo.OperandProjection: { + NewRuleEliminateProjection(), + }, + }) + defer func() { + s.optimizer.ResetTransformationRules(defaultTransformationMap) + }() + var input []string + var output []struct { + SQL string + Result []string + } + s.testData.GetTestCases(c, &input, &output) + testGroupToString(input, output, s, c) +} diff --git a/planner/core/cbo_test.go b/planner/core/cbo_test.go index bf3f591dd8c63..878eff8d1a8df 100644 --- a/planner/core/cbo_test.go +++ b/planner/core/cbo_test.go @@ -94,7 +94,7 @@ func (s *testAnalyzeSuite) TestExplainAnalyze(c *C) { rs := tk.MustQuery("explain analyze select t1.a, t1.b, sum(t1.c) from t1 join t2 on t1.a = t2.b where t1.a > 1") c.Assert(len(rs.Rows()), Equals, 10) for _, row := range rs.Rows() { - c.Assert(len(row), Equals, 6) + c.Assert(len(row), Equals, 7) execInfo := row[4].(string) c.Assert(strings.Contains(execInfo, "time"), Equals, true) c.Assert(strings.Contains(execInfo, "loops"), Equals, true) @@ -131,6 +131,8 @@ func (s *testAnalyzeSuite) TestCBOWithoutAnalyze(c *C) { " └─Selection_14 5.99 cop[tikv] not(isnull(test.t2.a))", " └─TableScan_13 6.00 cop[tikv] table:t2, range:[-inf,+inf], keep order:false, stats:pseudo", )) + testKit.MustQuery("explain format = 'hint' select * from t1, t2 where t1.a = t2.a").Check(testkit.Rows( + "USE_INDEX(@`sel_1` `test`.`t1` ), USE_INDEX(@`sel_1` `test`.`t2` ), HASH_JOIN(@`sel_1` `test`.`t1`)")) } func (s *testAnalyzeSuite) TestStraightJoin(c *C) { diff --git a/planner/core/common_plans.go b/planner/core/common_plans.go index 3687edab2d0ac..e5299ab8395ec 100644 --- a/planner/core/common_plans.go +++ b/planner/core/common_plans.go @@ -675,9 +675,11 @@ func (e *Explain) prepareSchema() error { case format == ast.ExplainFormatROW && !e.Analyze: fieldNames = []string{"id", "count", "task", "operator info"} case format == ast.ExplainFormatROW && e.Analyze: - fieldNames = []string{"id", "count", "task", "operator info", "execution info", "memory"} + fieldNames = []string{"id", "count", "task", "operator info", "execution info", "memory", "disk"} case format == ast.ExplainFormatDOT: fieldNames = []string{"dot contents"} + case format == ast.ExplainFormatHint: + fieldNames = []string{"hint"} default: return errors.Errorf("explain format '%s' is not supported now", e.Format) } @@ -709,6 +711,8 @@ func (e *Explain) RenderResult() error { } case ast.ExplainFormatDOT: e.prepareDotInfo(e.TargetPlan.(PhysicalPlan)) + case ast.ExplainFormatHint: + e.Rows = append(e.Rows, []string{GenHintsFromPhysicalPlan(e.TargetPlan)}) default: return errors.Errorf("explain format '%s' is not supported now", e.Format) } @@ -739,14 +743,12 @@ func (e *Explain) explainPlanInRowFormat(p Plan, 
taskType, indent string, isLast case *PhysicalTableReader: var storeType string switch x.StoreType { - case kv.TiKV: - storeType = kv.TiKV.Name() - case kv.TiFlash: - storeType = kv.TiFlash.Name() + case kv.TiKV, kv.TiFlash, kv.TiDB: + // expected do nothing default: - err = errors.Errorf("the store type %v is unknown", x.StoreType) - return + return errors.Errorf("the store type %v is unknown", x.StoreType) } + storeType = x.StoreType.Name() err = e.explainPlanInRowFormat(x.tablePlan, "cop["+storeType+"]", childIndent, true) case *PhysicalIndexReader: err = e.explainPlanInRowFormat(x.indexPlan, "cop[tikv]", childIndent, true) @@ -788,7 +790,6 @@ func (e *Explain) explainPlanInRowFormat(p Plan, taskType, indent string, isLast // operator id, task type, operator info, and the estemated row count. func (e *Explain) prepareOperatorInfo(p Plan, taskType string, indent string, isLastChild bool) { operatorInfo := p.ExplainInfo() - count := "N/A" if si := p.statsInfo(); si != nil { count = strconv.FormatFloat(si.RowCount, 'f', 2, 64) @@ -815,9 +816,16 @@ func (e *Explain) prepareOperatorInfo(p Plan, taskType string, indent string, is } row = append(row, analyzeInfo) - tracker := e.ctx.GetSessionVars().StmtCtx.MemTracker.SearchTracker(p.ExplainID().String()) - if tracker != nil { - row = append(row, tracker.BytesToString(tracker.MaxConsumed())) + memTracker := e.ctx.GetSessionVars().StmtCtx.MemTracker.SearchTracker(p.ExplainID().String()) + if memTracker != nil { + row = append(row, memTracker.BytesToString(memTracker.MaxConsumed())) + } else { + row = append(row, "N/A") + } + + diskTracker := e.ctx.GetSessionVars().StmtCtx.DiskTracker.SearchTracker(p.ExplainID().String()) + if diskTracker != nil { + row = append(row, diskTracker.BytesToString(diskTracker.MaxConsumed())) } else { row = append(row, "N/A") } diff --git a/planner/core/encode.go b/planner/core/encode.go index 4abb3ed9b7cd9..f7e4e665856a5 100644 --- a/planner/core/encode.go +++ b/planner/core/encode.go @@ -15,6 +15,9 @@ package core import ( "bytes" + "crypto/sha256" + "fmt" + "hash" "sync" "github.com/pingcap/tidb/util/plancodec" @@ -32,10 +35,14 @@ type planEncoder struct { } // EncodePlan is used to encodePlan the plan to the plan tree with compressing. -func EncodePlan(p PhysicalPlan) string { +func EncodePlan(p Plan) string { pn := encoderPool.Get().(*planEncoder) defer encoderPool.Put(pn) - return pn.encodePlanTree(p) + selectPlan := getSelectPlan(p) + if selectPlan == nil { + return "" + } + return pn.encodePlanTree(selectPlan) } func (pn *planEncoder) encodePlanTree(p PhysicalPlan) string { @@ -64,5 +71,95 @@ func (pn *planEncoder) encodePlan(p PhysicalPlan, isRoot bool, depth int) { case *PhysicalIndexLookUpReader: pn.encodePlan(copPlan.indexPlan, false, depth) pn.encodePlan(copPlan.tablePlan, false, depth) + case *PhysicalIndexMergeReader: + for _, p := range copPlan.partialPlans { + pn.encodePlan(p, false, depth) + } + if copPlan.tablePlan != nil { + pn.encodePlan(copPlan.tablePlan, false, depth) + } + } +} + +var digesterPool = sync.Pool{ + New: func() interface{} { + return &planDigester{ + hasher: sha256.New(), + } + }, +} + +type planDigester struct { + buf bytes.Buffer + encodedPlans map[int]bool + hasher hash.Hash +} + +// NormalizePlan is used to normalize the plan and generate plan digest. 
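The digest half of NormalizePlan below needs nothing beyond the standard library; the interesting part is the normalization itself (plancodec.NormalizePlanNode strips per-query operator IDs and literal values), which this sketch elides.

package main

import (
	"crypto/sha256"
	"fmt"
)

// planDigest hashes an already-normalized plan text, mirroring the
// sha256-then-hex step in the NormalizePlan code that follows.
func planDigest(normalized string) string {
	h := sha256.New()
	h.Write([]byte(normalized))
	return fmt.Sprintf("%x", h.Sum(nil))
}

func main() {
	// Two queries whose plans normalize to the same text share one digest,
	// which is what makes grouping statements by plan shape cheap.
	fmt.Println(planDigest("TableReader(TableScan(t))"))
}

Pooling one hasher and buffer per digester, as the patch does with digesterPool, simply avoids re-allocating them on every call.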
+func NormalizePlan(p Plan) (normalized, digest string) {
+	selectPlan := getSelectPlan(p)
+	if selectPlan == nil {
+		return "", ""
+	}
+	d := digesterPool.Get().(*planDigester)
+	defer digesterPool.Put(d)
+	d.normalizePlanTree(selectPlan)
+	normalized = string(d.buf.Bytes())
+	d.hasher.Write(d.buf.Bytes())
+	d.buf.Reset()
+	digest = fmt.Sprintf("%x", d.hasher.Sum(nil))
+	d.hasher.Reset()
+	return
+}
+
+func (d *planDigester) normalizePlanTree(p PhysicalPlan) {
+	d.encodedPlans = make(map[int]bool)
+	d.buf.Reset()
+	d.normalizePlan(p, true, 0)
+}
+
+func (d *planDigester) normalizePlan(p PhysicalPlan, isRoot bool, depth int) {
+	plancodec.NormalizePlanNode(depth, p.ID(), p.TP(), isRoot, p.ExplainNormalizedInfo(), &d.buf)
+	d.encodedPlans[p.ID()] = true
+
+	depth++
+	for _, child := range p.Children() {
+		if d.encodedPlans[child.ID()] {
+			continue
+		}
+		d.normalizePlan(child.(PhysicalPlan), isRoot, depth)
+	}
+	switch x := p.(type) {
+	case *PhysicalTableReader:
+		d.normalizePlan(x.tablePlan, false, depth)
+	case *PhysicalIndexReader:
+		d.normalizePlan(x.indexPlan, false, depth)
+	case *PhysicalIndexLookUpReader:
+		d.normalizePlan(x.indexPlan, false, depth)
+		d.normalizePlan(x.tablePlan, false, depth)
+	case *PhysicalIndexMergeReader:
+		for _, p := range x.partialPlans {
+			d.normalizePlan(p, false, depth)
+		}
+		if x.tablePlan != nil {
+			d.normalizePlan(x.tablePlan, false, depth)
+		}
+	}
+}
+
+func getSelectPlan(p Plan) PhysicalPlan {
+	var selectPlan PhysicalPlan
+	if physicalPlan, ok := p.(PhysicalPlan); ok {
+		selectPlan = physicalPlan
+	} else {
+		switch x := p.(type) {
+		case *Delete:
+			selectPlan = x.SelectPlan
+		case *Update:
+			selectPlan = x.SelectPlan
+		case *Insert:
+			selectPlan = x.SelectPlan
+		}
+	}
+	return selectPlan
+}
diff --git a/planner/core/errors.go b/planner/core/errors.go
index 72d75a9a5f47a..8097e50285a0e 100644
--- a/planner/core/errors.go
+++ b/planner/core/errors.go
@@ -18,187 +18,133 @@ import (
 	"github.com/pingcap/parser/terror"
 )

-const (
-	codeUnsupportedType terror.ErrCode = 1
-	codeAnalyzeMissIndex = 2
-	codeUnsupported = 3
-	codeStmtNotFound = 4
-	codeWrongParamCount = 5
-	codeSchemaChanged = 6
-
-	codeNotSupportedYet = mysql.ErrNotSupportedYet
-	codeWrongUsage = mysql.ErrWrongUsage
-	codeAmbiguous = mysql.ErrNonUniq
-	codeUnknown = mysql.ErrUnknown
-	codeUnknownColumn = mysql.ErrBadField
-	codeUnknownTable = mysql.ErrUnknownTable
-	codeWrongArguments = mysql.ErrWrongArguments
-	codeBadGeneratedColumn = mysql.ErrBadGeneratedColumn
-	codeFieldNotInGroupBy = mysql.ErrFieldNotInGroupBy
-	codeBadTable = mysql.ErrBadTable
-	codeKeyDoesNotExist = mysql.ErrKeyDoesNotExist
-	codeOperandColumns = mysql.ErrOperandColumns
-	codeInvalidWildCard = mysql.ErrParse
-	codeInvalidGroupFuncUse = mysql.ErrInvalidGroupFuncUse
-	codeIllegalReference = mysql.ErrIllegalReference
-	codeNoDB = mysql.ErrNoDB
-	codeUnknownExplainFormat = mysql.ErrUnknownExplainFormat
-	codeWrongGroupField = mysql.ErrWrongGroupField
-	codeDupFieldName = mysql.ErrDupFieldName
-	codeNonUpdatableTable = mysql.ErrNonUpdatableTable
-	codeInternal = mysql.ErrInternal
-	codeMixOfGroupFuncAndFields = mysql.ErrMixOfGroupFuncAndFields
-	codeNonUniqTable = mysql.ErrNonuniqTable
-	codeWrongNumberOfColumnsInSelect = mysql.ErrWrongNumberOfColumnsInSelect
-	codeWrongValueCountOnRow = mysql.ErrWrongValueCountOnRow
-	codeTablenameNotAllowedHere = mysql.ErrTablenameNotAllowedHere
-	codePrivilegeCheckFail = mysql.ErrUnknown
-	codeWindowInvalidWindowFuncUse = mysql.ErrWindowInvalidWindowFuncUse
-	codeWindowInvalidWindowFuncAliasUse = mysql.ErrWindowInvalidWindowFuncAliasUse
-	codeWindowNoSuchWindow = mysql.ErrWindowNoSuchWindow
-	codeWindowCircularityInWindowGraph = mysql.ErrWindowCircularityInWindowGraph
-	codeWindowNoChildPartitioning = mysql.ErrWindowNoChildPartitioning
-	codeWindowNoInherentFrame = mysql.ErrWindowNoInherentFrame
-	codeWindowNoRedefineOrderBy = mysql.ErrWindowNoRedefineOrderBy
-	codeWindowDuplicateName = mysql.ErrWindowDuplicateName
-	codeErrTooBigPrecision = mysql.ErrTooBigPrecision
-	codePartitionClauseOnNonpartitioned = mysql.ErrPartitionClauseOnNonpartitioned
-	codeDBaccessDenied = mysql.ErrDBaccessDenied
-	codeTableaccessDenied = mysql.ErrTableaccessDenied
-	codeSpecificAccessDenied = mysql.ErrSpecificAccessDenied
-	codeViewNoExplain = mysql.ErrViewNoExplain
-	codeWindowFrameStartIllegal = mysql.ErrWindowFrameStartIllegal
-	codeWindowFrameEndIllegal = mysql.ErrWindowFrameEndIllegal
-	codeWindowFrameIllegal = mysql.ErrWindowFrameIllegal
-	codeWindowRangeFrameOrderType = mysql.ErrWindowRangeFrameOrderType
-	codeWindowRangeFrameTemporalType = mysql.ErrWindowRangeFrameTemporalType
-	codeWindowRangeFrameNumericType = mysql.ErrWindowRangeFrameNumericType
-	codeWindowRangeBoundNotConstant = mysql.ErrWindowRangeBoundNotConstant
-	codeWindowRowsIntervalUse = mysql.ErrWindowRowsIntervalUse
-	codeWindowFunctionIgnoresFrame = mysql.ErrWindowFunctionIgnoresFrame
-	codeUnsupportedOnGeneratedColumn = mysql.ErrUnsupportedOnGeneratedColumn
-)
-
 // error definitions.
 var (
-	ErrUnsupportedType = terror.ClassOptimizer.New(codeUnsupportedType, "Unsupported type %T")
-	ErrAnalyzeMissIndex = terror.ClassOptimizer.New(codeAnalyzeMissIndex, "Index '%s' in field list does not exist in table '%s'")
-	ErrCartesianProductUnsupported = terror.ClassOptimizer.New(codeUnsupported, "Cartesian product is unsupported")
-	ErrStmtNotFound = terror.ClassOptimizer.New(codeStmtNotFound, "Prepared statement not found")
-	ErrWrongParamCount = terror.ClassOptimizer.New(codeWrongParamCount, "Wrong parameter count")
-	ErrSchemaChanged = terror.ClassOptimizer.New(codeSchemaChanged, "Schema has changed")
-	ErrTablenameNotAllowedHere = terror.ClassOptimizer.New(codeTablenameNotAllowedHere, "Table '%s' from one of the %ss cannot be used in %s")
-
-	ErrNotSupportedYet = terror.ClassOptimizer.New(codeNotSupportedYet, mysql.MySQLErrName[mysql.ErrNotSupportedYet])
-	ErrWrongUsage = terror.ClassOptimizer.New(codeWrongUsage, mysql.MySQLErrName[mysql.ErrWrongUsage])
-	ErrAmbiguous = terror.ClassOptimizer.New(codeAmbiguous, mysql.MySQLErrName[mysql.ErrNonUniq])
-	ErrUnknown = terror.ClassOptimizer.New(codeUnknown, mysql.MySQLErrName[mysql.ErrUnknown])
-	ErrUnknownColumn = terror.ClassOptimizer.New(codeUnknownColumn, mysql.MySQLErrName[mysql.ErrBadField])
-	ErrUnknownTable = terror.ClassOptimizer.New(codeUnknownTable, mysql.MySQLErrName[mysql.ErrUnknownTable])
-	ErrWrongArguments = terror.ClassOptimizer.New(codeWrongArguments, mysql.MySQLErrName[mysql.ErrWrongArguments])
-	ErrWrongNumberOfColumnsInSelect = terror.ClassOptimizer.New(codeWrongNumberOfColumnsInSelect, mysql.MySQLErrName[mysql.ErrWrongNumberOfColumnsInSelect])
-	ErrBadGeneratedColumn = terror.ClassOptimizer.New(codeBadGeneratedColumn, mysql.MySQLErrName[mysql.ErrBadGeneratedColumn])
-	ErrFieldNotInGroupBy = terror.ClassOptimizer.New(codeFieldNotInGroupBy, mysql.MySQLErrName[mysql.ErrFieldNotInGroupBy])
-	ErrBadTable = terror.ClassOptimizer.New(codeBadTable, mysql.MySQLErrName[mysql.ErrBadTable])
-	ErrKeyDoesNotExist = terror.ClassOptimizer.New(codeKeyDoesNotExist, mysql.MySQLErrName[mysql.ErrKeyDoesNotExist])
-	ErrOperandColumns = terror.ClassOptimizer.New(codeOperandColumns, mysql.MySQLErrName[mysql.ErrOperandColumns])
-	ErrInvalidWildCard = terror.ClassOptimizer.New(codeInvalidWildCard, "Wildcard fields without any table name appears in wrong place")
-	ErrInvalidGroupFuncUse = terror.ClassOptimizer.New(codeInvalidGroupFuncUse, mysql.MySQLErrName[mysql.ErrInvalidGroupFuncUse])
-	ErrIllegalReference = terror.ClassOptimizer.New(codeIllegalReference, mysql.MySQLErrName[mysql.ErrIllegalReference])
-	ErrNoDB = terror.ClassOptimizer.New(codeNoDB, mysql.MySQLErrName[mysql.ErrNoDB])
-	ErrUnknownExplainFormat = terror.ClassOptimizer.New(codeUnknownExplainFormat, mysql.MySQLErrName[mysql.ErrUnknownExplainFormat])
-	ErrWrongGroupField = terror.ClassOptimizer.New(codeWrongGroupField, mysql.MySQLErrName[mysql.ErrWrongGroupField])
-	ErrDupFieldName = terror.ClassOptimizer.New(codeDupFieldName, mysql.MySQLErrName[mysql.ErrDupFieldName])
-	ErrNonUpdatableTable = terror.ClassOptimizer.New(codeNonUpdatableTable, mysql.MySQLErrName[mysql.ErrNonUpdatableTable])
-	ErrInternal = terror.ClassOptimizer.New(codeInternal, mysql.MySQLErrName[mysql.ErrInternal])
-	ErrMixOfGroupFuncAndFields = terror.ClassOptimizer.New(codeMixOfGroupFuncAndFields, "In aggregated query without GROUP BY, expression #%d of SELECT list contains nonaggregated column '%s'; this is incompatible with sql_mode=only_full_group_by")
-	ErrNonUniqTable = terror.ClassOptimizer.New(codeNonUniqTable, mysql.MySQLErrName[mysql.ErrNonuniqTable])
-	ErrWrongValueCountOnRow = terror.ClassOptimizer.New(mysql.ErrWrongValueCountOnRow, mysql.MySQLErrName[mysql.ErrWrongValueCountOnRow])
-	ErrViewInvalid = terror.ClassOptimizer.New(mysql.ErrViewInvalid, mysql.MySQLErrName[mysql.ErrViewInvalid])
-	ErrPrivilegeCheckFail = terror.ClassOptimizer.New(codePrivilegeCheckFail, "privilege check fail")
-	ErrWindowInvalidWindowFuncUse = terror.ClassOptimizer.New(codeWindowInvalidWindowFuncUse, mysql.MySQLErrName[mysql.ErrWindowInvalidWindowFuncUse])
-	ErrWindowInvalidWindowFuncAliasUse = terror.ClassOptimizer.New(codeWindowInvalidWindowFuncAliasUse, mysql.MySQLErrName[mysql.ErrWindowInvalidWindowFuncAliasUse])
-	ErrWindowNoSuchWindow = terror.ClassOptimizer.New(codeWindowNoSuchWindow, mysql.MySQLErrName[mysql.ErrWindowNoSuchWindow])
-	ErrWindowCircularityInWindowGraph = terror.ClassOptimizer.New(codeWindowCircularityInWindowGraph, mysql.MySQLErrName[mysql.ErrWindowCircularityInWindowGraph])
-	ErrWindowNoChildPartitioning = terror.ClassOptimizer.New(codeWindowNoChildPartitioning, mysql.MySQLErrName[mysql.ErrWindowNoChildPartitioning])
-	ErrWindowNoInherentFrame = terror.ClassOptimizer.New(codeWindowNoInherentFrame, mysql.MySQLErrName[mysql.ErrWindowNoInherentFrame])
-	ErrWindowNoRedefineOrderBy = terror.ClassOptimizer.New(codeWindowNoRedefineOrderBy, mysql.MySQLErrName[mysql.ErrWindowNoRedefineOrderBy])
-	ErrWindowDuplicateName = terror.ClassOptimizer.New(codeWindowDuplicateName, mysql.MySQLErrName[mysql.ErrWindowDuplicateName])
-	ErrPartitionClauseOnNonpartitioned = terror.ClassOptimizer.New(codePartitionClauseOnNonpartitioned, mysql.MySQLErrName[mysql.ErrPartitionClauseOnNonpartitioned])
+	ErrUnsupportedType = terror.ClassOptimizer.New(mysql.ErrUnsupportedType, mysql.MySQLErrName[mysql.ErrUnsupportedType])
+	ErrAnalyzeMissIndex = terror.ClassOptimizer.New(mysql.ErrAnalyzeMissIndex, mysql.MySQLErrName[mysql.ErrAnalyzeMissIndex])
+	ErrWrongParamCount = terror.ClassOptimizer.New(mysql.ErrWrongParamCount, mysql.MySQLErrName[mysql.ErrWrongParamCount])
+	ErrSchemaChanged = terror.ClassOptimizer.New(mysql.ErrSchemaChanged, mysql.MySQLErrName[mysql.ErrSchemaChanged])
+	ErrTablenameNotAllowedHere = terror.ClassOptimizer.New(mysql.ErrTablenameNotAllowedHere, mysql.MySQLErrName[mysql.ErrTablenameNotAllowedHere])
+	ErrNotSupportedYet = terror.ClassOptimizer.New(mysql.ErrNotSupportedYet, mysql.MySQLErrName[mysql.ErrNotSupportedYet])
+	ErrWrongUsage = terror.ClassOptimizer.New(mysql.ErrWrongUsage, mysql.MySQLErrName[mysql.ErrWrongUsage])
+	ErrUnknown = terror.ClassOptimizer.New(mysql.ErrUnknown, mysql.MySQLErrName[mysql.ErrUnknown])
+	ErrUnknownTable = terror.ClassOptimizer.New(mysql.ErrUnknownTable, mysql.MySQLErrName[mysql.ErrUnknownTable])
+	ErrWrongArguments = terror.ClassOptimizer.New(mysql.ErrWrongArguments, mysql.MySQLErrName[mysql.ErrWrongArguments])
+	ErrWrongNumberOfColumnsInSelect = terror.ClassOptimizer.New(mysql.ErrWrongNumberOfColumnsInSelect, mysql.MySQLErrName[mysql.ErrWrongNumberOfColumnsInSelect])
+	ErrBadGeneratedColumn = terror.ClassOptimizer.New(mysql.ErrBadGeneratedColumn, mysql.MySQLErrName[mysql.ErrBadGeneratedColumn])
+	ErrFieldNotInGroupBy = terror.ClassOptimizer.New(mysql.ErrFieldNotInGroupBy, mysql.MySQLErrName[mysql.ErrFieldNotInGroupBy])
+	ErrBadTable = terror.ClassOptimizer.New(mysql.ErrBadTable, mysql.MySQLErrName[mysql.ErrBadTable])
+	ErrKeyDoesNotExist = terror.ClassOptimizer.New(mysql.ErrKeyDoesNotExist, mysql.MySQLErrName[mysql.ErrKeyDoesNotExist])
+	ErrOperandColumns = terror.ClassOptimizer.New(mysql.ErrOperandColumns, mysql.MySQLErrName[mysql.ErrOperandColumns])
+	ErrInvalidGroupFuncUse = terror.ClassOptimizer.New(mysql.ErrInvalidGroupFuncUse, mysql.MySQLErrName[mysql.ErrInvalidGroupFuncUse])
+	ErrIllegalReference = terror.ClassOptimizer.New(mysql.ErrIllegalReference, mysql.MySQLErrName[mysql.ErrIllegalReference])
+	ErrNoDB = terror.ClassOptimizer.New(mysql.ErrNoDB, mysql.MySQLErrName[mysql.ErrNoDB])
+	ErrUnknownExplainFormat = terror.ClassOptimizer.New(mysql.ErrUnknownExplainFormat, mysql.MySQLErrName[mysql.ErrUnknownExplainFormat])
+	ErrWrongGroupField = terror.ClassOptimizer.New(mysql.ErrWrongGroupField, mysql.MySQLErrName[mysql.ErrWrongGroupField])
+	ErrDupFieldName = terror.ClassOptimizer.New(mysql.ErrDupFieldName, mysql.MySQLErrName[mysql.ErrDupFieldName])
+	ErrNonUpdatableTable = terror.ClassOptimizer.New(mysql.ErrNonUpdatableTable, mysql.MySQLErrName[mysql.ErrNonUpdatableTable])
+	ErrInternal = terror.ClassOptimizer.New(mysql.ErrInternal, mysql.MySQLErrName[mysql.ErrInternal])
+	ErrNonUniqTable = terror.ClassOptimizer.New(mysql.ErrNonuniqTable, mysql.MySQLErrName[mysql.ErrNonuniqTable])
+	ErrWindowInvalidWindowFuncUse = terror.ClassOptimizer.New(mysql.ErrWindowInvalidWindowFuncUse, mysql.MySQLErrName[mysql.ErrWindowInvalidWindowFuncUse])
+	ErrWindowInvalidWindowFuncAliasUse = terror.ClassOptimizer.New(mysql.ErrWindowInvalidWindowFuncAliasUse, mysql.MySQLErrName[mysql.ErrWindowInvalidWindowFuncAliasUse])
+	ErrWindowNoSuchWindow = terror.ClassOptimizer.New(mysql.ErrWindowNoSuchWindow, mysql.MySQLErrName[mysql.ErrWindowNoSuchWindow])
+	ErrWindowCircularityInWindowGraph = terror.ClassOptimizer.New(mysql.ErrWindowCircularityInWindowGraph, mysql.MySQLErrName[mysql.ErrWindowCircularityInWindowGraph])
+	ErrWindowNoChildPartitioning = terror.ClassOptimizer.New(mysql.ErrWindowNoChildPartitioning, mysql.MySQLErrName[mysql.ErrWindowNoChildPartitioning])
+	ErrWindowNoInherentFrame = terror.ClassOptimizer.New(mysql.ErrWindowNoInherentFrame, mysql.MySQLErrName[mysql.ErrWindowNoInherentFrame])
+	ErrWindowNoRedefineOrderBy = terror.ClassOptimizer.New(mysql.ErrWindowNoRedefineOrderBy, mysql.MySQLErrName[mysql.ErrWindowNoRedefineOrderBy])
+	ErrWindowDuplicateName = terror.ClassOptimizer.New(mysql.ErrWindowDuplicateName, mysql.MySQLErrName[mysql.ErrWindowDuplicateName])
+	ErrPartitionClauseOnNonpartitioned = terror.ClassOptimizer.New(mysql.ErrPartitionClauseOnNonpartitioned, mysql.MySQLErrName[mysql.ErrPartitionClauseOnNonpartitioned])
+	ErrWindowFrameStartIllegal = terror.ClassOptimizer.New(mysql.ErrWindowFrameStartIllegal, mysql.MySQLErrName[mysql.ErrWindowFrameStartIllegal])
+	ErrWindowFrameEndIllegal = terror.ClassOptimizer.New(mysql.ErrWindowFrameEndIllegal, mysql.MySQLErrName[mysql.ErrWindowFrameEndIllegal])
+	ErrWindowFrameIllegal = terror.ClassOptimizer.New(mysql.ErrWindowFrameIllegal, mysql.MySQLErrName[mysql.ErrWindowFrameIllegal])
+	ErrWindowRangeFrameOrderType = terror.ClassOptimizer.New(mysql.ErrWindowRangeFrameOrderType, mysql.MySQLErrName[mysql.ErrWindowRangeFrameOrderType])
+	ErrWindowRangeFrameTemporalType = terror.ClassOptimizer.New(mysql.ErrWindowRangeFrameTemporalType, mysql.MySQLErrName[mysql.ErrWindowRangeFrameTemporalType])
+	ErrWindowRangeFrameNumericType = terror.ClassOptimizer.New(mysql.ErrWindowRangeFrameNumericType, mysql.MySQLErrName[mysql.ErrWindowRangeFrameNumericType])
+	ErrWindowRangeBoundNotConstant = terror.ClassOptimizer.New(mysql.ErrWindowRangeBoundNotConstant, mysql.MySQLErrName[mysql.ErrWindowRangeBoundNotConstant])
+	ErrWindowRowsIntervalUse = terror.ClassOptimizer.New(mysql.ErrWindowRowsIntervalUse, mysql.MySQLErrName[mysql.ErrWindowRowsIntervalUse])
+	ErrWindowFunctionIgnoresFrame = terror.ClassOptimizer.New(mysql.ErrWindowFunctionIgnoresFrame, mysql.MySQLErrName[mysql.ErrWindowFunctionIgnoresFrame])
+	ErrUnsupportedOnGeneratedColumn = terror.ClassOptimizer.New(mysql.ErrUnsupportedOnGeneratedColumn, mysql.MySQLErrName[mysql.ErrUnsupportedOnGeneratedColumn])
+	ErrPrivilegeCheckFail = terror.ClassOptimizer.New(mysql.ErrPrivilegeCheckFail, mysql.MySQLErrName[mysql.ErrPrivilegeCheckFail])
+	ErrInvalidWildCard = terror.ClassOptimizer.New(mysql.ErrInvalidWildCard, mysql.MySQLErrName[mysql.ErrInvalidWildCard])
+	ErrMixOfGroupFuncAndFields = terror.ClassOptimizer.New(mysql.ErrMixOfGroupFuncAndFieldsIncompatible, mysql.MySQLErrName[mysql.ErrMixOfGroupFuncAndFieldsIncompatible])
 	errTooBigPrecision = terror.ClassExpression.New(mysql.ErrTooBigPrecision, mysql.MySQLErrName[mysql.ErrTooBigPrecision])
 	ErrDBaccessDenied = terror.ClassOptimizer.New(mysql.ErrDBaccessDenied, mysql.MySQLErrName[mysql.ErrDBaccessDenied])
 	ErrTableaccessDenied = terror.ClassOptimizer.New(mysql.ErrTableaccessDenied, mysql.MySQLErrName[mysql.ErrTableaccessDenied])
 	ErrSpecificAccessDenied = terror.ClassOptimizer.New(mysql.ErrSpecificAccessDenied, mysql.MySQLErrName[mysql.ErrSpecificAccessDenied])
 	ErrViewNoExplain = terror.ClassOptimizer.New(mysql.ErrViewNoExplain, mysql.MySQLErrName[mysql.ErrViewNoExplain])
-	ErrWindowFrameStartIllegal = terror.ClassOptimizer.New(codeWindowFrameStartIllegal, mysql.MySQLErrName[mysql.ErrWindowFrameStartIllegal])
-	ErrWindowFrameEndIllegal = terror.ClassOptimizer.New(codeWindowFrameEndIllegal, mysql.MySQLErrName[mysql.ErrWindowFrameEndIllegal])
-	ErrWindowFrameIllegal = terror.ClassOptimizer.New(codeWindowFrameIllegal, mysql.MySQLErrName[mysql.ErrWindowFrameIllegal])
-	ErrWindowRangeFrameOrderType = terror.ClassOptimizer.New(codeWindowRangeFrameOrderType, mysql.MySQLErrName[mysql.ErrWindowRangeFrameOrderType])
-	ErrWindowRangeFrameTemporalType = terror.ClassOptimizer.New(codeWindowRangeFrameTemporalType, mysql.MySQLErrName[mysql.ErrWindowRangeFrameTemporalType])
-	ErrWindowRangeFrameNumericType = terror.ClassOptimizer.New(codeWindowRangeFrameNumericType, mysql.MySQLErrName[mysql.ErrWindowRangeFrameNumericType])
-	ErrWindowRangeBoundNotConstant = terror.ClassOptimizer.New(codeWindowRangeBoundNotConstant, mysql.MySQLErrName[mysql.ErrWindowRangeBoundNotConstant])
-	ErrWindowRowsIntervalUse = terror.ClassOptimizer.New(codeWindowRowsIntervalUse, mysql.MySQLErrName[mysql.ErrWindowRowsIntervalUse])
-	ErrWindowFunctionIgnoresFrame = terror.ClassOptimizer.New(codeWindowFunctionIgnoresFrame, mysql.MySQLErrName[mysql.ErrWindowFunctionIgnoresFrame])
-	ErrUnsupportedOnGeneratedColumn = terror.ClassOptimizer.New(codeUnsupportedOnGeneratedColumn, mysql.MySQLErrName[mysql.ErrUnsupportedOnGeneratedColumn])
+	ErrWrongValueCountOnRow = terror.ClassOptimizer.New(mysql.ErrWrongValueCountOnRow, mysql.MySQLErrName[mysql.ErrWrongValueCountOnRow])
+	ErrViewInvalid = terror.ClassOptimizer.New(mysql.ErrViewInvalid, mysql.MySQLErrName[mysql.ErrViewInvalid])
 	ErrNoSuchThread = terror.ClassOptimizer.New(mysql.ErrNoSuchThread, mysql.MySQLErrName[mysql.ErrNoSuchThread])
+	ErrUnknownColumn = terror.ClassOptimizer.New(mysql.ErrBadField, mysql.MySQLErrName[mysql.ErrBadField])
+	ErrCartesianProductUnsupported = terror.ClassOptimizer.New(mysql.ErrCartesianProductUnsupported, mysql.MySQLErrName[mysql.ErrCartesianProductUnsupported])
+	ErrStmtNotFound = terror.ClassOptimizer.New(mysql.ErrPreparedStmtNotFound, mysql.MySQLErrName[mysql.ErrPreparedStmtNotFound])
+	ErrAmbiguous = terror.ClassOptimizer.New(mysql.ErrNonUniq, mysql.MySQLErrName[mysql.ErrNonUniq])
 	// Since we cannot know if the user logged in with a password, use the message of ErrAccessDeniedNoPassword instead
 	ErrAccessDenied = terror.ClassOptimizer.New(mysql.ErrAccessDenied, mysql.MySQLErrName[mysql.ErrAccessDeniedNoPassword])
 )

 func init() {
 	mysqlErrCodeMap := map[terror.ErrCode]uint16{
-		codeNotSupportedYet: mysql.ErrNotSupportedYet,
-		codeWrongUsage: mysql.ErrWrongUsage,
-		codeAmbiguous: mysql.ErrNonUniq,
-		codeUnknownColumn: mysql.ErrBadField,
-		codeUnknownTable: mysql.ErrBadTable,
-		codeWrongArguments: mysql.ErrWrongArguments,
-		codeBadGeneratedColumn: mysql.ErrBadGeneratedColumn,
-		codeFieldNotInGroupBy: mysql.ErrFieldNotInGroupBy,
-		codeBadTable: mysql.ErrBadTable,
-		codeKeyDoesNotExist: mysql.ErrKeyDoesNotExist,
-		codeOperandColumns: mysql.ErrOperandColumns,
-		codeInvalidWildCard: mysql.ErrParse,
-		codeInvalidGroupFuncUse: mysql.ErrInvalidGroupFuncUse,
-		codeIllegalReference: mysql.ErrIllegalReference,
-		codeNoDB: mysql.ErrNoDB,
-		codeUnknownExplainFormat: mysql.ErrUnknownExplainFormat,
-		codeWrongGroupField: mysql.ErrWrongGroupField,
-		codeDupFieldName: mysql.ErrDupFieldName,
-		codeNonUpdatableTable: mysql.ErrUnknownTable,
-		codeInternal: mysql.ErrInternal,
-		codeMixOfGroupFuncAndFields: mysql.ErrMixOfGroupFuncAndFields,
-		codeNonUniqTable: mysql.ErrNonuniqTable,
-		codeWrongNumberOfColumnsInSelect: mysql.ErrWrongNumberOfColumnsInSelect,
-		codeWrongValueCountOnRow: mysql.ErrWrongValueCountOnRow,
-
-		codeWindowInvalidWindowFuncUse: mysql.ErrWindowInvalidWindowFuncUse,
-		codeWindowInvalidWindowFuncAliasUse: mysql.ErrWindowInvalidWindowFuncAliasUse,
-		codeWindowNoSuchWindow: mysql.ErrWindowNoSuchWindow,
-		codeWindowCircularityInWindowGraph: mysql.ErrWindowCircularityInWindowGraph,
-		codeWindowNoChildPartitioning: mysql.ErrWindowNoChildPartitioning,
-		codeWindowNoInherentFrame: mysql.ErrWindowNoInherentFrame,
-		codeWindowNoRedefineOrderBy: mysql.ErrWindowNoRedefineOrderBy,
-		codeWindowDuplicateName: mysql.ErrWindowDuplicateName,
-		codePartitionClauseOnNonpartitioned: mysql.ErrPartitionClauseOnNonpartitioned,
-		codeErrTooBigPrecision: mysql.ErrTooBigPrecision,
-		codeDBaccessDenied: mysql.ErrDBaccessDenied,
-		codeTableaccessDenied: mysql.ErrTableaccessDenied,
-		codeSpecificAccessDenied: mysql.ErrSpecificAccessDenied,
-		codeViewNoExplain: mysql.ErrViewNoExplain,
-		codeWindowFrameStartIllegal: mysql.ErrWindowFrameStartIllegal,
-		codeWindowFrameEndIllegal: mysql.ErrWindowFrameEndIllegal,
-		codeWindowFrameIllegal: mysql.ErrWindowFrameIllegal,
-		codeWindowRangeFrameOrderType: mysql.ErrWindowRangeFrameOrderType,
-		codeWindowRangeFrameTemporalType: mysql.ErrWindowRangeFrameTemporalType,
-		codeWindowRangeFrameNumericType: mysql.ErrWindowRangeFrameNumericType,
-		codeWindowRangeBoundNotConstant: mysql.ErrWindowRangeBoundNotConstant,
-		codeWindowRowsIntervalUse: mysql.ErrWindowRowsIntervalUse,
-		codeWindowFunctionIgnoresFrame: mysql.ErrWindowFunctionIgnoresFrame,
-		codeUnsupportedOnGeneratedColumn: mysql.ErrUnsupportedOnGeneratedColumn,
-
-		mysql.ErrNoSuchThread: mysql.ErrNoSuchThread,
-		mysql.ErrAccessDenied: mysql.ErrAccessDenied,
+		mysql.ErrViewInvalid: mysql.ErrViewInvalid,
+		mysql.ErrUnknown: mysql.ErrUnknown,
+		mysql.ErrTablenameNotAllowedHere: mysql.ErrTablenameNotAllowedHere,
+		mysql.ErrUnsupportedType: mysql.ErrUnsupportedType,
+		mysql.ErrAnalyzeMissIndex: mysql.ErrAnalyzeMissIndex,
+		mysql.ErrWrongParamCount: mysql.ErrWrongParamCount,
+		mysql.ErrSchemaChanged: mysql.ErrSchemaChanged,
+		mysql.ErrNotSupportedYet: mysql.ErrNotSupportedYet,
+		mysql.ErrWrongUsage: mysql.ErrWrongUsage,
+		mysql.ErrUnknownTable: mysql.ErrUnknownTable,
+		mysql.ErrWrongArguments: mysql.ErrWrongArguments,
+		mysql.ErrBadGeneratedColumn: mysql.ErrBadGeneratedColumn,
+		mysql.ErrFieldNotInGroupBy: mysql.ErrFieldNotInGroupBy,
+		mysql.ErrBadTable: mysql.ErrBadTable,
+		mysql.ErrKeyDoesNotExist: mysql.ErrKeyDoesNotExist,
+		mysql.ErrOperandColumns: mysql.ErrOperandColumns,
+		mysql.ErrInvalidGroupFuncUse: mysql.ErrInvalidGroupFuncUse,
+		mysql.ErrIllegalReference: mysql.ErrIllegalReference,
+		mysql.ErrNoDB: mysql.ErrNoDB,
+		mysql.ErrUnknownExplainFormat: mysql.ErrUnknownExplainFormat,
+		mysql.ErrWrongGroupField: mysql.ErrWrongGroupField,
+		mysql.ErrDupFieldName: mysql.ErrDupFieldName,
+		mysql.ErrNonUpdatableTable: mysql.ErrNonUpdatableTable,
+		mysql.ErrInternal: mysql.ErrInternal,
+		mysql.ErrMixOfGroupFuncAndFieldsIncompatible: mysql.ErrMixOfGroupFuncAndFieldsIncompatible,
+		mysql.ErrWrongNumberOfColumnsInSelect: mysql.ErrWrongNumberOfColumnsInSelect,
+		mysql.ErrWrongValueCountOnRow: mysql.ErrWrongValueCountOnRow,
+		mysql.ErrWindowInvalidWindowFuncUse: mysql.ErrWindowInvalidWindowFuncUse,
+		mysql.ErrWindowInvalidWindowFuncAliasUse: mysql.ErrWindowInvalidWindowFuncAliasUse,
+		mysql.ErrWindowNoSuchWindow: mysql.ErrWindowNoSuchWindow,
+		mysql.ErrWindowCircularityInWindowGraph: mysql.ErrWindowCircularityInWindowGraph,
+		mysql.ErrWindowNoChildPartitioning: mysql.ErrWindowNoChildPartitioning,
+		mysql.ErrWindowNoInherentFrame: mysql.ErrWindowNoInherentFrame,
+		mysql.ErrWindowNoRedefineOrderBy: mysql.ErrWindowNoRedefineOrderBy,
+		mysql.ErrWindowDuplicateName: mysql.ErrWindowDuplicateName,
+		mysql.ErrPartitionClauseOnNonpartitioned: mysql.ErrPartitionClauseOnNonpartitioned,
+		mysql.ErrDBaccessDenied: mysql.ErrDBaccessDenied,
+		mysql.ErrTableaccessDenied: mysql.ErrTableaccessDenied,
+		mysql.ErrSpecificAccessDenied: mysql.ErrSpecificAccessDenied,
+		mysql.ErrViewNoExplain: mysql.ErrViewNoExplain,
+		mysql.ErrWindowFrameStartIllegal: mysql.ErrWindowFrameStartIllegal,
+		mysql.ErrWindowFrameEndIllegal: mysql.ErrWindowFrameEndIllegal,
+		mysql.ErrWindowFrameIllegal: mysql.ErrWindowFrameIllegal,
+		mysql.ErrWindowRangeFrameOrderType: mysql.ErrWindowRangeFrameOrderType,
+		mysql.ErrWindowRangeFrameTemporalType: mysql.ErrWindowRangeFrameTemporalType,
+		mysql.ErrWindowRangeFrameNumericType: mysql.ErrWindowRangeFrameNumericType,
+		mysql.ErrWindowRangeBoundNotConstant: mysql.ErrWindowRangeBoundNotConstant,
+		mysql.ErrWindowRowsIntervalUse: mysql.ErrWindowRowsIntervalUse,
+		mysql.ErrWindowFunctionIgnoresFrame: mysql.ErrWindowFunctionIgnoresFrame,
+		mysql.ErrUnsupportedOnGeneratedColumn: mysql.ErrUnsupportedOnGeneratedColumn,
+		mysql.ErrNoSuchThread: mysql.ErrNoSuchThread,
+		mysql.ErrAccessDenied: mysql.ErrAccessDenied,
+		mysql.ErrPrivilegeCheckFail: mysql.ErrPrivilegeCheckFail,
+		mysql.ErrCartesianProductUnsupported: mysql.ErrCartesianProductUnsupported,
+		mysql.ErrPreparedStmtNotFound: mysql.ErrPreparedStmtNotFound,
+		mysql.ErrNonUniq: mysql.ErrNonUniq,
+		mysql.ErrBadField: mysql.ErrBadField,
+		mysql.ErrNonuniqTable: mysql.ErrNonuniqTable,
+		mysql.ErrTooBigPrecision: mysql.ErrTooBigPrecision,
+		mysql.ErrInvalidWildCard: mysql.ErrInvalidWildCard,
 	}
 	terror.ErrClassToMySQLCodes[terror.ClassOptimizer] = mysqlErrCodeMap
 }
diff --git a/planner/core/errors_test.go b/planner/core/errors_test.go
new file mode 100644
index 0000000000000..bce2a3563978d
--- /dev/null
+++ b/planner/core/errors_test.go
@@ -0,0 +1,90 @@
+// Copyright 2019 PingCAP, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package core
+
+import (
+	.
"github.com/pingcap/check" + "github.com/pingcap/parser/mysql" + "github.com/pingcap/parser/terror" +) + +type testErrorSuite struct{} + +var _ = Suite(testErrorSuite{}) + +func (s testErrorSuite) TestError(c *C) { + kvErrs := []*terror.Error{ + ErrUnsupportedType, + ErrAnalyzeMissIndex, + ErrWrongParamCount, + ErrSchemaChanged, + ErrTablenameNotAllowedHere, + ErrNotSupportedYet, + ErrWrongUsage, + ErrUnknownTable, + ErrWrongArguments, + ErrWrongNumberOfColumnsInSelect, + ErrBadGeneratedColumn, + ErrFieldNotInGroupBy, + ErrBadTable, + ErrKeyDoesNotExist, + ErrOperandColumns, + ErrInvalidGroupFuncUse, + ErrIllegalReference, + ErrNoDB, + ErrUnknownExplainFormat, + ErrWrongGroupField, + ErrDupFieldName, + ErrNonUpdatableTable, + ErrInternal, + ErrNonUniqTable, + ErrWindowInvalidWindowFuncUse, + ErrWindowInvalidWindowFuncAliasUse, + ErrWindowNoSuchWindow, + ErrWindowCircularityInWindowGraph, + ErrWindowNoChildPartitioning, + ErrWindowNoInherentFrame, + ErrWindowNoRedefineOrderBy, + ErrWindowDuplicateName, + ErrPartitionClauseOnNonpartitioned, + ErrWindowFrameStartIllegal, + ErrWindowFrameEndIllegal, + ErrWindowFrameIllegal, + ErrWindowRangeFrameOrderType, + ErrWindowRangeFrameTemporalType, + ErrWindowRangeFrameNumericType, + ErrWindowRangeBoundNotConstant, + ErrWindowRowsIntervalUse, + ErrWindowFunctionIgnoresFrame, + ErrUnsupportedOnGeneratedColumn, + ErrPrivilegeCheckFail, + ErrInvalidWildCard, + ErrMixOfGroupFuncAndFields, + ErrDBaccessDenied, + ErrTableaccessDenied, + ErrSpecificAccessDenied, + ErrViewNoExplain, + ErrWrongValueCountOnRow, + ErrViewInvalid, + ErrNoSuchThread, + ErrUnknownColumn, + ErrCartesianProductUnsupported, + ErrStmtNotFound, + ErrAmbiguous, + } + for _, err := range kvErrs { + code := err.ToSQLError().Code + c.Assert(code != mysql.ErrUnknown && code == uint16(err.Code()), IsTrue, Commentf("err: %v", err)) + } +} diff --git a/planner/core/exhaust_physical_plans.go b/planner/core/exhaust_physical_plans.go index 4da7696e17949..4d3b73d69c18c 100644 --- a/planner/core/exhaust_physical_plans.go +++ b/planner/core/exhaust_physical_plans.go @@ -26,6 +26,7 @@ import ( "github.com/pingcap/tidb/expression/aggregation" "github.com/pingcap/tidb/kv" "github.com/pingcap/tidb/planner/property" + "github.com/pingcap/tidb/planner/util" "github.com/pingcap/tidb/sessionctx" "github.com/pingcap/tidb/types" "github.com/pingcap/tidb/util/chunk" @@ -300,7 +301,7 @@ func (p *LogicalJoin) constructIndexJoin( innerTask task, ranges []*ranger.Range, keyOff2IdxOff []int, - path *accessPath, + path *util.AccessPath, compareFilters *ColWithCmpFuncManager, ) []PhysicalPlan { joinType := p.JoinType @@ -353,7 +354,7 @@ func (p *LogicalJoin) constructIndexJoin( CompareFilters: compareFilters, }.Init(p.ctx, p.stats.ScaleByExpectCnt(prop.ExpectedCnt), p.blockOffset, chReqProps...) 
if path != nil { - join.IdxColLens = path.idxColLens + join.IdxColLens = path.IdxColLens } join.SetSchema(p.schema) return []PhysicalPlan{join} @@ -365,7 +366,7 @@ func (p *LogicalJoin) constructIndexMergeJoin( innerTask task, ranges []*ranger.Range, keyOff2IdxOff []int, - path *accessPath, + path *util.AccessPath, compareFilters *ColWithCmpFuncManager, ) []PhysicalPlan { indexJoins := p.constructIndexJoin(prop, outerIdx, innerTask, ranges, keyOff2IdxOff, path, compareFilters) @@ -442,7 +443,7 @@ func (p *LogicalJoin) constructIndexHashJoin( innerTask task, ranges []*ranger.Range, keyOff2IdxOff []int, - path *accessPath, + path *util.AccessPath, compareFilters *ColWithCmpFuncManager, ) []PhysicalPlan { indexJoins := p.constructIndexJoin(prop, outerIdx, innerTask, ranges, keyOff2IdxOff, path, compareFilters) @@ -519,9 +520,9 @@ func (p *LogicalJoin) getIndexJoinByOuterIdx(prop *property.PhysicalProperty, ou func (p *LogicalJoin) buildIndexJoinInner2TableScan( prop *property.PhysicalProperty, ds *DataSource, innerJoinKeys, outerJoinKeys []*expression.Column, outerIdx int, us *LogicalUnionScan, avgInnerRowCnt float64) (joins []PhysicalPlan) { - var tblPath *accessPath + var tblPath *util.AccessPath for _, path := range ds.possibleAccessPaths { - if path.isTablePath && path.storeType == kv.TiKV { + if path.IsTablePath && path.StoreType == kv.TiKV { tblPath = path break } @@ -568,7 +569,7 @@ func (p *LogicalJoin) buildIndexJoinInner2IndexScan( outerIdx int, us *LogicalUnionScan, avgInnerRowCnt float64) (joins []PhysicalPlan) { helper := &indexJoinBuildHelper{join: p} for _, path := range ds.possibleAccessPaths { - if path.isTablePath { + if path.IsTablePath { continue } emptyRange, err := helper.analyzeLookUpFilters(path, ds, innerJoinKeys) @@ -592,7 +593,7 @@ func (p *LogicalJoin) buildIndexJoinInner2IndexScan( } } joins = make([]PhysicalPlan, 0, 3) - rangeInfo := helper.buildRangeDecidedByInformation(helper.chosenPath.idxCols, outerJoinKeys) + rangeInfo := helper.buildRangeDecidedByInformation(helper.chosenPath.IdxCols, outerJoinKeys) innerTask := p.constructInnerIndexScanTask(ds, helper.chosenPath, helper.chosenRemained, outerJoinKeys, us, rangeInfo, false, false, avgInnerRowCnt) joins = append(joins, p.constructIndexJoin(prop, outerIdx, innerTask, helper.chosenRanges, keyOff2IdxOff, helper.chosenPath, helper.lastColManager)...) @@ -620,7 +621,7 @@ type indexJoinBuildHelper struct { idxOff2KeyOff []int lastColManager *ColWithCmpFuncManager chosenRanges []*ranger.Range - chosenPath *accessPath + chosenPath *util.AccessPath curPossibleUsedKeys []*expression.Column curNotUsedIndexCols []*expression.Column @@ -720,7 +721,7 @@ func (p *LogicalJoin) constructInnerUnionScan(us *LogicalUnionScan, reader Physi // constructInnerIndexScanTask is specially used to construct the inner plan for PhysicalIndexJoin. 
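+// The inner side is always a fresh index scan whose ranges are recomputed from
+// the outer row's join-key values at execution time; when the chosen index does
+// not cover all the columns the data source needs, a table scan is attached to
+// look the rows up by handle (the double read case handled below).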
 func (p *LogicalJoin) constructInnerIndexScanTask(
 	ds *DataSource,
-	path *accessPath,
+	path *util.AccessPath,
 	filterConds []expression.Expression,
 	outerJoinKeys []*expression.Column,
 	us *LogicalUnionScan,
@@ -734,9 +735,9 @@ func (p *LogicalJoin) constructInnerIndexScanTask(
 		TableAsName: ds.TableAsName,
 		DBName: ds.DBName,
 		Columns: ds.Columns,
-		Index: path.index,
-		IdxCols: path.idxCols,
-		IdxColLens: path.idxColLens,
+		Index: path.Index,
+		IdxCols: path.IdxCols,
+		IdxColLens: path.IdxColLens,
 		dataSourceSchema: ds.schema,
 		KeepOrder: keepOrder,
 		Ranges: ranger.FullRange(),
@@ -752,7 +753,7 @@ func (p *LogicalJoin) constructInnerIndexScanTask(
 		tblCols: ds.TblCols,
 		keepOrder: is.KeepOrder,
 	}
-	if !isCoveringIndex(ds.schema.Columns, path.fullIdxCols, path.fullIdxColLens, is.Table.PKIsHandle) {
+	if !isCoveringIndex(ds.schema.Columns, path.FullIdxCols, path.FullIdxColLens, is.Table.PKIsHandle) {
 		// In this way, it's a double read case.
 		ts := PhysicalTableScan{
 			Columns: ds.Columns,
@@ -768,24 +769,24 @@ func (p *LogicalJoin) constructInnerIndexScanTask(
 		}
 		cop.tablePlan = ts
 	}
-	is.initSchema(path.index, path.fullIdxCols, cop.tablePlan != nil)
-	rowSize := is.indexScanRowSize(path.index, ds, true)
+	is.initSchema(path.Index, path.FullIdxCols, cop.tablePlan != nil)
+	rowSize := is.indexScanRowSize(path.Index, ds, true)
 	sessVars := ds.ctx.GetSessionVars()
 	cop.cst = rowCount * rowSize * sessVars.ScanFactor
-	indexConds, tblConds := splitIndexFilterConditions(filterConds, path.fullIdxCols, path.fullIdxColLens, ds.tableInfo)
-	tmpPath := &accessPath{
-		indexFilters: indexConds,
-		tableFilters: tblConds,
-		countAfterAccess: rowCount,
+	indexConds, tblConds := splitIndexFilterConditions(filterConds, path.FullIdxCols, path.FullIdxColLens, ds.tableInfo)
+	tmpPath := &util.AccessPath{
+		IndexFilters: indexConds,
+		TableFilters: tblConds,
+		CountAfterAccess: rowCount,
 	}
 	// Assume equal conditions used by index join and other conditions are independent.
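+	// Under that independence assumption the estimate is simply
+	//     CountAfterIndex = CountAfterAccess * selectivity(indexConds)
+	// and if the selectivity cannot be computed, the fixed selectionFactor is used instead.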
 	if len(indexConds) > 0 {
-		selectivity, _, err := ds.tableStats.HistColl.Selectivity(ds.ctx, indexConds)
+		selectivity, _, err := ds.tableStats.HistColl.Selectivity(ds.ctx, indexConds, nil)
 		if err != nil {
 			logutil.BgLogger().Debug("calculate selectivity failed, use selection factor", zap.Error(err))
 			selectivity = selectionFactor
 		}
-		tmpPath.countAfterIndex = rowCount * selectivity
+		tmpPath.CountAfterIndex = rowCount * selectivity
 	}
 	selectivity := ds.stats.RowCount / ds.tableStats.RowCount
 	finalStats := ds.stats.ScaleByExpectCnt(selectivity * rowCount)
@@ -987,15 +988,15 @@ func (ijHelper *indexJoinBuildHelper) removeUselessEqAndInFunc(
 	return notKeyEqAndIn, nil
 }

-func (ijHelper *indexJoinBuildHelper) analyzeLookUpFilters(path *accessPath, innerPlan *DataSource, innerJoinKeys []*expression.Column) (emptyRange bool, err error) {
-	if len(path.idxCols) == 0 {
+func (ijHelper *indexJoinBuildHelper) analyzeLookUpFilters(path *util.AccessPath, innerPlan *DataSource, innerJoinKeys []*expression.Column) (emptyRange bool, err error) {
+	if len(path.IdxCols) == 0 {
 		return false, nil
 	}
-	accesses := make([]expression.Expression, 0, len(path.idxCols))
-	ijHelper.resetContextForIndex(innerJoinKeys, path.idxCols, path.idxColLens)
+	accesses := make([]expression.Expression, 0, len(path.IdxCols))
+	ijHelper.resetContextForIndex(innerJoinKeys, path.IdxCols, path.IdxColLens)
 	notKeyEqAndIn, remained, rangeFilterCandidates := ijHelper.findUsefulEqAndInFilters(innerPlan)
 	var remainedEqAndIn []expression.Expression
-	notKeyEqAndIn, remainedEqAndIn = ijHelper.removeUselessEqAndInFunc(path.idxCols, notKeyEqAndIn)
+	notKeyEqAndIn, remainedEqAndIn = ijHelper.removeUselessEqAndInFunc(path.IdxCols, notKeyEqAndIn)
 	matchedKeyCnt := len(ijHelper.curPossibleUsedKeys)
 	// If no join key is matched while the join keys are actually not empty, we don't choose index join for now.
 	if matchedKeyCnt <= 0 && len(innerJoinKeys) > 0 {
@@ -1010,7 +1011,7 @@ func (ijHelper *indexJoinBuildHelper) analyzeLookUpFilters(path *accessPath, inn
 		return false, nil
 	}
 	// If all the index columns are covered by eq/in conditions, we don't need to consider other conditions anymore.
-	if lastColPos == len(path.idxCols) {
+	if lastColPos == len(path.IdxCols) {
 		// If there is a join key that matches an index column, then choosing hash join is always a better idea.
 		// e.g. select * from t1, t2 where t2.a=1 and t2.b=1. And t2 has index(a, b).
 		// If we don't have the following check, TiDB will build index join for this case.
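As an aside, the per-column range building that analyzeLookUpFilters applies to the first index column after the eq/in prefix can be pictured in isolation. The helper below is a hypothetical sketch (buildLastColRange is not part of this patch; it only reuses ranger calls that already appear in this diff, and assumes the imports of exhaust_physical_plans.go):

    // buildLastColRange splits the remaining filters on one index column into
    // access conditions and leftover selections, then turns the access
    // conditions into the ranges the index join would scan.
    func buildLastColRange(sctx sessionctx.Context, col *expression.Column, colLen int,
        filters []expression.Expression) ([]*ranger.Range, []expression.Expression, error) {
        // Conditions usable as index access conditions on col vs. the rest.
        accessConds, remained := ranger.DetachCondsForColumn(sctx, filters, col)
        if len(accessConds) == 0 {
            // The column contributes no range; everything stays as a filter.
            return nil, filters, nil
        }
        ranges, err := ranger.BuildColumnRange(accessConds, sctx.GetSessionVars().StmtCtx, col.RetType, colLen)
        return ranges, remained, err
    }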
@@ -1028,10 +1029,10 @@ func (ijHelper *indexJoinBuildHelper) analyzeLookUpFilters(path *accessPath, inn
 		ijHelper.updateBestChoice(ranges, path, accesses, remained, nil)
 		return false, nil
 	}
-	lastPossibleCol := path.idxCols[lastColPos]
+	lastPossibleCol := path.IdxCols[lastColPos]
 	lastColManager := &ColWithCmpFuncManager{
 		TargetCol: lastPossibleCol,
-		colLength: path.idxColLens[lastColPos],
+		colLength: path.IdxColLens[lastColPos],
 		affectedColSchema: expression.NewSchema(),
 	}
 	lastColAccess := ijHelper.buildLastColManager(lastPossibleCol, innerPlan, lastColManager)
@@ -1047,7 +1048,7 @@ func (ijHelper *indexJoinBuildHelper) analyzeLookUpFilters(path *accessPath, inn
 	var ranges, nextColRange []*ranger.Range
 	var err error
 	if len(colAccesses) > 0 {
-		nextColRange, err = ranger.BuildColumnRange(colAccesses, ijHelper.join.ctx.GetSessionVars().StmtCtx, lastPossibleCol.RetType, path.idxColLens[lastColPos])
+		nextColRange, err = ranger.BuildColumnRange(colAccesses, ijHelper.join.ctx.GetSessionVars().StmtCtx, lastPossibleCol.RetType, path.IdxColLens[lastColPos])
 		if err != nil {
 			return false, err
 		}
@@ -1060,7 +1061,7 @@ func (ijHelper *indexJoinBuildHelper) analyzeLookUpFilters(path *accessPath, inn
 		return true, nil
 	}
 	remained = append(remained, colRemained...)
-	if path.idxColLens[lastColPos] != types.UnspecifiedLength {
+	if path.IdxColLens[lastColPos] != types.UnspecifiedLength {
 		remained = append(remained, colAccesses...)
 	}
 	accesses = append(accesses, colAccesses...)
@@ -1080,7 +1081,7 @@ func (ijHelper *indexJoinBuildHelper) analyzeLookUpFilters(path *accessPath, inn
 	return false, nil
 }

-func (ijHelper *indexJoinBuildHelper) updateBestChoice(ranges []*ranger.Range, path *accessPath, accesses,
+func (ijHelper *indexJoinBuildHelper) updateBestChoice(ranges []*ranger.Range, path *util.AccessPath, accesses,
 	remained []expression.Expression, lastColManager *ColWithCmpFuncManager) {
 	// We choose the index by the number of used columns of the range, the more the better.
 	// Notice that there may be the cases like `t1.a=t2.a and b > 2 and b < 1`. So ranges can be nil though the conditions are valid.
diff --git a/planner/core/exhaust_physical_plans_test.go b/planner/core/exhaust_physical_plans_test.go
index 98fba2158aaa6..dcfe267628535 100644
--- a/planner/core/exhaust_physical_plans_test.go
+++ b/planner/core/exhaust_physical_plans_test.go
@@ -21,6 +21,7 @@ import (
 	"github.com/pingcap/parser/model"
 	"github.com/pingcap/parser/mysql"
 	"github.com/pingcap/tidb/expression"
+	"github.com/pingcap/tidb/planner/util"
 	"github.com/pingcap/tidb/types"
 )
@@ -120,9 +121,9 @@ func (s *testUnitTestSuit) TestIndexJoinAnalyzeLookUpFilters(c *C) {
 		DBName: model.NewCIStr("test"),
 	})
 	joinNode.SetSchema(expression.MergeSchema(dsSchema, outerChildSchema))
-	path := &accessPath{
-		idxCols: append(make([]*expression.Column, 0, 4), dsSchema.Columns...),
-		idxColLens: []int{types.UnspecifiedLength, types.UnspecifiedLength, 2, types.UnspecifiedLength},
+	path := &util.AccessPath{
+		IdxCols: append(make([]*expression.Column, 0, 4), dsSchema.Columns...),
+		IdxColLens: []int{types.UnspecifiedLength, types.UnspecifiedLength, 2, types.UnspecifiedLength},
 	}
 	joinColNames := append(dsNames.Shallow(), outerChildNames...)
diff --git a/planner/core/explain.go b/planner/core/explain.go
index d53cb3c104d10..526a051fcceba 100644
--- a/planner/core/explain.go
+++ b/planner/core/explain.go
@@ -33,6 +33,10 @@ func (p *PhysicalLock) ExplainInfo() string {

 // ExplainInfo implements Plan interface.
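+// Both the user-facing and the normalized renderings share one implementation
+// below: explainInfo(true) masks the data-dependent details (concrete ranges
+// become "[?,?]", the stats:pseudo flag is dropped) so that a plan's digest
+// does not change when only the constants in the query change.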
 func (p *PhysicalIndexScan) ExplainInfo() string {
+	return p.explainInfo(false)
+}
+
+func (p *PhysicalIndexScan) explainInfo(normalized bool) string {
 	buffer := bytes.NewBufferString("")
 	tblName := p.Table.Name.O
 	if p.TableAsName != nil && p.TableAsName.O != "" {
@@ -64,13 +68,21 @@ func (p *PhysicalIndexScan) ExplainInfo() string {
 	if len(p.rangeInfo) > 0 {
 		fmt.Fprintf(buffer, ", range: decided by %v", p.rangeInfo)
 	} else if haveCorCol {
-		fmt.Fprintf(buffer, ", range: decided by %v", p.AccessCondition)
+		if normalized {
+			fmt.Fprintf(buffer, ", range: decided by %s", expression.SortedExplainNormalizedExpressionList(p.AccessCondition))
+		} else {
+			fmt.Fprintf(buffer, ", range: decided by %v", p.AccessCondition)
+		}
 	} else if len(p.Ranges) > 0 {
-		fmt.Fprint(buffer, ", range:")
-		for i, idxRange := range p.Ranges {
-			fmt.Fprint(buffer, idxRange.String())
-			if i+1 < len(p.Ranges) {
-				fmt.Fprint(buffer, ", ")
+		if normalized {
+			fmt.Fprint(buffer, ", range:[?,?]")
+		} else {
+			fmt.Fprint(buffer, ", range:")
+			for i, idxRange := range p.Ranges {
+				fmt.Fprint(buffer, idxRange.String())
+				if i+1 < len(p.Ranges) {
+					fmt.Fprint(buffer, ", ")
+				}
 			}
 		}
 	}
@@ -78,14 +90,28 @@ func (p *PhysicalIndexScan) ExplainInfo() string {
 	if p.Desc {
 		buffer.WriteString(", desc")
 	}
-	if p.stats.StatsVersion == statistics.PseudoVersion {
+	if p.stats.StatsVersion == statistics.PseudoVersion && !normalized {
 		buffer.WriteString(", stats:pseudo")
 	}
 	return buffer.String()
 }

+// ExplainNormalizedInfo implements Plan interface.
+func (p *PhysicalIndexScan) ExplainNormalizedInfo() string {
+	return p.explainInfo(true)
+}
+
 // ExplainInfo implements Plan interface.
 func (p *PhysicalTableScan) ExplainInfo() string {
+	return p.explainInfo(false)
+}
+
+// ExplainNormalizedInfo implements Plan interface.
+func (p *PhysicalTableScan) ExplainNormalizedInfo() string {
+	return p.explainInfo(true)
+}
+
+func (p *PhysicalTableScan) explainInfo(normalized bool) string {
 	buffer := bytes.NewBufferString("")
 	tblName := p.Table.Name.O
 	if p.TableAsName != nil && p.TableAsName.O != "" {
@@ -111,9 +137,15 @@ func (p *PhysicalTableScan) ExplainInfo() string {
 	if len(p.rangeDecidedBy) > 0 {
 		fmt.Fprintf(buffer, ", range: decided by %v", p.rangeDecidedBy)
 	} else if haveCorCol {
-		fmt.Fprintf(buffer, ", range: decided by %v", p.AccessCondition)
+		if normalized {
+			fmt.Fprintf(buffer, ", range: decided by %s", expression.SortedExplainNormalizedExpressionList(p.AccessCondition))
+		} else {
+			fmt.Fprintf(buffer, ", range: decided by %v", p.AccessCondition)
+		}
 	} else if len(p.Ranges) > 0 {
-		if p.StoreType == kv.TiFlash {
+		if normalized {
+			fmt.Fprint(buffer, ", range:[?,?]")
+		} else if p.StoreType == kv.TiFlash {
 			// A TiFlash table always uses a full range scan for each region;
 			// the ranges in p.Ranges are used to prune cop tasks.
 			fmt.Fprintf(buffer, ", range:"+ranger.FullIntRange(false)[0].String())
@@ -131,7 +163,7 @@ func (p *PhysicalTableScan) ExplainInfo() string {
 	if p.Desc {
 		buffer.WriteString(", desc")
 	}
-	if p.stats.StatsVersion == statistics.PseudoVersion {
+	if p.stats.StatsVersion == statistics.PseudoVersion && !normalized {
 		buffer.WriteString(", stats:pseudo")
 	}
 	return buffer.String()
@@ -142,11 +174,21 @@ func (p *PhysicalTableReader) ExplainInfo() string {
 	return "data:" + p.tablePlan.ExplainID().String()
 }

+// ExplainNormalizedInfo implements Plan interface.
+func (p *PhysicalTableReader) ExplainNormalizedInfo() string {
+	return p.ExplainInfo()
+}
+
 // ExplainInfo implements Plan interface.
 func (p *PhysicalIndexReader) ExplainInfo() string {
 	return "index:" + p.indexPlan.ExplainID().String()
 }

+// ExplainNormalizedInfo implements Plan interface.
+func (p *PhysicalIndexReader) ExplainNormalizedInfo() string {
+	return p.ExplainInfo()
+}
+
 // ExplainInfo implements Plan interface.
 func (p *PhysicalIndexLookUpReader) ExplainInfo() string {
 	// The children can be inferred by the relation symbol.
@@ -171,11 +213,21 @@ func (p *PhysicalSelection) ExplainInfo() string {
 	return string(expression.SortedExplainExpressionList(p.Conditions))
 }

+// ExplainNormalizedInfo implements Plan interface.
+func (p *PhysicalSelection) ExplainNormalizedInfo() string {
+	return string(expression.SortedExplainNormalizedExpressionList(p.Conditions))
+}
+
 // ExplainInfo implements Plan interface.
 func (p *PhysicalProjection) ExplainInfo() string {
 	return expression.ExplainExpressionList(p.Exprs, p.schema)
 }

+// ExplainNormalizedInfo implements Plan interface.
+func (p *PhysicalProjection) ExplainNormalizedInfo() string {
+	return string(expression.SortedExplainNormalizedExpressionList(p.Exprs))
+}
+
 // ExplainInfo implements Plan interface.
 func (p *PhysicalTableDual) ExplainInfo() string {
 	return fmt.Sprintf("rows:%v", p.RowCount)
@@ -194,10 +246,19 @@ func (p *PhysicalLimit) ExplainInfo() string {
 // ExplainInfo implements Plan interface.
 func (p *basePhysicalAgg) ExplainInfo() string {
+	return p.explainInfo(false)
+}
+
+func (p *basePhysicalAgg) explainInfo(normalized bool) string {
+	sortedExplainExpressionList := expression.SortedExplainExpressionList
+	if normalized {
+		sortedExplainExpressionList = expression.SortedExplainNormalizedExpressionList
+	}
+
 	builder := &strings.Builder{}
 	if len(p.GroupByItems) > 0 {
 		fmt.Fprintf(builder, "group by:%s, ",
-			expression.SortedExplainExpressionList(p.GroupByItems))
+			sortedExplainExpressionList(p.GroupByItems))
 	}
 	for i := 0; i < len(p.AggFuncs); i++ {
 		builder.WriteString("funcs:")
@@ -209,8 +270,22 @@ func (p *basePhysicalAgg) ExplainInfo() string {
 	return builder.String()
 }

+// ExplainNormalizedInfo implements Plan interface.
+func (p *basePhysicalAgg) ExplainNormalizedInfo() string {
+	return p.explainInfo(true)
+}
+
 // ExplainInfo implements Plan interface.
 func (p *PhysicalIndexJoin) ExplainInfo() string {
+	return p.explainInfo(false)
+}
+
+func (p *PhysicalIndexJoin) explainInfo(normalized bool) string {
+	sortedExplainExpressionList := expression.SortedExplainExpressionList
+	if normalized {
+		sortedExplainExpressionList = expression.SortedExplainNormalizedExpressionList
+	}
+
 	buffer := bytes.NewBufferString(p.JoinType.String())
 	fmt.Fprintf(buffer, ", inner:%s", p.Children()[p.InnerChildIdx].ExplainID())
 	if len(p.OuterJoinKeys) > 0 {
@@ -223,21 +298,40 @@ func (p *PhysicalIndexJoin) ExplainInfo() string {
 	}
 	if len(p.LeftConditions) > 0 {
 		fmt.Fprintf(buffer, ", left cond:%s",
-			expression.SortedExplainExpressionList(p.LeftConditions))
+			sortedExplainExpressionList(p.LeftConditions))
 	}
 	if len(p.RightConditions) > 0 {
 		fmt.Fprintf(buffer, ", right cond:%s",
-			expression.SortedExplainExpressionList(p.RightConditions))
+			sortedExplainExpressionList(p.RightConditions))
 	}
 	if len(p.OtherConditions) > 0 {
 		fmt.Fprintf(buffer, ", other cond:%s",
-			expression.SortedExplainExpressionList(p.OtherConditions))
+			sortedExplainExpressionList(p.OtherConditions))
 	}
 	return buffer.String()
 }

+// ExplainNormalizedInfo implements Plan interface.
+func (p *PhysicalIndexJoin) ExplainNormalizedInfo() string {
+	return p.explainInfo(true)
+}
+
 // ExplainInfo implements Plan interface.
 func (p *PhysicalHashJoin) ExplainInfo() string {
+	return p.explainInfo(false)
+}
+
+// ExplainNormalizedInfo implements Plan interface.
+func (p *PhysicalHashJoin) ExplainNormalizedInfo() string {
+	return p.explainInfo(true)
+}
+
+func (p *PhysicalHashJoin) explainInfo(normalized bool) string {
+	sortedExplainExpressionList := expression.SortedExplainExpressionList
+	if normalized {
+		sortedExplainExpressionList = expression.SortedExplainNormalizedExpressionList
+	}
+
 	buffer := new(bytes.Buffer)

 	if len(p.EqualConditions) == 0 {
@@ -255,24 +349,41 @@ func (p *PhysicalHashJoin) ExplainInfo() string {
 		buffer.WriteString(" (REVERSED)")
 	}
 	if len(p.EqualConditions) > 0 {
-		fmt.Fprintf(buffer, ", equal:%v", p.EqualConditions)
+		if normalized {
+			fmt.Fprintf(buffer, ", equal:%s", expression.SortedExplainNormalizedScalarFuncList(p.EqualConditions))
+		} else {
+			fmt.Fprintf(buffer, ", equal:%v", p.EqualConditions)
+		}
 	}
 	if len(p.LeftConditions) > 0 {
-		fmt.Fprintf(buffer, ", left cond:%s", p.LeftConditions)
+		if normalized {
+			fmt.Fprintf(buffer, ", left cond:%s", expression.SortedExplainNormalizedExpressionList(p.LeftConditions))
+		} else {
+			fmt.Fprintf(buffer, ", left cond:%s", p.LeftConditions)
+		}
 	}
 	if len(p.RightConditions) > 0 {
 		fmt.Fprintf(buffer, ", right cond:%s",
-			expression.SortedExplainExpressionList(p.RightConditions))
+			sortedExplainExpressionList(p.RightConditions))
 	}
 	if len(p.OtherConditions) > 0 {
 		fmt.Fprintf(buffer, ", other cond:%s",
-			expression.SortedExplainExpressionList(p.OtherConditions))
+			sortedExplainExpressionList(p.OtherConditions))
 	}
 	return buffer.String()
 }

 // ExplainInfo implements Plan interface.
 func (p *PhysicalMergeJoin) ExplainInfo() string {
+	return p.explainInfo(false)
+}
+
+func (p *PhysicalMergeJoin) explainInfo(normalized bool) string {
+	sortedExplainExpressionList := expression.SortedExplainExpressionList
+	if normalized {
+		sortedExplainExpressionList = expression.SortedExplainNormalizedExpressionList
+	}
+
 	buffer := bytes.NewBufferString(p.JoinType.String())
 	if len(p.LeftJoinKeys) > 0 {
 		fmt.Fprintf(buffer, ", left key:%s",
@@ -283,19 +394,28 @@ func (p *PhysicalMergeJoin) ExplainInfo() string {
 			expression.ExplainColumnList(p.RightJoinKeys))
 	}
 	if len(p.LeftConditions) > 0 {
-		fmt.Fprintf(buffer, ", left cond:%s", p.LeftConditions)
+		if normalized {
+			fmt.Fprintf(buffer, ", left cond:%s", expression.SortedExplainNormalizedExpressionList(p.LeftConditions))
+		} else {
+			fmt.Fprintf(buffer, ", left cond:%s", p.LeftConditions)
+		}
 	}
 	if len(p.RightConditions) > 0 {
 		fmt.Fprintf(buffer, ", right cond:%s",
-			expression.SortedExplainExpressionList(p.RightConditions))
+			sortedExplainExpressionList(p.RightConditions))
 	}
 	if len(p.OtherConditions) > 0 {
 		fmt.Fprintf(buffer, ", other cond:%s",
-			expression.SortedExplainExpressionList(p.OtherConditions))
+			sortedExplainExpressionList(p.OtherConditions))
 	}
 	return buffer.String()
 }

+// ExplainNormalizedInfo implements Plan interface.
+func (p *PhysicalMergeJoin) ExplainNormalizedInfo() string {
+	return p.explainInfo(true)
+}
+
 // ExplainInfo implements Plan interface.
 func (p *PhysicalTopN) ExplainInfo() string {
 	buffer := bytes.NewBufferString("")
@@ -304,6 +424,13 @@ func (p *PhysicalTopN) ExplainInfo() string {
 	return buffer.String()
 }

+// ExplainNormalizedInfo implements Plan interface.
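+// ByItems are normalized the same way: each expression is rendered with its
+// ExplainNormalizedInfo while the asc/desc direction is kept verbatim.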
+func (p *PhysicalTopN) ExplainNormalizedInfo() string { + buffer := bytes.NewBufferString("") + buffer = explainNormalizedByItems(buffer, p.ByItems) + return buffer.String() +} + func (p *PhysicalWindow) formatFrameBound(buffer *bytes.Buffer, bound *FrameBound) { if bound.Type == ast.CurrentRow { buffer.WriteString("current row") @@ -467,9 +594,6 @@ func (p *DataSource) ExplainInfo() string { fmt.Fprintf(buffer, ", partition:%s", partitionName) } } - if p.handleCol != nil { - fmt.Fprintf(buffer, ", pk col:%s", p.handleCol.ExplainInfo()) - } return buffer.String() } @@ -496,6 +620,20 @@ func explainByItems(buffer *bytes.Buffer, byItems []*ByItems) *bytes.Buffer { return buffer } +func explainNormalizedByItems(buffer *bytes.Buffer, byItems []*ByItems) *bytes.Buffer { + for i, item := range byItems { + order := "asc" + if item.Desc { + order = "desc" + } + fmt.Fprintf(buffer, "%s:%s", item.Expr.ExplainNormalizedInfo(), order) + if i+1 < len(byItems) { + buffer.WriteString(", ") + } + } + return buffer +} + // ExplainInfo implements Plan interface. func (p *LogicalSort) ExplainInfo() string { buffer := bytes.NewBufferString("") @@ -516,10 +654,41 @@ func (p *LogicalLimit) ExplainInfo() string { } // ExplainInfo implements Plan interface. -func (p *TableScan) ExplainInfo() string { +func (p *LogicalTableScan) ExplainInfo() string { + buffer := bytes.NewBufferString(p.Source.ExplainInfo()) + if p.Source.handleCol != nil { + fmt.Fprintf(buffer, ", pk col:%s", p.Source.handleCol.ExplainInfo()) + } + if len(p.AccessConds) > 0 { + fmt.Fprintf(buffer, ", cond:%v", p.AccessConds) + } + return buffer.String() +} + +// ExplainInfo implements Plan interface. +func (p *LogicalIndexScan) ExplainInfo() string { buffer := bytes.NewBufferString(p.Source.ExplainInfo()) + index := p.Index + if len(index.Columns) > 0 { + buffer.WriteString(", index:") + for i, idxCol := range index.Columns { + buffer.WriteString(idxCol.Name.O) + if i+1 < len(index.Columns) { + buffer.WriteString(", ") + } + } + } if len(p.AccessConds) > 0 { fmt.Fprintf(buffer, ", cond:%v", p.AccessConds) } return buffer.String() } + +// ExplainInfo implements Plan interface. +func (p *TiKVSingleGather) ExplainInfo() string { + buffer := bytes.NewBufferString(p.Source.ExplainInfo()) + if p.IsIndexGather { + buffer.WriteString(", index:" + p.Index.Name.String()) + } + return buffer.String() +} diff --git a/planner/core/find_best_task.go b/planner/core/find_best_task.go index 886064fea4f6f..2aab18735460c 100644 --- a/planner/core/find_best_task.go +++ b/planner/core/find_best_task.go @@ -22,6 +22,7 @@ import ( "github.com/pingcap/tidb/expression" "github.com/pingcap/tidb/kv" "github.com/pingcap/tidb/planner/property" + "github.com/pingcap/tidb/planner/util" "github.com/pingcap/tidb/sessionctx/stmtctx" "github.com/pingcap/tidb/statistics" "github.com/pingcap/tidb/types" @@ -187,8 +188,8 @@ func (p *LogicalMemTable) findBestTask(prop *property.PhysicalProperty) (t task, return invalidTask, nil } memTable := PhysicalMemTable{ - DBName: p.dbName, - Table: p.tableInfo, + Table: p.tableInfo, + Columns: p.tableInfo.Columns, }.Init(p.ctx, p.stats, p.blockOffset) memTable.SetSchema(p.schema) return &rootTask{p: memTable}, nil @@ -216,7 +217,7 @@ func (ds *DataSource) tryToGetDualTask() (task, error) { // candidatePath is used to maintain required info for skyline pruning. type candidatePath struct { - path *accessPath + path *util.AccessPath columnSet *intsets.Sparse // columnSet is the set of columns that occurred in the access conditions. 
isSingleScan bool isMatchProp bool @@ -275,36 +276,41 @@ func compareCandidates(lhs, rhs *candidatePath) int { return 0 } -func (ds *DataSource) getTableCandidate(path *accessPath, prop *property.PhysicalProperty) *candidatePath { +func (ds *DataSource) getTableCandidate(path *util.AccessPath, prop *property.PhysicalProperty) *candidatePath { candidate := &candidatePath{path: path} pkCol := ds.getPKIsHandleCol() - candidate.isMatchProp = len(prop.Items) == 1 && pkCol != nil && prop.Items[0].Col.Equal(nil, pkCol) - candidate.columnSet = expression.ExtractColumnSet(path.accessConds) + if len(prop.Items) == 1 && pkCol != nil { + candidate.isMatchProp = prop.Items[0].Col.Equal(nil, pkCol) + if path.StoreType == kv.TiFlash { + candidate.isMatchProp = candidate.isMatchProp && !prop.Items[0].Desc + } + } + candidate.columnSet = expression.ExtractColumnSet(path.AccessConds) candidate.isSingleScan = true return candidate } -func (ds *DataSource) getIndexCandidate(path *accessPath, prop *property.PhysicalProperty, isSingleScan bool) *candidatePath { +func (ds *DataSource) getIndexCandidate(path *util.AccessPath, prop *property.PhysicalProperty, isSingleScan bool) *candidatePath { candidate := &candidatePath{path: path} all, _ := prop.AllSameOrder() // When the prop is empty or `all` is false, `isMatchProp` is better to be `false` because // it needs not to keep order for index scan. if !prop.IsEmpty() && all { - for i, col := range path.idxCols { + for i, col := range path.IdxCols { if col.Equal(nil, prop.Items[0].Col) { - candidate.isMatchProp = matchIndicesProp(path.idxCols[i:], path.idxColLens[i:], prop.Items) + candidate.isMatchProp = matchIndicesProp(path.IdxCols[i:], path.IdxColLens[i:], prop.Items) break - } else if i >= path.eqCondCount { + } else if i >= path.EqCondCount { break } } } - candidate.columnSet = expression.ExtractColumnSet(path.accessConds) + candidate.columnSet = expression.ExtractColumnSet(path.AccessConds) candidate.isSingleScan = isSingleScan return candidate } -func (ds *DataSource) getIndexMergeCandidate(path *accessPath) *candidatePath { +func (ds *DataSource) getIndexMergeCandidate(path *util.AccessPath) *candidatePath { candidate := &candidatePath{path: path} return candidate } @@ -314,20 +320,20 @@ func (ds *DataSource) getIndexMergeCandidate(path *accessPath) *candidatePath { func (ds *DataSource) skylinePruning(prop *property.PhysicalProperty) []*candidatePath { candidates := make([]*candidatePath, 0, 4) for _, path := range ds.possibleAccessPaths { - if path.partialIndexPaths != nil { + if path.PartialIndexPaths != nil { candidates = append(candidates, ds.getIndexMergeCandidate(path)) continue } // if we already know the range of the scan is empty, just return a TableDual - if len(path.ranges) == 0 && !ds.ctx.GetSessionVars().StmtCtx.UseCache { + if len(path.Ranges) == 0 && !ds.ctx.GetSessionVars().StmtCtx.UseCache { return []*candidatePath{{path: path}} } var currentCandidate *candidatePath - if path.isTablePath { + if path.IsTablePath { currentCandidate = ds.getTableCandidate(path, prop) } else { - coveredByIdx := isCoveringIndex(ds.schema.Columns, path.fullIdxCols, path.fullIdxColLens, ds.tableInfo.PKIsHandle) - if len(path.accessConds) > 0 || !prop.IsEmpty() || path.forced || coveredByIdx { + coveredByIdx := isCoveringIndex(ds.schema.Columns, path.FullIdxCols, path.FullIdxColLens, ds.tableInfo.PKIsHandle) + if len(path.AccessConds) > 0 || !prop.IsEmpty() || path.Forced || coveredByIdx { // We will use index to generate physical plan if any of the following 
conditions is satisfied: // 1. This path's access cond is not nil. // 2. We have a non-empty prop to match. @@ -340,7 +346,7 @@ func (ds *DataSource) skylinePruning(prop *property.PhysicalProperty) []*candida } pruned := false for i := len(candidates) - 1; i >= 0; i-- { - if candidates[i].path.storeType == kv.TiFlash { + if candidates[i].path.StoreType == kv.TiFlash { continue } result := compareCandidates(candidates[i], currentCandidate) @@ -411,7 +417,7 @@ func (ds *DataSource) findBestTask(prop *property.PhysicalProperty) (t task, err for _, candidate := range candidates { path := candidate.path - if path.partialIndexPaths != nil { + if path.PartialIndexPaths != nil { idxMergeTask, err := ds.convertToIndexMergeScan(prop, candidate) if err != nil { return nil, err @@ -422,14 +428,20 @@ func (ds *DataSource) findBestTask(prop *property.PhysicalProperty) (t task, err continue } // if we already know the range of the scan is empty, just return a TableDual - if len(path.ranges) == 0 && !ds.ctx.GetSessionVars().StmtCtx.UseCache { + if len(path.Ranges) == 0 && !ds.ctx.GetSessionVars().StmtCtx.UseCache { dual := PhysicalTableDual{}.Init(ds.ctx, ds.stats, ds.blockOffset) dual.SetSchema(ds.schema) return &rootTask{ p: dual, }, nil } - if path.isTablePath { + if path.IsTablePath { + if ds.preferStoreType&preferTiFlash != 0 && path.StoreType == kv.TiKV { + continue + } + if ds.preferStoreType&preferTiKV != 0 && path.StoreType == kv.TiFlash { + continue + } tblTask, err := ds.convertToTableScan(prop, candidate) if err != nil { return nil, err @@ -461,17 +473,17 @@ func (ds *DataSource) convertToIndexMergeScan(prop *property.PhysicalProperty, c } path := candidate.path var totalCost, totalRowCount float64 - scans := make([]PhysicalPlan, 0, len(path.partialIndexPaths)) + scans := make([]PhysicalPlan, 0, len(path.PartialIndexPaths)) cop := &copTask{ indexPlanFinished: true, tblColHists: ds.TblColHists, } allCovered := true - for _, partPath := range path.partialIndexPaths { + for _, partPath := range path.PartialIndexPaths { var scan PhysicalPlan var partialCost, rowCount float64 var tempCovered bool - if partPath.isTablePath { + if partPath.IsTablePath { scan, partialCost, rowCount, tempCovered = ds.convertToPartialTableScan(prop, partPath) } else { scan, partialCost, rowCount, tempCovered = ds.convertToPartialIndexScan(prop, partPath) @@ -482,8 +494,8 @@ func (ds *DataSource) convertToIndexMergeScan(prop *property.PhysicalProperty, c allCovered = allCovered && tempCovered } - if !allCovered || len(path.tableFilters) > 0 { - ts, partialCost := ds.buildIndexMergeTableScan(prop, path.tableFilters, totalRowCount) + if !allCovered || len(path.TableFilters) > 0 { + ts, partialCost := ds.buildIndexMergeTableScan(prop, path.TableFilters, totalRowCount) totalCost += partialCost cop.tablePlan = ts } @@ -493,22 +505,22 @@ func (ds *DataSource) convertToIndexMergeScan(prop *property.PhysicalProperty, c return task, nil } -func (ds *DataSource) convertToPartialIndexScan(prop *property.PhysicalProperty, path *accessPath) ( +func (ds *DataSource) convertToPartialIndexScan(prop *property.PhysicalProperty, path *util.AccessPath) ( indexPlan PhysicalPlan, partialCost float64, rowCount float64, isCovered bool) { - idx := path.index + idx := path.Index is, partialCost, rowCount := ds.getOriginalPhysicalIndexScan(prop, path, false, false) rowSize := is.indexScanRowSize(idx, ds, false) - isCovered = isCoveringIndex(ds.schema.Columns, path.fullIdxCols, path.fullIdxColLens, ds.tableInfo.PKIsHandle) - indexConds := 
path.indexFilters + isCovered = isCoveringIndex(ds.schema.Columns, path.FullIdxCols, path.FullIdxColLens, ds.tableInfo.PKIsHandle) + indexConds := path.IndexFilters sessVars := ds.ctx.GetSessionVars() if indexConds != nil { var selectivity float64 partialCost += rowCount * sessVars.CopCPUFactor - if path.countAfterAccess > 0 { - selectivity = path.countAfterIndex / path.countAfterAccess + if path.CountAfterAccess > 0 { + selectivity = path.CountAfterIndex / path.CountAfterAccess } rowCount = is.stats.RowCount * selectivity stats := &property.StatsInfo{RowCount: rowCount} @@ -526,7 +538,7 @@ func (ds *DataSource) convertToPartialIndexScan(prop *property.PhysicalProperty, return indexPlan, partialCost, rowCount, isCovered } -func (ds *DataSource) convertToPartialTableScan(prop *property.PhysicalProperty, path *accessPath) ( +func (ds *DataSource) convertToPartialTableScan(prop *property.PhysicalProperty, path *util.AccessPath) ( tablePlan PhysicalPlan, partialCost float64, rowCount float64, @@ -535,7 +547,7 @@ func (ds *DataSource) convertToPartialTableScan(prop *property.PhysicalProperty, rowSize := ds.TblColHists.GetAvgRowSize(ds.TblCols, false) sessVars := ds.ctx.GetSessionVars() if len(ts.filterCondition) > 0 { - selectivity, _, err := ds.tableStats.HistColl.Selectivity(ds.ctx, ts.filterCondition) + selectivity, _, err := ds.tableStats.HistColl.Selectivity(ds.ctx, ts.filterCondition, nil) if err != nil { logutil.BgLogger().Debug("calculate selectivity failed, use selection factor", zap.Error(err)) selectivity = selectionFactor @@ -578,7 +590,7 @@ func (ds *DataSource) buildIndexMergeTableScan(prop *property.PhysicalProperty, } if len(tableFilters) > 0 { partialCost += totalRowCount * sessVars.CopCPUFactor - selectivity, _, err := ds.tableStats.HistColl.Selectivity(ds.ctx, tableFilters) + selectivity, _, err := ds.tableStats.HistColl.Selectivity(ds.ctx, tableFilters, nil) if err != nil { logutil.BgLogger().Debug("calculate selectivity failed, use selection factor", zap.Error(err)) selectivity = selectionFactor @@ -729,14 +741,18 @@ func (is *PhysicalIndexScan) initSchema(idx *model.IndexInfo, idxExprCols []*exp // If it's double read case, the first index must return handle. So we should add extra handle column // if there isn't a handle column. if isDoubleRead && !setHandle { - indexCols = append(indexCols, &expression.Column{ID: model.ExtraHandleID, UniqueID: is.ctx.GetSessionVars().AllocPlanColumnID()}) + indexCols = append(indexCols, &expression.Column{ + RetType: types.NewFieldType(mysql.TypeLonglong), + ID: model.ExtraHandleID, + UniqueID: is.ctx.GetSessionVars().AllocPlanColumnID(), + }) } is.SetSchema(expression.NewSchema(indexCols...)) } -func (is *PhysicalIndexScan) addPushedDownSelection(copTask *copTask, p *DataSource, path *accessPath, finalStats *property.StatsInfo) { +func (is *PhysicalIndexScan) addPushedDownSelection(copTask *copTask, p *DataSource, path *util.AccessPath, finalStats *property.StatsInfo) { // Add filter condition to table plan now. 
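// Both convertToPartialIndexScan above and addPushedDownSelection below scale
// the scan's row count by CountAfterIndex/CountAfterAccess once index filters
// remain. A minimal sketch of that fallback (the helper name is hypothetical,
// not part of this patch):
func indexFilterRowCountSketch(countAfterAccess, countAfterIndex, scanRowCount float64) float64 {
	var selectivity float64 // stays 0 when the stats are unusable
	if countAfterAccess > 0 {
		selectivity = countAfterIndex / countAfterAccess
	}
	return scanRowCount * selectivity
}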
- indexConds, tableConds := path.indexFilters, path.tableFilters + indexConds, tableConds := path.IndexFilters, path.TableFilters tableConds, copTask.rootTaskConds = splitSelCondsWithVirtualColumn(tableConds) @@ -744,8 +760,8 @@ func (is *PhysicalIndexScan) addPushedDownSelection(copTask *copTask, p *DataSou if indexConds != nil { copTask.cst += copTask.count() * sessVars.CopCPUFactor var selectivity float64 - if path.countAfterAccess > 0 { - selectivity = path.countAfterIndex / path.countAfterAccess + if path.CountAfterAccess > 0 { + selectivity = path.CountAfterIndex / path.CountAfterAccess } count := is.stats.RowCount * selectivity stats := p.tableStats.ScaleByExpectCnt(count) @@ -890,19 +906,19 @@ func convertRangeFromExpectedCnt(ranges []*ranger.Range, rangeCounts []float64, return convertedRanges, count, false } -// crossEstimateRowCount estimates row count of table scan using histogram of another column which is in tableFilters +// crossEstimateRowCount estimates row count of table scan using histogram of another column which is in TableFilters // and has high order correlation with handle column. For example, if the query is like: // `select * from tbl where a = 1 order by pk limit 1` // if order of column `a` is strictly correlated with column `pk`, the row count of table scan should be: // `1 + row_count(a < 1 or a is null)` -func (ds *DataSource) crossEstimateRowCount(path *accessPath, expectedCnt float64, desc bool) (float64, bool, float64) { - if ds.statisticTable.Pseudo || len(path.tableFilters) == 0 { +func (ds *DataSource) crossEstimateRowCount(path *util.AccessPath, expectedCnt float64, desc bool) (float64, bool, float64) { + if ds.statisticTable.Pseudo || len(path.TableFilters) == 0 { return 0, false, 0 } - col, corr := getMostCorrColFromExprs(path.tableFilters, ds.statisticTable, ds.ctx.GetSessionVars().CorrelationThreshold) + col, corr := getMostCorrColFromExprs(path.TableFilters, ds.statisticTable, ds.ctx.GetSessionVars().CorrelationThreshold) // If table scan is not full range scan, we cannot use histogram of other columns for estimation, because // the histogram reflects value distribution in the whole table level. - if col == nil || len(path.accessConds) > 0 { + if col == nil || len(path.AccessConds) > 0 { return 0, false, corr } colInfoID := col.ID @@ -911,7 +927,7 @@ func (ds *DataSource) crossEstimateRowCount(path *accessPath, expectedCnt float6 if colHist.Correlation < 0 { desc = !desc } - accessConds, remained := ranger.DetachCondsForColumn(ds.ctx, path.tableFilters, col) + accessConds, remained := ranger.DetachCondsForColumn(ds.ctx, path.TableFilters, col) if len(accessConds) == 0 { return 0, false, corr } @@ -930,7 +946,7 @@ func (ds *DataSource) crossEstimateRowCount(path *accessPath, expectedCnt float6 } convertedRanges, count, isFull := convertRangeFromExpectedCnt(ranges, rangeCounts, expectedCnt, desc) if isFull { - return path.countAfterAccess, true, 0 + return path.CountAfterAccess, true, 0 } var rangeCount float64 if idxExists { @@ -945,12 +961,12 @@ func (ds *DataSource) crossEstimateRowCount(path *accessPath, expectedCnt float6 if len(remained) > 0 { scanCount = scanCount / selectionFactor } - scanCount = math.Min(scanCount, path.countAfterAccess) + scanCount = math.Min(scanCount, path.CountAfterAccess) return scanCount, true, 0 } -// GetPhysicalScan returns PhysicalTableScan for the logical TableScan. 
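// A worked sketch of the cross-column estimate documented above (hypothetical
// helper; assumes the correlated column's histogram already produced
// rowsBeforeMatch = row_count(a < v or a is null)): the scan must pass those
// rows before collecting limitCnt matches, remaining filters divide the
// estimate by selectionFactor, and CountAfterAccess caps the result.
func crossEstimateSketch(rowsBeforeMatch, limitCnt, countAfterAccess float64, hasRemainedConds bool) float64 {
	scanCount := rowsBeforeMatch + limitCnt
	if hasRemainedConds {
		scanCount /= selectionFactor // same fallback as the code above
	}
	return math.Min(scanCount, countAfterAccess)
}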
-func (s *TableScan) GetPhysicalScan(schema *expression.Schema, stats *property.StatsInfo) *PhysicalTableScan { +// GetPhysicalScan returns PhysicalTableScan for the LogicalTableScan. +func (s *LogicalTableScan) GetPhysicalScan(schema *expression.Schema, stats *property.StatsInfo) *PhysicalTableScan { ds := s.Source ts := PhysicalTableScan{ Table: ds.tableInfo, @@ -974,6 +990,28 @@ func (s *TableScan) GetPhysicalScan(schema *expression.Schema, stats *property.S return ts } +// GetPhysicalIndexScan returns PhysicalIndexScan for the logical IndexScan. +func (s *LogicalIndexScan) GetPhysicalIndexScan(schema *expression.Schema, stats *property.StatsInfo) *PhysicalIndexScan { + ds := s.Source + is := PhysicalIndexScan{ + Table: ds.tableInfo, + TableAsName: ds.TableAsName, + DBName: ds.DBName, + Columns: s.Columns, + Index: s.Index, + IdxCols: s.idxCols, + IdxColLens: s.idxColLens, + AccessCondition: s.AccessConds, + Ranges: s.Ranges, + dataSourceSchema: ds.schema, + isPartition: ds.isPartition, + physicalTableID: ds.physicalTableID, + }.Init(ds.ctx, ds.blockOffset) + is.stats = stats + is.initSchema(s.Index, s.fullIdxCols, s.IsDoubleRead) + return is +} + // convertToTableScan converts the DataSource to table scan. func (ds *DataSource) convertToTableScan(prop *property.PhysicalProperty, candidate *candidatePath) (task task, err error) { // It will be handled in convertToIndexScan. @@ -1016,7 +1054,7 @@ func (ts *PhysicalTableScan) addPushedDownSelection(copTask *copTask, stats *pro } } -func (ds *DataSource) getOriginalPhysicalTableScan(prop *property.PhysicalProperty, path *accessPath, isMatchProp bool) (*PhysicalTableScan, float64, float64) { +func (ds *DataSource) getOriginalPhysicalTableScan(prop *property.PhysicalProperty, path *util.AccessPath, isMatchProp bool) (*PhysicalTableScan, float64, float64) { ts := PhysicalTableScan{ Table: ds.tableInfo, Columns: ds.Columns, @@ -1024,17 +1062,11 @@ func (ds *DataSource) getOriginalPhysicalTableScan(prop *property.PhysicalProper DBName: ds.DBName, isPartition: ds.isPartition, physicalTableID: ds.physicalTableID, - Ranges: path.ranges, - AccessCondition: path.accessConds, - filterCondition: path.tableFilters, - StoreType: path.storeType, + Ranges: path.Ranges, + AccessCondition: path.AccessConds, + filterCondition: path.TableFilters, + StoreType: path.StoreType, }.Init(ds.ctx, ds.blockOffset) - if ds.preferStoreType&preferTiFlash != 0 { - ts.StoreType = kv.TiFlash - } - if ds.preferStoreType&preferTiKV != 0 { - ts.StoreType = kv.TiKV - } if ts.StoreType == kv.TiFlash { // Append the AccessCondition to filterCondition because TiFlash only support full range scan for each // region, do not reset ts.Ranges as it will help prune regions during `buildCopTasks` @@ -1049,7 +1081,7 @@ func (ds *DataSource) getOriginalPhysicalTableScan(prop *property.PhysicalProper } } } - rowCount := path.countAfterAccess + rowCount := path.CountAfterAccess if prop.ExpectedCnt < ds.stats.RowCount { count, ok, corr := ds.crossEstimateRowCount(path, prop.ExpectedCnt, isMatchProp && prop.Items[0].Desc) if ok { @@ -1099,18 +1131,18 @@ func (ds *DataSource) getOriginalPhysicalTableScan(prop *property.PhysicalProper return ts, cost, rowCount } -func (ds *DataSource) getOriginalPhysicalIndexScan(prop *property.PhysicalProperty, path *accessPath, isMatchProp bool, isSingleScan bool) (*PhysicalIndexScan, float64, float64) { - idx := path.index +func (ds *DataSource) getOriginalPhysicalIndexScan(prop *property.PhysicalProperty, path *util.AccessPath, isMatchProp bool, isSingleScan 
bool) (*PhysicalIndexScan, float64, float64) { + idx := path.Index is := PhysicalIndexScan{ Table: ds.tableInfo, TableAsName: ds.TableAsName, DBName: ds.DBName, Columns: ds.Columns, Index: idx, - IdxCols: path.idxCols, - IdxColLens: path.idxColLens, - AccessCondition: path.accessConds, - Ranges: path.ranges, + IdxCols: path.IdxCols, + IdxColLens: path.IdxColLens, + AccessCondition: path.AccessConds, + Ranges: path.Ranges, dataSourceSchema: ds.schema, isPartition: ds.isPartition, physicalTableID: ds.physicalTableID, @@ -1119,13 +1151,13 @@ func (ds *DataSource) getOriginalPhysicalIndexScan(prop *property.PhysicalProper if statsTbl.Indices[idx.ID] != nil { is.Hist = &statsTbl.Indices[idx.ID].Histogram } - rowCount := path.countAfterAccess - is.initSchema(idx, path.fullIdxCols, !isSingleScan) + rowCount := path.CountAfterAccess + is.initSchema(idx, path.FullIdxCols, !isSingleScan) // Only use expectedCnt when it's smaller than the count we calculated. // e.g. IndexScan(count1)->After Filter(count2). The `ds.stats.RowCount` is count2. count1 is the one we need to calculate // If expectedCnt and count2 are both zero and we go into the below `if` block, the count1 will be set to zero though it's shouldn't be. if (isMatchProp || prop.IsEmpty()) && prop.ExpectedCnt < ds.stats.RowCount { - selectivity := ds.stats.RowCount / path.countAfterAccess + selectivity := ds.stats.RowCount / path.CountAfterAccess rowCount = math.Min(prop.ExpectedCnt/selectivity, rowCount) } is.stats = ds.tableStats.ScaleByExpectCnt(rowCount) diff --git a/planner/core/indexmerge_test.go b/planner/core/indexmerge_test.go index bdbc86001c58f..bbcc701a0754a 100644 --- a/planner/core/indexmerge_test.go +++ b/planner/core/indexmerge_test.go @@ -20,6 +20,7 @@ import ( "github.com/pingcap/parser" "github.com/pingcap/parser/model" "github.com/pingcap/tidb/infoschema" + "github.com/pingcap/tidb/planner/util" "github.com/pingcap/tidb/sessionctx" "github.com/pingcap/tidb/util/testleak" "github.com/pingcap/tidb/util/testutil" @@ -49,7 +50,7 @@ func (s *testIndexMergeSuite) TearDownSuite(c *C) { c.Assert(s.testdata.GenerateOutputIfNeeded(), IsNil) } -func getIndexMergePathDigest(paths []*accessPath, startIndex int) string { +func getIndexMergePathDigest(paths []*util.AccessPath, startIndex int) string { if len(paths) == startIndex { return "[]" } @@ -60,18 +61,18 @@ func getIndexMergePathDigest(paths []*accessPath, startIndex int) string { } path := paths[i] idxMergeDisgest += "{Idxs:[" - for j := 0; j < len(path.partialIndexPaths); j++ { + for j := 0; j < len(path.PartialIndexPaths); j++ { if j > 0 { idxMergeDisgest += "," } - idxMergeDisgest += path.partialIndexPaths[j].index.Name.L + idxMergeDisgest += path.PartialIndexPaths[j].Index.Name.L } idxMergeDisgest += "],TbFilters:[" - for j := 0; j < len(path.tableFilters); j++ { + for j := 0; j < len(path.TableFilters); j++ { if j > 0 { idxMergeDisgest += "," } - idxMergeDisgest += path.tableFilters[j].String() + idxMergeDisgest += path.TableFilters[j].String() } idxMergeDisgest += "]}" } diff --git a/planner/core/initialize.go b/planner/core/initialize.go index 04647e1d1361f..b3b0f1831f3d7 100644 --- a/planner/core/initialize.go +++ b/planner/core/initialize.go @@ -39,18 +39,24 @@ func (ds DataSource) Init(ctx sessionctx.Context, offset int) *DataSource { return &ds } -// Init initializes TableGather. 
-func (tg TableGather) Init(ctx sessionctx.Context, offset int) *TableGather { - tg.baseLogicalPlan = newBaseLogicalPlan(ctx, plancodec.TypeTableGather, &tg, offset) - return &tg +// Init initializes TiKVSingleGather. +func (sg TiKVSingleGather) Init(ctx sessionctx.Context, offset int) *TiKVSingleGather { + sg.baseLogicalPlan = newBaseLogicalPlan(ctx, plancodec.TypeTiKVSingleGather, &sg, offset) + return &sg } -// Init initializes TableScan. -func (ts TableScan) Init(ctx sessionctx.Context, offset int) *TableScan { +// Init initializes LogicalTableScan. +func (ts LogicalTableScan) Init(ctx sessionctx.Context, offset int) *LogicalTableScan { ts.baseLogicalPlan = newBaseLogicalPlan(ctx, plancodec.TypeTableScan, &ts, offset) return &ts } +// Init initializes LogicalIndexScan. +func (is LogicalIndexScan) Init(ctx sessionctx.Context, offset int) *LogicalIndexScan { + is.baseLogicalPlan = newBaseLogicalPlan(ctx, plancodec.TypeIdxScan, &is, offset) + return &is +} + // Init initializes LogicalApply. func (la LogicalApply) Init(ctx sessionctx.Context, offset int) *LogicalApply { la.baseLogicalPlan = newBaseLogicalPlan(ctx, plancodec.TypeApply, &la, offset) @@ -393,15 +399,7 @@ func (p PhysicalTableReader) Init(ctx sessionctx.Context, offset int) *PhysicalT // Init initializes PhysicalIndexReader. func (p PhysicalIndexReader) Init(ctx sessionctx.Context, offset int) *PhysicalIndexReader { p.basePhysicalPlan = newBasePhysicalPlan(ctx, plancodec.TypeIndexReader, &p, offset) - p.IndexPlans = flattenPushDownPlan(p.indexPlan) - switch p.indexPlan.(type) { - case *PhysicalHashAgg, *PhysicalStreamAgg: - p.schema = p.indexPlan.Schema() - default: - is := p.IndexPlans[0].(*PhysicalIndexScan) - p.schema = is.dataSourceSchema - } - p.OutputColumns = p.schema.Clone().Columns + p.SetSchema(nil) return &p } diff --git a/planner/core/logical_plan_builder.go b/planner/core/logical_plan_builder.go index 25477a1998877..15d8f29567d47 100644 --- a/planner/core/logical_plan_builder.go +++ b/planner/core/logical_plan_builder.go @@ -35,8 +35,10 @@ import ( "github.com/pingcap/tidb/expression" "github.com/pingcap/tidb/expression/aggregation" "github.com/pingcap/tidb/infoschema" + "github.com/pingcap/tidb/kv" "github.com/pingcap/tidb/metrics" "github.com/pingcap/tidb/planner/property" + "github.com/pingcap/tidb/planner/util" "github.com/pingcap/tidb/privilege" "github.com/pingcap/tidb/sessionctx" "github.com/pingcap/tidb/statistics" @@ -436,10 +438,10 @@ func (ds *DataSource) setPreferredStoreType(hintInfo *tableHintInfo) { } else { alias = &hintTableInfo{dbName: ds.DBName, tblName: ds.tableInfo.Name, selectOffset: ds.SelectBlockOffset()} } - if hintInfo.ifPreferTiFlash(alias) { - ds.preferStoreType |= preferTiFlash - } if hintInfo.ifPreferTiKV(alias) { + ds.preferStoreType |= preferTiKV + } + if hintInfo.ifPreferTiFlash(alias) { if ds.preferStoreType != 0 { errMsg := fmt.Sprintf("Storage hints are conflict, you can only specify one storage type of table %s.%s", alias.dbName.L, alias.tblName.L) @@ -448,7 +450,19 @@ func (ds *DataSource) setPreferredStoreType(hintInfo *tableHintInfo) { ds.preferStoreType = 0 return } - ds.preferStoreType |= preferTiKV + ds.preferStoreType |= preferTiFlash + hasTiFlashPath := false + for _, path := range ds.possibleAccessPaths { + if path.StoreType == kv.TiFlash { + hasTiFlashPath = true + break + } + } + // TODO: For now, if there is a TiFlash hint for a table, we enforce a TiFlash path. But a hint is just a suggestion + // for the planner.
We can keep it since we need it to debug with PD and TiFlash. In the future, this should be removed. + if !hasTiFlashPath { + ds.possibleAccessPaths = append(ds.possibleAccessPaths, &util.AccessPath{IsTablePath: true, StoreType: kv.TiFlash}) + } } } @@ -2492,7 +2506,7 @@ func (b *PlanBuilder) buildDataSource(ctx context.Context, tn *ast.TableName, as } b.visitInfo = appendVisitInfo(b.visitInfo, mysql.SelectPriv, dbName.L, tableInfo.Name.L, "", authErr) - if tbl.Type() == table.VirtualTable { + if tbl.Type().IsVirtualTable() { return b.buildMemTable(ctx, dbName, tableInfo) } @@ -2517,7 +2531,7 @@ func (b *PlanBuilder) buildDataSource(ctx context.Context, tn *ast.TableName, as if tblName.L == "" { tblName = tn.Name } - possiblePaths, err := b.getPossibleAccessPaths(tn.IndexHints, tableInfo, dbName, tblName) + possiblePaths, err := b.getPossibleAccessPaths(tn.IndexHints, tbl, dbName, tblName) if err != nil { return nil, err } @@ -2618,10 +2632,10 @@ func (b *PlanBuilder) buildDataSource(ctx context.Context, tn *ast.TableName, as ds.names = names ds.setPreferredStoreType(b.TableHints()) - // Init fullIdxCols, fullIdxColLens for accessPaths. + // Init FullIdxCols, FullIdxColLens for accessPaths. for _, path := range ds.possibleAccessPaths { - if !path.isTablePath { - path.fullIdxCols, path.fullIdxColLens = expression.IndexInfo2Cols(ds.Columns, ds.schema.Columns, path.index) + if !path.IsTablePath { + path.FullIdxCols, path.FullIdxColLens = expression.IndexInfo2Cols(ds.Columns, ds.schema.Columns, path.Index) } } @@ -2698,6 +2712,13 @@ func (b *PlanBuilder) buildMemTable(ctx context.Context, dbName model.CIStr, tab }.Init(b.ctx, b.getSelectOffset()) p.SetSchema(schema) p.names = names + + // Some memory tables can accept pushed-down predicates + switch tableInfo.Name.L { + case strings.ToLower(infoschema.TableTiDBClusterConfig): + p.Extractor = &ClusterConfigTableExtractor{} + } + + return p, nil } diff --git a/planner/core/logical_plan_test.go b/planner/core/logical_plan_test.go index 79a1dd565c86f..8c8f139ae323b 100644 --- a/planner/core/logical_plan_test.go +++ b/planner/core/logical_plan_test.go @@ -1349,10 +1349,10 @@ func byItemsToProperty(byItems []*ByItems) *property.PhysicalProperty { func pathsName(paths []*candidatePath) string { var names []string for _, path := range paths { - if path.path.isTablePath { + if path.path.IsTablePath { names = append(names, "PRIMARY_KEY") } else { - names = append(names, path.path.index.Name.O) + names = append(names, path.path.Index.Name.O) } } return strings.Join(names, ",") diff --git a/planner/core/logical_plans.go b/planner/core/logical_plans.go index a9d4c656db41e..5bfa6614b4638 100644 --- a/planner/core/logical_plans.go +++ b/planner/core/logical_plans.go @@ -22,8 +22,8 @@ import ( "github.com/pingcap/parser/mysql" "github.com/pingcap/tidb/expression" "github.com/pingcap/tidb/expression/aggregation" - "github.com/pingcap/tidb/kv" "github.com/pingcap/tidb/planner/property" + "github.com/pingcap/tidb/planner/util" "github.com/pingcap/tidb/statistics" "github.com/pingcap/tidb/table" "github.com/pingcap/tidb/types" @@ -41,8 +41,9 @@ var ( _ LogicalPlan = &LogicalMaxOneRow{} _ LogicalPlan = &LogicalTableDual{} _ LogicalPlan = &DataSource{} - _ LogicalPlan = &TableGather{} - _ LogicalPlan = &TableScan{} + _ LogicalPlan = &TiKVSingleGather{} + _ LogicalPlan = &LogicalTableScan{} + _ LogicalPlan = &LogicalIndexScan{} _ LogicalPlan = &LogicalUnionAll{} _ LogicalPlan = &LogicalSort{} _ LogicalPlan = &LogicalLock{} @@ -282,6 +283,12 @@ func (la *LogicalAggregation)
CopyAggHints(agg *LogicalAggregation) { la.aggHints = agg.aggHints } +// IsPartialModeAgg returns whether all of the AggFuncs are in partial mode. +func (la *LogicalAggregation) IsPartialModeAgg() bool { + // Since all of the AggFuncs share the same AggMode, we only need to check the first one. + return la.AggFuncs[0].Mode == aggregation.Partial1Mode +} + // GetGroupByCols returns the groupByCols. If the groupByCols haven't been collected, // this method would collect them at first. If the GroupByItems have been changed, // we should explicitly collect GroupByColumns before this method. @@ -355,8 +362,8 @@ type LogicalTableDual struct { // LogicalMemTable represents a memory table or virtual table // Some memory tables want to take ownership of some predicates // e.g. -// SELECT * FROM tidb_cluster_log WHERE type='tikv' AND address='192.16.5.32' -// Assume that the table `tidb_cluster_log` is a memory table, which is used +// SELECT * FROM cluster_log WHERE type='tikv' AND address='192.16.5.32' +// Assume that the table `cluster_log` is a memory table, which is used // to retrieve logs from remote components. In the above situation we should // send the log search request to the target TiKV (192.16.5.32) directly instead of // requesting the log search gRPC interface of all cluster components to retrieve @@ -364,6 +371,7 @@ type LogicalTableDual struct { type LogicalMemTable struct { logicalSchemaProducer + Extractor MemTablePredicateExtractor dbName model.CIStr tableInfo *model.TableInfo } @@ -400,7 +408,7 @@ type DataSource struct { tableStats *property.StatsInfo // possibleAccessPaths stores all the possible access paths for the physical plan, including table scan. - possibleAccessPaths []*accessPath + possibleAccessPaths []*util.AccessPath // The data source may be a partition, rather than a real table. isPartition bool @@ -420,15 +428,20 @@ type DataSource struct { preferStoreType int } -// TableGather is a leaf logical operator of TiDB layer to gather +// TiKVSingleGather is a leaf logical operator of TiDB layer to gather // tuples from TiKV regions. -type TableGather struct { +type TiKVSingleGather struct { logicalSchemaProducer Source *DataSource + // IsIndexGather marks if this TiKVSingleGather gathers tuples from an IndexScan. + // In the implementation phase, we need this flag to determine whether to generate + // PhysicalTableReader or PhysicalIndexReader. + IsIndexGather bool + Index *model.IndexInfo } -// TableScan is the logical table scan operator for TiKV. -type TableScan struct { +// LogicalTableScan is the logical table scan operator for TiKV. +type LogicalTableScan struct { logicalSchemaProducer Source *DataSource Handle *expression.Column @@ -436,37 +449,47 @@ type TableScan struct { Ranges []*ranger.Range } -// accessPath indicates the way we access a table: by using single index, or by using multiple indexes, -// or just by using table scan. -type accessPath struct { - index *model.IndexInfo +// LogicalIndexScan is the logical index scan operator for TiKV. +type LogicalIndexScan struct { + logicalSchemaProducer + // DataSource should be read-only here. + Source *DataSource + IsDoubleRead bool + + EqCondCount int + AccessConds expression.CNFExprs + Ranges []*ranger.Range + + Index *model.IndexInfo + Columns []*model.ColumnInfo fullIdxCols []*expression.Column fullIdxColLens []int idxCols []*expression.Column idxColLens []int - ranges []*ranger.Range - // countAfterAccess is the row count after we apply range seek and before we use other filter to filter data.
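// A sketch of how the IsIndexGather flag on TiKVSingleGather above is meant to
// be consumed during the implementation phase (hypothetical helper; the real
// dispatch lives in the rules that turn a gather into a reader):
func gatherReaderKindSketch(sg *TiKVSingleGather) string {
	if sg.IsIndexGather {
		return "PhysicalIndexReader" // reads rows produced by a LogicalIndexScan
	}
	return "PhysicalTableReader" // reads rows produced by a LogicalTableScan
}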
- countAfterAccess float64 - // countAfterIndex is the row count after we apply filters on index and before we apply the table filters. - countAfterIndex float64 - accessConds []expression.Expression - eqCondCount int - indexFilters []expression.Expression - tableFilters []expression.Expression - // isTablePath indicates whether this path is table path. - isTablePath bool - storeType kv.StoreType - // forced means this path is generated by `use/force index()`. - forced bool - // partialIndexPaths store all index access paths. - // If there are extra filters, store them in tableFilters. - partialIndexPaths []*accessPath +} + +// MatchIndexProp checks whether the index scan can match the required property. +func (p *LogicalIndexScan) MatchIndexProp(prop *property.PhysicalProperty) (match bool) { + if prop.IsEmpty() { + return true + } + if all, _ := prop.AllSameOrder(); !all { + return false + } + for i, col := range p.idxCols { + if col.Equal(nil, prop.Items[0].Col) { + return matchIndicesProp(p.idxCols[i:], p.idxColLens[i:], prop.Items) + } else if i >= p.EqCondCount { + break + } + } + return false } // getTablePath finds the TablePath from a group of accessPaths. -func getTablePath(paths []*accessPath) *accessPath { +func getTablePath(paths []*util.AccessPath) *util.AccessPath { for _, path := range paths { - if path.isTablePath { + if path.IsTablePath { return path } } @@ -474,34 +497,60 @@ } func (ds *DataSource) buildTableGather() LogicalPlan { - ts := TableScan{Source: ds, Handle: ds.getHandleCol()}.Init(ds.ctx, ds.blockOffset) + ts := LogicalTableScan{Source: ds, Handle: ds.getHandleCol()}.Init(ds.ctx, ds.blockOffset) ts.SetSchema(ds.Schema()) - tg := TableGather{Source: ds}.Init(ds.ctx, ds.blockOffset) - tg.SetSchema(ds.Schema()) - tg.SetChildren(ts) - return tg -} - -// Convert2Gathers builds logical TableGather and IndexGather(to be implemented) from DataSource. + sg := TiKVSingleGather{Source: ds, IsIndexGather: false}.Init(ds.ctx, ds.blockOffset) + sg.SetSchema(ds.Schema()) + sg.SetChildren(ts) + return sg +} + +func (ds *DataSource) buildIndexGather(path *util.AccessPath) LogicalPlan { + is := LogicalIndexScan{ + Source: ds, + IsDoubleRead: false, + Index: path.Index, + }.Init(ds.ctx, ds.blockOffset) + + is.Columns = make([]*model.ColumnInfo, len(ds.Columns)) + copy(is.Columns, ds.Columns) + is.SetSchema(ds.Schema()) + + sg := TiKVSingleGather{ + Source: ds, + IsIndexGather: true, + Index: path.Index, + }.Init(ds.ctx, ds.blockOffset) + sg.SetSchema(ds.Schema()) + sg.SetChildren(is) + return sg +} + +// Convert2Gathers builds logical TiKVSingleGathers from DataSource. func (ds *DataSource) Convert2Gathers() (gathers []LogicalPlan) { tg := ds.buildTableGather() gathers = append(gathers, tg) for _, path := range ds.possibleAccessPaths { - if !path.isTablePath { - // TODO: add IndexGather + if !path.IsTablePath { + path.FullIdxCols, path.FullIdxColLens = expression.IndexInfo2Cols(ds.Columns, ds.schema.Columns, path.Index) + // If the index columns can cover all of the needed columns, we can use an IndexGather + IndexScan. + if isCoveringIndex(ds.schema.Columns, path.FullIdxCols, path.FullIdxColLens, ds.tableInfo.PKIsHandle) { + gathers = append(gathers, ds.buildIndexGather(path)) + } + // TODO: If the index columns cannot cover the schema, use IndexLookUpGather. } } return gathers } -// deriveTablePathStats will fulfill the information that the accessPath need. +// deriveTablePathStats will fulfill the information that the AccessPath needs.
// And it will check whether the primary key is covered only by point query. // isIm indicates whether this function is called to generate the partial path for IndexMerge. -func (ds *DataSource) deriveTablePathStats(path *accessPath, conds []expression.Expression, isIm bool) (bool, error) { +func (ds *DataSource) deriveTablePathStats(path *util.AccessPath, conds []expression.Expression, isIm bool) (bool, error) { var err error sc := ds.ctx.GetSessionVars().StmtCtx - path.countAfterAccess = float64(ds.statisticTable.Count) - path.tableFilters = conds + path.CountAfterAccess = float64(ds.statisticTable.Count) + path.TableFilters = conds var pkCol *expression.Column columnLen := len(ds.schema.Columns) isUnsigned := false @@ -514,20 +563,20 @@ func (ds *DataSource) deriveTablePathStats(path *accessPath, conds []expression. pkCol = ds.schema.Columns[columnLen-1] } if pkCol == nil { - path.ranges = ranger.FullIntRange(isUnsigned) + path.Ranges = ranger.FullIntRange(isUnsigned) return false, nil } - path.ranges = ranger.FullIntRange(isUnsigned) + path.Ranges = ranger.FullIntRange(isUnsigned) if len(conds) == 0 { return false, nil } - path.accessConds, path.tableFilters = ranger.DetachCondsForColumn(ds.ctx, conds, pkCol) + path.AccessConds, path.TableFilters = ranger.DetachCondsForColumn(ds.ctx, conds, pkCol) // If there's no access cond, we try to find that whether there's expression containing correlated column that // can be used to access data. corColInAccessConds := false - if len(path.accessConds) == 0 { - for i, filter := range path.tableFilters { + if len(path.AccessConds) == 0 { + for i, filter := range path.TableFilters { eqFunc, ok := filter.(*expression.ScalarFunction) if !ok || eqFunc.FuncName.L != ast.EQ { continue @@ -536,8 +585,8 @@ func (ds *DataSource) deriveTablePathStats(path *accessPath, conds []expression. if lOk && lCol.Equal(ds.ctx, pkCol) { _, rOk := eqFunc.GetArgs()[1].(*expression.CorrelatedColumn) if rOk { - path.accessConds = append(path.accessConds, filter) - path.tableFilters = append(path.tableFilters[:i], path.tableFilters[i+1:]...) + path.AccessConds = append(path.AccessConds, filter) + path.TableFilters = append(path.TableFilters[:i], path.TableFilters[i+1:]...) corColInAccessConds = true break } @@ -546,8 +595,8 @@ func (ds *DataSource) deriveTablePathStats(path *accessPath, conds []expression. if rOk && rCol.Equal(ds.ctx, pkCol) { _, lOk := eqFunc.GetArgs()[0].(*expression.CorrelatedColumn) if lOk { - path.accessConds = append(path.accessConds, filter) - path.tableFilters = append(path.tableFilters[:i], path.tableFilters[i+1:]...) + path.AccessConds = append(path.AccessConds, filter) + path.TableFilters = append(path.TableFilters[:i], path.TableFilters[i+1:]...) corColInAccessConds = true break } @@ -555,22 +604,22 @@ func (ds *DataSource) deriveTablePathStats(path *accessPath, conds []expression. } } if corColInAccessConds { - path.countAfterAccess = 1 + path.CountAfterAccess = 1 return true, nil } - path.ranges, err = ranger.BuildTableRange(path.accessConds, sc, pkCol.RetType) + path.Ranges, err = ranger.BuildTableRange(path.AccessConds, sc, pkCol.RetType) if err != nil { return false, err } - path.countAfterAccess, err = ds.statisticTable.GetRowCountByIntColumnRanges(sc, pkCol.ID, path.ranges) - // If the `countAfterAccess` is less than `stats.RowCount`, there must be some inconsistent stats info. 
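// The loop above promotes `pk = correlated column` equalities from the table
// filters into the access conditions. A condensed sketch of that predicate
// test (hypothetical helper, mirroring the checks above):
func isPKEqCorColSketch(filter expression.Expression, pkCol *expression.Column) bool {
	eqFunc, ok := filter.(*expression.ScalarFunction)
	if !ok || eqFunc.FuncName.L != ast.EQ {
		return false
	}
	args := eqFunc.GetArgs()
	if col, isCol := args[0].(*expression.Column); isCol && col.Equal(nil, pkCol) {
		_, isCorCol := args[1].(*expression.CorrelatedColumn)
		return isCorCol
	}
	if col, isCol := args[1].(*expression.Column); isCol && col.Equal(nil, pkCol) {
		_, isCorCol := args[0].(*expression.CorrelatedColumn)
		return isCorCol
	}
	return false
}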
+ path.CountAfterAccess, err = ds.statisticTable.GetRowCountByIntColumnRanges(sc, pkCol.ID, path.Ranges) + // If the `CountAfterAccess` is less than `stats.RowCount`, there must be some inconsistent stats info. // We prefer the `stats.RowCount` because it could use more stats info to calculate the selectivity. - if path.countAfterAccess < ds.stats.RowCount && !isIm { - path.countAfterAccess = math.Min(ds.stats.RowCount/selectionFactor, float64(ds.statisticTable.Count)) + if path.CountAfterAccess < ds.stats.RowCount && !isIm { + path.CountAfterAccess = math.Min(ds.stats.RowCount/selectionFactor, float64(ds.statisticTable.Count)) } // Check whether the primary key is covered by point query. noIntervalRange := true - for _, ran := range path.ranges { + for _, ran := range path.Ranges { if !ran.IsPoint(sc) { noIntervalRange = false break @@ -579,90 +628,95 @@ func (ds *DataSource) deriveTablePathStats(path *accessPath, conds []expression. return noIntervalRange, err } -// deriveIndexPathStats will fulfill the information that the accessPath need. -// And it will check whether this index is full matched by point query. We will use this check to -// determine whether we remove other paths or not. -// conds is the conditions used to generate the DetachRangeResult for path. -// isIm indicates whether this function is called to generate the partial path for IndexMerge. -func (ds *DataSource) deriveIndexPathStats(path *accessPath, conds []expression.Expression, isIm bool) (bool, error) { +func (ds *DataSource) fillIndexPath(path *util.AccessPath, conds []expression.Expression) error { sc := ds.ctx.GetSessionVars().StmtCtx - path.ranges = ranger.FullRange() - path.countAfterAccess = float64(ds.statisticTable.Count) - path.idxCols, path.idxColLens = expression.IndexInfo2PrefixCols(ds.Columns, ds.schema.Columns, path.index) - path.fullIdxCols, path.fullIdxColLens = expression.IndexInfo2Cols(ds.Columns, ds.schema.Columns, path.index) - if !path.index.Unique && !path.index.Primary && len(path.index.Columns) == len(path.idxCols) { + path.Ranges = ranger.FullRange() + path.CountAfterAccess = float64(ds.statisticTable.Count) + path.IdxCols, path.IdxColLens = expression.IndexInfo2PrefixCols(ds.Columns, ds.schema.Columns, path.Index) + path.FullIdxCols, path.FullIdxColLens = expression.IndexInfo2Cols(ds.Columns, ds.schema.Columns, path.Index) + if !path.Index.Unique && !path.Index.Primary && len(path.Index.Columns) == len(path.IdxCols) { handleCol := ds.getPKIsHandleCol() if handleCol != nil && !mysql.HasUnsignedFlag(handleCol.RetType.Flag) { - path.idxCols = append(path.idxCols, handleCol) - path.idxColLens = append(path.idxColLens, types.UnspecifiedLength) + path.IdxCols = append(path.IdxCols, handleCol) + path.IdxColLens = append(path.IdxColLens, types.UnspecifiedLength) } } - eqOrInCount := 0 - if len(path.idxCols) != 0 { - res, err := ranger.DetachCondAndBuildRangeForIndex(ds.ctx, conds, path.idxCols, path.idxColLens) + if len(path.IdxCols) != 0 { + res, err := ranger.DetachCondAndBuildRangeForIndex(ds.ctx, conds, path.IdxCols, path.IdxColLens) if err != nil { - return false, err + return err } - path.ranges = res.Ranges - path.accessConds = res.AccessConds - path.tableFilters = res.RemainedConds - path.eqCondCount = res.EqCondCount - eqOrInCount = res.EqOrInCount - path.countAfterAccess, err = ds.tableStats.HistColl.GetRowCountByIndexRanges(sc, path.index.ID, path.ranges) + path.Ranges = res.Ranges + path.AccessConds = res.AccessConds + path.TableFilters = res.RemainedConds + path.EqCondCount = 
res.EqCondCount + path.EqOrInCondCount = res.EqOrInCount + path.IsDNFCond = res.IsDNFCond + path.CountAfterAccess, err = ds.tableStats.HistColl.GetRowCountByIndexRanges(sc, path.Index.ID, path.Ranges) if err != nil { - return false, err + return err } } else { - path.tableFilters = conds + path.TableFilters = conds } - if eqOrInCount == len(path.accessConds) { - accesses, remained := path.splitCorColAccessCondFromFilters(eqOrInCount) - path.accessConds = append(path.accessConds, accesses...) - path.tableFilters = remained + return nil +} + +// deriveIndexPathStats will fulfill the information that the AccessPath needs. +// And it will check whether this index is fully matched by a point query. We will use this check to +// determine whether we remove other paths or not. +// conds is the conditions used to generate the DetachRangeResult for path. +// isIm indicates whether this function is called to generate the partial path for IndexMerge. +func (ds *DataSource) deriveIndexPathStats(path *util.AccessPath, conds []expression.Expression, isIm bool) bool { + sc := ds.ctx.GetSessionVars().StmtCtx + if path.EqOrInCondCount == len(path.AccessConds) { + accesses, remained := path.SplitCorColAccessCondFromFilters(path.EqOrInCondCount) + path.AccessConds = append(path.AccessConds, accesses...) + path.TableFilters = remained if len(accesses) > 0 && ds.statisticTable.Pseudo { - path.countAfterAccess = ds.statisticTable.PseudoAvgCountPerValue() + path.CountAfterAccess = ds.statisticTable.PseudoAvgCountPerValue() } else { - selectivity := path.countAfterAccess / float64(ds.statisticTable.Count) + selectivity := path.CountAfterAccess / float64(ds.statisticTable.Count) for i := range accesses { - col := path.idxCols[eqOrInCount+i] + col := path.IdxCols[path.EqOrInCondCount+i] ndv := ds.getColumnNDV(col.ID) ndv *= selectivity if ndv < 1 { ndv = 1.0 } - path.countAfterAccess = path.countAfterAccess / ndv + path.CountAfterAccess = path.CountAfterAccess / ndv } } } - path.indexFilters, path.tableFilters = splitIndexFilterConditions(path.tableFilters, path.fullIdxCols, path.fullIdxColLens, ds.tableInfo) - // If the `countAfterAccess` is less than `stats.RowCount`, there must be some inconsistent stats info. + path.IndexFilters, path.TableFilters = splitIndexFilterConditions(path.TableFilters, path.FullIdxCols, path.FullIdxColLens, ds.tableInfo) + // If the `CountAfterAccess` is less than `stats.RowCount`, there must be some inconsistent stats info. // We prefer the `stats.RowCount` because it could use more stats info to calculate the selectivity.
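// The NDV loop above reads as repeated division: each correlated equality on
// the next index column divides CountAfterAccess by that column's NDV, scaled
// by the path's overall selectivity and clamped to at least 1. A minimal
// sketch (hypothetical helper):
func scaleCountByNDVSketch(countAfterAccess, selectivity float64, columnNDVs []float64) float64 {
	for _, ndv := range columnNDVs {
		ndv *= selectivity
		if ndv < 1 {
			ndv = 1.0
		}
		countAfterAccess /= ndv
	}
	return countAfterAccess
}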
- if path.countAfterAccess < ds.stats.RowCount && !isIm { - path.countAfterAccess = math.Min(ds.stats.RowCount/selectionFactor, float64(ds.statisticTable.Count)) + if path.CountAfterAccess < ds.stats.RowCount && !isIm { + path.CountAfterAccess = math.Min(ds.stats.RowCount/selectionFactor, float64(ds.statisticTable.Count)) } - if path.indexFilters != nil { - selectivity, _, err := ds.tableStats.HistColl.Selectivity(ds.ctx, path.indexFilters) + if path.IndexFilters != nil { + selectivity, _, err := ds.tableStats.HistColl.Selectivity(ds.ctx, path.IndexFilters, nil) if err != nil { logutil.BgLogger().Debug("calculate selectivity failed, use selection factor", zap.Error(err)) selectivity = selectionFactor } if isIm { - path.countAfterIndex = path.countAfterAccess * selectivity + path.CountAfterIndex = path.CountAfterAccess * selectivity } else { - path.countAfterIndex = math.Max(path.countAfterAccess*selectivity, ds.stats.RowCount) + path.CountAfterIndex = math.Max(path.CountAfterAccess*selectivity, ds.stats.RowCount) } } // Check whether there's only point query. noIntervalRanges := true haveNullVal := false - for _, ran := range path.ranges { + for _, ran := range path.Ranges { // Not point or the not full matched. - if !ran.IsPoint(sc) || len(ran.HighVal) != len(path.index.Columns) { + if !ran.IsPoint(sc) || len(ran.HighVal) != len(path.Index.Columns) { noIntervalRanges = false break } // Check whether there's null value. - for i := 0; i < len(path.index.Columns); i++ { + for i := 0; i < len(path.Index.Columns); i++ { if ran.HighVal[i].IsNull() { haveNullVal = true break @@ -672,90 +726,38 @@ func (ds *DataSource) deriveIndexPathStats(path *accessPath, conds []expression. break } } - return noIntervalRanges && !haveNullVal, nil -} - -func (path *accessPath) splitCorColAccessCondFromFilters(eqOrInCount int) (access, remained []expression.Expression) { - access = make([]expression.Expression, len(path.idxCols)-eqOrInCount) - used := make([]bool, len(path.tableFilters)) - for i := eqOrInCount; i < len(path.idxCols); i++ { - matched := false - for j, filter := range path.tableFilters { - if used[j] || !isColEqCorColOrConstant(filter, path.idxCols[i]) { - continue - } - matched = true - access[i-eqOrInCount] = filter - if path.idxColLens[i] == types.UnspecifiedLength { - used[j] = true - } - break - } - if !matched { - access = access[:i-eqOrInCount] - break - } - } - for i, ok := range used { - if !ok { - remained = append(remained, path.tableFilters[i]) - } - } - return access, remained + return noIntervalRanges && !haveNullVal } -// getEqOrInColOffset checks if the expression is a eq function that one side is constant or correlated column -// and another is column. 
-func isColEqCorColOrConstant(filter expression.Expression, col *expression.Column) bool { - f, ok := filter.(*expression.ScalarFunction) - if !ok || f.FuncName.L != ast.EQ { - return false - } - if c, ok := f.GetArgs()[0].(*expression.Column); ok { - if _, ok := f.GetArgs()[1].(*expression.Constant); ok { - if col.Equal(nil, c) { - return true - } - } - if _, ok := f.GetArgs()[1].(*expression.CorrelatedColumn); ok { - if col.Equal(nil, c) { - return true - } - } - } - if c, ok := f.GetArgs()[1].(*expression.Column); ok { - if _, ok := f.GetArgs()[0].(*expression.Constant); ok { - if col.Equal(nil, c) { - return true - } - } - if _, ok := f.GetArgs()[0].(*expression.CorrelatedColumn); ok { - if col.Equal(nil, c) { - return true - } - } - } - return false -} - -func (ds *DataSource) getPKIsHandleCol() *expression.Column { - if !ds.tableInfo.PKIsHandle { +func getPKIsHandleColFromSchema(cols []*model.ColumnInfo, schema *expression.Schema, pkIsHandle bool) *expression.Column { + if !pkIsHandle { // If the PKIsHandle is false, return the ExtraHandleColumn. - for i, col := range ds.Columns { + for i, col := range cols { if col.ID == model.ExtraHandleID { - return ds.schema.Columns[i] + return schema.Columns[i] } } return nil } - for i, col := range ds.Columns { + for i, col := range cols { if mysql.HasPriKeyFlag(col.Flag) { - return ds.schema.Columns[i] + return schema.Columns[i] } } return nil } +func (ds *DataSource) getPKIsHandleCol() *expression.Column { + return getPKIsHandleColFromSchema(ds.Columns, ds.schema, ds.tableInfo.PKIsHandle) +} + +func (p *LogicalIndexScan) getPKIsHandleCol() *expression.Column { + // We cannot use p.Source.getPKIsHandleCol() here, + // Because we may re-prune p.Columns and p.schema during the transformation. + // That will make p.Columns different from p.Source.Columns. 
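// A compact restatement of getPKIsHandleColFromSchema above (hypothetical
// helper): a column is usable as the handle either because it is the hidden
// ExtraHandleID column, or because PKIsHandle is set and the column carries
// the primary-key flag.
func isHandleColumnSketch(col *model.ColumnInfo, pkIsHandle bool) bool {
	if !pkIsHandle {
		return col.ID == model.ExtraHandleID
	}
	return mysql.HasPriKeyFlag(col.Flag)
}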
+ return getPKIsHandleColFromSchema(p.Columns, p.schema, p.Source.tableInfo.PKIsHandle) +} + func (ds *DataSource) getHandleCol() *expression.Column { if ds.handleCol != nil { return ds.handleCol diff --git a/planner/core/logical_plans_test.go b/planner/core/logical_plans_test.go index aa225a3f0b732..c083566121eef 100644 --- a/planner/core/logical_plans_test.go +++ b/planner/core/logical_plans_test.go @@ -22,6 +22,7 @@ import ( "github.com/pingcap/parser/model" "github.com/pingcap/parser/mysql" "github.com/pingcap/tidb/expression" + "github.com/pingcap/tidb/planner/util" "github.com/pingcap/tidb/sessionctx" "github.com/pingcap/tidb/types" "github.com/pingcap/tidb/util/testleak" @@ -183,14 +184,14 @@ func (s *testUnitTestSuit) TestIndexPathSplitCorColCond(c *C) { c.Assert(err, IsNil, comment) trueFilters = append(trueFilters, trueFilter) } - path := accessPath{ - eqCondCount: 0, - tableFilters: trueFilters, - idxCols: expression.FindPrefixOfIndex(totalSchema.Columns, tt.idxColIDs), - idxColLens: tt.idxColLens, + path := util.AccessPath{ + EqCondCount: 0, + TableFilters: trueFilters, + IdxCols: expression.FindPrefixOfIndex(totalSchema.Columns, tt.idxColIDs), + IdxColLens: tt.idxColLens, } - access, remained := path.splitCorColAccessCondFromFilters(path.eqCondCount) + access, remained := path.SplitCorColAccessCondFromFilters(path.EqCondCount) c.Assert(fmt.Sprintf("%s", access), Equals, tt.access, comment) c.Assert(fmt.Sprintf("%s", remained), Equals, tt.remained, comment) } diff --git a/planner/core/memtable_predicate_extractor.go b/planner/core/memtable_predicate_extractor.go new file mode 100644 index 0000000000000..de03644cc2890 --- /dev/null +++ b/planner/core/memtable_predicate_extractor.go @@ -0,0 +1,201 @@ +// Copyright 2019 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package core + +import ( + "fmt" + "strings" + + "github.com/pingcap/parser/ast" + "github.com/pingcap/tidb/expression" + "github.com/pingcap/tidb/types" + "github.com/pingcap/tidb/util/set" +) + +// MemTablePredicateExtractor is used to extract some predicates from the `WHERE` clause +// and push the predicates down to the data-retrieving stage when reading a memory table. +// +// e.g: +// SELECT * FROM cluster_config WHERE type='tikv' AND address='192.168.1.9:2379' +// We must request all components in the cluster via HTTP API for retrieving +// configurations and filter them by the `type/address` columns. +// +// The purpose of defining a `MemTablePredicateExtractor` is to optimize this: +// 1. Define a `ClusterConfigTablePredicateExtractor` +// 2. Extract the `type/address` columns in the logical optimization stage and save them in fields. +// 3. Pass the extractor to the `ClusterConfigReaderExec` executor +// 4.
The executor sends requests to the target components instead of all of the components +type MemTablePredicateExtractor interface { + // Extract extracts predicates which can be pushed down and returns the remaining predicates + Extract(*expression.Schema, []*types.FieldName, []expression.Expression) (remained []expression.Expression) +} + +// extractHelper contains some common utility functions for all extractors. +// We define an individual struct instead of a bunch of un-exported functions +// to avoid polluting the global scope of the current package. +type extractHelper struct{} + +func (helper extractHelper) extractColInConsExpr(extractCols map[int64]*types.FieldName, expr *expression.ScalarFunction) (string, []types.Datum) { + args := expr.GetArgs() + col, isCol := args[0].(*expression.Column) + if !isCol { + return "", nil + } + name, found := extractCols[col.UniqueID] + if !found { + return "", nil + } + // All expressions in IN must be constants + // SELECT * FROM t1 WHERE c IN ('1', '2') + var results []types.Datum + for _, arg := range args[1:] { + constant, ok := arg.(*expression.Constant) + if !ok || constant.DeferredExpr != nil || constant.ParamMarker != nil { + return "", nil + } + results = append(results, constant.Value) + } + return name.ColName.L, results +} + +func (helper extractHelper) extractColEqConsExpr(extractCols map[int64]*types.FieldName, expr *expression.ScalarFunction) (string, []types.Datum) { + args := expr.GetArgs() + var col *expression.Column + var colIdx int + // c = 'rhs' + // 'lhs' = c + for i := 0; i < 2; i++ { + var isCol bool + col, isCol = args[i].(*expression.Column) + if isCol { + colIdx = i + break + } + } + if col == nil { + return "", nil + } + + name, found := extractCols[col.UniqueID] + if !found { + return "", nil + } + // The `lhs/rhs` of the EQ expression must be a constant + // SELECT * FROM t1 WHERE c='rhs' + // SELECT * FROM t1 WHERE 'lhs'=c + constant, ok := args[1-colIdx].(*expression.Constant) + if !ok || constant.DeferredExpr != nil || constant.ParamMarker != nil { + return "", nil + } + return name.ColName.L, []types.Datum{constant.Value} +} + +func (helper extractHelper) intersection(lhs set.StringSet, datums []types.Datum, toLower bool) set.StringSet { + tmpNodeTypes := set.NewStringSet() + for _, datum := range datums { + var s string + if toLower { + s = strings.ToLower(datum.GetString()) + } else { + s = datum.GetString() + } + tmpNodeTypes.Insert(s) + } + if len(lhs) > 0 { + return lhs.Intersection(tmpNodeTypes) + } + return tmpNodeTypes +} + +// ClusterConfigTableExtractor is used to extract some predicates of `cluster_config` +type ClusterConfigTableExtractor struct { + extractHelper + + // SkipRequest means the WHERE clause is always false and we don't need to request any component + SkipRequest bool + + // NodeTypes represents all component types we should send requests to. + // e.g: + // 1. SELECT * FROM cluster_config WHERE type='tikv' + // 2. SELECT * FROM cluster_config WHERE type in ('tikv', 'tidb') + NodeTypes set.StringSet + + // Addresses represents all component addresses we should send requests to. + // e.g: + // 1. SELECT * FROM cluster_config WHERE address='192.168.1.7:2379' + // 2.
SELECT * FROM cluster_config WHERE address in ('192.168.1.7:2379', '192.168.1.9:2379') + Addresses set.StringSet +} + +// Extract implements the MemTablePredicateExtractor Extract interface +func (e *ClusterConfigTableExtractor) Extract(schema *expression.Schema, names []*types.FieldName, predicates []expression.Expression) []expression.Expression { + remained := make([]expression.Expression, 0, len(predicates)) + // All columns can be pushed down to the memory table `cluster_config` + const ( + ColNameType = "type" + ColNameAddress = "address" + ) + extractCols := make(map[int64]*types.FieldName) + for i, name := range names { + if ln := name.ColName.L; ln == ColNameType || ln == ColNameAddress { + extractCols[schema.Columns[i].UniqueID] = name + } + } + // We use the column name literal (local constant) to find the column in `names` + // instead of using a global constant. So the assumption (named `type/address`) + // may not be satisfied if the column name has been changed in the future. + // The following assertion is used to make sure our assumption is not + // broken (or to hint the author who refactors this part to update it here too). + if len(extractCols) != 2 { + panic(fmt.Sprintf("push down columns `type/address` not found in schema, got: %+v", extractCols)) + } + + skipRequest := false + nodeTypes := set.NewStringSet() + addresses := set.NewStringSet() + + // We should use the INTERSECTION of the sets because the predicates form a CNF array + for _, expr := range predicates { + var colName string + var datums []types.Datum + switch x := expr.(type) { + case *expression.ScalarFunction: + switch x.FuncName.L { + case ast.EQ: + colName, datums = e.extractColEqConsExpr(extractCols, x) + case ast.In: + colName, datums = e.extractColInConsExpr(extractCols, x) + } + } + switch colName { + case ColNameType: + nodeTypes = e.intersection(nodeTypes, datums, true) + skipRequest = len(nodeTypes) == 0 + case ColNameAddress: + addresses = e.intersection(addresses, datums, false) + skipRequest = len(addresses) == 0 + default: + remained = append(remained, expr) + } + // There is no data if the low-level executor skips the request, so the filters can be dropped + if skipRequest { + remained = remained[:0] + break + } + } + e.SkipRequest = skipRequest + e.NodeTypes = nodeTypes + e.Addresses = addresses + return remained +} diff --git a/planner/core/memtable_predicate_extractor_test.go b/planner/core/memtable_predicate_extractor_test.go new file mode 100644 index 0000000000000..3c76e07294397 --- /dev/null +++ b/planner/core/memtable_predicate_extractor_test.go @@ -0,0 +1,217 @@ +// Copyright 2019 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package core_test + +import ( + "context" + + .
"github.com/pingcap/check" + "github.com/pingcap/parser" + "github.com/pingcap/tidb/domain" + "github.com/pingcap/tidb/kv" + plannercore "github.com/pingcap/tidb/planner/core" + "github.com/pingcap/tidb/session" + "github.com/pingcap/tidb/store/mockstore" + "github.com/pingcap/tidb/util/set" +) + +var _ = Suite(&extractorSuite{}) + +type extractorSuite struct { + store kv.Storage + dom *domain.Domain +} + +func (s *extractorSuite) SetUpSuite(c *C) { + store, err := mockstore.NewMockTikvStore() + c.Assert(err, IsNil) + c.Assert(store, NotNil) + + session.SetSchemaLease(0) + session.DisableStats4Test() + dom, err := session.BootstrapSession(store) + c.Assert(err, IsNil) + c.Assert(dom, NotNil) + + s.store = store + s.dom = dom +} + +func (s *extractorSuite) TearDownSuite(c *C) { + s.dom.Close() + s.store.Close() +} + +func (s *extractorSuite) TestClusterConfigTableExtractor(c *C) { + se, err := session.CreateSession4Test(s.store) + c.Assert(err, IsNil) + + parser := parser.New() + var cases = []struct { + sql string + nodeTypes set.StringSet + addresses set.StringSet + skipRequest bool + }{ + { + sql: "select * from information_schema.cluster_config", + nodeTypes: nil, + addresses: nil, + }, + { + sql: "select * from information_schema.cluster_config where type='tikv'", + nodeTypes: set.NewStringSet("tikv"), + addresses: set.NewStringSet(), + }, + { + sql: "select * from information_schema.cluster_config where 'tikv'=type", + nodeTypes: set.NewStringSet("tikv"), + addresses: set.NewStringSet(), + }, + { + sql: "select * from information_schema.cluster_config where 'TiKV'=type", + nodeTypes: set.NewStringSet("tikv"), + addresses: set.NewStringSet(), + }, + { + sql: "select * from information_schema.cluster_config where type in ('tikv', 'pd')", + nodeTypes: set.NewStringSet("tikv", "pd"), + addresses: set.NewStringSet(), + }, + { + sql: "select * from information_schema.cluster_config where type in ('tikv', 'pd') and address='123.1.1.2:1234'", + nodeTypes: set.NewStringSet("tikv", "pd"), + addresses: set.NewStringSet("123.1.1.2:1234"), + }, + { + sql: "select * from information_schema.cluster_config where type in ('tikv', 'pd') and address in ('123.1.1.2:1234', '123.1.1.4:1234')", + nodeTypes: set.NewStringSet("tikv", "pd"), + addresses: set.NewStringSet("123.1.1.2:1234", "123.1.1.4:1234"), + }, + { + sql: "select * from information_schema.cluster_config where type='tikv' and address in ('123.1.1.2:1234', '123.1.1.4:1234')", + nodeTypes: set.NewStringSet("tikv"), + addresses: set.NewStringSet("123.1.1.2:1234", "123.1.1.4:1234"), + }, + { + sql: "select * from information_schema.cluster_config where type='tikv' and address='123.1.1.4:1234'", + nodeTypes: set.NewStringSet("tikv"), + addresses: set.NewStringSet("123.1.1.4:1234"), + }, + { + sql: "select * from information_schema.cluster_config where type='tikv' and address='123.1.1.4:1234'", + nodeTypes: set.NewStringSet("tikv"), + addresses: set.NewStringSet("123.1.1.4:1234"), + }, + { + sql: "select * from information_schema.cluster_config where type='tikv' and address='cNs2dm.tikv.pingcap.com:1234'", + nodeTypes: set.NewStringSet("tikv"), + addresses: set.NewStringSet("cNs2dm.tikv.pingcap.com:1234"), + }, + { + sql: "select * from information_schema.cluster_config where type='TIKV' and address='cNs2dm.tikv.pingcap.com:1234'", + nodeTypes: set.NewStringSet("tikv"), + addresses: set.NewStringSet("cNs2dm.tikv.pingcap.com:1234"), + }, + { + sql: "select * from information_schema.cluster_config where type='tikv' and type='pd'", + nodeTypes: 
set.NewStringSet(), + addresses: set.NewStringSet(), + skipRequest: true, + }, + { + sql: "select * from information_schema.cluster_config where type='tikv' and type in ('pd', 'tikv')", + nodeTypes: set.NewStringSet("tikv"), + addresses: set.NewStringSet(), + }, + { + sql: "select * from information_schema.cluster_config where type='tikv' and type in ('pd', 'tidb')", + nodeTypes: set.NewStringSet(), + addresses: set.NewStringSet(), + skipRequest: true, + }, + { + sql: "select * from information_schema.cluster_config where type in ('tikv', 'tidb') and type in ('pd', 'tidb')", + nodeTypes: set.NewStringSet("tidb"), + addresses: set.NewStringSet(), + }, + { + sql: "select * from information_schema.cluster_config where address='123.1.1.4:1234' and address='123.1.1.5:1234'", + nodeTypes: set.NewStringSet(), + addresses: set.NewStringSet(), + skipRequest: true, + }, + { + sql: "select * from information_schema.cluster_config where address='123.1.1.4:1234' and address in ('123.1.1.5:1234', '123.1.1.4:1234')", + nodeTypes: set.NewStringSet(), + addresses: set.NewStringSet("123.1.1.4:1234"), + }, + { + sql: "select * from information_schema.cluster_config where address='123.1.1.4:1234' and address in ('123.1.1.5:1234', '123.1.1.6:1234')", + nodeTypes: set.NewStringSet(), + addresses: set.NewStringSet(), + skipRequest: true, + }, + { + sql: "select * from information_schema.cluster_config where address in ('123.1.1.5:1234', '123.1.1.4:1234') and address in ('123.1.1.5:1234', '123.1.1.6:1234')", + nodeTypes: set.NewStringSet(), + addresses: set.NewStringSet("123.1.1.5:1234"), + }, + { + sql: `select * from information_schema.cluster_config + where address in ('123.1.1.5:1234', '123.1.1.4:1234') + and address in ('123.1.1.5:1234', '123.1.1.6:1234') + and type in ('tikv', 'tidb') + and type in ('pd', 'tidb')`, + nodeTypes: set.NewStringSet("tidb"), + addresses: set.NewStringSet("123.1.1.5:1234"), + }, + { + sql: `select * from information_schema.cluster_config + where address in ('123.1.1.5:1234', '123.1.1.4:1234') + and address in ('123.1.1.5:1234', '123.1.1.6:1234') + and address in ('123.1.1.6:1234', '123.1.1.7:1234') + and address in ('123.1.1.7:1234', '123.1.1.8:1234')`, + nodeTypes: set.NewStringSet(), + addresses: set.NewStringSet(), + skipRequest: true, + }, + } + for _, ca := range cases { + stmt, err := parser.ParseOneStmt(ca.sql, "", "") + c.Assert(err, IsNil) + + ctx := context.Background() + builder := plannercore.NewPlanBuilder(se, s.dom.InfoSchema(), &plannercore.BlockHintProcessor{}) + plan, err := builder.Build(ctx, stmt) + c.Assert(err, IsNil) + + logicalPlan, err := plannercore.LogicalOptimize(ctx, builder.GetOptFlag(), plan.(plannercore.LogicalPlan)) + c.Assert(err, IsNil) + + // Obtain the leaf plan + leafPlan := logicalPlan + for len(leafPlan.Children()) > 0 { + leafPlan = leafPlan.Children()[0] + } + + logicalMemTable := leafPlan.(*plannercore.LogicalMemTable) + c.Assert(logicalMemTable.Extractor, NotNil) + + clusterConfigExtractor := logicalMemTable.Extractor.(*plannercore.ClusterConfigTableExtractor) + c.Assert(clusterConfigExtractor.NodeTypes, DeepEquals, ca.nodeTypes, Commentf("SQL: %v", ca.sql)) + c.Assert(clusterConfigExtractor.Addresses, DeepEquals, ca.addresses, Commentf("SQL: %v", ca.sql)) + c.Assert(clusterConfigExtractor.SkipRequest, DeepEquals, ca.skipRequest, Commentf("SQL: %v", ca.sql)) + } +} diff --git a/planner/core/optimizer_test.go b/planner/core/optimizer_test.go new file mode 100644 index 0000000000000..1a8b91ec682e2 --- /dev/null +++ 
diff --git a/planner/core/optimizer_test.go b/planner/core/optimizer_test.go
new file mode 100644
index 0000000000000..1a8b91ec682e2
--- /dev/null
+++ b/planner/core/optimizer_test.go
@@ -0,0 +1,18 @@
+// Copyright 2019 PingCAP, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package core
+
+// LogicalOptimize exports the `logicalOptimize` function for test packages; it
+// doesn't affect the normal package boundary or Go's access control (tricky ^_^).
+var LogicalOptimize = logicalOptimize
diff --git a/planner/core/pb_to_plan.go b/planner/core/pb_to_plan.go
new file mode 100644
index 0000000000000..1302254686e49
--- /dev/null
+++ b/planner/core/pb_to_plan.go
@@ -0,0 +1,220 @@
+// Copyright 2019 PingCAP, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package core
+
+import (
+	"github.com/pingcap/errors"
+	"github.com/pingcap/parser/model"
+	"github.com/pingcap/tidb/expression"
+	"github.com/pingcap/tidb/expression/aggregation"
+	"github.com/pingcap/tidb/infoschema"
+	"github.com/pingcap/tidb/sessionctx"
+	"github.com/pingcap/tidb/types"
+	"github.com/pingcap/tipb/go-tipb"
+)
+
+// PBPlanBuilder is used to build a physical plan from DAG protocol buffers.
+type PBPlanBuilder struct {
+	sctx sessionctx.Context
+	tps  []*types.FieldType
+	is   infoschema.InfoSchema
+}
+
+// NewPBPlanBuilder creates a new pb plan builder.
+func NewPBPlanBuilder(sctx sessionctx.Context, is infoschema.InfoSchema) *PBPlanBuilder {
+	return &PBPlanBuilder{sctx: sctx, is: is}
+}
+
+// Build builds a physical plan from DAG protocol buffers.
+func (b *PBPlanBuilder) Build(executors []*tipb.Executor) (p PhysicalPlan, err error) {
+	var src PhysicalPlan
+	for i := 0; i < len(executors); i++ {
+		curr, err := b.pbToPhysicalPlan(executors[i])
+		if err != nil {
+			return nil, errors.Trace(err)
+		}
+		curr.SetChildren(src)
+		src = curr
+	}
+	return src, nil
+}
+
+func (b *PBPlanBuilder) pbToPhysicalPlan(e *tipb.Executor) (p PhysicalPlan, err error) {
+	switch e.Tp {
+	case tipb.ExecType_TypeTableScan:
+		p, err = b.pbToTableScan(e)
+	case tipb.ExecType_TypeSelection:
+		p, err = b.pbToSelection(e)
+	case tipb.ExecType_TypeTopN:
+		p, err = b.pbToTopN(e)
+	case tipb.ExecType_TypeLimit:
+		p, err = b.pbToLimit(e)
+	case tipb.ExecType_TypeAggregation:
+		p, err = b.pbToAgg(e, false)
+	case tipb.ExecType_TypeStreamAgg:
+		p, err = b.pbToAgg(e, true)
+	default:
+		// TODO: Support other types.
+		err = errors.Errorf("exec type %v is not supported yet", e.GetTp())
+	}
+	return p, err
+}
+
+func (b *PBPlanBuilder) pbToTableScan(e *tipb.Executor) (PhysicalPlan, error) {
+	tblScan := e.TblScan
+	tbl, ok := b.is.TableByID(tblScan.TableId)
+	if !ok {
+		return nil, infoschema.ErrTableNotExists.GenWithStack("Table which ID = %d does not exist.", tblScan.TableId)
+	}
+	// Currently, only cluster tables are supported.
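// Reviewer note (illustration, not part of the patch): Build above consumes the
// executor list in protocol order and links each decoded operator on top of
// the previous one via SetChildren, so a DAG of
//
//	[TableScan, Selection, TopN]
//
// decodes into the chain TopN -> Selection -> TableScan. A usage sketch,
// assuming a decoded *tipb.DAGRequest named dagReq:
//
//	builder := NewPBPlanBuilder(sctx, is)
//	plan, err := builder.Build(dagReq.Executors)
//	if err != nil {
//	    return err
//	}
//	// `plan` is the root operator; walking Children()[0] leads back to the scan.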
+	if !tbl.Type().IsClusterTable() {
+		return nil, errors.Errorf("table %s is not a cluster table", tbl.Meta().Name.L)
+	}
+	columns, err := b.convertColumnInfo(tbl.Meta(), tblScan.Columns)
+	if err != nil {
+		return nil, err
+	}
+	schema := b.buildTableScanSchema(tbl.Meta(), columns)
+	p := PhysicalMemTable{
+		Table:   tbl.Meta(),
+		Columns: columns,
+	}.Init(b.sctx, nil, 0)
+	p.SetSchema(schema)
+	return p, nil
+}
+
+func (b *PBPlanBuilder) buildTableScanSchema(tblInfo *model.TableInfo, columns []*model.ColumnInfo) *expression.Schema {
+	schema := expression.NewSchema(make([]*expression.Column, 0, len(columns))...)
+	for _, col := range tblInfo.Columns {
+		for _, colInfo := range columns {
+			if col.ID != colInfo.ID {
+				continue
+			}
+			newCol := &expression.Column{
+				UniqueID: b.sctx.GetSessionVars().AllocPlanColumnID(),
+				ID:       col.ID,
+				RetType:  &col.FieldType,
+			}
+			schema.Append(newCol)
+		}
+	}
+	return schema
+}
+
+func (b *PBPlanBuilder) pbToSelection(e *tipb.Executor) (PhysicalPlan, error) {
+	conds, err := expression.PBToExprs(e.Selection.Conditions, b.tps, b.sctx.GetSessionVars().StmtCtx)
+	if err != nil {
+		return nil, err
+	}
+	p := PhysicalSelection{
+		Conditions: conds,
+	}.Init(b.sctx, nil, 0)
+	return p, nil
+}
+
+func (b *PBPlanBuilder) pbToTopN(e *tipb.Executor) (PhysicalPlan, error) {
+	topN := e.TopN
+	sc := b.sctx.GetSessionVars().StmtCtx
+	byItems := make([]*ByItems, 0, len(topN.OrderBy))
+	for _, item := range topN.OrderBy {
+		expr, err := expression.PBToExpr(item.Expr, b.tps, sc)
+		if err != nil {
+			return nil, errors.Trace(err)
+		}
+		byItems = append(byItems, &ByItems{Expr: expr, Desc: item.Desc})
+	}
+	p := PhysicalTopN{
+		ByItems: byItems,
+		Count:   topN.Limit,
+	}.Init(b.sctx, nil, 0)
+	return p, nil
+}
+
+func (b *PBPlanBuilder) pbToLimit(e *tipb.Executor) (PhysicalPlan, error) {
+	p := PhysicalLimit{
+		Count: e.Limit.Limit,
+	}.Init(b.sctx, nil, 0)
+	return p, nil
+}
+
+func (b *PBPlanBuilder) pbToAgg(e *tipb.Executor, isStreamAgg bool) (PhysicalPlan, error) {
+	aggFuncs, groupBys, err := b.getAggInfo(e)
+	if err != nil {
+		return nil, errors.Trace(err)
+	}
+	schema := b.buildAggSchema(aggFuncs, groupBys)
+	baseAgg := basePhysicalAgg{
+		AggFuncs:     aggFuncs,
+		GroupByItems: groupBys,
+	}
+	baseAgg.schema = schema
+	var partialAgg PhysicalPlan
+	if isStreamAgg {
+		partialAgg = baseAgg.initForStream(b.sctx, nil, 0)
+	} else {
+		partialAgg = baseAgg.initForHash(b.sctx, nil, 0)
+	}
+	return partialAgg, nil
+}
+
+func (b *PBPlanBuilder) buildAggSchema(aggFuncs []*aggregation.AggFuncDesc, groupBys []expression.Expression) *expression.Schema {
+	schema := expression.NewSchema(make([]*expression.Column, 0, len(aggFuncs)+len(groupBys))...)
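// Reviewer note (not part of the patch): the capacity above reserves room for
// the group-by items as well, but only the aggregate output columns are
// appended to the schema in the loop that follows.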
+ for _, agg := range aggFuncs { + newCol := &expression.Column{ + UniqueID: b.sctx.GetSessionVars().AllocPlanColumnID(), + RetType: agg.RetTp, + } + schema.Append(newCol) + } + return schema +} + +func (b *PBPlanBuilder) getAggInfo(executor *tipb.Executor) ([]*aggregation.AggFuncDesc, []expression.Expression, error) { + var err error + aggFuncs := make([]*aggregation.AggFuncDesc, 0, len(executor.Aggregation.AggFunc)) + sc := b.sctx.GetSessionVars().StmtCtx + for _, expr := range executor.Aggregation.AggFunc { + aggFunc, err := aggregation.PBExprToAggFuncDesc(sc, expr, b.tps) + if err != nil { + return nil, nil, errors.Trace(err) + } + aggFuncs = append(aggFuncs, aggFunc) + } + groupBys, err := expression.PBToExprs(executor.Aggregation.GetGroupBy(), b.tps, b.sctx.GetSessionVars().StmtCtx) + if err != nil { + return nil, nil, errors.Trace(err) + } + return aggFuncs, groupBys, nil +} + +func (b *PBPlanBuilder) convertColumnInfo(tblInfo *model.TableInfo, pbColumns []*tipb.ColumnInfo) ([]*model.ColumnInfo, error) { + columns := make([]*model.ColumnInfo, 0, len(pbColumns)) + tps := make([]*types.FieldType, 0, len(pbColumns)) + for _, col := range pbColumns { + found := false + for _, colInfo := range tblInfo.Columns { + if col.ColumnId == colInfo.ID { + columns = append(columns, colInfo) + tps = append(tps, colInfo.FieldType.Clone()) + found = true + break + } + } + if !found { + return nil, errors.Errorf("Column ID %v of table %v not found", col.ColumnId, tblInfo.Name.L) + } + } + b.tps = tps + return columns, nil +} diff --git a/planner/core/physical_plan_test.go b/planner/core/physical_plan_test.go index db3d35a78584a..5257289563420 100644 --- a/planner/core/physical_plan_test.go +++ b/planner/core/physical_plan_test.go @@ -39,29 +39,33 @@ var _ = SerialSuites(&testPlanSerialSuite{}) type testPlanSuiteBase struct { *parser.Parser is infoschema.InfoSchema +} - testData testutil.TestData +func (s *testPlanSuiteBase) SetUpSuite(c *C) { + s.is = infoschema.MockInfoSchema([]*model.TableInfo{core.MockSignedTable(), core.MockUnsignedTable()}) + s.Parser = parser.New() + s.Parser.EnableWindowFunc(true) } -type testPlanSuite struct { +type testPlanSerialSuite struct { testPlanSuiteBase } -type testPlanSerialSuite struct { +type testPlanSuite struct { testPlanSuiteBase + + testData testutil.TestData } -func (s *testPlanSuiteBase) SetUpSuite(c *C) { - s.is = infoschema.MockInfoSchema([]*model.TableInfo{core.MockSignedTable(), core.MockUnsignedTable()}) - s.Parser = parser.New() - s.Parser.EnableWindowFunc(true) +func (s *testPlanSuite) SetUpSuite(c *C) { + s.testPlanSuiteBase.SetUpSuite(c) var err error s.testData, err = testutil.LoadTestSuiteData("testdata", "plan_suite") c.Assert(err, IsNil) } -func (s *testPlanSuiteBase) TearDownSuite(c *C) { +func (s *testPlanSuite) TearDownSuite(c *C) { c.Assert(s.testData.GenerateOutputIfNeeded(), IsNil) } diff --git a/planner/core/physical_plans.go b/planner/core/physical_plans.go index 5a91ca6f93026..23b17a1e26786 100644 --- a/planner/core/physical_plans.go +++ b/planner/core/physical_plans.go @@ -67,9 +67,18 @@ type PhysicalTableReader struct { StoreType kv.StoreType } -// GetPhysicalReader returns PhysicalTableReader for logical TableGather. -func (tg *TableGather) GetPhysicalReader(schema *expression.Schema, stats *property.StatsInfo, props ...*property.PhysicalProperty) *PhysicalTableReader { - reader := PhysicalTableReader{}.Init(tg.ctx, tg.blockOffset) +// GetPhysicalTableReader returns PhysicalTableReader for logical TiKVSingleGather. 
+func (sg *TiKVSingleGather) GetPhysicalTableReader(schema *expression.Schema, stats *property.StatsInfo, props ...*property.PhysicalProperty) *PhysicalTableReader {
+	reader := PhysicalTableReader{}.Init(sg.ctx, sg.blockOffset)
+	reader.stats = stats
+	reader.SetSchema(schema)
+	reader.childrenReqProps = props
+	return reader
+}
+
+// GetPhysicalIndexReader returns PhysicalIndexReader for logical TiKVSingleGather.
+func (sg *TiKVSingleGather) GetPhysicalIndexReader(schema *expression.Schema, stats *property.StatsInfo, props ...*property.PhysicalProperty) *PhysicalIndexReader {
+	reader := PhysicalIndexReader{}.Init(sg.ctx, sg.blockOffset)
 	reader.stats = stats
 	reader.SetSchema(schema)
 	reader.childrenReqProps = props
@@ -94,6 +103,27 @@ type PhysicalIndexReader struct {
 	OutputColumns []*expression.Column
 }
 
+// SetSchema overrides PhysicalPlan SetSchema interface.
+func (p *PhysicalIndexReader) SetSchema(_ *expression.Schema) {
+	if p.indexPlan != nil {
+		p.IndexPlans = flattenPushDownPlan(p.indexPlan)
+		switch p.indexPlan.(type) {
+		case *PhysicalHashAgg, *PhysicalStreamAgg:
+			p.schema = p.indexPlan.Schema()
+		default:
+			is := p.IndexPlans[0].(*PhysicalIndexScan)
+			p.schema = is.dataSourceSchema
+		}
+		p.OutputColumns = p.schema.Clone().Columns
+	}
+}
+
+// SetChildren overrides PhysicalPlan SetChildren interface.
+func (p *PhysicalIndexReader) SetChildren(children ...PhysicalPlan) {
+	p.indexPlan = children[0]
+	p.SetSchema(nil)
+}
+
 // PushedDownLimit is the limit operator pushed down into PhysicalIndexLookUpReader.
 type PushedDownLimit struct {
 	Offset uint64
@@ -172,8 +202,8 @@ type PhysicalIndexScan struct {
 type PhysicalMemTable struct {
 	physicalSchemaProducer
-	DBName model.CIStr
-	Table  *model.TableInfo
+	Table   *model.TableInfo
+	Columns []*model.ColumnInfo
 }
 
 // PhysicalTableScan represents a table scan plan.
diff --git a/planner/core/plan.go b/planner/core/plan.go
index 82814cbb399b5..6cdf570f7072e 100644
--- a/planner/core/plan.go
+++ b/planner/core/plan.go
@@ -95,7 +95,10 @@ type LogicalPlan interface {
 	findBestTask(prop *property.PhysicalProperty) (task, error)
 
 	// BuildKeyInfo will collect the information of unique keys into schema.
-	BuildKeyInfo()
+	// Because this method is also used by the cascades planner, we cannot use
+	// things like `p.schema` or `p.children` inside it. We should use the
+	// `selfSchema` and `childSchema` arguments instead.
+	BuildKeyInfo(selfSchema *expression.Schema, childSchema []*expression.Schema)
 
 	// pushDownTopN will push down the topN or limit operator during logical optimization.
 	pushDownTopN(topN *LogicalTopN) LogicalPlan
@@ -163,6 +166,9 @@ type PhysicalPlan interface {
 
 	// Stats returns the StatsInfo of the plan.
 	Stats() *property.StatsInfo
+
+	// ExplainNormalizedInfo returns operator normalized information for generating digest.
+	ExplainNormalizedInfo() string
 }
 
 type baseLogicalPlan struct {
@@ -196,6 +202,11 @@ func (p *basePhysicalPlan) ExplainInfo() string {
 	return ""
 }
 
+// ExplainNormalizedInfo implements PhysicalPlan interface.
+func (p *basePhysicalPlan) ExplainNormalizedInfo() string {
+	return ""
+}
+
 func (p *basePhysicalPlan) GetChildReqProps(idx int) *property.PhysicalProperty {
 	return p.childrenReqProps[idx]
 }
@@ -210,17 +221,44 @@ func (p *baseLogicalPlan) storeTask(prop *property.PhysicalProperty, task task)
 	p.taskMap[string(key)] = task
 }
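// Reviewer note (illustration, not part of the patch): with the schemas passed
// in explicitly, the same BuildKeyInfo implementations can serve both planners.
// The volcano-style driver walks the tree bottom-up and supplies them (this
// mirrors buildKeyInfo in rule_build_key_info.go later in this patch):
//
//	func buildKeyInfo(lp LogicalPlan) {
//	    for _, child := range lp.Children() {
//	        buildKeyInfo(child)
//	    }
//	    childSchema := make([]*expression.Schema, len(lp.Children()))
//	    for i, child := range lp.Children() {
//	        childSchema[i] = child.Schema()
//	    }
//	    lp.BuildKeyInfo(lp.Schema(), childSchema)
//	}
//
// whereas the cascades planner can pass in schemas taken from group expressions.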
-// BuildKeyInfo implements LogicalPlan BuildKeyInfo interface.
-func (p *baseLogicalPlan) BuildKeyInfo() {
-	for _, child := range p.children {
-		child.BuildKeyInfo()
+// HasMaxOneRow returns whether the LogicalPlan will output at most one row.
+func HasMaxOneRow(p LogicalPlan, childMaxOneRow []bool) bool {
+	if len(childMaxOneRow) == 0 {
+		// We need this check because the function is used in both planner/core
+		// and planner/cascades. In the cascades planner, a LogicalPlan may have
+		// no `children`.
+		return false
 	}
-	switch p.self.(type) {
-	case *LogicalLock, *LogicalLimit, *LogicalSort, *LogicalSelection, *LogicalApply, *LogicalProjection:
-		p.maxOneRow = p.children[0].MaxOneRow()
+	switch x := p.(type) {
+	case *LogicalLock, *LogicalLimit, *LogicalSort, *LogicalSelection,
+		*LogicalApply, *LogicalProjection, *LogicalWindow, *LogicalAggregation:
+		return childMaxOneRow[0]
 	case *LogicalMaxOneRow:
-		p.maxOneRow = true
+		return true
+	case *LogicalJoin:
+		switch x.JoinType {
+		case SemiJoin, AntiSemiJoin, LeftOuterSemiJoin, AntiLeftOuterSemiJoin:
+			return childMaxOneRow[0]
+		default:
+			return childMaxOneRow[0] && childMaxOneRow[1]
+		}
 	}
+	return false
+}
+
+// BuildKeyInfo implements LogicalPlan BuildKeyInfo interface.
+func (p *baseLogicalPlan) BuildKeyInfo(selfSchema *expression.Schema, childSchema []*expression.Schema) {
+	childMaxOneRow := make([]bool, len(p.children))
+	for i := range p.children {
+		childMaxOneRow[i] = p.children[i].MaxOneRow()
+	}
+	p.maxOneRow = HasMaxOneRow(p.self, childMaxOneRow)
+}
+
+// BuildKeyInfo implements LogicalPlan BuildKeyInfo interface.
+func (p *logicalSchemaProducer) BuildKeyInfo(selfSchema *expression.Schema, childSchema []*expression.Schema) {
+	selfSchema.Keys = nil
+	p.baseLogicalPlan.BuildKeyInfo(selfSchema, childSchema)
 }
 
 func newBasePlan(ctx sessionctx.Context, tp string, offset int) basePlan {
diff --git a/planner/core/plan_test.go b/planner/core/plan_test.go
new file mode 100644
index 0000000000000..39df6d3652873
--- /dev/null
+++ b/planner/core/plan_test.go
@@ -0,0 +1,210 @@
+// Copyright 2019 PingCAP, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package core_test
+
+import (
+	"strings"
+
+	.
"github.com/pingcap/check" + "github.com/pingcap/tidb/domain" + "github.com/pingcap/tidb/kv" + "github.com/pingcap/tidb/planner/core" + "github.com/pingcap/tidb/util/plancodec" + "github.com/pingcap/tidb/util/testkit" + "github.com/pingcap/tidb/util/testleak" + "github.com/pingcap/tidb/util/testutil" +) + +var _ = Suite(&testPlanNormalize{}) + +type testPlanNormalize struct { + store kv.Storage + dom *domain.Domain + + testData testutil.TestData +} + +func (s *testPlanNormalize) SetUpSuite(c *C) { + testleak.BeforeTest() + store, dom, err := newStoreWithBootstrap() + c.Assert(err, IsNil) + s.store = store + s.dom = dom + + s.testData, err = testutil.LoadTestSuiteData("testdata", "plan_normalized_suite") + c.Assert(err, IsNil) +} + +func (s *testPlanNormalize) TearDownSuite(c *C) { + c.Assert(s.testData.GenerateOutputIfNeeded(), IsNil) + s.dom.Close() + s.store.Close() + testleak.AfterTest(c)() +} + +func (s *testPlanNormalize) TestNormalizedPlan(c *C) { + tk := testkit.NewTestKit(c, s.store) + tk.MustExec("use test") + tk.MustExec("drop table if exists t1,t2") + tk.MustExec("create table t1 (a int key,b int,c int, index (b));") + tk.MustExec("create table t2 (a int key,b int,c int, index (b));") + var input []string + var output []struct { + SQL string + Plan []string + } + s.testData.GetTestCases(c, &input, &output) + for i, tt := range input { + tk.Se.GetSessionVars().PlanID = 0 + tk.MustExec(tt) + info := tk.Se.ShowProcess() + c.Assert(info, NotNil) + p, ok := info.Plan.(core.Plan) + c.Assert(ok, IsTrue) + normalized, _ := core.NormalizePlan(p) + normalizedPlan, err := plancodec.DecodeNormalizedPlan(normalized) + normalizedPlanRows := getPlanRows(normalizedPlan) + c.Assert(err, IsNil) + s.testData.OnRecord(func() { + output[i].SQL = tt + output[i].Plan = normalizedPlanRows + }) + compareStringSlice(c, normalizedPlanRows, output[i].Plan) + } +} + +func (s *testPlanNormalize) TestNormalizedDigest(c *C) { + tk := testkit.NewTestKit(c, s.store) + tk.MustExec("use test") + tk.MustExec("drop table if exists t1,t2") + tk.MustExec("create table t1 (a int key,b int,c int, index (b));") + tk.MustExec("create table t2 (a int key,b int,c int, index (b));") + normalizedDigestCases := []struct { + sql1 string + sql2 string + isSame bool + }{ + { + sql1: "select * from t1;", + sql2: "select * from t2;", + isSame: false, + }, + { // test for tableReader and tableScan. + sql1: "select * from t1 where a<1", + sql2: "select * from t1 where a<2", + isSame: true, + }, + { + sql1: "select * from t1 where a<1", + sql2: "select * from t1 where a=2", + isSame: false, + }, + { // test for point get. + sql1: "select * from t1 where a=3", + sql2: "select * from t1 where a=2", + isSame: true, + }, + { // test for indexLookUp. + sql1: "select * from t1 use index(b) where b=3", + sql2: "select * from t1 use index(b) where b=1", + isSame: true, + }, + { // test for indexReader. + sql1: "select a+1,b+2 from t1 use index(b) where b=3", + sql2: "select a+2,b+3 from t1 use index(b) where b>2", + isSame: true, + }, + { // test for merge join. + sql1: "SELECT /*+ TIDB_SMJ(t1, t2) */ * from t1, t2 where t1.a = t2.a and t1.c>1;", + sql2: "SELECT /*+ TIDB_SMJ(t1, t2) */ * from t1, t2 where t1.a = t2.a and t1.c>2;", + isSame: true, + }, + { // test for indexLookUpJoin. + sql1: "SELECT /*+ TIDB_INLJ(t1, t2) */ * from t1, t2 where t1.a = t2.a and t1.c>1;", + sql2: "SELECT /*+ TIDB_INLJ(t1, t2) */ * from t1, t2 where t1.a = t2.a and t1.c>3;", + isSame: true, + }, + { // test for hashJoin. 
+ sql1: "SELECT /*+ TIDB_HJ(t1, t2) */ * from t1, t2 where t1.a = t2.a and t1.c>1;", + sql2: "SELECT /*+ TIDB_HJ(t1, t2) */ * from t1, t2 where t1.a = t2.a and t1.c>3;", + isSame: true, + }, + { // test for diff join. + sql1: "SELECT /*+ TIDB_HJ(t1, t2) */ * from t1, t2 where t1.a = t2.a and t1.c>1;", + sql2: "SELECT /*+ TIDB_INLJ(t1, t2) */ * from t1, t2 where t1.a = t2.a and t1.c>3;", + isSame: false, + }, + { // test for diff join. + sql1: "SELECT /*+ TIDB_INLJ(t1, t2) */ * from t1, t2 where t1.a = t2.a and t1.c>1;", + sql2: "SELECT /*+ TIDB_SMJ(t1, t2) */ * from t1, t2 where t1.a = t2.a and t1.c>3;", + isSame: false, + }, + { // test for apply. + sql1: "select * from t1 where t1.b > 0 and t1.a in (select sum(t2.b) from t2 where t2.a=t1.a and t2.b is not null and t2.c >1)", + sql2: "select * from t1 where t1.b > 1 and t1.a in (select sum(t2.b) from t2 where t2.a=t1.a and t2.b is not null and t2.c >0)", + isSame: true, + }, + { // test for apply. + sql1: "select * from t1 where t1.b > 0 and t1.a in (select sum(t2.b) from t2 where t2.a=t1.a and t2.b is not null and t2.c >1)", + sql2: "select * from t1 where t1.b > 1 and t1.a in (select sum(t2.b) from t2 where t2.a=t1.a and t2.b is not null)", + isSame: false, + }, + { // test for topN. + sql1: "SELECT * from t1 where a!=1 order by c limit 1", + sql2: "SELECT * from t1 where a!=2 order by c limit 2", + isSame: true, + }, + } + for _, testCase := range normalizedDigestCases { + testNormalizeDigest(tk, c, testCase.sql1, testCase.sql2, testCase.isSame) + } +} + +func testNormalizeDigest(tk *testkit.TestKit, c *C, sql1, sql2 string, isSame bool) { + tk.Se.GetSessionVars().PlanID = 0 + tk.MustQuery(sql1) + info := tk.Se.ShowProcess() + c.Assert(info, NotNil) + physicalPlan, ok := info.Plan.(core.PhysicalPlan) + c.Assert(ok, IsTrue) + normalized1, digest1 := core.NormalizePlan(physicalPlan) + + tk.Se.GetSessionVars().PlanID = 0 + tk.MustQuery(sql2) + info = tk.Se.ShowProcess() + c.Assert(info, NotNil) + physicalPlan, ok = info.Plan.(core.PhysicalPlan) + c.Assert(ok, IsTrue) + normalized2, digest2 := core.NormalizePlan(physicalPlan) + comment := Commentf("sql1: %v, sql2: %v\n%v !=\n%v\n", sql1, sql2, normalized1, normalized2) + if isSame { + c.Assert(normalized1, Equals, normalized2, comment) + c.Assert(digest1, Equals, digest2, comment) + } else { + c.Assert(normalized1 != normalized2, IsTrue, comment) + c.Assert(digest1 != digest2, IsTrue, comment) + } +} + +func getPlanRows(planStr string) []string { + planStr = strings.Replace(planStr, "\t", " ", -1) + return strings.Split(planStr, "\n") +} + +func compareStringSlice(c *C, ss1, ss2 []string) { + c.Assert(len(ss1), Equals, len(ss2)) + for i, s := range ss1 { + c.Assert(s, Equals, ss2[i]) + } +} diff --git a/planner/core/plan_to_pb.go b/planner/core/plan_to_pb.go index 3b07385f9584f..ed1b3448ddd01 100644 --- a/planner/core/plan_to_pb.go +++ b/planner/core/plan_to_pb.go @@ -18,6 +18,7 @@ import ( "github.com/pingcap/parser/model" "github.com/pingcap/tidb/expression" "github.com/pingcap/tidb/expression/aggregation" + "github.com/pingcap/tidb/kv" "github.com/pingcap/tidb/sessionctx" "github.com/pingcap/tidb/table" "github.com/pingcap/tidb/tablecodec" @@ -90,10 +91,9 @@ func (p *PhysicalLimit) ToPB(ctx sessionctx.Context) (*tipb.Executor, error) { // ToPB implements PhysicalPlan ToPB interface. 
func (p *PhysicalTableScan) ToPB(ctx sessionctx.Context) (*tipb.Executor, error) {
-	columns := p.Columns
 	tsExec := &tipb.TableScan{
 		TableId: p.Table.ID,
-		Columns: model.ColumnsToProto(columns, p.Table.PKIsHandle),
+		Columns: model.ColumnsToProto(p.Columns, p.Table.PKIsHandle),
 		Desc:    p.Desc,
 	}
 	err := SetPBColumnsDefaultValue(ctx, tsExec.Columns, p.Columns)
@@ -180,9 +180,12 @@ func SetPBColumnsDefaultValue(ctx sessionctx.Context, pbColumns []*tipb.ColumnIn
 // Some plans are difficult (if possible) to implement streaming, and some are pointless to do so.
 // TODO: Support more kinds of physical plan.
 func SupportStreaming(p PhysicalPlan) bool {
-	switch p.(type) {
-	case *PhysicalTableScan, *PhysicalIndexScan, *PhysicalSelection:
+	switch x := p.(type) {
+	case *PhysicalIndexScan, *PhysicalSelection:
 		return true
+	case *PhysicalTableScan:
+		// TODO: remove this after the TiDB coprocessor supports streaming.
+		return x.StoreType != kv.TiDB
 	}
 	return false
 }
diff --git a/planner/core/planbuilder.go b/planner/core/planbuilder.go
index adbbae9195438..1868cd734a779 100644
--- a/planner/core/planbuilder.go
+++ b/planner/core/planbuilder.go
@@ -33,6 +33,7 @@ import (
 	"github.com/pingcap/tidb/infoschema"
 	"github.com/pingcap/tidb/kv"
 	"github.com/pingcap/tidb/planner/property"
+	"github.com/pingcap/tidb/planner/util"
 	"github.com/pingcap/tidb/sessionctx"
 	"github.com/pingcap/tidb/sessionctx/stmtctx"
 	"github.com/pingcap/tidb/sessionctx/variable"
@@ -584,14 +585,14 @@ func (b *PlanBuilder) detectSelectWindow(sel *ast.SelectStmt) bool {
 	return false
 }
 
-func getPathByIndexName(paths []*accessPath, idxName model.CIStr, tblInfo *model.TableInfo) *accessPath {
-	var tablePath *accessPath
+func getPathByIndexName(paths []*util.AccessPath, idxName model.CIStr, tblInfo *model.TableInfo) *util.AccessPath {
+	var tablePath *util.AccessPath
 	for _, path := range paths {
-		if path.isTablePath {
+		if path.IsTablePath {
 			tablePath = path
 			continue
 		}
-		if path.index.Name.L == idxName.L {
+		if path.Index.Name.L == idxName.L {
 			return path
 		}
 	}
@@ -605,21 +606,26 @@ func isPrimaryIndex(indexName model.CIStr) bool {
 	return indexName.L == "primary"
 }
 
-func (b *PlanBuilder) getPossibleAccessPaths(indexHints []*ast.IndexHint, tblInfo *model.TableInfo, dbName, tblName model.CIStr) ([]*accessPath, error) {
-	publicPaths := make([]*accessPath, 0, len(tblInfo.Indices)+2)
-	publicPaths = append(publicPaths, &accessPath{isTablePath: true, storeType: kv.TiKV})
+func (b *PlanBuilder) getPossibleAccessPaths(indexHints []*ast.IndexHint, tbl table.Table, dbName, tblName model.CIStr) ([]*util.AccessPath, error) {
+	tblInfo := tbl.Meta()
+	publicPaths := make([]*util.AccessPath, 0, len(tblInfo.Indices)+2)
+	tp := kv.TiKV
+	if tbl.Type().IsClusterTable() {
+		tp = kv.TiDB
+	}
+	publicPaths = append(publicPaths, &util.AccessPath{IsTablePath: true, StoreType: tp})
 	if tblInfo.TiFlashReplica != nil && tblInfo.TiFlashReplica.Available {
-		publicPaths = append(publicPaths, &accessPath{isTablePath: true, storeType: kv.TiFlash})
+		publicPaths = append(publicPaths, &util.AccessPath{IsTablePath: true, StoreType: kv.TiFlash})
 	}
 	for _, index := range tblInfo.Indices {
 		if index.State == model.StatePublic {
-			publicPaths = append(publicPaths, &accessPath{index: index})
+			publicPaths = append(publicPaths, &util.AccessPath{Index: index})
 		}
 	}
 	hasScanHint, hasUseOrForce := false, false
-	available := make([]*accessPath, 0, len(publicPaths))
-	ignored := make([]*accessPath, 0, len(publicPaths))
+	available := make([]*util.AccessPath, 0, len(publicPaths))
+	ignored :=
make([]*util.AccessPath, 0, len(publicPaths)) // Extract comment-style index hint like /*+ INDEX(t, idx1, idx2) */. indexHintsLen := len(indexHints) @@ -644,7 +650,7 @@ func (b *PlanBuilder) getPossibleAccessPaths(indexHints []*ast.IndexHint, tblInf if hint.IndexNames == nil && hint.HintType != ast.HintIgnore { if path := getTablePath(publicPaths); path != nil { hasUseOrForce = true - path.forced = true + path.Forced = true available = append(available, path) } } @@ -667,7 +673,7 @@ func (b *PlanBuilder) getPossibleAccessPaths(indexHints []*ast.IndexHint, tblInf // Currently we don't distinguish between "FORCE" and "USE" because // our cost estimation is not reliable. hasUseOrForce = true - path.forced = true + path.Forced = true available = append(available, path) } } @@ -681,25 +687,25 @@ func (b *PlanBuilder) getPossibleAccessPaths(indexHints []*ast.IndexHint, tblInf // If we have got "FORCE" or "USE" index hint but got no available index, // we have to use table scan. if len(available) == 0 { - available = append(available, &accessPath{isTablePath: true}) + available = append(available, &util.AccessPath{IsTablePath: true}) } return available, nil } -func (b *PlanBuilder) filterPathByIsolationRead(paths []*accessPath) ([]*accessPath, error) { +func (b *PlanBuilder) filterPathByIsolationRead(paths []*util.AccessPath) ([]*util.AccessPath, error) { // TODO: filter paths with isolation read locations. isolationReadEngines := b.ctx.GetSessionVars().GetIsolationReadEngines() availableEngine := map[kv.StoreType]struct{}{} var availableEngineStr string for i := len(paths) - 1; i >= 0; i-- { - if _, ok := availableEngine[paths[i].storeType]; !ok { - availableEngine[paths[i].storeType] = struct{}{} + if _, ok := availableEngine[paths[i].StoreType]; !ok { + availableEngine[paths[i].StoreType] = struct{}{} if availableEngineStr != "" { availableEngineStr += ", " } - availableEngineStr += paths[i].storeType.Name() + availableEngineStr += paths[i].StoreType.Name() } - if _, ok := isolationReadEngines[paths[i].storeType]; !ok { + if _, ok := isolationReadEngines[paths[i].StoreType]; !ok { paths = append(paths[:i], paths[i+1:]...) 
} } @@ -712,13 +718,13 @@ func (b *PlanBuilder) filterPathByIsolationRead(paths []*accessPath) ([]*accessP return paths, err } -func removeIgnoredPaths(paths, ignoredPaths []*accessPath, tblInfo *model.TableInfo) []*accessPath { +func removeIgnoredPaths(paths, ignoredPaths []*util.AccessPath, tblInfo *model.TableInfo) []*util.AccessPath { if len(ignoredPaths) == 0 { return paths } - remainedPaths := make([]*accessPath, 0, len(paths)) + remainedPaths := make([]*util.AccessPath, 0, len(paths)) for _, path := range paths { - if path.isTablePath || getPathByIndexName(ignoredPaths, path.index.Name, tblInfo) == nil { + if path.IsTablePath || getPathByIndexName(ignoredPaths, path.Index.Name, tblInfo) == nil { remainedPaths = append(remainedPaths, path) } } @@ -1614,12 +1620,13 @@ func (b *PlanBuilder) buildSimple(node ast.StmtNode) (Plan, error) { } b.visitInfo = collectVisitInfoFromGrantStmt(b.ctx, b.visitInfo, raw) case *ast.GrantRoleStmt: - err := ErrSpecificAccessDenied.GenWithStackByArgs("GRANT ROLE") - b.visitInfo = appendVisitInfo(b.visitInfo, mysql.GrantPriv, "", "", "", err) + err := ErrSpecificAccessDenied.GenWithStackByArgs("SUPER") + b.visitInfo = appendVisitInfo(b.visitInfo, mysql.SuperPriv, "", "", "", err) case *ast.RevokeStmt: b.visitInfo = collectVisitInfoFromRevokeStmt(b.ctx, b.visitInfo, raw) case *ast.RevokeRoleStmt: - b.visitInfo = appendVisitInfo(b.visitInfo, mysql.SuperPriv, "", "", "", nil) + err := ErrSpecificAccessDenied.GenWithStackByArgs("SUPER") + b.visitInfo = appendVisitInfo(b.visitInfo, mysql.SuperPriv, "", "", "", err) case *ast.KillStmt: // If you have the SUPER privilege, you can kill all threads and statements. // Otherwise, you can kill only your own threads and statements. @@ -2528,7 +2535,7 @@ func (b *PlanBuilder) buildDDL(ctx context.Context, node ast.DDLNode) (Plan, err } b.visitInfo = appendVisitInfo(b.visitInfo, mysql.InsertPriv, v.NewTable.Schema.L, v.NewTable.Name.L, "", authErr) - case *ast.RecoverTableStmt: + case *ast.RecoverTableStmt, *ast.FlashBackTableStmt: // Recover table command can only be executed by administrator. b.visitInfo = appendVisitInfo(b.visitInfo, mysql.SuperPriv, "", "", "", nil) case *ast.LockTablesStmt, *ast.UnlockTablesStmt: @@ -2536,6 +2543,9 @@ func (b *PlanBuilder) buildDDL(ctx context.Context, node ast.DDLNode) (Plan, err case *ast.CleanupTableLockStmt: // This command can only be executed by administrator. b.visitInfo = appendVisitInfo(b.visitInfo, mysql.SuperPriv, "", "", "", nil) + case *ast.RepairTableStmt: + // Repair table command can only be executed by administrator. + b.visitInfo = appendVisitInfo(b.visitInfo, mysql.SuperPriv, "", "", "", nil) } p := &DDL{Statement: node} return p, nil @@ -2812,6 +2822,9 @@ func buildShowSchema(s *ast.ShowStmt, isView bool) (schema *expression.Schema, o case ast.ShowAnalyzeStatus: names = []string{"Table_schema", "Table_name", "Partition_name", "Job_info", "Processed_rows", "Start_time", "State"} ftypes = []byte{mysql.TypeVarchar, mysql.TypeVarchar, mysql.TypeVarchar, mysql.TypeVarchar, mysql.TypeLonglong, mysql.TypeDatetime, mysql.TypeVarchar} + case ast.ShowBuiltins: + names = []string{"Supported_builtin_functions"} + ftypes = []byte{mysql.TypeVarchar} } schema = expression.NewSchema(make([]*expression.Column, 0, len(names))...) 
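// Reviewer note (illustration, not part of the patch): filterPathByIsolationRead
// above keeps only the access paths whose storage engine is listed in the
// session's isolation read engines. A sketch, assuming a *PlanBuilder b whose
// session has tidb_isolation_read_engines set to 'tikv':
//
//	paths := []*util.AccessPath{
//	    {IsTablePath: true, StoreType: kv.TiKV},
//	    {IsTablePath: true, StoreType: kv.TiFlash},
//	}
//	paths, err := b.filterPathByIsolationRead(paths)
//	// Only the kv.TiKV path survives. If every path were filtered out, the
//	// collected availableEngineStr is presumably used to build the error.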
diff --git a/planner/core/planbuilder_test.go b/planner/core/planbuilder_test.go
index 3d5c35949696d..0908ff88dde69 100644
--- a/planner/core/planbuilder_test.go
+++ b/planner/core/planbuilder_test.go
@@ -21,6 +21,7 @@ import (
 	"github.com/pingcap/parser/ast"
 	"github.com/pingcap/parser/model"
 	"github.com/pingcap/tidb/expression"
+	"github.com/pingcap/tidb/planner/util"
 	"github.com/pingcap/tidb/types"
 )
@@ -71,9 +72,9 @@ func (s *testPlanBuilderSuite) TestGetPathByIndexName(c *C) {
 		PKIsHandle: true,
 	}
 
-	accessPath := []*accessPath{
-		{isTablePath: true},
-		{index: &model.IndexInfo{Name: model.NewCIStr("idx")}},
+	accessPath := []*util.AccessPath{
+		{IsTablePath: true},
+		{Index: &model.IndexInfo{Name: model.NewCIStr("idx")}},
 	}
 
 	path := getPathByIndexName(accessPath, model.NewCIStr("idx"), tblInfo)
diff --git a/planner/core/point_get_plan.go b/planner/core/point_get_plan.go
index 31d68a7cb11d9..2b5b0496a8c6b 100644
--- a/planner/core/point_get_plan.go
+++ b/planner/core/point_get_plan.go
@@ -82,6 +82,11 @@ func (p *PointGetPlan) ToPB(ctx sessionctx.Context) (*tipb.Executor, error) {
 // ExplainInfo returns operator information to be explained.
 func (p *PointGetPlan) ExplainInfo() string {
+	return p.explainInfo(false)
+}
+
+// explainInfo returns operator information to be explained. When `normalized`
+// is true, data-specific values such as the handle are masked.
+func (p *PointGetPlan) explainInfo(normalized bool) string {
 	buffer := bytes.NewBufferString("")
 	tblName := p.TblInfo.Name.O
 	fmt.Fprintf(buffer, "table:%s", tblName)
@@ -94,10 +99,14 @@ func (p *PointGetPlan) ExplainInfo() string {
 			}
 		}
 	} else {
-		if p.UnsignedHandle {
-			fmt.Fprintf(buffer, ", handle:%d", uint64(p.Handle))
+		if normalized {
+			fmt.Fprintf(buffer, ", handle:?")
 		} else {
-			fmt.Fprintf(buffer, ", handle:%d", p.Handle)
+			if p.UnsignedHandle {
+				fmt.Fprintf(buffer, ", handle:%d", uint64(p.Handle))
+			} else {
+				fmt.Fprintf(buffer, ", handle:%d", p.Handle)
+			}
 		}
 	}
 	if p.Lock {
@@ -106,6 +115,11 @@ func (p *PointGetPlan) ExplainInfo() string {
 	return buffer.String()
 }
 
+// ExplainNormalizedInfo returns normalized operator information to be explained.
+func (p *PointGetPlan) ExplainNormalizedInfo() string {
+	return p.explainInfo(true)
+}
+
 // GetChildReqProps gets the required property by child index.
 func (p *PointGetPlan) GetChildReqProps(idx int) *property.PhysicalProperty {
 	return nil
@@ -192,6 +206,11 @@ func (p *BatchPointGetPlan) ExplainInfo() string {
 	return buffer.String()
 }
 
+// ExplainNormalizedInfo returns normalized operator information to be explained.
+func (p *BatchPointGetPlan) ExplainNormalizedInfo() string {
+	return p.ExplainInfo()
+}
+
 // GetChildReqProps gets the required property by child index.
 func (p *BatchPointGetPlan) GetChildReqProps(idx int) *property.PhysicalProperty {
 	return nil
diff --git a/planner/core/preprocess.go b/planner/core/preprocess.go
index dc478fdc1ac1b..8c2b192c17ea5 100644
--- a/planner/core/preprocess.go
+++ b/planner/core/preprocess.go
@@ -30,6 +30,8 @@ import (
 	"github.com/pingcap/tidb/sessionctx"
 	"github.com/pingcap/tidb/types"
 	"github.com/pingcap/tidb/types/parser_driver"
+	"github.com/pingcap/tidb/util"
+	"github.com/pingcap/tidb/util/domainutil"
 )
 
 // PreprocessOpt presents optional parameters to `Preprocess` method.
@@ -66,6 +68,8 @@ const (
 	inCreateOrDropTable
 	// parentIsJoin is set when visiting node's parent is join.
 	parentIsJoin
+	// inRepairTable is set when visiting a repair table statement.
+	inRepairTable
 )
 
 // preprocessor is an ast.Visitor that preprocess
@@ -121,11 +125,15 @@ func (p *preprocessor) Enter(in ast.Node) (out ast.Node, skipChildren bool) {
 		if node.HintedSel != nil {
 			p.checkBindGrammar(node.OriginSel, node.HintedSel)
 		}
-	case *ast.RecoverTableStmt:
+	case *ast.RecoverTableStmt, *ast.FlashBackTableStmt:
 		// The specified table in the recover table statement may have already been dropped.
 		// So skip checking the table name here; otherwise, the recover table [table_name]
 		// syntax would return a table-not-exists error. But the recover table statement is
 		// used to recover the dropped table, so skip children here.
 		return in, true
+	case *ast.RepairTableStmt:
+		// The RepairTable statement reuses the logic for creating and renaming tables.
+		p.flag |= inRepairTable
+		p.checkRepairTableGrammar(node)
 	default:
 		p.flag &= ^parentIsJoin
 	}
@@ -203,6 +211,8 @@ func (p *preprocessor) Leave(in ast.Node) (out ast.Node, ok bool) {
 			x.Args[0] = ast.NewValueExpr(0)
 		}
 	}
+	case *ast.RepairTableStmt:
+		p.flag &= ^inRepairTable
 	}
 
 	return in, p.err == nil
@@ -497,6 +507,10 @@ func (p *preprocessor) checkRenameTableGrammar(stmt *ast.RenameTableStmt) {
 	oldTable := stmt.OldTable.Name.String()
 	newTable := stmt.NewTable.Name.String()
 
+	p.checkRenameTable(oldTable, newTable)
+}
+
+func (p *preprocessor) checkRenameTable(oldTable, newTable string) {
 	if isIncorrectName(oldTable) {
 		p.err = ddl.ErrWrongTableName.GenWithStackByArgs(oldTable)
 		return
@@ -508,6 +522,23 @@ func (p *preprocessor) checkRenameTableGrammar(stmt *ast.RenameTableStmt) {
 	}
 }
 
+func (p *preprocessor) checkRepairTableGrammar(stmt *ast.RepairTableStmt) {
+	// Check whether TiDB is in REPAIR MODE before handling the create table stmt.
+	if !domainutil.RepairInfo.InRepairMode() {
+		p.err = ddl.ErrRepairTableFail.GenWithStackByArgs("TiDB is not in REPAIR MODE")
+		return
+	}
+	if len(domainutil.RepairInfo.GetRepairTableList()) == 0 {
+		p.err = ddl.ErrRepairTableFail.GenWithStackByArgs("repair list is empty")
+		return
+	}
+
+	// Check the rename action as the rename statement does.
+	oldTable := stmt.Table.Name.String()
+	newTable := stmt.CreateStmt.Table.Name.String()
+	p.checkRenameTable(oldTable, newTable)
+}
+
 func (p *preprocessor) checkAlterTableGrammar(stmt *ast.AlterTableStmt) {
 	tName := stmt.Table.Name.String()
 	if isIncorrectName(tName) {
@@ -715,9 +746,23 @@ func (p *preprocessor) handleTableName(tn *ast.TableName) {
 	}
 	if p.flag&inCreateOrDropTable > 0 {
 		// The table may not exist in create table or drop table statement.
-		// Skip resolving the table to avoid error.
+		if p.flag&inRepairTable > 0 {
+			// The create stmt is inside a repair stmt; skip resolving the table to avoid errors.
+			return
+		}
+		// The create stmt is not inside a repair stmt; check that the table is not in the repair list.
+		if domainutil.RepairInfo.InRepairMode() {
+			p.checkNotInRepair(tn)
+		}
+		return
+	}
+	// repairStmt: admin repair table A create table B ...
+	// A table name in a repair stmt carries either the `inCreateOrDropTable` or the `inRepairTable` flag.
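// Reviewer note (illustration, not part of the patch): concretely, for
//
//	ADMIN REPAIR TABLE t CREATE TABLE t (a int);
//
// the table inside CREATE TABLE is visited with both flags set and is skipped
// by the branch above, while the repair target `t` carries only the
// inRepairTable flag, so it reaches the branch below and is resolved from the
// repair list via handleRepairName.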
+	if p.flag&inRepairTable > 0 {
+		p.handleRepairName(tn)
+		return
+	}
+
 	table, err := p.is.TableByName(tn.Schema, tn.Name)
 	if err != nil {
 		p.err = err
@@ -728,6 +773,36 @@ func (p *preprocessor) handleTableName(tn *ast.TableName) {
 	tn.DBInfo = dbInfo
 }
 
+func (p *preprocessor) checkNotInRepair(tn *ast.TableName) {
+	tableInfo, dbInfo := domainutil.RepairInfo.GetRepairedTableInfoByTableName(tn.Schema.L, tn.Name.L)
+	if dbInfo == nil {
+		return
+	}
+	if tableInfo != nil {
+		p.err = ddl.ErrWrongTableName.GenWithStackByArgs(tn.Name.L, "this table is in repair")
+	}
+}
+
+func (p *preprocessor) handleRepairName(tn *ast.TableName) {
+	// Check whether the repaired table is a memory or system table.
+	if util.IsMemOrSysDB(tn.Schema.L) {
+		p.err = ddl.ErrRepairTableFail.GenWithStackByArgs("memory or system database is not for repair")
+		return
+	}
+	tableInfo, dbInfo := domainutil.RepairInfo.GetRepairedTableInfoByTableName(tn.Schema.L, tn.Name.L)
+	// The table name here only carries the schema name rather than the DBInfo.
+	if dbInfo == nil {
+		p.err = ddl.ErrRepairTableFail.GenWithStackByArgs("database " + tn.Schema.L + " is not in repair")
+		return
+	}
+	if tableInfo == nil {
+		p.err = ddl.ErrRepairTableFail.GenWithStackByArgs("table " + tn.Name.L + " is not in repair")
+		return
+	}
+	p.ctx.SetValue(domainutil.RepairedTable, tableInfo)
+	p.ctx.SetValue(domainutil.RepairedDatabase, dbInfo)
+}
+
 func (p *preprocessor) resolveShowStmt(node *ast.ShowStmt) {
 	if node.DBName == "" {
 		if node.Table != nil && node.Table.Schema.L != "" {
diff --git a/planner/core/property_cols_prune.go b/planner/core/property_cols_prune.go
index ebf523d572484..b45e761785b4b 100644
--- a/planner/core/property_cols_prune.go
+++ b/planner/core/property_cols_prune.go
@@ -21,7 +21,7 @@ func (ds *DataSource) preparePossibleProperties() [][]*expression.Column {
 	result := make([][]*expression.Column, 0, len(ds.possibleAccessPaths))
 
 	for _, path := range ds.possibleAccessPaths {
-		if path.isTablePath {
+		if path.IsTablePath {
 			col := ds.getPKIsHandleCol()
 			if col != nil {
 				result = append(result, []*expression.Column{col})
@@ -29,14 +29,14 @@ func (ds *DataSource) preparePossibleProperties() [][]*expression.Column {
 			continue
 		}
 
-		if len(path.idxCols) == 0 {
+		if len(path.IdxCols) == 0 {
 			continue
 		}
-		result = append(result, make([]*expression.Column, len(path.idxCols)))
-		copy(result[len(result)-1], path.idxCols)
-		for i := 0; i < path.eqCondCount && i+1 < len(path.idxCols); i++ {
-			result = append(result, make([]*expression.Column, len(path.idxCols)-i-1))
-			copy(result[len(result)-1], path.idxCols[i+1:])
+		result = append(result, make([]*expression.Column, len(path.IdxCols)))
+		copy(result[len(result)-1], path.IdxCols)
+		for i := 0; i < path.EqCondCount && i+1 < len(path.IdxCols); i++ {
+			result = append(result, make([]*expression.Column, len(path.IdxCols)-i-1))
+			copy(result[len(result)-1], path.IdxCols[i+1:])
 		}
 	}
 	return result
diff --git a/planner/core/rule_aggregation_push_down.go b/planner/core/rule_aggregation_push_down.go
index f82de643831a2..278c83261e3be 100644
--- a/planner/core/rule_aggregation_push_down.go
+++ b/planner/core/rule_aggregation_push_down.go
@@ -354,7 +354,7 @@ func (a *aggregationPushDownSolver) aggPushDown(p LogicalPlan) (_ LogicalPlan, e
 	}
 	join.SetChildren(lChild, rChild)
 	join.SetSchema(expression.MergeSchema(lChild.Schema(), rChild.Schema()))
-	join.BuildKeyInfo()
+	buildKeyInfo(join)
 	proj := a.tryToEliminateAggregation(agg)
 	if proj != nil {
 		p = proj
diff --git a/planner/core/rule_build_key_info.go b/planner/core/rule_build_key_info.go
index 45a7c45d33224..e875b85ee85a5 100644 --- a/planner/core/rule_build_key_info.go +++ b/planner/core/rule_build_key_info.go @@ -15,8 +15,8 @@ package core import ( "context" - "github.com/pingcap/parser/ast" + "github.com/pingcap/parser/model" "github.com/pingcap/parser/mysql" "github.com/pingcap/tidb/expression" ) @@ -24,33 +24,47 @@ import ( type buildKeySolver struct{} func (s *buildKeySolver) optimize(ctx context.Context, lp LogicalPlan) (LogicalPlan, error) { - lp.BuildKeyInfo() + buildKeyInfo(lp) return lp, nil } +// buildKeyInfo recursively calls LogicalPlan's BuildKeyInfo method. +func buildKeyInfo(lp LogicalPlan) { + for _, child := range lp.Children() { + buildKeyInfo(child) + } + childSchema := make([]*expression.Schema, len(lp.Children())) + for i, child := range lp.Children() { + childSchema[i] = child.Schema() + } + lp.BuildKeyInfo(lp.Schema(), childSchema) +} + // BuildKeyInfo implements LogicalPlan BuildKeyInfo interface. -func (la *LogicalAggregation) BuildKeyInfo() { - la.schema.Keys = nil - la.baseLogicalPlan.BuildKeyInfo() - for _, key := range la.Children()[0].Schema().Keys { - indices := la.schema.ColumnsIndices(key) +func (la *LogicalAggregation) BuildKeyInfo(selfSchema *expression.Schema, childSchema []*expression.Schema) { + if la.IsPartialModeAgg() { + return + } + la.logicalSchemaProducer.BuildKeyInfo(selfSchema, childSchema) + for _, key := range childSchema[0].Keys { + indices := selfSchema.ColumnsIndices(key) if indices == nil { continue } newKey := make([]*expression.Column, 0, len(key)) for _, i := range indices { - newKey = append(newKey, la.schema.Columns[i]) + newKey = append(newKey, selfSchema.Columns[i]) } - la.schema.Keys = append(la.schema.Keys, newKey) + selfSchema.Keys = append(selfSchema.Keys, newKey) } if len(la.groupByCols) == len(la.GroupByItems) && len(la.GroupByItems) > 0 { - indices := la.schema.ColumnsIndices(la.groupByCols) + indices := selfSchema.ColumnsIndices(la.groupByCols) if indices != nil { newKey := make([]*expression.Column, 0, len(indices)) for _, i := range indices { - newKey = append(newKey, la.schema.Columns[i]) + newKey = append(newKey, selfSchema.Columns[i]) } - la.schema.Keys = append(la.schema.Keys, newKey) + selfSchema.Keys = append(selfSchema.Keys, newKey) } } if len(la.GroupByItems) == 0 { @@ -60,12 +74,12 @@ func (la *LogicalAggregation) BuildKeyInfo() { // If a condition is the form of (uniqueKey = constant) or (uniqueKey = Correlated column), it returns at most one row. // This function will check it. -func (p *LogicalSelection) checkMaxOneRowCond(unique expression.Expression, constOrCorCol expression.Expression) bool { +func (p *LogicalSelection) checkMaxOneRowCond(unique expression.Expression, constOrCorCol expression.Expression, childSchema *expression.Schema) bool { col, ok := unique.(*expression.Column) if !ok { return false } - if !p.children[0].Schema().IsUniqueKey(col) { + if !childSchema.IsUniqueKey(col) { return false } _, okCon := constOrCorCol.(*expression.Constant) @@ -77,11 +91,11 @@ func (p *LogicalSelection) checkMaxOneRowCond(unique expression.Expression, cons } // BuildKeyInfo implements LogicalPlan BuildKeyInfo interface. 
-func (p *LogicalSelection) BuildKeyInfo() { - p.baseLogicalPlan.BuildKeyInfo() +func (p *LogicalSelection) BuildKeyInfo(selfSchema *expression.Schema, childSchema []*expression.Schema) { + p.baseLogicalPlan.BuildKeyInfo(selfSchema, childSchema) for _, cond := range p.Conditions { if sf, ok := cond.(*expression.ScalarFunction); ok && sf.FuncName.L == ast.EQ { - if p.checkMaxOneRowCond(sf.GetArgs()[0], sf.GetArgs()[1]) || p.checkMaxOneRowCond(sf.GetArgs()[1], sf.GetArgs()[0]) { + if p.checkMaxOneRowCond(sf.GetArgs()[0], sf.GetArgs()[1], childSchema[0]) || p.checkMaxOneRowCond(sf.GetArgs()[1], sf.GetArgs()[0], childSchema[0]) { p.maxOneRow = true break } @@ -90,8 +104,8 @@ func (p *LogicalSelection) BuildKeyInfo() { } // BuildKeyInfo implements LogicalPlan BuildKeyInfo interface. -func (p *LogicalLimit) BuildKeyInfo() { - p.baseLogicalPlan.BuildKeyInfo() +func (p *LogicalLimit) BuildKeyInfo(selfSchema *expression.Schema, childSchema []*expression.Schema) { + p.baseLogicalPlan.BuildKeyInfo(selfSchema, childSchema) if p.Count == 1 { p.maxOneRow = true } @@ -99,8 +113,8 @@ func (p *LogicalLimit) BuildKeyInfo() { // A bijection exists between columns of a projection's schema and this projection's Exprs. // Sometimes we need a schema made by expr of Exprs to convert a column in child's schema to a column in this projection's Schema. -func (p *LogicalProjection) buildSchemaByExprs() *expression.Schema { - schema := expression.NewSchema(make([]*expression.Column, 0, p.schema.Len())...) +func (p *LogicalProjection) buildSchemaByExprs(selfSchema *expression.Schema) *expression.Schema { + schema := expression.NewSchema(make([]*expression.Column, 0, selfSchema.Len())...) for _, expr := range p.Exprs { if col, isCol := expr.(*expression.Column); isCol { schema.Append(col) @@ -116,31 +130,28 @@ func (p *LogicalProjection) buildSchemaByExprs() *expression.Schema { } // BuildKeyInfo implements LogicalPlan BuildKeyInfo interface. -func (p *LogicalProjection) BuildKeyInfo() { - p.schema.Keys = nil - p.baseLogicalPlan.BuildKeyInfo() - schema := p.buildSchemaByExprs() - for _, key := range p.Children()[0].Schema().Keys { +func (p *LogicalProjection) BuildKeyInfo(selfSchema *expression.Schema, childSchema []*expression.Schema) { + p.logicalSchemaProducer.BuildKeyInfo(selfSchema, childSchema) + schema := p.buildSchemaByExprs(selfSchema) + for _, key := range childSchema[0].Keys { indices := schema.ColumnsIndices(key) if indices == nil { continue } newKey := make([]*expression.Column, 0, len(key)) for _, i := range indices { - newKey = append(newKey, p.schema.Columns[i]) + newKey = append(newKey, selfSchema.Columns[i]) } - p.schema.Keys = append(p.schema.Keys, newKey) + selfSchema.Keys = append(selfSchema.Keys, newKey) } } // BuildKeyInfo implements LogicalPlan BuildKeyInfo interface. -func (p *LogicalJoin) BuildKeyInfo() { - p.schema.Keys = nil - p.baseLogicalPlan.BuildKeyInfo() - p.maxOneRow = p.children[0].MaxOneRow() && p.children[1].MaxOneRow() +func (p *LogicalJoin) BuildKeyInfo(selfSchema *expression.Schema, childSchema []*expression.Schema) { + p.logicalSchemaProducer.BuildKeyInfo(selfSchema, childSchema) switch p.JoinType { case SemiJoin, LeftOuterSemiJoin, AntiSemiJoin, AntiLeftOuterSemiJoin: - p.schema.Keys = p.children[0].Schema().Clone().Keys + selfSchema.Keys = childSchema[0].Clone().Keys case InnerJoin, LeftOuterJoin, RightOuterJoin: // If there is no equal conditions, then cartesian product can't be prevented and unique key information will destroy. 
 	if len(p.EqualConditions) == 0 {
@@ -155,13 +166,13 @@ func (p *LogicalJoin) BuildKeyInfo() {
 	for _, expr := range p.EqualConditions {
 		ln := expr.GetArgs()[0].(*expression.Column)
 		rn := expr.GetArgs()[1].(*expression.Column)
-		for _, key := range p.children[0].Schema().Keys {
+		for _, key := range childSchema[0].Keys {
 			if len(key) == 1 && key[0].Equal(p.ctx, ln) {
 				lOk = true
 				break
 			}
 		}
-		for _, key := range p.children[1].Schema().Keys {
+		for _, key := range childSchema[1].Keys {
 			if len(key) == 1 && key[0].Equal(p.ctx, rn) {
 				rOk = true
 				break
@@ -172,61 +183,94 @@ func (p *LogicalJoin) BuildKeyInfo() {
 	// another side's unique key information will all be reserved.
 	// If it's an outer join, NULL value will fill some position, which will destroy the unique key information.
 	if lOk && p.JoinType != LeftOuterJoin {
-		p.schema.Keys = append(p.schema.Keys, p.children[1].Schema().Keys...)
+		selfSchema.Keys = append(selfSchema.Keys, childSchema[1].Keys...)
 	}
 	if rOk && p.JoinType != RightOuterJoin {
-		p.schema.Keys = append(p.schema.Keys, p.children[0].Schema().Keys...)
+		selfSchema.Keys = append(selfSchema.Keys, childSchema[0].Keys...)
 	}
 }
 
-// BuildKeyInfo implements LogicalPlan BuildKeyInfo interface.
-func (ds *DataSource) BuildKeyInfo() {
-	ds.schema.Keys = nil
-	ds.baseLogicalPlan.BuildKeyInfo()
-	for _, path := range ds.possibleAccessPaths {
-		if path.isTablePath {
-			continue
-		}
-		idx := path.index
-		if !idx.Unique {
-			continue
-		}
-		newKey := make([]*expression.Column, 0, len(idx.Columns))
-		ok := true
-		for _, idxCol := range idx.Columns {
-			// The columns of this index should all occur in column schema.
-			// Since null value could be duplicate in unique key. So we check NotNull flag of every column.
-			find := false
-			for i, col := range ds.Columns {
-				if idxCol.Name.L == col.Name.L {
-					if !mysql.HasNotNullFlag(ds.Columns[i].Flag) {
-						break
-					}
-					newKey = append(newKey, ds.schema.Columns[i])
-					find = true
+// checkIndexCanBeKey checks whether an Index can be a Key in schema.
+func checkIndexCanBeKey(idx *model.IndexInfo, columns []*model.ColumnInfo, schema *expression.Schema) expression.KeyInfo {
+	if !idx.Unique {
+		return nil
+	}
+	newKey := make([]*expression.Column, 0, len(idx.Columns))
+	ok := true
+	for _, idxCol := range idx.Columns {
+		// The columns of this index should all occur in the column schema.
+		// Since null values could be duplicated in a unique key, we check the NotNull flag of every column.
+		find := false
+		for i, col := range columns {
+			if idxCol.Name.L == col.Name.L {
+				if !mysql.HasNotNullFlag(col.Flag) {
 					break
 				}
-			}
-			if !find {
-				ok = false
+				newKey = append(newKey, schema.Columns[i])
+				find = true
 				break
 			}
 		}
-		if ok {
-			ds.schema.Keys = append(ds.schema.Keys, newKey)
+		if !find {
+			ok = false
+			break
+		}
+	}
+	if ok {
+		return newKey
+	}
+	return nil
+}
+
+// BuildKeyInfo implements LogicalPlan BuildKeyInfo interface.
+func (ds *DataSource) BuildKeyInfo(selfSchema *expression.Schema, childSchema []*expression.Schema) { + selfSchema.Keys = nil + for _, path := range ds.possibleAccessPaths { + if path.IsTablePath { + continue + } + if newKey := checkIndexCanBeKey(path.Index, ds.Columns, selfSchema); newKey != nil { + selfSchema.Keys = append(selfSchema.Keys, newKey) } } if ds.tableInfo.PKIsHandle { for i, col := range ds.Columns { if mysql.HasPriKeyFlag(col.Flag) { - ds.schema.Keys = append(ds.schema.Keys, []*expression.Column{ds.schema.Columns[i]}) + selfSchema.Keys = append(selfSchema.Keys, []*expression.Column{selfSchema.Columns[i]}) break } } } } +// BuildKeyInfo implements LogicalPlan BuildKeyInfo interface. +func (ts *LogicalTableScan) BuildKeyInfo(selfSchema *expression.Schema, childSchema []*expression.Schema) { + ts.Source.BuildKeyInfo(selfSchema, childSchema) +} + +// BuildKeyInfo implements LogicalPlan BuildKeyInfo interface. +func (is *LogicalIndexScan) BuildKeyInfo(selfSchema *expression.Schema, childSchema []*expression.Schema) { + selfSchema.Keys = nil + for _, path := range is.Source.possibleAccessPaths { + if path.IsTablePath { + continue + } + if newKey := checkIndexCanBeKey(path.Index, is.Columns, selfSchema); newKey != nil { + selfSchema.Keys = append(selfSchema.Keys, newKey) + } + } + handle := is.getPKIsHandleCol() + if handle != nil { + selfSchema.Keys = append(selfSchema.Keys, []*expression.Column{handle}) + } +} + +// BuildKeyInfo implements LogicalPlan BuildKeyInfo interface. +func (tg *TiKVSingleGather) BuildKeyInfo(selfSchema *expression.Schema, childSchema []*expression.Schema) { + selfSchema.Keys = childSchema[0].Keys +} + func (*buildKeySolver) name() string { return "build_keys" } diff --git a/planner/core/rule_column_pruning.go b/planner/core/rule_column_pruning.go index d1c0753d2bbcb..c90ff5cdfe236 100644 --- a/planner/core/rule_column_pruning.go +++ b/planner/core/rule_column_pruning.go @@ -209,6 +209,8 @@ func (ds *DataSource) PruneColumns(parentUsedCols []*expression.Column) error { handleCol = ds.handleCol handleColInfo = ds.Columns[ds.schema.ColumnIndex(handleCol)] } + originSchemaColumns := ds.schema.Columns + originColumns := ds.Columns for i := len(used) - 1; i >= 0; i-- { if !used[i] { ds.schema.Columns = append(ds.schema.Columns[:i], ds.schema.Columns[i+1:]...) @@ -218,7 +220,11 @@ func (ds *DataSource) PruneColumns(parentUsedCols []*expression.Column) error { // For SQL like `select 1 from t`, tikv's response will be empty if no column is in schema. // So we'll force to push one if schema doesn't have any column. if ds.schema.Len() == 0 { - if handleCol == nil { + if ds.table.Type().IsClusterTable() && len(originColumns) > 0 { + // use the first line. 
+			handleCol = originSchemaColumns[0]
+			handleColInfo = originColumns[0]
+		} else if handleCol == nil {
 			handleCol = ds.newExtraHandleSchemaCol()
 			handleColInfo = model.NewExtraHandleColInfo()
 		}
diff --git a/planner/core/rule_join_elimination.go b/planner/core/rule_join_elimination.go
index c27be9823d3da..6398c19be5939 100644
--- a/planner/core/rule_join_elimination.go
+++ b/planner/core/rule_join_elimination.go
@@ -126,14 +126,14 @@ func (o *outerJoinEliminator) isInnerJoinKeysContainIndex(innerPlan LogicalPlan,
 		return false, nil
 	}
 	for _, path := range ds.possibleAccessPaths {
-		if path.isTablePath {
+		if path.IsTablePath {
 			continue
 		}
-		if !path.index.Unique {
+		if !path.Index.Unique {
 			continue
 		}
 		joinKeysContainIndex := true
-		for _, idxCol := range path.idxCols {
+		for _, idxCol := range path.IdxCols {
 			if !joinKeys.Contains(idxCol) {
 				joinKeysContainIndex = false
 				break
diff --git a/planner/core/rule_max_min_eliminate.go b/planner/core/rule_max_min_eliminate.go
index 365144ca43ab8..09e586c0a28c9 100644
--- a/planner/core/rule_max_min_eliminate.go
+++ b/planner/core/rule_max_min_eliminate.go
@@ -20,6 +20,7 @@ import (
 	"github.com/pingcap/parser/mysql"
 	"github.com/pingcap/tidb/expression"
 	"github.com/pingcap/tidb/expression/aggregation"
+	"github.com/pingcap/tidb/planner/util"
 	"github.com/pingcap/tidb/types"
 	"github.com/pingcap/tidb/util/ranger"
 )
@@ -49,8 +50,8 @@ func (a *maxMinEliminator) composeAggsByInnerJoin(aggs []*LogicalAggregation) (p
 	return
 }
 
-// checkColCanUseIndex checks whether there is an accessPath satisfy the conditions:
-// 1. all of the selection's condition can be pushed down as accessConds of the path.
+// checkColCanUseIndex checks whether there is an AccessPath that satisfies the conditions:
+// 1. all of the selection's conditions can be pushed down as AccessConds of the path.
 // 2. the path can keep order for `col` after pushing down the conditions.
 func (a *maxMinEliminator) checkColCanUseIndex(plan LogicalPlan, col *expression.Column, conditions []expression.Expression) bool {
 	switch p := plan.(type) {
@@ -58,9 +59,9 @@ func (a *maxMinEliminator) checkColCanUseIndex(plan LogicalPlan, col *expression
 		conditions = append(conditions, p.Conditions...)
 		return a.checkColCanUseIndex(p.children[0], col, conditions)
 	case *DataSource:
-		// Check whether there is an accessPath can use index for col.
+		// Check whether there is an AccessPath that can use an index for col.
 		for _, path := range p.possibleAccessPaths {
-			if path.isTablePath {
+			if path.IsTablePath {
 				// Since table path can contain accessConds of at most one column,
 				// we only need to check if all of the conditions can be pushed down as accessConds
 				// and `col` is the handle column.
@@ -73,13 +74,13 @@ func (a *maxMinEliminator) checkColCanUseIndex(plan LogicalPlan, col *expression
 			} else {
 				// For index paths, we have to check:
 				// 1. whether all of the conditions can be pushed down as accessConds.
-				// 2. whether the accessPath can satisfy the order property of `col` with these accessConds.
-				result, err := ranger.DetachCondAndBuildRangeForIndex(p.ctx, conditions, path.fullIdxCols, path.fullIdxColLens)
+				// 2. whether the AccessPath can satisfy the order property of `col` with these accessConds.
+ result, err := ranger.DetachCondAndBuildRangeForIndex(p.ctx, conditions, path.FullIdxCols, path.FullIdxColLens) if err != nil || len(result.RemainedConds) != 0 { continue } for i := 0; i <= result.EqCondCount; i++ { - if i < len(path.fullIdxCols) && col.Equal(nil, path.fullIdxCols[i]) { + if i < len(path.FullIdxCols) && col.Equal(nil, path.FullIdxCols[i]) { return true } } @@ -109,7 +110,7 @@ func (a *maxMinEliminator) cloneSubPlans(plan LogicalPlan) LogicalPlan { newDs.schema = p.schema.Clone() newDs.Columns = make([]*model.ColumnInfo, len(p.Columns)) copy(newDs.Columns, p.Columns) - newAccessPaths := make([]*accessPath, 0, len(p.possibleAccessPaths)) + newAccessPaths := make([]*util.AccessPath, 0, len(p.possibleAccessPaths)) for _, path := range p.possibleAccessPaths { newPath := *path newAccessPaths = append(newAccessPaths, &newPath) diff --git a/planner/core/rule_partition_processor.go b/planner/core/rule_partition_processor.go index ae439cbc0153f..0e7aa9500ceae 100644 --- a/planner/core/rule_partition_processor.go +++ b/planner/core/rule_partition_processor.go @@ -18,6 +18,7 @@ import ( "github.com/pingcap/errors" "github.com/pingcap/parser/model" "github.com/pingcap/tidb/expression" + "github.com/pingcap/tidb/planner/util" "github.com/pingcap/tidb/sessionctx" "github.com/pingcap/tidb/table/tables" "github.com/pingcap/tidb/types" @@ -164,7 +165,7 @@ func (s *partitionProcessor) prune(ds *DataSource) (LogicalPlan, error) { newDataSource.baseLogicalPlan = newBaseLogicalPlan(ds.SCtx(), plancodec.TypeTableScan, &newDataSource, ds.blockOffset) newDataSource.isPartition = true newDataSource.physicalTableID = pi.Definitions[i].ID - newDataSource.possibleAccessPaths = make([]*accessPath, len(ds.possibleAccessPaths)) + newDataSource.possibleAccessPaths = make([]*util.AccessPath, len(ds.possibleAccessPaths)) for i := range ds.possibleAccessPaths { newPath := *ds.possibleAccessPaths[i] newDataSource.possibleAccessPaths[i] = &newPath diff --git a/planner/core/rule_predicate_push_down.go b/planner/core/rule_predicate_push_down.go index d6380d4dc1a48..f0baf57bc4e1d 100644 --- a/planner/core/rule_predicate_push_down.go +++ b/planner/core/rule_predicate_push_down.go @@ -204,7 +204,7 @@ func (p *LogicalJoin) PredicatePushDown(predicates []expression.Expression) (ret p.RightJoinKeys = append(p.RightJoinKeys, eqCond.GetArgs()[1].(*expression.Column)) } p.mergeSchema() - p.BuildKeyInfo() + buildKeyInfo(p) return ret, p.self } @@ -578,6 +578,14 @@ func (p *LogicalWindow) PredicatePushDown(predicates []expression.Expression) ([ return canNotBePushed, p } +// PredicatePushDown implements LogicalPlan PredicatePushDown interface. 
+func (p *LogicalMemTable) PredicatePushDown(predicates []expression.Expression) ([]expression.Expression, LogicalPlan) { + if p.Extractor != nil { + predicates = p.Extractor.Extract(p.schema, p.names, predicates) + } + return predicates, p.self +} + func (*ppdSolver) name() string { return "predicate_push_down" } diff --git a/planner/core/stats.go b/planner/core/stats.go index 6cceed0abf0b4..0e9aa4cd432f8 100644 --- a/planner/core/stats.go +++ b/planner/core/stats.go @@ -21,7 +21,9 @@ import ( "github.com/pingcap/parser/mysql" "github.com/pingcap/tidb/expression" "github.com/pingcap/tidb/planner/property" + "github.com/pingcap/tidb/planner/util" "github.com/pingcap/tidb/statistics" + "github.com/pingcap/tidb/types" "github.com/pingcap/tidb/util/logutil" "github.com/pingcap/tidb/util/ranger" "go.uber.org/zap" @@ -136,61 +138,74 @@ func (ds *DataSource) getColumnNDV(colID int64) (ndv float64) { return ndv } -func (ds *DataSource) deriveStatsByFilter(conds expression.CNFExprs) { - if ds.tableStats == nil { - tableStats := &property.StatsInfo{ - RowCount: float64(ds.statisticTable.Count), - Cardinality: make([]float64, len(ds.Columns)), - HistColl: ds.statisticTable.GenerateHistCollFromColumnInfo(ds.Columns, ds.schema.Columns), - StatsVersion: ds.statisticTable.Version, - } - if ds.statisticTable.Pseudo { - tableStats.StatsVersion = statistics.PseudoVersion - } - for i, col := range ds.Columns { - tableStats.Cardinality[i] = ds.getColumnNDV(col.ID) - } - ds.tableStats = tableStats - ds.TblColHists = ds.statisticTable.ID2UniqueID(ds.TblCols) +func (ds *DataSource) initStats() { + if ds.tableStats != nil { + return + } + tableStats := &property.StatsInfo{ + RowCount: float64(ds.statisticTable.Count), + Cardinality: make([]float64, len(ds.Columns)), + HistColl: ds.statisticTable.GenerateHistCollFromColumnInfo(ds.Columns, ds.schema.Columns), + StatsVersion: ds.statisticTable.Version, + } + if ds.statisticTable.Pseudo { + tableStats.StatsVersion = statistics.PseudoVersion + } + for i, col := range ds.Columns { + tableStats.Cardinality[i] = ds.getColumnNDV(col.ID) } - selectivity, nodes, err := ds.tableStats.HistColl.Selectivity(ds.ctx, conds) + ds.tableStats = tableStats + ds.TblColHists = ds.statisticTable.ID2UniqueID(ds.TblCols) +} + +func (ds *DataSource) deriveStatsByFilter(conds expression.CNFExprs, filledPaths []*util.AccessPath) *property.StatsInfo { + ds.initStats() + selectivity, nodes, err := ds.tableStats.HistColl.Selectivity(ds.ctx, conds, filledPaths) if err != nil { logutil.BgLogger().Debug("something wrong happened, use the default selectivity", zap.Error(err)) selectivity = selectionFactor } - ds.stats = ds.tableStats.Scale(selectivity) + stats := ds.tableStats.Scale(selectivity) if ds.ctx.GetSessionVars().OptimizerSelectivityLevel >= 1 { - ds.stats.HistColl = ds.stats.HistColl.NewHistCollBySelectivity(ds.ctx.GetSessionVars().StmtCtx, nodes) + stats.HistColl = stats.HistColl.NewHistCollBySelectivity(ds.ctx.GetSessionVars().StmtCtx, nodes) } + return stats } // DeriveStats implement LogicalPlan DeriveStats interface. func (ds *DataSource) DeriveStats(childStats []*property.StatsInfo, selfSchema *expression.Schema, childSchema []*expression.Schema) (*property.StatsInfo, error) { + ds.initStats() // PushDownNot here can convert query 'not (a != 1)' to 'a = 1'. 
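As an aside on the PushDownNot comment above, the rewrite is ordinary double-negation elimination over comparison operators; a minimal sketch of the operator flip (the real implementation works on whole expression trees):

package main

import "fmt"

// negate maps each comparison operator to its negation: NOT(a op b) == a negate[op] b.
var negate = map[string]string{
	"=": "!=", "!=": "=", "<": ">=", ">=": "<", ">": "<=", "<=": ">",
}

func main() {
	// not (a != 1)  =>  a = 1, which can then match an index point range.
	fmt.Printf("not (a != 1) => a %s 1\n", negate["!="])
}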
for i, expr := range ds.pushedDownConds { - ds.pushedDownConds[i] = expression.PushDownNot(nil, expr) + ds.pushedDownConds[i] = expression.PushDownNot(ds.ctx, expr) + } + for _, path := range ds.possibleAccessPaths { + if path.IsTablePath { + continue + } + err := ds.fillIndexPath(path, ds.pushedDownConds) + if err != nil { + return nil, err + } } - ds.deriveStatsByFilter(ds.pushedDownConds) + ds.stats = ds.deriveStatsByFilter(ds.pushedDownConds, ds.possibleAccessPaths) for _, path := range ds.possibleAccessPaths { - if path.isTablePath { + if path.IsTablePath { noIntervalRanges, err := ds.deriveTablePathStats(path, ds.pushedDownConds, false) if err != nil { return nil, err } // If we have point or empty range, just remove other possible paths. - if noIntervalRanges || len(path.ranges) == 0 { + if noIntervalRanges || len(path.Ranges) == 0 { ds.possibleAccessPaths[0] = path ds.possibleAccessPaths = ds.possibleAccessPaths[:1] break } continue } - noIntervalRanges, err := ds.deriveIndexPathStats(path, ds.pushedDownConds, false) - if err != nil { - return nil, err - } + noIntervalRanges := ds.deriveIndexPathStats(path, ds.pushedDownConds, false) // If we have empty range, or point range on unique index, just remove other possible paths. - if (noIntervalRanges && path.index.Unique) || len(path.ranges) == 0 { + if (noIntervalRanges && path.Index.Unique) || len(path.Ranges) == 0 { ds.possibleAccessPaths[0] = path ds.possibleAccessPaths = ds.possibleAccessPaths[:1] break @@ -202,7 +217,7 @@ func (ds *DataSource) DeriveStats(childStats []*property.StatsInfo, selfSchema * // If there is an index path, we current do not consider `IndexMergePath`. needConsiderIndexMerge := true for i := 1; i < len(ds.possibleAccessPaths); i++ { - if len(ds.possibleAccessPaths[i].accessConds) != 0 { + if len(ds.possibleAccessPaths[i].AccessConds) != 0 { needConsiderIndexMerge = false break } @@ -233,15 +248,15 @@ func (ds *DataSource) generateAndPruneIndexMergePath() { ds.possibleAccessPaths = ds.possibleAccessPaths[regularPathCount:] } -// DeriveStats implement LogicalPlan DeriveStats interface. -func (ts *TableScan) DeriveStats(childStats []*property.StatsInfo, selfSchema *expression.Schema, childSchema []*expression.Schema) (_ *property.StatsInfo, err error) { +// DeriveStats implements LogicalPlan DeriveStats interface. +func (ts *LogicalTableScan) DeriveStats(childStats []*property.StatsInfo, selfSchema *expression.Schema, childSchema []*expression.Schema) (_ *property.StatsInfo, err error) { // PushDownNot here can convert query 'not (a != 1)' to 'a = 1'. for i, expr := range ts.AccessConds { // TODO The expressions may be shared by TableScan and several IndexScans, there would be redundant // `PushDownNot` function call in multiple `DeriveStats` then. - ts.AccessConds[i] = expression.PushDownNot(nil, expr) + ts.AccessConds[i] = expression.PushDownNot(ts.ctx, expr) } - ts.Source.deriveStatsByFilter(ts.AccessConds) + ts.stats = ts.Source.deriveStatsByFilter(ts.AccessConds, nil) sc := ts.SCtx().GetSessionVars().StmtCtx // ts.Handle could be nil if PK is Handle, and PK column has been pruned. if ts.Handle != nil { @@ -258,7 +273,30 @@ func (ts *TableScan) DeriveStats(childStats []*property.StatsInfo, selfSchema *e if err != nil { return nil, err } - return ts.Source.stats, nil + return ts.stats, nil +} + +// DeriveStats implements LogicalPlan DeriveStats interface. 
+func (is *LogicalIndexScan) DeriveStats(childStats []*property.StatsInfo, selfSchema *expression.Schema, childSchema []*expression.Schema) (*property.StatsInfo, error) { + for i, expr := range is.AccessConds { + is.AccessConds[i] = expression.PushDownNot(is.ctx, expr) + } + is.stats = is.Source.deriveStatsByFilter(is.AccessConds, nil) + if len(is.AccessConds) == 0 { + is.Ranges = ranger.FullRange() + } + // TODO: If the AccessConds is not empty, we have set the range when push down the selection. + + is.idxCols, is.idxColLens = expression.IndexInfo2PrefixCols(is.Columns, is.schema.Columns, is.Index) + is.fullIdxCols, is.fullIdxColLens = expression.IndexInfo2Cols(is.Columns, is.schema.Columns, is.Index) + if !is.Index.Unique && !is.Index.Primary && len(is.Index.Columns) == len(is.idxCols) { + handleCol := is.getPKIsHandleCol() + if handleCol != nil && !mysql.HasUnsignedFlag(handleCol.RetType.Flag) { + is.idxCols = append(is.idxCols, handleCol) + is.idxColLens = append(is.idxColLens, types.UnspecifiedLength) + } + } + return is.stats, nil } // getIndexMergeOrPath generates all possible IndexMergeOrPaths. @@ -269,7 +307,7 @@ func (ds *DataSource) generateIndexMergeOrPaths() { if !ok || sf.FuncName.L != ast.LogicOr { continue } - var partialPaths = make([]*accessPath, 0, usedIndexCount) + var partialPaths = make([]*util.AccessPath, 0, usedIndexCount) dnfItems := expression.FlattenDNFConditions(sf) for _, item := range dnfItems { cnfItems := expression.SplitCNFItems(item) @@ -311,49 +349,50 @@ func (ds *DataSource) isInIndexMergeHints(name string) bool { } // accessPathsForConds generates all possible index paths for conditions. -func (ds *DataSource) accessPathsForConds(conditions []expression.Expression, usedIndexCount int) []*accessPath { - var results = make([]*accessPath, 0, usedIndexCount) +func (ds *DataSource) accessPathsForConds(conditions []expression.Expression, usedIndexCount int) []*util.AccessPath { + var results = make([]*util.AccessPath, 0, usedIndexCount) for i := 0; i < usedIndexCount; i++ { - path := &accessPath{} - if ds.possibleAccessPaths[i].isTablePath { + path := &util.AccessPath{} + if ds.possibleAccessPaths[i].IsTablePath { if !ds.isInIndexMergeHints("primary") { continue } - path.isTablePath = true + path.IsTablePath = true noIntervalRanges, err := ds.deriveTablePathStats(path, conditions, true) if err != nil { logutil.BgLogger().Debug("can not derive statistics of a path", zap.Error(err)) continue } // If we have point or empty range, just remove other possible paths. - if noIntervalRanges || len(path.ranges) == 0 { + if noIntervalRanges || len(path.Ranges) == 0 { results[0] = path results = results[:1] break } } else { - path.index = ds.possibleAccessPaths[i].index - if !ds.isInIndexMergeHints(path.index.Name.L) { + path.Index = ds.possibleAccessPaths[i].Index + if !ds.isInIndexMergeHints(path.Index.Name.L) { continue } - noIntervalRanges, err := ds.deriveIndexPathStats(path, conditions, true) + err := ds.fillIndexPath(path, conditions) if err != nil { logutil.BgLogger().Debug("can not derive statistics of a path", zap.Error(err)) continue } + noIntervalRanges := ds.deriveIndexPathStats(path, conditions, true) // If we have empty range, or point range on unique index, just remove other possible paths. 
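The pruning rule named in the comment above (also applied in DataSource.DeriveStats earlier in this hunk) fits in a few lines. A stripped-down illustration, with access paths reduced to flags; note that for the table path a point range alone is decisive, since the handle is unique by construction:

package main

import "fmt"

type path struct {
	name                           string
	unique, pointRange, emptyRange bool
}

// prune keeps only a path that fully decides the result: an empty range, or a
// point range on a unique index, makes every other candidate path useless.
func prune(paths []path) []path {
	for _, p := range paths {
		if p.emptyRange || (p.pointRange && p.unique) {
			return []path{p}
		}
	}
	return paths
}

func main() {
	got := prune([]path{
		{name: "idx_b"},
		{name: "uniq_a", unique: true, pointRange: true},
	})
	fmt.Println(got) // only uniq_a survives
}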
- if (noIntervalRanges && path.index.Unique) || len(path.ranges) == 0 { + if (noIntervalRanges && path.Index.Unique) || len(path.Ranges) == 0 { results[0] = path results = results[:1] break } } - // If accessConds is empty or tableFilter is not empty, we ignore the access path. + // If AccessConds is empty or tableFilter is not empty, we ignore the access path. // Now these conditions are too strict. // For example, a sql `select * from t where a > 1 or (b < 2 and c > 3)` and table `t` with indexes // on a and b separately. we can generate a `IndexMergePath` with table filter `a > 1 or (b < 2 and c > 3)`. // TODO: solve the above case - if len(path.tableFilters) > 0 || len(path.accessConds) == 0 { + if len(path.TableFilters) > 0 || len(path.AccessConds) == 0 { continue } results = append(results, path) @@ -367,15 +406,15 @@ func (ds *DataSource) accessPathsForConds(conditions []expression.Expression, us // with most columns, e.g, filter is c > 1 and the input indexes are c and c_d_e, // the former one is enough, and it is less expensive in execution compared with the latter one. // TODO: improve strategy of the partial path selection -func (ds *DataSource) buildIndexMergePartialPath(indexAccessPaths []*accessPath) *accessPath { +func (ds *DataSource) buildIndexMergePartialPath(indexAccessPaths []*util.AccessPath) *util.AccessPath { if len(indexAccessPaths) == 1 { return indexAccessPaths[0] } maxColsIndex := 0 - maxCols := len(indexAccessPaths[0].idxCols) + maxCols := len(indexAccessPaths[0].IdxCols) for i := 1; i < len(indexAccessPaths); i++ { - current := len(indexAccessPaths[i].idxCols) + current := len(indexAccessPaths[i].IdxCols) if current > maxCols { maxColsIndex = i maxCols = current @@ -385,10 +424,10 @@ func (ds *DataSource) buildIndexMergePartialPath(indexAccessPaths []*accessPath) } // buildIndexMergeOrPath generates one possible IndexMergePath. -func (ds *DataSource) buildIndexMergeOrPath(partialPaths []*accessPath, current int) *accessPath { - indexMergePath := &accessPath{partialIndexPaths: partialPaths} - indexMergePath.tableFilters = append(indexMergePath.tableFilters, ds.pushedDownConds[:current]...) - indexMergePath.tableFilters = append(indexMergePath.tableFilters, ds.pushedDownConds[current+1:]...) +func (ds *DataSource) buildIndexMergeOrPath(partialPaths []*util.AccessPath, current int) *util.AccessPath { + indexMergePath := &util.AccessPath{PartialIndexPaths: partialPaths} + indexMergePath.TableFilters = append(indexMergePath.TableFilters, ds.pushedDownConds[:current]...) + indexMergePath.TableFilters = append(indexMergePath.TableFilters, ds.pushedDownConds[current+1:]...) 
return indexMergePath } diff --git a/planner/core/task.go b/planner/core/task.go index 6becdc2b2e500..b280a09640b13 100644 --- a/planner/core/task.go +++ b/planner/core/task.go @@ -19,12 +19,14 @@ import ( "github.com/pingcap/parser/ast" "github.com/pingcap/parser/charset" "github.com/pingcap/parser/mysql" + "github.com/pingcap/tidb/config" "github.com/pingcap/tidb/expression" "github.com/pingcap/tidb/expression/aggregation" "github.com/pingcap/tidb/kv" "github.com/pingcap/tidb/sessionctx" "github.com/pingcap/tidb/statistics" "github.com/pingcap/tidb/types" + "github.com/pingcap/tidb/util/chunk" "github.com/pingcap/tidb/util/plancodec" ) @@ -140,6 +142,20 @@ func (t *copTask) finishIndexPlan() { t.cst += cnt * rowSize * sessVars.ScanFactor } +func (t *copTask) getStoreType() kv.StoreType { + if t.tablePlan == nil { + return kv.TiKV + } + tp := t.tablePlan + for len(tp.Children()) > 0 { + tp = tp.Children()[0] + } + if ts, ok := tp.(*PhysicalTableScan); ok { + return ts.StoreType + } + return kv.TiKV +} + func (p *basePhysicalPlan) attach2Task(tasks ...task) task { t := finishCopTask(p.ctx, tasks[0].copy()) return attachPlan2Task(p.self, t) @@ -390,17 +406,38 @@ func (p *PhysicalIndexJoin) GetCost(outerTask, innerTask task) float64 { return outerTask.cost() + innerPlanCost + cpuCost + memoryCost } +func (p *PhysicalHashJoin) avgRowSize(inner PhysicalPlan) (size float64) { + padChar := p.ctx.GetSessionVars().StmtCtx.PadCharToFullLength + if inner.statsInfo().HistColl != nil { + size = inner.statsInfo().HistColl.GetAvgRowSizeListInDisk(inner.Schema().Columns, padChar) + } else { + // Estimate using just the type info. + cols := inner.Schema().Columns + for _, col := range cols { + size += float64(chunk.EstimateTypeWidth(padChar, col.GetType())) + } + } + return +} + // GetCost computes cost of hash join operator itself. func (p *PhysicalHashJoin) GetCost(lCnt, rCnt float64) float64 { - innerCnt, outerCnt := lCnt, rCnt + buildCnt, probeCnt := lCnt, rCnt + build := p.children[0] // Taking the right as the inner for right join or using the outer to build a hash table. if (p.InnerChildIdx == 1 && !p.UseOuterToBuild) || (p.InnerChildIdx == 0 && p.UseOuterToBuild) { - innerCnt, outerCnt = rCnt, lCnt + buildCnt, probeCnt = rCnt, lCnt + build = p.children[1] } sessVars := p.ctx.GetSessionVars() + oomUseTmpStorage := config.GetGlobalConfig().OOMUseTmpStorage + memQuota := sessVars.StmtCtx.MemTracker.GetBytesLimit() // sessVars.MemQuotaQuery && hint + rowSize := p.avgRowSize(build) + spill := oomUseTmpStorage && memQuota > 0 && rowSize*buildCnt > float64(memQuota) // Cost of building hash table. - cpuCost := innerCnt * sessVars.CPUFactor - memoryCost := innerCnt * sessVars.MemoryFactor + cpuCost := buildCnt * sessVars.CPUFactor + memoryCost := buildCnt * sessVars.MemoryFactor + diskCost := buildCnt * sessVars.DiskFactor * rowSize // Number of matched row pairs regarding the equal join conditions. helper := &fullJoinRowCountHelper{ cartesian: false, @@ -428,23 +465,38 @@ func (p *PhysicalHashJoin) GetCost(lCnt, rCnt float64) float64 { numPairs = 0 } } - // Cost of quering hash table is cheap actually, so we just compute the cost of + // Cost of querying hash table is cheap actually, so we just compute the cost of // evaluating `OtherConditions` and joining row pairs. probeCost := numPairs * sessVars.CPUFactor + probeDiskCost := numPairs * sessVars.DiskFactor * rowSize // Cost of evaluating outer filter. 
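To make the new spill branch above concrete, here is a worked example with made-up numbers; the quota and DiskFactor values are illustrative assumptions, not TiDB defaults:

package main

import "fmt"

func main() {
	const (
		buildCnt   = 1e6      // rows on the build side
		rowSize    = 100.0    // bytes per row, as estimated by avgRowSize
		memQuota   = 32 << 20 // 32 MiB quota (hypothetical)
		diskFactor = 1.5      // hypothetical sessVars.DiskFactor
	)
	// ~100 MB of build data against a 32 MiB quota: spill = true, so the
	// diskCost terms are kept and memoryCost is scaled down to the fraction
	// of the build side that stays in memory; otherwise diskCost is zeroed.
	spill := rowSize*buildCnt > float64(memQuota)
	buildDiskCost := buildCnt * diskFactor * rowSize
	memScale := float64(memQuota) / (rowSize * buildCnt) // about 0.34
	fmt.Println(spill, buildDiskCost, memScale)
}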
if len(p.LeftConditions)+len(p.RightConditions) > 0 { // Input outer count for the above computation should be adjusted by selectionFactor. probeCost *= selectionFactor - probeCost += outerCnt * sessVars.CPUFactor + probeDiskCost *= selectionFactor + probeCost += probeCnt * sessVars.CPUFactor } + diskCost += probeDiskCost probeCost /= float64(p.Concurrency) // Cost of additional concurrent goroutines. cpuCost += probeCost + float64(p.Concurrency+1)*sessVars.ConcurrencyFactor // Cost of traveling the hash table to resolve missing matched cases when building the hash table from the outer table if p.UseOuterToBuild { - cpuCost += innerCnt * sessVars.CPUFactor / float64(p.Concurrency) + if spill { + // It runs in sequence when build data is on disk. See handleUnmatchedRowsFromHashTableInDisk + cpuCost += buildCnt * sessVars.CPUFactor + } else { + cpuCost += buildCnt * sessVars.CPUFactor / float64(p.Concurrency) + } + diskCost += buildCnt * sessVars.DiskFactor * rowSize } - return cpuCost + memoryCost + + if spill { + memoryCost *= float64(memQuota) / (rowSize * buildCnt) + } else { + diskCost = 0 + } + return cpuCost + memoryCost + diskCost } func (p *PhysicalHashJoin) attach2Task(tasks ...task) task { @@ -968,14 +1020,24 @@ func BuildFinalModeAggregation( return } -func (p *basePhysicalAgg) newPartialAggregate(copToFlash bool) (partial, final PhysicalPlan) { +func (p *basePhysicalAgg) newPartialAggregate(copTaskType kv.StoreType) (partial, final PhysicalPlan) { // Check if this aggregation can push down. - if !CheckAggCanPushCop(p.ctx, p.AggFuncs, p.GroupByItems, copToFlash) { + if !CheckAggCanPushCop(p.ctx, p.AggFuncs, p.GroupByItems, copTaskType == kv.TiFlash) { return nil, p.self } finalAggFuncs, finalGbyItems, partialSchema := BuildFinalModeAggregation(p.ctx, p.AggFuncs, p.GroupByItems, p.schema) // Remove unnecessary FirstRow. p.AggFuncs = RemoveUnnecessaryFirstRow(p.ctx, finalAggFuncs, finalGbyItems, p.AggFuncs, p.GroupByItems, partialSchema) + if copTaskType == kv.TiDB { + // For partial agg of TiDB cop task, since the TiDB coprocessor reuses the TiDB executor, + // and the TiDB aggregation executor won't output the group by value, + // we need to add a `firstrow` aggregation function to output the group by value. + aggFuncs, err := genFirstRowAggForGroupBy(p.ctx, p.GroupByItems) + if err != nil { + return nil, p.self + } + p.AggFuncs = append(p.AggFuncs, aggFuncs...) + } finalSchema := p.schema p.schema = partialSchema partialAgg := p.self @@ -997,6 +1059,18 @@ func (p *basePhysicalAgg) newPartialAggregate(copToFlash bool) (partial, final P return partialAgg, finalAgg } +func genFirstRowAggForGroupBy(ctx sessionctx.Context, groupByItems []expression.Expression) ([]*aggregation.AggFuncDesc, error) { + aggFuncs := make([]*aggregation.AggFuncDesc, 0, len(groupByItems)) + for _, groupBy := range groupByItems { + agg, err := aggregation.NewAggFuncDesc(ctx, ast.AggFuncFirstRow, []expression.Expression{groupBy}, false) + if err != nil { + return nil, err + } + aggFuncs = append(aggFuncs, agg) + } + return aggFuncs, nil +} + // RemoveUnnecessaryFirstRow removes unnecessary FirstRow of the aggregation. This function can be // used for both LogicalAggregation and PhysicalAggregation. // When the select column is same with the group by key, the column can be removed and gets value from the group by key. @@ -1047,8 +1121,8 @@ func (p *PhysicalStreamAgg) attach2Task(tasks ...task) task { // The `extraHandleCol` is added if the double read needs to keep order.
So we just use it to decided // whether the following plan is double read with order reserved. if cop.extraHandleCol == nil { - copToFlash := isFlashCopTask(cop) - partialAgg, finalAgg := p.newPartialAggregate(copToFlash) + copTaskType := cop.getStoreType() + partialAgg, finalAgg := p.newPartialAggregate(copTaskType) if partialAgg != nil { if cop.tablePlan != nil { cop.finishIndexPlan() @@ -1108,27 +1182,12 @@ func (p *PhysicalHashAgg) cpuCostDivisor(hasDistinct bool) (float64, float64) { return math.Min(float64(finalCon), float64(partialCon)), float64(finalCon + partialCon) } -func isFlashCopTask(cop *copTask) bool { - if cop.tablePlan == nil { - return false - } - tp := cop.tablePlan - for len(tp.Children()) > 0 { - tp = tp.Children()[0] - } - if ts, ok := tp.(*PhysicalTableScan); ok { - return ts.StoreType == kv.TiFlash - } - return false -} - func (p *PhysicalHashAgg) attach2Task(tasks ...task) task { t := tasks[0].copy() inputRows := t.count() if cop, ok := t.(*copTask); ok { - // copToFlash means whether the cop task is running on flash storage - copToFlash := isFlashCopTask(cop) - partialAgg, finalAgg := p.newPartialAggregate(copToFlash) + copTaskType := cop.getStoreType() + partialAgg, finalAgg := p.newPartialAggregate(copTaskType) if partialAgg != nil { if cop.tablePlan != nil { cop.finishIndexPlan() diff --git a/planner/core/testdata/plan_normalized_suite_in.json b/planner/core/testdata/plan_normalized_suite_in.json new file mode 100644 index 0000000000000..3753c984adb00 --- /dev/null +++ b/planner/core/testdata/plan_normalized_suite_in.json @@ -0,0 +1,27 @@ +[ + { + "name": "TestNormalizedPlan", + "cases": [ + "select * from t1;", + "select * from t1 where a<1;", + "select * from t1 where a>1", + "select * from t1 where a=1", + "select * from t1 where a in (1,2,3)", + "select * from t1 where b=1", + "select a+1,b+2 from t1 use index(b) where b=3", + "select * from t1 where t1.b > 1 and t1.a in (select sum(t2.b) from t2 where t2.a=t1.a and t2.b is not null)", + "SELECT * from t1 where a!=1 order by c limit 1", + "SELECT /*+ TIDB_SMJ(t1, t2) */ * from t1, t2 where t1.a = t2.a and t1.c>1;", + "SELECT /*+ TIDB_INLJ(t1, t2) */ * from t1, t2 where t1.a = t2.a and t1.c>1;", + "SELECT /*+ TIDB_HJ(t1, t2) */ * from t1, t2 where t1.a = t2.a and t1.c>1;", + "SELECT /*+ TIDB_HJ(t1, t2) */ * from t1, t2 where t1.a = t2.a and t1.c>1;", + "SELECT /*+ TIDB_INLJ(t1, t2) */ * from t1, t2 where t1.a = t2.a and t1.c>1;", + "insert into t1 values (1,1,1)", + "insert into t1 select * from t2 where t2.a>0 and t2.b!=0", + "update t1 set a=a+1", + "update t1 set a=a+1 where a>0", + "delete from t1", + "delete from t1 where a>0 and b=1 and c!=2" + ] + } +] diff --git a/planner/core/testdata/plan_normalized_suite_out.json b/planner/core/testdata/plan_normalized_suite_out.json new file mode 100644 index 0000000000000..23ba1d16b6863 --- /dev/null +++ b/planner/core/testdata/plan_normalized_suite_out.json @@ -0,0 +1,180 @@ +[ + { + "Name": "TestNormalizedPlan", + "Cases": [ + { + "SQL": "select * from t1;", + "Plan": [ + " TableReader_5 root data:TableScan_4", + " └─TableScan_4 cop table:t1, range:[?,?], keep order:false" + ] + }, + { + "SQL": "select * from t1 where a<1;", + "Plan": [ + " TableReader_6 root data:TableScan_5", + " └─TableScan_5 cop table:t1, range:[?,?], keep order:false" + ] + }, + { + "SQL": "select * from t1 where a>1", + "Plan": [ + " TableReader_6 root data:TableScan_5", + " └─TableScan_5 cop table:t1, range:[?,?], keep order:false" + ] + }, + { + "SQL": "select * from t1 where a=1", + 
"Plan": [ + " Point_Get_1 root table:t1, handle:?" + ] + }, + { + "SQL": "select * from t1 where a in (1,2,3)", + "Plan": [ + " Batch_Point_Get_1 root table:t1" + ] + }, + { + "SQL": "select * from t1 where b=1", + "Plan": [ + " IndexLookUp_10 root ", + " ├─IndexScan_8 cop table:t1, index:b, range:[?,?], keep order:false", + " └─TableScan_9 cop table:t1, keep order:false" + ] + }, + { + "SQL": "select a+1,b+2 from t1 use index(b) where b=3", + "Plan": [ + " Projection_4 root plus(test.t1.a, ?), plus(test.t1.b, ?)", + " └─IndexReader_6 root index:IndexScan_5", + " └─IndexScan_5 cop table:t1, index:b, range:[?,?], keep order:false" + ] + }, + { + "SQL": "select * from t1 where t1.b > 1 and t1.a in (select sum(t2.b) from t2 where t2.a=t1.a and t2.b is not null)", + "Plan": [ + " Projection_10 root test.t1.a, test.t1.b, test.t1.c", + " └─Apply_12 root semi join, inner:StreamAgg_34, equal:eq(Column#8, Column#7)", + " ├─Projection_13 root cast(test.t1.a), test.t1.a, test.t1.b, test.t1.c", + " │ └─TableReader_16 root data:Selection_15", + " │ └─Selection_15 cop gt(test.t1.b, ?)", + " │ └─TableScan_14 cop table:t1, range:[?,?], keep order:false", + " └─StreamAgg_34 root funcs:sum(Column#11)->Column#7", + " └─TableReader_35 root data:StreamAgg_23", + " └─StreamAgg_23 cop funcs:sum(test.t2.b)->Column#11", + " └─Selection_33 cop not(isnull(test.t2.b))", + " └─TableScan_32 cop table:t2, range: decided by eq(test.t2.a, test.t1.a), keep order:false" + ] + }, + { + "SQL": "SELECT * from t1 where a!=1 order by c limit 1", + "Plan": [ + " TopN_8 root test.t1.c:asc", + " └─TableReader_16 root data:TopN_15", + " └─TopN_15 cop test.t1.c:asc", + " └─TableScan_14 cop table:t1, range:[?,?], keep order:false" + ] + }, + { + "SQL": "SELECT /*+ TIDB_SMJ(t1, t2) */ * from t1, t2 where t1.a = t2.a and t1.c>1;", + "Plan": [ + " MergeJoin_7 root inner join, left key:test.t1.a, right key:test.t2.a", + " ├─TableReader_11 root data:Selection_10", + " │ └─Selection_10 cop gt(test.t1.c, ?)", + " │ └─TableScan_9 cop table:t1, range:[?,?], keep order:true", + " └─TableReader_13 root data:TableScan_12", + " └─TableScan_12 cop table:t2, range:[?,?], keep order:true" + ] + }, + { + "SQL": "SELECT /*+ TIDB_INLJ(t1, t2) */ * from t1, t2 where t1.a = t2.a and t1.c>1;", + "Plan": [ + " IndexJoin_10 root inner join, inner:TableReader_9, outer key:test.t1.a, inner key:test.t2.a", + " ├─TableReader_19 root data:Selection_18", + " │ └─Selection_18 cop gt(test.t1.c, ?)", + " │ └─TableScan_17 cop table:t1, range:[?,?], keep order:false", + " └─TableReader_9 root data:TableScan_8", + " └─TableScan_8 cop table:t2, range: decided by [test.t1.a], keep order:false" + ] + }, + { + "SQL": "SELECT /*+ TIDB_HJ(t1, t2) */ * from t1, t2 where t1.a = t2.a and t1.c>1;", + "Plan": [ + " HashRightJoin_18 root inner join, inner:TableReader_21, equal:eq(test.t1.a, test.t2.a)", + " ├─TableReader_21 root data:Selection_20", + " │ └─Selection_20 cop gt(test.t1.c, ?)", + " │ └─TableScan_19 cop table:t1, range:[?,?], keep order:false", + " └─TableReader_23 root data:TableScan_22", + " └─TableScan_22 cop table:t2, range:[?,?], keep order:false" + ] + }, + { + "SQL": "SELECT /*+ TIDB_HJ(t1, t2) */ * from t1, t2 where t1.a = t2.a and t1.c>1;", + "Plan": [ + " HashRightJoin_18 root inner join, inner:TableReader_21, equal:eq(test.t1.a, test.t2.a)", + " ├─TableReader_21 root data:Selection_20", + " │ └─Selection_20 cop gt(test.t1.c, ?)", + " │ └─TableScan_19 cop table:t1, range:[?,?], keep order:false", + " └─TableReader_23 root data:TableScan_22", + " 
└─TableScan_22 cop table:t2, range:[?,?], keep order:false" + ] + }, + { + "SQL": "SELECT /*+ TIDB_INLJ(t1, t2) */ * from t1, t2 where t1.a = t2.a and t1.c>1;", + "Plan": [ + " IndexJoin_10 root inner join, inner:TableReader_9, outer key:test.t1.a, inner key:test.t2.a", + " ├─TableReader_19 root data:Selection_18", + " │ └─Selection_18 cop gt(test.t1.c, ?)", + " │ └─TableScan_17 cop table:t1, range:[?,?], keep order:false", + " └─TableReader_9 root data:TableScan_8", + " └─TableScan_8 cop table:t2, range: decided by [test.t1.a], keep order:false" + ] + }, + { + "SQL": "insert into t1 values (1,1,1)", + "Plan": [ + "" + ] + }, + { + "SQL": "insert into t1 select * from t2 where t2.a>0 and t2.b!=0", + "Plan": [ + " TableReader_9 root data:Selection_8", + " └─Selection_8 cop ne(test.t2.b, ?)", + " └─TableScan_7 cop table:t2, range:[?,?], keep order:false" + ] + }, + { + "SQL": "update t1 set a=a+1", + "Plan": [ + " TableReader_6 root data:TableScan_5", + " └─TableScan_5 cop table:t1, range:[?,?], keep order:false" + ] + }, + { + "SQL": "update t1 set a=a+1 where a>0", + "Plan": [ + " TableReader_7 root data:TableScan_6", + " └─TableScan_6 cop table:t1, range:[?,?], keep order:false" + ] + }, + { + "SQL": "delete from t1", + "Plan": [ + " TableReader_6 root data:TableScan_5", + " └─TableScan_5 cop table:t1, range:[?,?], keep order:false" + ] + }, + { + "SQL": "delete from t1 where a>0 and b=1 and c!=2", + "Plan": [ + " IndexLookUp_12 root ", + " ├─IndexScan_9 cop table:t1, index:b, range:[?,?], keep order:false", + " └─Selection_11 cop ne(test.t1.c, ?)", + " └─TableScan_10 cop table:t1, keep order:false" + ] + } + ] + } +] diff --git a/planner/implementation/datasource.go b/planner/implementation/datasource.go index a5fea2af32409..783d78d3fcbc6 100644 --- a/planner/implementation/datasource.go +++ b/planner/implementation/datasource.go @@ -74,7 +74,7 @@ func (impl *TableReaderImpl) ScaleCostLimit(costLimit float64) float64 { sessVars := reader.SCtx().GetSessionVars() copIterWorkers := float64(sessVars.DistSQLScanConcurrency) if math.MaxFloat64/copIterWorkers < costLimit { - return costLimit + return math.MaxFloat64 } return costLimit * copIterWorkers } @@ -108,3 +108,66 @@ func (impl *TableScanImpl) CalcCost(outCount float64, children ...memo.Implement } return impl.cost } + +// IndexReaderImpl is the implementation of PhysicalIndexReader. +type IndexReaderImpl struct { + baseImpl + tblColHists *statistics.HistColl +} + +// ScaleCostLimit implements Implementation interface. +func (impl *IndexReaderImpl) ScaleCostLimit(costLimit float64) float64 { + reader := impl.plan.(*plannercore.PhysicalIndexReader) + sessVars := reader.SCtx().GetSessionVars() + copIterWorkers := float64(sessVars.DistSQLScanConcurrency) + if math.MaxFloat64/copIterWorkers < costLimit { + return math.MaxFloat64 + } + return costLimit * copIterWorkers +} + +// CalcCost implements Implementation interface. +func (impl *IndexReaderImpl) CalcCost(outCount float64, children ...memo.Implementation) float64 { + reader := impl.plan.(*plannercore.PhysicalIndexReader) + sessVars := reader.SCtx().GetSessionVars() + networkCost := outCount * sessVars.NetworkFactor * impl.tblColHists.GetAvgRowSize(children[0].GetPlan().Schema().Columns, true) + copIterWorkers := float64(sessVars.DistSQLScanConcurrency) + impl.cost = (networkCost + children[0].GetCost()) / copIterWorkers + return impl.cost +} + +// NewIndexReaderImpl creates a new IndexReader Implementation. 
+func NewIndexReaderImpl(reader *plannercore.PhysicalIndexReader, tblColHists *statistics.HistColl) *IndexReaderImpl { + return &IndexReaderImpl{ + baseImpl: baseImpl{plan: reader}, + tblColHists: tblColHists, + } +} + +// IndexScanImpl is the Implementation of PhysicalIndexScan. +type IndexScanImpl struct { + baseImpl + tblColHists *statistics.HistColl +} + +// CalcCost implements Implementation interface. +func (impl *IndexScanImpl) CalcCost(outCount float64, children ...memo.Implementation) float64 { + is := impl.plan.(*plannercore.PhysicalIndexScan) + sessVars := is.SCtx().GetSessionVars() + rowSize := impl.tblColHists.GetIndexAvgRowSize(is.Schema().Columns, is.Index.Unique) + cost := outCount * rowSize * sessVars.ScanFactor + if is.Desc { + cost = outCount * rowSize * sessVars.DescScanFactor + } + cost += float64(len(is.Ranges)) * sessVars.SeekFactor + impl.cost = cost + return impl.cost +} + +// NewIndexScanImpl creates a new IndexScan Implementation. +func NewIndexScanImpl(scan *plannercore.PhysicalIndexScan, tblColHists *statistics.HistColl) *IndexScanImpl { + return &IndexScanImpl{ + baseImpl: baseImpl{plan: scan}, + tblColHists: tblColHists, + } +} diff --git a/planner/implementation/simple_plans.go b/planner/implementation/simple_plans.go index 2fd2d7f8c773a..7d53d2ea5d839 100644 --- a/planner/implementation/simple_plans.go +++ b/planner/implementation/simple_plans.go @@ -144,3 +144,28 @@ func (impl *TiDBTopNImpl) CalcCost(outCount float64, children ...memo.Implementa func NewTiDBTopNImpl(topN *plannercore.PhysicalTopN) *TiDBTopNImpl { return &TiDBTopNImpl{baseImpl{plan: topN}} } + +// UnionAllImpl is the implementation of PhysicalUnionAll. +type UnionAllImpl struct { + baseImpl +} + +// CalcCost implements Implementation CalcCost interface. +func (impl *UnionAllImpl) CalcCost(outCount float64, children ...memo.Implementation) float64 { + var childMaxCost float64 + for _, child := range children { + childCost := child.GetCost() + if childCost > childMaxCost { + childMaxCost = childCost + } + } + selfCost := float64(1+len(children)) * impl.plan.SCtx().GetSessionVars().ConcurrencyFactor + // Children of UnionAll are executed in parallel. + impl.cost = selfCost + childMaxCost + return impl.cost +} + +// NewUnionAllImpl creates a new UnionAllImpl.
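The UnionAll costing rule added here deserves a note: children execute in parallel, so the group pays the concurrency overhead plus only the most expensive child rather than the sum of all children. A self-contained sketch follows; reading the (1 + len(children)) term as one worker per child plus a collector is an inference, not something the hunk states:

package main

import "fmt"

func unionAllCost(concurrencyFactor float64, childCosts []float64) float64 {
	var maxChild float64
	for _, c := range childCosts {
		if c > maxChild {
			maxChild = c
		}
	}
	// selfCost models the (1 + len(children)) concurrent goroutines.
	selfCost := float64(1+len(childCosts)) * concurrencyFactor
	return selfCost + maxChild
}

func main() {
	// Three children costing 10, 40 and 25 with ConcurrencyFactor = 3:
	// cost = (1+3)*3 + 40 = 52, not 10+40+25.
	fmt.Println(unionAllCost(3, []float64{10, 40, 25}))
}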
+func NewUnionAllImpl(union *plannercore.PhysicalUnionAll) *UnionAllImpl { + return &UnionAllImpl{baseImpl{plan: union}} +} diff --git a/planner/memo/expr_iterator_test.go b/planner/memo/expr_iterator_test.go index fceaa75f4580a..baf7f39fdaec8 100644 --- a/planner/memo/expr_iterator_test.go +++ b/planner/memo/expr_iterator_test.go @@ -19,12 +19,12 @@ import ( ) func (s *testMemoSuite) TestNewExprIterFromGroupElem(c *C) { - g0 := NewGroupWithSchema(NewGroupExpr(plannercore.LogicalSelection{}.Init(s.sctx, 0)), nil) + g0 := NewGroupWithSchema(NewGroupExpr(plannercore.LogicalSelection{}.Init(s.sctx, 0)), s.schema) g0.Insert(NewGroupExpr(plannercore.LogicalLimit{}.Init(s.sctx, 0))) g0.Insert(NewGroupExpr(plannercore.LogicalProjection{}.Init(s.sctx, 0))) g0.Insert(NewGroupExpr(plannercore.LogicalLimit{}.Init(s.sctx, 0))) - g1 := NewGroupWithSchema(NewGroupExpr(plannercore.LogicalSelection{}.Init(s.sctx, 0)), nil) + g1 := NewGroupWithSchema(NewGroupExpr(plannercore.LogicalSelection{}.Init(s.sctx, 0)), s.schema) g1.Insert(NewGroupExpr(plannercore.LogicalLimit{}.Init(s.sctx, 0))) g1.Insert(NewGroupExpr(plannercore.LogicalProjection{}.Init(s.sctx, 0))) g1.Insert(NewGroupExpr(plannercore.LogicalLimit{}.Init(s.sctx, 0))) @@ -32,7 +32,7 @@ func (s *testMemoSuite) TestNewExprIterFromGroupElem(c *C) { expr := NewGroupExpr(plannercore.LogicalJoin{}.Init(s.sctx, 0)) expr.Children = append(expr.Children, g0) expr.Children = append(expr.Children, g1) - g2 := NewGroupWithSchema(expr, nil) + g2 := NewGroupWithSchema(expr, s.schema) pattern := BuildPattern(OperandJoin, EngineAll, BuildPattern(OperandProjection, EngineAll), BuildPattern(OperandSelection, EngineAll)) iter := NewExprIterFromGroupElem(g2.Equivalents.Front(), pattern) @@ -58,13 +58,13 @@ func (s *testMemoSuite) TestNewExprIterFromGroupElem(c *C) { } func (s *testMemoSuite) TestExprIterNext(c *C) { - g0 := NewGroupWithSchema(NewGroupExpr(plannercore.LogicalProjection{}.Init(s.sctx, 0)), nil) + g0 := NewGroupWithSchema(NewGroupExpr(plannercore.LogicalProjection{}.Init(s.sctx, 0)), s.schema) g0.Insert(NewGroupExpr(plannercore.LogicalLimit{}.Init(s.sctx, 0))) g0.Insert(NewGroupExpr(plannercore.LogicalProjection{}.Init(s.sctx, 0))) g0.Insert(NewGroupExpr(plannercore.LogicalLimit{}.Init(s.sctx, 0))) g0.Insert(NewGroupExpr(plannercore.LogicalProjection{}.Init(s.sctx, 0))) - g1 := NewGroupWithSchema(NewGroupExpr(plannercore.LogicalSelection{}.Init(s.sctx, 0)), nil) + g1 := NewGroupWithSchema(NewGroupExpr(plannercore.LogicalSelection{}.Init(s.sctx, 0)), s.schema) g1.Insert(NewGroupExpr(plannercore.LogicalLimit{}.Init(s.sctx, 0))) g1.Insert(NewGroupExpr(plannercore.LogicalSelection{}.Init(s.sctx, 0))) g1.Insert(NewGroupExpr(plannercore.LogicalLimit{}.Init(s.sctx, 0))) @@ -73,7 +73,7 @@ func (s *testMemoSuite) TestExprIterNext(c *C) { expr := NewGroupExpr(plannercore.LogicalJoin{}.Init(s.sctx, 0)) expr.Children = append(expr.Children, g0) expr.Children = append(expr.Children, g1) - g2 := NewGroupWithSchema(expr, nil) + g2 := NewGroupWithSchema(expr, s.schema) pattern := BuildPattern(OperandJoin, EngineAll, BuildPattern(OperandProjection, EngineAll), BuildPattern(OperandSelection, EngineAll)) iter := NewExprIterFromGroupElem(g2.Equivalents.Front(), pattern) @@ -102,7 +102,7 @@ func (s *testMemoSuite) TestExprIterNext(c *C) { } func (s *testMemoSuite) TestExprIterReset(c *C) { - g0 := NewGroupWithSchema(NewGroupExpr(plannercore.LogicalProjection{}.Init(s.sctx, 0)), nil) + g0 := NewGroupWithSchema(NewGroupExpr(plannercore.LogicalProjection{}.Init(s.sctx, 0)), 
s.schema) g0.Insert(NewGroupExpr(plannercore.LogicalLimit{}.Init(s.sctx, 0))) g0.Insert(NewGroupExpr(plannercore.LogicalProjection{}.Init(s.sctx, 0))) g0.Insert(NewGroupExpr(plannercore.LogicalLimit{}.Init(s.sctx, 0))) @@ -111,13 +111,13 @@ func (s *testMemoSuite) TestExprIterReset(c *C) { sel1 := NewGroupExpr(plannercore.LogicalSelection{}.Init(s.sctx, 0)) sel2 := NewGroupExpr(plannercore.LogicalSelection{}.Init(s.sctx, 0)) sel3 := NewGroupExpr(plannercore.LogicalSelection{}.Init(s.sctx, 0)) - g1 := NewGroupWithSchema(sel1, nil) + g1 := NewGroupWithSchema(sel1, s.schema) g1.Insert(NewGroupExpr(plannercore.LogicalLimit{}.Init(s.sctx, 0))) g1.Insert(sel2) g1.Insert(NewGroupExpr(plannercore.LogicalLimit{}.Init(s.sctx, 0))) g1.Insert(sel3) - g2 := NewGroupWithSchema(NewGroupExpr(plannercore.LogicalSelection{}.Init(s.sctx, 0)), nil) + g2 := NewGroupWithSchema(NewGroupExpr(plannercore.LogicalSelection{}.Init(s.sctx, 0)), s.schema) g2.Insert(NewGroupExpr(plannercore.LogicalLimit{}.Init(s.sctx, 0))) g2.Insert(NewGroupExpr(plannercore.LogicalSelection{}.Init(s.sctx, 0))) g2.Insert(NewGroupExpr(plannercore.LogicalLimit{}.Init(s.sctx, 0))) @@ -127,7 +127,7 @@ func (s *testMemoSuite) TestExprIterReset(c *C) { expr := NewGroupExpr(plannercore.LogicalJoin{}.Init(s.sctx, 0)) expr.Children = append(expr.Children, g0) expr.Children = append(expr.Children, g1) - g3 := NewGroupWithSchema(expr, nil) + g3 := NewGroupWithSchema(expr, s.schema) // link sel 1~3 with Group 2 sel1.Children = append(sel1.Children, g2) @@ -185,34 +185,34 @@ func countMatchedIter(group *Group, pattern *Pattern) int { } func (s *testMemoSuite) TestExprIterWithEngineType(c *C) { - g1 := NewGroupWithSchema(NewGroupExpr(plannercore.LogicalSelection{}.Init(s.sctx, 0)), nil).SetEngineType(EngineTiFlash) + g1 := NewGroupWithSchema(NewGroupExpr(plannercore.LogicalSelection{}.Init(s.sctx, 0)), s.schema).SetEngineType(EngineTiFlash) g1.Insert(NewGroupExpr(plannercore.LogicalLimit{}.Init(s.sctx, 0))) g1.Insert(NewGroupExpr(plannercore.LogicalProjection{}.Init(s.sctx, 0))) g1.Insert(NewGroupExpr(plannercore.LogicalLimit{}.Init(s.sctx, 0))) - g2 := NewGroupWithSchema(NewGroupExpr(plannercore.LogicalSelection{}.Init(s.sctx, 0)), nil).SetEngineType(EngineTiKV) + g2 := NewGroupWithSchema(NewGroupExpr(plannercore.LogicalSelection{}.Init(s.sctx, 0)), s.schema).SetEngineType(EngineTiKV) g2.Insert(NewGroupExpr(plannercore.LogicalLimit{}.Init(s.sctx, 0))) g2.Insert(NewGroupExpr(plannercore.LogicalProjection{}.Init(s.sctx, 0))) g2.Insert(NewGroupExpr(plannercore.LogicalLimit{}.Init(s.sctx, 0))) - flashGather := NewGroupExpr(plannercore.TableGather{}.Init(s.sctx, 0)) + flashGather := NewGroupExpr(plannercore.TiKVSingleGather{}.Init(s.sctx, 0)) flashGather.Children = append(flashGather.Children, g1) - g3 := NewGroupWithSchema(flashGather, nil).SetEngineType(EngineTiDB) + g3 := NewGroupWithSchema(flashGather, s.schema).SetEngineType(EngineTiDB) - tikvGather := NewGroupExpr(plannercore.TableGather{}.Init(s.sctx, 0)) + tikvGather := NewGroupExpr(plannercore.TiKVSingleGather{}.Init(s.sctx, 0)) tikvGather.Children = append(tikvGather.Children, g2) g3.Insert(tikvGather) join := NewGroupExpr(plannercore.LogicalJoin{}.Init(s.sctx, 0)) join.Children = append(join.Children, g3, g3) - g4 := NewGroupWithSchema(join, nil).SetEngineType(EngineTiDB) + g4 := NewGroupWithSchema(join, s.schema).SetEngineType(EngineTiDB) // The Groups look like this: // Group 4 // Join input:[Group3, Group3] // Group 3 - // TableGather input:[Group2] EngineTiKV - // TableGather input:[Group1] 
EngineTiFlash + // TiKVSingleGather input:[Group2] EngineTiKV + // TiKVSingleGather input:[Group1] EngineTiFlash // Group 2 // Selection // Projection @@ -224,36 +224,36 @@ func (s *testMemoSuite) TestExprIterWithEngineType(c *C) { // Limit // Limit - p0 := BuildPattern(OperandTableGather, EngineTiDBOnly, BuildPattern(OperandLimit, EngineTiKVOnly)) + p0 := BuildPattern(OperandTiKVSingleGather, EngineTiDBOnly, BuildPattern(OperandLimit, EngineTiKVOnly)) c.Assert(countMatchedIter(g3, p0), Equals, 2) - p1 := BuildPattern(OperandTableGather, EngineTiDBOnly, BuildPattern(OperandLimit, EngineTiFlashOnly)) + p1 := BuildPattern(OperandTiKVSingleGather, EngineTiDBOnly, BuildPattern(OperandLimit, EngineTiFlashOnly)) c.Assert(countMatchedIter(g3, p1), Equals, 2) - p2 := BuildPattern(OperandTableGather, EngineTiDBOnly, BuildPattern(OperandLimit, EngineTiKVOrTiFlash)) + p2 := BuildPattern(OperandTiKVSingleGather, EngineTiDBOnly, BuildPattern(OperandLimit, EngineTiKVOrTiFlash)) c.Assert(countMatchedIter(g3, p2), Equals, 4) - p3 := BuildPattern(OperandTableGather, EngineTiDBOnly, BuildPattern(OperandSelection, EngineTiFlashOnly)) + p3 := BuildPattern(OperandTiKVSingleGather, EngineTiDBOnly, BuildPattern(OperandSelection, EngineTiFlashOnly)) c.Assert(countMatchedIter(g3, p3), Equals, 1) - p4 := BuildPattern(OperandTableGather, EngineTiDBOnly, BuildPattern(OperandProjection, EngineTiKVOnly)) + p4 := BuildPattern(OperandTiKVSingleGather, EngineTiDBOnly, BuildPattern(OperandProjection, EngineTiKVOnly)) c.Assert(countMatchedIter(g3, p4), Equals, 1) p5 := BuildPattern( OperandJoin, EngineTiDBOnly, - BuildPattern(OperandTableGather, EngineTiDBOnly, BuildPattern(OperandLimit, EngineTiKVOnly)), - BuildPattern(OperandTableGather, EngineTiDBOnly, BuildPattern(OperandLimit, EngineTiKVOnly)), + BuildPattern(OperandTiKVSingleGather, EngineTiDBOnly, BuildPattern(OperandLimit, EngineTiKVOnly)), + BuildPattern(OperandTiKVSingleGather, EngineTiDBOnly, BuildPattern(OperandLimit, EngineTiKVOnly)), ) c.Assert(countMatchedIter(g4, p5), Equals, 4) p6 := BuildPattern( OperandJoin, EngineTiDBOnly, - BuildPattern(OperandTableGather, EngineTiDBOnly, BuildPattern(OperandLimit, EngineTiFlashOnly)), - BuildPattern(OperandTableGather, EngineTiDBOnly, BuildPattern(OperandLimit, EngineTiKVOnly)), + BuildPattern(OperandTiKVSingleGather, EngineTiDBOnly, BuildPattern(OperandLimit, EngineTiFlashOnly)), + BuildPattern(OperandTiKVSingleGather, EngineTiDBOnly, BuildPattern(OperandLimit, EngineTiKVOnly)), ) c.Assert(countMatchedIter(g4, p6), Equals, 4) p7 := BuildPattern( OperandJoin, EngineTiDBOnly, - BuildPattern(OperandTableGather, EngineTiDBOnly, BuildPattern(OperandLimit, EngineTiKVOrTiFlash)), - BuildPattern(OperandTableGather, EngineTiDBOnly, BuildPattern(OperandLimit, EngineTiKVOrTiFlash)), + BuildPattern(OperandTiKVSingleGather, EngineTiDBOnly, BuildPattern(OperandLimit, EngineTiKVOrTiFlash)), + BuildPattern(OperandTiKVSingleGather, EngineTiDBOnly, BuildPattern(OperandLimit, EngineTiKVOrTiFlash)), ) c.Assert(countMatchedIter(g4, p7), Equals, 16) @@ -263,8 +263,8 @@ func (s *testMemoSuite) TestExprIterWithEngineType(c *C) { p8 := BuildPattern( OperandJoin, EngineTiDBOnly, - BuildPattern(OperandTableGather, EngineTiDBOnly), - BuildPattern(OperandTableGather, EngineTiDBOnly), + BuildPattern(OperandTiKVSingleGather, EngineTiDBOnly), + BuildPattern(OperandTiKVSingleGather, EngineTiDBOnly), ) c.Assert(countMatchedIter(g4, p8), Equals, 4) } diff --git a/planner/memo/group.go b/planner/memo/group.go index 036254fd538f4..da73641dc38c0 100644 
--- a/planner/memo/group.go +++ b/planner/memo/group.go @@ -18,6 +18,7 @@ import ( "fmt" "github.com/pingcap/tidb/expression" + plannercore "github.com/pingcap/tidb/planner/core" "github.com/pingcap/tidb/planner/property" ) @@ -85,11 +86,17 @@ type Group struct { Prop *property.LogicalProperty EngineType EngineType + + // hasBuiltKeyInfo indicates whether this group has called `BuildKeyInfo`. + // BuildKeyInfo is lazily called when a rule needs information of + // unique key or maxOneRow (in LogicalProp). For each Group, we only need + // to collect this information once. + hasBuiltKeyInfo bool } // NewGroupWithSchema creates a new Group with given schema. func NewGroupWithSchema(e *GroupExpr, s *expression.Schema) *Group { - prop := &property.LogicalProperty{Schema: s} + prop := &property.LogicalProperty{Schema: expression.NewSchema(s.Columns...)} g := &Group{ Equivalents: list.New(), Fingerprints: make(map[string]*list.Element), @@ -197,3 +204,46 @@ func (g *Group) InsertImpl(prop *property.PhysicalProperty, impl Implementation) key := prop.HashCode() g.ImplMap[string(key)] = impl } + +// Convert2GroupExpr converts a logical plan to a GroupExpr. +func Convert2GroupExpr(node plannercore.LogicalPlan) *GroupExpr { + e := NewGroupExpr(node) + e.Children = make([]*Group, 0, len(node.Children())) + for _, child := range node.Children() { + childGroup := Convert2Group(child) + e.Children = append(e.Children, childGroup) + } + return e +} + +// Convert2Group converts a logical plan to a Group. +func Convert2Group(node plannercore.LogicalPlan) *Group { + e := Convert2GroupExpr(node) + g := NewGroupWithSchema(e, node.Schema()) + // Stats property for `Group` would be computed after the exploration phase. + return g +} + +// BuildKeyInfo recursively builds UniqueKey and MaxOneRow info in the LogicalProperty. +func (g *Group) BuildKeyInfo() { + if g.hasBuiltKeyInfo { + return + } + g.hasBuiltKeyInfo = true + + e := g.Equivalents.Front().Value.(*GroupExpr) + childSchema := make([]*expression.Schema, len(e.Children)) + childMaxOneRow := make([]bool, len(e.Children)) + for i := range e.Children { + e.Children[i].BuildKeyInfo() + childSchema[i] = e.Children[i].Prop.Schema + childMaxOneRow[i] = e.Children[i].Prop.MaxOneRow + } + if len(childSchema) == 1 { + // For UnaryPlan (such as Selection, Limit ...), we can set the child's unique key as its unique key. + // If the GroupExpr is a schemaProducer, schema.Keys will be reset below in `BuildKeyInfo()`. + g.Prop.Schema.Keys = childSchema[0].Keys + } + e.ExprNode.BuildKeyInfo(g.Prop.Schema, childSchema) + g.Prop.MaxOneRow = e.ExprNode.MaxOneRow() || plannercore.HasMaxOneRow(e.ExprNode, childMaxOneRow) +} diff --git a/planner/memo/group_test.go b/planner/memo/group_test.go index e77ea3e22844e..fb8ff8a60c274 100644 --- a/planner/memo/group_test.go +++ b/planner/memo/group_test.go @@ -14,6 +14,7 @@ package memo import ( + "context" "testing" .
"github.com/pingcap/check" @@ -36,8 +37,9 @@ var _ = Suite(&testMemoSuite{}) type testMemoSuite struct { *parser.Parser - is infoschema.InfoSchema - sctx sessionctx.Context + is infoschema.InfoSchema + schema *expression.Schema + sctx sessionctx.Context } func (s *testMemoSuite) SetUpSuite(c *C) { @@ -45,6 +47,7 @@ func (s *testMemoSuite) SetUpSuite(c *C) { s.is = infoschema.MockInfoSchema([]*model.TableInfo{plannercore.MockSignedTable()}) s.sctx = plannercore.MockContext() s.Parser = parser.New() + s.schema = expression.NewSchema() } func (s *testMemoSuite) TearDownSuite(c *C) { @@ -54,7 +57,7 @@ func (s *testMemoSuite) TearDownSuite(c *C) { func (s *testMemoSuite) TestNewGroup(c *C) { p := &plannercore.LogicalLimit{} expr := NewGroupExpr(p) - g := NewGroupWithSchema(expr, nil) + g := NewGroupWithSchema(expr, s.schema) c.Assert(g.Equivalents.Len(), Equals, 1) c.Assert(g.Equivalents.Front().Value.(*GroupExpr), Equals, expr) @@ -65,7 +68,7 @@ func (s *testMemoSuite) TestNewGroup(c *C) { func (s *testMemoSuite) TestGroupInsert(c *C) { p := &plannercore.LogicalLimit{} expr := NewGroupExpr(p) - g := NewGroupWithSchema(expr, nil) + g := NewGroupWithSchema(expr, s.schema) c.Assert(g.Insert(expr), IsFalse) expr.selfFingerprint = "1" c.Assert(g.Insert(expr), IsTrue) @@ -74,7 +77,7 @@ func (s *testMemoSuite) TestGroupInsert(c *C) { func (s *testMemoSuite) TestGroupDelete(c *C) { p := &plannercore.LogicalLimit{} expr := NewGroupExpr(p) - g := NewGroupWithSchema(expr, nil) + g := NewGroupWithSchema(expr, s.schema) c.Assert(g.Equivalents.Len(), Equals, 1) g.Delete(expr) @@ -86,7 +89,7 @@ func (s *testMemoSuite) TestGroupDelete(c *C) { func (s *testMemoSuite) TestGroupDeleteAll(c *C) { expr := NewGroupExpr(plannercore.LogicalSelection{}.Init(s.sctx, 0)) - g := NewGroupWithSchema(expr, nil) + g := NewGroupWithSchema(expr, s.schema) c.Assert(g.Insert(NewGroupExpr(plannercore.LogicalLimit{}.Init(s.sctx, 0))), IsTrue) c.Assert(g.Insert(NewGroupExpr(plannercore.LogicalProjection{}.Init(s.sctx, 0))), IsTrue) c.Assert(g.Equivalents.Len(), Equals, 3) @@ -102,7 +105,7 @@ func (s *testMemoSuite) TestGroupDeleteAll(c *C) { func (s *testMemoSuite) TestGroupExists(c *C) { p := &plannercore.LogicalLimit{} expr := NewGroupExpr(p) - g := NewGroupWithSchema(expr, nil) + g := NewGroupWithSchema(expr, s.schema) c.Assert(g.Exists(expr), IsTrue) g.Delete(expr) @@ -116,7 +119,7 @@ func (s *testMemoSuite) TestGroupGetFirstElem(c *C) { expr3 := NewGroupExpr(plannercore.LogicalLimit{}.Init(s.sctx, 0)) expr4 := NewGroupExpr(plannercore.LogicalProjection{}.Init(s.sctx, 0)) - g := NewGroupWithSchema(expr0, nil) + g := NewGroupWithSchema(expr0, s.schema) g.Insert(expr1) g.Insert(expr2) g.Insert(expr3) @@ -139,7 +142,7 @@ func (impl *fakeImpl) GetPlan() plannercore.PhysicalPlan { return func (impl *fakeImpl) AttachChildren(...Implementation) Implementation { return nil } func (impl *fakeImpl) ScaleCostLimit(float64) float64 { return 0 } func (s *testMemoSuite) TestGetInsertGroupImpl(c *C) { - g := NewGroupWithSchema(NewGroupExpr(plannercore.LogicalLimit{}.Init(s.sctx, 0)), nil) + g := NewGroupWithSchema(NewGroupExpr(plannercore.LogicalLimit{}.Init(s.sctx, 0)), s.schema) emptyProp := &property.PhysicalProperty{} orderProp := &property.PhysicalProperty{Items: []property.Item{{Col: &expression.Column{}}}} @@ -180,7 +183,7 @@ func (s *testMemoSuite) TestEngineTypeSet(c *C) { func (s *testMemoSuite) TestFirstElemAfterDelete(c *C) { oldExpr := NewGroupExpr(plannercore.LogicalLimit{}.Init(s.sctx, 0)) - g := NewGroupWithSchema(oldExpr, 
nil) + g := NewGroupWithSchema(oldExpr, s.schema) newExpr := NewGroupExpr(plannercore.LogicalLimit{}.Init(s.sctx, 0)) g.Insert(newExpr) c.Assert(g.GetFirstElem(OperandLimit), NotNil) @@ -191,3 +194,45 @@ func (s *testMemoSuite) TestFirstElemAfterDelete(c *C) { g.Delete(newExpr) c.Assert(g.GetFirstElem(OperandLimit), IsNil) } + +func (s *testMemoSuite) TestBuildKeyInfo(c *C) { + // case 1: primary key has constant constraint + stmt1, err := s.ParseOneStmt("select a from t where a = 10", "", "") + c.Assert(err, IsNil) + p1, _, err := plannercore.BuildLogicalPlan(context.Background(), s.sctx, stmt1, s.is) + c.Assert(err, IsNil) + logic1, ok := p1.(plannercore.LogicalPlan) + c.Assert(ok, IsTrue) + group1 := Convert2Group(logic1) + group1.BuildKeyInfo() + c.Assert(group1.Prop.MaxOneRow, IsTrue) + c.Assert(len(group1.Prop.Schema.Keys), Equals, 1) + + // case 2: group by column is key + stmt2, err := s.ParseOneStmt("select b, sum(a) from t group by b", "", "") + c.Assert(err, IsNil) + p2, _, err := plannercore.BuildLogicalPlan(context.Background(), s.sctx, stmt2, s.is) + c.Assert(err, IsNil) + logic2, ok := p2.(plannercore.LogicalPlan) + c.Assert(ok, IsTrue) + group2 := Convert2Group(logic2) + group2.BuildKeyInfo() + c.Assert(group2.Prop.MaxOneRow, IsFalse) + c.Assert(len(group2.Prop.Schema.Keys), Equals, 1) + + // case 3: build key info for new Group + newSel := plannercore.LogicalSelection{}.Init(s.sctx, 0) + newExpr1 := NewGroupExpr(newSel) + newExpr1.SetChildren(group2) + newGroup1 := NewGroupWithSchema(newExpr1, group2.Prop.Schema) + newGroup1.BuildKeyInfo() + c.Assert(len(newGroup1.Prop.Schema.Keys), Equals, 1) + + // case 4: build maxOneRow for new Group + newLimit := plannercore.LogicalLimit{Count: 1}.Init(s.sctx, 0) + newExpr2 := NewGroupExpr(newLimit) + newExpr2.SetChildren(group2) + newGroup2 := NewGroupWithSchema(newExpr2, group2.Prop.Schema) + newGroup2.BuildKeyInfo() + c.Assert(newGroup2.Prop.MaxOneRow, IsTrue) +} diff --git a/planner/memo/implementation.go b/planner/memo/implementation.go index 56980c67bf64f..dd5a430f181b5 100644 --- a/planner/memo/implementation.go +++ b/planner/memo/implementation.go @@ -28,7 +28,7 @@ type Implementation interface { AttachChildren(children ...Implementation) Implementation // ScaleCostLimit scales costLimit by the Implementation's concurrency factor. - // Implementation like TableGather may divide the cost by its scan concurrency, + // Implementation like TiKVSingleGather may divide the cost by its scan concurrency, // so when we pass the costLimit for pruning the search space, we have to scale // the costLimit by its concurrency factor. ScaleCostLimit(costLimit float64) float64 diff --git a/planner/memo/pattern.go b/planner/memo/pattern.go index fff3529bd4fba..28f94ffc41cf5 100644 --- a/planner/memo/pattern.go +++ b/planner/memo/pattern.go @@ -55,10 +55,12 @@ const ( OperandLock // OperandLimit is the operand for LogicalLimit. OperandLimit - // OperandTableGather is the operand for TableGather. - OperandTableGather + // OperandTiKVSingleGather is the operand for TiKVSingleGather. + OperandTiKVSingleGather // OperandTableScan is the operand for TableScan. OperandTableScan + // OperandIndexScan is the operand for IndexScan. + OperandIndexScan // OperandShow is the operand for Show. OperandShow // OperandUnsupported is the operand for unsupported operators. 
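Stepping back to the memo.Group change above: hasBuiltKeyInfo is a one-shot memoization guard, so key information is derived once per Group no matter how many rules ask for it. A stripped-down standalone version of the same pattern:

package main

import "fmt"

type group struct {
	hasBuiltKeyInfo bool
	keys            []string
	builds          int
}

func (g *group) buildKeyInfo() {
	if g.hasBuiltKeyInfo {
		return // already derived for this group; later callers reuse it
	}
	g.hasBuiltKeyInfo = true
	g.builds++
	g.keys = append(g.keys, "pk") // stand-in for the real key derivation
}

func main() {
	g := &group{}
	g.buildKeyInfo()
	g.buildKeyInfo() // no-op: derivation runs once per group
	fmt.Println(g.keys, g.builds) // [pk] 1
}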
@@ -96,10 +98,12 @@ func GetOperand(p plannercore.LogicalPlan) Operand { return OperandLock case *plannercore.LogicalLimit: return OperandLimit - case *plannercore.TableGather: - return OperandTableGather - case *plannercore.TableScan: + case *plannercore.TiKVSingleGather: + return OperandTiKVSingleGather + case *plannercore.LogicalTableScan: return OperandTableScan + case *plannercore.LogicalIndexScan: + return OperandIndexScan case *plannercore.LogicalShow: return OperandShow default: diff --git a/planner/property/logical_property.go b/planner/property/logical_property.go index a770e83755312..2a45e55c73c23 100644 --- a/planner/property/logical_property.go +++ b/planner/property/logical_property.go @@ -21,6 +21,7 @@ import ( // or statistics of columns in schema for output of Group. // All group expressions in a group share same logical property. type LogicalProperty struct { - Stats *StatsInfo - Schema *expression.Schema + Stats *StatsInfo + Schema *expression.Schema + MaxOneRow bool } diff --git a/planner/util/path.go b/planner/util/path.go new file mode 100644 index 0000000000000..5f12f9231eeb0 --- /dev/null +++ b/planner/util/path.go @@ -0,0 +1,118 @@ +// Copyright 2019 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package util + +import ( + "github.com/pingcap/parser/ast" + "github.com/pingcap/parser/model" + "github.com/pingcap/tidb/expression" + "github.com/pingcap/tidb/kv" + "github.com/pingcap/tidb/types" + "github.com/pingcap/tidb/util/ranger" ) + +// AccessPath indicates the way we access a table: by using a single index, or by using multiple indexes, +// or just by using a table scan. +type AccessPath struct { + Index *model.IndexInfo + FullIdxCols []*expression.Column + FullIdxColLens []int + IdxCols []*expression.Column + IdxColLens []int + Ranges []*ranger.Range + // CountAfterAccess is the row count after we apply range seek and before we use other filters to filter data. + CountAfterAccess float64 + // CountAfterIndex is the row count after we apply filters on index and before we apply the table filters. + CountAfterIndex float64 + AccessConds []expression.Expression + EqCondCount int + EqOrInCondCount int + IndexFilters []expression.Expression + TableFilters []expression.Expression + // IsTablePath indicates whether this path is a table path. + IsTablePath bool + // Forced means this path is generated by `use/force index()`. + Forced bool + // PartialIndexPaths stores all index access paths. + // If there are extra filters, store them in TableFilters. + PartialIndexPaths []*AccessPath + + IsDNFCond bool + + StoreType kv.StoreType +} + +// SplitCorColAccessCondFromFilters moves the necessary filters in the form of index_col = correlated_col to access conditions.
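Before the function itself, a toy model of the filter shape it targets may help: only an equality whose one side is the index column in question and whose other side is a constant or a correlated column may move into the access conditions. Illustrative only, with expressions reduced to tagged strings:

package main

import (
	"fmt"
	"strings"
)

// isColEqCorColOrConstant, toy version: "eq(col:b,corr)" and "eq(col:b,const)"
// match column b; any other operator or operand shape does not.
func isColEqCorColOrConstant(filter, col string) bool {
	if !strings.HasPrefix(filter, "eq(") || !strings.HasSuffix(filter, ")") {
		return false
	}
	args := strings.Split(filter[3:len(filter)-1], ",")
	if len(args) != 2 {
		return false
	}
	for i, a := range args {
		other := args[1-i]
		if a == "col:"+col && (other == "const" || other == "corr") {
			return true
		}
	}
	return false
}

func main() {
	fmt.Println(isColEqCorColOrConstant("eq(col:b,corr)", "b"))  // true
	fmt.Println(isColEqCorColOrConstant("lt(col:b,const)", "b")) // false: not eq
	fmt.Println(isColEqCorColOrConstant("eq(col:a,corr)", "b"))  // false: wrong column
}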
+func (path *AccessPath) SplitCorColAccessCondFromFilters(eqOrInCount int) (access, remained []expression.Expression) { + access = make([]expression.Expression, len(path.IdxCols)-eqOrInCount) + used := make([]bool, len(path.TableFilters)) + for i := eqOrInCount; i < len(path.IdxCols); i++ { + matched := false + for j, filter := range path.TableFilters { + if used[j] || !isColEqCorColOrConstant(filter, path.IdxCols[i]) { + continue + } + matched = true + access[i-eqOrInCount] = filter + if path.IdxColLens[i] == types.UnspecifiedLength { + used[j] = true + } + break + } + if !matched { + access = access[:i-eqOrInCount] + break + } + } + for i, ok := range used { + if !ok { + remained = append(remained, path.TableFilters[i]) + } + } + return access, remained +} + +// isColEqCorColOrConstant checks whether the expression is an eq function where one side is a constant or a correlated column +// and the other side is a column. +func isColEqCorColOrConstant(filter expression.Expression, col *expression.Column) bool { + f, ok := filter.(*expression.ScalarFunction) + if !ok || f.FuncName.L != ast.EQ { + return false + } + if c, ok := f.GetArgs()[0].(*expression.Column); ok { + if _, ok := f.GetArgs()[1].(*expression.Constant); ok { + if col.Equal(nil, c) { + return true + } + } + if _, ok := f.GetArgs()[1].(*expression.CorrelatedColumn); ok { + if col.Equal(nil, c) { + return true + } + } + } + if c, ok := f.GetArgs()[1].(*expression.Column); ok { + if _, ok := f.GetArgs()[0].(*expression.Constant); ok { + if col.Equal(nil, c) { + return true + } + } + if _, ok := f.GetArgs()[0].(*expression.CorrelatedColumn); ok { + if col.Equal(nil, c) { + return true + } + } + } + return false +} diff --git a/plugin/conn_ip_example/conn_ip_example.go b/plugin/conn_ip_example/conn_ip_example.go index 08c14f9250445..5aca75690d317 100644 --- a/plugin/conn_ip_example/conn_ip_example.go +++ b/plugin/conn_ip_example/conn_ip_example.go @@ -45,5 +45,4 @@ func OnGeneralEvent(ctx context.Context, sctx *variable.SessionVars, event plugi fmt.Println("conn_ip_example notifiy called") fmt.Println("variable test: ", variable.GetSysVar("conn_ip_example_test_variable").Value) fmt.Printf("new connection by %s\n", ctx.Value("ip")) - return } diff --git a/plugin/plugin.go b/plugin/plugin.go index 6549660c87ca4..0b0c3c060804e 100644 --- a/plugin/plugin.go +++ b/plugin/plugin.go @@ -59,9 +59,7 @@ func (p *plugins) clone() *plugins { for key, value := range p.versions { np.versions[key] = value } - for key, value := range p.dyingPlugins { - np.dyingPlugins[key] = value - } + copy(np.dyingPlugins, p.dyingPlugins) return np } diff --git a/privilege/privileges/errors.go b/privilege/privileges/errors.go index f4cdca47e41a8..55fbc0a3d3842 100644 --- a/privilege/privileges/errors.go +++ b/privilege/privileges/errors.go @@ -20,19 +20,17 @@ import ( // privilege error codes.
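To make the split above concrete, a worked example under an assumed schema (table and predicate names are illustrative, not from this patch):

// Given KEY idx(a, b, c), eqOrInCount = 1 (a = 1 is already an access cond),
// and TableFilters = [eq(b, corr_col), gt(c, 1)]:
//   i = 1 (column b): eq(b, corr_col) matches and is promoted into access;
//                     it is marked used only if b carries no prefix length.
//   i = 2 (column c): gt(c, 1) is not col = const/corr, so matching stops and
//                     access is truncated to [eq(b, corr_col)].
// Every filter not marked used, here gt(c, 1), is returned in remained.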
const ( - codeInvalidPrivilegeType terror.ErrCode = 1 - codeInvalidUserNameFormat = 2 - codeErrNonexistingGrant = mysql.ErrNonexistingGrant + codeInvalidPrivilegeType terror.ErrCode = 1 ) var ( errInvalidPrivilegeType = terror.ClassPrivilege.New(codeInvalidPrivilegeType, "unknown privilege type %s") - errNonexistingGrant = terror.ClassPrivilege.New(codeErrNonexistingGrant, mysql.MySQLErrName[mysql.ErrNonexistingGrant]) + errNonexistingGrant = terror.ClassPrivilege.New(mysql.ErrNonexistingGrant, mysql.MySQLErrName[mysql.ErrNonexistingGrant]) ) func init() { privilegeMySQLErrCodes := map[terror.ErrCode]uint16{ - codeErrNonexistingGrant: mysql.ErrNonexistingGrant, + mysql.ErrNonexistingGrant: mysql.ErrNonexistingGrant, } terror.ErrClassToMySQLCodes[terror.ClassPrivilege] = privilegeMySQLErrCodes } diff --git a/server/http_handler.go b/server/http_handler.go index df44009cb1022..0f8a8841b7d54 100644 --- a/server/http_handler.go +++ b/server/http_handler.go @@ -77,8 +77,12 @@ const ( ) // For query string -const qTableID = "table_id" -const qLimit = "limit" +const ( + qTableID = "table_id" + qLimit = "limit" + qOperation = "op" + qSeconds = "seconds" +) const ( headerContentType = "Content-Type" @@ -669,7 +673,27 @@ func (h configReloadHandler) ServeHTTP(w http.ResponseWriter, req *http.Request) // ServeHTTP recovers binlog service. func (h binlogRecover) ServeHTTP(w http.ResponseWriter, req *http.Request) { - binloginfo.DisableSkipBinlogFlag() + op := req.FormValue(qOperation) + switch op { + case "reset": + binloginfo.ResetSkippedCommitterCounter() + case "nowait": + binloginfo.DisableSkipBinlogFlag() + case "status": + default: + sec, err := strconv.ParseInt(req.FormValue(qSeconds), 10, 64) + if sec <= 0 || err != nil { + sec = 1800 + } + binloginfo.DisableSkipBinlogFlag() + timeout := time.Duration(sec) * time.Second + err = binloginfo.WaitBinlogRecover(timeout) + if err != nil { + writeError(w, err) + return + } + } + writeData(w, binloginfo.GetBinlogStatus()) } type tableFlashReplicaInfo struct { @@ -1288,50 +1312,6 @@ func (h regionHandler) ServeHTTP(w http.ResponseWriter, req *http.Request) { writeData(w, regionDetail) } -// NewFrameItemFromRegionKey creates a FrameItem with region's startKey or endKey, -// returns err when key is illegal. -func NewFrameItemFromRegionKey(key []byte) (frame *FrameItem, err error) { - frame = &FrameItem{} - frame.TableID, frame.IndexID, frame.IsRecord, err = tablecodec.DecodeKeyHead(key) - if err == nil { - if frame.IsRecord { - _, frame.RecordID, err = tablecodec.DecodeRecordKey(key) - } else { - _, _, frame.IndexValues, err = tablecodec.DecodeIndexKey(key) - } - log.Warnf("decode region key %q fail: %v", key, err) - // Ignore decode errors. - err = nil - return - } - if bytes.HasPrefix(key, tablecodec.TablePrefix()) { - // If SplitTable is enabled, the key may be `t{id}`. - if len(key) == tablecodec.TableSplitKeyLen { - frame.TableID = tablecodec.DecodeTableID(key) - return frame, nil - } - return nil, errors.Trace(err) - } - - // key start with tablePrefix must be either record key or index key - // That's means table's record key and index key are always together - // in the continuous interval. And for key with prefix smaller than - // tablePrefix, is smaller than all tables. While for key with prefix - // bigger than tablePrefix, means is bigger than all tables. 
- err = nil - if bytes.Compare(key, tablecodec.TablePrefix()) < 0 { - frame.TableID = math.MinInt64 - frame.IndexID = math.MinInt64 - frame.IsRecord = false - return - } - // bigger than tablePrefix, means is bigger than all tables. - frame.TableID = math.MaxInt64 - frame.IndexID = math.MaxInt64 - frame.IsRecord = true - return -} - // parseQuery is used to parse query string in URL with shouldUnescape, due to golang http package can not distinguish // query like "?a=" and "?a". We rewrite it to separate these two queries. e.g. // "?a=" which means that a is an empty string ""; diff --git a/server/http_handler_test.go b/server/http_handler_test.go index 5369ed298d3a5..0b0ff73439511 100644 --- a/server/http_handler_test.go +++ b/server/http_handler_test.go @@ -40,6 +40,7 @@ import ( "github.com/pingcap/tidb/meta" "github.com/pingcap/tidb/session" "github.com/pingcap/tidb/sessionctx" + "github.com/pingcap/tidb/sessionctx/binloginfo" "github.com/pingcap/tidb/sessionctx/stmtctx" "github.com/pingcap/tidb/sessionctx/variable" "github.com/pingcap/tidb/store/helper" @@ -237,6 +238,73 @@ func (ts *HTTPHandlerTestSuite) TestGetRegionByIDWithError(c *C) { defer resp.Body.Close() } +func (ts *HTTPHandlerTestSuite) TestBinlogRecover(c *C) { + ts.startServer(c) + defer ts.stopServer(c) + binloginfo.EnableSkipBinlogFlag() + c.Assert(binloginfo.IsBinlogSkipped(), Equals, true) + resp, err := http.Get(fmt.Sprintf("http://127.0.0.1:10090/binlog/recover")) + c.Assert(err, IsNil) + defer resp.Body.Close() + c.Assert(resp.StatusCode, Equals, http.StatusOK) + c.Assert(binloginfo.IsBinlogSkipped(), Equals, false) + + // Invalid operation will use the default operation. + binloginfo.EnableSkipBinlogFlag() + c.Assert(binloginfo.IsBinlogSkipped(), Equals, true) + resp, err = http.Get(fmt.Sprintf("http://127.0.0.1:10090/binlog/recover?op=abc")) + c.Assert(err, IsNil) + defer resp.Body.Close() + c.Assert(resp.StatusCode, Equals, http.StatusOK) + c.Assert(binloginfo.IsBinlogSkipped(), Equals, false) + + binloginfo.EnableSkipBinlogFlag() + c.Assert(binloginfo.IsBinlogSkipped(), Equals, true) + resp, err = http.Get(fmt.Sprintf("http://127.0.0.1:10090/binlog/recover?op=abc&seconds=1")) + c.Assert(err, IsNil) + defer resp.Body.Close() + c.Assert(resp.StatusCode, Equals, http.StatusOK) + c.Assert(binloginfo.IsBinlogSkipped(), Equals, false) + + binloginfo.EnableSkipBinlogFlag() + c.Assert(binloginfo.IsBinlogSkipped(), Equals, true) + binloginfo.AddOneSkippedCommitter() + resp, err = http.Get(fmt.Sprintf("http://127.0.0.1:10090/binlog/recover?op=abc&seconds=1")) + c.Assert(err, IsNil) + defer resp.Body.Close() + c.Assert(resp.StatusCode, Equals, http.StatusBadRequest) + c.Assert(binloginfo.IsBinlogSkipped(), Equals, false) + binloginfo.RemoveOneSkippedCommitter() + + binloginfo.AddOneSkippedCommitter() + c.Assert(binloginfo.SkippedCommitterCount(), Equals, int32(1)) + resp, err = http.Get(fmt.Sprintf("http://127.0.0.1:10090/binlog/recover?op=reset")) + c.Assert(err, IsNil) + defer resp.Body.Close() + c.Assert(resp.StatusCode, Equals, http.StatusOK) + c.Assert(binloginfo.SkippedCommitterCount(), Equals, int32(0)) + + binloginfo.EnableSkipBinlogFlag() + resp, err = http.Get(fmt.Sprintf("http://127.0.0.1:10090/binlog/recover?op=nowait")) + c.Assert(err, IsNil) + defer resp.Body.Close() + c.Assert(resp.StatusCode, Equals, http.StatusOK) + c.Assert(binloginfo.IsBinlogSkipped(), Equals, false) + + // Only the first should work. 
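The reworked /binlog/recover handler is driven entirely by the op query parameter; a hedged summary with a client-side sketch (address and port are illustrative):

// op omitted/unknown: clear the skipBinlog flag, then wait up to `seconds`
//                     (default 1800) for skipped committers to drain.
// op=nowait:          clear the flag without waiting.
// op=reset:           zero the skipped-committer counter.
// op=status:          only report the current status.
resp, err := http.Get("http://127.0.0.1:10080/binlog/recover?op=status")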
+ binloginfo.EnableSkipBinlogFlag() + resp, err = http.Get(fmt.Sprintf("http://127.0.0.1:10090/binlog/recover?op=nowait&op=reset")) + c.Assert(err, IsNil) + defer resp.Body.Close() + c.Assert(resp.StatusCode, Equals, http.StatusOK) + c.Assert(binloginfo.IsBinlogSkipped(), Equals, false) + + resp, err = http.Get(fmt.Sprintf("http://127.0.0.1:10090/binlog/recover?op=status")) + c.Assert(err, IsNil) + defer resp.Body.Close() + c.Assert(resp.StatusCode, Equals, http.StatusOK) +} + func (ts *HTTPHandlerTestSuite) TestRegionsFromMeta(c *C) { ts.startServer(c) defer ts.stopServer(c) @@ -379,6 +447,17 @@ func (ts *HTTPHandlerTestSuite) TestGetTableMVCC(c *C) { err = decoder.Decode(&data2) c.Assert(err, IsNil) c.Assert(data2, DeepEquals, data) + + resp, err = http.Get(fmt.Sprintf("http://127.0.0.1:10090/mvcc/key/tidb/test/1?decode=true")) + c.Assert(err, IsNil) + decoder = json.NewDecoder(resp.Body) + var data3 map[string]interface{} + err = decoder.Decode(&data3) + c.Assert(err, IsNil) + c.Assert(data3["key"], NotNil) + c.Assert(data3["info"], NotNil) + c.Assert(data3["data"], NotNil) + c.Assert(data3["decode_error"], IsNil) } func (ts *HTTPHandlerTestSuite) TestGetMVCCNotFound(c *C) { diff --git a/server/http_status.go b/server/http_status.go index d2dd4832531e2..90db784cbab2d 100644 --- a/server/http_status.go +++ b/server/http_status.go @@ -280,7 +280,7 @@ func (s *Server) setupStatuServerAndRPCServer(addr string, serverMux *http.Serve grpcL := m.Match(cmux.Any()) s.statusServer = &http.Server{Addr: addr, Handler: CorsHandler{handler: serverMux, cfg: s.cfg}} - s.grpcServer = NewRPCServer(s.cfg.Security) + s.grpcServer = NewRPCServer(s.cfg.Security, s.dom, s) go util.WithRecovery(func() { err := s.grpcServer.Serve(grpcL) diff --git a/server/rpc_server.go b/server/rpc_server.go index e195be0cb35a5..8ce8a8f126090 100644 --- a/server/rpc_server.go +++ b/server/rpc_server.go @@ -14,17 +14,29 @@ package server import ( + "context" + "fmt" + + "github.com/pingcap/kvproto/pkg/coprocessor" "github.com/pingcap/kvproto/pkg/diagnosticspb" + "github.com/pingcap/kvproto/pkg/tikvpb" "github.com/pingcap/sysutil" "github.com/pingcap/tidb/config" + "github.com/pingcap/tidb/domain" + "github.com/pingcap/tidb/executor" + "github.com/pingcap/tidb/session" + "github.com/pingcap/tidb/store/mockstore/mocktikv" + "github.com/pingcap/tidb/util" "github.com/pingcap/tidb/util/logutil" + "github.com/pingcap/tidb/util/memory" + "github.com/pingcap/tidb/util/stringutil" "go.uber.org/zap" "google.golang.org/grpc" "google.golang.org/grpc/credentials" ) // NewRPCServer creates a new rpc server. -func NewRPCServer(security config.Security) *grpc.Server { +func NewRPCServer(security config.Security, dom *domain.Domain, sm util.SessionManager) *grpc.Server { defer func() { if v := recover(); v != nil { logutil.BgLogger().Error("panic in TiDB RPC server", zap.Any("stack", v)) @@ -41,6 +53,69 @@ func NewRPCServer(security config.Security) *grpc.Server { if s == nil { s = grpc.NewServer() } - diagnosticspb.RegisterDiagnosticsServer(s, &sysutil.DiagnoseServer{}) + rpcSrv := &rpcServer{ + dom: dom, + sm: sm, + } + // For redirecting the cop task. + mocktikv.TiDBRPCServerCoprocessorHandler = rpcSrv.handleCopRequest + diagnosticspb.RegisterDiagnosticsServer(s, rpcSrv) + tikvpb.RegisterTikvServer(s, rpcSrv) return s } + +// rpcServer contains the following two services: +// 1. Diagnose service, which is used for SQL diagnosis. +// 2. Coprocessor service, which reuses the TikvServer interface but only supports the Coprocessor interface for now.
+// The Coprocessor service handles cop tasks from other TiDB servers. Currently, it is only used for reading the cluster memory tables. +type rpcServer struct { + sysutil.DiagnoseServer + tikvpb.TikvServer + dom *domain.Domain + sm util.SessionManager +} + +// Coprocessor implements the TiKVServer interface. +func (s *rpcServer) Coprocessor(ctx context.Context, in *coprocessor.Request) (resp *coprocessor.Response, err error) { + resp = &coprocessor.Response{} + defer func() { + if v := recover(); v != nil { + logutil.BgLogger().Error("panic in TiDB RPC server coprocessor", zap.Any("stack", v)) + resp.OtherError = fmt.Sprintf("rpc coprocessor panic, :%v", v) + } + }() + resp = s.handleCopRequest(ctx, in) + return resp, nil +} + +// handleCopRequest handles the cop dag request. +func (s *rpcServer) handleCopRequest(ctx context.Context, req *coprocessor.Request) *coprocessor.Response { + resp := &coprocessor.Response{} + se, err := s.createSession() + if err != nil { + resp.OtherError = err.Error() + return resp + } + defer se.Close() + + h := executor.NewCoprocessorDAGHandler(se) + return h.HandleRequest(ctx, req) +} + +func (s *rpcServer) createSession() (session.Session, error) { + se, err := session.CreateSessionWithDomain(s.dom.Store(), s.dom) + if err != nil { + return nil, err + } + do := domain.GetDomain(se) + is := do.InfoSchema() + // TODO: Need user and host to do privilege check. + se.GetSessionVars().TxnCtx.InfoSchema = is + // This is to disable parallel hash agg. + // TODO: remove this. + se.GetSessionVars().HashAggPartialConcurrency = 1 + se.GetSessionVars().HashAggFinalConcurrency = 1 + se.GetSessionVars().StmtCtx.MemTracker = memory.NewTracker(stringutil.StringerStr("coprocessor"), -1) + se.SetSessionManager(s.sm) + return se, nil +} diff --git a/server/server.go b/server/server.go index b83d7b945f43e..74f09b2bdb020 100644 --- a/server/server.go +++ b/server/server.go @@ -52,6 +52,7 @@ import ( "github.com/pingcap/parser/mysql" "github.com/pingcap/parser/terror" "github.com/pingcap/tidb/config" + "github.com/pingcap/tidb/domain" "github.com/pingcap/tidb/metrics" "github.com/pingcap/tidb/plugin" "github.com/pingcap/tidb/sessionctx/variable" @@ -87,8 +88,8 @@ var ( errUnknownFieldType = terror.ClassServer.New(codeUnknownFieldType, "unknown field type") errInvalidSequence = terror.ClassServer.New(codeInvalidSequence, "invalid sequence") errInvalidType = terror.ClassServer.New(codeInvalidType, "invalid type") - errNotAllowedCommand = terror.ClassServer.New(codeNotAllowedCommand, "the used command is not allowed with this TiDB version") - errAccessDenied = terror.ClassServer.New(codeAccessDenied, mysql.MySQLErrName[mysql.ErrAccessDenied]) + errNotAllowedCommand = terror.ClassServer.New(mysql.ErrNotAllowedCommand, mysql.MySQLErrName[mysql.ErrNotAllowedCommand]) + errAccessDenied = terror.ClassServer.New(mysql.ErrAccessDenied, mysql.MySQLErrName[mysql.ErrAccessDenied]) ) // DefaultCapability is the capability of the server when it is created using the default configuration. @@ -110,6 +111,7 @@ type Server struct { concurrentLimiter *TokenLimiter clients map[uint32]*clientConn capability uint32 + dom *domain.Domain // stopListenerCh is used when a critical error occurred, we don't want to exit the process, because there may be // a supervisor automatically restart it, then new client connection will be created, but we can't serve it. @@ -139,6 +141,11 @@ func (s *Server) releaseToken(token *Token) { s.concurrentLimiter.Put(token) } +// SetDomain is used to set the server domain.
+func (s *Server) SetDomain(dom *domain.Domain) { + s.dom = dom +} + // newConn creates a new *clientConn from a net.Conn. // It allocates a connection ID and random salt data for authentication. func (s *Server) newConn(conn net.Conn) *clientConn { @@ -617,21 +624,15 @@ func (s *Server) kickIdleConnection() { // Server error codes. const ( - codeUnknownFieldType = 1 - codeInvalidPayloadLen = 2 - codeInvalidSequence = 3 - codeInvalidType = 4 - - codeNotAllowedCommand = 1148 - codeAccessDenied = mysql.ErrAccessDenied - codeMaxExecTimeExceeded = mysql.ErrMaxExecTimeExceeded + codeUnknownFieldType = 1 + codeInvalidSequence = 3 + codeInvalidType = 4 ) func init() { serverMySQLErrCodes := map[terror.ErrCode]uint16{ - codeNotAllowedCommand: mysql.ErrNotAllowedCommand, - codeAccessDenied: mysql.ErrAccessDenied, - codeMaxExecTimeExceeded: mysql.ErrMaxExecTimeExceeded, + mysql.ErrNotAllowedCommand: mysql.ErrNotAllowedCommand, + mysql.ErrAccessDenied: mysql.ErrAccessDenied, } terror.ErrClassToMySQLCodes[terror.ClassServer] = serverMySQLErrCodes } diff --git a/server/server_test.go b/server/server_test.go index f0bcc9a95e680..e4dcdc039c3cc 100644 --- a/server/server_test.go +++ b/server/server_test.go @@ -808,7 +808,7 @@ func runTestErrorCode(c *C) { // Optimizer errors _, err = txn2.Exec("select *, * from test;") - checkErrorCode(c, err, tmysql.ErrParse) + checkErrorCode(c, err, tmysql.ErrInvalidWildCard) _, err = txn2.Exec("select row(1, 2) > 1;") checkErrorCode(c, err, tmysql.ErrOperandColumns) _, err = txn2.Exec("select * from test order by row(c, c);") diff --git a/session/bootstrap.go b/session/bootstrap.go index 04e8e439a0b70..fa5314d97e537 100644 --- a/session/bootstrap.go +++ b/session/bootstrap.go @@ -351,6 +351,7 @@ const ( version34 = 34 version35 = 35 version36 = 36 + version37 = 37 ) func checkBootstrapped(s Session) (bool, error) { @@ -555,6 +556,10 @@ func upgrade(s Session) { upgradeToVer36(s) } + if ver < version37 { + upgradeToVer37(s) + } + updateBootstrapVer(s) _, err = s.Execute(context.Background(), "COMMIT") @@ -877,6 +882,13 @@ func upgradeToVer36(s Session) { mustExecute(s, "UPDATE HIGH_PRIORITY mysql.user SET Shutdown_priv='Y' where User = 'root'") } +func upgradeToVer37(s Session) { + // When upgrading from an old TiDB with no 'tidb_enable_window_function' in GLOBAL_VARIABLES, initialize it with 0. + sql := fmt.Sprintf("INSERT IGNORE INTO %s.%s (`VARIABLE_NAME`, `VARIABLE_VALUE`) VALUES ('%s', '%d')", + mysql.SystemDB, mysql.GlobalVariablesTable, variable.TiDBEnableWindowFunction, 0) + mustExecute(s, sql) +} + // updateBootstrapVer updates bootstrap version variable in mysql.TiDB table. func updateBootstrapVer(s Session) { // Update bootstrap version. diff --git a/session/pessimistic_test.go b/session/pessimistic_test.go index f9648887abc9d..dd8dd3c7e5725 100644 --- a/session/pessimistic_test.go +++ b/session/pessimistic_test.go @@ -48,7 +48,7 @@ type testPessimisticSuite struct { func (s *testPessimisticSuite) SetUpSuite(c *C) { testleak.BeforeTest() // Set it to 300ms for testing lock resolve.
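Stepping back to the bootstrap change above: every schema upgrade follows the same three-step recipe, sketched here for a hypothetical future versionN (the helper name and SQL are illustrative):

// 1. Add the constant:      const versionN = N
// 2. Chain it in upgrade(): if ver < versionN { upgradeToVerN(s) }
// 3. Bump currentBootstrapVersion to versionN (done later in this patch).
func upgradeToVerN(s Session) {
	// The body must be idempotent (INSERT IGNORE, UPDATE ... WHERE, etc.),
	// since upgrade() may run more than once after a crashed bootstrap.
	mustExecute(s, "INSERT IGNORE INTO mysql.GLOBAL_VARIABLES VALUES ('some_var', '0')")
}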
- tikv.PessimisticLockTTL = 300 + tikv.ManagedLockTTL = 300 tikv.PrewriteMaxBackoff = 500 s.cluster = mocktikv.NewCluster() mocktikv.BootstrapWithSingleStore(s.cluster) diff --git a/session/session.go b/session/session.go index 4c144fe260864..79307a9bb74bb 100644 --- a/session/session.go +++ b/session/session.go @@ -44,6 +44,7 @@ import ( "github.com/pingcap/tidb/config" "github.com/pingcap/tidb/domain" "github.com/pingcap/tidb/executor" + "github.com/pingcap/tidb/infoschema" "github.com/pingcap/tidb/kv" "github.com/pingcap/tidb/meta" "github.com/pingcap/tidb/metrics" @@ -367,7 +368,7 @@ func (s *session) StoreQueryFeedback(feedback interface{}) { // FieldList returns fields list of a table. func (s *session) FieldList(tableName string) ([]*ast.ResultField, error) { - is := executor.GetInfoSchema(s) + is := infoschema.GetInfoSchema(s) dbName := model.NewCIStr(s.GetSessionVars().CurrentDB) tName := model.NewCIStr(tableName) table, err := is.TableByName(dbName, tName) @@ -596,7 +597,7 @@ func (s *session) String() string { const sqlLogMaxLen = 1024 // SchemaChangedWithoutRetry is used for testing. -var SchemaChangedWithoutRetry bool +var SchemaChangedWithoutRetry uint32 func (s *session) getSQLLabel() string { if s.sessionVars.InRestrictedSQL { @@ -610,7 +611,7 @@ func (s *session) isInternal() bool { } func (s *session) isTxnRetryableError(err error) bool { - if SchemaChangedWithoutRetry { + if atomic.LoadUint32(&SchemaChangedWithoutRetry) == 1 { return kv.IsTxnRetryableError(err) } return kv.IsTxnRetryableError(err) || domain.ErrInfoSchemaChanged.Equal(err) @@ -662,9 +663,6 @@ func (s *session) retry(ctx context.Context, maxCnt uint) (err error) { for i, sr := range nh.history { st := sr.st s.sessionVars.StmtCtx = sr.stmtCtx - s.sessionVars.StartTime = time.Now() - s.sessionVars.DurationCompile = time.Duration(0) - s.sessionVars.DurationParse = time.Duration(0) s.sessionVars.StmtCtx.ResetForRetry() s.sessionVars.PreparedParams = s.sessionVars.PreparedParams[:0] schemaVersion, err = st.RebuildPlan(ctx) @@ -799,6 +797,10 @@ func (s *session) ExecRestrictedSQLWithSnapshot(sql string) ([]chunk.Row, []*ast } // Set snapshot. if snapshot != 0 { + se.sessionVars.SnapshotInfoschema, err = domain.GetDomain(s).GetSnapshotInfoSchema(snapshot) + if err != nil { + return nil, nil, err + } if err := se.sessionVars.SetSystemVar(variable.TiDBSnapshot, strconv.FormatUint(snapshot, 10)); err != nil { return nil, nil, err } @@ -806,6 +808,7 @@ func (s *session) ExecRestrictedSQLWithSnapshot(sql string) ([]chunk.Row, []*ast if err := se.sessionVars.SetSystemVar(variable.TiDBSnapshot, ""); err != nil { logutil.BgLogger().Error("set tidbSnapshot error", zap.Error(err)) } + se.sessionVars.SnapshotInfoschema = nil }() } return execRestrictedSQL(ctx, se, sql) @@ -863,7 +866,7 @@ func createSessionFunc(store kv.Storage) pools.Factory { func createSessionWithDomainFunc(store kv.Storage) func(*domain.Domain) (pools.Resource, error) { return func(dom *domain.Domain) (pools.Resource, error) { - se, err := createSessionWithDomain(store, dom) + se, err := CreateSessionWithDomain(store, dom) if err != nil { return nil, err } @@ -1083,8 +1086,7 @@ func (s *session) execute(ctx context.Context, sql string) (recordSets []sqlexec charsetInfo, collation := s.sessionVars.GetCharsetInfo() // Step1: Compile query string to abstract syntax trees(ASTs). 
- startTS := time.Now() - s.GetSessionVars().StartTime = startTS + parseStartTime := time.Now() stmtNodes, warns, err := s.ParseSQL(ctx, sql, charsetInfo, collation) if err != nil { s.rollbackOnError(ctx) @@ -1093,7 +1095,7 @@ func (s *session) execute(ctx context.Context, sql string) (recordSets []sqlexec zap.String("SQL", sql)) return nil, util.SyntaxError(err) } - durParse := time.Since(startTS) + durParse := time.Since(parseStartTime) s.GetSessionVars().DurationParse = durParse isInternal := s.isInternal() if isInternal { @@ -1105,10 +1107,10 @@ func (s *session) execute(ctx context.Context, sql string) (recordSets []sqlexec compiler := executor.Compiler{Ctx: s} multiQuery := len(stmtNodes) > 1 for _, stmtNode := range stmtNodes { + s.sessionVars.StartTime = time.Now() s.PrepareTxnCtx(ctx) // Step2: Transform abstract syntax tree to a physical plan(stored in executor.ExecStmt). - startTS = time.Now() // Some executions are done in compile stage, so we reset them before compile. if err := executor.ResetContextOfStmt(s, stmtNode); err != nil { return nil, err @@ -1121,7 +1123,7 @@ func (s *session) execute(ctx context.Context, sql string) (recordSets []sqlexec zap.String("SQL", sql)) return nil, err } - durCompile := time.Since(startTS) + durCompile := time.Since(s.sessionVars.StartTime) s.GetSessionVars().DurationCompile = durCompile if isInternal { sessionExecuteCompileDurationInternal.Observe(durCompile.Seconds()) @@ -1171,7 +1173,7 @@ func (s *session) PrepareStmt(sql string) (stmtID uint32, paramCount int, fields // So we have to call PrepareTxnCtx here. s.PrepareTxnCtx(ctx) s.PrepareTxnFuture(ctx) - prepareExec := executor.NewPrepareExec(s, executor.GetInfoSchema(s), sql) + prepareExec := executor.NewPrepareExec(s, infoschema.GetInfoSchema(s), sql) err = prepareExec.Next(ctx, nil) if err != nil { return @@ -1198,7 +1200,7 @@ func (s *session) CachedPlanExec(ctx context.Context, stmtID uint32, prepareStmt *plannercore.CachedPrepareStmt, args []types.Datum) (sqlexec.RecordSet, error) { prepared := prepareStmt.PreparedAst // compile ExecStmt - is := executor.GetInfoSchema(s) + is := infoschema.GetInfoSchema(s) execAst := &ast.ExecuteStmt{ExecID: stmtID} if err := executor.ResetContextOfStmt(s, execAst); err != nil { return nil, err @@ -1251,7 +1253,7 @@ func (s *session) IsCachedExecOk(ctx context.Context, preparedStmt *plannercore. return false, nil } // check schema version - is := executor.GetInfoSchema(s) + is := infoschema.GetInfoSchema(s) if prepared.SchemaVersion != is.SchemaMetaVersion() { prepared.CachedPlan = nil return false, nil @@ -1710,11 +1712,11 @@ func createSession(store kv.Storage) (*session, error) { return s, nil } -// createSessionWithDomain creates a new Session and binds it with a Domain. +// CreateSessionWithDomain creates a new Session and binds it with a Domain. // We need this because when we start DDL in Domain, the DDL need a session // to change some system tables. But at that time, we have been already in // a lock context, which cause we can't call createSesion directly. 
-func createSessionWithDomain(store kv.Storage, dom *domain.Domain) (*session, error) { +func CreateSessionWithDomain(store kv.Storage, dom *domain.Domain) (*session, error) { s := &session{ store: store, parser: parser.New(), @@ -1736,7 +1738,7 @@ func createSessionWithDomain(store kv.Storage, dom *domain.Domain) (*session, er const ( notBootstrapped = 0 - currentBootstrapVersion = version36 + currentBootstrapVersion = version37 ) func getStoreBootstrapVersion(store kv.Storage) int64 { @@ -1836,6 +1838,7 @@ var builtinGlobalVariable = []string{ variable.TiDBOptScanFactor, variable.TiDBOptDescScanFactor, variable.TiDBOptMemoryFactor, + variable.TiDBOptDiskFactor, variable.TiDBOptConcurrencyFactor, variable.TiDBDistSQLScanConcurrency, variable.TiDBInitChunkSize, diff --git a/session/session_test.go b/session/session_test.go index 532e2ed1bc633..bf8502149b9fe 100644 --- a/session/session_test.go +++ b/session/session_test.go @@ -587,41 +587,6 @@ func (s *testSessionSuite) TestReadOnlyNotInHistory(c *C) { c.Assert(history.Count(), Equals, 0) } -func (s *testSessionSuite) TestNoHistoryWhenDisableRetry(c *C) { - tk := testkit.NewTestKitWithInit(c, s.store) - tk.MustExec("create table history (a int)") - tk.MustExec("set @@autocommit = 0") - - // retry_limit = 0 will not add history. - tk.MustExec("set @@tidb_retry_limit = 0") - tk.MustExec("insert history values (1)") - c.Assert(session.GetHistory(tk.Se).Count(), Equals, 0) - - // Disable auto_retry will add history for auto committed only - tk.MustExec("set @@autocommit = 1") - tk.MustExec("set @@tidb_retry_limit = 10") - tk.MustExec("set @@tidb_disable_txn_auto_retry = 1") - c.Assert(failpoint.Enable("github.com/pingcap/tidb/session/keepHistory", `1*return(true)->return(false)`), IsNil) - tk.MustExec("insert history values (1)") - c.Assert(session.GetHistory(tk.Se).Count(), Equals, 1) - c.Assert(failpoint.Disable("github.com/pingcap/tidb/session/keepHistory"), IsNil) - tk.MustExec("begin") - tk.MustExec("insert history values (1)") - c.Assert(session.GetHistory(tk.Se).Count(), Equals, 0) - tk.MustExec("commit") - - // Enable auto_retry will add history for both. 
- tk.MustExec("set @@tidb_disable_txn_auto_retry = 0") - c.Assert(failpoint.Enable("github.com/pingcap/tidb/session/keepHistory", `1*return(true)->return(false)`), IsNil) - tk.MustExec("insert history values (1)") - c.Assert(failpoint.Disable("github.com/pingcap/tidb/session/keepHistory"), IsNil) - c.Assert(session.GetHistory(tk.Se).Count(), Equals, 1) - tk.MustExec("begin") - tk.MustExec("insert history values (1)") - c.Assert(session.GetHistory(tk.Se).Count(), Equals, 2) - tk.MustExec("commit") -} - func (s *testSessionSuite) TestNoRetryForCurrentTxn(c *C) { tk := testkit.NewTestKitWithInit(c, s.store) tk1 := testkit.NewTestKitWithInit(c, s.store) @@ -783,7 +748,7 @@ func (s *testSessionSuite) TestSessionAuth(c *C) { c.Assert(tk.Se.Auth(&auth.UserIdentity{Username: "Any not exist username with zero password!", Hostname: "anyhost"}, []byte(""), []byte("")), IsFalse) } -func (s *testSessionSuite) TestSkipWithGrant(c *C) { +func (s *testSessionSerialSuite) TestSkipWithGrant(c *C) { tk := testkit.NewTestKitWithInit(c, s.store) save2 := privileges.SkipWithGrant @@ -1788,7 +1753,6 @@ type testSchemaSuiteBase struct { store kv.Storage lease time.Duration dom *domain.Domain - checkLeak func() } type testSchemaSuite struct { @@ -1902,9 +1866,9 @@ func (s *testSchemaSerialSuite) TestSchemaCheckerSQL(c *C) { tk.MustExec(`commit;`) // The schema version is out of date in the first transaction, and the SQL can't be retried. - session.SchemaChangedWithoutRetry = true + atomic.StoreUint32(&session.SchemaChangedWithoutRetry, 1) defer func() { - session.SchemaChangedWithoutRetry = false + atomic.StoreUint32(&session.SchemaChangedWithoutRetry, 0) }() tk.MustExec(`begin;`) tk1.MustExec(`alter table t modify column c bigint;`) @@ -2858,7 +2822,7 @@ func (s *testSessionSuite2) TestIsolationRead(c *C) { tk := testkit.NewTestKit(c, s.store) tk.Se, err = session.CreateSession4Test(s.store) c.Assert(err, IsNil) - c.Assert(len(tk.Se.GetSessionVars().GetIsolationReadEngines()), Equals, 2) + c.Assert(len(tk.Se.GetSessionVars().GetIsolationReadEngines()), Equals, 3) tk.MustExec("set @@tidb_isolation_read_engines = 'tiflash';") engines := tk.Se.GetSessionVars().GetIsolationReadEngines() c.Assert(len(engines), Equals, 1) diff --git a/session/tidb.go b/session/tidb.go index e50098d669330..a556bfbcbdb2a 100644 --- a/session/tidb.go +++ b/session/tidb.go @@ -367,17 +367,12 @@ func ResultSetToStringSlice(ctx context.Context, s Session, rs sqlexec.RecordSet // Session errors. var ( - ErrForUpdateCantRetry = terror.ClassSession.New(codeForUpdateCantRetry, - mysql.MySQLErrName[mysql.ErrForUpdateCantRetry]) -) - -const ( - codeForUpdateCantRetry terror.ErrCode = mysql.ErrForUpdateCantRetry + ErrForUpdateCantRetry = terror.ClassSession.New(mysql.ErrForUpdateCantRetry, mysql.MySQLErrName[mysql.ErrForUpdateCantRetry]) ) func init() { sessionMySQLErrCodes := map[terror.ErrCode]uint16{ - codeForUpdateCantRetry: mysql.ErrForUpdateCantRetry, + mysql.ErrForUpdateCantRetry: mysql.ErrForUpdateCantRetry, } terror.ErrClassToMySQLCodes[terror.ClassSession] = sessionMySQLErrCodes } diff --git a/session/txn.go b/session/txn.go index 4a04cb1acde57..45e89744ca3f1 100755 --- a/session/txn.go +++ b/session/txn.go @@ -292,7 +292,15 @@ func (st *TxnState) IterReverse(k kv.Key) (kv.Iterator, error) { } func (st *TxnState) cleanup() { - st.buf.Reset() + const sz4M = 4 << 20 + if st.buf.Size() > sz4M { + // The memory footprint for the large transaction could be huge here. 
+ // Each active session has its own buffer; we should free the buffer to + // avoid a memory leak. + st.buf = kv.NewMemDbBuffer(kv.DefaultTxnMembufCap) + } else { + st.buf.Reset() + } for key := range st.mutations { delete(st.mutations, key) } @@ -301,7 +309,12 @@ for i := 0; i < len(st.dirtyTableOP); i++ { st.dirtyTableOP[i] = empty } - st.dirtyTableOP = st.dirtyTableOP[:0] + if len(st.dirtyTableOP) > 256 { + // Reduce memory footprint for the large transaction. + st.dirtyTableOP = nil + } else { + st.dirtyTableOP = st.dirtyTableOP[:0] + } } } @@ -461,7 +474,6 @@ func (s *session) StmtCommit() error { // StmtRollback implements the sessionctx.Context interface. func (s *session) StmtRollback() { s.txn.cleanup() - return } // StmtGetMutation implements the sessionctx.Context interface. diff --git a/sessionctx/binloginfo/binloginfo.go b/sessionctx/binloginfo/binloginfo.go index e20c0822bd64a..cf8a61ac6148a 100644 --- a/sessionctx/binloginfo/binloginfo.go +++ b/sessionctx/binloginfo/binloginfo.go @@ -112,12 +112,83 @@ var statusListener = func(_ BinlogStatus) error { return nil } +// EnableSkipBinlogFlag enables the skipBinlog flag. +// NOTE: it is used *ONLY* for tests. +func EnableSkipBinlogFlag() { + atomic.StoreUint32(&skipBinlog, 1) + logutil.BgLogger().Warn("[binloginfo] enable the skipBinlog flag") +} + // DisableSkipBinlogFlag disables the skipBinlog flag. func DisableSkipBinlogFlag() { + atomic.StoreUint32(&skipBinlog, 0) + logutil.BgLogger().Warn("[binloginfo] disable the skipBinlog flag") } +// IsBinlogSkipped gets the skipBinlog flag. +func IsBinlogSkipped() bool { + return atomic.LoadUint32(&skipBinlog) > 0 +} + +// BinlogRecoverStatus is used to display the binlog recovery status after some operations. +type BinlogRecoverStatus struct { + Skipped bool + SkippedCommitterCounter int32 +} + +// GetBinlogStatus returns the binlog recovery status. +func GetBinlogStatus() *BinlogRecoverStatus { + return &BinlogRecoverStatus{ + Skipped: IsBinlogSkipped(), + SkippedCommitterCounter: SkippedCommitterCount(), + } +} + +var skippedCommitterCounter int32 + +// WaitBinlogRecover returns when all committing transactions have finished. +func WaitBinlogRecover(timeout time.Duration) error { + logutil.BgLogger().Warn("[binloginfo] start waiting for binlog recovering") + ticker := time.NewTicker(500 * time.Millisecond) + defer ticker.Stop() + start := time.Now() + for { + select { + case <-ticker.C: + if atomic.LoadInt32(&skippedCommitterCounter) == 0 { + logutil.BgLogger().Warn("[binloginfo] binlog recovered") + return nil + } + if time.Since(start) > timeout { + logutil.BgLogger().Warn("[binloginfo] waiting for binlog recovering timed out", + zap.Duration("duration", timeout)) + return errors.New("timeout") + } + } + } +} + +// SkippedCommitterCount returns the number of alive committers which skipped the binlog writing. +func SkippedCommitterCount() int32 { + return atomic.LoadInt32(&skippedCommitterCounter) } + +// ResetSkippedCommitterCounter is used to reset the skippedCommitterCounter. +func ResetSkippedCommitterCounter() { + atomic.StoreInt32(&skippedCommitterCounter, 0) + logutil.BgLogger().Warn("[binloginfo] skippedCommitterCounter is reset to 0") +} + +// AddOneSkippedCommitter adds one committer to skippedCommitterCounter. +func AddOneSkippedCommitter() { + atomic.AddInt32(&skippedCommitterCounter, 1) +} + +// RemoveOneSkippedCommitter removes one committer from skippedCommitterCounter.
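Taken together, the counter functions above form a small recovery protocol; one plausible committer-side shape, as a hedged sketch (the real call sites live in the two-phase-commit path and may differ; WriteResult is introduced below):

binloginfo.AddOneSkippedCommitter()
defer binloginfo.RemoveOneSkippedCommitter()
// WaitBinlogRecover polls until all such in-flight committers have drained.
wr := info.WriteBinlog(clusterID)
if err := wr.GetError(); err != nil {
	return err
}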
+func RemoveOneSkippedCommitter() { + atomic.AddInt32(&skippedCommitterCounter, -1) +} + // SetIgnoreError sets the ignoreError flag; this function is called when TiDB starts // up and finds config.Binlog.IgnoreError is true. func SetIgnoreError(on bool) { @@ -146,16 +217,32 @@ func RegisterStatusListener(listener func(BinlogStatus) error) { statusListener = listener } +// WriteResult is the result returned by WriteBinlog. +type WriteResult struct { + skipped bool + err error +} + +// Skipped reports whether the binlog writing was skipped. +func (wr *WriteResult) Skipped() bool { + return wr.skipped +} + +// GetError gets the error of WriteBinlog. +func (wr *WriteResult) GetError() error { + return wr.err +} + // WriteBinlog writes a binlog to Pump. -func (info *BinlogInfo) WriteBinlog(clusterID uint64) error { +func (info *BinlogInfo) WriteBinlog(clusterID uint64) *WriteResult { skip := atomic.LoadUint32(&skipBinlog) if skip > 0 { metrics.CriticalErrorCounter.Add(1) - return nil + return &WriteResult{true, nil} } if info.Client == nil { - return errors.New("pumps client is nil") + return &WriteResult{false, errors.New("pumps client is nil")} } // it will retry in PumpsClient if writing binlog fails. @@ -176,18 +263,18 @@ func (info *BinlogInfo) WriteBinlog(clusterID uint64) error { logutil.BgLogger().Warn("update binlog status failed", zap.Error(err)) } } - return nil + return &WriteResult{true, nil} } if strings.Contains(err.Error(), "received message larger than max") { // This kind of error is not critical, return directly. - return errors.Errorf("binlog data is too large (%s)", err.Error()) + return &WriteResult{false, errors.Errorf("binlog data is too large (%s)", err.Error())} } - return terror.ErrCritical.GenWithStackByArgs(err) + return &WriteResult{false, terror.ErrCritical.GenWithStackByArgs(err)} } - return nil + return &WriteResult{false, nil} } // SetDDLBinlog sets DDL binlog in the kv.Transaction. diff --git a/sessionctx/binloginfo/binloginfo_test.go b/sessionctx/binloginfo/binloginfo_test.go index 0f8dc4c3bb26e..b7e4c7669d736 100644 --- a/sessionctx/binloginfo/binloginfo_test.go +++ b/sessionctx/binloginfo/binloginfo_test.go @@ -270,7 +270,8 @@ func (s *testBinlogSuite) TestMaxRecvSize(c *C) { }, Client: s.client, } - err := info.WriteBinlog(1) + binlogWR := info.WriteBinlog(1) + err := binlogWR.GetError() c.Assert(err, NotNil) c.Assert(terror.ErrCritical.Equal(err), IsFalse, Commentf("%v", err)) } diff --git a/sessionctx/stmtctx/stmtctx.go b/sessionctx/stmtctx/stmtctx.go index b3b9807ec98d4..9bff0a4df9860 100644 --- a/sessionctx/stmtctx/stmtctx.go +++ b/sessionctx/stmtctx/stmtctx.go @@ -23,6 +23,7 @@ import ( "github.com/pingcap/parser" "github.com/pingcap/parser/model" "github.com/pingcap/parser/mysql" + "github.com/pingcap/tidb/util/disk" "github.com/pingcap/tidb/util/execdetails" "github.com/pingcap/tidb/util/memory" "go.uber.org/zap" @@ -126,6 +127,7 @@ type StatementContext struct { Priority mysql.PriorityEnum NotFillCache bool MemTracker *memory.Tracker + DiskTracker *disk.Tracker RuntimeStatsColl *execdetails.RuntimeStatsColl TableIDs []int64 IndexNames []string @@ -311,30 +313,12 @@ func (sc *StatementContext) WarningCount() uint16 { return wc } -const zero = "0" - // NumErrorWarnings gets warning and error count.
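The NumErrorWarnings rewrite below returns raw counts instead of preformatted strings, moving strconv off the hot path; a hedged sketch of a call site:

ec, wc := sc.NumErrorWarnings()
info := fmt.Sprintf("errors: %d, warnings: %d", ec, wc)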
-func (sc *StatementContext) NumErrorWarnings() (ec, wc string) { - var ( - ecNum uint16 - wcNum int - ) +func (sc *StatementContext) NumErrorWarnings() (ec uint16, wc int) { sc.mu.Lock() - ecNum = sc.mu.errorCount - wcNum = len(sc.mu.warnings) + ec = sc.mu.errorCount + wc = len(sc.mu.warnings) sc.mu.Unlock() - - if ecNum == 0 { - ec = zero - } else { - ec = strconv.Itoa(int(ecNum)) - } - - if wcNum == 0 { - wc = zero - } else { - wc = strconv.Itoa(wcNum) - } return } @@ -518,7 +502,15 @@ func (sc *StatementContext) CopTasksDetails() *CopTasksDetails { sc.mu.Lock() defer sc.mu.Unlock() n := len(sc.mu.allExecDetails) - d := &CopTasksDetails{NumCopTasks: n} + d := &CopTasksDetails{ + NumCopTasks: n, + MaxBackoffTime: make(map[string]time.Duration), + AvgBackoffTime: make(map[string]time.Duration), + P90BackoffTime: make(map[string]time.Duration), + TotBackoffTime: make(map[string]time.Duration), + TotBackoffTimes: make(map[string]int), + MaxBackoffAddress: make(map[string]string), + } if n == 0 { return d } @@ -538,9 +530,60 @@ d.P90WaitTime = sc.mu.allExecDetails[n*9/10].WaitTime d.MaxWaitTime = sc.mu.allExecDetails[n-1].WaitTime d.MaxWaitAddress = sc.mu.allExecDetails[n-1].CalleeAddress + + // calculate backoff details + type backoffItem struct { + callee string + sleepTime time.Duration + times int + } + backoffInfo := make(map[string][]backoffItem) + for _, ed := range sc.mu.allExecDetails { + for backoff := range ed.BackoffTimes { + backoffInfo[backoff] = append(backoffInfo[backoff], backoffItem{ + callee: ed.CalleeAddress, + sleepTime: ed.BackoffSleep[backoff], + times: ed.BackoffTimes[backoff], + }) + } + } + for backoff, items := range backoffInfo { + if len(items) == 0 { + continue + } + sort.Slice(items, func(i, j int) bool { + return items[i].sleepTime < items[j].sleepTime + }) + n := len(items) + d.MaxBackoffAddress[backoff] = items[n-1].callee + d.MaxBackoffTime[backoff] = items[n-1].sleepTime + d.P90BackoffTime[backoff] = items[n*9/10].sleepTime + + var totalTime time.Duration + totalTimes := 0 + for _, it := range items { + totalTime += it.sleepTime + totalTimes += it.times + } + d.AvgBackoffTime[backoff] = totalTime / time.Duration(n) + d.TotBackoffTime[backoff] = totalTime + d.TotBackoffTimes[backoff] = totalTimes + } return d } +// SetFlagsFromPBFlag sets the flags of StatementContext from a `tipb.SelectRequest.Flags`. +func (sc *StatementContext) SetFlagsFromPBFlag(flags uint64) { + sc.IgnoreTruncate = (flags & model.FlagIgnoreTruncate) > 0 + sc.TruncateAsWarning = (flags & model.FlagTruncateAsWarning) > 0 + sc.PadCharToFullLength = (flags & model.FlagPadCharToFullLength) > 0 + sc.InInsertStmt = (flags & model.FlagInInsertStmt) > 0 + sc.InSelectStmt = (flags & model.FlagInSelectStmt) > 0 + sc.OverflowAsWarning = (flags & model.FlagOverflowAsWarning) > 0 + sc.IgnoreZeroInDate = (flags & model.FlagIgnoreZeroInDate) > 0 + sc.DividedByZeroAsWarning = (flags & model.FlagDividedByZeroAsWarning) > 0 +} + // CopTasksDetails collects some useful information of cop-tasks during execution.
type CopTasksDetails struct { NumCopTasks int @@ -554,6 +597,13 @@ type CopTasksDetails struct { P90WaitTime time.Duration MaxWaitAddress string MaxWaitTime time.Duration + + MaxBackoffTime map[string]time.Duration + MaxBackoffAddress map[string]string + AvgBackoffTime map[string]time.Duration + P90BackoffTime map[string]time.Duration + TotBackoffTime map[string]time.Duration + TotBackoffTimes map[string]int } // ToZapFields wraps the CopTasksDetails as zap.Fileds. diff --git a/sessionctx/stmtctx/stmtctx_test.go b/sessionctx/stmtctx/stmtctx_test.go index 6ba2e862f27dc..01926518c644a 100644 --- a/sessionctx/stmtctx/stmtctx_test.go +++ b/sessionctx/stmtctx/stmtctx_test.go @@ -33,11 +33,18 @@ var _ = Suite(&stmtctxSuit{}) func (s *stmtctxSuit) TestCopTasksDetails(c *C) { ctx := new(stmtctx.StatementContext) + backoffs := []string{"tikvRPC", "pdRPC", "regionMiss"} for i := 0; i < 100; i++ { d := &execdetails.ExecDetails{ CalleeAddress: fmt.Sprintf("%v", i+1), ProcessTime: time.Second * time.Duration(i+1), WaitTime: time.Millisecond * time.Duration(i+1), + BackoffSleep: make(map[string]time.Duration), + BackoffTimes: make(map[string]int), + } + for _, backoff := range backoffs { + d.BackoffSleep[backoff] = time.Millisecond * 100 * time.Duration(i+1) + d.BackoffTimes[backoff] = i + 1 } ctx.MergeExecDetails(d, nil) } @@ -53,6 +60,14 @@ func (s *stmtctxSuit) TestCopTasksDetails(c *C) { c.Assert(d.MaxWaitAddress, Equals, "100") fields := d.ToZapFields() c.Assert(len(fields), Equals, 9) + for _, backoff := range backoffs { + c.Assert(d.MaxBackoffAddress[backoff], Equals, "100") + c.Assert(d.MaxBackoffTime[backoff], Equals, 100*time.Millisecond*100) + c.Assert(d.P90BackoffTime[backoff], Equals, time.Millisecond*100*91) + c.Assert(d.AvgBackoffTime[backoff], Equals, time.Millisecond*100*101/2) + c.Assert(d.TotBackoffTimes[backoff], Equals, 101*50) + c.Assert(d.TotBackoffTime[backoff], Equals, 101*50*100*time.Millisecond) + } } func (s *stmtctxSuit) TestStatementContextPushDownFLags(c *C) { diff --git a/sessionctx/variable/session.go b/sessionctx/variable/session.go index 3ec99b0d87cd2..abb07788a8360 100644 --- a/sessionctx/variable/session.go +++ b/sessionctx/variable/session.go @@ -17,6 +17,7 @@ import ( "bytes" "crypto/tls" "fmt" + "sort" "strconv" "strings" "sync" @@ -202,6 +203,10 @@ type SessionVars struct { Users map[string]string // systems variables, don't modify it directly, use GetSystemVar/SetSystemVar method. systems map[string]string + // SysWarningCount is the system variable "warning_count", because it is on the hot path, so we extract it from the systems + SysWarningCount int + // SysErrorCount is the system variable "error_count", because it is on the hot path, so we extract it from the systems + SysErrorCount uint16 // PreparedStmts stores prepared statement. PreparedStmts map[uint32]interface{} PreparedStmtNameToID map[string]uint32 @@ -308,6 +313,8 @@ type SessionVars struct { SeekFactor float64 // MemoryFactor is the memory cost of storing one tuple. MemoryFactor float64 + // DiskFactor is the IO cost of reading/writing one byte to temporary disk. + DiskFactor float64 // ConcurrencyFactor is the CPU cost of additional one goroutine. 
ConcurrencyFactor float64 @@ -517,6 +524,7 @@ func NewSessionVars() *SessionVars { DescScanFactor: DefOptDescScanFactor, SeekFactor: DefOptSeekFactor, MemoryFactor: DefOptMemoryFactor, + DiskFactor: DefOptDiskFactor, ConcurrencyFactor: DefOptConcurrencyFactor, EnableRadixJoin: false, EnableVectorizedExpression: DefEnableVectorizedExpression, @@ -532,7 +540,7 @@ func NewSessionVars() *SessionVars { AllowRemoveAutoInc: DefTiDBAllowRemoveAutoInc, UsePlanBaselines: DefTiDBUsePlanBaselines, EvolvePlanBaselines: DefTiDBEvolvePlanBaselines, - isolationReadEngines: map[kv.StoreType]struct{}{kv.TiKV: {}, kv.TiFlash: {}}, + isolationReadEngines: map[kv.StoreType]struct{}{kv.TiKV: {}, kv.TiFlash: {}, kv.TiDB: {}}, LockWaitTimeout: DefInnodbLockWaitTimeout * 1000, } vars.Concurrency = Concurrency{ @@ -707,6 +715,11 @@ func (s *SessionVars) Location() *time.Location { // GetSystemVar gets the string value of a system variable. func (s *SessionVars) GetSystemVar(name string) (string, bool) { + if name == WarningCount { + return strconv.Itoa(s.SysWarningCount), true + } else if name == ErrorCount { + return strconv.Itoa(int(s.SysErrorCount)), true + } val, ok := s.systems[name] return val, ok } @@ -848,6 +861,8 @@ func (s *SessionVars) SetSystemVar(name string, val string) error { s.SeekFactor = tidbOptFloat64(val, DefOptSeekFactor) case TiDBOptMemoryFactor: s.MemoryFactor = tidbOptFloat64(val, DefOptMemoryFactor) + case TiDBOptDiskFactor: + s.DiskFactor = tidbOptFloat64(val, DefOptDiskFactor) case TiDBOptConcurrencyFactor: s.ConcurrencyFactor = tidbOptFloat64(val, DefOptConcurrencyFactor) case TiDBIndexLookupConcurrency: @@ -985,6 +1000,8 @@ func (s *SessionVars) SetSystemVar(name string, val string) error { s.isolationReadEngines[kv.TiKV] = struct{}{} case kv.TiFlash.Name(): s.isolationReadEngines[kv.TiFlash] = struct{}{} + case kv.TiDB.Name(): + s.isolationReadEngines[kv.TiDB] = struct{}{} } } case TiDBStoreLimit: @@ -1181,6 +1198,8 @@ const ( SlowLogCopWaitMax = "Cop_wait_max" // SlowLogCopWaitAddr is the address of TiKV where the cop-task which cost wait process time run. SlowLogCopWaitAddr = "Cop_wait_addr" + // SlowLogCopBackoffPrefix contains backoff information. + SlowLogCopBackoffPrefix = "Cop_backoff_" // SlowLogMemMax is the max number bytes of memory used in this statement. SlowLogMemMax = "Mem_max" // SlowLogPrepared is used to indicate whether this sql execute in prepare. 
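The new DiskFactor sits alongside MemoryFactor in the cost model; a hedged sketch of how a spill-to-disk operator might charge for it (the formula is illustrative, not from this patch):

// diskCost estimates the IO cost of spilling rowCount rows of rowSize bytes.
func diskCost(vars *SessionVars, rowCount, rowSize float64) float64 {
	return rowCount * rowSize * vars.DiskFactor
}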
@@ -1294,6 +1313,13 @@ func (s *SessionVars) SlowLogFormat(logItems *SlowQueryLogItems) string { if logItems.CopTasks != nil { writeSlowLogItem(&buf, SlowLogNumCopTasksStr, strconv.FormatInt(int64(logItems.CopTasks.NumCopTasks), 10)) if logItems.CopTasks.NumCopTasks > 0 { + // make the result stable + backoffs := make([]string, 0, 3) + for backoff := range logItems.CopTasks.TotBackoffTimes { + backoffs = append(backoffs, backoff) + } + sort.Strings(backoffs) + if logItems.CopTasks.NumCopTasks == 1 { buf.WriteString(SlowLogRowPrefixStr + fmt.Sprintf("%v%v%v %v%v%v", SlowLogCopProcAvg, SlowLogSpaceMarkStr, logItems.CopTasks.AvgProcessTime.Seconds(), @@ -1301,7 +1327,13 @@ func (s *SessionVars) SlowLogFormat(logItems *SlowQueryLogItems) string { buf.WriteString(SlowLogRowPrefixStr + fmt.Sprintf("%v%v%v %v%v%v", SlowLogCopWaitAvg, SlowLogSpaceMarkStr, logItems.CopTasks.AvgWaitTime.Seconds(), SlowLogCopWaitAddr, SlowLogSpaceMarkStr, logItems.CopTasks.MaxWaitAddress) + "\n") - + for _, backoff := range backoffs { + backoffPrefix := SlowLogCopBackoffPrefix + backoff + "_" + buf.WriteString(SlowLogRowPrefixStr + fmt.Sprintf("%v%v%v %v%v%v\n", + backoffPrefix+"total_times", SlowLogSpaceMarkStr, logItems.CopTasks.TotBackoffTimes[backoff], + backoffPrefix+"total_time", SlowLogSpaceMarkStr, logItems.CopTasks.TotBackoffTime[backoff].Seconds(), + )) + } } else { buf.WriteString(SlowLogRowPrefixStr + fmt.Sprintf("%v%v%v %v%v%v %v%v%v %v%v%v", SlowLogCopProcAvg, SlowLogSpaceMarkStr, logItems.CopTasks.AvgProcessTime.Seconds(), @@ -1313,6 +1345,17 @@ func (s *SessionVars) SlowLogFormat(logItems *SlowQueryLogItems) string { SlowLogCopWaitP90, SlowLogSpaceMarkStr, logItems.CopTasks.P90WaitTime.Seconds(), SlowLogCopWaitMax, SlowLogSpaceMarkStr, logItems.CopTasks.MaxWaitTime.Seconds(), SlowLogCopWaitAddr, SlowLogSpaceMarkStr, logItems.CopTasks.MaxWaitAddress) + "\n") + for _, backoff := range backoffs { + backoffPrefix := SlowLogCopBackoffPrefix + backoff + "_" + buf.WriteString(SlowLogRowPrefixStr + fmt.Sprintf("%v%v%v %v%v%v %v%v%v %v%v%v %v%v%v %v%v%v\n", + backoffPrefix+"total_times", SlowLogSpaceMarkStr, logItems.CopTasks.TotBackoffTimes[backoff], + backoffPrefix+"total_time", SlowLogSpaceMarkStr, logItems.CopTasks.TotBackoffTime[backoff].Seconds(), + backoffPrefix+"max_time", SlowLogSpaceMarkStr, logItems.CopTasks.MaxBackoffTime[backoff].Seconds(), + backoffPrefix+"max_addr", SlowLogSpaceMarkStr, logItems.CopTasks.MaxBackoffAddress[backoff], + backoffPrefix+"avg_time", SlowLogSpaceMarkStr, logItems.CopTasks.AvgBackoffTime[backoff].Seconds(), + backoffPrefix+"p90_time", SlowLogSpaceMarkStr, logItems.CopTasks.P90BackoffTime[backoff].Seconds(), + )) + } } } } diff --git a/sessionctx/variable/session_test.go b/sessionctx/variable/session_test.go index 93829d27bc68e..53efd686298e2 100644 --- a/sessionctx/variable/session_test.go +++ b/sessionctx/variable/session_test.go @@ -151,7 +151,24 @@ func (*testSessionSuite) TestSlowLogFormat(c *C) { P90WaitTime: time.Millisecond * 20, MaxWaitTime: time.Millisecond * 30, MaxWaitAddress: "10.6.131.79", + MaxBackoffTime: make(map[string]time.Duration), + AvgBackoffTime: make(map[string]time.Duration), + P90BackoffTime: make(map[string]time.Duration), + TotBackoffTime: make(map[string]time.Duration), + TotBackoffTimes: make(map[string]int), + MaxBackoffAddress: make(map[string]string), } + + backoffs := []string{"rpcTiKV", "rpcPD", "regionMiss"} + for _, backoff := range backoffs { + copTasks.MaxBackoffTime[backoff] = time.Millisecond * 200 + 
copTasks.MaxBackoffAddress[backoff] = "127.0.0.1" + copTasks.AvgBackoffTime[backoff] = time.Millisecond * 200 + copTasks.P90BackoffTime[backoff] = time.Millisecond * 200 + copTasks.TotBackoffTime[backoff] = time.Millisecond * 200 + copTasks.TotBackoffTimes[backoff] = 200 + } + var memMax int64 = 2333 resultString := `# Txn_start_ts: 406649736972468225 # User: root@192.168.0.1 @@ -168,6 +185,9 @@ func (*testSessionSuite) TestSlowLogFormat(c *C) { # Num_cop_tasks: 10 # Cop_proc_avg: 1 Cop_proc_p90: 2 Cop_proc_max: 3 Cop_proc_addr: 10.6.131.78 # Cop_wait_avg: 0.01 Cop_wait_p90: 0.02 Cop_wait_max: 0.03 Cop_wait_addr: 10.6.131.79 +# Cop_backoff_regionMiss_total_times: 200 Cop_backoff_regionMiss_total_time: 0.2 Cop_backoff_regionMiss_max_time: 0.2 Cop_backoff_regionMiss_max_addr: 127.0.0.1 Cop_backoff_regionMiss_avg_time: 0.2 Cop_backoff_regionMiss_p90_time: 0.2 +# Cop_backoff_rpcPD_total_times: 200 Cop_backoff_rpcPD_total_time: 0.2 Cop_backoff_rpcPD_max_time: 0.2 Cop_backoff_rpcPD_max_addr: 127.0.0.1 Cop_backoff_rpcPD_avg_time: 0.2 Cop_backoff_rpcPD_p90_time: 0.2 +# Cop_backoff_rpcTiKV_total_times: 200 Cop_backoff_rpcTiKV_total_time: 0.2 Cop_backoff_rpcTiKV_max_time: 0.2 Cop_backoff_rpcTiKV_max_addr: 127.0.0.1 Cop_backoff_rpcTiKV_avg_time: 0.2 Cop_backoff_rpcTiKV_p90_time: 0.2 # Mem_max: 2333 # Prepared: true # Has_more_results: true diff --git a/sessionctx/variable/sysvar.go b/sessionctx/variable/sysvar.go index 22b042941948e..153205921a428 100644 --- a/sessionctx/variable/sysvar.go +++ b/sessionctx/variable/sysvar.go @@ -648,6 +648,7 @@ var defaultSysVars = []*SysVar{ {ScopeGlobal | ScopeSession, TiDBOptDescScanFactor, strconv.FormatFloat(DefOptDescScanFactor, 'f', -1, 64)}, {ScopeGlobal | ScopeSession, TiDBOptSeekFactor, strconv.FormatFloat(DefOptSeekFactor, 'f', -1, 64)}, {ScopeGlobal | ScopeSession, TiDBOptMemoryFactor, strconv.FormatFloat(DefOptMemoryFactor, 'f', -1, 64)}, + {ScopeGlobal | ScopeSession, TiDBOptDiskFactor, strconv.FormatFloat(DefOptDiskFactor, 'f', -1, 64)}, {ScopeGlobal | ScopeSession, TiDBOptConcurrencyFactor, strconv.FormatFloat(DefOptConcurrencyFactor, 'f', -1, 64)}, {ScopeGlobal | ScopeSession, TiDBIndexJoinBatchSize, strconv.Itoa(DefIndexJoinBatchSize)}, {ScopeGlobal | ScopeSession, TiDBIndexLookupSize, strconv.Itoa(DefIndexLookupSize)}, @@ -724,7 +725,7 @@ var defaultSysVars = []*SysVar{ {ScopeGlobal, TiDBEvolvePlanTaskMaxTime, strconv.Itoa(DefTiDBEvolvePlanTaskMaxTime)}, {ScopeGlobal, TiDBEvolvePlanTaskStartTime, DefTiDBEvolvePlanTaskStartTime}, {ScopeGlobal, TiDBEvolvePlanTaskEndTime, DefTiDBEvolvePlanTaskEndTime}, - {ScopeGlobal | ScopeSession, TiDBIsolationReadEngines, "tikv,tiflash"}, + {ScopeGlobal | ScopeSession, TiDBIsolationReadEngines, "tikv,tiflash,tidb"}, {ScopeGlobal | ScopeSession, TiDBStoreLimit, strconv.FormatInt(atomic.LoadInt64(&config.GetGlobalConfig().TiKVClient.StoreLimit), 10)}, } diff --git a/sessionctx/variable/tidb_vars.go b/sessionctx/variable/tidb_vars.go index 11ac625d62861..e50cbbd7ae5e3 100644 --- a/sessionctx/variable/tidb_vars.go +++ b/sessionctx/variable/tidb_vars.go @@ -200,6 +200,8 @@ const ( TiDBOptSeekFactor = "tidb_opt_seek_factor" // tidb_opt_memory_factor is the memory cost of storing one tuple. TiDBOptMemoryFactor = "tidb_opt_memory_factor" + // tidb_opt_disk_factor is the IO cost of reading/writing one byte to temporary disk. + TiDBOptDiskFactor = "tidb_opt_disk_factor" // tidb_opt_concurrency_factor is the CPU cost of additional one goroutine. 
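With the widened default above ("tikv,tiflash,tidb"), the validator also accepts the TiDB engine, which is what allows cluster memory tables to be read through the kv.TiDB store type; a hedged testkit-style sketch:

tk.MustExec("set @@tidb_isolation_read_engines = 'tikv,tidb'") // accepted
_, err := tk.Exec("set @@tidb_isolation_read_engines = 'spark'") // ErrWrongValueForVar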
TiDBOptConcurrencyFactor = "tidb_opt_concurrency_factor" @@ -382,6 +384,7 @@ const ( DefOptDescScanFactor = 3.0 DefOptSeekFactor = 20.0 DefOptMemoryFactor = 0.001 + DefOptDiskFactor = 1.5 DefOptConcurrencyFactor = 3.0 DefOptInSubqToJoinAndAgg = true DefBatchInsert = false diff --git a/sessionctx/variable/varsutil.go b/sessionctx/variable/varsutil.go index 954a597a4e5fe..460a7ff48c456 100644 --- a/sessionctx/variable/varsutil.go +++ b/sessionctx/variable/varsutil.go @@ -142,7 +142,7 @@ func GetSessionOnlySysVars(s *SessionVars, key string) (string, bool, error) { case TiDBCheckMb4ValueInUTF8: return BoolToIntStr(config.GetGlobalConfig().CheckMb4ValueInUTF8), true, nil } - sVal, ok := s.systems[key] + sVal, ok := s.GetSystemVar(key) if ok { return sVal, true, nil } @@ -503,6 +503,7 @@ func ValidateSetSystemVar(vars *SessionVars, name string, value string) (string, TiDBOptDescScanFactor, TiDBOptSeekFactor, TiDBOptMemoryFactor, + TiDBOptDiskFactor, TiDBOptConcurrencyFactor: v, err := strconv.ParseFloat(value, 64) if err != nil { @@ -647,6 +648,8 @@ func ValidateSetSystemVar(vars *SessionVars, name string, value string) (string, formatVal += kv.TiKV.Name() case strings.EqualFold(engine, kv.TiFlash.Name()): formatVal += kv.TiFlash.Name() + case strings.EqualFold(engine, kv.TiDB.Name()): + formatVal += kv.TiDB.Name() default: return value, ErrWrongValueForVar.GenWithStackByArgs(name, value) } diff --git a/sessionctx/variable/varsutil_test.go b/sessionctx/variable/varsutil_test.go index f2451f62b39ad..66801cf3b9827 100644 --- a/sessionctx/variable/varsutil_test.go +++ b/sessionctx/variable/varsutil_test.go @@ -352,6 +352,14 @@ func (s *testVarsutilSuite) TestVarsutil(c *C) { c.Assert(val, Equals, "1.0") c.Assert(v.MemoryFactor, Equals, 1.0) + c.Assert(v.DiskFactor, Equals, 1.5) + err = SetSessionSystemVar(v, TiDBOptDiskFactor, types.NewStringDatum("1.1")) + c.Assert(err, IsNil) + val, err = GetSessionSystemVar(v, TiDBOptDiskFactor) + c.Assert(err, IsNil) + c.Assert(val, Equals, "1.1") + c.Assert(v.DiskFactor, Equals, 1.1) + c.Assert(v.ConcurrencyFactor, Equals, 3.0) err = SetSessionSystemVar(v, TiDBOptConcurrencyFactor, types.NewStringDatum("5.0")) c.Assert(err, IsNil) @@ -455,6 +463,8 @@ func (s *testVarsutilSuite) TestValidate(c *C) { {TiDBOptSeekFactor, "-2", true}, {TiDBOptMemoryFactor, "a", true}, {TiDBOptMemoryFactor, "-2", true}, + {TiDBOptDiskFactor, "a", true}, + {TiDBOptDiskFactor, "-2", true}, {TiDBOptConcurrencyFactor, "a", true}, {TiDBOptConcurrencyFactor, "-2", true}, {TxnIsolation, "READ-UNCOMMITTED", true}, diff --git a/statistics/feedback.go b/statistics/feedback.go index 3db3ae09e43e4..cfc371947e23c 100644 --- a/statistics/feedback.go +++ b/statistics/feedback.go @@ -302,7 +302,7 @@ func buildBucketFeedback(h *Histogram, feedback *QueryFeedback) (map[int]*Bucket } total := 0 sc := &stmtctx.StatementContext{TimeZone: time.UTC} - min, max := GetMinValue(h.Tp), GetMaxValue(h.Tp) + min, max := types.GetMinValue(h.Tp), types.GetMaxValue(h.Tp) for _, fb := range feedback.Feedback { skip, err := fb.adjustFeedbackBoundaries(sc, &min, &max) if err != nil { @@ -927,73 +927,3 @@ func SupportColumnType(ft *types.FieldType) bool { } return false } - -// GetMaxValue returns the max value datum for each type. 
-func GetMaxValue(ft *types.FieldType) (max types.Datum) { - switch ft.Tp { - case mysql.TypeTiny, mysql.TypeShort, mysql.TypeInt24, mysql.TypeLong, mysql.TypeLonglong: - if mysql.HasUnsignedFlag(ft.Flag) { - max.SetUint64(types.IntergerUnsignedUpperBound(ft.Tp)) - } else { - max.SetInt64(types.IntergerSignedUpperBound(ft.Tp)) - } - case mysql.TypeFloat: - max.SetFloat32(float32(types.GetMaxFloat(ft.Flen, ft.Decimal))) - case mysql.TypeDouble: - max.SetFloat64(types.GetMaxFloat(ft.Flen, ft.Decimal)) - case mysql.TypeString, mysql.TypeVarString, mysql.TypeVarchar, mysql.TypeBlob, mysql.TypeTinyBlob, mysql.TypeMediumBlob, mysql.TypeLongBlob: - val := types.MaxValueDatum() - bytes, err := codec.EncodeKey(nil, nil, val) - // should not happen - if err != nil { - logutil.BgLogger().Error("encode key fail", zap.Error(err)) - } - max.SetBytes(bytes) - case mysql.TypeNewDecimal: - max.SetMysqlDecimal(types.NewMaxOrMinDec(false, ft.Flen, ft.Decimal)) - case mysql.TypeDuration: - max.SetMysqlDuration(types.Duration{Duration: types.MaxTime}) - case mysql.TypeDate, mysql.TypeDatetime, mysql.TypeTimestamp: - if ft.Tp == mysql.TypeDate || ft.Tp == mysql.TypeDatetime { - max.SetMysqlTime(types.Time{Time: types.MaxDatetime, Type: ft.Tp}) - } else { - max.SetMysqlTime(types.MaxTimestamp) - } - } - return -} - -// GetMinValue returns the min value datum for each type. -func GetMinValue(ft *types.FieldType) (min types.Datum) { - switch ft.Tp { - case mysql.TypeTiny, mysql.TypeShort, mysql.TypeInt24, mysql.TypeLong, mysql.TypeLonglong: - if mysql.HasUnsignedFlag(ft.Flag) { - min.SetUint64(0) - } else { - min.SetInt64(types.IntergerSignedLowerBound(ft.Tp)) - } - case mysql.TypeFloat: - min.SetFloat32(float32(-types.GetMaxFloat(ft.Flen, ft.Decimal))) - case mysql.TypeDouble: - min.SetFloat64(-types.GetMaxFloat(ft.Flen, ft.Decimal)) - case mysql.TypeString, mysql.TypeVarString, mysql.TypeVarchar, mysql.TypeBlob, mysql.TypeTinyBlob, mysql.TypeMediumBlob, mysql.TypeLongBlob: - val := types.MinNotNullDatum() - bytes, err := codec.EncodeKey(nil, nil, val) - // should not happen - if err != nil { - logutil.BgLogger().Error("encode key fail", zap.Error(err)) - } - min.SetBytes(bytes) - case mysql.TypeNewDecimal: - min.SetMysqlDecimal(types.NewMaxOrMinDec(true, ft.Flen, ft.Decimal)) - case mysql.TypeDuration: - min.SetMysqlDuration(types.Duration{Duration: types.MinTime}) - case mysql.TypeDate, mysql.TypeDatetime, mysql.TypeTimestamp: - if ft.Tp == mysql.TypeDate || ft.Tp == mysql.TypeDatetime { - min.SetMysqlTime(types.Time{Time: types.MinDatetime, Type: ft.Tp}) - } else { - min.SetMysqlTime(types.MinTimestamp) - } - } - return -} diff --git a/statistics/handle/handle_test.go b/statistics/handle/handle_test.go index 2be0e6a445f3c..a72b38851c5a2 100644 --- a/statistics/handle/handle_test.go +++ b/statistics/handle/handle_test.go @@ -17,6 +17,7 @@ import ( "fmt" "testing" "time" + "unsafe" . 
"github.com/pingcap/check" "github.com/pingcap/errors" @@ -209,11 +210,15 @@ func (s *testStatsSuite) TestAvgColLen(c *C) { tableInfo := tbl.Meta() statsTbl := do.StatsHandle().GetTableStats(tableInfo) c.Assert(statsTbl.Columns[tableInfo.Columns[0].ID].AvgColSize(statsTbl.Count, false), Equals, 1.0) + c.Assert(statsTbl.Columns[tableInfo.Columns[0].ID].AvgColSizeListInDisk(statsTbl.Count), Equals, 8.0) // The size of varchar type is LEN + BYTE, here is 1 + 7 = 8 c.Assert(statsTbl.Columns[tableInfo.Columns[1].ID].AvgColSize(statsTbl.Count, false), Equals, 8.0) c.Assert(statsTbl.Columns[tableInfo.Columns[2].ID].AvgColSize(statsTbl.Count, false), Equals, 8.0) c.Assert(statsTbl.Columns[tableInfo.Columns[3].ID].AvgColSize(statsTbl.Count, false), Equals, 8.0) + c.Assert(statsTbl.Columns[tableInfo.Columns[1].ID].AvgColSizeListInDisk(statsTbl.Count), Equals, 8.0-1) + c.Assert(statsTbl.Columns[tableInfo.Columns[2].ID].AvgColSizeListInDisk(statsTbl.Count), Equals, float64(unsafe.Sizeof(float32(12.3)))) + c.Assert(statsTbl.Columns[tableInfo.Columns[3].ID].AvgColSizeListInDisk(statsTbl.Count), Equals, float64(unsafe.Sizeof(types.Time{}))) testKit.MustExec("insert into t values(132, '123456789112', 1232.3, '2018-03-07 19:17:29')") testKit.MustExec("analyze table t") statsTbl = do.StatsHandle().GetTableStats(tableInfo) @@ -221,6 +226,10 @@ func (s *testStatsSuite) TestAvgColLen(c *C) { c.Assert(statsTbl.Columns[tableInfo.Columns[1].ID].AvgColSize(statsTbl.Count, false), Equals, 10.5) c.Assert(statsTbl.Columns[tableInfo.Columns[2].ID].AvgColSize(statsTbl.Count, false), Equals, 8.0) c.Assert(statsTbl.Columns[tableInfo.Columns[3].ID].AvgColSize(statsTbl.Count, false), Equals, 8.0) + c.Assert(statsTbl.Columns[tableInfo.Columns[0].ID].AvgColSizeListInDisk(statsTbl.Count), Equals, 8.0) + c.Assert(statsTbl.Columns[tableInfo.Columns[1].ID].AvgColSizeListInDisk(statsTbl.Count), Equals, 10.5-1) + c.Assert(statsTbl.Columns[tableInfo.Columns[2].ID].AvgColSizeListInDisk(statsTbl.Count), Equals, float64(unsafe.Sizeof(float32(12.3)))) + c.Assert(statsTbl.Columns[tableInfo.Columns[3].ID].AvgColSizeListInDisk(statsTbl.Count), Equals, float64(unsafe.Sizeof(types.Time{}))) } func (s *testStatsSuite) TestDurationToTS(c *C) { diff --git a/statistics/handle/update.go b/statistics/handle/update.go index cba72bf2a4b12..73187a49dfe44 100644 --- a/statistics/handle/update.go +++ b/statistics/handle/update.go @@ -244,11 +244,13 @@ func needDumpStatsDelta(h *Handle, id int64, item variable.TableDelta, currentTi return false } +type dumpMode bool + const ( - // DumpAll indicates dump all the delta info in to kv - DumpAll = true + // DumpAll indicates dump all the delta info in to kv. + DumpAll dumpMode = true // DumpDelta indicates dump part of the delta info in to kv. - DumpDelta = false + DumpDelta dumpMode = false ) // sweepList will loop over the list, merge each session's local stats into handle @@ -278,12 +280,12 @@ func (h *Handle) sweepList() { } // DumpStatsDeltaToKV sweeps the whole list and updates the global map, then we dumps every table that held in map to KV. -// If the `dumpAll` is false, it will only dump that delta info that `Modify Count / Table Count` greater than a ratio. -func (h *Handle) DumpStatsDeltaToKV(dumpMode bool) error { +// If the mode is `DumpDelta`, it will only dump that delta info that `Modify Count / Table Count` greater than a ratio. 
+func (h *Handle) DumpStatsDeltaToKV(mode dumpMode) error { h.sweepList() currentTime := time.Now() for id, item := range h.globalMap { - if dumpMode == DumpDelta && !needDumpStatsDelta(h, id, item, currentTime) { + if mode == DumpDelta && !needDumpStatsDelta(h, id, item, currentTime) { continue } updated, err := h.dumpTableStatCountToKV(id, item) @@ -717,7 +719,6 @@ func (h *Handle) HandleAutoAnalyze(is infoschema.InfoSchema) { } } } - return } func (h *Handle) autoAnalyzeTable(tblInfo *model.TableInfo, statsTbl *statistics.Table, start, end time.Time, ratio float64, sql string) bool { @@ -947,10 +948,10 @@ func (h *Handle) dumpRangeFeedback(sc *stmtctx.StatementContext, ran *ranger.Ran return nil } if ran.LowVal[0].Kind() == types.KindMinNotNull { - ran.LowVal[0] = statistics.GetMinValue(q.Hist.Tp) + ran.LowVal[0] = types.GetMinValue(q.Hist.Tp) } if ran.HighVal[0].Kind() == types.KindMaxValue { - ran.HighVal[0] = statistics.GetMaxValue(q.Hist.Tp) + ran.HighVal[0] = types.GetMaxValue(q.Hist.Tp) } } ranges, ok := q.Hist.SplitRange(sc, []*ranger.Range{ran}, q.Tp == statistics.IndexType) diff --git a/statistics/histogram.go b/statistics/histogram.go index 83497b4ed112e..346678ff6136f 100644 --- a/statistics/histogram.go +++ b/statistics/histogram.go @@ -140,6 +140,26 @@ func (c *Column) AvgColSize(count int64, isKey bool) float64 { return math.Round(float64(c.TotColSize)/float64(count)*100) / 100 } +// AvgColSizeListInDisk is the average column size of the histogram. These sizes are derived +// from `chunk.ListInDisk`, so we need to update them if `chunk.ListInDisk` changes. +func (c *Column) AvgColSizeListInDisk(count int64) float64 { + if count == 0 { + return 0 + } + histCount := c.TotalRowCount() + notNullRatio := 1.0 + if histCount > 0 { + notNullRatio = 1.0 - float64(c.NullCount)/histCount + } + size := chunk.GetFixedLen(c.Histogram.Tp) + if size != -1 { + return float64(size) * notNullRatio + } + // Keep two decimal places. + // The size of the varchar type is LEN + BYTE, so we subtract 1 here. + return math.Round(float64(c.TotColSize)/float64(count)*100)/100 - 1 +} + // AppendBucket appends a bucket into `hg`. func (hg *Histogram) AppendBucket(lower *types.Datum, upper *types.Datum, count, repeat int64) { hg.Buckets = append(hg.Buckets, Bucket{Count: count, Repeat: repeat}) diff --git a/statistics/selectivity.go b/statistics/selectivity.go index fef5c54907d25..d8168003812be 100644 --- a/statistics/selectivity.go +++ b/statistics/selectivity.go @@ -20,6 +20,7 @@ import ( "github.com/pingcap/parser/ast" "github.com/pingcap/parser/mysql" "github.com/pingcap/tidb/expression" + planutil "github.com/pingcap/tidb/planner/util" "github.com/pingcap/tidb/sessionctx" "github.com/pingcap/tidb/types" "github.com/pingcap/tidb/util/ranger" @@ -147,7 +148,7 @@ func isColEqCorCol(filter expression.Expression) *expression.Column { // The definition of selectivity is (row count after filter / row count before filter). // And exprs must be CNF now, in other words, `exprs[0] and exprs[1] and ... and exprs[len - 1]` should be held when you call this. // Currently the time complexity is o(n^2). -func (coll *HistColl) Selectivity(ctx sessionctx.Context, exprs []expression.Expression) (float64, []*StatsNode, error) { +func (coll *HistColl) Selectivity(ctx sessionctx.Context, exprs []expression.Expression, filledPaths []*planutil.AccessPath) (float64, []*StatsNode, error) { // If table's count is zero or conditions are empty, we should return 100% selectivity.
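A minimal sketch of the new parameter (not in this patch): callers that already hold detached access paths, for example a planner DataSource whose paths were filled during path generation, can hand them in so the index ranges are reused rather than re-derived; callers without cached paths simply pass nil, as the updated tests later in this patch do:

	ratio, nodes, err := histColl.Selectivity(sctx, conditions, nil) // nil: ranges are recomputed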
if coll.Count == 0 || len(exprs) == 0 { return 1, nil, nil @@ -189,7 +190,7 @@ func (coll *HistColl) Selectivity(ctx sessionctx.Context, exprs []expression.Exp for id, colInfo := range coll.Columns { col := expression.ColInfo2Col(extractedCols, colInfo.Info) if col != nil { - maskCovered, ranges, _, err := getMaskAndRanges(ctx, remainedExprs, ranger.ColumnRangeType, nil, col) + maskCovered, ranges, _, err := getMaskAndRanges(ctx, remainedExprs, ranger.ColumnRangeType, nil, nil, col) if err != nil { return 0, nil, errors.Trace(err) } @@ -211,6 +212,13 @@ func (coll *HistColl) Selectivity(ctx sessionctx.Context, exprs []expression.Exp nodes[len(nodes)-1].Selectivity = cnt / float64(coll.Count) } } + id2Paths := make(map[int64]*planutil.AccessPath) + for _, path := range filledPaths { + if path.IsTablePath { + continue + } + id2Paths[path.Index.ID] = path + } for id, idxInfo := range coll.Indices { idxCols := expression.FindPrefixOfIndex(extractedCols, coll.Idx2ColumnIDs[id]) if len(idxCols) > 0 { @@ -218,7 +226,7 @@ func (coll *HistColl) Selectivity(ctx sessionctx.Context, exprs []expression.Exp for i := 0; i < len(idxCols); i++ { lengths = append(lengths, idxInfo.Info.Columns[i].Length) } - maskCovered, ranges, partCover, err := getMaskAndRanges(ctx, remainedExprs, ranger.IndexRangeType, lengths, idxCols...) + maskCovered, ranges, partCover, err := getMaskAndRanges(ctx, remainedExprs, ranger.IndexRangeType, lengths, id2Paths[idxInfo.ID], idxCols...) if err != nil { return 0, nil, errors.Trace(err) } @@ -259,8 +267,7 @@ func (coll *HistColl) Selectivity(ctx sessionctx.Context, exprs []expression.Exp return ret, nodes, nil } -func getMaskAndRanges(ctx sessionctx.Context, exprs []expression.Expression, rangeType ranger.RangeType, - lengths []int, cols ...*expression.Column) (mask int64, ranges []*ranger.Range, partCover bool, err error) { +func getMaskAndRanges(ctx sessionctx.Context, exprs []expression.Expression, rangeType ranger.RangeType, lengths []int, cachedPath *planutil.AccessPath, cols ...*expression.Column) (mask int64, ranges []*ranger.Range, partCover bool, err error) { sc := ctx.GetSessionVars().StmtCtx isDNF := false var accessConds, remainedConds []expression.Expression @@ -269,9 +276,16 @@ func getMaskAndRanges(ctx sessionctx.Context, exprs []expression.Expression, ran accessConds = ranger.ExtractAccessConditionsForColumn(exprs, cols[0].UniqueID) ranges, err = ranger.BuildColumnRange(accessConds, sc, cols[0].RetType, types.UnspecifiedLength) case ranger.IndexRangeType: + if cachedPath != nil { + ranges, accessConds, remainedConds, isDNF = cachedPath.Ranges, cachedPath.AccessConds, cachedPath.TableFilters, cachedPath.IsDNFCond + break + } var res *ranger.DetachRangeResult res, err = ranger.DetachCondAndBuildRangeForIndex(ctx, exprs, cols, lengths) ranges, accessConds, remainedConds, isDNF = res.Ranges, res.AccessConds, res.RemainedConds, res.IsDNFCond + if err != nil { + return 0, nil, false, err + } default: panic("should never be here") } diff --git a/statistics/selectivity_test.go b/statistics/selectivity_test.go index 109b4df605946..20a56f600ee42 100644 --- a/statistics/selectivity_test.go +++ b/statistics/selectivity_test.go @@ -301,12 +301,12 @@ func (s *testStatsSuite) TestSelectivity(c *C) { histColl := statsTbl.GenerateHistCollFromColumnInfo(ds.Columns, ds.Schema().Columns) - ratio, _, err := histColl.Selectivity(sctx, sel.Conditions) + ratio, _, err := histColl.Selectivity(sctx, sel.Conditions, nil) c.Assert(err, IsNil, comment) c.Assert(math.Abs(ratio-tt.selectivity) 
< eps, IsTrue, Commentf("for %s, needed: %v, got: %v", tt.exprs, tt.selectivity, ratio)) histColl.Count *= 10 - ratio, _, err = histColl.Selectivity(sctx, sel.Conditions) + ratio, _, err = histColl.Selectivity(sctx, sel.Conditions, nil) c.Assert(err, IsNil, comment) c.Assert(math.Abs(ratio-tt.selectivity) < eps, IsTrue, Commentf("for %s, needed: %v, got: %v", tt.exprs, tt.selectivity, ratio)) } @@ -531,7 +531,7 @@ func BenchmarkSelectivity(b *testing.B) { b.Run("Selectivity", func(b *testing.B) { b.ResetTimer() for i := 0; i < b.N; i++ { - _, _, err := statsTbl.Selectivity(sctx, p.(plannercore.LogicalPlan).Children()[0].(*plannercore.LogicalSelection).Conditions) + _, _, err := statsTbl.Selectivity(sctx, p.(plannercore.LogicalPlan).Children()[0].(*plannercore.LogicalSelection).Conditions, nil) c.Assert(err, IsNil) } b.ReportAllocs() diff --git a/statistics/table.go b/statistics/table.go index c3d79ad621fa9..d056986afeca6 100644 --- a/statistics/table.go +++ b/statistics/table.go @@ -28,6 +28,7 @@ import ( "github.com/pingcap/tidb/sessionctx/stmtctx" "github.com/pingcap/tidb/tablecodec" "github.com/pingcap/tidb/types" + "github.com/pingcap/tidb/util/chunk" "github.com/pingcap/tidb/util/codec" "github.com/pingcap/tidb/util/ranger" "go.uber.org/atomic" @@ -694,6 +695,28 @@ func (coll *HistColl) GetAvgRowSize(cols []*expression.Column, isEncodedKey bool return size + float64(len(cols)) } +// GetAvgRowSizeListInDisk computes average row size for given columns. +func (coll *HistColl) GetAvgRowSizeListInDisk(cols []*expression.Column, padChar bool) (size float64) { + if coll.Pseudo || len(coll.Columns) == 0 || coll.Count == 0 { + for _, col := range cols { + size += float64(chunk.EstimateTypeWidth(padChar, col.GetType())) + } + } else { + for _, col := range cols { + colHist, ok := coll.Columns[col.UniqueID] + // Normally this would not happen, it is for compatibility with old version stats which + // does not include TotColSize. + if !ok || (!colHist.IsHandle && colHist.TotColSize == 0 && (colHist.NullCount != coll.Count)) { + size += float64(chunk.EstimateTypeWidth(padChar, col.GetType())) + continue + } + size += colHist.AvgColSizeListInDisk(coll.Count) + } + } + // Add 8 byte for each column's size record. See `ListInDisk` for details. + return size + float64(8*len(cols)) +} + // GetTableAvgRowSize computes average row size for a table scan, exclude the index key-value pairs. 
func (coll *HistColl) GetTableAvgRowSize(cols []*expression.Column, storeType kv.StoreType, handleInCols bool) (size float64) { size = coll.GetAvgRowSize(cols, false) diff --git a/store/mockstore/mocktikv/analyze.go b/store/mockstore/mocktikv/analyze.go index 65a85da0f6bb3..64552a3eb65f0 100644 --- a/store/mockstore/mocktikv/analyze.go +++ b/store/mockstore/mocktikv/analyze.go @@ -64,10 +64,14 @@ func (h *rpcHandler) handleAnalyzeIndexReq(req *coprocessor.Request, analyzeReq if err != nil { return nil, errors.Trace(err) } + startTS := req.StartTs + if startTS == 0 { + startTS = analyzeReq.GetStartTsFallback() + } e := &indexScanExec{ colsLen: int(analyzeReq.IdxReq.NumColumns), kvRanges: ranges, - startTS: analyzeReq.StartTs, + startTS: startTS, isolationLevel: h.isolationLevel, mvccStore: h.mvccStore, IndexScan: &tipb.IndexScan{Desc: false}, @@ -131,12 +135,16 @@ func (h *rpcHandler) handleAnalyzeColumnsReq(req *coprocessor.Request, analyzeRe if err != nil { return nil, errors.Trace(err) } + startTS := req.StartTs + if startTS == 0 { + startTS = analyzeReq.GetStartTsFallback() + } e := &analyzeColumnsExec{ tblExec: &tableScanExec{ TableScan: &tipb.TableScan{Columns: columns}, kvRanges: ranges, colIDs: evalCtx.colIDs, - startTS: analyzeReq.GetStartTs(), + startTS: startTS, isolationLevel: h.isolationLevel, mvccStore: h.mvccStore, execDetail: new(execDetail), diff --git a/store/mockstore/mocktikv/cop_handler_dag.go b/store/mockstore/mocktikv/cop_handler_dag.go index 693d3a79bc7fc..c9a628e2b1bae 100644 --- a/store/mockstore/mocktikv/cop_handler_dag.go +++ b/store/mockstore/mocktikv/cop_handler_dag.go @@ -49,6 +49,7 @@ var dummySlice = make([]byte, 0) type dagContext struct { dagReq *tipb.DAGRequest keyRanges []*coprocessor.KeyRange + startTS uint64 evalCtx *evalContext } @@ -116,6 +117,7 @@ func (h *rpcHandler) buildDAGExecutor(req *coprocessor.Request) (*dagContext, ex ctx := &dagContext{ dagReq: dagReq, keyRanges: req.Ranges, + startTS: req.StartTs, evalCtx: &evalContext{sc: sc}, } e, err := h.buildDAG(ctx, dagReq.Executors) @@ -129,11 +131,7 @@ func (h *rpcHandler) buildDAGExecutor(req *coprocessor.Request) (*dagContext, ex // is set, the daylight saving problem must be considered. Otherwise the // timezone offset in seconds east of UTC is used to construct the timezone.
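A worked sketch of the name/offset precedence described above (error handling elided; not in this patch):

	loc, _ := constructTimeZone("America/Los_Angeles", 0) // named zone wins, so DST is honored
	loc, _ = constructTimeZone("", 28800)                 // no name: fixed zone 28800s east of UTC, i.e. UTC+8
	_ = loc                                               // silence the unused-variable check in this sketch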
func constructTimeZone(name string, offset int) (*time.Location, error) { - if name != "" { - return timeutil.LoadLocation(name) - } - - return time.FixedZone("", offset), nil + return timeutil.ConstructTimeZone(name, offset) } func (h *rpcHandler) handleCopStream(ctx context.Context, req *coprocessor.Request) (tikvpb.Tikv_CoprocessorStreamClient, error) { @@ -197,11 +195,15 @@ func (h *rpcHandler) buildTableScan(ctx *dagContext, executor *tipb.Executor) (* return nil, errors.Trace(err) } + startTS := ctx.startTS + if startTS == 0 { + startTS = ctx.dagReq.GetStartTsFallback() + } e := &tableScanExec{ TableScan: executor.TblScan, kvRanges: ranges, colIDs: ctx.evalCtx.colIDs, - startTS: ctx.dagReq.GetStartTs(), + startTS: startTS, isolationLevel: h.isolationLevel, resolvedLocks: h.resolvedLocks, mvccStore: h.mvccStore, @@ -236,11 +238,15 @@ func (h *rpcHandler) buildIndexScan(ctx *dagContext, executor *tipb.Executor) (* return nil, errors.Trace(err) } + startTS := ctx.startTS + if startTS == 0 { + startTS = ctx.dagReq.GetStartTsFallback() + } e := &indexScanExec{ IndexScan: executor.IdxScan, kvRanges: ranges, colsLen: len(columns), - startTS: ctx.dagReq.GetStartTs(), + startTS: startTS, isolationLevel: h.isolationLevel, resolvedLocks: h.resolvedLocks, mvccStore: h.mvccStore, diff --git a/store/mockstore/mocktikv/rpc.go b/store/mockstore/mocktikv/rpc.go index e9eba4f11db75..00eb09dc8fa4a 100644 --- a/store/mockstore/mocktikv/rpc.go +++ b/store/mockstore/mocktikv/rpc.go @@ -722,6 +722,10 @@ func (c *RPCClient) checkArgs(ctx context.Context, addr string) (*rpcHandler, er return handler, nil } +// TiDBRPCServerCoprocessorHandler is the TiDB rpc server coprocessor handler. +// TODO: remove this global variable. +var TiDBRPCServerCoprocessorHandler func(context.Context, *coprocessor.Request) *coprocessor.Response + // SendRequest sends a request to mock cluster. func (c *RPCClient) SendRequest(ctx context.Context, addr string, req *tikvrpc.Request, timeout time.Duration) (*tikvrpc.Response, error) { if span := opentracing.SpanFromContext(ctx); span != nil && span.Tracer() != nil { @@ -736,12 +740,18 @@ func (c *RPCClient) SendRequest(ctx context.Context, addr string, req *tikvrpc.R } }) + reqCtx := &req.Context + resp := &tikvrpc.Response{} + // When the store type is TiDB, the request should be handed over to the TiDB RPC server to handle. + if req.Type == tikvrpc.CmdCop && req.StoreTp == kv.TiDB && TiDBRPCServerCoprocessorHandler != nil { + resp.Resp = TiDBRPCServerCoprocessorHandler(context.Background(), req.Cop()) + return resp, nil + } + handler, err := c.checkArgs(ctx, addr) if err != nil { return nil, err } - reqCtx := &req.Context - resp := &tikvrpc.Response{} switch req.Type { case tikvrpc.CmdGet: r := req.Get() diff --git a/store/tikv/2pc.go b/store/tikv/2pc.go index 73b676b6081a8..2e74fe816f8c7 100644 --- a/store/tikv/2pc.go +++ b/store/tikv/2pc.go @@ -70,7 +70,7 @@ var ( // Global variable set by config file. var ( - PessimisticLockTTL uint64 = 20000 // 20s + ManagedLockTTL uint64 = 20000 // 20s ) func (actionPrewrite) String() string { @@ -111,6 +111,7 @@ type twoPhaseCommitter struct { connID uint64 // connID is used for log.
cleanWg sync.WaitGroup detail unsafe.Pointer + txnSize int primaryKey []byte forUpdateTS uint64 @@ -280,6 +281,7 @@ func (c *twoPhaseCommitter) initKeysAndMutations() error { if len(keys) == 0 { return nil } + c.txnSize = size if size > int(kv.TxnTotalSizeLimit) { return kv.ErrTxnTooLarge.GenWithStackByArgs(size) @@ -436,7 +438,14 @@ func (c *twoPhaseCommitter) doActionOnBatches(bo *Backoffer, action twoPhaseComm } return errors.Trace(e) } - rateLim := len(batches) // this will be used for LargeTxn, set rateLim here + rateLim := len(batches) + // Set rateLim here for the large transaction. + // If the rate limit is too high, TiKV will report that the service is busy. + // If the rate limit is too low, we can't fully utilize TiKV's throughput. + // TODO: Find a self-adaptive way to control the rate limit here. + if rateLim > 32 { + rateLim = 32 + } batchExecutor := newBatchExecutor(rateLim, c, action, bo) err := batchExecutor.process(batches) return errors.Trace(err) @@ -526,6 +535,13 @@ func (actionPrewrite) handleSingleBatch(c *twoPhaseCommitter, bo *Backoffer, bat prewriteResp := resp.Resp.(*pb.PrewriteResponse) keyErrs := prewriteResp.GetErrors() if len(keyErrs) == 0 { + if bytes.Equal(c.primary(), batch.keys[0]) { + // After writing the primary key, if the size of the transaction is larger than 32MB, + // start the ttlManager. The ttlManager will be closed in tikvTxn.Commit(). + if c.txnSize > 32*1024*1024 { + c.run(c, nil) + } + } return nil } var locks []*Lock @@ -597,8 +613,8 @@ func (tm *ttlManager) close() { } func (tm *ttlManager) keepAlive(c *twoPhaseCommitter) { - // Ticker is set to 1/2 of the PessimisticLockTTL. - ticker := time.NewTicker(time.Duration(PessimisticLockTTL) * time.Millisecond / 2) + // Ticker is set to 1/2 of the ManagedLockTTL.
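(A quick arithmetic note, not in this patch: with the default ManagedLockTTL of 20000ms the ticker below fires every 10s, so a managed lock's TTL is refreshed twice per TTL window; the 3000ms override in 2pc_test.go shrinks that interval to 1.5s.)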
+ ticker := time.NewTicker(time.Duration(ManagedLockTTL) * time.Millisecond / 2) defer ticker.Stop() for { select { @@ -631,7 +647,9 @@ func (tm *ttlManager) keepAlive(c *twoPhaseCommitter) { return } - newTTL := uptime + PessimisticLockTTL + newTTL := uptime + ManagedLockTTL + logutil.BgLogger().Info("send TxnHeartBeat", + zap.Uint64("startTS", c.startTS), zap.Uint64("newTTL", newTTL)) startTime := time.Now() _, err = sendTxnHeartBeat(bo, c.store, c.primary(), c.startTS, newTTL) if err != nil { @@ -659,15 +677,13 @@ func (action actionPessimisticLock) handleSingleBatch(c *twoPhaseCommitter, bo * } mutations[i] = mut } - - t0 := oracle.GetTimeFromTS(c.forUpdateTS) - elapsed := uint64(time.Since(t0) / time.Millisecond) + elapsed := uint64(time.Since(c.txn.startTime) / time.Millisecond) req := tikvrpc.NewRequest(tikvrpc.CmdPessimisticLock, &pb.PessimisticLockRequest{ Mutations: mutations, PrimaryLock: c.primary(), StartVersion: c.startTS, ForUpdateTs: c.forUpdateTS, - LockTtl: elapsed + PessimisticLockTTL, + LockTtl: elapsed + ManagedLockTTL, IsFirstLock: c.isFirstLock, WaitTimeout: action.lockWaitTime, }, pb.Context{Priority: c.priority, SyncLog: c.syncLog}) @@ -778,25 +794,23 @@ func (actionPessimisticRollback) handleSingleBatch(c *twoPhaseCommitter, bo *Bac ForUpdateTs: c.forUpdateTS, Keys: batch.keys, }) - for { - resp, err := c.store.SendReq(bo, req, batch.region, readTimeoutShort) - if err != nil { - return errors.Trace(err) - } - regionErr, err := resp.GetRegionError() + resp, err := c.store.SendReq(bo, req, batch.region, readTimeoutShort) + if err != nil { + return errors.Trace(err) + } + regionErr, err := resp.GetRegionError() + if err != nil { + return errors.Trace(err) + } + if regionErr != nil { + err = bo.Backoff(BoRegionMiss, errors.New(regionErr.String())) if err != nil { return errors.Trace(err) } - if regionErr != nil { - err = bo.Backoff(BoRegionMiss, errors.New(regionErr.String())) - if err != nil { - return errors.Trace(err) - } - err = c.pessimisticRollbackKeys(bo, batch.keys) - return errors.Trace(err) - } - return nil + err = c.pessimisticRollbackKeys(bo, batch.keys) + return errors.Trace(err) } + return nil } func getTxnPriority(txn *tikvTxn) pb.CommandPri { @@ -819,8 +833,9 @@ func kvPriorityToCommandPri(pri int) pb.CommandPri { return pb.CommandPri_Low case kv.PriorityHigh: return pb.CommandPri_High + default: + return pb.CommandPri_Normal } - return pb.CommandPri_Normal } func (c *twoPhaseCommitter) setDetail(d *execdetails.CommitDetails) { @@ -999,19 +1014,9 @@ func (c *twoPhaseCommitter) pessimisticRollbackKeys(bo *Backoffer, keys [][]byte return c.doActionOnKeys(bo, actionPessimisticRollback{}, keys) } -func (c *twoPhaseCommitter) executeAndWriteFinishBinlog(ctx context.Context) error { - err := c.execute(ctx) - if err != nil { - c.writeFinishBinlog(ctx, binlog.BinlogType_Rollback, 0) - } else { - c.txn.commitTS = c.commitTS - c.writeFinishBinlog(ctx, binlog.BinlogType_Commit, int64(c.commitTS)) - } - return errors.Trace(err) -} - // execute executes the two-phase commit protocol. -func (c *twoPhaseCommitter) execute(ctx context.Context) error { +func (c *twoPhaseCommitter) execute(ctx context.Context) (err error) { + var binlogSkipped bool defer func() { // Always clean up all written keys if the txn does not commit. 
c.mu.RLock() @@ -1035,12 +1040,22 @@ func (c *twoPhaseCommitter) execute(ctx context.Context) error { c.cleanWg.Done() }() } + c.txn.commitTS = c.commitTS + if binlogSkipped { + binloginfo.RemoveOneSkippedCommitter() + } else { + if err != nil { + c.writeFinishBinlog(ctx, binlog.BinlogType_Rollback, 0) + } else { + c.writeFinishBinlog(ctx, binlog.BinlogType_Commit, int64(c.commitTS)) + } + } }() binlogChan := c.prewriteBinlog(ctx) prewriteBo := NewBackoffer(ctx, PrewriteMaxBackoff).WithVars(c.txn.vars) start := time.Now() - err := c.prewriteKeys(prewriteBo, c.keys) + err = c.prewriteKeys(prewriteBo, c.keys) commitDetail := c.getDetail() commitDetail.PrewriteTime = time.Since(start) if prewriteBo.totalSleep > 0 { @@ -1050,9 +1065,13 @@ func (c *twoPhaseCommitter) execute(ctx context.Context) error { commitDetail.Mu.Unlock() } if binlogChan != nil { - binlogErr := <-binlogChan - if binlogErr != nil { - return errors.Trace(binlogErr) + binlogWriteResult := <-binlogChan + if binlogWriteResult != nil { + binlogSkipped = binlogWriteResult.Skipped() + binlogErr := binlogWriteResult.GetError() + if binlogErr != nil { + return binlogErr + } } } if err != nil { @@ -1139,11 +1158,11 @@ func (c *twoPhaseCommitter) checkSchemaValid() error { return nil } -func (c *twoPhaseCommitter) prewriteBinlog(ctx context.Context) chan error { +func (c *twoPhaseCommitter) prewriteBinlog(ctx context.Context) chan *binloginfo.WriteResult { if !c.shouldWriteBinlog() { return nil } - ch := make(chan error, 1) + ch := make(chan *binloginfo.WriteResult, 1) go func() { logutil.Eventf(ctx, "start prewrite binlog") binInfo := c.txn.us.GetOption(kv.BinlogInfo).(*binloginfo.BinlogInfo) @@ -1152,9 +1171,13 @@ func (c *twoPhaseCommitter) prewriteBinlog(ctx context.Context) chan error { if bin.Tp == binlog.BinlogType_Prewrite { bin.PrewriteKey = c.keys[0] } - err := binInfo.WriteBinlog(c.store.clusterID) + wr := binInfo.WriteBinlog(c.store.clusterID) + if wr.Skipped() { + binInfo.Data.PrewriteValue = nil + binloginfo.AddOneSkippedCommitter() + } logutil.Eventf(ctx, "finish prewrite binlog") - ch <- errors.Trace(err) + ch <- wr }() return ch } @@ -1169,7 +1192,8 @@ func (c *twoPhaseCommitter) writeFinishBinlog(ctx context.Context, tp binlog.Bin binInfo.Data.PrewriteValue = nil go func() { logutil.Eventf(ctx, "start write finish binlog") - err := binInfo.WriteBinlog(c.store.clusterID) + binlogWriteResult := binInfo.WriteBinlog(c.store.clusterID) + err := binlogWriteResult.GetError() if err != nil { logutil.BgLogger().Error("failed to write binlog", zap.Error(err)) diff --git a/store/tikv/2pc_test.go b/store/tikv/2pc_test.go index 29c46939267d0..f5cb8ad103831 100644 --- a/store/tikv/2pc_test.go +++ b/store/tikv/2pc_test.go @@ -39,7 +39,7 @@ type testCommitterSuite struct { var _ = Suite(&testCommitterSuite{}) func (s *testCommitterSuite) SetUpSuite(c *C) { - PessimisticLockTTL = 3000 // 3s + ManagedLockTTL = 3000 // 3s s.OneByOneSuite.SetUpSuite(c) } @@ -611,7 +611,7 @@ func (s *testCommitterSuite) TestPessimisticTTL(c *C) { expire := oracle.ExtractPhysical(txn.startTS) + int64(lockInfoNew.LockTtl) now := oracle.ExtractPhysical(currentTS) c.Assert(expire > now, IsTrue) - c.Assert(uint64(expire-now) <= PessimisticLockTTL, IsTrue) + c.Assert(uint64(expire-now) <= ManagedLockTTL, IsTrue) return } time.Sleep(100 * time.Millisecond) @@ -619,6 +619,21 @@ func (s *testCommitterSuite) TestPessimisticTTL(c *C) { c.Assert(false, IsTrue, Commentf("update pessimistic ttl fail")) } +// TestElapsedTTL tests that elapsed time is correct even if 
the TS's physical time is ahead of the local time. +func (s *testCommitterSuite) TestElapsedTTL(c *C) { + key := kv.Key("key") + txn := s.begin(c) + txn.startTS = oracle.ComposeTS(oracle.GetPhysical(time.Now().Add(time.Second*10)), 1) + txn.SetOption(kv.Pessimistic, true) + time.Sleep(time.Millisecond * 100) + forUpdateTS := oracle.ComposeTS(oracle.ExtractPhysical(txn.startTS)+100, 1) + err := txn.LockKeys(context.Background(), nil, forUpdateTS, kv.LockAlwaysWait, key) + c.Assert(err, IsNil) + lockInfo := s.getLockInfo(c, key) + c.Assert(lockInfo.LockTtl-ManagedLockTTL, GreaterEqual, uint64(100)) + c.Assert(lockInfo.LockTtl-ManagedLockTTL, Less, uint64(150)) +} + func (s *testCommitterSuite) getLockInfo(c *C, key []byte) *kvrpcpb.LockInfo { txn := s.begin(c) err := txn.Set(key, key) diff --git a/store/tikv/backoff.go b/store/tikv/backoff.go index 25d1ea8645e7b..18bd3414071c3 100644 --- a/store/tikv/backoff.go +++ b/store/tikv/backoff.go @@ -250,6 +250,9 @@ type Backoffer struct { types []fmt.Stringer vars *kv.Variables noop bool + + backoffSleepMS map[backoffType]int + backoffTimes map[backoffType]int } type txnStartCtxKeyType struct{} @@ -332,6 +335,14 @@ func (b *Backoffer) BackoffWithMaxSleep(typ backoffType, maxSleepMs int, err err realSleep := f(b.ctx, maxSleepMs) backoffDuration.Observe(float64(realSleep) / 1000) b.totalSleep += realSleep + if b.backoffSleepMS == nil { + b.backoffSleepMS = make(map[backoffType]int) + } + b.backoffSleepMS[typ] += realSleep + if b.backoffTimes == nil { + b.backoffTimes = make(map[backoffType]int) + } + b.backoffTimes[typ]++ var startTs interface{} if ts := b.ctx.Value(txnStartKey); ts != nil { diff --git a/store/tikv/client.go b/store/tikv/client.go index 64882a8b6d31e..5568e74d24336 100644 --- a/store/tikv/client.go +++ b/store/tikv/client.go @@ -31,10 +31,12 @@ import ( "github.com/pingcap/kvproto/pkg/tikvpb" "github.com/pingcap/parser/terror" "github.com/pingcap/tidb/config" + "github.com/pingcap/tidb/kv" "github.com/pingcap/tidb/metrics" "github.com/pingcap/tidb/store/tikv/tikvrpc" "github.com/pingcap/tidb/util/logutil" "google.golang.org/grpc" + "google.golang.org/grpc/backoff" "google.golang.org/grpc/connectivity" "google.golang.org/grpc/credentials" "google.golang.org/grpc/keepalive" @@ -78,21 +80,23 @@ type connArray struct { streamTimeout chan *tikvrpc.Lease // batchConn is not null when batch is enabled.
*batchConn + done chan struct{} } -func newConnArray(maxSize uint, addr string, security config.Security, idleNotify *uint32, done <-chan struct{}) (*connArray, error) { +func newConnArray(maxSize uint, addr string, security config.Security, idleNotify *uint32) (*connArray, error) { a := &connArray{ index: 0, v: make([]*grpc.ClientConn, maxSize), streamTimeout: make(chan *tikvrpc.Lease, 1024), + done: make(chan struct{}), } - if err := a.Init(addr, security, idleNotify, done); err != nil { + if err := a.Init(addr, security, idleNotify); err != nil { return nil, err } return a, nil } -func (a *connArray) Init(addr string, security config.Security, idleNotify *uint32, done <-chan struct{}) error { +func (a *connArray) Init(addr string, security config.Security, idleNotify *uint32) error { a.target = addr opt := grpc.WithInsecure() @@ -132,7 +136,15 @@ func (a *connArray) Init(addr string, security config.Security, idleNotify *uint grpc.WithUnaryInterceptor(unaryInterceptor), grpc.WithStreamInterceptor(streamInterceptor), grpc.WithDefaultCallOptions(grpc.MaxCallRecvMsgSize(MaxRecvMsgSize)), - grpc.WithBackoffMaxDelay(time.Second*3), + grpc.WithConnectParams(grpc.ConnectParams{ + Backoff: backoff.Config{ + BaseDelay: 100 * time.Millisecond, // Default was 1s. + Multiplier: 1.6, // Default + Jitter: 0.2, // Default + MaxDelay: 3 * time.Second, // Default was 120s. + }, + MinConnectTimeout: dialTimeout, + }), grpc.WithKeepaliveParams(keepalive.ClientParameters{ Time: time.Duration(keepAlive) * time.Second, Timeout: time.Duration(keepAliveTimeout) * time.Second, @@ -160,7 +172,7 @@ func (a *connArray) Init(addr string, security config.Security, idleNotify *uint a.batchCommandsClients = append(a.batchCommandsClients, batchClient) } } - go tikvrpc.CheckStreamTimeoutLoop(a.streamTimeout, done) + go tikvrpc.CheckStreamTimeoutLoop(a.streamTimeout, a.done) if allowBatch { go a.batchSendLoop(cfg.TiKVClient) } @@ -185,6 +197,8 @@ func (a *connArray) Close() { a.v[i] = nil } } + + close(a.done) } // rpcClient is RPC client struct. @@ -193,7 +207,6 @@ func (a *connArray) Close() { // that there are too many concurrent requests which overload the service of TiKV. type rpcClient struct { sync.RWMutex - done chan struct{} conns map[string]*connArray security config.Security @@ -206,7 +219,6 @@ type rpcClient struct { func newRPCClient(security config.Security) *rpcClient { return &rpcClient{ - done: make(chan struct{}, 1), conns: make(map[string]*connArray), security: security, } @@ -242,7 +254,7 @@ func (c *rpcClient) createConnArray(addr string) (*connArray, error) { if !ok { var err error connCount := config.GetGlobalConfig().TiKVClient.GrpcConnectionCount - array, err = newConnArray(connCount, addr, c.security, &c.idleNotify, c.done) + array, err = newConnArray(connCount, addr, c.security, &c.idleNotify) if err != nil { return nil, err } @@ -287,7 +299,9 @@ func (c *rpcClient) SendRequest(ctx context.Context, addr string, req *tikvrpc.R return nil, errors.Trace(err) } - if config.GetGlobalConfig().TiKVClient.MaxBatchSize > 0 { + // The TiDB RPC server does not support batch RPC yet. + // TODO: remove this store type check after the TiDB RPC server supports streaming.
+ if config.GetGlobalConfig().TiKVClient.MaxBatchSize > 0 && req.StoreTp != kv.TiDB { if batchReq := req.ToBatchCommandsRequest(); batchReq != nil { return sendBatchRequest(ctx, addr, connArray.batchConn, batchReq, timeout) } @@ -348,7 +362,6 @@ func (c *rpcClient) SendRequest(ctx context.Context, addr string, req *tikvrpc.R func (c *rpcClient) Close() error { // TODO: add a unit test for SendRequest After Closed - close(c.done) c.closeConns() return nil } diff --git a/store/tikv/client_batch.go b/store/tikv/client_batch.go index fe2dc76c42deb..73167e710be22 100644 --- a/store/tikv/client_batch.go +++ b/store/tikv/client_batch.go @@ -491,7 +491,6 @@ func (a *batchConn) getClientAndSend(entries []*batchCommandsEntry, requests []* } cli.send(req, entries) - return } func (c *batchCommandsClient) initBatchClient() error { diff --git a/store/tikv/coprocessor.go b/store/tikv/coprocessor.go index 2f60d280725c3..f4ba544903f61 100644 --- a/store/tikv/coprocessor.go +++ b/store/tikv/coprocessor.go @@ -19,6 +19,7 @@ import ( "fmt" "io" "sort" + "strconv" "strings" "sync" "sync/atomic" @@ -31,6 +32,7 @@ import ( "github.com/pingcap/kvproto/pkg/coprocessor" "github.com/pingcap/kvproto/pkg/kvrpcpb" "github.com/pingcap/tidb/distsql" + "github.com/pingcap/tidb/domain/infosync" "github.com/pingcap/tidb/kv" "github.com/pingcap/tidb/metrics" "github.com/pingcap/tidb/store/tikv/tikvrpc" @@ -217,7 +219,6 @@ const rangesPerTask = 25000 func buildCopTasks(bo *Backoffer, cache *RegionCache, ranges *copRanges, req *kv.Request) ([]*copTask, error) { start := time.Now() - rangesLen := ranges.len() cmdType := tikvrpc.CmdCop if req.Streaming { cmdType = tikvrpc.CmdCopStream @@ -230,6 +231,11 @@ func buildCopTasks(bo *Backoffer, cache *RegionCache, ranges *copRanges, req *kv tableStart, tableEnd = keyRange[0].StartKey, keyRange[0].EndKey } + if req.StoreType == kv.TiDB { + return buildTiDBMemCopTasks(ranges, req) + } + + rangesLen := ranges.len() var tasks []*copTask appendTask := func(regionWithRangeInfo *KeyLocation, ranges *copRanges) { if req.StoreType == kv.TiKV { @@ -291,6 +297,25 @@ func buildCopTasks(bo *Backoffer, cache *RegionCache, ranges *copRanges, req *kv return tasks, nil } +func buildTiDBMemCopTasks(ranges *copRanges, req *kv.Request) ([]*copTask, error) { + servers, err := infosync.GetAllServerInfo(context.Background()) + if err != nil { + return nil, err + } + tasks := make([]*copTask, 0, len(servers)) + for _, ser := range servers { + addr := ser.IP + ":" + strconv.FormatUint(uint64(ser.StatusPort), 10) + tasks = append(tasks, &copTask{ + ranges: ranges, + respChan: make(chan *copResponse, 2), + cmdType: tikvrpc.CmdCop, + storeType: req.StoreType, + storeAddr: addr, + }) + } + return tasks, nil +} + func splitRanges(bo *Backoffer, cache *RegionCache, ranges *copRanges, fn func(regionWithRangeInfo *KeyLocation, ranges *copRanges)) error { for ranges.len() > 0 { loc, err := cache.LocateKey(bo, ranges.at(0).StartKey) @@ -688,9 +713,10 @@ func (worker *copIteratorWorker) handleTaskOnce(bo *Backoffer, task *copTask, ch }) req := tikvrpc.NewReplicaReadRequest(task.cmdType, &coprocessor.Request{ - Tp: worker.req.Tp, - Data: worker.req.Data, - Ranges: task.ranges.toPBRanges(), + Tp: worker.req.Tp, + StartTs: worker.req.StartTs, + Data: worker.req.Data, + Ranges: task.ranges.toPBRanges(), }, worker.req.ReplicaRead, worker.replicaReadSeed, kvrpcpb.Context{ IsolationLevel: pbIsolationLevel(worker.req.IsolationLevel), Priority: kvPriorityToCommandPri(worker.req.Priority), @@ -698,8 +724,9 @@ func (worker 
*copIteratorWorker) handleTaskOnce(bo *Backoffer, task *copTask, ch HandleTime: true, ScanDetail: true, }) + req.StoreTp = task.storeType startTime := time.Now() - resp, rpcCtx, storeAddr, err := worker.SendReqCtx(bo, req, task.region, ReadTimeoutMedium, task.storeType) + resp, rpcCtx, storeAddr, err := worker.SendReqCtx(bo, req, task.region, ReadTimeoutMedium, task.storeType, task.storeAddr) if err != nil { return nil, errors.Trace(err) } @@ -774,8 +801,11 @@ func (ch *clientHelper) ResolveLocks(bo *Backoffer, callerStartTS uint64, locks } // SendReqCtx wraps the SendReqCtx function and use the resolved lock result in the kvrpcpb.Context. -func (ch *clientHelper) SendReqCtx(bo *Backoffer, req *tikvrpc.Request, regionID RegionVerID, timeout time.Duration, sType kv.StoreType) (*tikvrpc.Response, *RPCContext, string, error) { +func (ch *clientHelper) SendReqCtx(bo *Backoffer, req *tikvrpc.Request, regionID RegionVerID, timeout time.Duration, sType kv.StoreType, directStoreAddr string) (*tikvrpc.Response, *RPCContext, string, error) { sender := NewRegionRequestSender(ch.RegionCache, ch.Client) + if len(directStoreAddr) > 0 { + sender.storeAddr = directStoreAddr + } req.Context.ResolvedLocks = ch.minCommitTSPushed.Get() resp, ctx, err := sender.SendReqCtx(bo, req, regionID, timeout, sType) return resp, ctx, sender.storeAddr, err @@ -881,6 +911,11 @@ func (worker *copIteratorWorker) handleCopStreamResult(bo *Backoffer, rpcCtx *RP // successful response, otherwise it's nil. func (worker *copIteratorWorker) handleCopResponse(bo *Backoffer, rpcCtx *RPCContext, resp *copResponse, task *copTask, ch chan<- *copResponse, lastRange *coprocessor.KeyRange, costTime time.Duration) ([]*copTask, error) { if regionErr := resp.pbResp.GetRegionError(); regionErr != nil { + if rpcCtx != nil && task.storeType == kv.TiDB { + resp.err = errors.Errorf("error: %v", regionErr) + worker.sendToRespCh(resp, ch, true) + return nil, nil + } if err := bo.Backoff(BoRegionMiss, errors.New(regionErr.String())); err != nil { return nil, errors.Trace(err) } @@ -913,13 +948,19 @@ func (worker *copIteratorWorker) handleCopResponse(bo *Backoffer, rpcCtx *RPCCon // When the request is using streaming API, the `Range` is not nil. 
if resp.pbResp.Range != nil { resp.startKey = resp.pbResp.Range.Start - } else { + } else if task.ranges != nil && task.ranges.len() > 0 { resp.startKey = task.ranges.at(0).StartKey } if resp.detail == nil { resp.detail = new(execdetails.ExecDetails) } resp.detail.BackoffTime = time.Duration(bo.totalSleep) * time.Millisecond + resp.detail.BackoffSleep, resp.detail.BackoffTimes = make(map[string]time.Duration), make(map[string]int) + for backoff := range bo.backoffTimes { + backoffName := backoff.String() + resp.detail.BackoffTimes[backoffName] = bo.backoffTimes[backoff] + resp.detail.BackoffSleep[backoffName] = time.Duration(bo.backoffSleepMS[backoff]) * time.Millisecond + } if rpcCtx != nil { resp.detail.CalleeAddress = rpcCtx.Addr } diff --git a/store/tikv/gcworker/gc_worker.go b/store/tikv/gcworker/gc_worker.go index 3689a4e941782..f1b33aea4ad3e 100644 --- a/store/tikv/gcworker/gc_worker.go +++ b/store/tikv/gcworker/gc_worker.go @@ -179,7 +179,7 @@ func (w *GCWorker) start(ctx context.Context, wg *sync.WaitGroup) { w.lastFinish = time.Now() if err != nil { logutil.Logger(ctx).Error("[gc worker] runGCJob", zap.Error(err)) - break + return } case <-ctx.Done(): logutil.Logger(ctx).Info("[gc worker] quit", zap.String("uuid", w.uuid)) diff --git a/store/tikv/lock_resolver.go b/store/tikv/lock_resolver.go index c59b269906326..cd33dd42364fe 100644 --- a/store/tikv/lock_resolver.go +++ b/store/tikv/lock_resolver.go @@ -358,7 +358,6 @@ func (t *txnExpireTime) update(lockExpire int64) { if lockExpire < t.txnExpire { t.txnExpire = lockExpire } - return } func (t *txnExpireTime) value() int64 { diff --git a/store/tikv/oracle/oracles/pd.go b/store/tikv/oracle/oracles/pd.go index 3d07426ed8833..e0c921973be08 100644 --- a/store/tikv/oracle/oracles/pd.go +++ b/store/tikv/oracle/oracles/pd.go @@ -121,6 +121,7 @@ func (o *pdOracle) setLastTS(ts uint64) { func (o *pdOracle) updateTS(ctx context.Context, interval time.Duration) { ticker := time.NewTicker(interval) + defer ticker.Stop() for { select { case <-ticker.C: @@ -131,7 +132,6 @@ func (o *pdOracle) updateTS(ctx context.Context, interval time.Duration) { } o.setLastTS(ts) case <-o.quit: - ticker.Stop() return } } diff --git a/store/tikv/range_task.go b/store/tikv/range_task.go index 92c768c49faac..038e61585ec0d 100644 --- a/store/tikv/range_task.go +++ b/store/tikv/range_task.go @@ -267,12 +267,11 @@ type rangeTaskWorker struct { // run starts the worker. It collects all objects from `w.taskCh` and process them one by one. func (w *rangeTaskWorker) run(ctx context.Context, cancel context.CancelFunc) { defer w.wg.Done() - for r := range w.taskCh { select { case <-ctx.Done(): w.err = ctx.Err() - break + return default: } diff --git a/store/tikv/region_cache.go b/store/tikv/region_cache.go index 1f57855b28470..24f1ff1e32fd3 100644 --- a/store/tikv/region_cache.go +++ b/store/tikv/region_cache.go @@ -82,9 +82,7 @@ type RegionStore struct { // clone clones region store struct. func (r *RegionStore) clone() *RegionStore { storeFails := make([]uint32, len(r.stores)) - for i, e := range r.storeFails { - storeFails[i] = e - } + copy(storeFails, r.storeFails) return &RegionStore{ workTiFlashIdx: r.workTiFlashIdx, workTiKVIdx: r.workTiKVIdx, @@ -1150,7 +1148,6 @@ retry: if !r.compareAndSwapStore(oldRegionStore, newRegionStore) { goto retry } - return } // Contains checks whether the key is in the region, for the maximum region endKey is empty. 
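A sketch of the ticker pattern the pd.go hunk above adopts (not in this patch; assumes an `interval time.Duration` and a `quit` channel): deferring Stop releases the ticker on every exit path, not just the quit branch:

	ticker := time.NewTicker(interval)
	defer ticker.Stop() // runs however the loop exits
	for {
		select {
		case <-ticker.C:
			// periodic work
		case <-quit:
			return
		}
	}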
@@ -1304,7 +1301,6 @@ retryMarkResolved: if !s.compareAndSwapState(oldState, newState) { goto retryMarkResolved } - return } func (s *Store) getResolveState() resolveState { diff --git a/store/tikv/region_request.go b/store/tikv/region_request.go index 55b15d088725c..6e101f766b6ca 100644 --- a/store/tikv/region_request.go +++ b/store/tikv/region_request.go @@ -20,8 +20,8 @@ import ( "time" "go.uber.org/zap" - "google.golang.org/grpc" "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" "github.com/pingcap/errors" "github.com/pingcap/failpoint" @@ -120,6 +120,10 @@ func (s *RegionRequestSender) SendReqCtx( rpcCtx, err = s.regionCache.GetTiKVRPCContext(bo, regionID, replicaRead, seed) case kv.TiFlash: rpcCtx, err = s.regionCache.GetTiFlashRPCContext(bo, regionID) + case kv.TiDB: + rpcCtx = &RPCContext{ + Addr: s.storeAddr, + } default: err = errors.Errorf("unsupported storage type: %v", sType) } @@ -218,7 +222,7 @@ func (s *RegionRequestSender) onSendFail(bo *Backoffer, ctx *RPCContext, err err } else if atomic.LoadUint32(&ShuttingDown) > 0 { return errTiDBShuttingDown } - if grpc.Code(errors.Cause(err)) == codes.Canceled { + if status.Code(errors.Cause(err)) == codes.Canceled { select { case <-bo.ctx.Done(): return errors.Trace(err) @@ -230,7 +234,9 @@ func (s *RegionRequestSender) onSendFail(bo *Backoffer, ctx *RPCContext, err err } } - s.regionCache.OnSendFail(bo, ctx, s.needReloadRegion(ctx), err) + if ctx.Meta != nil { + s.regionCache.OnSendFail(bo, ctx, s.needReloadRegion(ctx), err) + } // Retry on send request failure when it's not canceled. // When a store is not available, the leader of related region should be elected quickly. @@ -337,7 +343,11 @@ func (s *RegionRequestSender) onRegionError(bo *Backoffer, ctx *RPCContext, seed logutil.BgLogger().Debug("tikv reports region failed", zap.Stringer("regionErr", regionErr), zap.Stringer("ctx", ctx)) - s.regionCache.InvalidateCachedRegion(ctx.Region) + // When the request is sent to TiDB, there is no region in the request, so the region ID will be 0. + // When the region ID is 0, the region cache is not involved at all.
+ if ctx.Region.id != 0 { + s.regionCache.InvalidateCachedRegion(ctx.Region) + } return false, nil } diff --git a/store/tikv/snapshot.go b/store/tikv/snapshot.go index 0f47476f24f9e..3c8f8acd46bd1 100644 --- a/store/tikv/snapshot.go +++ b/store/tikv/snapshot.go @@ -214,7 +214,7 @@ func (s *tikvSnapshot) batchGetSingleRegion(bo *Backoffer, batch batchKeys, coll NotFillCache: s.notFillCache, }) - resp, _, _, err := cli.SendReqCtx(bo, req, batch.region, ReadTimeoutMedium, kv.TiKV) + resp, _, _, err := cli.SendReqCtx(bo, req, batch.region, ReadTimeoutMedium, kv.TiKV, "") if err != nil { return errors.Trace(err) @@ -323,7 +323,7 @@ func (s *tikvSnapshot) get(bo *Backoffer, k kv.Key) ([]byte, error) { if err != nil { return nil, errors.Trace(err) } - resp, _, _, err := cli.SendReqCtx(bo, req, loc.Region, readTimeoutShort, kv.TiKV) + resp, _, _, err := cli.SendReqCtx(bo, req, loc.Region, readTimeoutShort, kv.TiKV, "") if err != nil { return nil, errors.Trace(err) } diff --git a/store/tikv/split_region.go b/store/tikv/split_region.go index 46198bda422f0..65594c0e6950b 100644 --- a/store/tikv/split_region.go +++ b/store/tikv/split_region.go @@ -32,10 +32,7 @@ import ( ) func equalRegionStartKey(key, regionStartKey []byte) bool { - if bytes.Equal(key, regionStartKey) { - return true - } - return false + return bytes.Equal(key, regionStartKey) } func (s *tikvStore) splitBatchRegionsReq(bo *Backoffer, keys [][]byte, scatter bool) (*tikvrpc.Response, error) { @@ -235,7 +232,7 @@ func (s *tikvStore) scatterRegion(regionID uint64) error { return nil } -// WaitScatterRegionFinish implements SplitableStore interface. +// WaitScatterRegionFinish implements SplittableStore interface. // backOff is the back off time of the wait scatter region.(Milliseconds) // if backOff <= 0, the default wait scatter back off time will be used. func (s *tikvStore) WaitScatterRegionFinish(regionID uint64, backOff int) error { diff --git a/store/tikv/tikvrpc/tikvrpc.go b/store/tikv/tikvrpc/tikvrpc.go index 986566e539230..6de639fce48d8 100644 --- a/store/tikv/tikvrpc/tikvrpc.go +++ b/store/tikv/tikvrpc/tikvrpc.go @@ -145,6 +145,7 @@ type Request struct { req interface{} kvrpcpb.Context ReplicaReadSeed uint32 + StoreTp kv.StoreType } // NewRequest returns new kv rpc request. @@ -461,8 +462,10 @@ type CopStreamResponse struct { // SetContext set the Context field for the given req to the specified ctx. func SetContext(req *Request, region *metapb.Region, peer *metapb.Peer) error { ctx := &req.Context - ctx.RegionId = region.Id - ctx.RegionEpoch = region.RegionEpoch + if region != nil { + ctx.RegionId = region.Id + ctx.RegionEpoch = region.RegionEpoch + } ctx.Peer = peer switch req.Type { diff --git a/store/tikv/txn.go b/store/tikv/txn.go index 5f8ce931f2057..eb878fd78fe54 100644 --- a/store/tikv/txn.go +++ b/store/tikv/txn.go @@ -310,7 +310,7 @@ func (txn *tikvTxn) Commit(ctx context.Context) error { // latches disabled // pessimistic transaction should also bypass latch. 
if txn.store.txnLatches == nil || txn.IsPessimistic() { - err = committer.executeAndWriteFinishBinlog(ctx) + err = committer.execute(ctx) logutil.Logger(ctx).Debug("[kv] txnLatches disabled, 2pc directly", zap.Error(err)) return errors.Trace(err) } @@ -328,7 +328,7 @@ func (txn *tikvTxn) Commit(ctx context.Context) error { if lock.IsStale() { return kv.ErrWriteConflictInTiDB.FastGenByArgs(txn.startTS) } - err = committer.executeAndWriteFinishBinlog(ctx) + err = committer.execute(ctx) if err == nil { lock.SetCommitTS(committer.commitTS) } diff --git a/structure/structure.go b/structure/structure.go index 3aba2214550a9..b736ec9daf981 100644 --- a/structure/structure.go +++ b/structure/structure.go @@ -20,11 +20,9 @@ import ( // structure error codes. const ( - codeInvalidHashKeyFlag terror.ErrCode = 1 - codeInvalidHashKeyPrefix terror.ErrCode = 2 - codeInvalidListIndex terror.ErrCode = 3 - codeInvalidListMetaData terror.ErrCode = 4 - codeWriteOnSnapshot terror.ErrCode = 5 + codeInvalidHashKeyFlag terror.ErrCode = 1 + codeInvalidListMetaData terror.ErrCode = 4 + codeWriteOnSnapshot terror.ErrCode = 5 ) var ( diff --git a/table/column.go b/table/column.go index c20b5a41ec025..aee631fee7b91 100644 --- a/table/column.go +++ b/table/column.go @@ -154,7 +154,7 @@ func CastValues(ctx sessionctx.Context, rec []types.Datum, cols []*Column) (err func handleWrongUtf8Value(ctx sessionctx.Context, col *model.ColumnInfo, casted *types.Datum, str string, i int) (types.Datum, error) { sc := ctx.GetSessionVars().StmtCtx - err := ErrTruncateWrongValue.FastGen("incorrect utf8 value %x(%s) for column %s", casted.GetBytes(), str, col.Name) + err := ErrTruncatedWrongValueForField.FastGen("incorrect utf8 value %x(%s) for column %s", casted.GetBytes(), str, col.Name) logutil.BgLogger().Error("incorrect UTF-8 value", zap.Uint64("conn", ctx.GetSessionVars().ConnectionID), zap.Error(err)) // Truncate to valid utf8 string. truncateVal := types.NewStringDatum(str[:i]) @@ -232,18 +232,6 @@ type ColDesc struct { const defaultPrivileges = "select,insert,update,references" -// GetTypeDesc gets the description for column type. -func (c *Column) GetTypeDesc() string { - desc := c.FieldType.CompactStr() - if mysql.HasUnsignedFlag(c.Flag) && c.Tp != mysql.TypeBit && c.Tp != mysql.TypeYear { - desc += " unsigned" - } - if mysql.HasZerofillFlag(c.Flag) && c.Tp != mysql.TypeYear { - desc += " zerofill" - } - return desc -} - // NewColDesc returns a new ColDesc for a column. func NewColDesc(col *Column) *ColDesc { // TODO: if we have no primary key and a unique index which's columns are all not null @@ -324,7 +312,7 @@ func CheckOnce(cols []*Column) error { name := col.Name _, ok := m[name.L] if ok { - return errDuplicateColumn.GenWithStack("column specified twice - %s", name) + return errDuplicateColumn.GenWithStackByArgs(name) } m[name.L] = struct{}{} @@ -416,8 +404,7 @@ func getColDefaultValue(ctx sessionctx.Context, col *model.ColumnInfo, defaultVa } value, err := expression.GetTimeValue(ctx, defaultVal, col.Tp, int8(col.Decimal)) if err != nil { - return types.Datum{}, errGetDefaultFailed.GenWithStack("Field '%s' get default value fail - %s", - col.Name, err) + return types.Datum{}, errGetDefaultFailed.GenWithStackByArgs(col.Name) } // If the column's default value is not ZeroDatetimeStr or CurrentTimestamp, convert the default value to the current session time zone. 
if needChangeTimeZone { diff --git a/table/table.go b/table/table.go index a467cb641dbf2..4c6d93568be53 100644 --- a/table/table.go +++ b/table/table.go @@ -38,8 +38,25 @@ const ( NormalTable Type = iota // VirtualTable , store no data, just extract data from the memory struct. VirtualTable + // ClusterTable , contains the `VirtualTable` from all the TiDB nodes in the cluster. + ClusterTable ) +// IsNormalTable checks whether the table is a normal table type. +func (tp Type) IsNormalTable() bool { + return tp == NormalTable +} + +// IsVirtualTable checks whether the table is a virtual table type. +func (tp Type) IsVirtualTable() bool { + return tp == VirtualTable +} + +// IsClusterTable checks whether the table is a cluster table type. +func (tp Type) IsClusterTable() bool { + return tp == ClusterTable +} + const ( // DirtyTableAddRow is the constant for dirty table operation type. DirtyTableAddRow = iota @@ -49,41 +66,39 @@ const ( var ( // ErrColumnCantNull is used for inserting null to a not null column. - ErrColumnCantNull = terror.ClassTable.New(codeColumnCantNull, mysql.MySQLErrName[mysql.ErrBadNull]) - errUnknownColumn = terror.ClassTable.New(codeUnknownColumn, "unknown column") - errDuplicateColumn = terror.ClassTable.New(codeDuplicateColumn, "duplicate column") + ErrColumnCantNull = terror.ClassTable.New(mysql.ErrBadNull, mysql.MySQLErrName[mysql.ErrBadNull]) + errUnknownColumn = terror.ClassTable.New(mysql.ErrBadField, mysql.MySQLErrName[mysql.ErrBadField]) + errDuplicateColumn = terror.ClassTable.New(mysql.ErrFieldSpecifiedTwice, mysql.MySQLErrName[mysql.ErrFieldSpecifiedTwice]) - errGetDefaultFailed = terror.ClassTable.New(codeGetDefaultFailed, "get default value fail") + errGetDefaultFailed = terror.ClassTable.New(mysql.ErrFieldGetDefaultFailed, mysql.MySQLErrName[mysql.ErrFieldGetDefaultFailed]) // ErrNoDefaultValue is used when insert a row, the column value is not given, and the column has not null flag // and it doesn't have a default value. - ErrNoDefaultValue = terror.ClassTable.New(codeNoDefaultValue, mysql.MySQLErrName[mysql.ErrNoDefaultForField]) + ErrNoDefaultValue = terror.ClassTable.New(mysql.ErrNoDefaultForField, mysql.MySQLErrName[mysql.ErrNoDefaultForField]) // ErrIndexOutBound returns for index column offset out of bound. - ErrIndexOutBound = terror.ClassTable.New(codeIndexOutBound, "index column offset out of bound") + ErrIndexOutBound = terror.ClassTable.New(mysql.ErrIndexOutBound, mysql.MySQLErrName[mysql.ErrIndexOutBound]) // ErrUnsupportedOp returns for unsupported operation. - ErrUnsupportedOp = terror.ClassTable.New(codeUnsupportedOp, "operation not supported") + ErrUnsupportedOp = terror.ClassTable.New(mysql.ErrUnsupportedOp, mysql.MySQLErrName[mysql.ErrUnsupportedOp]) // ErrRowNotFound returns for row not found. - ErrRowNotFound = terror.ClassTable.New(codeRowNotFound, "can not find the row") + ErrRowNotFound = terror.ClassTable.New(mysql.ErrRowNotFound, mysql.MySQLErrName[mysql.ErrRowNotFound]) // ErrTableStateCantNone returns for table none state. - ErrTableStateCantNone = terror.ClassTable.New(codeTableStateCantNone, "table can not be in none state") + ErrTableStateCantNone = terror.ClassTable.New(mysql.ErrTableStateCantNone, mysql.MySQLErrName[mysql.ErrTableStateCantNone]) // ErrColumnStateCantNone returns for column none state.
- ErrColumnStateCantNone = terror.ClassTable.New(codeColumnStateCantNone, "column can not be in none state") + ErrColumnStateCantNone = terror.ClassTable.New(mysql.ErrColumnStateCantNone, mysql.MySQLErrName[mysql.ErrColumnStateCantNone]) // ErrColumnStateNonPublic returns for column non-public state. - ErrColumnStateNonPublic = terror.ClassTable.New(codeColumnStateNonPublic, "can not use non-public column") + ErrColumnStateNonPublic = terror.ClassTable.New(mysql.ErrColumnStateNonPublic, mysql.MySQLErrName[mysql.ErrColumnStateNonPublic]) // ErrIndexStateCantNone returns for index none state. - ErrIndexStateCantNone = terror.ClassTable.New(codeIndexStateCantNone, "index can not be in none state") + ErrIndexStateCantNone = terror.ClassTable.New(mysql.ErrIndexStateCantNone, mysql.MySQLErrName[mysql.ErrIndexStateCantNone]) // ErrInvalidRecordKey returns for invalid record key. - ErrInvalidRecordKey = terror.ClassTable.New(codeInvalidRecordKey, "invalid record key") - // ErrTruncateWrongValue returns for truncate wrong value for field. - ErrTruncateWrongValue = terror.ClassTable.New(codeTruncateWrongValue, "incorrect value") + ErrInvalidRecordKey = terror.ClassTable.New(mysql.ErrInvalidRecordKey, mysql.MySQLErrName[mysql.ErrInvalidRecordKey]) // ErrTruncatedWrongValueForField returns for truncate wrong value for field. - ErrTruncatedWrongValueForField = terror.ClassTable.New(codeTruncateWrongValue, mysql.MySQLErrName[mysql.ErrTruncatedWrongValueForField]) + ErrTruncatedWrongValueForField = terror.ClassTable.New(mysql.ErrTruncatedWrongValueForField, mysql.MySQLErrName[mysql.ErrTruncatedWrongValueForField]) // ErrUnknownPartition returns unknown partition error. - ErrUnknownPartition = terror.ClassTable.New(codeUnknownPartition, mysql.MySQLErrName[mysql.ErrUnknownPartition]) + ErrUnknownPartition = terror.ClassTable.New(mysql.ErrUnknownPartition, mysql.MySQLErrName[mysql.ErrUnknownPartition]) // ErrNoPartitionForGivenValue returns table has no partition for value. - ErrNoPartitionForGivenValue = terror.ClassTable.New(codeNoPartitionForGivenValue, mysql.MySQLErrName[mysql.ErrNoPartitionForGivenValue]) + ErrNoPartitionForGivenValue = terror.ClassTable.New(mysql.ErrNoPartitionForGivenValue, mysql.MySQLErrName[mysql.ErrNoPartitionForGivenValue]) // ErrLockOrActiveTransaction returns when execute unsupported statement in a lock session or an active transaction. - ErrLockOrActiveTransaction = terror.ClassTable.New(codeLockOrActiveTransaction, mysql.MySQLErrName[mysql.ErrLockOrActiveTransaction]) + ErrLockOrActiveTransaction = terror.ClassTable.New(mysql.ErrLockOrActiveTransaction, mysql.MySQLErrName[mysql.ErrLockOrActiveTransaction]) ) // RecordIterFunc is used for low-level record iteration. @@ -166,7 +181,7 @@ type Table interface { // AllocHandle allocates a handle for a new row. AllocHandle(ctx sessionctx.Context) (int64, error) - // AllocHandleIds allocates multiple handle for rows. + // AllocHandleIDs allocates multiple handles for rows. AllocHandleIDs(ctx sessionctx.Context, n uint64) (int64, int64, error) // Allocator returns Allocator. @@ -232,31 +247,6 @@ var TableFromMeta func(alloc autoid.Allocator, tblInfo *model.TableInfo) (Table, // MockTableFromMeta only serves for test. var MockTableFromMeta func(tableInfo *model.TableInfo) Table -// Table error codes.
-const ( - codeGetDefaultFailed = 1 - codeIndexOutBound = 2 - codeUnsupportedOp = 3 - codeRowNotFound = 4 - codeTableStateCantNone = 5 - codeColumnStateCantNone = 6 - codeColumnStateNonPublic = 7 - codeIndexStateCantNone = 8 - codeInvalidRecordKey = 9 - - codeColumnCantNull = mysql.ErrBadNull - codeUnknownColumn = 1054 - codeDuplicateColumn = 1110 - codeNoDefaultValue = 1364 - codeTruncateWrongValue = 1366 - // MySQL error code, "Trigger creation context of table `%-.64s`.`%-.64s` is invalid". - // It may happen when inserting some data outside of all table partitions. - - codeUnknownPartition = mysql.ErrUnknownPartition - codeNoPartitionForGivenValue = mysql.ErrNoPartitionForGivenValue - codeLockOrActiveTransaction = mysql.ErrLockOrActiveTransaction -) - // Slice is used for table sorting. type Slice []Table @@ -270,14 +260,23 @@ func (s Slice) Swap(i, j int) { s[i], s[j] = s[j], s[i] } func init() { tableMySQLErrCodes := map[terror.ErrCode]uint16{ - codeColumnCantNull: mysql.ErrBadNull, - codeUnknownColumn: mysql.ErrBadField, - codeDuplicateColumn: mysql.ErrFieldSpecifiedTwice, - codeNoDefaultValue: mysql.ErrNoDefaultForField, - codeTruncateWrongValue: mysql.ErrTruncatedWrongValueForField, - codeUnknownPartition: mysql.ErrUnknownPartition, - codeNoPartitionForGivenValue: mysql.ErrNoPartitionForGivenValue, - codeLockOrActiveTransaction: mysql.ErrLockOrActiveTransaction, + mysql.ErrBadNull: mysql.ErrBadNull, + mysql.ErrBadField: mysql.ErrBadField, + mysql.ErrFieldSpecifiedTwice: mysql.ErrFieldSpecifiedTwice, + mysql.ErrNoDefaultForField: mysql.ErrNoDefaultForField, + mysql.ErrTruncatedWrongValueForField: mysql.ErrTruncatedWrongValueForField, + mysql.ErrUnknownPartition: mysql.ErrUnknownPartition, + mysql.ErrNoPartitionForGivenValue: mysql.ErrNoPartitionForGivenValue, + mysql.ErrLockOrActiveTransaction: mysql.ErrLockOrActiveTransaction, + mysql.ErrIndexOutBound: mysql.ErrIndexOutBound, + mysql.ErrColumnStateNonPublic: mysql.ErrColumnStateNonPublic, + mysql.ErrFieldGetDefaultFailed: mysql.ErrFieldGetDefaultFailed, + mysql.ErrUnsupportedOp: mysql.ErrUnsupportedOp, + mysql.ErrRowNotFound: mysql.ErrRowNotFound, + mysql.ErrTableStateCantNone: mysql.ErrTableStateCantNone, + mysql.ErrColumnStateCantNone: mysql.ErrColumnStateCantNone, + mysql.ErrIndexStateCantNone: mysql.ErrIndexStateCantNone, + mysql.ErrInvalidRecordKey: mysql.ErrInvalidRecordKey, } terror.ErrClassToMySQLCodes[terror.ClassTable] = tableMySQLErrCodes } diff --git a/table/table_test.go b/table/table_test.go index e6e2da0e5f7e8..79d40680c2948 100644 --- a/table/table_test.go +++ b/table/table_test.go @@ -15,6 +15,7 @@ package table import ( . 
"github.com/pingcap/check" + "github.com/pingcap/parser/mysql" ) var _ = Suite(&testTableSuite{}) @@ -27,3 +28,23 @@ func (t *testTableSuite) TestSlice(c *C) { c.Assert(length, Equals, 2) sl.Swap(0, 1) } + +func (t *testTableSuite) TestErrorCode(c *C) { + c.Assert(int(ErrColumnCantNull.ToSQLError().Code), Equals, mysql.ErrBadNull) + c.Assert(int(errUnknownColumn.ToSQLError().Code), Equals, mysql.ErrBadField) + c.Assert(int(errDuplicateColumn.ToSQLError().Code), Equals, mysql.ErrFieldSpecifiedTwice) + c.Assert(int(errGetDefaultFailed.ToSQLError().Code), Equals, mysql.ErrFieldGetDefaultFailed) + c.Assert(int(ErrNoDefaultValue.ToSQLError().Code), Equals, mysql.ErrNoDefaultForField) + c.Assert(int(ErrIndexOutBound.ToSQLError().Code), Equals, mysql.ErrIndexOutBound) + c.Assert(int(ErrUnsupportedOp.ToSQLError().Code), Equals, mysql.ErrUnsupportedOp) + c.Assert(int(ErrRowNotFound.ToSQLError().Code), Equals, mysql.ErrRowNotFound) + c.Assert(int(ErrTableStateCantNone.ToSQLError().Code), Equals, mysql.ErrTableStateCantNone) + c.Assert(int(ErrColumnStateCantNone.ToSQLError().Code), Equals, mysql.ErrColumnStateCantNone) + c.Assert(int(ErrColumnStateNonPublic.ToSQLError().Code), Equals, mysql.ErrColumnStateNonPublic) + c.Assert(int(ErrIndexStateCantNone.ToSQLError().Code), Equals, mysql.ErrIndexStateCantNone) + c.Assert(int(ErrInvalidRecordKey.ToSQLError().Code), Equals, mysql.ErrInvalidRecordKey) + c.Assert(int(ErrTruncatedWrongValueForField.ToSQLError().Code), Equals, mysql.ErrTruncatedWrongValueForField) + c.Assert(int(ErrUnknownPartition.ToSQLError().Code), Equals, mysql.ErrUnknownPartition) + c.Assert(int(ErrNoPartitionForGivenValue.ToSQLError().Code), Equals, mysql.ErrNoPartitionForGivenValue) + c.Assert(int(ErrLockOrActiveTransaction.ToSQLError().Code), Equals, mysql.ErrLockOrActiveTransaction) +} diff --git a/table/tables/index.go b/table/tables/index.go index 3290d6b8b4b16..b77422b2ec505 100644 --- a/table/tables/index.go +++ b/table/tables/index.go @@ -373,8 +373,7 @@ func (c *index) FetchValues(r []types.Datum, vals []types.Datum) ([]types.Datum, vals = vals[:needLength] for i, ic := range c.idxInfo.Columns { if ic.Offset < 0 || ic.Offset >= len(r) { - return nil, table.ErrIndexOutBound.GenWithStack("Index column %s offset out of bound, offset: %d, row: %v", - ic.Name, ic.Offset, r) + return nil, table.ErrIndexOutBound.GenWithStackByArgs(ic.Name, ic.Offset, r) } vals[i] = r[ic.Offset] } diff --git a/table/tables/partition.go b/table/tables/partition.go index 878d81a97c58a..f3aa4629ae858 100644 --- a/table/tables/partition.go +++ b/table/tables/partition.go @@ -47,7 +47,7 @@ var _ table.PartitionedTable = &partitionedTable{} // partitions) is basically the same. // partition also implements the table.Table interface. type partition struct { - tableCommon + TableCommon } // GetPhysicalID implements table.Table GetPhysicalID interface. @@ -58,27 +58,27 @@ func (p *partition) GetPhysicalID() int64 { // partitionedTable implements the table.PartitionedTable interface. // partitionedTable is a table, it contains many Partitions. 
type partitionedTable struct { - Table + TableCommon partitionExpr *PartitionExpr partitions map[int64]*partition } -func newPartitionedTable(tbl *Table, tblInfo *model.TableInfo) (table.Table, error) { - ret := &partitionedTable{Table: *tbl} +func newPartitionedTable(tbl *TableCommon, tblInfo *model.TableInfo) (table.Table, error) { + ret := &partitionedTable{TableCommon: *tbl} partitionExpr, err := newPartitionExpr(tblInfo) if err != nil { return nil, errors.Trace(err) } ret.partitionExpr = partitionExpr - if err := initTableIndices(&ret.tableCommon); err != nil { + if err := initTableIndices(&ret.TableCommon); err != nil { return nil, errors.Trace(err) } partitions := make(map[int64]*partition) pi := tblInfo.GetPartitionInfo() for _, p := range pi.Definitions { var t partition - err := initTableCommonWithIndices(&t.tableCommon, tblInfo, p.ID, tbl.Columns, tbl.alloc) + err := initTableCommonWithIndices(&t.TableCommon, tblInfo, p.ID, tbl.Columns, tbl.alloc) if err != nil { return nil, errors.Trace(err) } diff --git a/table/tables/tables.go b/table/tables/tables.go index c569986078180..dda9f95cf9d07 100644 --- a/table/tables/tables.go +++ b/table/tables/tables.go @@ -43,8 +43,8 @@ import ( "go.uber.org/zap" ) -// tableCommon is shared by both Table and partition. -type tableCommon struct { +// TableCommon is shared by both Table and partition. +type TableCommon struct { tableID int64 // physicalTableID is a unique int64 to identify a physical table. physicalTableID int64 @@ -61,13 +61,6 @@ type tableCommon struct { indexPrefix kv.Key } -// Table implements table.Table interface. -type Table struct { - tableCommon -} - -var _ table.Table = &Table{} - // MockTableFromMeta only serves for test. func MockTableFromMeta(tblInfo *model.TableInfo) table.Table { columns := make([]*table.Column, 0, len(tblInfo.Columns)) @@ -76,10 +69,10 @@ func MockTableFromMeta(tblInfo *model.TableInfo) table.Table { columns = append(columns, col) } - var t Table - initTableCommon(&t.tableCommon, tblInfo, tblInfo.ID, columns, nil) + var t TableCommon + initTableCommon(&t, tblInfo, tblInfo.ID, columns, nil) if tblInfo.GetPartitionInfo() == nil { - if err := initTableIndices(&t.tableCommon); err != nil { + if err := initTableIndices(&t); err != nil { return nil } return &t @@ -95,14 +88,14 @@ func MockTableFromMeta(tblInfo *model.TableInfo) table.Table { // TableFromMeta creates a Table instance from model.TableInfo. func TableFromMeta(alloc autoid.Allocator, tblInfo *model.TableInfo) (table.Table, error) { if tblInfo.State == model.StateNone { - return nil, table.ErrTableStateCantNone.GenWithStack("table %s can't be in none state", tblInfo.Name) + return nil, table.ErrTableStateCantNone.GenWithStackByArgs(tblInfo.Name) } colsLen := len(tblInfo.Columns) columns := make([]*table.Column, 0, colsLen) for i, colInfo := range tblInfo.Columns { if colInfo.State == model.StateNone { - return nil, table.ErrColumnStateCantNone.GenWithStack("column %s can't be in none state", colInfo.Name) + return nil, table.ErrColumnStateCantNone.GenWithStackByArgs(colInfo.Name) } // Print some information when the column's offset isn't equal to i. 
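Exporting tableCommon as TableCommon removes the need for the old Table wrapper: partition and partitionedTable embed it directly, and Go's method promotion hands the embedder the whole table.Table implementation. A sketch of the pattern with a hypothetical embedder:

	type myTable struct {
		tables.TableCommon // RecordKey, AddRecord, Cols, ... are promoted
	}

	// assuming TableCommon itself satisfies table.Table, so does the embedder
	var _ table.Table = &myTable{}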
@@ -125,10 +118,10 @@ func TableFromMeta(alloc autoid.Allocator, tblInfo *model.TableInfo) (table.Tabl columns = append(columns, col) } - var t Table - initTableCommon(&t.tableCommon, tblInfo, tblInfo.ID, columns, alloc) + var t TableCommon + initTableCommon(&t, tblInfo, tblInfo.ID, columns, alloc) if tblInfo.GetPartitionInfo() == nil { - if err := initTableIndices(&t.tableCommon); err != nil { + if err := initTableIndices(&t); err != nil { return nil, err } return &t, nil @@ -137,8 +130,8 @@ func TableFromMeta(alloc autoid.Allocator, tblInfo *model.TableInfo) (table.Tabl return newPartitionedTable(&t, tblInfo) } -// initTableCommon initializes a tableCommon struct. -func initTableCommon(t *tableCommon, tblInfo *model.TableInfo, physicalTableID int64, cols []*table.Column, alloc autoid.Allocator) { +// initTableCommon initializes a TableCommon struct. +func initTableCommon(t *TableCommon, tblInfo *model.TableInfo, physicalTableID int64, cols []*table.Column, alloc autoid.Allocator) { t.tableID = tblInfo.ID t.physicalTableID = physicalTableID t.alloc = alloc @@ -151,33 +144,33 @@ func initTableCommon(t *tableCommon, tblInfo *model.TableInfo, physicalTableID i t.indexPrefix = tablecodec.GenTableIndexPrefix(physicalTableID) } -// initTableIndices initializes the indices of the tableCommon. -func initTableIndices(t *tableCommon) error { +// initTableIndices initializes the indices of the TableCommon. +func initTableIndices(t *TableCommon) error { tblInfo := t.meta for _, idxInfo := range tblInfo.Indices { if idxInfo.State == model.StateNone { - return table.ErrIndexStateCantNone.GenWithStack("index %s can't be in none state", idxInfo.Name) + return table.ErrIndexStateCantNone.GenWithStackByArgs(idxInfo.Name) } - // Use partition ID for index, because tableCommon may be table or partition. + // Use partition ID for index, because TableCommon may be table or partition. idx := NewIndex(t.physicalTableID, tblInfo, idxInfo) t.indices = append(t.indices, idx) } return nil } -func initTableCommonWithIndices(t *tableCommon, tblInfo *model.TableInfo, physicalTableID int64, cols []*table.Column, alloc autoid.Allocator) error { +func initTableCommonWithIndices(t *TableCommon, tblInfo *model.TableInfo, physicalTableID int64, cols []*table.Column, alloc autoid.Allocator) error { initTableCommon(t, tblInfo, physicalTableID, cols, alloc) return initTableIndices(t) } // Indices implements table.Table Indices interface. -func (t *tableCommon) Indices() []table.Index { +func (t *TableCommon) Indices() []table.Index { return t.indices } // WritableIndices implements table.Table WritableIndices interface. -func (t *tableCommon) WritableIndices() []table.Index { +func (t *TableCommon) WritableIndices() []table.Index { if len(t.writableIndices) > 0 { return t.writableIndices } @@ -192,23 +185,23 @@ func (t *tableCommon) WritableIndices() []table.Index { } // DeletableIndices implements table.Table DeletableIndices interface. -func (t *tableCommon) DeletableIndices() []table.Index { +func (t *TableCommon) DeletableIndices() []table.Index { // All indices are deletable because we don't need to check StateNone. return t.indices } // Meta implements table.Table Meta interface. -func (t *tableCommon) Meta() *model.TableInfo { +func (t *TableCommon) Meta() *model.TableInfo { return t.meta } // GetPhysicalID implements table.Table GetPhysicalID interface. -func (t *Table) GetPhysicalID() int64 { +func (t *TableCommon) GetPhysicalID() int64 { return t.physicalTableID } // Cols implements table.Table Cols interface. 
-func (t *tableCommon) Cols() []*table.Column { +func (t *TableCommon) Cols() []*table.Column { if len(t.publicColumns) > 0 { return t.publicColumns } @@ -227,7 +220,7 @@ func (t *tableCommon) Cols() []*table.Column { } // WritableCols implements table WritableCols interface. -func (t *tableCommon) WritableCols() []*table.Column { +func (t *TableCommon) WritableCols() []*table.Column { if len(t.writableColumns) > 0 { return t.writableColumns } @@ -246,29 +239,29 @@ func (t *tableCommon) WritableCols() []*table.Column { } // RecordPrefix implements table.Table interface. -func (t *tableCommon) RecordPrefix() kv.Key { +func (t *TableCommon) RecordPrefix() kv.Key { return t.recordPrefix } // IndexPrefix implements table.Table interface. -func (t *tableCommon) IndexPrefix() kv.Key { +func (t *TableCommon) IndexPrefix() kv.Key { return t.indexPrefix } // RecordKey implements table.Table interface. -func (t *tableCommon) RecordKey(h int64) kv.Key { +func (t *TableCommon) RecordKey(h int64) kv.Key { return tablecodec.EncodeRecordKey(t.recordPrefix, h) } // FirstKey implements table.Table interface. -func (t *tableCommon) FirstKey() kv.Key { +func (t *TableCommon) FirstKey() kv.Key { return t.RecordKey(math.MinInt64) } // UpdateRecord implements table.Table UpdateRecord interface. // `touched` means which columns are really modified, used for secondary indices. // Length of `oldData` and `newData` equals to length of `t.WritableCols()`. -func (t *tableCommon) UpdateRecord(ctx sessionctx.Context, h int64, oldData, newData []types.Datum, touched []bool) error { +func (t *TableCommon) UpdateRecord(ctx sessionctx.Context, h int64, oldData, newData []types.Datum, touched []bool) error { txn, err := ctx.Txn(true) if err != nil { return err @@ -359,7 +352,7 @@ func (t *tableCommon) UpdateRecord(ctx sessionctx.Context, h int64, oldData, new return nil } -func (t *tableCommon) rebuildIndices(ctx sessionctx.Context, rm kv.RetrieverMutator, h int64, touched []bool, oldData []types.Datum, newData []types.Datum) error { +func (t *TableCommon) rebuildIndices(ctx sessionctx.Context, rm kv.RetrieverMutator, h int64, touched []bool, oldData []types.Datum, newData []types.Datum) error { txn, err := ctx.Txn(true) if err != nil { return err @@ -416,7 +409,7 @@ func adjustRowValuesBuf(writeBufs *variable.WriteStmtBufs, rowLen int) { // getRollbackableMemStore get a rollbackable BufferStore, when we are importing data, // Just add the kv to transaction's membuf directly. -func (t *tableCommon) getRollbackableMemStore(ctx sessionctx.Context) (kv.RetrieverMutator, error) { +func (t *TableCommon) getRollbackableMemStore(ctx sessionctx.Context) (kv.RetrieverMutator, error) { bs := ctx.GetSessionVars().GetWriteStmtBufs().BufStore if bs == nil { txn, err := ctx.Txn(true) @@ -431,7 +424,7 @@ func (t *tableCommon) getRollbackableMemStore(ctx sessionctx.Context) (kv.Retrie } // AddRecord implements table.Table AddRecord interface. -func (t *tableCommon) AddRecord(ctx sessionctx.Context, r []types.Datum, opts ...table.AddRecordOption) (recordID int64, err error) { +func (t *TableCommon) AddRecord(ctx sessionctx.Context, r []types.Datum, opts ...table.AddRecordOption) (recordID int64, err error) { var opt table.AddRecordOpt for _, fn := range opts { fn.ApplyOn(&opt) @@ -571,7 +564,7 @@ func (t *tableCommon) AddRecord(ctx sessionctx.Context, r []types.Datum, opts .. } // genIndexKeyStr generates index content string representation. 
-func (t *tableCommon) genIndexKeyStr(colVals []types.Datum) (string, error) { +func (t *TableCommon) genIndexKeyStr(colVals []types.Datum) (string, error) { // Pass pre-composed error to txn. strVals := make([]string, 0, len(colVals)) for _, cv := range colVals { @@ -589,7 +582,7 @@ func (t *tableCommon) genIndexKeyStr(colVals []types.Datum) (string, error) { } // addIndices adds data into indices. If any key is duplicated, returns the original handle. -func (t *tableCommon) addIndices(sctx sessionctx.Context, recordID int64, r []types.Datum, rm kv.RetrieverMutator, +func (t *TableCommon) addIndices(sctx sessionctx.Context, recordID int64, r []types.Datum, rm kv.RetrieverMutator, opts []table.CreateIdxOptFunc) (int64, error) { txn, err := sctx.Txn(true) if err != nil { @@ -645,7 +638,7 @@ func (t *tableCommon) addIndices(sctx sessionctx.Context, recordID int64, r []ty } // RowWithCols implements table.Table RowWithCols interface. -func (t *tableCommon) RowWithCols(ctx sessionctx.Context, h int64, cols []*table.Column) ([]types.Datum, error) { +func (t *TableCommon) RowWithCols(ctx sessionctx.Context, h int64, cols []*table.Column) ([]types.Datum, error) { // Get raw row data from kv. key := t.RecordKey(h) txn, err := ctx.Txn(true) @@ -708,12 +701,12 @@ func DecodeRawRowData(ctx sessionctx.Context, meta *model.TableInfo, h int64, co } // Row implements table.Table Row interface. -func (t *tableCommon) Row(ctx sessionctx.Context, h int64) ([]types.Datum, error) { +func (t *TableCommon) Row(ctx sessionctx.Context, h int64) ([]types.Datum, error) { return t.RowWithCols(ctx, h, t.Cols()) } // RemoveRecord implements table.Table RemoveRecord interface. -func (t *tableCommon) RemoveRecord(ctx sessionctx.Context, h int64, r []types.Datum) error { +func (t *TableCommon) RemoveRecord(ctx sessionctx.Context, h int64, r []types.Datum) error { err := t.removeRowData(ctx, h) if err != nil { return err @@ -754,7 +747,7 @@ func (t *tableCommon) RemoveRecord(ctx sessionctx.Context, h int64, r []types.Da return err } -func (t *tableCommon) addInsertBinlog(ctx sessionctx.Context, h int64, row []types.Datum, colIDs []int64) error { +func (t *TableCommon) addInsertBinlog(ctx sessionctx.Context, h int64, row []types.Datum, colIDs []int64) error { mutation := t.getMutation(ctx) pk, err := codec.EncodeValue(ctx.GetSessionVars().StmtCtx, nil, types.NewIntDatum(h)) if err != nil { @@ -770,7 +763,7 @@ func (t *tableCommon) addInsertBinlog(ctx sessionctx.Context, h int64, row []typ return nil } -func (t *tableCommon) addUpdateBinlog(ctx sessionctx.Context, oldRow, newRow []types.Datum, colIDs []int64) error { +func (t *TableCommon) addUpdateBinlog(ctx sessionctx.Context, oldRow, newRow []types.Datum, colIDs []int64) error { old, err := tablecodec.EncodeRow(ctx.GetSessionVars().StmtCtx, oldRow, colIDs, nil, nil) if err != nil { return err @@ -786,7 +779,7 @@ func (t *tableCommon) addUpdateBinlog(ctx sessionctx.Context, oldRow, newRow []t return nil } -func (t *tableCommon) addDeleteBinlog(ctx sessionctx.Context, r []types.Datum, colIDs []int64) error { +func (t *TableCommon) addDeleteBinlog(ctx sessionctx.Context, r []types.Datum, colIDs []int64) error { data, err := tablecodec.EncodeRow(ctx.GetSessionVars().StmtCtx, r, colIDs, nil, nil) if err != nil { return err @@ -797,7 +790,7 @@ func (t *tableCommon) addDeleteBinlog(ctx sessionctx.Context, r []types.Datum, c return nil } -func (t *tableCommon) removeRowData(ctx sessionctx.Context, h int64) error { +func (t *TableCommon) removeRowData(ctx sessionctx.Context, h 
int64) error { // Remove row data. txn, err := ctx.Txn(true) if err != nil { @@ -813,7 +806,7 @@ func (t *tableCommon) removeRowData(ctx sessionctx.Context, h int64) error { } // removeRowIndices removes all the indices of a row. -func (t *tableCommon) removeRowIndices(ctx sessionctx.Context, h int64, rec []types.Datum) error { +func (t *TableCommon) removeRowIndices(ctx sessionctx.Context, h int64, rec []types.Datum) error { txn, err := ctx.Txn(true) if err != nil { return err @@ -838,12 +831,12 @@ func (t *tableCommon) removeRowIndices(ctx sessionctx.Context, h int64, rec []ty } // removeRowIndex implements table.Table RemoveRowIndex interface. -func (t *tableCommon) removeRowIndex(sc *stmtctx.StatementContext, rm kv.RetrieverMutator, h int64, vals []types.Datum, idx table.Index, txn kv.Transaction) error { +func (t *TableCommon) removeRowIndex(sc *stmtctx.StatementContext, rm kv.RetrieverMutator, h int64, vals []types.Datum, idx table.Index, txn kv.Transaction) error { return idx.Delete(sc, rm, vals, h) } // buildIndexForRow implements table.Table BuildIndexForRow interface. -func (t *tableCommon) buildIndexForRow(ctx sessionctx.Context, rm kv.RetrieverMutator, h int64, vals []types.Datum, idx table.Index, txn kv.Transaction, untouched bool) error { +func (t *TableCommon) buildIndexForRow(ctx sessionctx.Context, rm kv.RetrieverMutator, h int64, vals []types.Datum, idx table.Index, txn kv.Transaction, untouched bool) error { var opts []table.CreateIdxOptFunc if untouched { opts = append(opts, table.IndexIsUntouched) @@ -865,7 +858,7 @@ func (t *tableCommon) buildIndexForRow(ctx sessionctx.Context, rm kv.RetrieverMu } // IterRecords implements table.Table IterRecords interface. -func (t *tableCommon) IterRecords(ctx sessionctx.Context, startKey kv.Key, cols []*table.Column, +func (t *TableCommon) IterRecords(ctx sessionctx.Context, startKey kv.Key, cols []*table.Column, fn table.RecordIterFunc) error { prefix := t.RecordPrefix() txn, err := ctx.Txn(true) @@ -960,13 +953,13 @@ func GetColDefaultValue(ctx sessionctx.Context, col *table.Column, defaultVals [ } // AllocHandle implements table.Table AllocHandle interface. -func (t *tableCommon) AllocHandle(ctx sessionctx.Context) (int64, error) { +func (t *TableCommon) AllocHandle(ctx sessionctx.Context) (int64, error) { _, rowID, err := t.AllocHandleIDs(ctx, 1) return rowID, err } -// AllocHandle implements table.Table AllocHandle interface. -func (t *tableCommon) AllocHandleIDs(ctx sessionctx.Context, n uint64) (int64, int64, error) { +// AllocHandleIDs implements table.Table AllocHandle interface. +func (t *TableCommon) AllocHandleIDs(ctx sessionctx.Context, n uint64) (int64, int64, error) { base, maxID, err := t.Allocator(ctx).Alloc(t.tableID, n) if err != nil { return 0, 0, err @@ -1000,7 +993,7 @@ func OverflowShardBits(rowID int64, shardRowIDBits uint64) bool { return rowID&int64(mask) > 0 } -func (t *tableCommon) calcShard(startTS uint64) int64 { +func (t *TableCommon) calcShard(startTS uint64) int64 { var buf [8]byte binary.LittleEndian.PutUint64(buf[:], startTS) hashVal := int64(murmur3.Sum32(buf[:])) @@ -1008,7 +1001,7 @@ func (t *tableCommon) calcShard(startTS uint64) int64 { } // Allocator implements table.Table Allocator interface. 
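AllocHandleIDs (above) reserves n consecutive row handles in a single allocator round-trip, which is what batch inserts want. A usage sketch, assuming the usual autoid convention that the returned range is (base, maxID], i.e. the usable handles are base+1 through maxID; writeRow is a hypothetical helper:

	base, maxID, err := tbl.AllocHandleIDs(ctx, uint64(len(rows)))
	if err != nil {
		return err
	}
	for i := range rows {
		handle := base + 1 + int64(i) // contiguous; the last one equals maxID
		writeRow(handle, rows[i])     // hypothetical write helper
	}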
-func (t *tableCommon) Allocator(ctx sessionctx.Context) autoid.Allocator { +func (t *TableCommon) Allocator(ctx sessionctx.Context) autoid.Allocator { if ctx != nil { sessAlloc := ctx.GetSessionVars().IDAllocator if sessAlloc != nil { @@ -1019,12 +1012,12 @@ func (t *tableCommon) Allocator(ctx sessionctx.Context) autoid.Allocator { } // RebaseAutoID implements table.Table RebaseAutoID interface. -func (t *tableCommon) RebaseAutoID(ctx sessionctx.Context, newBase int64, isSetStep bool) error { +func (t *TableCommon) RebaseAutoID(ctx sessionctx.Context, newBase int64, isSetStep bool) error { return t.Allocator(ctx).Rebase(t.tableID, newBase, isSetStep) } // Seek implements table.Table Seek interface. -func (t *tableCommon) Seek(ctx sessionctx.Context, h int64) (int64, bool, error) { +func (t *TableCommon) Seek(ctx sessionctx.Context, h int64) (int64, bool, error) { txn, err := ctx.Txn(true) if err != nil { return 0, false, err @@ -1046,7 +1039,7 @@ func (t *tableCommon) Seek(ctx sessionctx.Context, h int64) (int64, bool, error) } // Type implements table.Table Type interface. -func (t *tableCommon) Type() table.Type { +func (t *TableCommon) Type() table.Type { return table.NormalTable } @@ -1057,11 +1050,11 @@ func shouldWriteBinlog(ctx sessionctx.Context) bool { return !ctx.GetSessionVars().InRestrictedSQL } -func (t *tableCommon) getMutation(ctx sessionctx.Context) *binlog.TableMutation { +func (t *TableCommon) getMutation(ctx sessionctx.Context) *binlog.TableMutation { return ctx.StmtGetMutation(t.tableID) } -func (t *tableCommon) canSkip(col *table.Column, value types.Datum) bool { +func (t *TableCommon) canSkip(col *table.Column, value types.Datum) bool { return CanSkip(t.Meta(), col, value) } @@ -1083,7 +1076,7 @@ func CanSkip(info *model.TableInfo, col *table.Column, value types.Datum) bool { } // canSkipUpdateBinlog checks whether the column can be skipped or not. 
-func (t *tableCommon) canSkipUpdateBinlog(col *table.Column, value types.Datum) bool { +func (t *TableCommon) canSkipUpdateBinlog(col *table.Column, value types.Datum) bool { if col.IsGenerated() && !col.GeneratedStored { return true } diff --git a/tidb-server/main.go b/tidb-server/main.go index ea7398b2d25fd..fc8108798610d 100644 --- a/tidb-server/main.go +++ b/tidb-server/main.go @@ -29,7 +29,7 @@ import ( "github.com/pingcap/log" "github.com/pingcap/parser/mysql" "github.com/pingcap/parser/terror" - pd "github.com/pingcap/pd/client" + "github.com/pingcap/pd/client" pumpcli "github.com/pingcap/tidb-tools/tidb-binlog/pump_client" "github.com/pingcap/tidb/bindinfo" "github.com/pingcap/tidb/config" @@ -50,6 +50,7 @@ import ( "github.com/pingcap/tidb/store/mockstore" "github.com/pingcap/tidb/store/tikv" "github.com/pingcap/tidb/store/tikv/gcworker" + "github.com/pingcap/tidb/util/domainutil" "github.com/pingcap/tidb/util/logutil" "github.com/pingcap/tidb/util/memory" "github.com/pingcap/tidb/util/printer" @@ -60,7 +61,7 @@ import ( "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus/push" "github.com/struCoder/pidusage" - _ "go.uber.org/automaxprocs" + "go.uber.org/automaxprocs/maxprocs" "go.uber.org/zap" ) @@ -91,6 +92,8 @@ const ( nmTokenLimit = "token-limit" nmPluginDir = "plugin-dir" nmPluginLoad = "plugin-load" + nmRepairMode = "repair-mode" + nmRepairList = "repair-list" nmProxyProtocolNetworks = "proxy-protocol-networks" nmProxyProtocolHeaderTimeout = "proxy-protocol-header-timeout" @@ -118,6 +121,8 @@ var ( pluginDir = flag.String(nmPluginDir, "/data/deploy/plugin", "the folder that hold plugin") pluginLoad = flag.String(nmPluginLoad, "", "wait load plugin name(separated by comma)") affinityCPU = flag.String(nmAffinityCPU, "", "affinity cpu (cpu-no. separated by comma, e.g. 1,2,3)") + repairMode = flagBoolean(nmRepairMode, false, "enable admin repair mode") + repairList = flag.String(nmRepairList, "", "admin repair table list") // Log logLevel = flag.String(nmLogLevel, "info", "log level: info, debug, warn, error, fatal") @@ -470,6 +475,14 @@ func overrideConfig() { if actualFlags[nmPluginDir] { cfg.Plugin.Dir = *pluginDir } + if actualFlags[nmRepairMode] { + cfg.RepairMode = *repairMode + } + if actualFlags[nmRepairList] { + if cfg.RepairMode { + cfg.RepairTableList = stringToList(*repairList) + } + } // Log if actualFlags[nmLogLevel] { @@ -561,6 +574,8 @@ func setGlobalVars() { tikv.CommitMaxBackoff = int(parseDuration(cfg.TiKVClient.CommitTimeout).Seconds() * 1000) tikv.RegionCacheTTLSec = int64(cfg.TiKVClient.RegionCacheTTL) + domainutil.RepairInfo.SetRepairMode(cfg.RepairMode) + domainutil.RepairInfo.SetRepairTableList(cfg.RepairTableList) } func setupLog() { @@ -569,6 +584,10 @@ func setupLog() { err = logutil.InitLogger(cfg.Log.ToLogConfig()) terror.MustNil(err) + // Disable automaxprocs log + nopLog := func(string, ...interface{}) {} + _, err = maxprocs.Set(maxprocs.Logger(nopLog)) + terror.MustNil(err) } func printInfo() { @@ -585,6 +604,7 @@ func createServer() { svr, err = server.NewServer(cfg, driver) // Both domain and storage have started, so we have to clean them before exiting. 
terror.MustNil(err, closeDomainAndStorage) + svr.SetDomain(dom) go dom.ExpensiveQueryHandle().SetSessionManager(svr).Run() dom.InfoSyncer().SetSessionManager(svr) } @@ -627,7 +647,8 @@ func updateCPUUsageMetrics() { func setupTracing() { tracingCfg := cfg.OpenTracing.ToTracingConfig() - tracer, _, err := tracingCfg.New("TiDB") + tracingCfg.ServiceName = "TiDB" + tracer, _, err := tracingCfg.NewTracer() if err != nil { log.Fatal("setup jaeger tracer failed", zap.String("error message", err.Error())) } @@ -655,3 +676,15 @@ func cleanup() { plugin.Shutdown(context.Background()) closeDomainAndStorage() } + +func stringToList(repairString string) []string { + if len(repairString) <= 0 { + return []string{} + } + if repairString[0] == '[' && repairString[len(repairString)-1] == ']' { + repairString = repairString[1 : len(repairString)-1] + } + return strings.FieldsFunc(repairString, func(r rune) bool { + return r == ',' || r == ' ' || r == '"' + }) +} diff --git a/tools/check/check_testSuite.sh b/tools/check/check_testSuite.sh index a426ce904c066..5ae8eceb44cdd 100755 --- a/tools/check/check_testSuite.sh +++ b/tools/check/check_testSuite.sh @@ -1,4 +1,4 @@ -#!/bin/bash +#!/usr/bin/env bash set -euo pipefail diff --git a/types/convert_test.go b/types/convert_test.go index 33023e99d191f..2689968bc5ff9 100644 --- a/types/convert_test.go +++ b/types/convert_test.go @@ -955,7 +955,7 @@ func (s *testTypeConvertSuite) TestConvertJSONToFloat(c *C) { Out float64 ty json.TypeCode }{ - {make(map[string]interface{}, 0), 0, json.TypeCodeObject}, + {make(map[string]interface{}), 0, json.TypeCodeObject}, {make([]interface{}, 0), 0, json.TypeCodeArray}, {int64(3), 3, json.TypeCodeInt64}, {int64(-3), -3, json.TypeCodeInt64}, diff --git a/types/datum.go b/types/datum.go index 125313ec558bc..525ce3be7b2b4 100644 --- a/types/datum.go +++ b/types/datum.go @@ -1659,23 +1659,6 @@ func invalidConv(d *Datum, tp byte) (Datum, error) { return Datum{}, errors.Errorf("cannot convert datum from %s to type %s.", KindStr(d.Kind()), TypeStr(tp)) } -func (d *Datum) convergeType(hasUint, hasDecimal, hasFloat *bool) (x Datum) { - x = *d - switch d.Kind() { - case KindUint64: - *hasUint = true - case KindFloat32: - f := d.GetFloat32() - x.SetFloat64(float64(f)) - *hasFloat = true - case KindFloat64: - *hasFloat = true - case KindMysqlDecimal: - *hasDecimal = true - } - return x -} - // NewDatum creates a new Datum from an interface{}. func NewDatum(in interface{}) (d Datum) { switch x := in.(type) { @@ -1895,3 +1878,182 @@ func CloneRow(dr []Datum) []Datum { } return c } + +// GetMaxValue returns the max value datum for each type. 
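The GetMaxValue helper that follows (and its GetMinValue twin) build the boundary Datum for a field type; the reverse-evaluation code later in this file leans on both. A hedged usage sketch:

	ft := types.NewFieldType(mysql.TypeLonglong)
	max := types.GetMaxValue(ft) // Datum holding math.MaxInt64
	min := types.GetMinValue(ft) // Datum holding math.MinInt64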
+func GetMaxValue(ft *FieldType) (max Datum) { + switch ft.Tp { + case mysql.TypeTiny, mysql.TypeShort, mysql.TypeInt24, mysql.TypeLong, mysql.TypeLonglong: + if mysql.HasUnsignedFlag(ft.Flag) { + max.SetUint64(IntergerUnsignedUpperBound(ft.Tp)) + } else { + max.SetInt64(IntergerSignedUpperBound(ft.Tp)) + } + case mysql.TypeFloat: + max.SetFloat32(float32(GetMaxFloat(ft.Flen, ft.Decimal))) + case mysql.TypeDouble: + max.SetFloat64(GetMaxFloat(ft.Flen, ft.Decimal)) + case mysql.TypeString, mysql.TypeVarString, mysql.TypeVarchar, mysql.TypeBlob, mysql.TypeTinyBlob, mysql.TypeMediumBlob, mysql.TypeLongBlob: + // codec.Encode KindMaxValue, to avoid import circle + bytes := []byte{250} + max.SetBytes(bytes) + case mysql.TypeNewDecimal: + max.SetMysqlDecimal(NewMaxOrMinDec(false, ft.Flen, ft.Decimal)) + case mysql.TypeDuration: + max.SetMysqlDuration(Duration{Duration: MaxTime}) + case mysql.TypeDate, mysql.TypeDatetime, mysql.TypeTimestamp: + if ft.Tp == mysql.TypeDate || ft.Tp == mysql.TypeDatetime { + max.SetMysqlTime(Time{Time: MaxDatetime, Type: ft.Tp}) + } else { + max.SetMysqlTime(MaxTimestamp) + } + } + return +} + +// GetMinValue returns the min value datum for each type. +func GetMinValue(ft *FieldType) (min Datum) { + switch ft.Tp { + case mysql.TypeTiny, mysql.TypeShort, mysql.TypeInt24, mysql.TypeLong, mysql.TypeLonglong: + if mysql.HasUnsignedFlag(ft.Flag) { + min.SetUint64(0) + } else { + min.SetInt64(IntergerSignedLowerBound(ft.Tp)) + } + case mysql.TypeFloat: + min.SetFloat32(float32(-GetMaxFloat(ft.Flen, ft.Decimal))) + case mysql.TypeDouble: + min.SetFloat64(-GetMaxFloat(ft.Flen, ft.Decimal)) + case mysql.TypeString, mysql.TypeVarString, mysql.TypeVarchar, mysql.TypeBlob, mysql.TypeTinyBlob, mysql.TypeMediumBlob, mysql.TypeLongBlob: + // codec.Encode KindMinNotNull, to avoid import circle + bytes := []byte{1} + min.SetBytes(bytes) + case mysql.TypeNewDecimal: + min.SetMysqlDecimal(NewMaxOrMinDec(true, ft.Flen, ft.Decimal)) + case mysql.TypeDuration: + min.SetMysqlDuration(Duration{Duration: MinTime}) + case mysql.TypeDate, mysql.TypeDatetime, mysql.TypeTimestamp: + if ft.Tp == mysql.TypeDate || ft.Tp == mysql.TypeDatetime { + min.SetMysqlTime(Time{Time: MinDatetime, Type: ft.Tp}) + } else { + min.SetMysqlTime(MinTimestamp) + } + } + return +} + +// RoundingType is used to indicate the rounding type for reverse evaluation. +type RoundingType uint8 + +const ( + // Ceiling means rounding up. + Ceiling RoundingType = iota + // Floor means rounding down. + Floor +) + +func getDatumBound(retType *FieldType, rType RoundingType) Datum { + if rType == Ceiling { + return GetMaxValue(retType) + } + return GetMinValue(retType) +} + +// ChangeReverseResultByUpperLowerBound is for expression's reverse evaluation. +// Here is an example of what the function does: for CastRealAsInt(t.a), +// if the type of column `t.a` is mysql.TypeDouble and there is a row where t.a == MaxFloat64, +// the cast function will produce MaxInt64. But when we do the reverse evaluation and +// the result is MaxInt64 with a ceiling rounding type, we should get MaxFloat64 +// instead of float64(MaxInt64). +// Another example: cast(1.1 as signed) = 1; +// when we get the answer 1, we can only reversely evaluate 1.0 as the column value. So in this +// case, we should judge whether the rounding type is ceiling. If it is, we should add one to +// 1.0 and get the reverse result 2.0.
+func ChangeReverseResultByUpperLowerBound( + sc *stmtctx.StatementContext, + retType *FieldType, + res Datum, + rType RoundingType) (Datum, error) { + d, err := res.ConvertTo(sc, retType) + if terror.ErrorEqual(err, ErrOverflow) { + return d, nil + } + if err != nil { + return d, err + } + resRetType := FieldType{} + switch res.Kind() { + case KindInt64: + resRetType.Tp = mysql.TypeLonglong + case KindUint64: + resRetType.Tp = mysql.TypeLonglong + resRetType.Flag |= mysql.UnsignedFlag + case KindFloat32: + resRetType.Tp = mysql.TypeFloat + case KindFloat64: + resRetType.Tp = mysql.TypeDouble + case KindMysqlDecimal: + resRetType.Tp = mysql.TypeNewDecimal + resRetType.Flen = int(res.GetMysqlDecimal().GetDigitsFrac() + res.GetMysqlDecimal().GetDigitsInt()) + resRetType.Decimal = int(res.GetMysqlDecimal().GetDigitsInt()) + } + bound := getDatumBound(&resRetType, rType) + cmp, err := d.CompareDatum(sc, &bound) + if err != nil { + return d, err + } + if cmp == 0 { + d = getDatumBound(retType, rType) + } else if rType == Ceiling { + switch retType.Tp { + case mysql.TypeShort: + if mysql.HasUnsignedFlag(retType.Flag) { + if d.GetUint64() != math.MaxUint16 { + d.SetUint64(d.GetUint64() + 1) + } + } else { + if d.GetInt64() != math.MaxInt16 { + d.SetInt64(d.GetInt64() + 1) + } + } + case mysql.TypeLong: + if mysql.HasUnsignedFlag(retType.Flag) { + if d.GetUint64() != math.MaxUint32 { + d.SetUint64(d.GetUint64() + 1) + } + } else { + if d.GetInt64() != math.MaxInt32 { + d.SetInt64(d.GetInt64() + 1) + } + } + case mysql.TypeLonglong: + if mysql.HasUnsignedFlag(retType.Flag) { + if d.GetUint64() != math.MaxUint64 { + d.SetUint64(d.GetUint64() + 1) + } + } else { + if d.GetInt64() != math.MaxInt64 { + d.SetInt64(d.GetInt64() + 1) + } + } + case mysql.TypeFloat: + if d.GetFloat32() != math.MaxFloat32 { + d.SetFloat32(d.GetFloat32() + 1.0) + } + case mysql.TypeDouble: + if d.GetFloat64() != math.MaxFloat64 { + d.SetFloat64(d.GetFloat64() + 1.0) + } + case mysql.TypeNewDecimal: + if d.GetMysqlDecimal().Compare(NewMaxOrMinDec(false, retType.Flen, retType.Decimal)) != 0 { + var decimalOne, newD MyDecimal + one := decimalOne.FromInt(1) + err = DecimalAdd(d.GetMysqlDecimal(), one, &newD) + if err != nil { + return d, err + } + d = NewDecimalDatum(&newD) + } + } + } + return d, nil +} diff --git a/types/datum_test.go b/types/datum_test.go index 40d6ae0dd36a0..09b612e47e681 100644 --- a/types/datum_test.go +++ b/types/datum_test.go @@ -15,7 +15,9 @@ package types import ( "fmt" + "math" "reflect" + "strconv" "testing" "time" @@ -373,6 +375,124 @@ func (ts *testDatumSuite) TestCloneDatum(c *C) { } } +func newTypeWithFlag(tp byte, flag uint) *FieldType { + t := NewFieldType(tp) + t.Flag |= flag + return t +} + +func newMyDecimal(val string, c *C) *MyDecimal { + t := MyDecimal{} + err := t.FromString([]byte(val)) + c.Assert(err, IsNil) + return &t +} + +func newRetTypeWithFlenDecimal(tp byte, flen int, decimal int) *FieldType { + return &FieldType{ + Tp: tp, + Flen: flen, + Decimal: decimal, + } +} + +func (ts *testDatumSuite) TestChangeReverseResultByUpperLowerBound(c *C) { + sc := new(stmtctx.StatementContext) + sc.IgnoreTruncate = true + sc.OverflowAsWarning = true + // TODO: add more reverse convert tests for each pair of convert type.
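Before the table-driven cases below, the doc comment's second example as a concrete sketch (mirroring this test's setup):

	sc := new(stmtctx.StatementContext)
	sc.IgnoreTruncate = true
	ft := types.NewFieldType(mysql.TypeDouble)
	// reversing cast(x as signed) = 1 with Ceiling bumps 1.0 to 2.0
	res, err := types.ChangeReverseResultByUpperLowerBound(sc, ft, types.NewIntDatum(1), types.Ceiling)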
+ testData := []struct { + a Datum + res Datum + retType *FieldType + roundType RoundingType + }{ + // int64 reverse to uint64 + { + NewIntDatum(1), + NewUintDatum(2), + newTypeWithFlag(mysql.TypeLonglong, mysql.UnsignedFlag), + Ceiling, + }, + { + NewIntDatum(1), + NewUintDatum(1), + newTypeWithFlag(mysql.TypeLonglong, mysql.UnsignedFlag), + Floor, + }, + { + NewIntDatum(math.MaxInt64), + NewUintDatum(math.MaxUint64), + newTypeWithFlag(mysql.TypeLonglong, mysql.UnsignedFlag), + Ceiling, + }, + { + NewIntDatum(math.MaxInt64), + NewUintDatum(math.MaxInt64), + newTypeWithFlag(mysql.TypeLonglong, mysql.UnsignedFlag), + Floor, + }, + // int64 reverse to float64 + { + NewIntDatum(1), + NewFloat64Datum(2), + newRetTypeWithFlenDecimal(mysql.TypeDouble, mysql.MaxRealWidth, UnspecifiedLength), + Ceiling, + }, + { + NewIntDatum(1), + NewFloat64Datum(1), + newRetTypeWithFlenDecimal(mysql.TypeDouble, mysql.MaxRealWidth, UnspecifiedLength), + Floor, + }, + { + NewIntDatum(math.MaxInt64), + GetMaxValue(newRetTypeWithFlenDecimal(mysql.TypeDouble, mysql.MaxRealWidth, UnspecifiedLength)), + newRetTypeWithFlenDecimal(mysql.TypeDouble, mysql.MaxRealWidth, UnspecifiedLength), + Ceiling, + }, + { + NewIntDatum(math.MaxInt64), + NewFloat64Datum(float64(math.MaxInt64)), + newRetTypeWithFlenDecimal(mysql.TypeDouble, mysql.MaxRealWidth, UnspecifiedLength), + Floor, + }, + // int64 reverse to Decimal + { + NewIntDatum(1), + NewDecimalDatum(newMyDecimal("2", c)), + newRetTypeWithFlenDecimal(mysql.TypeNewDecimal, 30, 3), + Ceiling, + }, + { + NewIntDatum(1), + NewDecimalDatum(newMyDecimal("1", c)), + newRetTypeWithFlenDecimal(mysql.TypeNewDecimal, 30, 3), + Floor, + }, + { + NewIntDatum(math.MaxInt64), + GetMaxValue(newRetTypeWithFlenDecimal(mysql.TypeNewDecimal, 30, 3)), + newRetTypeWithFlenDecimal(mysql.TypeNewDecimal, 30, 3), + Ceiling, + }, + { + NewIntDatum(math.MaxInt64), + NewDecimalDatum(newMyDecimal(strconv.FormatInt(math.MaxInt64, 10), c)), + newRetTypeWithFlenDecimal(mysql.TypeNewDecimal, 30, 3), + Floor, + }, + } + for ith, test := range testData { + reverseRes, err := ChangeReverseResultByUpperLowerBound(sc, test.retType, test.a, test.roundType) + c.Assert(err, IsNil) + var cmp int + cmp, err = reverseRes.CompareDatum(sc, &test.res) + c.Assert(err, IsNil) + c.Assert(cmp, Equals, 0, Commentf("%dth got:%#v, expect:%#v", ith, reverseRes, test.res)) + } +} + func prepareCompareDatums() ([]Datum, []Datum) { vals := make([]Datum, 0, 5) vals = append(vals, NewIntDatum(1)) diff --git a/types/json/binary.go b/types/json/binary.go index 84b2a1e1cd7de..519b74a71253f 100644 --- a/types/json/binary.go +++ b/types/json/binary.go @@ -357,23 +357,6 @@ func marshalStringTo(buf, s []byte) []byte { return buf } -func (bj BinaryJSON) marshalValueEntryTo(buf []byte, entryOff int) ([]byte, error) { - tpCode := bj.Value[entryOff] - switch tpCode { - case TypeCodeLiteral: - buf = marshalLiteralTo(buf, bj.Value[entryOff+1]) - default: - offset := endian.Uint32(bj.Value[entryOff+1:]) - tmp := BinaryJSON{TypeCode: tpCode, Value: bj.Value[offset:]} - var err error - buf, err = tmp.marshalTo(buf) - if err != nil { - return nil, errors.Trace(err) - } - } - return buf, nil -} - func marshalLiteralTo(b []byte, litType byte) []byte { switch litType { case LiteralFalse: diff --git a/types/json/binary_functions.go b/types/json/binary_functions.go index 4a0cfd50d82ba..b936b0eafd765 100644 --- a/types/json/binary_functions.go +++ b/types/json/binary_functions.go @@ -19,7 +19,6 @@ import ( "encoding/hex" "fmt" "sort" - "strconv"
"unicode/utf8" "unsafe" @@ -55,33 +54,33 @@ func (bj BinaryJSON) Type() string { } } -// Quote is for JSON_QUOTE -func (bj BinaryJSON) Quote() string { - str := hack.String(bj.GetString()) - return strconv.Quote(string(str)) -} - // Unquote is for JSON_UNQUOTE. func (bj BinaryJSON) Unquote() (string, error) { switch bj.TypeCode { case TypeCodeString: - tmp := string(hack.String(bj.GetString())) - tlen := len(tmp) - if tlen < 2 { - return tmp, nil - } - head, tail := tmp[0], tmp[tlen-1] - if head == '"' && tail == '"' { - // Remove prefix and suffix '"' before unquoting - return unquoteString(tmp[1 : tlen-1]) - } - // if value is not double quoted, do nothing - return tmp, nil + str := string(hack.String(bj.GetString())) + return UnquoteString(str) default: return bj.String(), nil } } +// UnquoteString remove quotes in a string, +// including the quotes at the head and tail of string. +func UnquoteString(str string) (string, error) { + strLen := len(str) + if strLen < 2 { + return str, nil + } + head, tail := str[0], str[strLen-1] + if head == '"' && tail == '"' { + // Remove prefix and suffix '"' before unquoting + return unquoteString(str[1 : strLen-1]) + } + // if value is not double quoted, do nothing + return str, nil +} + // unquoteString recognizes the escape sequences shown in: // https://dev.mysql.com/doc/refman/5.7/en/json-modification-functions.html#json-unquote-character-escape-sequences func unquoteString(s string) (string, error) { diff --git a/types/mydecimal.go b/types/mydecimal.go index 70a5c651bacc8..4e4ffcd5e84aa 100644 --- a/types/mydecimal.go +++ b/types/mydecimal.go @@ -250,6 +250,11 @@ func (d *MyDecimal) GetDigitsFrac() int8 { return d.digitsFrac } +// GetDigitsInt returns the digitsInt. +func (d *MyDecimal) GetDigitsInt() int8 { + return d.digitsInt +} + // String returns the decimal string representation rounded to resultFrac. func (d *MyDecimal) String() string { tmp := *d @@ -1781,10 +1786,10 @@ func doAdd(from1, from2, to *MyDecimal) error { stop = 0 if wordsInt1 > wordsInt2 { idx1 = wordsInt1 - wordsInt2 - dec1, dec2 = from1, from2 + dec1 = from1 } else { idx1 = wordsInt2 - wordsInt1 - dec1, dec2 = from2, from1 + dec1 = from2 } for idx1 > stop { idxTo-- diff --git a/types/time.go b/types/time.go index bb3c49deae4b5..21110c2245ecb 100644 --- a/types/time.go +++ b/types/time.go @@ -911,7 +911,7 @@ type Duration struct { //Add adds d to d, returns a duration value. func (d Duration) Add(v Duration) (Duration, error) { - if &v == nil { + if v == (Duration{}) { return d, nil } dsum, err := AddInt64(int64(d.Duration), int64(v.Duration)) @@ -926,7 +926,7 @@ func (d Duration) Add(v Duration) (Duration, error) { // Sub subtracts d to d, returns a duration value. 
func (d Duration) Sub(v Duration) (Duration, error) { - if &v == nil { + if v == (Duration{}) { return d, nil } dsum, err := SubInt64(int64(d.Duration), int64(v.Duration)) @@ -2252,16 +2252,6 @@ func skipWhiteSpace(input string) string { return "" } -var weekdayAbbrev = map[string]gotime.Weekday{ - "Sun": gotime.Sunday, - "Mon": gotime.Monday, - "Tue": gotime.Tuesday, - "Wed": gotime.Wednesday, - "Thu": gotime.Tuesday, - "Fri": gotime.Friday, - "Sat": gotime.Saturday, -} - var monthAbbrev = map[string]gotime.Month{ "Jan": gotime.January, "Feb": gotime.February, @@ -2628,35 +2618,6 @@ func monthNumeric(t *MysqlTime, input string, ctx map[string]int) (string, bool) return input[length:], true } -func parseOrdinalNumbers(input string) (value int, remain string) { - for i, c := range input { - if !unicode.IsDigit(c) { - v, err := strconv.ParseUint(input[:i], 10, 64) - if err != nil { - return -1, input - } - value = int(v) - break - } - } - switch { - case strings.HasPrefix(remain, "st"): - if value == 1 { - remain = remain[2:] - return - } - case strings.HasPrefix(remain, "nd"): - if value == 2 { - remain = remain[2:] - return - } - case strings.HasPrefix(remain, "th"): - remain = remain[2:] - return - } - return -1, input -} - // DateFSP gets fsp from date string. func DateFSP(date string) (fsp int) { i := strings.LastIndex(date, ".") diff --git a/types/time_test.go b/types/time_test.go index be3ec53b8f6a2..a3979c19d4665 100644 --- a/types/time_test.go +++ b/types/time_test.go @@ -439,26 +439,6 @@ func (s *testTimeSuite) TestYear(c *C) { } } -func (s *testTimeSuite) getLocation(c *C) *time.Location { - locations := []string{"Asia/Shanghai", "Europe/Berlin"} - timeFormat := "Jan 2, 2006 at 3:04pm (MST)" - - z, err := time.LoadLocation(locations[0]) - c.Assert(err, IsNil) - - t1, err := time.ParseInLocation(timeFormat, "Jul 9, 2012 at 5:02am (CEST)", z) - c.Assert(err, IsNil) - t2, err := time.Parse(timeFormat, "Jul 9, 2012 at 5:02am (CEST)") - c.Assert(err, IsNil) - - if t1.Equal(t2) { - z, err = time.LoadLocation(locations[1]) - c.Assert(err, IsNil) - } - - return z -} - func (s *testTimeSuite) TestCodec(c *C) { defer testleak.AfterTest(c)() diff --git a/util/admin/admin.go b/util/admin/admin.go index 52c949958ec0f..fb5f302d4828f 100644 --- a/util/admin/admin.go +++ b/util/admin/admin.go @@ -104,7 +104,7 @@ func IsJobRollbackable(job *model.Job) bool { model.ActionTruncateTable, model.ActionAddForeignKey, model.ActionDropForeignKey, model.ActionRenameTable, model.ActionModifyTableCharsetAndCollate, model.ActionTruncateTablePartition, - model.ActionModifySchemaCharsetAndCollate: + model.ActionModifySchemaCharsetAndCollate, model.ActionRepairTable: return job.SchemaState == model.StateNone } return true diff --git a/util/admin/admin_test.go b/util/admin/admin_test.go index ce14fd41682c2..5aede319c0c9a 100644 --- a/util/admin/admin_test.go +++ b/util/admin/admin_test.go @@ -262,13 +262,21 @@ func (s *testSuite) TestCancelJobs(c *C) { TableID: 2, Type: model.ActionAddIndex, } + job3 := &model.Job{ + ID: 1003, + SchemaID: 1, + TableID: 2, + Type: model.ActionRepairTable, + } err = t.EnQueueDDLJob(job, meta.AddIndexJobListKey) c.Assert(err, IsNil) err = t.EnQueueDDLJob(job1) c.Assert(err, IsNil) err = t.EnQueueDDLJob(job2, meta.AddIndexJobListKey) c.Assert(err, IsNil) - errs, err = CancelJobs(txn, []int64{job1.ID, job.ID, job2.ID}) + err = t.EnQueueDDLJob(job3) + c.Assert(err, IsNil) + errs, err = CancelJobs(txn, []int64{job1.ID, job.ID, job2.ID, job3.ID}) c.Assert(err, IsNil) for _, err := range 
errs { c.Assert(err, IsNil) diff --git a/util/chunk/codec.go b/util/chunk/codec.go index f866ac34c6efc..51ef7b6baa718 100644 --- a/util/chunk/codec.go +++ b/util/chunk/codec.go @@ -184,6 +184,50 @@ func getFixedLen(colType *types.FieldType) int { } } +// GetFixedLen gets the memory size of a fixed-length type. +// If colType is not fixed-length, it returns varElemLen, i.e. -1. +func GetFixedLen(colType *types.FieldType) int { + return getFixedLen(colType) +} + +// EstimateTypeWidth estimates the average width of values of the type. +// This is used by the planner, which doesn't require absolutely correct results; +// it's OK (and expected) to guess if we don't know for sure. +// +// mostly adapted from https://github.com/postgres/postgres/blob/REL_12_STABLE/src/backend/utils/cache/lsyscache.c#L2356 +func EstimateTypeWidth(padChar bool, colType *types.FieldType) int { + colLen := getFixedLen(colType) + // Easy if it's a fixed-width type + if colLen != varElemLen { + return colLen + } + + colLen = colType.Flen + if colLen > 0 { + /* + * If PAD_CHAR_TO_FULL_LENGTH is enabled, and type is CHAR, + * the colType.Flen is also the only width. + */ + if padChar && colType.Tp == mysql.TypeString { + return colLen + } + if colLen <= 32 { + return colLen + } + if colLen < 1000 { + return 32 + (colLen-32)/2 // assume 50% + } + /* + * Beyond 1000, assume we're looking at something like + * "varchar(10000)" where the limit isn't actually reached often, and + * use a fixed estimate. + */ + return 32 + (1000-32)/2 + } + // Oops, we have no idea ... wild guess time. + return 32 +} + func init() { for i := 0; i < 128; i++ { allNotNullBitmap[i] = 0xFF diff --git a/util/chunk/codec_test.go b/util/chunk/codec_test.go index b904ed9756f85..5e22a3c8f10cb 100644 --- a/util/chunk/codec_test.go +++ b/util/chunk/codec_test.go @@ -77,6 +77,28 @@ func (s *testCodecSuite) TestCodec(c *check.C) { } } +func (s *testCodecSuite) TestEstimateTypeWidth(c *check.C) { + var colType *types.FieldType + + colType = &types.FieldType{Tp: mysql.TypeLonglong} + c.Assert(EstimateTypeWidth(false, colType), check.Equals, 8) // fixed-width type + + colType = &types.FieldType{Tp: mysql.TypeString, Flen: 100000} + c.Assert(EstimateTypeWidth(true, colType), check.Equals, 100000) // PAD_CHAR_TO_FULL_LENGTH + + colType = &types.FieldType{Tp: mysql.TypeString, Flen: 31} + c.Assert(EstimateTypeWidth(false, colType), check.Equals, 31) // colLen <= 32 + + colType = &types.FieldType{Tp: mysql.TypeString, Flen: 999} + c.Assert(EstimateTypeWidth(false, colType), check.Equals, 515) // colLen < 1000: 32 + (999-32)/2 + + colType = &types.FieldType{Tp: mysql.TypeString, Flen: 2000} + c.Assert(EstimateTypeWidth(false, colType), check.Equals, 516) // colLen >= 1000: 32 + (1000-32)/2 + + colType = &types.FieldType{Tp: mysql.TypeString} + c.Assert(EstimateTypeWidth(false, colType), check.Equals, 32) // no Flen: wild guess fallback +} + func BenchmarkEncodeChunk(b *testing.B) { numCols := 4 numRows := 1024 diff --git a/util/domainutil/repair_vars.go b/util/domainutil/repair_vars.go new file mode 100644 index 0000000000000..f45081e96fa53 --- /dev/null +++ b/util/domainutil/repair_vars.go @@ -0,0 +1,170 @@ +// Copyright 2019 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package domainutil + +import ( + "strings" + "sync" + + "github.com/pingcap/parser/model" +) + +type repairInfo struct { + sync.RWMutex + repairMode bool + repairTableList []string + repairDBInfoMap map[int64]*model.DBInfo +} + +// RepairInfo indicates the repaired table info. +var RepairInfo repairInfo + +// InRepairMode indicates whether TiDB is in repairMode. +func (r *repairInfo) InRepairMode() bool { + r.RLock() + defer r.RUnlock() + return r.repairMode +} + +// SetRepairMode sets whether TiDB is in repairMode. +func (r *repairInfo) SetRepairMode(mode bool) { + r.Lock() + defer r.Unlock() + r.repairMode = mode +} + +// GetRepairTableList gets repairing table list. +func (r *repairInfo) GetRepairTableList() []string { + r.RLock() + defer r.RUnlock() + return r.repairTableList +} + +// SetRepairTableList sets repairing table list. +func (r *repairInfo) SetRepairTableList(list []string) { + for i, one := range list { + list[i] = strings.ToLower(one) + } + r.Lock() + defer r.Unlock() + r.repairTableList = list +} + +// CheckAndFetchRepairedTable fetches the repairing table list from meta, true indicates fetch success. +func (r *repairInfo) CheckAndFetchRepairedTable(di *model.DBInfo, tbl *model.TableInfo) bool { + r.Lock() + defer r.Unlock() + if !r.repairMode { + return false + } + isRepair := false + for _, tn := range r.repairTableList { + // Use dbName and tableName to specify a table. + if strings.ToLower(tn) == di.Name.L+"."+tbl.Name.L { + isRepair = true + break + } + } + if isRepair { + // Record the repaired table in Map. + if repairedDB, ok := r.repairDBInfoMap[di.ID]; ok { + repairedDB.Tables = append(repairedDB.Tables, tbl) + } else { + // Shallow copy the DBInfo. + repairedDB := di.Copy() + // Clean the tables and set repaired table. + repairedDB.Tables = []*model.TableInfo{tbl} + r.repairDBInfoMap[di.ID] = repairedDB + } + return true + } + return false +} + +// GetRepairedTableInfoByTableName is exported for test. +func (r *repairInfo) GetRepairedTableInfoByTableName(schemaLowerName, tableLowerName string) (*model.TableInfo, *model.DBInfo) { + r.RLock() + defer r.RUnlock() + for _, db := range r.repairDBInfoMap { + if db.Name.L != schemaLowerName { + continue + } + for _, t := range db.Tables { + if t.Name.L == tableLowerName { + return t, db + } + } + return nil, db + } + return nil, nil +} + +// RemoveFromRepairInfo remove the table from repair info when repaired. +func (r *repairInfo) RemoveFromRepairInfo(schemaLowerName, tableLowerName string) { + repairedLowerName := schemaLowerName + "." + tableLowerName + // Remove from the repair list. + r.Lock() + defer r.Unlock() + for i, rt := range r.repairTableList { + if strings.ToLower(rt) == repairedLowerName { + r.repairTableList = append(r.repairTableList[:i], r.repairTableList[i+1:]...) + break + } + } + // Remove from the repair map. + for _, db := range r.repairDBInfoMap { + if db.Name.L == schemaLowerName { + for j, t := range db.Tables { + if t.Name.L == tableLowerName { + db.Tables = append(db.Tables[:j], db.Tables[j+1:]...) 
+ break + } + } + if len(db.Tables) == 0 { + delete(r.repairDBInfoMap, db.ID) + } + break + } + } + if len(r.repairDBInfoMap) == 0 { + r.repairMode = false + } +} + +// repairKeyType is keyType for admin repair table. +type repairKeyType int + +const ( + // RepairedTable is the key type, caching the target repaired table in sessionCtx. + RepairedTable repairKeyType = iota + // RepairedDatabase is the key type, caching the target repaired database in sessionCtx. + RepairedDatabase +) + +func (t repairKeyType) String() (res string) { + switch t { + case RepairedTable: + res = "RepairedTable" + case RepairedDatabase: + res = "RepairedDatabase" + } + return res +} + +func init() { + RepairInfo = repairInfo{} + RepairInfo.repairMode = false + RepairInfo.repairTableList = []string{} + RepairInfo.repairDBInfoMap = make(map[int64]*model.DBInfo) +} diff --git a/util/execdetails/execdetails.go b/util/execdetails/execdetails.go index b80e025ea2bd7..6e1ae05f6b0ec 100644 --- a/util/execdetails/execdetails.go +++ b/util/execdetails/execdetails.go @@ -37,6 +37,8 @@ type ExecDetails struct { ProcessTime time.Duration WaitTime time.Duration BackoffTime time.Duration + BackoffSleep map[string]time.Duration + BackoffTimes map[string]int RequestCount int TotalKeys int64 ProcessedKeys int64 diff --git a/util/expensivequery/expensivequery.go b/util/expensivequery/expensivequery.go index 9e1b26a1cc234..6ba77fb2052f6 100644 --- a/util/expensivequery/expensivequery.go +++ b/util/expensivequery/expensivequery.go @@ -52,6 +52,7 @@ func (eqh *Handle) Run() { // use 100ms as tickInterval temply, may use given interval or use defined variable later tickInterval := time.Millisecond * time.Duration(100) ticker := time.NewTicker(tickInterval) + defer ticker.Stop() for { select { case <-ticker.C: diff --git a/util/memory/tracker.go b/util/memory/tracker.go index a28116c85feec..400c91c8bac61 100644 --- a/util/memory/tracker.go +++ b/util/memory/tracker.go @@ -78,6 +78,12 @@ func (t *Tracker) SetBytesLimit(bytesLimit int64) { t.bytesLimit = bytesLimit } +// GetBytesLimit gets the bytes limit for this tracker. +// "bytesLimit <= 0" means no limit. +func (t *Tracker) GetBytesLimit() int64 { + return t.bytesLimit +} + // SetActionOnExceed sets the action when memory usage exceeds bytesLimit. 
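Taking repair_vars.go as a whole, the intended lifecycle is: tidb-server seeds RepairInfo from the new flags, the schema loader calls CheckAndFetchRepairedTable while building the infoschema, and ADMIN REPAIR TABLE finishes by calling RemoveFromRepairInfo (which also drops repair mode once the map empties). A hedged sketch, with dbInfo/tblInfo standing in for the metadata being loaded:

	domainutil.RepairInfo.SetRepairMode(true)
	domainutil.RepairInfo.SetRepairTableList([]string{"test.t1"})
	if domainutil.RepairInfo.CheckAndFetchRepairedTable(dbInfo, tblInfo) {
		// test.t1 is diverted into the repair map and hidden from the public schema
	}
	// after a successful ADMIN REPAIR TABLE test.t1:
	domainutil.RepairInfo.RemoveFromRepairInfo("test", "t1")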
func (t *Tracker) SetActionOnExceed(a ActionOnExceed) { t.actionMu.Lock() diff --git a/util/mock/context.go b/util/mock/context.go index de79e16dcc92c..41f5625cca6cd 100644 --- a/util/mock/context.go +++ b/util/mock/context.go @@ -26,6 +26,7 @@ import ( "github.com/pingcap/tidb/sessionctx" "github.com/pingcap/tidb/sessionctx/variable" "github.com/pingcap/tidb/util" + "github.com/pingcap/tidb/util/disk" "github.com/pingcap/tidb/util/kvcache" "github.com/pingcap/tidb/util/memory" "github.com/pingcap/tidb/util/sqlexec" @@ -267,6 +268,7 @@ func NewContext() *Context { sctx.sessionVars.MaxChunkSize = 32 sctx.sessionVars.StmtCtx.TimeZone = time.UTC sctx.sessionVars.StmtCtx.MemTracker = memory.NewTracker(stringutil.StringerStr("mock.NewContext"), -1) + sctx.sessionVars.StmtCtx.DiskTracker = disk.NewTracker(stringutil.StringerStr("mock.NewContext"), -1) sctx.sessionVars.GlobalVarsAccessor = variable.NewMockGlobalAccessor() if err := sctx.GetSessionVars().SetSystemVar(variable.MaxAllowedPacket, "67108864"); err != nil { panic(err) diff --git a/util/plancodec/codec.go b/util/plancodec/codec.go index 31b77df1e84b7..971bb94d84f75 100644 --- a/util/plancodec/codec.go +++ b/util/plancodec/codec.go @@ -55,6 +55,17 @@ func DecodePlan(planString string) (string, error) { return pd.decode(planString) } +// DecodeNormalizedPlan decodes the string to plan tree. +func DecodeNormalizedPlan(planString string) (string, error) { + if len(planString) == 0 { + return "", nil + } + pd := decoderPool.Get().(*planDecoder) + defer decoderPool.Put(pd) + pd.buf.Reset() + return pd.buildPlanTree(planString) +} + type planDecoder struct { buf bytes.Buffer depths []int @@ -72,8 +83,11 @@ func (pd *planDecoder) decode(planString string) (string, error) { if err != nil { return "", err } + return pd.buildPlanTree(str) +} - nodes := strings.Split(str, lineBreakerStr) +func (pd *planDecoder) buildPlanTree(planString string) (string, error) { + nodes := strings.Split(planString, lineBreakerStr) if len(pd.depths) < len(nodes) { pd.depths = make([]int, 0, len(nodes)) pd.planInfos = make([]*planInfo, 0, len(nodes)) @@ -256,6 +270,22 @@ func EncodePlanNode(depth, pid int, planType string, isRoot bool, rowCount float buf.WriteByte(lineBreaker) } +// NormalizePlanNode is used to normalize the plan to a string. +func NormalizePlanNode(depth, pid int, planType string, isRoot bool, explainInfo string, buf *bytes.Buffer) { + buf.WriteString(strconv.Itoa(depth)) + buf.WriteByte(separator) + buf.WriteString(encodeID(planType, pid)) + buf.WriteByte(separator) + if isRoot { + buf.WriteString(rootTaskType) + } else { + buf.WriteString(copTaskType) + } + buf.WriteByte(separator) + buf.WriteString(explainInfo) + buf.WriteByte(lineBreaker) +} + func encodeID(planType string, id int) string { planID := TypeStringToPhysicalID(planType) return strconv.Itoa(planID) + idSeparator + strconv.Itoa(id) diff --git a/util/plancodec/id.go b/util/plancodec/id.go index 8369f7bfd0e59..583799b422cfa 100644 --- a/util/plancodec/id.go +++ b/util/plancodec/id.go @@ -84,8 +84,8 @@ const ( TypeIndexReader = "IndexReader" // TypeWindow is the type of Window. TypeWindow = "Window" - // TypeTableGather is the type of TableGather. - TypeTableGather = "TableGather" + // TypeTiKVSingleGather is the type of TiKVSingleGather. + TypeTiKVSingleGather = "TiKVSingleGather" // TypeIndexMerge is the type of IndexMergeReader TypeIndexMerge = "IndexMerge" // TypePointGet is the type of PointGetPlan. 
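The two plancodec additions pair up: NormalizePlanNode appends one normalized row per operator, and DecodeNormalizedPlan rebuilds the indented tree from those rows, skipping the decompression step that DecodePlan performs. A round-trip sketch with made-up operator IDs:

	var buf bytes.Buffer
	plancodec.NormalizePlanNode(0, 43, plancodec.TypeTableReader, true, "data:TableScan", &buf)
	plancodec.NormalizePlanNode(1, 44, plancodec.TypeTableScan, false, "table:t, range:[-inf,+inf]", &buf)
	tree, err := plancodec.DecodeNormalizedPlan(buf.String())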
@@ -94,6 +94,8 @@ const (
 	TypeShowDDLJobs = "ShowDDLJobs"
 	// TypeBatchPointGet is the type of BatchPointGetPlan.
 	TypeBatchPointGet = "Batch_Point_Get"
+	// TypeClusterMemTableReader is the type of ClusterMemTableReader.
+	TypeClusterMemTableReader = "ClusterMemTableReader"
 )
 
 // plan id.
@@ -132,11 +134,12 @@ const (
 	typeTableReaderID
 	typeIndexReaderID
 	typeWindowID
-	typeTableGatherID
+	typeTiKVSingleGatherID
 	typeIndexMergeID
 	typePointGet
 	typeShowDDLJobs
 	typeBatchPointGet
+	typeClusterMemTableReader
 )
 
 // TypeStringToPhysicalID converts the plan type string to plan id.
@@ -210,8 +213,8 @@ func TypeStringToPhysicalID(tp string) int {
 		return typeIndexReaderID
 	case TypeWindow:
 		return typeWindowID
-	case TypeTableGather:
-		return typeTableGatherID
+	case TypeTiKVSingleGather:
+		return typeTiKVSingleGatherID
 	case TypeIndexMerge:
 		return typeIndexMergeID
 	case TypePointGet:
@@ -220,6 +223,8 @@ func TypeStringToPhysicalID(tp string) int {
 		return typeShowDDLJobs
 	case TypeBatchPointGet:
 		return typeBatchPointGet
+	case TypeClusterMemTableReader:
+		return typeClusterMemTableReader
 	}
 	// Should never reach here.
 	return 0
@@ -296,8 +301,8 @@ func PhysicalIDToTypeString(id int) string {
 		return TypeIndexReader
 	case typeWindowID:
 		return TypeWindow
-	case typeTableGatherID:
-	 	return TypeTableGather
+	case typeTiKVSingleGatherID:
+		return TypeTiKVSingleGather
 	case typeIndexMergeID:
 		return TypeIndexMerge
 	case typePointGet:
@@ -306,6 +311,8 @@ func PhysicalIDToTypeString(id int) string {
 		return TypeShowDDLJobs
 	case typeBatchPointGet:
 		return TypeBatchPointGet
+	case typeClusterMemTableReader:
+		return TypeClusterMemTableReader
 	}
 	// Should never reach here.
diff --git a/util/profile/profile.go b/util/profile/profile.go
index 593ddf8df851e..6f104b1407d4d 100644
--- a/util/profile/profile.go
+++ b/util/profile/profile.go
@@ -16,6 +16,7 @@ package profile
 import (
 	"bytes"
 	"io"
+	"io/ioutil"
 	"runtime/pprof"
 	"strconv"
 	"strings"
@@ -79,27 +80,27 @@ func (c *Collector) ProfileGraph(name string) ([][]types.Datum, error) {
 	if p == nil {
 		return nil, errors.Errorf("cannot retrieve %s profile", name)
 	}
+	debug := 0
+	if name == "goroutine" {
+		debug = 2
+	}
 	buffer := &bytes.Buffer{}
-	if err := p.WriteTo(buffer, 0); err != nil {
+	if err := p.WriteTo(buffer, debug); err != nil {
 		return nil, err
 	}
+	if name == "goroutine" {
+		return c.ParseGoroutines(buffer)
+	}
 	return c.ProfileReaderToDatums(buffer)
 }
 
-// Goroutines returns the groutine list which alive in runtime
-func (c *Collector) Goroutines() ([][]types.Datum, error) {
-	p := pprof.Lookup("goroutine")
-	if p == nil {
-		return nil, errors.Errorf("cannot retrieve goroutine profile")
-	}
-
-	buffer := bytes.Buffer{}
-	err := p.WriteTo(&buffer, 2)
+// ParseGoroutines returns the goroutine list for the given string representation
+func (c *Collector) ParseGoroutines(reader io.Reader) ([][]types.Datum, error) {
+	content, err := ioutil.ReadAll(reader)
 	if err != nil {
 		return nil, err
 	}
-
-	goroutines := strings.Split(buffer.String(), "\n\n")
+	goroutines := strings.Split(string(content), "\n\n")
 	var rows [][]types.Datum
 	for _, goroutine := range goroutines {
 		colIndex := strings.Index(goroutine, ":")
diff --git a/util/set/int_set.go b/util/set/int_set.go
index 9fef718e0c42a..dc835d7a82e7e 100644
--- a/util/set/int_set.go
+++ b/util/set/int_set.go
@@ -36,8 +36,12 @@ func (s IntSet) Insert(val int) {
 type Int64Set map[int64]struct{}
 
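// Editor's note: a minimal sketch of the refactored goroutine path above (not
// part of this patch). Because ParseGoroutines now takes an io.Reader, a
// previously captured "debug=2" pprof dump can be parsed directly; the
// goroutineDump string below is a hypothetical holder for such a dump.
//
//	c := &profile.Collector{}
//	rows, err := c.ParseGoroutines(strings.NewReader(goroutineDump))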
 // NewInt64Set builds a Int64Set.
-func NewInt64Set() Int64Set {
-	return make(map[int64]struct{})
+func NewInt64Set(xs ...int64) Int64Set {
+	set := make(Int64Set)
+	for _, x := range xs {
+		set.Insert(x)
+	}
+	return set
 }
 
 // Exist checks whether `val` exists in `s`.
diff --git a/util/set/int_set_test.go b/util/set/int_set_test.go
index 6f5a928964556..0fd14e68eb32d 100644
--- a/util/set/int_set_test.go
+++ b/util/set/int_set_test.go
@@ -57,4 +57,10 @@ func (s *intSetTestSuite) TestInt64Set(c *check.C) {
 	}
 
 	c.Assert(set.Exist(11), check.IsFalse)
+
+	set = NewInt64Set(1, 2, 3, 4, 5, 6)
+	for i := 1; i < 7; i++ {
+		c.Assert(set.Exist(int64(i)), check.IsTrue)
+	}
+	c.Assert(set.Exist(7), check.IsFalse)
 }
diff --git a/util/set/string_set.go b/util/set/string_set.go
index 37745b4ecc6ae..019c25c472c23 100644
--- a/util/set/string_set.go
+++ b/util/set/string_set.go
@@ -17,8 +17,12 @@ package set
 type StringSet map[string]struct{}
 
 // NewStringSet builds a float64 set.
-func NewStringSet() StringSet {
-	return make(map[string]struct{})
+func NewStringSet(ss ...string) StringSet {
+	set := make(StringSet)
+	for _, s := range ss {
+		set.Insert(s)
+	}
+	return set
 }
 
 // Exist checks whether `val` exists in `s`.
@@ -31,3 +35,14 @@ func (s StringSet) Exist(val string) bool {
 func (s StringSet) Insert(val string) {
 	s[val] = struct{}{}
 }
+
+// Intersection returns the intersection of two sets.
+func (s StringSet) Intersection(rhs StringSet) StringSet {
+	newSet := NewStringSet()
+	for elt := range s {
+		if rhs.Exist(elt) {
+			newSet.Insert(elt)
+		}
+	}
+	return newSet
+}
diff --git a/util/set/string_set_test.go b/util/set/string_set_test.go
index 9b07d16828697..21db3fbdcac30 100644
--- a/util/set/string_set_test.go
+++ b/util/set/string_set_test.go
@@ -14,6 +14,8 @@ package set
 
 import (
+	"fmt"
+
 	"github.com/pingcap/check"
 )
 
@@ -38,4 +40,24 @@ func (s *stringSetTestSuite) TestStringSet(c *check.C) {
 	}
 
 	c.Assert(set.Exist("11"), check.IsFalse)
+
+	set = NewStringSet("1", "2", "3", "4", "5", "6")
+	for i := 1; i < 7; i++ {
+		c.Assert(set.Exist(fmt.Sprintf("%d", i)), check.IsTrue)
+	}
+	c.Assert(set.Exist("7"), check.IsFalse)
+
+	s1 := NewStringSet("1", "2", "3")
+	s2 := NewStringSet("4", "2", "3")
+	s3 := s1.Intersection(s2)
+	c.Assert(s3, check.DeepEquals, NewStringSet("2", "3"))
+
+	s4 := NewStringSet("4", "5", "3")
+	c.Assert(s3.Intersection(s4), check.DeepEquals, NewStringSet("3"))
+
+	s5 := NewStringSet("4", "5")
+	c.Assert(s3.Intersection(s5), check.DeepEquals, NewStringSet())
+
+	s6 := NewStringSet()
+	c.Assert(s3.Intersection(s6), check.DeepEquals, NewStringSet())
 }
diff --git a/util/timeutil/time.go b/util/timeutil/time.go
index b11b1dd5d127f..6b00468c625a4 100644
--- a/util/timeutil/time.go
+++ b/util/timeutil/time.go
@@ -161,6 +161,16 @@ func Zone(loc *time.Location) (string, int64) {
 	return name, int64(offset)
 }
 
+// ConstructTimeZone constructs a timezone by name first. When the timezone name
+// is set, the daylight saving problem must be considered. Otherwise the
+// timezone offset in seconds east of UTC is used to construct the timezone.
+func ConstructTimeZone(name string, offset int) (*time.Location, error) {
+	if name != "" {
+		return LoadLocation(name)
+	}
+	return time.FixedZone("", offset), nil
+}
+
 // WithinDayTimePeriod tests whether `now` is between `start` and `end`.
 func WithinDayTimePeriod(start, end, now time.Time) bool {
 	// Converts to UTC and only keeps the hour and minute info.
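// Editor's note: a minimal usage sketch for the new ConstructTimeZone helper
// above (not part of this patch); the zone name and offset are illustrative.
//
//	// A non-empty name takes precedence and honors daylight saving time.
//	loc, err := timeutil.ConstructTimeZone("America/Los_Angeles", 0)
//	// An empty name falls back to a fixed offset east of UTC (UTC+8 here).
//	loc, err = timeutil.ConstructTimeZone("", 8*3600)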