Merge branch 'master' into mocktikv

disksing authored Mar 18, 2021
2 parents c968b7a + 28c3748 commit 154070b
Showing 286 changed files with 8,846 additions and 3,961 deletions.
143 changes: 87 additions & 56 deletions bindinfo/bind_test.go

Large diffs are not rendered by default.

13 changes: 4 additions & 9 deletions bindinfo/handle.go
@@ -25,7 +25,6 @@ import (

"github.com/pingcap/parser"
"github.com/pingcap/parser/ast"
"github.com/pingcap/parser/format"
"github.com/pingcap/parser/mysql"
"github.com/pingcap/parser/terror"
"github.com/pingcap/tidb/metrics"
@@ -624,7 +623,7 @@ func (h *BindHandle) CaptureBaselines() {
continue
}
dbName := utilparser.GetDefaultDB(stmt, bindableStmt.Schema)
- normalizedSQL, digest := parser.NormalizeDigest(utilparser.RestoreWithDefaultDB(stmt, dbName))
+ normalizedSQL, digest := parser.NormalizeDigest(utilparser.RestoreWithDefaultDB(stmt, dbName, bindableStmt.Query))
if r := h.GetBindRecord(digest, normalizedSQL, dbName); r != nil && r.HasUsingBinding() {
continue
}
@@ -689,14 +688,10 @@ func GenerateBindSQL(ctx context.Context, stmtNode ast.StmtNode, planHint string
// We need to evolve plan based on the current sql, not the original sql which may have different parameters.
// So here we would remove the hint and inject the current best plan hint.
hint.BindHint(stmtNode, &hint.HintsSet{})
- var sb strings.Builder
- restoreCtx := format.NewRestoreCtx(format.DefaultRestoreFlags, &sb)
- restoreCtx.DefaultDB = defaultDB
- err := stmtNode.Restore(restoreCtx)
- if err != nil {
- logutil.Logger(ctx).Debug("[sql-bind] restore SQL failed when generating bind SQL", zap.Error(err))
+ bindSQL := utilparser.RestoreWithDefaultDB(stmtNode, defaultDB, "")
+ if bindSQL == "" {
return ""
}
- bindSQL := sb.String()
switch n := stmtNode.(type) {
case *ast.DeleteStmt:
deleteIdx := strings.Index(bindSQL, "DELETE")
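The GenerateBindSQL change above folds the inline restore sequence into the shared utilparser.RestoreWithDefaultDB helper, which signals failure with an empty string. A minimal sketch of what such a helper does, reconstructed from the removed lines; the real helper also takes the original SQL text as a third argument (visible in the CaptureBaselines hunk), which is not modeled here:

```go
// Sketch of a RestoreWithDefaultDB-style helper, reconstructed from the
// inline code removed above; the actual util/parser implementation may differ.
package utilparser

import (
	"strings"

	"github.com/pingcap/parser/ast"
	"github.com/pingcap/parser/format"
)

// restoreWithDefaultDB renders a statement back to SQL text, qualifying
// unqualified table names with defaultDB. Callers treat "" as failure,
// which is why the new GenerateBindSQL checks `bindSQL == ""`.
func restoreWithDefaultDB(stmt ast.StmtNode, defaultDB string) string {
	var sb strings.Builder
	restoreCtx := format.NewRestoreCtx(format.DefaultRestoreFlags, &sb)
	restoreCtx.DefaultDB = defaultDB
	if err := stmt.Restore(restoreCtx); err != nil {
		return ""
	}
	return sb.String()
}
```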
9 changes: 5 additions & 4 deletions cmd/ddltest/column_test.go
@@ -21,14 +21,15 @@ import (
"time"

. "github.com/pingcap/check"
"github.com/pingcap/log"
"github.com/pingcap/parser/terror"
"github.com/pingcap/tidb/kv"
plannercore "github.com/pingcap/tidb/planner/core"
"github.com/pingcap/tidb/session"
"github.com/pingcap/tidb/table"
"github.com/pingcap/tidb/table/tables"
"github.com/pingcap/tidb/types"
log "github.com/sirupsen/logrus"
"go.uber.org/zap"
goctx "golang.org/x/net/context"
)

@@ -56,7 +57,7 @@ func (s *TestDDLSuite) checkAddColumn(c *C, rowID int64, defaultVal interface{},
// When insert a row with 3 columns, the third column value will be the first column value.
newInsertCount++
} else {
log.Fatalf("[checkAddColumn fail]invalid row: %v", data)
log.Fatal("[checkAddColumn fail]invalid row", zap.Any("row", data))
}
}

@@ -67,7 +68,7 @@ func (s *TestDDLSuite) checkAddColumn(c *C, rowID int64, defaultVal interface{},
} else if reflect.DeepEqual(col3Val, updatedVal) {
newUpdateCount++
} else {
log.Fatalf("[checkAddColumn fail]invalid row: %v", data)
log.Fatal("[checkAddColumn fail]invalid row", zap.Any("row", data))
}
}

@@ -102,7 +103,7 @@ func (s *TestDDLSuite) checkDropColumn(c *C, rowID int64, alterColumn *table.Col
// Check updated row.
updateCount++
} else {
log.Fatalf("[checkDropColumn fail]invalid row: %v", data)
log.Fatal("[checkDropColumn fail]invalid row", zap.Any("row", data))
}
return true, nil
})
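These test changes are part of a wider migration in this commit from printf-style logrus calls to structured logging via github.com/pingcap/log, a zap wrapper. A minimal before/after sketch of the pattern; the message and field name mirror the diff above, the rest is illustrative:

```go
// Minimal sketch of the logrus-to-zap migration pattern applied above.
package main

import (
	"github.com/pingcap/log"
	"go.uber.org/zap"
)

func main() {
	data := []interface{}{int64(1), "c2"}

	// Before: logrus interpolates the value into the message string.
	//   log.Fatalf("[checkAddColumn fail]invalid row: %v", data)

	// After: the message stays constant and the value travels as a typed
	// field, which downstream log processors can index. Error is used here
	// instead of Fatal so the example does not exit the process.
	log.Error("[checkAddColumn fail]invalid row", zap.Any("row", data))
}
```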
61 changes: 41 additions & 20 deletions cmd/ddltest/ddl_test.go
@@ -32,6 +32,7 @@ import (
_ "github.com/go-sql-driver/mysql"
. "github.com/pingcap/check"
"github.com/pingcap/errors"
"github.com/pingcap/log"
zaplog "github.com/pingcap/log"
"github.com/pingcap/parser/model"
"github.com/pingcap/parser/terror"
@@ -48,7 +49,7 @@ import (
"github.com/pingcap/tidb/util/logutil"
"github.com/pingcap/tidb/util/testkit"
"github.com/pingcap/tidb/util/testutil"
log "github.com/sirupsen/logrus"
"go.uber.org/zap"
goctx "golang.org/x/net/context"
)

@@ -163,7 +164,7 @@ func (s *TestDDLSuite) restartServerRegularly() {
if *enableRestart {
err = s.restartServerRand()
if err != nil {
log.Fatalf("restartServerRand failed, err %v", errors.ErrorStack(err))
log.Fatal("restartServerRand failed", zap.Error(err))
}
}
case <-s.quit:
@@ -184,7 +185,7 @@ func (s *TestDDLSuite) TearDownSuite(c *C) {
case <-time.After(100 * time.Second):
buf := make([]byte, 2<<20)
size := runtime.Stack(buf, true)
log.Errorf("%s", buf[:size])
log.Error("testing timeout", zap.ByteString("buf", buf[:size]))
case <-quitCh:
}
}()
@@ -224,12 +225,12 @@ func (s *TestDDLSuite) killServer(proc *os.Process) error {
// Make sure this tidb is killed, and it makes the next tidb that has the same port as this one start quickly.
err := proc.Kill()
if err != nil {
log.Errorf("kill server failed err %v", err)
log.Error("kill server failed", zap.Error(err))
return errors.Trace(err)
}
_, err = proc.Wait()
if err != nil {
log.Errorf("kill server, wait failed err %v", err)
log.Error("kill server, wait failed", zap.Error(err))
return errors.Trace(err)
}

@@ -296,25 +297,29 @@ func (s *TestDDLSuite) startServer(i int, fp *os.File) (*server, error) {
for i := 0; i < s.retryCount; i++ {
db, err = sql.Open("mysql", fmt.Sprintf("root@(%s)/test_ddl", addr))
if err != nil {
log.Warnf("open addr %v failed, retry count %d err %v", addr, i, err)
log.Warn("open addr failed", zap.String("addr", addr), zap.Int("retry count", i), zap.Error(err))
continue
}
err = db.Ping()
if err == nil {
break
}
log.Warnf("ping addr %v failed, retry count %d err %v", addr, i, err)
log.Warn("ping addr failed", zap.String("addr", addr), zap.Int("retry count", i), zap.Error(err))

err = db.Close()
if err != nil {
log.Warnf("close db failed, retry count %d err %v", i, err)
log.Warn("close db failed", zap.Int("retry count", i), zap.Error(err))
break
}
time.Sleep(sleepTime)
sleepTime += sleepTime
}
if err != nil {
log.Errorf("restart server addr %v failed %v, take time %v", addr, err, time.Since(startTime))
log.Error("restart server addr failed",
zap.String("addr", addr),
zap.Duration("take time", time.Since(startTime)),
zap.Error(err),
)
return nil, errors.Trace(err)
}
db.SetMaxOpenConns(10)
@@ -324,7 +329,7 @@ func (s *TestDDLSuite) startServer(i int, fp *os.File) (*server, error) {
return nil, errors.Trace(err)
}

log.Infof("start server %s ok %v", addr, err)
log.Info("start server ok", zap.String("addr", addr), zap.Error(err))

return &server{
Cmd: cmd,
@@ -346,7 +351,7 @@ func (s *TestDDLSuite) restartServerRand() error {

server := s.procs[i]
s.procs[i] = nil
log.Warnf("begin to restart %s", server.addr)
log.Warn("begin to restart", zap.String("addr", server.addr))
err := s.killServer(server.Process)
if err != nil {
return errors.Trace(err)
@@ -372,11 +377,11 @@ func isRetryError(err error) bool {

// TODO: Check the specific columns number.
if strings.Contains(err.Error(), "Column count doesn't match value count at row") {
log.Warnf("err is %v", err)
log.Warn("err", zap.Error(err))
return false
}

log.Errorf("err is %v, can not retry", err)
log.Error("can not retry", zap.Error(err))

return false
}
@@ -386,7 +391,11 @@ func (s *TestDDLSuite) exec(query string, args ...interface{}) (sql.Result, error) {
server := s.getServer()
r, err := server.db.Exec(query, args...)
if isRetryError(err) {
log.Errorf("exec %s in server %s err %v, retry", query, err, server.addr)
log.Error("exec in server, retry",
zap.String("query", query),
zap.String("addr", server.addr),
zap.Error(err),
)
continue
}

@@ -397,7 +406,11 @@ func (s *TestDDLSuite) mustExec(c *C, query string, args ...interface{}) sql.Result {
func (s *TestDDLSuite) mustExec(c *C, query string, args ...interface{}) sql.Result {
r, err := s.exec(query, args...)
if err != nil {
log.Fatalf("[mustExec fail]query - %v %v, error - %v", query, args, err)
log.Fatal("[mustExec fail]query",
zap.String("query", query),
zap.Any("args", args),
zap.Error(err),
)
}

return r
@@ -418,7 +431,11 @@ func (s *TestDDLSuite) execInsert(c *C, query string, args ...interface{}) sql.Result {
}
}

log.Fatalf("[execInsert fail]query - %v %v, error - %v", query, args, err)
log.Fatal("[execInsert fail]query",
zap.String("query", query),
zap.Any("args", args),
zap.Error(err),
)
}
}

@@ -427,7 +444,11 @@ func (s *TestDDLSuite) query(query string, args ...interface{}) (*sql.Rows, error) {
server := s.getServer()
r, err := server.db.Query(query, args...)
if isRetryError(err) {
log.Errorf("query %s in server %s err %v, retry", query, err, server.addr)
log.Error("query in server, retry",
zap.String("query", query),
zap.String("addr", server.addr),
zap.Error(err),
)
continue
}

@@ -447,7 +468,7 @@ func (s *TestDDLSuite) getServer() *server {
}
}

log.Fatalf("try to get server too many times")
log.Fatal("try to get server too many times")
return nil
}

@@ -787,7 +808,7 @@ func (s *TestDDLSuite) TestSimpleConflictUpdate(c *C) {
c.Assert(keysMap, HasKey, data[0].GetValue())

if !reflect.DeepEqual(data[1].GetValue(), data[0].GetValue()) && !reflect.DeepEqual(data[1].GetValue(), defaultValue) {
log.Fatalf("[TestSimpleConflictUpdate fail]Bad row: %v", data)
log.Fatal("[TestSimpleConflictUpdate fail]Bad row", zap.Any("row", data))
}

return true, nil
@@ -978,7 +999,7 @@ func (s *TestDDLSuite) TestSimpleMixed(c *C) {
} else if reflect.DeepEqual(data[1].GetValue(), defaultValue) && data[0].GetInt64() < int64(rowCount) {
updateCount++
} else {
log.Fatalf("[TestSimpleMixed fail]invalid row: %v", data)
log.Fatal("[TestSimpleMixed fail]invalid row", zap.Any("row", data))
}

return true, nil
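The exec and query wrappers above share one shape: pick a live server, run the statement, and loop only while isRetryError classifies the failure as transient. A self-contained sketch of that retry loop; the retryable predicate is a hypothetical stand-in, since this diff only shows the non-retryable branch of the real check:

```go
// Sketch of the retry loop used by exec/query above. The "connection is
// already closed" match is a hypothetical placeholder for the real
// retryable-error checks, which are not visible in this diff.
package main

import (
	"database/sql"
	"strings"

	"github.com/pingcap/log"
	"go.uber.org/zap"
)

func isRetryError(err error) bool {
	if err == nil {
		return false
	}
	return strings.Contains(err.Error(), "connection is already closed")
}

// execWithRetry keeps re-running the statement until the error is nil or
// classified as permanent, logging each retryable failure with context.
func execWithRetry(db *sql.DB, query string, args ...interface{}) (sql.Result, error) {
	for {
		r, err := db.Exec(query, args...)
		if isRetryError(err) {
			log.Error("exec in server, retry", zap.String("query", query), zap.Error(err))
			continue
		}
		return r, err
	}
}

func main() {} // the wrappers above are exercised by the test suite
```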
2 changes: 1 addition & 1 deletion cmd/ddltest/index_test.go
@@ -104,7 +104,7 @@ func (s *TestDDLSuite) checkDropIndex(c *C, indexInfo *model.IndexInfo) {
c.Assert(err, IsNil)
txn, err := ctx.Txn(false)
c.Assert(err, IsNil)
- defer func(){
+ defer func() {
err := txn.Rollback()
c.Assert(err, IsNil)
}()
3 changes: 2 additions & 1 deletion cmd/explaintest/main.go
@@ -94,7 +94,7 @@ func newTester(name string) *tester {
t.enableQueryLog = true
t.ctx = mock.NewContext()
t.ctx.GetSessionVars().EnableWindowFunction = true
-
+ t.ctx.GetSessionVars().IntPrimaryKeyDefaultAsClustered = true
return t
}

@@ -658,6 +658,7 @@ func main() {
"set @@tidb_projection_concurrency=4",
"set @@tidb_distsql_scan_concurrency=15",
"set @@global.tidb_enable_clustered_index=0;",
"set @@tidb_int_primary_key_default_as_clustered=1",
}
for _, sql := range resets {
if _, err = mdb.Exec(sql); err != nil {
22 changes: 10 additions & 12 deletions cmd/explaintest/r/clustered_index.result
@@ -3,24 +3,22 @@ create database with_cluster_index;
drop database if exists wout_cluster_index;
create database wout_cluster_index;
use with_cluster_index;
- set @@tidb_enable_clustered_index = 1;
- create table tbl_0 ( col_0 decimal not null , col_1 blob(207) , col_2 text , col_3 datetime default '1986-07-01' , col_4 bigint unsigned default 1504335725690712365 , primary key idx_0 ( col_3,col_2(1),col_1(6) ) , key idx_1 ( col_3 ) , unique key idx_2 ( col_3 ) , unique key idx_3 ( col_0 ) , key idx_4 ( col_1(1),col_2(1) ) , key idx_5 ( col_2(1) ) ) ;
- create table tbl_1 ( col_5 char(135) , col_6 bit(17) default 50609 not null , col_7 char(202) default 'IoQWYoGdbbgBDlxpDHQ' , col_8 char(213) , col_9 time not null , primary key idx_6 ( col_6 ) , unique key idx_7 ( col_5 ) ) ;
- create table tbl_2 ( col_10 datetime default '1976-05-11' , col_11 datetime , col_12 float , col_13 double(56,29) default 18.0118 , col_14 char not null , primary key idx_8 ( col_14,col_13,col_10 ) , key idx_9 ( col_11 ) ) ;
- create table tbl_3 ( col_15 tinyint default -91 not null , col_16 bit(61) default 990141831018971350 not null , col_17 double(244,22) default 3985 not null , col_18 binary(32) default 'kxMlWqvpxXNBlxoU' , col_19 text(401) , primary key idx_10 ( col_18,col_19(4) ) , key idx_11 ( col_17,col_18,col_19(2),col_15,col_16 ) , unique key idx_12 ( col_17 ) ) ;
- create table tbl_4 ( col_20 double(230,16) default 8.49 not null , col_21 int unsigned not null , col_22 enum('Alice','Bob','Charlie','David') not null , col_23 float default 3066.13040283622 , col_24 datetime default '1980-10-27' not null , primary key idx_13 ( col_22,col_24 ) , key idx_14 ( col_23,col_20 ) , key idx_15 ( col_24 ) , key idx_16 ( col_20 ) , unique key idx_17 ( col_24 ) , key idx_18 ( col_21 ) ) ;
+ create table tbl_0 ( col_0 decimal not null , col_1 blob(207) , col_2 text , col_3 datetime default '1986-07-01' , col_4 bigint unsigned default 1504335725690712365 , primary key idx_0 ( col_3,col_2(1),col_1(6) ) clustered, key idx_1 ( col_3 ), unique key idx_2 ( col_3 ) , unique key idx_3 ( col_0 ) , key idx_4 ( col_1(1),col_2(1) ) , key idx_5 ( col_2(1) ) ) ;
+ create table tbl_1 ( col_5 char(135) , col_6 bit(17) default 50609 not null , col_7 char(202) default 'IoQWYoGdbbgBDlxpDHQ' , col_8 char(213) , col_9 time not null , primary key idx_6 ( col_6 ) clustered, unique key idx_7 ( col_5 ) ) ;
+ create table tbl_2 ( col_10 datetime default '1976-05-11' , col_11 datetime , col_12 float , col_13 double(56,29) default 18.0118 , col_14 char not null , primary key idx_8 ( col_14,col_13,col_10 ) clustered, key idx_9 ( col_11 ) ) ;
+ create table tbl_3 ( col_15 tinyint default -91 not null , col_16 bit(61) default 990141831018971350 not null , col_17 double(244,22) default 3985 not null , col_18 binary(32) default 'kxMlWqvpxXNBlxoU' , col_19 text(401) , primary key idx_10 ( col_18,col_19(4) ) clustered, key idx_11 ( col_17,col_18,col_19(2),col_15,col_16 ) , unique key idx_12 ( col_17 ) ) ;
+ create table tbl_4 ( col_20 double(230,16) default 8.49 not null , col_21 int unsigned not null , col_22 enum('Alice','Bob','Charlie','David') not null , col_23 float default 3066.13040283622 , col_24 datetime default '1980-10-27' not null , primary key idx_13 ( col_22,col_24 ) clustered, key idx_14 ( col_23,col_20 ) , key idx_15 ( col_24 ) , key idx_16 ( col_20 ) , unique key idx_17 ( col_24 ) , key idx_18 ( col_21 ) ) ;
load stats 's/with_cluster_index_tbl_0.json';
load stats 's/with_cluster_index_tbl_1.json';
load stats 's/with_cluster_index_tbl_2.json';
load stats 's/with_cluster_index_tbl_3.json';
load stats 's/with_cluster_index_tbl_4.json';
use wout_cluster_index;
- set @@tidb_enable_clustered_index = 0;
- create table tbl_0 ( col_0 decimal not null , col_1 blob(207) , col_2 text , col_3 datetime default '1986-07-01' , col_4 bigint unsigned default 1504335725690712365 , primary key idx_0 ( col_3,col_2(1),col_1(6) ) , key idx_1 ( col_3 ) , unique key idx_2 ( col_3 ) , unique key idx_3 ( col_0 ) , key idx_4 ( col_1(1),col_2(1) ) , key idx_5 ( col_2(1) ) ) ;
- create table tbl_1 ( col_5 char(135) , col_6 bit(17) default 50609 not null , col_7 char(202) default 'IoQWYoGdbbgBDlxpDHQ' , col_8 char(213) , col_9 time not null , primary key idx_6 ( col_6 ) , unique key idx_7 ( col_5 ) ) ;
- create table tbl_2 ( col_10 datetime default '1976-05-11' , col_11 datetime , col_12 float , col_13 double(56,29) default 18.0118 , col_14 char not null , primary key idx_8 ( col_14,col_13,col_10 ) , key idx_9 ( col_11 ) ) ;
- create table tbl_3 ( col_15 tinyint default -91 not null , col_16 bit(61) default 990141831018971350 not null , col_17 double(244,22) default 3985 not null , col_18 binary(32) default 'kxMlWqvpxXNBlxoU' , col_19 text(401) , primary key idx_10 ( col_18,col_19(4) ) , key idx_11 ( col_17,col_18,col_19(2),col_15,col_16 ) , unique key idx_12 ( col_17 ) ) ;
- create table tbl_4 ( col_20 double(230,16) default 8.49 not null , col_21 int unsigned not null , col_22 enum('Alice','Bob','Charlie','David') not null , col_23 float default 3066.13040283622 , col_24 datetime default '1980-10-27' not null , primary key idx_13 ( col_22,col_24 ) , key idx_14 ( col_23,col_20 ) , key idx_15 ( col_24 ) , key idx_16 ( col_20 ) , unique key idx_17 ( col_24 ) , key idx_18 ( col_21 ) ) ;
+ create table tbl_0 ( col_0 decimal not null , col_1 blob(207) , col_2 text , col_3 datetime default '1986-07-01' , col_4 bigint unsigned default 1504335725690712365 , primary key idx_0 ( col_3,col_2(1),col_1(6) ) nonclustered, key idx_1 ( col_3 ) , unique key idx_2 ( col_3 ) , unique key idx_3 ( col_0 ) , key idx_4 ( col_1(1),col_2(1) ) , key idx_5 ( col_2(1) ) ) ;
+ create table tbl_1 ( col_5 char(135) , col_6 bit(17) default 50609 not null , col_7 char(202) default 'IoQWYoGdbbgBDlxpDHQ' , col_8 char(213) , col_9 time not null , primary key idx_6 ( col_6 ) nonclustered, unique key idx_7 ( col_5 ) ) ;
+ create table tbl_2 ( col_10 datetime default '1976-05-11' , col_11 datetime , col_12 float , col_13 double(56,29) default 18.0118 , col_14 char not null , primary key idx_8 ( col_14,col_13,col_10 ) nonclustered, key idx_9 ( col_11 ) ) ;
+ create table tbl_3 ( col_15 tinyint default -91 not null , col_16 bit(61) default 990141831018971350 not null , col_17 double(244,22) default 3985 not null , col_18 binary(32) default 'kxMlWqvpxXNBlxoU' , col_19 text(401) , primary key idx_10 ( col_18,col_19(4) ) nonclustered, key idx_11 ( col_17,col_18,col_19(2),col_15,col_16 ) , unique key idx_12 ( col_17 ) ) ;
+ create table tbl_4 ( col_20 double(230,16) default 8.49 not null , col_21 int unsigned not null , col_22 enum('Alice','Bob','Charlie','David') not null , col_23 float default 3066.13040283622 , col_24 datetime default '1980-10-27' not null , primary key idx_13 ( col_22,col_24 ) nonclustered, key idx_14 ( col_23,col_20 ) , key idx_15 ( col_24 ) , key idx_16 ( col_20 ) , unique key idx_17 ( col_24 ) , key idx_18 ( col_21 ) ) ;
load stats 's/wout_cluster_index_tbl_0.json';
load stats 's/wout_cluster_index_tbl_1.json';
load stats 's/wout_cluster_index_tbl_2.json';
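The fixture change above drops the per-session @@tidb_enable_clustered_index toggle and instead spells the layout on each primary key with explicit CLUSTERED / NONCLUSTERED keywords, so the expected plans no longer depend on a session default. A minimal sketch of issuing such DDL from a Go client; the DSN and table names are illustrative:

```go
// Sketch: pinning the index layout per table with the explicit keywords
// used by the updated test fixtures. DSN and schema are illustrative.
package main

import (
	"database/sql"

	_ "github.com/go-sql-driver/mysql"
)

func main() {
	db, err := sql.Open("mysql", "root@tcp(127.0.0.1:4000)/test")
	if err != nil {
		panic(err)
	}
	defer db.Close()

	stmts := []string{
		// The layout is declared on the primary key itself, independent of
		// tidb_enable_clustered_index or the new
		// tidb_int_primary_key_default_as_clustered session variable.
		`create table t_c (a datetime, b char(8), primary key (a, b) clustered)`,
		`create table t_nc (a datetime, b char(8), primary key (a, b) nonclustered)`,
	}
	for _, s := range stmts {
		if _, err := db.Exec(s); err != nil {
			panic(err)
		}
	}
}
```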
2 changes: 1 addition & 1 deletion cmd/explaintest/r/explain.result
@@ -43,4 +43,4 @@ drop view if exists v;
create view v as select cast(replace(substring_index(substring_index("",',',1),':',-1),'"','') as CHAR(32)) as event_id;
desc v;
Field Type Null Key Default Extra
- event_id varchar(32) YES NULL
+ event_id varchar(32) NO NULL
