diff --git a/ddl/column_type_change_test.go b/ddl/column_type_change_test.go index d0ca4f10c35a6..15b2b45eec7a3 100644 --- a/ddl/column_type_change_test.go +++ b/ddl/column_type_change_test.go @@ -1678,7 +1678,7 @@ func (s *testColumnTypeChangeSuite) TestChangingColOriginDefaultValueAfterAddCol tk1 := testkit.NewTestKit(c, s.store) tk1.MustExec("use test") - tk.MustExec(fmt.Sprintf("set time_zone = 'UTC'")) + tk.MustExec("set time_zone = 'UTC'") tk.MustExec("drop table if exists t") tk.MustExec("create table t(a int, b int not null, unique key(a))") tk.MustExec("insert into t values(1, 1)") diff --git a/ddl/ddl_api.go b/ddl/ddl_api.go index 9b670911c4b71..6b31fc717ab93 100644 --- a/ddl/ddl_api.go +++ b/ddl/ddl_api.go @@ -18,7 +18,6 @@ package ddl import ( - "encoding/hex" "fmt" "math" "strconv" @@ -28,7 +27,6 @@ import ( "unicode/utf8" "github.com/cznic/mathutil" - "github.com/go-yaml/yaml" "github.com/pingcap/errors" "github.com/pingcap/failpoint" "github.com/pingcap/parser/ast" @@ -49,12 +47,10 @@ import ( "github.com/pingcap/tidb/sessionctx/variable" "github.com/pingcap/tidb/table" "github.com/pingcap/tidb/table/tables" - "github.com/pingcap/tidb/tablecodec" "github.com/pingcap/tidb/types" driver "github.com/pingcap/tidb/types/parser_driver" "github.com/pingcap/tidb/util" "github.com/pingcap/tidb/util/chunk" - "github.com/pingcap/tidb/util/codec" "github.com/pingcap/tidb/util/collate" "github.com/pingcap/tidb/util/dbterror" "github.com/pingcap/tidb/util/domainutil" @@ -5917,150 +5913,6 @@ func (d *ddl) AlterIndexVisibility(ctx sessionctx.Context, ident ast.Ident, inde return errors.Trace(err) } -func buildPlacementSpecReplicasAndConstraint(replicas uint64, cnstr string) ([]*placement.Rule, error) { - rules := []*placement.Rule{} - - cnstbytes := []byte(cnstr) - - constraints1 := []string{} - err1 := yaml.UnmarshalStrict(cnstbytes, &constraints1) - if err1 == nil { - // can not emit REPLICAS with an array label - if replicas == 0 { - return rules, errors.Errorf("array CONSTRAINTS should be with a positive REPLICAS") - } - - labelConstraints, err := placement.NewConstraints(constraints1) - if err != nil { - return rules, err - } - - rules = append(rules, &placement.Rule{ - Count: int(replicas), - Constraints: labelConstraints, - }) - - return rules, nil - } - - constraints2 := map[string]int{} - err2 := yaml.UnmarshalStrict(cnstbytes, &constraints2) - if err2 == nil { - ruleCnt := int(replicas) - - for labels, cnt := range constraints2 { - if cnt <= 0 { - return rules, errors.Errorf("count should be positive, but got %d", cnt) - } - - if replicas != 0 { - ruleCnt -= cnt - if ruleCnt < 0 { - return rules, errors.Errorf("REPLICAS should be larger or equal to the number of total replicas, but got %d", replicas) - } - } - - labelConstraints, err := placement.NewConstraints(strings.Split(strings.TrimSpace(labels), ",")) - if err != nil { - return rules, err - } - - rules = append(rules, &placement.Rule{ - Count: cnt, - Constraints: labelConstraints, - }) - } - - if ruleCnt > 0 { - rules = append(rules, &placement.Rule{ - Count: ruleCnt, - }) - } - - return rules, nil - } - - return nil, errors.Errorf("constraint is neither an array of string, nor a string-to-number map, due to:\n%s\n%s", err1, err2) -} - -func buildPlacementSpecs(bundle *placement.Bundle, specs []*ast.PlacementSpec) (*placement.Bundle, error) { - var err error - var spec *ast.PlacementSpec - - for _, rspec := range specs { - spec = rspec - - var role placement.PeerRoleType - switch spec.Role { - case ast.PlacementRoleFollower: 
- role = placement.Follower - case ast.PlacementRoleLeader: - if spec.Replicas == 0 { - spec.Replicas = 1 - } - if spec.Replicas > 1 { - err = errors.Errorf("replicas can only be 1 when the role is leader") - } - role = placement.Leader - case ast.PlacementRoleLearner: - role = placement.Learner - case ast.PlacementRoleVoter: - role = placement.Voter - default: - err = errors.Errorf("ROLE is not specified") - } - if err != nil { - break - } - - if spec.Tp == ast.PlacementAlter || spec.Tp == ast.PlacementDrop { - origLen := len(bundle.Rules) - newRules := bundle.Rules[:0] - for _, r := range bundle.Rules { - if r.Role != role { - newRules = append(newRules, r) - } - } - bundle.Rules = newRules - - // alter == drop + add new rules - if spec.Tp == ast.PlacementDrop { - // error if no rules will be dropped - if len(bundle.Rules) == origLen { - err = errors.Errorf("no rule of role '%s' to drop", role) - break - } - continue - } - } - - var newRules []*placement.Rule - newRules, err = buildPlacementSpecReplicasAndConstraint(spec.Replicas, spec.Constraints) - if err != nil { - break - } - for _, r := range newRules { - r.Role = role - bundle.Rules = append(bundle.Rules, r) - } - } - - if err != nil { - var sb strings.Builder - sb.Reset() - - restoreCtx := format.NewRestoreCtx(format.RestoreStringSingleQuotes|format.RestoreKeyWordLowercase|format.RestoreNameBackQuotes, &sb) - - if e := spec.Restore(restoreCtx); e != nil { - return nil, ErrInvalidPlacementSpec.GenWithStackByArgs("", err) - } - - return nil, ErrInvalidPlacementSpec.GenWithStackByArgs(sb.String(), err) - } - - return bundle, nil -} - func (d *ddl) AlterTableAlterPartition(ctx sessionctx.Context, ident ast.Ident, spec *ast.AlterTableSpec) (err error) { schema, tb, err := d.getSchemaAndTableByIdent(ctx, ident) if err != nil { @@ -6077,61 +5929,29 @@ func (d *ddl) AlterTableAlterPartition(ctx sessionctx.Context, ident ast.Ident, return errors.Trace(err) } - oldBundle := infoschema.GetBundle(d.infoCache.GetLatest(), []int64{partitionID, meta.ID, schema.ID}) + bundle := infoschema.GetBundle(d.infoCache.GetLatest(), []int64{partitionID, meta.ID, schema.ID}) - oldBundle.ID = placement.GroupID(partitionID) + bundle.ID = placement.GroupID(partitionID) - bundle, err := buildPlacementSpecs(oldBundle, spec.PlacementSpecs) + err = bundle.ApplyPlacementSpec(spec.PlacementSpecs) if err != nil { - return errors.Trace(err) - } + var sb strings.Builder + sb.Reset() - extraCnt := map[placement.PeerRoleType]int{} - startKey := hex.EncodeToString(codec.EncodeBytes(nil, tablecodec.GenTableRecordPrefix(partitionID))) - endKey := hex.EncodeToString(codec.EncodeBytes(nil, tablecodec.GenTableRecordPrefix(partitionID+1))) - newRules := bundle.Rules[:0] - for i, rule := range bundle.Rules { - // merge all empty constraints - if len(rule.Constraints) == 0 { - extraCnt[rule.Role] += rule.Count - continue - } - // refer to tidb#22065. - // add -engine=tiflash to every rule to avoid schedules to tiflash instances. 
- // placement rules in SQL is not compatible with `set tiflash replica` yet - if err := rule.Constraints.Add(placement.Constraint{ - Op: placement.NotIn, - Key: placement.EngineLabelKey, - Values: []string{placement.EngineLabelTiFlash}, - }); err != nil { - return errors.Trace(err) + restoreCtx := format.NewRestoreCtx(format.RestoreStringSingleQuotes|format.RestoreKeyWordLowercase|format.RestoreNameBackQuotes, &sb) + + if e := spec.Restore(restoreCtx); e != nil { + return ErrInvalidPlacementSpec.GenWithStackByArgs("", err) } - rule.GroupID = bundle.ID - rule.ID = strconv.Itoa(i) - rule.StartKeyHex = startKey - rule.EndKeyHex = endKey - newRules = append(newRules, rule) + return ErrInvalidPlacementSpec.GenWithStackByArgs(sb.String(), err) } - for role, cnt := range extraCnt { - if cnt <= 0 { - continue - } - // refer to tidb#22065. - newRules = append(newRules, &placement.Rule{ - GroupID: bundle.ID, - ID: string(role), - Role: role, - Count: cnt, - StartKeyHex: startKey, - EndKeyHex: endKey, - Constraints: []placement.Constraint{{ - Op: placement.NotIn, - Key: placement.EngineLabelKey, - Values: []string{placement.EngineLabelTiFlash}, - }}, - }) + + err = bundle.Tidy() + if err != nil { + return errors.Trace(err) } - bundle.Rules = newRules + bundle.Reset(partitionID) + if len(bundle.Rules) == 0 { bundle.Index = 0 bundle.Override = false diff --git a/ddl/partition.go b/ddl/partition.go index 4e55ec1779e21..566d98505d948 100644 --- a/ddl/partition.go +++ b/ddl/partition.go @@ -915,7 +915,7 @@ func dropRuleBundles(d *ddlCtx, physicalTableIDs []int64) error { for _, ID := range physicalTableIDs { oldBundle, ok := d.infoCache.GetLatest().BundleByName(placement.GroupID(ID)) if ok && !oldBundle.IsEmpty() { - bundles = append(bundles, placement.BuildPlacementDropBundle(ID)) + bundles = append(bundles, placement.NewBundle(ID)) } } err := infosync.PutRuleBundles(context.TODO(), bundles) @@ -1097,8 +1097,8 @@ func onTruncateTablePartition(d *ddlCtx, t *meta.Meta, job *model.Job) (int64, e for i, oldID := range oldIDs { oldBundle, ok := d.infoCache.GetLatest().BundleByName(placement.GroupID(oldID)) if ok && !oldBundle.IsEmpty() { - bundles = append(bundles, placement.BuildPlacementDropBundle(oldID)) - bundles = append(bundles, placement.BuildPlacementCopyBundle(oldBundle, newPartitions[i].ID)) + bundles = append(bundles, placement.NewBundle(oldID)) + bundles = append(bundles, oldBundle.Clone().Reset(newPartitions[i].ID)) } } @@ -1300,14 +1300,14 @@ func (w *worker) onExchangeTablePartition(d *ddlCtx, t *meta.Meta, job *model.Jo ntBundle, ntOK := d.infoCache.GetLatest().BundleByName(placement.GroupID(nt.ID)) ntOK = ntOK && !ntBundle.IsEmpty() if ptOK && ntOK { - bundles = append(bundles, placement.BuildPlacementCopyBundle(ptBundle, nt.ID)) - bundles = append(bundles, placement.BuildPlacementCopyBundle(ntBundle, partDef.ID)) + bundles = append(bundles, ptBundle.Clone().Reset(nt.ID)) + bundles = append(bundles, ntBundle.Clone().Reset(partDef.ID)) } else if ptOK { - bundles = append(bundles, placement.BuildPlacementDropBundle(partDef.ID)) - bundles = append(bundles, placement.BuildPlacementCopyBundle(ptBundle, nt.ID)) + bundles = append(bundles, placement.NewBundle(partDef.ID)) + bundles = append(bundles, ptBundle.Clone().Reset(nt.ID)) } else if ntOK { - bundles = append(bundles, placement.BuildPlacementDropBundle(nt.ID)) - bundles = append(bundles, placement.BuildPlacementCopyBundle(ntBundle, partDef.ID)) + bundles = append(bundles, placement.NewBundle(nt.ID)) + bundles = append(bundles, 
ntBundle.Clone().Reset(partDef.ID)) } err = infosync.PutRuleBundles(context.TODO(), bundles) if err != nil { diff --git a/ddl/placement/bundle.go b/ddl/placement/bundle.go new file mode 100644 index 0000000000000..1bb7539d79ad0 --- /dev/null +++ b/ddl/placement/bundle.go @@ -0,0 +1,203 @@ +// Copyright 2021 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package placement + +import ( + "encoding/hex" + "encoding/json" + "errors" + "fmt" + "strconv" + + "github.com/pingcap/failpoint" + "github.com/pingcap/parser/ast" + "github.com/pingcap/tidb/tablecodec" + "github.com/pingcap/tidb/util/codec" +) + +// Refer to https://github.com/tikv/pd/issues/2701 . +// IMO, it is indeed not bad to have a copy of definition. +// After all, placement rules are communicated using an HTTP API. Loose +// coupling is a good feature. + +// Bundle is a group of all rules and configurations. It is used to support rule cache. +type Bundle struct { + ID string `json:"group_id"` + Index int `json:"group_index"` + Override bool `json:"group_override"` + Rules []*Rule `json:"rules"` +} + +// NewBundle will create a bundle with the provided ID. +// Note that you should never pass negative id. +func NewBundle(id int64) *Bundle { + return &Bundle{ + ID: GroupID(id), + } +} + +// ApplyPlacementSpec will apply actions defined in PlacementSpec to the bundle. +func (b *Bundle) ApplyPlacementSpec(specs []*ast.PlacementSpec) error { + for _, spec := range specs { + var role PeerRoleType + switch spec.Role { + case ast.PlacementRoleFollower: + role = Follower + case ast.PlacementRoleLeader: + if spec.Replicas == 0 { + spec.Replicas = 1 + } + if spec.Replicas > 1 { + return ErrLeaderReplicasMustOne + } + role = Leader + case ast.PlacementRoleLearner: + role = Learner + case ast.PlacementRoleVoter: + role = Voter + default: + return ErrMissingRoleField + } + + if spec.Tp == ast.PlacementAlter || spec.Tp == ast.PlacementDrop { + origLen := len(b.Rules) + newRules := b.Rules[:0] + for _, r := range b.Rules { + if r.Role != role { + newRules = append(newRules, r) + } + } + b.Rules = newRules + + // alter == drop + add new rules + if spec.Tp == ast.PlacementDrop { + // error if no rules will be dropped + if len(b.Rules) == origLen { + return fmt.Errorf("%w: %s", ErrNoRulesToDrop, role) + } + continue + } + } + + var newRules []*Rule + newRules, err := NewRules(spec.Replicas, spec.Constraints) + if err != nil { + return err + } + for _, r := range newRules { + r.Role = role + b.Rules = append(b.Rules, r) + } + } + + return nil +} + +// String implements fmt.Stringer. +func (b *Bundle) String() string { + t, err := json.Marshal(b) + failpoint.Inject("MockMarshalFailure", func(val failpoint.Value) { + if _, ok := val.(bool); ok { + err = errors.New("test") + } + }) + if err != nil { + return "" + } + return string(t) +} + +// Tidy will post optimize Rules, trying to generate rules that suits PD. 
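+// It drops rules with a non-positive count, merges rules that carry no
+// constraints into one rule per role, and adds a `-engine=tiflash` constraint
+// to every remaining rule so replicas are never scheduled onto TiFlash
+// instances (see tidb#22065).
+//
+// A typical call sequence, mirroring its use in ddl.AlterTableAlterPartition
+// earlier in this change (error handling omitted), is:
+//
+//	bundle.ApplyPlacementSpec(specs)
+//	bundle.Tidy()
+//	bundle.Reset(partitionID)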
+func (b *Bundle) Tidy() error { + extraCnt := map[PeerRoleType]int{} + newRules := b.Rules[:0] + for i, rule := range b.Rules { + // useless Rule + if rule.Count <= 0 { + continue + } + // merge all empty constraints + if len(rule.Constraints) == 0 { + extraCnt[rule.Role] += rule.Count + continue + } + // refer to tidb#22065. + // add -engine=tiflash to every rule to avoid schedules to tiflash instances. + // placement rules in SQL is not compatible with `set tiflash replica` yet + err := rule.Constraints.Add(Constraint{ + Op: NotIn, + Key: EngineLabelKey, + Values: []string{EngineLabelTiFlash}, + }) + if err != nil { + return err + } + // Constraints.Add() will automatically avoid duplication + // if -engine=tiflash is added and there is only one constraint + // then it must be -engine=tiflash + // it is seen as an empty constraint, so merge it + if len(rule.Constraints) == 1 { + extraCnt[rule.Role] += rule.Count + continue + } + rule.ID = strconv.Itoa(i) + newRules = append(newRules, rule) + } + for role, cnt := range extraCnt { + // add -engine=tiflash, refer to tidb#22065. + newRules = append(newRules, &Rule{ + ID: string(role), + Role: role, + Count: cnt, + Constraints: []Constraint{{ + Op: NotIn, + Key: EngineLabelKey, + Values: []string{EngineLabelTiFlash}, + }}, + }) + } + b.Rules = newRules + return nil +} + +// Reset resets the bundle ID and keyrange of all rules. +func (b *Bundle) Reset(newID int64) *Bundle { + b.ID = GroupID(newID) + startKey := hex.EncodeToString(codec.EncodeBytes(nil, tablecodec.GenTableRecordPrefix(newID))) + endKey := hex.EncodeToString(codec.EncodeBytes(nil, tablecodec.GenTableRecordPrefix(newID+1))) + for _, rule := range b.Rules { + rule.GroupID = b.ID + rule.StartKeyHex = startKey + rule.EndKeyHex = endKey + } + return b +} + +// Clone is used to duplicate a bundle. +func (b *Bundle) Clone() *Bundle { + newBundle := &Bundle{} + *newBundle = *b + if len(b.Rules) > 0 { + newBundle.Rules = make([]*Rule, 0, len(b.Rules)) + for i := range b.Rules { + newBundle.Rules = append(newBundle.Rules, b.Rules[i].Clone()) + } + } + return newBundle +} + +// IsEmpty is used to check if a bundle is empty. +func (b *Bundle) IsEmpty() bool { + return len(b.Rules) == 0 && b.Index == 0 && !b.Override +} diff --git a/ddl/placement/bundle_test.go b/ddl/placement/bundle_test.go new file mode 100644 index 0000000000000..c63fbae74e670 --- /dev/null +++ b/ddl/placement/bundle_test.go @@ -0,0 +1,378 @@ +// Copyright 2021 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package placement + +import ( + "encoding/hex" + "errors" + + . 
"github.com/pingcap/check" + "github.com/pingcap/failpoint" + "github.com/pingcap/parser/ast" + "github.com/pingcap/tidb/tablecodec" + "github.com/pingcap/tidb/util/codec" +) + +var _ = Suite(&testBundleSuite{}) + +type testBundleSuite struct{} + +func (s *testBundleSuite) TestEmpty(c *C) { + bundle := &Bundle{ID: GroupID(1)} + c.Assert(bundle.IsEmpty(), IsTrue) + + bundle = &Bundle{ID: GroupID(1), Index: 1} + c.Assert(bundle.IsEmpty(), IsFalse) + + bundle = &Bundle{ID: GroupID(1), Override: true} + c.Assert(bundle.IsEmpty(), IsFalse) + + bundle = &Bundle{ID: GroupID(1), Rules: []*Rule{{ID: "434"}}} + c.Assert(bundle.IsEmpty(), IsFalse) + + bundle = &Bundle{ID: GroupID(1), Index: 1, Override: true} + c.Assert(bundle.IsEmpty(), IsFalse) +} + +func (s *testBundleSuite) TestClone(c *C) { + bundle := &Bundle{ID: GroupID(1), Rules: []*Rule{{ID: "434"}}} + + newBundle := bundle.Clone() + newBundle.ID = GroupID(2) + newBundle.Rules[0] = &Rule{ID: "121"} + + c.Assert(bundle, DeepEquals, &Bundle{ID: GroupID(1), Rules: []*Rule{{ID: "434"}}}) + c.Assert(newBundle, DeepEquals, &Bundle{ID: GroupID(2), Rules: []*Rule{{ID: "121"}}}) +} + +func (s *testBundleSuite) TestApplyPlacmentSpec(c *C) { + type TestCase struct { + name string + input []*ast.PlacementSpec + output []*Rule + err error + } + var tests []TestCase + + tests = append(tests, TestCase{ + name: "empty", + input: []*ast.PlacementSpec{}, + output: []*Rule{}, + }) + + rules, err := NewRules(3, `["+zone=sh", "+zone=sh"]`) + c.Assert(err, IsNil) + c.Assert(rules, HasLen, 1) + rules[0].Role = Voter + tests = append(tests, TestCase{ + name: "add voter array", + input: []*ast.PlacementSpec{{ + Role: ast.PlacementRoleVoter, + Tp: ast.PlacementAdd, + Replicas: 3, + Constraints: `["+zone=sh", "+zone=sh"]`, + }}, + output: rules, + }) + + rules, err = NewRules(3, `["+zone=sh", "+zone=sh"]`) + c.Assert(err, IsNil) + c.Assert(rules, HasLen, 1) + rules[0].Role = Learner + tests = append(tests, TestCase{ + name: "add learner array", + input: []*ast.PlacementSpec{{ + Role: ast.PlacementRoleLearner, + Tp: ast.PlacementAdd, + Replicas: 3, + Constraints: `["+zone=sh", "+zone=sh"]`, + }}, + output: rules, + }) + + rules, err = NewRules(3, `["+zone=sh", "+zone=sh"]`) + c.Assert(err, IsNil) + c.Assert(rules, HasLen, 1) + rules[0].Role = Follower + tests = append(tests, TestCase{ + name: "add follower array", + input: []*ast.PlacementSpec{{ + Role: ast.PlacementRoleFollower, + Tp: ast.PlacementAdd, + Replicas: 3, + Constraints: `["+zone=sh", "+zone=sh"]`, + }}, + output: rules, + }) + + tests = append(tests, TestCase{ + name: "add invalid constraints", + input: []*ast.PlacementSpec{{ + Role: ast.PlacementRoleVoter, + Tp: ast.PlacementAdd, + Replicas: 3, + Constraints: "ne", + }}, + err: ErrInvalidConstraintsFormat, + }) + + tests = append(tests, TestCase{ + name: "add empty role", + input: []*ast.PlacementSpec{{ + Tp: ast.PlacementAdd, + Replicas: 3, + Constraints: "", + }}, + err: ErrMissingRoleField, + }) + + tests = append(tests, TestCase{ + name: "add multiple leaders", + input: []*ast.PlacementSpec{{ + Role: ast.PlacementRoleLeader, + Tp: ast.PlacementAdd, + Replicas: 3, + Constraints: "", + }}, + err: ErrLeaderReplicasMustOne, + }) + + rules, err = NewRules(1, "") + c.Assert(err, IsNil) + c.Assert(rules, HasLen, 1) + rules[0].Role = Leader + tests = append(tests, TestCase{ + name: "omit leader field", + input: []*ast.PlacementSpec{{ + Role: ast.PlacementRoleLeader, + Tp: ast.PlacementAdd, + Constraints: "", + }}, + output: rules, + }) + + rules, err = 
NewRules(3, `["-zone=sh","+zone=bj"]`) + c.Assert(err, IsNil) + c.Assert(rules, HasLen, 1) + rules[0].Role = Follower + tests = append(tests, TestCase{ + name: "drop", + input: []*ast.PlacementSpec{ + { + Role: ast.PlacementRoleFollower, + Tp: ast.PlacementAdd, + Replicas: 3, + Constraints: `["- zone=sh", "+zone = bj"]`, + }, + { + Role: ast.PlacementRoleVoter, + Tp: ast.PlacementAdd, + Replicas: 3, + Constraints: `["+ zone=sh", "-zone = bj"]`, + }, + { + Role: ast.PlacementRoleVoter, + Tp: ast.PlacementDrop, + }, + }, + output: rules, + }) + + tests = append(tests, TestCase{ + name: "drop unexisted", + input: []*ast.PlacementSpec{{ + Role: ast.PlacementRoleLeader, + Tp: ast.PlacementDrop, + Constraints: "", + }}, + err: ErrNoRulesToDrop, + }) + + rules1, err := NewRules(3, `["-zone=sh","+zone=bj"]`) + c.Assert(err, IsNil) + c.Assert(rules1, HasLen, 1) + rules1[0].Role = Follower + rules2, err := NewRules(3, `["+zone=sh","-zone=bj"]`) + c.Assert(err, IsNil) + c.Assert(rules2, HasLen, 1) + rules2[0].Role = Voter + tests = append(tests, TestCase{ + name: "alter", + input: []*ast.PlacementSpec{ + { + Role: ast.PlacementRoleFollower, + Tp: ast.PlacementAdd, + Replicas: 3, + Constraints: `["- zone=sh", "+zone = bj"]`, + }, + { + Role: ast.PlacementRoleVoter, + Tp: ast.PlacementAdd, + Replicas: 3, + Constraints: `["- zone=sh", "+zone = bj"]`, + }, + { + Role: ast.PlacementRoleVoter, + Tp: ast.PlacementAlter, + Replicas: 3, + Constraints: `["+ zone=sh", "-zone = bj"]`, + }, + }, + output: append(rules1, rules2...), + }) + + for _, t := range tests { + comment := Commentf("%s", t.name) + bundle := &Bundle{} + err := bundle.ApplyPlacementSpec(t.input) + if t.err == nil { + c.Assert(err, IsNil) + matchRules(t.output, bundle.Rules, comment.CheckCommentString(), c) + } else { + c.Assert(errors.Is(err, t.err), IsTrue, comment) + } + } +} + +func (s *testBundleSuite) TestString(c *C) { + bundle := &Bundle{ + ID: GroupID(1), + } + + rules1, err := NewRules(3, `["+zone=sh", "+zone=sh"]`) + c.Assert(err, IsNil) + rules2, err := NewRules(4, `["-zone=sh", "+zone=bj"]`) + c.Assert(err, IsNil) + bundle.Rules = append(rules1, rules2...) 
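+	// The rules are marshalled in insertion order; group_id, id, key range and
+	// role stay empty here because neither Reset nor Tidy has been called yet.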
+ + c.Assert(bundle.String(), Equals, `{"group_id":"TiDB_DDL_1","group_index":0,"group_override":false,"rules":[{"group_id":"","id":"","start_key":"","end_key":"","role":"","count":3,"label_constraints":[{"key":"zone","op":"in","values":["sh"]}]},{"group_id":"","id":"","start_key":"","end_key":"","role":"","count":4,"label_constraints":[{"key":"zone","op":"notIn","values":["sh"]},{"key":"zone","op":"in","values":["bj"]}]}]}`) + + c.Assert(failpoint.Enable("github.com/pingcap/tidb/ddl/placement/MockMarshalFailure", `return(true)`), IsNil) + defer func() { + c.Assert(failpoint.Disable("github.com/pingcap/tidb/ddl/placement/MockMarshalFailure"), IsNil) + }() + c.Assert(bundle.String(), Equals, "") +} + +func (s *testBundleSuite) TestNew(c *C) { + c.Assert(NewBundle(3), DeepEquals, &Bundle{ID: GroupID(3)}) + c.Assert(NewBundle(-1), DeepEquals, &Bundle{ID: GroupID(-1)}) +} + +func (s *testBundleSuite) TestReset(c *C) { + bundle := &Bundle{ + ID: GroupID(1), + } + + rules, err := NewRules(3, `["+zone=sh", "+zone=sh"]`) + c.Assert(err, IsNil) + bundle.Rules = rules + + bundle.Reset(3) + c.Assert(bundle.ID, Equals, GroupID(3)) + c.Assert(bundle.Rules, HasLen, 1) + c.Assert(bundle.Rules[0].GroupID, Equals, bundle.ID) + + startKey := hex.EncodeToString(codec.EncodeBytes(nil, tablecodec.GenTableRecordPrefix(3))) + c.Assert(bundle.Rules[0].StartKeyHex, Equals, startKey) + + endKey := hex.EncodeToString(codec.EncodeBytes(nil, tablecodec.GenTableRecordPrefix(4))) + c.Assert(bundle.Rules[0].EndKeyHex, Equals, endKey) +} + +func (s *testBundleSuite) TestTidy(c *C) { + bundle := &Bundle{ + ID: GroupID(1), + } + + rules0, err := NewRules(1, `["+zone=sh", "+zone=sh"]`) + c.Assert(err, IsNil) + c.Assert(rules0, HasLen, 1) + rules0[0].Count = 0 + rules1, err := NewRules(4, `["-zone=sh", "+zone=bj"]`) + c.Assert(err, IsNil) + c.Assert(rules1, HasLen, 1) + rules2, err := NewRules(4, `["-zone=sh", "+zone=bj"]`) + c.Assert(err, IsNil) + bundle.Rules = append(bundle.Rules, rules0...) + bundle.Rules = append(bundle.Rules, rules1...) + bundle.Rules = append(bundle.Rules, rules2...) + + err = bundle.Tidy() + c.Assert(err, IsNil) + c.Assert(bundle.Rules, HasLen, 2) + c.Assert(bundle.Rules[0].ID, Equals, "1") + c.Assert(bundle.Rules[0].Constraints, HasLen, 3) + c.Assert(bundle.Rules[0].Constraints[2], DeepEquals, Constraint{ + Op: NotIn, + Key: EngineLabelKey, + Values: []string{EngineLabelTiFlash}, + }) + c.Assert(bundle.Rules[1].ID, Equals, "2") + + // merge + rules3, err := NewRules(4, "") + c.Assert(err, IsNil) + c.Assert(rules3, HasLen, 1) + rules3[0].Role = Follower + + rules4, err := NewRules(5, "") + c.Assert(err, IsNil) + c.Assert(rules4, HasLen, 1) + rules4[0].Role = Follower + + rules0[0].Role = Voter + bundle.Rules = append(bundle.Rules, rules0...) + bundle.Rules = append(bundle.Rules, rules3...) + bundle.Rules = append(bundle.Rules, rules4...) 
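+	// After Tidy, the zero-count voter rule is dropped and the two
+	// constraint-free follower rules (4 + 5 replicas) are merged into a single
+	// follower rule of count 9 carrying only the -engine=tiflash constraint,
+	// which is what chkfunc verifies below.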
+ + chkfunc := func() { + c.Assert(err, IsNil) + c.Assert(bundle.Rules, HasLen, 3) + c.Assert(bundle.Rules[0].ID, Equals, "0") + c.Assert(bundle.Rules[1].ID, Equals, "1") + c.Assert(bundle.Rules[2].ID, Equals, "follower") + c.Assert(bundle.Rules[2].Count, Equals, 9) + c.Assert(bundle.Rules[2].Constraints, DeepEquals, Constraints{ + { + Op: NotIn, + Key: EngineLabelKey, + Values: []string{EngineLabelTiFlash}, + }, + }) + } + err = bundle.Tidy() + chkfunc() + + // tidy again + // it should be stable + err = bundle.Tidy() + chkfunc() + + // tidy again + // it should be stable + bundle2 := bundle.Clone() + err = bundle2.Tidy() + c.Assert(err, IsNil) + c.Assert(bundle2, DeepEquals, bundle) + + bundle.Rules[2].Constraints = append(bundle.Rules[2].Constraints, Constraint{ + Op: In, + Key: EngineLabelKey, + Values: []string{EngineLabelTiFlash}, + }) + c.Log(bundle.Rules[2]) + err = bundle.Tidy() + c.Assert(errors.Is(err, ErrConflictingConstraints), IsTrue) +} diff --git a/ddl/placement/errors.go b/ddl/placement/errors.go index 95fce4591c961..26b60621d0f44 100644 --- a/ddl/placement/errors.go +++ b/ddl/placement/errors.go @@ -30,4 +30,14 @@ var ( ErrInvalidConstraintsFormat = errors.New("invalid label constraints format") // ErrInvalidConstraintsRelicas is from rule.go. ErrInvalidConstraintsRelicas = errors.New("label constraints with invalid REPLICAS") + // ErrInvalidBundleID is from bundle.go. + ErrInvalidBundleID = errors.New("invalid bundle ID") + // ErrInvalidBundleIDFormat is from bundle.go. + ErrInvalidBundleIDFormat = errors.New("invalid bundle ID format") + // ErrLeaderReplicasMustOne is from bundle.go. + ErrLeaderReplicasMustOne = errors.New("REPLICAS must be 1 if ROLE=leader") + // ErrMissingRoleField is from bundle.go. + ErrMissingRoleField = errors.New("the ROLE field is not specified") + // ErrNoRulesToDrop is from bundle.go. + ErrNoRulesToDrop = errors.New("no rule of such role to drop") ) diff --git a/ddl/placement/types.go b/ddl/placement/types.go deleted file mode 100644 index 72093a2c19c78..0000000000000 --- a/ddl/placement/types.go +++ /dev/null @@ -1,91 +0,0 @@ -// Copyright 2020 PingCAP, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// See the License for the specific language governing permissions and -// limitations under the License. - -package placement - -import ( - "encoding/json" -) - -// Refer to https://github.com/tikv/pd/issues/2701 . -// IMO, it is indeed not bad to have a copy of definition. -// After all, placement rules are communicated using an HTTP API. Loose -// coupling is a good feature. - -// Bundle is a group of all rules and configurations. It is used to support rule cache. -type Bundle struct { - ID string `json:"group_id"` - Index int `json:"group_index"` - Override bool `json:"group_override"` - Rules []*Rule `json:"rules"` -} - -func (b *Bundle) String() string { - t, err := json.Marshal(b) - if err != nil { - return "" - } - return string(t) -} - -// Clone is used to duplicate a bundle. 
-func (b *Bundle) Clone() *Bundle { - newBundle := &Bundle{} - *newBundle = *b - if len(b.Rules) > 0 { - newBundle.Rules = make([]*Rule, 0, len(b.Rules)) - for i := range b.Rules { - newBundle.Rules = append(newBundle.Rules, b.Rules[i].Clone()) - } - } - return newBundle -} - -// IsEmpty is used to check if a bundle is empty. -func (b *Bundle) IsEmpty() bool { - return len(b.Rules) == 0 && b.Index == 0 && !b.Override -} - -// RuleOpType indicates the operation type. -type RuleOpType string - -const ( - // RuleOpAdd a placement rule, only need to specify the field *Rule. - RuleOpAdd RuleOpType = "add" - // RuleOpDel a placement rule, only need to specify the field `GroupID`, `ID`, `MatchID`. - RuleOpDel RuleOpType = "del" -) - -// RuleOp is for batching placement rule actions. -type RuleOp struct { - *Rule - Action RuleOpType `json:"action"` - DeleteByIDPrefix bool `json:"delete_by_id_prefix"` -} - -// Clone is used to clone a RuleOp that is safe to modify, without affecting the old RuleOp. -func (op *RuleOp) Clone() *RuleOp { - newOp := &RuleOp{} - *newOp = *op - newOp.Rule = &Rule{} - *newOp.Rule = *op.Rule - return newOp -} - -func (op *RuleOp) String() string { - b, err := json.Marshal(op) - if err != nil { - return "" - } - return string(b) -} diff --git a/ddl/placement/types_test.go b/ddl/placement/types_test.go deleted file mode 100644 index 93ed1a5a80f43..0000000000000 --- a/ddl/placement/types_test.go +++ /dev/null @@ -1,50 +0,0 @@ -// Copyright 2020 PingCAP, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// See the License for the specific language governing permissions and -// limitations under the License. - -package placement - -import ( - . "github.com/pingcap/check" -) - -var _ = Suite(&testBundleSuite{}) - -type testBundleSuite struct{} - -func (t *testBundleSuite) TestEmpty(c *C) { - bundle := &Bundle{ID: GroupID(1)} - c.Assert(bundle.IsEmpty(), IsTrue) - - bundle = &Bundle{ID: GroupID(1), Index: 1} - c.Assert(bundle.IsEmpty(), IsFalse) - - bundle = &Bundle{ID: GroupID(1), Override: true} - c.Assert(bundle.IsEmpty(), IsFalse) - - bundle = &Bundle{ID: GroupID(1), Rules: []*Rule{{ID: "434"}}} - c.Assert(bundle.IsEmpty(), IsFalse) - - bundle = &Bundle{ID: GroupID(1), Index: 1, Override: true} - c.Assert(bundle.IsEmpty(), IsFalse) -} - -func (t *testBundleSuite) TestClone(c *C) { - bundle := &Bundle{ID: GroupID(1), Rules: []*Rule{{ID: "434"}}} - - newBundle := bundle.Clone() - newBundle.ID = GroupID(2) - newBundle.Rules[0] = &Rule{ID: "121"} - - c.Assert(bundle, DeepEquals, &Bundle{ID: GroupID(1), Rules: []*Rule{{ID: "434"}}}) - c.Assert(newBundle, DeepEquals, &Bundle{ID: GroupID(2), Rules: []*Rule{{ID: "121"}}}) -} diff --git a/ddl/placement_rule_test.go b/ddl/placement_rule_test.go deleted file mode 100644 index a9a916cb5a199..0000000000000 --- a/ddl/placement_rule_test.go +++ /dev/null @@ -1,409 +0,0 @@ -// Copyright 2020 PingCAP, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// See the License for the specific language governing permissions and -// limitations under the License. - -package ddl - -import ( - "encoding/hex" - "encoding/json" - - . "github.com/pingcap/check" - "github.com/pingcap/parser/ast" - "github.com/pingcap/tidb/ddl/placement" - "github.com/pingcap/tidb/tablecodec" - "github.com/pingcap/tidb/util/codec" -) - -var _ = Suite(&testPlacementSuite{}) - -type testPlacementSuite struct { -} - -func (s *testPlacementSuite) TestPlacementBuild(c *C) { - tests := []struct { - input []*ast.PlacementSpec - bundle *placement.Bundle - output []*placement.Rule - err string - }{ - { - input: []*ast.PlacementSpec{}, - output: []*placement.Rule{}, - }, - - { - input: []*ast.PlacementSpec{{ - Role: ast.PlacementRoleVoter, - Tp: ast.PlacementAdd, - Replicas: 3, - Constraints: `["+zone=sh", "+zone=sh"]`, - }}, - output: []*placement.Rule{ - { - Role: placement.Voter, - Count: 3, - Constraints: []placement.Constraint{ - {Key: "zone", Op: "in", Values: []string{"sh"}}, - }, - }, - }, - }, - - { - input: []*ast.PlacementSpec{{ - Role: ast.PlacementRoleVoter, - Tp: ast.PlacementAdd, - Replicas: 3, - Constraints: "", - }}, - output: []*placement.Rule{{ - Role: placement.Voter, - Count: 3, - Constraints: []placement.Constraint{}, - }}, - }, - - { - input: []*ast.PlacementSpec{{ - Role: ast.PlacementRoleVoter, - Tp: ast.PlacementAdd, - Constraints: `{"+zone=sh,+zone=sh": 2, "+zone=sh": 1}`, - }}, - output: []*placement.Rule{ - { - Role: placement.Voter, - Count: 1, - Constraints: []placement.Constraint{ - {Key: "zone", Op: "in", Values: []string{"sh"}}, - }, - }, - { - Role: placement.Voter, - Count: 2, - Constraints: []placement.Constraint{ - {Key: "zone", Op: "in", Values: []string{"sh"}}, - }, - }, - }, - }, - - { - input: []*ast.PlacementSpec{{ - Role: ast.PlacementRoleVoter, - Tp: ast.PlacementAdd, - Replicas: 3, - Constraints: `["- zone=sh", "-zone = bj"]`, - }}, - output: []*placement.Rule{ - { - Role: placement.Voter, - Count: 3, - Constraints: []placement.Constraint{ - {Key: "zone", Op: "notIn", Values: []string{"sh"}}, - {Key: "zone", Op: "notIn", Values: []string{"bj"}}, - }, - }, - }, - }, - - { - input: []*ast.PlacementSpec{{ - Role: ast.PlacementRoleVoter, - Tp: ast.PlacementAdd, - Replicas: 3, - Constraints: `["+ zone=sh", "-zone = bj"]`, - }}, - output: []*placement.Rule{ - { - Role: placement.Voter, - Count: 3, - Constraints: []placement.Constraint{ - {Key: "zone", Op: "in", Values: []string{"sh"}}, - {Key: "zone", Op: "notIn", Values: []string{"bj"}}, - }, - }, - }, - }, - - { - input: []*ast.PlacementSpec{ - { - Role: ast.PlacementRoleVoter, - Tp: ast.PlacementAdd, - Replicas: 3, - Constraints: `["+ zone=sh", "-zone = bj"]`, - }, - { - Role: ast.PlacementRoleFollower, - Tp: ast.PlacementAdd, - Replicas: 2, - Constraints: `["- zone=sh", "+zone = bj"]`, - }, - }, - output: []*placement.Rule{ - { - Role: placement.Voter, - Count: 3, - Constraints: []placement.Constraint{ - {Key: "zone", Op: "in", Values: []string{"sh"}}, - {Key: "zone", Op: "notIn", Values: []string{"bj"}}, - }, - }, - { - Role: placement.Follower, - Count: 2, - Constraints: []placement.Constraint{ - {Key: "zone", Op: "notIn", Values: []string{"sh"}}, - {Key: "zone", Op: "in", Values: []string{"bj"}}, - }, - }, - }, - }, - - { - input: 
[]*ast.PlacementSpec{ - { - Role: ast.PlacementRoleVoter, - Tp: ast.PlacementAdd, - Replicas: 3, - Constraints: `["+ zone=sh", "-zone = bj"]`, - }, - { - Role: ast.PlacementRoleVoter, - Tp: ast.PlacementAlter, - Replicas: 2, - Constraints: `["- zone=sh", "+zone = bj"]`, - }, - }, - output: []*placement.Rule{ - { - Role: placement.Voter, - Count: 2, - Constraints: []placement.Constraint{ - {Key: "zone", Op: "notIn", Values: []string{"sh"}}, - {Key: "zone", Op: "in", Values: []string{"bj"}}, - }, - }, - }, - }, - - { - input: []*ast.PlacementSpec{ - { - Role: ast.PlacementRoleVoter, - Tp: ast.PlacementAdd, - Replicas: 3, - Constraints: `["+ zone=sh", "-zone = bj"]`, - }, - { - Role: ast.PlacementRoleVoter, - Tp: ast.PlacementAlter, - Replicas: 3, - Constraints: `{"- zone=sh":1, "+zone = bj":1}`, - }, - }, - output: []*placement.Rule{ - { - Role: placement.Voter, - Count: 1, - Constraints: []placement.Constraint{{Key: "zone", Op: "notIn", Values: []string{"sh"}}}, - }, - { - Role: placement.Voter, - Count: 1, - Constraints: []placement.Constraint{{Key: "zone", Op: "in", Values: []string{"bj"}}}, - }, - { - Role: placement.Voter, - Count: 1, - }, - }, - }, - - { - input: []*ast.PlacementSpec{ - { - Role: ast.PlacementRoleVoter, - Tp: ast.PlacementAdd, - Replicas: 3, - Constraints: `["+ zone=sh", "-zone = bj"]`, - }, - { - Role: ast.PlacementRoleVoter, - Tp: ast.PlacementDrop, - }, - }, - output: []*placement.Rule{}, - }, - - { - input: []*ast.PlacementSpec{ - { - Role: ast.PlacementRoleLearner, - Tp: ast.PlacementDrop, - }, - }, - bundle: &placement.Bundle{Rules: []*placement.Rule{ - {Role: placement.Learner}, - {Role: placement.Voter}, - {Role: placement.Learner}, - {Role: placement.Voter}, - }}, - output: []*placement.Rule{ - {Role: placement.Voter}, - {Role: placement.Voter}, - }, - }, - - { - input: []*ast.PlacementSpec{ - { - Role: ast.PlacementRoleLearner, - Tp: ast.PlacementDrop, - }, - { - Role: ast.PlacementRoleVoter, - Tp: ast.PlacementDrop, - }, - }, - bundle: &placement.Bundle{Rules: []*placement.Rule{ - {Role: placement.Learner}, - {Role: placement.Voter}, - {Role: placement.Learner}, - {Role: placement.Voter}, - }}, - output: []*placement.Rule{}, - }, - - { - input: []*ast.PlacementSpec{ - { - Role: ast.PlacementRoleLearner, - Tp: ast.PlacementDrop, - }, - }, - err: ".*no rule of role 'learner' to drop.*", - }, - - { - input: []*ast.PlacementSpec{{ - Role: ast.PlacementRoleVoter, - Tp: ast.PlacementAdd, - Replicas: 3, - Constraints: `['+ zone=sh', '-zone = bj']`, - }}, - output: []*placement.Rule{ - { - Role: placement.Voter, - Count: 3, - Constraints: []placement.Constraint{ - {Key: "zone", Op: "in", Values: []string{"sh"}}, - {Key: "zone", Op: "notIn", Values: []string{"bj"}}, - }, - }, - }, - }, - } - for i, t := range tests { - var bundle *placement.Bundle - if t.bundle == nil { - bundle = &placement.Bundle{Rules: []*placement.Rule{}} - } else { - bundle = t.bundle - } - out, err := buildPlacementSpecs(bundle, t.input) - if t.err == "" { - c.Assert(err, IsNil) - expected, err := json.Marshal(t.output) - c.Assert(err, IsNil) - got, err := json.Marshal(out.Rules) - c.Assert(err, IsNil) - comment := Commentf("%d test\nexpected %s\nbut got %s", i, expected, got) - c.Assert(len(t.output), Equals, len(out.Rules), comment) - for _, r1 := range t.output { - found := false - for _, r2 := range out.Rules { - if ok, _ := DeepEquals.Check([]interface{}{r1, r2}, nil); ok { - found = true - break - } - } - c.Assert(found, IsTrue, comment) - } - } else { - c.Assert(err, ErrorMatches, t.err) 
- } - } -} - -func (s *testPlacementSuite) TestPlacementBuildDrop(c *C) { - tests := []struct { - input int64 - output *placement.Bundle - }{ - { - input: 2, - output: &placement.Bundle{ID: placement.GroupID(2)}, - }, - { - input: 1, - output: &placement.Bundle{ID: placement.GroupID(1)}, - }, - } - for _, t := range tests { - out := placement.BuildPlacementDropBundle(t.input) - c.Assert(t.output, DeepEquals, out) - } -} - -func (s *testPlacementSuite) TestPlacementBuildTruncate(c *C) { - bundle := &placement.Bundle{ - ID: placement.GroupID(-1), - Rules: []*placement.Rule{{GroupID: placement.GroupID(-1)}}, - } - - tests := []struct { - input int64 - output *placement.Bundle - }{ - { - input: 1, - output: &placement.Bundle{ - ID: placement.GroupID(1), - Rules: []*placement.Rule{{ - GroupID: placement.GroupID(1), - StartKeyHex: hex.EncodeToString(codec.EncodeBytes(nil, tablecodec.GenTableRecordPrefix(1))), - EndKeyHex: hex.EncodeToString(codec.EncodeBytes(nil, tablecodec.GenTableRecordPrefix(2))), - }}, - }, - }, - { - input: 2, - output: &placement.Bundle{ - ID: placement.GroupID(2), - Rules: []*placement.Rule{{ - GroupID: placement.GroupID(2), - StartKeyHex: hex.EncodeToString(codec.EncodeBytes(nil, tablecodec.GenTableRecordPrefix(2))), - EndKeyHex: hex.EncodeToString(codec.EncodeBytes(nil, tablecodec.GenTableRecordPrefix(3))), - }}, - }, - }, - } - for _, t := range tests { - out := placement.BuildPlacementCopyBundle(bundle, t.input) - c.Assert(t.output, DeepEquals, out) - c.Assert(bundle.ID, Equals, placement.GroupID(-1)) - c.Assert(bundle.Rules, HasLen, 1) - c.Assert(bundle.Rules[0].GroupID, Equals, placement.GroupID(-1)) - } -} diff --git a/ddl/placement_sql_test.go b/ddl/placement_sql_test.go index 0396bc9554074..f5ce6c1328d6e 100644 --- a/ddl/placement_sql_test.go +++ b/ddl/placement_sql_test.go @@ -78,6 +78,12 @@ add placement policy replicas=3`) c.Assert(err, IsNil) + _, err = tk.Exec(`alter table t1 alter partition p0 +add placement policy + constraints="{'+zone=sh': 1}" + role=follower`) + c.Assert(err, IsNil) + _, err = tk.Exec(`alter table t1 alter partition p0 add placement policy constraints='{"+ zone = sh ": 1}' @@ -129,7 +135,7 @@ drop placement policy _, err = tk.Exec(`alter table t1 alter partition p0 drop placement policy role=follower`) - c.Assert(err, ErrorMatches, ".*no rule of role 'follower' to drop.*") + c.Assert(err, ErrorMatches, ".*no rule of such role to drop.*") _, err = tk.Exec(`alter table t1 alter partition p0 add placement policy @@ -142,7 +148,7 @@ add placement policy add placement policy constraints='{"+ zone = sh, -zone = bj ": 1}' replicas=3`) - c.Assert(err, ErrorMatches, ".*ROLE is not specified.*") + c.Assert(err, ErrorMatches, ".*the ROLE field is not specified.*") // multiple statements _, err = tk.Exec(`alter table t1 alter partition p0 @@ -202,7 +208,7 @@ drop placement policy role=leader, drop placement policy role=leader`) - c.Assert(err, ErrorMatches, ".*no rule of role 'leader' to drop.*") + c.Assert(err, ErrorMatches, ".*no rule of such role to drop.*") s.dom.InfoSchema().SetBundle(bundle) _, err = tk.Exec(`alter table t1 alter partition p0 @@ -219,14 +225,14 @@ drop placement policy add placement policy role=follower constraints='[]'`) - c.Assert(err, ErrorMatches, ".*array CONSTRAINTS should be with a positive REPLICAS.*") + c.Assert(err, ErrorMatches, ".*label constraints with invalid REPLICAS: should be positive.*") _, err = tk.Exec(`alter table t1 alter partition p0 add placement policy constraints=',,,' role=follower replicas=3`) - 
c.Assert(err, ErrorMatches, "(?s).*constraint is neither an array of string, nor a string-to-number map.*") + c.Assert(err, ErrorMatches, "(?s).*invalid label constraints format: .* or any yaml compatible representation.*") _, err = tk.Exec(`alter table t1 alter partition p0 add placement policy @@ -239,14 +245,14 @@ add placement policy constraints='[,,,' role=follower replicas=3`) - c.Assert(err, ErrorMatches, "(?s).*constraint is neither an array of string, nor a string-to-number map.*") + c.Assert(err, ErrorMatches, "(?s).*invalid label constraints format: .* or any yaml compatible representation.*") _, err = tk.Exec(`alter table t1 alter partition p0 add placement policy constraints='{,,,' role=follower replicas=3`) - c.Assert(err, ErrorMatches, "(?s).*constraint is neither an array of string, nor a string-to-number map.*") + c.Assert(err, ErrorMatches, "(?s).*invalid label constraints format: .* or any yaml compatible representation.*") _, err = tk.Exec(`alter table t1 alter partition p0 add placement policy @@ -273,7 +279,7 @@ add placement policy constraints='{"+ zone = sh, -zone = bj ": -1}' role=follower replicas=3`) - c.Assert(err, ErrorMatches, ".*count should be positive.*") + c.Assert(err, ErrorMatches, ".*label constraints in map syntax have invalid replicas: count of labels.*") _, err = tk.Exec(`alter table t1 alter partition p0 add placement policy @@ -298,7 +304,7 @@ add placement policy _, err = tk.Exec(`alter table t_part_pk_id alter partition p0 add placement policy constraints='["+host=store1"]' role=leader;`) c.Assert(err, IsNil) _, err = tk.Exec(`alter table t_part_pk_id alter partition p0 add placement policy constraints='["+host=store1"]' role=leader replicas=3;`) - c.Assert(err, ErrorMatches, ".*replicas can only be 1 when the role is leader") + c.Assert(err, ErrorMatches, ".*REPLICAS must be 1 if ROLE=leader.*") tk.MustExec("drop table t_part_pk_id") } diff --git a/ddl/schema.go b/ddl/schema.go index a4b14a49bdbc3..9fc43fd917af7 100644 --- a/ddl/schema.go +++ b/ddl/schema.go @@ -167,7 +167,7 @@ func onDropSchema(d *ddlCtx, t *meta.Meta, job *model.Job) (ver int64, _ error) for _, ID := range append(oldIDs, dbInfo.ID) { oldBundle, ok := d.infoCache.GetLatest().BundleByName(placement.GroupID(ID)) if ok && !oldBundle.IsEmpty() { - bundles = append(bundles, placement.BuildPlacementDropBundle(ID)) + bundles = append(bundles, placement.NewBundle(ID)) } } err := infosync.PutRuleBundles(context.TODO(), bundles) diff --git a/ddl/table.go b/ddl/table.go index 6c113fc855b23..8e3843b0b77e3 100644 --- a/ddl/table.go +++ b/ddl/table.go @@ -492,7 +492,7 @@ func onTruncateTable(d *ddlCtx, t *meta.Meta, job *model.Job) (ver int64, _ erro bundles := make([]*placement.Bundle, 0, len(oldPartitionIDs)+1) if oldBundle, ok := is.BundleByName(placement.GroupID(tableID)); ok { - bundles = append(bundles, placement.BuildPlacementCopyBundle(oldBundle, newTableID)) + bundles = append(bundles, oldBundle.Clone().Reset(newTableID)) } if pi := tblInfo.GetPartitionInfo(); pi != nil { @@ -504,7 +504,7 @@ func onTruncateTable(d *ddlCtx, t *meta.Meta, job *model.Job) (ver int64, _ erro if oldBundle, ok := is.BundleByName(placement.GroupID(oldPartitionIDs[i])); ok && !oldBundle.IsEmpty() { oldIDs = append(oldIDs, oldPartitionIDs[i]) newIDs = append(newIDs, newID) - bundles = append(bundles, placement.BuildPlacementCopyBundle(oldBundle, newID)) + bundles = append(bundles, oldBundle.Clone().Reset(newID)) } } job.CtxVars = []interface{}{oldIDs, newIDs} diff --git a/executor/grant_test.go 
b/executor/grant_test.go index 8ccfac3533fb6..df45169b23ff5 100644 --- a/executor/grant_test.go +++ b/executor/grant_test.go @@ -366,6 +366,14 @@ func (s *testSuite3) TestMaintainRequire(c *C) { c.Assert(err, NotNil) } +func (s *testSuite3) TestMaintainAuthString(c *C) { + tk := testkit.NewTestKit(c, s.store) + tk.MustExec(`CREATE USER 'maint_auth_str1'@'%' IDENTIFIED BY 'foo'`) + tk.MustQuery("SELECT authentication_string FROM mysql.user WHERE `Host` = '%' and `User` = 'maint_auth_str1'").Check(testkit.Rows("*F3A2A51A9B0F2BE2468926B4132313728C250DBF")) + tk.MustExec(`ALTER USER 'maint_auth_str1'@'%' REQUIRE SSL`) + tk.MustQuery("SELECT authentication_string FROM mysql.user WHERE `Host` = '%' and `User` = 'maint_auth_str1'").Check(testkit.Rows("*F3A2A51A9B0F2BE2468926B4132313728C250DBF")) +} + func (s *testSuite3) TestGrantOnNonExistTable(c *C) { tk := testkit.NewTestKit(c, s.store) tk.MustExec("create user genius") diff --git a/executor/simple.go b/executor/simple.go index 95067f42621d0..3d21e150c2b3b 100644 --- a/executor/simple.go +++ b/executor/simple.go @@ -848,10 +848,16 @@ func (e *SimpleExec) executeAlterUser(s *ast.AlterUserStmt) error { failedUsers := make([]string, 0, len(s.Specs)) for _, spec := range s.Specs { - if spec.User.CurrentUser { - user := e.ctx.GetSessionVars().User + user := e.ctx.GetSessionVars().User + if spec.User.CurrentUser || ((user != nil) && (user.Username == spec.User.Username) && (user.AuthHostname == spec.User.Hostname)) { spec.User.Username = user.Username spec.User.Hostname = user.AuthHostname + } else { + checker := privilege.GetPrivilegeManager(e.ctx) + activeRoles := e.ctx.GetSessionVars().ActiveRoles + if checker != nil && !checker.RequestVerification(activeRoles, "", "", "", mysql.SuperPriv) { + return ErrDBaccessDenied.GenWithStackByArgs(spec.User.Username, spec.User.Hostname, "mysql") + } } exists, err := userExists(e.ctx, spec.User.Username, spec.User.Hostname) @@ -863,22 +869,25 @@ func (e *SimpleExec) executeAlterUser(s *ast.AlterUserStmt) error { failedUsers = append(failedUsers, user) continue } - pwd, ok := spec.EncodedPassword() - if !ok { - return errors.Trace(ErrPasswordFormat) - } + exec := e.ctx.(sqlexec.RestrictedSQLExecutor) - stmt, err := exec.ParseWithParams(context.TODO(), `UPDATE %n.%n SET authentication_string=%? WHERE Host=%? and User=%?;`, mysql.SystemDB, mysql.UserTable, pwd, spec.User.Hostname, spec.User.Username) - if err != nil { - return err - } - _, _, err = exec.ExecRestrictedStmt(context.TODO(), stmt) - if err != nil { - failedUsers = append(failedUsers, spec.User.String()) + if spec.AuthOpt != nil { + pwd, ok := spec.EncodedPassword() + if !ok { + return errors.Trace(ErrPasswordFormat) + } + stmt, err := exec.ParseWithParams(context.TODO(), `UPDATE %n.%n SET authentication_string=%? WHERE Host=%? and User=%?;`, mysql.SystemDB, mysql.UserTable, pwd, spec.User.Hostname, spec.User.Username) + if err != nil { + return err + } + _, _, err = exec.ExecRestrictedStmt(context.TODO(), stmt) + if err != nil { + failedUsers = append(failedUsers, spec.User.String()) + } } if len(privData) > 0 { - stmt, err = exec.ParseWithParams(context.TODO(), "INSERT INTO %n.%n (Host, User, Priv) VALUES (%?,%?,%?) ON DUPLICATE KEY UPDATE Priv = values(Priv)", mysql.SystemDB, mysql.GlobalPrivTable, spec.User.Hostname, spec.User.Username, string(hack.String(privData))) + stmt, err := exec.ParseWithParams(context.TODO(), "INSERT INTO %n.%n (Host, User, Priv) VALUES (%?,%?,%?) 
ON DUPLICATE KEY UPDATE Priv = values(Priv)", mysql.SystemDB, mysql.GlobalPrivTable, spec.User.Hostname, spec.User.Username, string(hack.String(privData))) if err != nil { return err } diff --git a/executor/tiflash_test.go b/executor/tiflash_test.go index d4a4f873e6db5..282410eb06472 100644 --- a/executor/tiflash_test.go +++ b/executor/tiflash_test.go @@ -300,7 +300,7 @@ func (s *tiflashTestSuite) TestTiFlashPartitionTableShuffledHashJoin(c *C) { tk.MustExec(fmt.Sprintf("analyze table %v", tbl)) } - tk.MustExec("SET tidb_allow_mpp=2") + tk.MustExec("SET tidb_enforce_mpp=1") tk.MustExec("SET tidb_opt_broadcast_join=0") tk.MustExec("SET tidb_broadcast_join_threshold_count=0") tk.MustExec("SET tidb_broadcast_join_threshold_size=0") @@ -378,7 +378,7 @@ func (s *tiflashTestSuite) TestTiFlashPartitionTableReader(c *C) { tk.MustExec(fmt.Sprintf("insert into %v values %v", tbl, strings.Join(vals, ", "))) } - tk.MustExec("SET tidb_allow_mpp=2") + tk.MustExec("SET tidb_enforce_mpp=1") tk.MustExec("set @@session.tidb_isolation_read_engines='tiflash'") for i := 0; i < 100; i++ { l, r := rand.Intn(400), rand.Intn(400) @@ -742,7 +742,7 @@ func (s *tiflashTestSuite) TestTiFlashPartitionTableShuffledHashAggregation(c *C tk.MustExec(fmt.Sprintf("analyze table %v", tbl)) } tk.MustExec("set @@session.tidb_isolation_read_engines='tiflash'") - tk.MustExec("set @@session.tidb_allow_mpp=2") + tk.MustExec("set @@session.tidb_enforce_mpp=1") // mock executor does not support use outer table as build side for outer join, so need to // force the inner table as build side tk.MustExec("set tidb_opt_mpp_outer_join_fixed_build_side=1") @@ -814,7 +814,7 @@ func (s *tiflashTestSuite) TestTiFlashPartitionTableBroadcastJoin(c *C) { tk.MustExec(fmt.Sprintf("analyze table %v", tbl)) } tk.MustExec("set @@session.tidb_isolation_read_engines='tiflash'") - tk.MustExec("set @@session.tidb_allow_mpp=2") + tk.MustExec("set @@session.tidb_enforce_mpp=1") tk.MustExec("set @@session.tidb_opt_broadcast_join=ON") // mock executor does not support use outer table as build side for outer join, so need to // force the inner table as build side diff --git a/infoschema/tables.go b/infoschema/tables.go index df6c926b6354d..f6a46ff267953 100644 --- a/infoschema/tables.go +++ b/infoschema/tables.go @@ -1131,8 +1131,8 @@ var tableStatementsSummaryCols = []columnInfo{ {name: "SUMMARY_BEGIN_TIME", tp: mysql.TypeTimestamp, size: 26, flag: mysql.NotNullFlag, comment: "Begin time of this summary"}, {name: "SUMMARY_END_TIME", tp: mysql.TypeTimestamp, size: 26, flag: mysql.NotNullFlag, comment: "End time of this summary"}, {name: "STMT_TYPE", tp: mysql.TypeVarchar, size: 64, flag: mysql.NotNullFlag, comment: "Statement type"}, - {name: "SCHEMA_NAME", tp: mysql.TypeVarchar, size: 64, flag: mysql.NotNullFlag, comment: "Current schema"}, - {name: "DIGEST", tp: mysql.TypeVarchar, size: 64, flag: mysql.NotNullFlag}, + {name: "SCHEMA_NAME", tp: mysql.TypeVarchar, size: 64, comment: "Current schema"}, + {name: "DIGEST", tp: mysql.TypeVarchar, size: 64}, {name: "DIGEST_TEXT", tp: mysql.TypeBlob, size: types.UnspecifiedLength, flag: mysql.NotNullFlag, comment: "Normalized statement"}, {name: "TABLE_NAMES", tp: mysql.TypeBlob, size: types.UnspecifiedLength, comment: "Involved tables"}, {name: "INDEX_NAMES", tp: mysql.TypeBlob, size: types.UnspecifiedLength, comment: "Used indices"}, diff --git a/infoschema/tables_test.go b/infoschema/tables_test.go index dbba2dbcdba38..4db474d66911b 100644 --- a/infoschema/tables_test.go +++ b/infoschema/tables_test.go @@ -1401,6 
+1401,71 @@ func (s *testTableSuite) TestSimpleStmtSummaryEvictedCount(c *C) { tk.MustExec("set global tidb_stmt_summary_refresh_interval = 1800") } +func (s *testTableSuite) TestStmtSummaryTableOther(c *C) { + interval := int64(1800) + tk := s.newTestKitWithRoot(c) + tk.MustExec(fmt.Sprintf("set global tidb_stmt_summary_refresh_interval=%v", interval)) + tk.MustExec("set global tidb_enable_stmt_summary=0") + tk.MustExec("set global tidb_enable_stmt_summary=1") + // set stmt size to 1 + // first sql + tk.MustExec("set global tidb_stmt_summary_max_stmt_count=1") + defer tk.MustExec("set global tidb_stmt_summary_max_stmt_count=100") + // second sql, evict first sql from stmt_summary + tk.MustExec("show databases;") + // third sql, evict second sql from stmt_summary + tk.MustQuery("SELECT DIGEST_TEXT, DIGEST FROM `INFORMATION_SCHEMA`.`STATEMENTS_SUMMARY`;"). + Check(testkit.Rows( + // digest in cache + // "show databases ;" + "show databases ; dcd020298c5f79e8dc9d63b3098083601614a04a52db458738347d15ea5712a1", + // digest evicted + " ", + )) + // forth sql, evict third sql from stmt_summary + tk.MustQuery("SELECT SCHEMA_NAME FROM `INFORMATION_SCHEMA`.`STATEMENTS_SUMMARY`;"). + Check(testkit.Rows( + // digest in cache + "test", // select xx from yy; + // digest evicted + "", + )) +} + +func (s *testTableSuite) TestStmtSummaryHistoryTableOther(c *C) { + tk := s.newTestKitWithRoot(c) + // disable refreshing summary + interval := int64(9999) + tk.MustExec("set global tidb_stmt_summary_max_stmt_count = 1") + defer tk.MustExec("set global tidb_stmt_summary_max_stmt_count = 100") + tk.MustExec(fmt.Sprintf("set global tidb_stmt_summary_refresh_interval = %v", interval)) + defer tk.MustExec(fmt.Sprintf("set global tidb_stmt_summary_refresh_interval = %v", 1800)) + + tk.MustExec("set global tidb_enable_stmt_summary = 0") + tk.MustExec("set global tidb_enable_stmt_summary = 1") + // first sql + tk.MustExec("set global tidb_stmt_summary_max_stmt_count=1") + // second sql, evict first sql from stmt_summary + tk.MustExec("show databases;") + // third sql, evict second sql from stmt_summary + tk.MustQuery("SELECT DIGEST_TEXT, DIGEST FROM `INFORMATION_SCHEMA`.`STATEMENTS_SUMMARY_HISTORY`;"). + Check(testkit.Rows( + // digest in cache + // "show databases ;" + "show databases ; dcd020298c5f79e8dc9d63b3098083601614a04a52db458738347d15ea5712a1", + // digest evicted + " ", + )) + // forth sql, evict third sql from stmt_summary + tk.MustQuery("SELECT SCHEMA_NAME FROM `INFORMATION_SCHEMA`.`STATEMENTS_SUMMARY_HISTORY`;"). + Check(testkit.Rows( + // digest in cache + "test", // select xx from yy; + // digest evicted + "", + )) +} + func (s *testTableSuite) TestPerformanceSchemaforPlanCache(c *C) { orgEnable := plannercore.PreparedPlanCacheEnabled() defer func() { diff --git a/planner/core/enforce_mpp_test.go b/planner/core/enforce_mpp_test.go new file mode 100644 index 0000000000000..b2ba38cb515de --- /dev/null +++ b/planner/core/enforce_mpp_test.go @@ -0,0 +1,299 @@ +// Copyright 2019 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package core_test + +import ( + "strings" + + . "github.com/pingcap/check" + "github.com/pingcap/parser/model" + "github.com/pingcap/tidb/domain" + "github.com/pingcap/tidb/kv" + "github.com/pingcap/tidb/util/collate" + "github.com/pingcap/tidb/util/testkit" + "github.com/pingcap/tidb/util/testutil" +) + +var _ = SerialSuites(&testEnforceMPPSuite{}) + +type testEnforceMPPSuite struct { + testData testutil.TestData + store kv.Storage + dom *domain.Domain +} + +func (s *testEnforceMPPSuite) SetUpSuite(c *C) { + var err error + s.testData, err = testutil.LoadTestSuiteData("testdata", "enforce_mpp_suite") + c.Assert(err, IsNil) +} + +func (s *testEnforceMPPSuite) TearDownSuite(c *C) { + c.Assert(s.testData.GenerateOutputIfNeeded(), IsNil) +} + +func (s *testEnforceMPPSuite) SetUpTest(c *C) { + var err error + s.store, s.dom, err = newStoreWithBootstrap() + c.Assert(err, IsNil) +} + +func (s *testEnforceMPPSuite) TearDownTest(c *C) { + s.dom.Close() + err := s.store.Close() + c.Assert(err, IsNil) +} + +func (s *testEnforceMPPSuite) TestSetVariables(c *C) { + tk := testkit.NewTestKit(c, s.store) + + // test value limit of tidb_opt_tiflash_concurrency_factor + err := tk.ExecToErr("set @@tidb_opt_tiflash_concurrency_factor = 0") + c.Assert(err, NotNil) + c.Assert(err.Error(), Equals, `[variable:1231]Variable 'tidb_opt_tiflash_concurrency_factor' can't be set to the value of '0'`) + + // test set tidb_enforce_mpp when tidb_allow_mpp=false; + err = tk.ExecToErr("set @@tidb_allow_mpp = 0; set @@tidb_enforce_mpp = 1;") + c.Assert(err, NotNil) + c.Assert(err.Error(), Equals, `[variable:1231]Variable 'tidb_enforce_mpp' can't be set to the value of '1' but tidb_allow_mpp is 0, please activate tidb_allow_mpp at first.'`) + + err = tk.ExecToErr("set @@tidb_allow_mpp = 1; set @@tidb_enforce_mpp = 1;") + c.Assert(err, IsNil) + + err = tk.ExecToErr("set @@tidb_allow_mpp = 0;") + c.Assert(err, IsNil) +} + +func (s *testEnforceMPPSuite) TestEnforceMPP(c *C) { + tk := testkit.NewTestKit(c, s.store) + + // test query + tk.MustExec("use test") + tk.MustExec("drop table if exists t") + tk.MustExec("create table t(a int, b int)") + tk.MustExec("create index idx on t(a)") + + // Create virtual tiflash replica info. + dom := domain.GetDomain(tk.Se) + is := dom.InfoSchema() + db, exists := is.SchemaByName(model.NewCIStr("test")) + c.Assert(exists, IsTrue) + for _, tblInfo := range db.Tables { + if tblInfo.Name.L == "t" { + tblInfo.TiFlashReplica = &model.TiFlashReplicaInfo{ + Count: 1, + Available: true, + } + } + } + + var input []string + var output []struct { + SQL string + Plan []string + Warn []string + } + s.testData.GetTestCases(c, &input, &output) + for i, tt := range input { + s.testData.OnRecord(func() { + output[i].SQL = tt + }) + if strings.HasPrefix(tt, "set") { + tk.MustExec(tt) + continue + } + s.testData.OnRecord(func() { + output[i].SQL = tt + output[i].Plan = s.testData.ConvertRowsToStrings(tk.MustQuery(tt).Rows()) + output[i].Warn = s.testData.ConvertSQLWarnToStrings(tk.Se.GetSessionVars().StmtCtx.GetWarnings()) + }) + res := tk.MustQuery(tt) + res.Check(testkit.Rows(output[i].Plan...)) + c.Assert(s.testData.ConvertSQLWarnToStrings(tk.Se.GetSessionVars().StmtCtx.GetWarnings()), DeepEquals, output[i].Warn) + } +} + +// general cases. 
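+// The cases live in testdata/enforce_mpp_suite; entries prefixed with "cmd:"
+// toggle the virtual TiFlash replica of table `t` instead of being executed as SQL.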
+func (s *testEnforceMPPSuite) TestEnforceMPPWarning1(c *C) { + tk := testkit.NewTestKit(c, s.store) + + // test query + tk.MustExec("use test") + tk.MustExec("drop table if exists t") + tk.MustExec("create table t(a int, b int as (a+1), c time)") + tk.MustExec("create index idx on t(a)") + + var input []string + var output []struct { + SQL string + Plan []string + Warn []string + } + s.testData.GetTestCases(c, &input, &output) + for i, tt := range input { + s.testData.OnRecord(func() { + output[i].SQL = tt + }) + if strings.HasPrefix(tt, "set") { + tk.MustExec(tt) + continue + } + if strings.HasPrefix(tt, "cmd: create-replica") { + // Create virtual tiflash replica info. + dom := domain.GetDomain(tk.Se) + is := dom.InfoSchema() + db, exists := is.SchemaByName(model.NewCIStr("test")) + c.Assert(exists, IsTrue) + for _, tblInfo := range db.Tables { + if tblInfo.Name.L == "t" { + tblInfo.TiFlashReplica = &model.TiFlashReplicaInfo{ + Count: 1, + Available: false, + } + } + } + continue + } + if strings.HasPrefix(tt, "cmd: enable-replica") { + // Create virtual tiflash replica info. + dom := domain.GetDomain(tk.Se) + is := dom.InfoSchema() + db, exists := is.SchemaByName(model.NewCIStr("test")) + c.Assert(exists, IsTrue) + for _, tblInfo := range db.Tables { + if tblInfo.Name.L == "t" { + tblInfo.TiFlashReplica = &model.TiFlashReplicaInfo{ + Count: 1, + Available: true, + } + } + } + continue + } + s.testData.OnRecord(func() { + output[i].SQL = tt + output[i].Plan = s.testData.ConvertRowsToStrings(tk.MustQuery(tt).Rows()) + output[i].Warn = s.testData.ConvertSQLWarnToStrings(tk.Se.GetSessionVars().StmtCtx.GetWarnings()) + }) + res := tk.MustQuery(tt) + res.Check(testkit.Rows(output[i].Plan...)) + c.Assert(s.testData.ConvertSQLWarnToStrings(tk.Se.GetSessionVars().StmtCtx.GetWarnings()), DeepEquals, output[i].Warn) + } +} + +// partition table. +func (s *testEnforceMPPSuite) TestEnforceMPPWarning2(c *C) { + tk := testkit.NewTestKit(c, s.store) + + // test query + tk.MustExec("use test") + tk.MustExec("drop table if exists t") + tk.MustExec("CREATE TABLE t (a int, b char(20)) PARTITION BY HASH(a)") + + // Create virtual tiflash replica info. + dom := domain.GetDomain(tk.Se) + is := dom.InfoSchema() + db, exists := is.SchemaByName(model.NewCIStr("test")) + c.Assert(exists, IsTrue) + for _, tblInfo := range db.Tables { + if tblInfo.Name.L == "t" { + tblInfo.TiFlashReplica = &model.TiFlashReplicaInfo{ + Count: 1, + Available: true, + } + } + } + + var input []string + var output []struct { + SQL string + Plan []string + Warn []string + } + s.testData.GetTestCases(c, &input, &output) + for i, tt := range input { + s.testData.OnRecord(func() { + output[i].SQL = tt + }) + if strings.HasPrefix(tt, "set") { + tk.MustExec(tt) + continue + } + s.testData.OnRecord(func() { + output[i].SQL = tt + output[i].Plan = s.testData.ConvertRowsToStrings(tk.MustQuery(tt).Rows()) + output[i].Warn = s.testData.ConvertSQLWarnToStrings(tk.Se.GetSessionVars().StmtCtx.GetWarnings()) + }) + res := tk.MustQuery(tt) + res.Check(testkit.Rows(output[i].Plan...)) + c.Assert(s.testData.ConvertSQLWarnToStrings(tk.Se.GetSessionVars().StmtCtx.GetWarnings()), DeepEquals, output[i].Warn) + } +} + +// new collation. +func (s *testEnforceMPPSuite) TestEnforceMPPWarning3(c *C) { + tk := testkit.NewTestKit(c, s.store) + + // test query + tk.MustExec("use test") + tk.MustExec("drop table if exists t") + tk.MustExec("CREATE TABLE t (a int, b char(20))") + + // Create virtual tiflash replica info. 
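+ // Note: unlike TestEnforceMPPWarning1, the replica below is marked Available, so only the new-collation cases in this test can block MPP.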
+ dom := domain.GetDomain(tk.Se) + is := dom.InfoSchema() + db, exists := is.SchemaByName(model.NewCIStr("test")) + c.Assert(exists, IsTrue) + for _, tblInfo := range db.Tables { + if tblInfo.Name.L == "t" { + tblInfo.TiFlashReplica = &model.TiFlashReplicaInfo{ + Count: 1, + Available: true, + } + } + } + + var input []string + var output []struct { + SQL string + Plan []string + Warn []string + } + s.testData.GetTestCases(c, &input, &output) + for i, tt := range input { + s.testData.OnRecord(func() { + output[i].SQL = tt + }) + if strings.HasPrefix(tt, "set") || strings.HasPrefix(tt, "UPDATE") { + tk.MustExec(tt) + continue + } + if strings.HasPrefix(tt, "cmd: enable-new-collation") { + collate.SetNewCollationEnabledForTest(true) + continue + } + if strings.HasPrefix(tt, "cmd: disable-new-collation") { + collate.SetNewCollationEnabledForTest(false) + continue + } + s.testData.OnRecord(func() { + output[i].SQL = tt + output[i].Plan = s.testData.ConvertRowsToStrings(tk.MustQuery(tt).Rows()) + output[i].Warn = s.testData.ConvertSQLWarnToStrings(tk.Se.GetSessionVars().StmtCtx.GetWarnings()) + }) + res := tk.MustQuery(tt) + res.Check(testkit.Rows(output[i].Plan...)) + c.Assert(s.testData.ConvertSQLWarnToStrings(tk.Se.GetSessionVars().StmtCtx.GetWarnings()), DeepEquals, output[i].Warn) + } +} diff --git a/planner/core/exhaust_physical_plans.go b/planner/core/exhaust_physical_plans.go index d7a17d3017221..eef51465a072d 100644 --- a/planner/core/exhaust_physical_plans.go +++ b/planner/core/exhaust_physical_plans.go @@ -41,6 +41,8 @@ import ( func (p *LogicalUnionScan) exhaustPhysicalPlans(prop *property.PhysicalProperty) ([]PhysicalPlan, bool, error) { if prop.IsFlashProp() { + p.SCtx().GetSessionVars().RaiseWarningWhenMPPEnforced( + "MPP mode may be blocked because operator `UnionScan` is not supported now.") return nil, true, nil } childProp := prop.CloneEssentialFields() @@ -1687,7 +1689,7 @@ func (p *LogicalJoin) exhaustPhysicalPlans(prop *property.PhysicalProperty) ([]P if prop.IsFlashProp() && ((p.preferJoinType&preferBCJoin) == 0 && p.preferJoinType > 0) { return nil, false, nil } - if prop.PartitionTp == property.BroadcastType { + if prop.MPPPartitionTp == property.BroadcastType { return nil, false, nil } joins := make([]PhysicalPlan, 0, 8) @@ -1785,7 +1787,7 @@ func (p *LogicalJoin) tryToGetMppHashJoin(prop *property.PhysicalProperty, useBC return nil } - if prop.PartitionTp == property.BroadcastType { + if prop.MPPPartitionTp == property.BroadcastType { return nil } if !canExprsInJoinPushdown(p, kv.TiFlash) { @@ -1828,27 +1830,27 @@ func (p *LogicalJoin) tryToGetMppHashJoin(prop *property.PhysicalProperty, useBC baseJoin.InnerChildIdx = preferredBuildIndex childrenProps := make([]*property.PhysicalProperty, 2) if useBCJ { - childrenProps[preferredBuildIndex] = &property.PhysicalProperty{TaskTp: property.MppTaskType, ExpectedCnt: math.MaxFloat64, PartitionTp: property.BroadcastType, CanAddEnforcer: true} + childrenProps[preferredBuildIndex] = &property.PhysicalProperty{TaskTp: property.MppTaskType, ExpectedCnt: math.MaxFloat64, MPPPartitionTp: property.BroadcastType, CanAddEnforcer: true} expCnt := math.MaxFloat64 if prop.ExpectedCnt < p.stats.RowCount { expCntScale := prop.ExpectedCnt / p.stats.RowCount expCnt = p.children[1-preferredBuildIndex].statsInfo().RowCount * expCntScale } - if prop.PartitionTp == property.HashType { + if prop.MPPPartitionTp == property.HashType { hashKeys := rkeys if preferredBuildIndex == 1 { hashKeys = lkeys } if matches := prop.IsSubsetOf(hashKeys); 
len(matches) != 0 { - childrenProps[1-preferredBuildIndex] = &property.PhysicalProperty{TaskTp: property.MppTaskType, ExpectedCnt: expCnt, PartitionTp: property.HashType, PartitionCols: prop.PartitionCols} + childrenProps[1-preferredBuildIndex] = &property.PhysicalProperty{TaskTp: property.MppTaskType, ExpectedCnt: expCnt, MPPPartitionTp: property.HashType, MPPPartitionCols: prop.MPPPartitionCols} } else { return nil } } else { - childrenProps[1-preferredBuildIndex] = &property.PhysicalProperty{TaskTp: property.MppTaskType, ExpectedCnt: expCnt, PartitionTp: property.AnyType} + childrenProps[1-preferredBuildIndex] = &property.PhysicalProperty{TaskTp: property.MppTaskType, ExpectedCnt: expCnt, MPPPartitionTp: property.AnyType} } } else { - if prop.PartitionTp == property.HashType { + if prop.MPPPartitionTp == property.HashType { var matches []int if matches = prop.IsSubsetOf(lkeys); len(matches) == 0 { matches = prop.IsSubsetOf(rkeys) @@ -1859,8 +1861,8 @@ func (p *LogicalJoin) tryToGetMppHashJoin(prop *property.PhysicalProperty, useBC lkeys = chooseSubsetOfJoinKeys(lkeys, matches) rkeys = chooseSubsetOfJoinKeys(rkeys, matches) } - childrenProps[0] = &property.PhysicalProperty{TaskTp: property.MppTaskType, ExpectedCnt: math.MaxFloat64, PartitionTp: property.HashType, PartitionCols: lkeys, CanAddEnforcer: true} - childrenProps[1] = &property.PhysicalProperty{TaskTp: property.MppTaskType, ExpectedCnt: math.MaxFloat64, PartitionTp: property.HashType, PartitionCols: rkeys, CanAddEnforcer: true} + childrenProps[0] = &property.PhysicalProperty{TaskTp: property.MppTaskType, ExpectedCnt: math.MaxFloat64, MPPPartitionTp: property.HashType, MPPPartitionCols: lkeys, CanAddEnforcer: true} + childrenProps[1] = &property.PhysicalProperty{TaskTp: property.MppTaskType, ExpectedCnt: math.MaxFloat64, MPPPartitionTp: property.HashType, MPPPartitionCols: rkeys, CanAddEnforcer: true} } join := PhysicalHashJoin{ basePhysicalJoin: baseJoin, @@ -2096,6 +2098,8 @@ func (la *LogicalApply) GetHashJoin(prop *property.PhysicalProperty) *PhysicalHa func (la *LogicalApply) exhaustPhysicalPlans(prop *property.PhysicalProperty) ([]PhysicalPlan, bool, error) { if !prop.AllColsFromSchema(la.children[0].Schema()) || prop.IsFlashProp() { // for convenient, we don't pass through any prop + la.SCtx().GetSessionVars().RaiseWarningWhenMPPEnforced( + "MPP mode may be blocked because operator `Apply` is not supported now.") return nil, true, nil } disableAggPushDownToCop(la.children[0]) @@ -2142,6 +2146,8 @@ func disableAggPushDownToCop(p LogicalPlan) { } func (p *LogicalWindow) exhaustPhysicalPlans(prop *property.PhysicalProperty) ([]PhysicalPlan, bool, error) { + p.SCtx().GetSessionVars().RaiseWarningWhenMPPEnforced( + "MPP mode may be blocked because operator `Window` is not supported now.") if prop.IsFlashProp() { return nil, true, nil } @@ -2205,7 +2211,12 @@ func (p *baseLogicalPlan) canPushToCopImpl(storeTp kv.StoreType, considerDual bo } else { return false } + // These operators can be partially push down to TiFlash, so we don't raise warning for them. 
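+ // Note: they still return false here (the plan is not fully pushed to cop); only the MPP-enforced warning is skipped for them.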
+ case *LogicalLimit, *LogicalTopN: + return false default: + p.SCtx().GetSessionVars().RaiseWarningWhenMPPEnforced( + "MPP mode may be blocked because operator `" + c.TP() + "` is not supported now.") return false } } @@ -2363,13 +2374,13 @@ func (la *LogicalAggregation) tryToGetMppHashAggs(prop *property.PhysicalPropert if prop.TaskTp != property.RootTaskType && prop.TaskTp != property.MppTaskType { return nil } - if prop.PartitionTp == property.BroadcastType { + if prop.MPPPartitionTp == property.BroadcastType { return nil } if len(la.GroupByItems) > 0 { partitionCols := la.GetGroupByCols() // trying to match the required parititions. - if prop.PartitionTp == property.HashType { + if prop.MPPPartitionTp == property.HashType { if matches := prop.IsSubsetOf(partitionCols); len(matches) != 0 { partitionCols = chooseSubsetOfJoinKeys(partitionCols, matches) } else { @@ -2382,7 +2393,7 @@ func (la *LogicalAggregation) tryToGetMppHashAggs(prop *property.PhysicalPropert // If there are no available partition cols, but still have group by items, that means group by items are all expressions or constants. // To avoid mess, we don't do any one-phase aggregation in this case. if len(partitionCols) != 0 { - childProp := &property.PhysicalProperty{TaskTp: property.MppTaskType, ExpectedCnt: math.MaxFloat64, PartitionTp: property.HashType, PartitionCols: partitionCols, CanAddEnforcer: true} + childProp := &property.PhysicalProperty{TaskTp: property.MppTaskType, ExpectedCnt: math.MaxFloat64, MPPPartitionTp: property.HashType, MPPPartitionCols: partitionCols, CanAddEnforcer: true} agg := NewPhysicalHashAgg(la, la.stats.ScaleByExpectCnt(prop.ExpectedCnt), childProp) agg.SetSchema(la.schema.Clone()) agg.MppRunMode = Mpp1Phase @@ -2390,7 +2401,7 @@ func (la *LogicalAggregation) tryToGetMppHashAggs(prop *property.PhysicalPropert } // 2-phase agg - childProp := &property.PhysicalProperty{TaskTp: property.MppTaskType, ExpectedCnt: math.MaxFloat64, PartitionTp: property.AnyType} + childProp := &property.PhysicalProperty{TaskTp: property.MppTaskType, ExpectedCnt: math.MaxFloat64, MPPPartitionTp: property.AnyType} agg := NewPhysicalHashAgg(la, la.stats.ScaleByExpectCnt(prop.ExpectedCnt), childProp) agg.SetSchema(la.schema.Clone()) agg.MppRunMode = Mpp2Phase @@ -2429,7 +2440,7 @@ func (la *LogicalAggregation) getHashAggs(prop *property.PhysicalProperty) []Phy taskTypes = append(taskTypes, property.CopTiFlashLocalReadTaskType) } canPushDownToTiFlash := la.canPushToCop(kv.TiFlash) - canPushDownToMPP := la.ctx.GetSessionVars().IsMPPAllowed() && la.checkCanPushDownToMPP() && canPushDownToTiFlash + canPushDownToMPP := canPushDownToTiFlash && la.ctx.GetSessionVars().IsMPPAllowed() && la.checkCanPushDownToMPP() if la.HasDistinct() { // TODO: remove after the cost estimation of distinct pushdown is implemented. if !la.ctx.GetSessionVars().AllowDistinctAggPushDown { @@ -2557,6 +2568,8 @@ func (p *LogicalLimit) exhaustPhysicalPlans(prop *property.PhysicalProperty) ([] func (p *LogicalLock) exhaustPhysicalPlans(prop *property.PhysicalProperty) ([]PhysicalPlan, bool, error) { if prop.IsFlashProp() { + p.SCtx().GetSessionVars().RaiseWarningWhenMPPEnforced( + "MPP mode may be blocked because operator `Lock` is not supported now.") return nil, true, nil } childProp := prop.CloneEssentialFields() @@ -2574,7 +2587,7 @@ func (p *LogicalUnionAll) exhaustPhysicalPlans(prop *property.PhysicalProperty) return nil, true, nil } // TODO: UnionAll can pass partition info, but for briefness, we prevent it from pushing down. 
- if prop.TaskTp == property.MppTaskType && prop.PartitionTp != property.AnyType { + if prop.TaskTp == property.MppTaskType && prop.MPPPartitionTp != property.AnyType { return nil, true, nil } canUseMpp := p.ctx.GetSessionVars().IsMPPAllowed() && p.canPushToCopImpl(kv.TiFlash, true) @@ -2650,6 +2663,7 @@ func (ls *LogicalSort) exhaustPhysicalPlans(prop *property.PhysicalProperty) ([] func (p *LogicalMaxOneRow) exhaustPhysicalPlans(prop *property.PhysicalProperty) ([]PhysicalPlan, bool, error) { if !prop.IsEmpty() || prop.IsFlashProp() { + p.SCtx().GetSessionVars().RaiseWarningWhenMPPEnforced("MPP mode may be blocked because operator `MaxOneRow` is not supported now.") return nil, true, nil } mor := PhysicalMaxOneRow{}.Init(p.ctx, p.stats, p.blockOffset, &property.PhysicalProperty{ExpectedCnt: 2}) diff --git a/planner/core/find_best_task.go b/planner/core/find_best_task.go index 71d85a7d8b503..4d8112eb42523 100644 --- a/planner/core/find_best_task.go +++ b/planner/core/find_best_task.go @@ -328,8 +328,8 @@ func (p *baseLogicalPlan) findBestTask(prop *property.PhysicalProperty, planCoun // try to get the task with an enforced sort. newProp.SortItems = []property.SortItem{} newProp.ExpectedCnt = math.MaxFloat64 - newProp.PartitionCols = nil - newProp.PartitionTp = property.AnyType + newProp.MPPPartitionCols = nil + newProp.MPPPartitionTp = property.AnyType var hintCanWork bool plansNeedEnforce, hintCanWork, err = p.self.exhaustPhysicalPlans(newProp) if err != nil { @@ -644,8 +644,8 @@ func (ds *DataSource) findBestTask(prop *property.PhysicalProperty, planCounter } // Next, get the bestTask with enforced prop prop.SortItems = []property.SortItem{} - prop.PartitionTp = property.AnyType - } else if prop.PartitionTp != property.AnyType { + prop.MPPPartitionTp = property.AnyType + } else if prop.MPPPartitionTp != property.AnyType { return invalidTask, 0, nil } defer func() { @@ -1546,12 +1546,14 @@ func (ds *DataSource) convertToTableScan(prop *property.PhysicalProperty, candid if ts.KeepOrder { return &mppTask{}, nil } - if prop.PartitionTp != property.AnyType || ts.isPartition { + if prop.MPPPartitionTp != property.AnyType || ts.isPartition { // If ts is a single partition, then this partition table is in static-only prune, then we should not choose mpp execution. 
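+ // Like the other call sites, this warning only surfaces for EXPLAIN statements with tidb_enforce_mpp on (see RaiseWarningWhenMPPEnforced).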
+ ds.SCtx().GetSessionVars().RaiseWarningWhenMPPEnforced("MPP mode may be blocked because table `" + ds.tableInfo.Name.O + "`is a partition table which is not supported when `@@tidb_partition_prune_mode=static`.") return &mppTask{}, nil } for _, col := range ts.schema.Columns { if col.VirtualExpr != nil { + ds.SCtx().GetSessionVars().RaiseWarningWhenMPPEnforced("MPP mode may be blocked because column `" + col.OrigName + "` is a virtual column which is not supported now.") return &mppTask{}, nil } } diff --git a/planner/core/integration_test.go b/planner/core/integration_test.go index 9c95d5026f79b..394d5c02b0c1e 100644 --- a/planner/core/integration_test.go +++ b/planner/core/integration_test.go @@ -3889,129 +3889,3 @@ func (s *testIntegrationSerialSuite) TestMergeContinuousSelections(c *C) { res.Check(testkit.Rows(output[i].Plan...)) } } - -func (s *testIntegrationSerialSuite) TestEnforceMPP(c *C) { - tk := testkit.NewTestKit(c, s.store) - - // test value limit of tidb_opt_tiflash_concurrency_factor - err := tk.ExecToErr("set @@tidb_opt_tiflash_concurrency_factor = 0") - c.Assert(err, NotNil) - c.Assert(err.Error(), Equals, `[variable:1231]Variable 'tidb_opt_tiflash_concurrency_factor' can't be set to the value of '0'`) - - tk.MustExec("set @@tidb_opt_tiflash_concurrency_factor = 1") - tk.MustQuery("select @@tidb_opt_tiflash_concurrency_factor").Check(testkit.Rows("1")) - tk.MustExec("set @@tidb_opt_tiflash_concurrency_factor = 24") - tk.MustQuery("select @@tidb_opt_tiflash_concurrency_factor").Check(testkit.Rows("24")) - - // test set tidb_allow_mpp - tk.MustExec("set @@session.tidb_allow_mpp = 0") - tk.MustQuery("select @@session.tidb_allow_mpp").Check(testkit.Rows("OFF")) - tk.MustExec("set @@session.tidb_allow_mpp = 1") - tk.MustQuery("select @@session.tidb_allow_mpp").Check(testkit.Rows("ON")) - tk.MustExec("set @@session.tidb_allow_mpp = 2") - tk.MustQuery("select @@session.tidb_allow_mpp").Check(testkit.Rows("ENFORCE")) - - tk.MustExec("set @@session.tidb_allow_mpp = off") - tk.MustQuery("select @@session.tidb_allow_mpp").Check(testkit.Rows("OFF")) - tk.MustExec("set @@session.tidb_allow_mpp = oN") - tk.MustQuery("select @@session.tidb_allow_mpp").Check(testkit.Rows("ON")) - tk.MustExec("set @@session.tidb_allow_mpp = enForcE") - tk.MustQuery("select @@session.tidb_allow_mpp").Check(testkit.Rows("ENFORCE")) - - tk.MustExec("set @@global.tidb_allow_mpp = faLsE") - tk.MustQuery("select @@global.tidb_allow_mpp").Check(testkit.Rows("OFF")) - tk.MustExec("set @@global.tidb_allow_mpp = True") - tk.MustQuery("select @@global.tidb_allow_mpp").Check(testkit.Rows("ON")) - - err = tk.ExecToErr("set @@global.tidb_allow_mpp = enforceWithTypo") - c.Assert(err, NotNil) - c.Assert(err.Error(), Equals, `[variable:1231]Variable 'tidb_allow_mpp' can't be set to the value of 'enforceWithTypo'`) - - // test query - tk.MustExec("use test") - tk.MustExec("drop table if exists t") - tk.MustExec("create table t(a int)") - tk.MustExec("create index idx on t(a)") - - // Create virtual tiflash replica info. - dom := domain.GetDomain(tk.Se) - is := dom.InfoSchema() - db, exists := is.SchemaByName(model.NewCIStr("test")) - c.Assert(exists, IsTrue) - for _, tblInfo := range db.Tables { - if tblInfo.Name.L == "t" { - tblInfo.TiFlashReplica = &model.TiFlashReplicaInfo{ - Count: 1, - Available: true, - } - } - } - - // ban mpp - tk.MustExec("set @@session.tidb_allow_mpp = 0") - tk.MustQuery("select @@session.tidb_allow_mpp").Check(testkit.Rows("OFF")) - - // read from tiflash, batch cop. 
- tk.MustQuery("explain format='verbose' select /*+ read_from_storage(tiflash[t]) */ count(*) from t where a=1").Check(testkit.Rows( - "StreamAgg_20 1.00 285050.00 root funcs:count(Column#5)->Column#3", - "└─TableReader_21 1.00 19003.88 root data:StreamAgg_9", - " └─StreamAgg_9 1.00 19006.88 batchCop[tiflash] funcs:count(1)->Column#5", - " └─Selection_19 10.00 285020.00 batchCop[tiflash] eq(test.t.a, 1)", - " └─TableFullScan_18 10000.00 255020.00 batchCop[tiflash] table:t keep order:false, stats:pseudo")) - - // open mpp - tk.MustExec("set @@session.tidb_allow_mpp = 1") - tk.MustQuery("select @@session.tidb_allow_mpp").Check(testkit.Rows("ON")) - - // should use tikv to index read - tk.MustQuery("explain format='verbose' select count(*) from t where a=1;").Check(testkit.Rows( - "StreamAgg_30 1.00 485.00 root funcs:count(Column#6)->Column#3", - "└─IndexReader_31 1.00 32.88 root index:StreamAgg_10", - " └─StreamAgg_10 1.00 35.88 cop[tikv] funcs:count(1)->Column#6", - " └─IndexRangeScan_29 10.00 455.00 cop[tikv] table:t, index:idx(a) range:[1,1], keep order:false, stats:pseudo")) - - // read from tikv, indexRead - tk.MustQuery("explain format='verbose' select /*+ read_from_storage(tikv[t]) */ count(*) from t where a=1;").Check(testkit.Rows( - "StreamAgg_18 1.00 485.00 root funcs:count(Column#5)->Column#3", - "└─IndexReader_19 1.00 32.88 root index:StreamAgg_10", - " └─StreamAgg_10 1.00 35.88 cop[tikv] funcs:count(1)->Column#5", - " └─IndexRangeScan_17 10.00 455.00 cop[tikv] table:t, index:idx(a) range:[1,1], keep order:false, stats:pseudo")) - - // read from tiflash, mpp with large cost - tk.MustQuery("explain format='verbose' select /*+ read_from_storage(tiflash[t]) */ count(*) from t where a=1").Check(testkit.Rows( - "HashAgg_21 1.00 11910.73 root funcs:count(Column#5)->Column#3", - "└─TableReader_23 1.00 11877.13 root data:ExchangeSender_22", - " └─ExchangeSender_22 1.00 285050.00 batchCop[tiflash] ExchangeType: PassThrough", - " └─HashAgg_9 1.00 285050.00 batchCop[tiflash] funcs:count(1)->Column#5", - " └─Selection_20 10.00 285020.00 batchCop[tiflash] eq(test.t.a, 1)", - " └─TableFullScan_19 10000.00 255020.00 batchCop[tiflash] table:t keep order:false, stats:pseudo")) - - // enforce mpp - tk.MustExec("set @@session.tidb_allow_mpp = 2") - tk.MustQuery("select @@session.tidb_allow_mpp").Check(testkit.Rows("ENFORCE")) - - // should use mpp - tk.MustQuery("explain format='verbose' select count(*) from t where a=1;").Check(testkit.Rows( - "HashAgg_24 1.00 33.60 root funcs:count(Column#5)->Column#3", - "└─TableReader_26 1.00 0.00 root data:ExchangeSender_25", - " └─ExchangeSender_25 1.00 285050.00 batchCop[tiflash] ExchangeType: PassThrough", - " └─HashAgg_9 1.00 285050.00 batchCop[tiflash] funcs:count(1)->Column#5", - " └─Selection_23 10.00 285020.00 batchCop[tiflash] eq(test.t.a, 1)", - " └─TableFullScan_22 10000.00 255020.00 batchCop[tiflash] table:t keep order:false, stats:pseudo")) - - // read from tikv, indexRead - tk.MustQuery("explain format='verbose' select /*+ read_from_storage(tikv[t]) */ count(*) from t where a=1;").Check(testkit.Rows( - "StreamAgg_18 1.00 485.00 root funcs:count(Column#5)->Column#3", - "└─IndexReader_19 1.00 32.88 root index:StreamAgg_10", - " └─StreamAgg_10 1.00 35.88 cop[tikv] funcs:count(1)->Column#5", - " └─IndexRangeScan_17 10.00 455.00 cop[tikv] table:t, index:idx(a) range:[1,1], keep order:false, stats:pseudo")) - - // read from tiflash, mpp with little cost - tk.MustQuery("explain format='verbose' select /*+ read_from_storage(tiflash[t]) */ count(*) from t 
where a=1").Check(testkit.Rows( - "HashAgg_21 1.00 33.60 root funcs:count(Column#5)->Column#3", - "└─TableReader_23 1.00 0.00 root data:ExchangeSender_22", - " └─ExchangeSender_22 1.00 285050.00 batchCop[tiflash] ExchangeType: PassThrough", - " └─HashAgg_9 1.00 285050.00 batchCop[tiflash] funcs:count(1)->Column#5", - " └─Selection_20 10.00 285020.00 batchCop[tiflash] eq(test.t.a, 1)", - " └─TableFullScan_19 10000.00 255020.00 batchCop[tiflash] table:t keep order:false, stats:pseudo")) -} diff --git a/planner/core/logical_plan_builder.go b/planner/core/logical_plan_builder.go index e575bb79e7135..8389f0be4cade 100644 --- a/planner/core/logical_plan_builder.go +++ b/planner/core/logical_plan_builder.go @@ -619,6 +619,8 @@ func (ds *DataSource) setPreferredStoreType(hintInfo *tableHintInfo) { ds.DBName.O, ds.table.Meta().Name.O, kv.TiKV.Name(), ds.ctx.GetSessionVars().GetIsolationReadEngines()) warning := ErrInternal.GenWithStack(errMsg) ds.ctx.GetSessionVars().StmtCtx.AppendWarning(warning) + } else { + ds.ctx.GetSessionVars().RaiseWarningWhenMPPEnforced("MPP mode may be blocked because you have set a hint to read table `" + hintTbl.tblName.O + "` from TiKV.") } } if hintTbl := hintInfo.ifPreferTiFlash(alias); hintTbl != nil { diff --git a/planner/core/planbuilder.go b/planner/core/planbuilder.go index 6f36d6d0a9ad8..eeb969c0adcd5 100644 --- a/planner/core/planbuilder.go +++ b/planner/core/planbuilder.go @@ -973,10 +973,16 @@ func getPossibleAccessPaths(ctx sessionctx.Context, tableHints *tableHintInfo, i tablePath := &util.AccessPath{StoreType: tp} fillContentForTablePath(tablePath, tblInfo) publicPaths = append(publicPaths, tablePath) - if tblInfo.TiFlashReplica != nil && tblInfo.TiFlashReplica.Available { + + if tblInfo.TiFlashReplica == nil { + ctx.GetSessionVars().RaiseWarningWhenMPPEnforced("MPP mode may be blocked because there aren't tiflash replicas of table `" + tblInfo.Name.O + "`.") + } else if !tblInfo.TiFlashReplica.Available { + ctx.GetSessionVars().RaiseWarningWhenMPPEnforced("MPP mode may be blocked because tiflash replicas of table `" + tblInfo.Name.O + "` not ready.") + } else { publicPaths = append(publicPaths, genTiFlashPath(tblInfo, false)) publicPaths = append(publicPaths, genTiFlashPath(tblInfo, true)) } + optimizerUseInvisibleIndexes := ctx.GetSessionVars().OptimizerUseInvisibleIndexes check = check && ctx.GetSessionVars().ConnectionID > 0 @@ -1110,11 +1116,15 @@ func filterPathByIsolationRead(ctx sessionctx.Context, paths []*util.AccessPath, } } var err error + engineVals, _ := ctx.GetSessionVars().GetSystemVar(variable.TiDBIsolationReadEngines) if len(paths) == 0 { - engineVals, _ := ctx.GetSessionVars().GetSystemVar(variable.TiDBIsolationReadEngines) err = ErrInternal.GenWithStackByArgs(fmt.Sprintf("Can not find access path matching '%v'(value: '%v'). 
Available values are '%v'.", variable.TiDBIsolationReadEngines, engineVals, availableEngineStr)) } + if _, ok := isolationReadEngines[kv.TiFlash]; !ok { + ctx.GetSessionVars().RaiseWarningWhenMPPEnforced( + fmt.Sprintf("MPP mode may be blocked because '%v'(value: '%v') not match, need 'tiflash'.", variable.TiDBIsolationReadEngines, engineVals)) + } return paths, err } diff --git a/planner/core/task.go b/planner/core/task.go index eacc5dbf73e6d..26261836f5e07 100644 --- a/planner/core/task.go +++ b/planner/core/task.go @@ -728,9 +728,9 @@ func (p *PhysicalHashJoin) convertPartitionKeysIfNeed(lTask, rTask *mppTask) (*m nlTask := lTask.copy().(*mppTask) nlTask.p = lProj nlTask = nlTask.enforceExchangerImpl(&property.PhysicalProperty{ - TaskTp: property.MppTaskType, - PartitionTp: property.HashType, - PartitionCols: lPartKeys, + TaskTp: property.MppTaskType, + MPPPartitionTp: property.HashType, + MPPPartitionCols: lPartKeys, }) nlTask.cst = lTask.cst lProj.cost = nlTask.cst @@ -740,9 +740,9 @@ func (p *PhysicalHashJoin) convertPartitionKeysIfNeed(lTask, rTask *mppTask) (*m nrTask := rTask.copy().(*mppTask) nrTask.p = rProj nrTask = nrTask.enforceExchangerImpl(&property.PhysicalProperty{ - TaskTp: property.MppTaskType, - PartitionTp: property.HashType, - PartitionCols: rPartKeys, + TaskTp: property.MppTaskType, + MPPPartitionTp: property.HashType, + MPPPartitionCols: rPartKeys, }) nrTask.cst = rTask.cst rProj.cost = nrTask.cst @@ -1404,10 +1404,13 @@ func CheckAggCanPushCop(sctx sessionctx.Context, aggFuncs []*aggregation.AggFunc for _, aggFunc := range aggFuncs { // if the aggFunc contain VirtualColumn or CorrelatedColumn, it can not be pushed down. if expression.ContainVirtualColumn(aggFunc.Args) || expression.ContainCorrelatedColumn(aggFunc.Args) { + sctx.GetSessionVars().RaiseWarningWhenMPPEnforced( + "MPP mode may be blocked because expressions of AggFunc `" + aggFunc.Name + "` contain virtual column or correlated column, which is not supported now.") return false } pb := aggregation.AggFuncToPBExpr(sc, client, aggFunc) if pb == nil { + sctx.GetSessionVars().RaiseWarningWhenMPPEnforced("MPP mode may be blocked because AggFunc `" + aggFunc.Name + "` is not supported now.") return false } if !aggregation.CheckAggPushDown(aggFunc, storeType) { @@ -1425,6 +1428,7 @@ func CheckAggCanPushCop(sctx sessionctx.Context, aggFuncs []*aggregation.AggFunc } } if expression.ContainVirtualColumn(groupByItems) { + sctx.GetSessionVars().RaiseWarningWhenMPPEnforced("MPP mode may be blocked because groupByItems contain virtual column, which is not supported now.") return false } return expression.CanExprsPushDown(sc, groupByItems, client, storeType) @@ -1911,7 +1915,7 @@ func (p *PhysicalHashAgg) attach2TaskForMpp(tasks ...task) task { } } partialAgg.SetCost(mpp.cost()) - prop := &property.PhysicalProperty{TaskTp: property.MppTaskType, ExpectedCnt: math.MaxFloat64, PartitionTp: property.HashType, PartitionCols: partitionCols} + prop := &property.PhysicalProperty{TaskTp: property.MppTaskType, ExpectedCnt: math.MaxFloat64, MPPPartitionTp: property.HashType, MPPPartitionCols: partitionCols} newMpp := mpp.enforceExchangerImpl(prop) if newMpp.invalid() { return newMpp @@ -2042,7 +2046,7 @@ type mppTask struct { p PhysicalPlan cst float64 - partTp property.PartitionType + partTp property.MPPPartitionType hashCols []*expression.Column } @@ -2091,7 +2095,7 @@ func (t *mppTask) convertToRootTaskImpl(ctx sessionctx.Context) *rootTask { cst := t.cst + t.count()*ctx.GetSessionVars().GetNetworkFactor(nil) p.cost = cst / 
p.ctx.GetSessionVars().CopTiFlashConcurrencyFactor if p.ctx.GetSessionVars().IsMPPEnforced() { - p.cost = 0 + p.cost = cst / 1000000000 } rt := &rootTask{ p: p, @@ -2101,7 +2105,7 @@ func (t *mppTask) convertToRootTaskImpl(ctx sessionctx.Context) *rootTask { } func (t *mppTask) needEnforce(prop *property.PhysicalProperty) bool { - switch prop.PartitionTp { + switch prop.MPPPartitionTp { case property.AnyType: return false case property.BroadcastType: @@ -2111,10 +2115,10 @@ func (t *mppTask) needEnforce(prop *property.PhysicalProperty) bool { return true } // TODO: consider equalivant class - if len(prop.PartitionCols) != len(t.hashCols) { + if len(prop.MPPPartitionCols) != len(t.hashCols) { return true } - for i, col := range prop.PartitionCols { + for i, col := range prop.MPPPartitionCols { if !col.Equal(nil, t.hashCols[i]) { return true } @@ -2125,6 +2129,7 @@ func (t *mppTask) needEnforce(prop *property.PhysicalProperty) bool { func (t *mppTask) enforceExchanger(prop *property.PhysicalProperty) *mppTask { if len(prop.SortItems) != 0 { + t.p.SCtx().GetSessionVars().RaiseWarningWhenMPPEnforced("MPP mode may be blocked because operator `Sort` is not supported now.") return &mppTask{} } if !t.needEnforce(prop) { @@ -2134,17 +2139,18 @@ func (t *mppTask) enforceExchanger(prop *property.PhysicalProperty) *mppTask { } func (t *mppTask) enforceExchangerImpl(prop *property.PhysicalProperty) *mppTask { - if collate.NewCollationEnabled() && prop.PartitionTp == property.HashType { - for _, col := range prop.PartitionCols { + if collate.NewCollationEnabled() && prop.MPPPartitionTp == property.HashType { + for _, col := range prop.MPPPartitionCols { if types.IsString(col.RetType.Tp) { + t.p.SCtx().GetSessionVars().RaiseWarningWhenMPPEnforced("MPP mode may be blocked because when `new_collation_enabled` is true, HashJoin or HashAgg with string key is not supported now.") return &mppTask{cst: math.MaxFloat64} } } } ctx := t.p.SCtx() sender := PhysicalExchangeSender{ - ExchangeType: tipb.ExchangeType(prop.PartitionTp), - HashCols: prop.PartitionCols, + ExchangeType: tipb.ExchangeType(prop.MPPPartitionTp), + HashCols: prop.MPPPartitionCols, }.Init(ctx, t.p.statsInfo()) sender.SetChildren(t.p) receiver := PhysicalExchangeReceiver{}.Init(ctx, t.p.statsInfo()) @@ -2155,7 +2161,7 @@ func (t *mppTask) enforceExchangerImpl(prop *property.PhysicalProperty) *mppTask return &mppTask{ p: receiver, cst: cst, - partTp: prop.PartitionTp, - hashCols: prop.PartitionCols, + partTp: prop.MPPPartitionTp, + hashCols: prop.MPPPartitionCols, } } diff --git a/planner/core/testdata/enforce_mpp_suite_in.json b/planner/core/testdata/enforce_mpp_suite_in.json new file mode 100644 index 0000000000000..8f80d928190cf --- /dev/null +++ b/planner/core/testdata/enforce_mpp_suite_in.json @@ -0,0 +1,67 @@ +[ + { + "name": "TestEnforceMPP", + "cases": [ + "select @@tidb_allow_mpp", + "select @@tidb_enforce_mpp", + "select @@tidb_opt_tiflash_concurrency_factor", + "set @@tidb_allow_mpp=0", + "explain format='verbose' select count(*) from t where a=1", + "explain format='verbose' select /*+ read_from_storage(tikv[t]) */ count(*) from t where a=1", + "explain format='verbose' select /*+ read_from_storage(tiflash[t]) */ count(*) from t where a=1", + "set @@tidb_allow_mpp=1;", + "set @@tidb_enforce_mpp=0;", + "explain format='verbose' select count(*) from t where a=1", + "explain format='verbose' select /*+ read_from_storage(tikv[t]) */ count(*) from t where a=1", + "explain format='verbose' select /*+ read_from_storage(tiflash[t]) */ 
count(*) from t where a=1", + "set @@tidb_opt_tiflash_concurrency_factor = 1000000", + "explain format='verbose' select count(*) from t where a=1", + "explain format='verbose' select /*+ read_from_storage(tikv[t]) */ count(*) from t where a=1", + "explain format='verbose' select /*+ read_from_storage(tiflash[t]) */ count(*) from t where a=1", + "set @@tidb_enforce_mpp=1;", + "explain format='verbose' select count(*) from t where a=1", + "explain format='verbose' select /*+ read_from_storage(tikv[t]) */ count(*) from t where a=1", + "explain format='verbose' select /*+ read_from_storage(tiflash[t]) */ count(*) from t where a=1" + ] + }, + { + "name": "TestEnforceMPPWarning1", + "cases": [ + "set @@tidb_allow_mpp=1;set @@tidb_enforce_mpp=1;", + "explain select count(*) from t where a=1 -- 1. no replica", + "cmd: create-replica", + "explain select count(*) from t where a=1 -- 2. replica not ready", + "cmd: enable-replica", + "set @@session.tidb_isolation_read_engines = 'tikv';", + "explain select count(*) from t where a=1 -- 3. isolation_engine not match", + "set @@session.tidb_isolation_read_engines = 'tikv, tiflash';", + "explain select /*+ read_from_storage(tikv[t]) */ count(*) from t where a=1 -- 4. hint use tikv", + "explain SELECT a, ROW_NUMBER() OVER (ORDER BY a) FROM t; -- 5. window unsupported", + "EXPLAIN SELECT t1.b FROM t t1 join t t2 where t1.a=t2.a; -- 6. virtual column", + "EXPLAIN SELECT count(b) from t where a=1; -- 7. agg func has virtual column", + "EXPLAIN SELECT count(*) from t group by b; -- 8. group by virtual column", + "EXPLAIN SELECT group_concat(a) from t; -- 9. agg func not supported", + "EXPLAIN SELECT count(a) from t group by md5(a); -- 10. scalar func not supported", + "EXPLAIN SELECT count(a) from t where c=1; -- 11. type not supported" + ] + }, + { + "name": "TestEnforceMPPWarning2", + "cases": [ + "set @@tidb_allow_mpp=1;set @@tidb_enforce_mpp=1;", + "set @@tidb_partition_prune_mode=static;", + "EXPLAIN SELECT count(*) from t where a=1; -- 12. static partition prune", + "set @@tidb_partition_prune_mode=dynamic;" + + ] + }, + { + "name": "TestEnforceMPPWarning3", + "cases": [ + "set @@tidb_allow_mpp=1;set @@tidb_enforce_mpp=1;", + "cmd: enable-new-collation", + "EXPLAIN SELECT count(*) from t group by b; -- 13. new collation FIXME", + "EXPLAIN SELECT * from t t1 join t t2 on t1.b=t2.b; -- 13. 
new collation FIXME" + ] + } +] diff --git a/planner/core/testdata/enforce_mpp_suite_out.json b/planner/core/testdata/enforce_mpp_suite_out.json new file mode 100644 index 0000000000000..372a69d73513f --- /dev/null +++ b/planner/core/testdata/enforce_mpp_suite_out.json @@ -0,0 +1,438 @@ +[ + { + "Name": "TestEnforceMPP", + "Cases": [ + { + "SQL": "select @@tidb_allow_mpp", + "Plan": [ + "1" + ], + "Warn": null + }, + { + "SQL": "select @@tidb_enforce_mpp", + "Plan": [ + "0" + ], + "Warn": null + }, + { + "SQL": "select @@tidb_opt_tiflash_concurrency_factor", + "Plan": [ + "24" + ], + "Warn": null + }, + { + "SQL": "set @@tidb_allow_mpp=0", + "Plan": null, + "Warn": null + }, + { + "SQL": "explain format='verbose' select count(*) from t where a=1", + "Plan": [ + "StreamAgg_24 1.00 485.00 root funcs:count(Column#6)->Column#4", + "└─IndexReader_25 1.00 32.88 root index:StreamAgg_9", + " └─StreamAgg_9 1.00 35.88 cop[tikv] funcs:count(1)->Column#6", + " └─IndexRangeScan_23 10.00 455.00 cop[tikv] table:t, index:idx(a) range:[1,1], keep order:false, stats:pseudo" + ], + "Warn": null + }, + { + "SQL": "explain format='verbose' select /*+ read_from_storage(tikv[t]) */ count(*) from t where a=1", + "Plan": [ + "StreamAgg_17 1.00 485.00 root funcs:count(Column#6)->Column#4", + "└─IndexReader_18 1.00 32.88 root index:StreamAgg_9", + " └─StreamAgg_9 1.00 35.88 cop[tikv] funcs:count(1)->Column#6", + " └─IndexRangeScan_16 10.00 455.00 cop[tikv] table:t, index:idx(a) range:[1,1], keep order:false, stats:pseudo" + ], + "Warn": null + }, + { + "SQL": "explain format='verbose' select /*+ read_from_storage(tiflash[t]) */ count(*) from t where a=1", + "Plan": [ + "StreamAgg_20 1.00 285050.00 root funcs:count(Column#6)->Column#4", + "└─TableReader_21 1.00 19003.88 root data:StreamAgg_9", + " └─StreamAgg_9 1.00 19006.88 batchCop[tiflash] funcs:count(1)->Column#6", + " └─Selection_19 10.00 285020.00 batchCop[tiflash] eq(test.t.a, 1)", + " └─TableFullScan_18 10000.00 255020.00 batchCop[tiflash] table:t keep order:false, stats:pseudo" + ], + "Warn": null + }, + { + "SQL": "set @@tidb_allow_mpp=1;", + "Plan": null, + "Warn": null + }, + { + "SQL": "set @@tidb_enforce_mpp=0;", + "Plan": null, + "Warn": null + }, + { + "SQL": "explain format='verbose' select count(*) from t where a=1", + "Plan": [ + "StreamAgg_30 1.00 485.00 root funcs:count(Column#7)->Column#4", + "└─IndexReader_31 1.00 32.88 root index:StreamAgg_10", + " └─StreamAgg_10 1.00 35.88 cop[tikv] funcs:count(1)->Column#7", + " └─IndexRangeScan_29 10.00 455.00 cop[tikv] table:t, index:idx(a) range:[1,1], keep order:false, stats:pseudo" + ], + "Warn": null + }, + { + "SQL": "explain format='verbose' select /*+ read_from_storage(tikv[t]) */ count(*) from t where a=1", + "Plan": [ + "StreamAgg_18 1.00 485.00 root funcs:count(Column#6)->Column#4", + "└─IndexReader_19 1.00 32.88 root index:StreamAgg_10", + " └─StreamAgg_10 1.00 35.88 cop[tikv] funcs:count(1)->Column#6", + " └─IndexRangeScan_17 10.00 455.00 cop[tikv] table:t, index:idx(a) range:[1,1], keep order:false, stats:pseudo" + ], + "Warn": null + }, + { + "SQL": "explain format='verbose' select /*+ read_from_storage(tiflash[t]) */ count(*) from t where a=1", + "Plan": [ + "HashAgg_21 1.00 11910.73 root funcs:count(Column#6)->Column#4", + "└─TableReader_23 1.00 11877.13 root data:ExchangeSender_22", + " └─ExchangeSender_22 1.00 285050.00 batchCop[tiflash] ExchangeType: PassThrough", + " └─HashAgg_9 1.00 285050.00 batchCop[tiflash] funcs:count(1)->Column#6", + " └─Selection_20 10.00 285020.00 
batchCop[tiflash] eq(test.t.a, 1)", + " └─TableFullScan_19 10000.00 255020.00 batchCop[tiflash] table:t keep order:false, stats:pseudo" + ], + "Warn": null + }, + { + "SQL": "set @@tidb_opt_tiflash_concurrency_factor = 1000000", + "Plan": null, + "Warn": null + }, + { + "SQL": "explain format='verbose' select count(*) from t where a=1", + "Plan": [ + "HashAgg_24 1.00 33.89 root funcs:count(Column#6)->Column#4", + "└─TableReader_26 1.00 0.29 root data:ExchangeSender_25", + " └─ExchangeSender_25 1.00 285050.00 batchCop[tiflash] ExchangeType: PassThrough", + " └─HashAgg_9 1.00 285050.00 batchCop[tiflash] funcs:count(1)->Column#6", + " └─Selection_23 10.00 285020.00 batchCop[tiflash] eq(test.t.a, 1)", + " └─TableFullScan_22 10000.00 255020.00 batchCop[tiflash] table:t keep order:false, stats:pseudo" + ], + "Warn": null + }, + { + "SQL": "explain format='verbose' select /*+ read_from_storage(tikv[t]) */ count(*) from t where a=1", + "Plan": [ + "StreamAgg_18 1.00 485.00 root funcs:count(Column#6)->Column#4", + "└─IndexReader_19 1.00 32.88 root index:StreamAgg_10", + " └─StreamAgg_10 1.00 35.88 cop[tikv] funcs:count(1)->Column#6", + " └─IndexRangeScan_17 10.00 455.00 cop[tikv] table:t, index:idx(a) range:[1,1], keep order:false, stats:pseudo" + ], + "Warn": null + }, + { + "SQL": "explain format='verbose' select /*+ read_from_storage(tiflash[t]) */ count(*) from t where a=1", + "Plan": [ + "HashAgg_21 1.00 33.89 root funcs:count(Column#6)->Column#4", + "└─TableReader_23 1.00 0.29 root data:ExchangeSender_22", + " └─ExchangeSender_22 1.00 285050.00 batchCop[tiflash] ExchangeType: PassThrough", + " └─HashAgg_9 1.00 285050.00 batchCop[tiflash] funcs:count(1)->Column#6", + " └─Selection_20 10.00 285020.00 batchCop[tiflash] eq(test.t.a, 1)", + " └─TableFullScan_19 10000.00 255020.00 batchCop[tiflash] table:t keep order:false, stats:pseudo" + ], + "Warn": null + }, + { + "SQL": "set @@tidb_enforce_mpp=1;", + "Plan": null, + "Warn": null + }, + { + "SQL": "explain format='verbose' select count(*) from t where a=1", + "Plan": [ + "HashAgg_24 1.00 33.60 root funcs:count(Column#6)->Column#4", + "└─TableReader_26 1.00 0.00 root data:ExchangeSender_25", + " └─ExchangeSender_25 1.00 285050.00 batchCop[tiflash] ExchangeType: PassThrough", + " └─HashAgg_9 1.00 285050.00 batchCop[tiflash] funcs:count(1)->Column#6", + " └─Selection_23 10.00 285020.00 batchCop[tiflash] eq(test.t.a, 1)", + " └─TableFullScan_22 10000.00 255020.00 batchCop[tiflash] table:t keep order:false, stats:pseudo" + ], + "Warn": null + }, + { + "SQL": "explain format='verbose' select /*+ read_from_storage(tikv[t]) */ count(*) from t where a=1", + "Plan": [ + "StreamAgg_18 1.00 485.00 root funcs:count(Column#6)->Column#4", + "└─IndexReader_19 1.00 32.88 root index:StreamAgg_10", + " └─StreamAgg_10 1.00 35.88 cop[tikv] funcs:count(1)->Column#6", + " └─IndexRangeScan_17 10.00 455.00 cop[tikv] table:t, index:idx(a) range:[1,1], keep order:false, stats:pseudo" + ], + "Warn": [ + "MPP mode may be blocked because you have set a hint to read table `t` from TiKV." 
+ ] + }, + { + "SQL": "explain format='verbose' select /*+ read_from_storage(tiflash[t]) */ count(*) from t where a=1", + "Plan": [ + "HashAgg_21 1.00 33.60 root funcs:count(Column#6)->Column#4", + "└─TableReader_23 1.00 0.00 root data:ExchangeSender_22", + " └─ExchangeSender_22 1.00 285050.00 batchCop[tiflash] ExchangeType: PassThrough", + " └─HashAgg_9 1.00 285050.00 batchCop[tiflash] funcs:count(1)->Column#6", + " └─Selection_20 10.00 285020.00 batchCop[tiflash] eq(test.t.a, 1)", + " └─TableFullScan_19 10000.00 255020.00 batchCop[tiflash] table:t keep order:false, stats:pseudo" + ], + "Warn": null + } + ] + }, + { + "Name": "TestEnforceMPPWarning1", + "Cases": [ + { + "SQL": "set @@tidb_allow_mpp=1;set @@tidb_enforce_mpp=1;", + "Plan": null, + "Warn": null + }, + { + "SQL": "explain select count(*) from t where a=1 -- 1. no replica", + "Plan": [ + "StreamAgg_17 1.00 root funcs:count(Column#7)->Column#5", + "└─IndexReader_18 1.00 root index:StreamAgg_9", + " └─StreamAgg_9 1.00 cop[tikv] funcs:count(1)->Column#7", + " └─IndexRangeScan_16 10.00 cop[tikv] table:t, index:idx(a) range:[1,1], keep order:false, stats:pseudo" + ], + "Warn": [ + "MPP mode may be blocked because there aren't tiflash replicas of table `t`." + ] + }, + { + "SQL": "cmd: create-replica", + "Plan": null, + "Warn": null + }, + { + "SQL": "explain select count(*) from t where a=1 -- 2. replica not ready", + "Plan": [ + "StreamAgg_17 1.00 root funcs:count(Column#7)->Column#5", + "└─IndexReader_18 1.00 root index:StreamAgg_9", + " └─StreamAgg_9 1.00 cop[tikv] funcs:count(1)->Column#7", + " └─IndexRangeScan_16 10.00 cop[tikv] table:t, index:idx(a) range:[1,1], keep order:false, stats:pseudo" + ], + "Warn": [ + "MPP mode may be blocked because tiflash replicas of table `t` not ready." + ] + }, + { + "SQL": "cmd: enable-replica", + "Plan": null, + "Warn": null + }, + { + "SQL": "set @@session.tidb_isolation_read_engines = 'tikv';", + "Plan": null, + "Warn": null + }, + { + "SQL": "explain select count(*) from t where a=1 -- 3. isolation_engine not match", + "Plan": [ + "StreamAgg_17 1.00 root funcs:count(Column#7)->Column#5", + "└─IndexReader_18 1.00 root index:StreamAgg_9", + " └─StreamAgg_9 1.00 cop[tikv] funcs:count(1)->Column#7", + " └─IndexRangeScan_16 10.00 cop[tikv] table:t, index:idx(a) range:[1,1], keep order:false, stats:pseudo" + ], + "Warn": [ + "MPP mode may be blocked because 'tidb_isolation_read_engines'(value: 'tikv') not match, need 'tiflash'." + ] + }, + { + "SQL": "set @@session.tidb_isolation_read_engines = 'tikv, tiflash';", + "Plan": null, + "Warn": null + }, + { + "SQL": "explain select /*+ read_from_storage(tikv[t]) */ count(*) from t where a=1 -- 4. hint use tikv", + "Plan": [ + "StreamAgg_18 1.00 root funcs:count(Column#7)->Column#5", + "└─IndexReader_19 1.00 root index:StreamAgg_10", + " └─StreamAgg_10 1.00 cop[tikv] funcs:count(1)->Column#7", + " └─IndexRangeScan_17 10.00 cop[tikv] table:t, index:idx(a) range:[1,1], keep order:false, stats:pseudo" + ], + "Warn": [ + "MPP mode may be blocked because you have set a hint to read table `t` from TiKV." + ] + }, + { + "SQL": "explain SELECT a, ROW_NUMBER() OVER (ORDER BY a) FROM t; -- 5. 
window unsupported", + "Plan": [ + "Window_7 10000.00 root row_number()->Column#6 over(order by test.t.a rows between current row and current row)", + "└─IndexReader_9 10000.00 root index:IndexFullScan_8", + " └─IndexFullScan_8 10000.00 cop[tikv] table:t, index:idx(a) keep order:true, stats:pseudo" + ], + "Warn": [ + "MPP mode may be blocked because operator `Window` is not supported now." + ] + }, + { + "SQL": "EXPLAIN SELECT t1.b FROM t t1 join t t2 where t1.a=t2.a; -- 6. virtual column", + "Plan": [ + "HashJoin_35 12487.50 root inner join, equal:[eq(test.t.a, test.t.a)]", + "├─TableReader_55(Build) 9990.00 root data:Selection_54", + "│ └─Selection_54 9990.00 cop[tiflash] not(isnull(test.t.a))", + "│ └─TableFullScan_53 10000.00 cop[tiflash] table:t2 keep order:false, stats:pseudo", + "└─TableReader_49(Probe) 9990.00 root data:Selection_48", + " └─Selection_48 9990.00 cop[tiflash] not(isnull(test.t.a))", + " └─TableFullScan_47 10000.00 cop[tiflash] table:t1 keep order:false, stats:pseudo" + ], + "Warn": [ + "MPP mode may be blocked because column `test.t.b` is a virtual column which is not supported now." + ] + }, + { + "SQL": "EXPLAIN SELECT count(b) from t where a=1; -- 7. agg func has virtual column", + "Plan": [ + "StreamAgg_10 1.00 root funcs:count(test.t.b)->Column#5", + "└─IndexLookUp_41 10.00 root ", + " ├─IndexRangeScan_39(Build) 10.00 cop[tikv] table:t, index:idx(a) range:[1,1], keep order:false, stats:pseudo", + " └─TableRowIDScan_40(Probe) 10.00 cop[tikv] table:t keep order:false, stats:pseudo" + ], + "Warn": [ + "MPP mode may be blocked because expressions of AggFunc `count` contain virtual column or correlated column, which is not supported now.", + "MPP mode may be blocked because expressions of AggFunc `count` contain virtual column or correlated column, which is not supported now.", + "MPP mode may be blocked because expressions of AggFunc `count` contain virtual column or correlated column, which is not supported now.", + "MPP mode may be blocked because expressions of AggFunc `count` contain virtual column or correlated column, which is not supported now." + ] + }, + { + "SQL": "EXPLAIN SELECT count(*) from t group by b; -- 8. group by virtual column", + "Plan": [ + "HashAgg_5 8000.00 root group by:test.t.b, funcs:count(1)->Column#5", + "└─Projection_11 10000.00 root test.t.b", + " └─TableReader_10 10000.00 root data:TableFullScan_9", + " └─TableFullScan_9 10000.00 cop[tiflash] table:t keep order:false, stats:pseudo" + ], + "Warn": [ + "MPP mode may be blocked because groupByItems contain virtual column, which is not supported now.", + "MPP mode may be blocked because groupByItems contain virtual column, which is not supported now." + ] + }, + { + "SQL": "EXPLAIN SELECT group_concat(a) from t; -- 9. agg func not supported", + "Plan": [ + "HashAgg_5 1.00 root funcs:group_concat(Column#6 separator \",\")->Column#5", + "└─Projection_30 10000.00 root cast(test.t.a, var_string(20))->Column#6", + " └─TableReader_13 10000.00 root data:TableFullScan_11", + " └─TableFullScan_11 10000.00 cop[tiflash] table:t keep order:false, stats:pseudo" + ], + "Warn": [ + "MPP mode may be blocked because AggFunc `group_concat` is not supported now.", + "MPP mode may be blocked because AggFunc `group_concat` is not supported now.", + "MPP mode may be blocked because AggFunc `group_concat` is not supported now." + ] + }, + { + "SQL": "EXPLAIN SELECT count(a) from t group by md5(a); -- 10. 
scalar func not supported", + "Plan": [ + "HashAgg_5 8000.00 root group by:Column#7, funcs:count(Column#6)->Column#5", + "└─Projection_18 10000.00 root test.t.a, md5(cast(test.t.a, var_string(20)))->Column#7", + " └─TableReader_11 10000.00 root data:TableFullScan_9", + " └─TableFullScan_9 10000.00 cop[tiflash] table:t keep order:false, stats:pseudo" + ], + "Warn": [ + "Scalar function 'md5'(signature: MD5) can not be pushed to tiflash", + "Scalar function 'md5'(signature: MD5) can not be pushed to tiflash" + ] + }, + { + "SQL": "EXPLAIN SELECT count(a) from t where c=1; -- 11. type not supported", + "Plan": [ + "HashAgg_6 1.00 root funcs:count(test.t.a)->Column#5", + "└─Selection_16 10000.00 root eq(test.t.c, 00:00:01.000000)", + " └─TableReader_15 10000.00 root data:TableFullScan_14", + " └─TableFullScan_14 10000.00 cop[tiflash] table:t keep order:false, stats:pseudo" + ], + "Warn": [ + "Expr 'test.t.c' can not be pushed to TiFlash because it contains Duration type", + "Expr 'test.t.c' can not be pushed to TiFlash because it contains Duration type", + "Expr 'test.t.c' can not be pushed to TiFlash because it contains Duration type", + "Expr 'test.t.c' can not be pushed to TiFlash because it contains Duration type", + "Expr 'test.t.c' can not be pushed to TiFlash because it contains Duration type" + ] + } + ] + }, + { + "Name": "TestEnforceMPPWarning2", + "Cases": [ + { + "SQL": "set @@tidb_allow_mpp=1;set @@tidb_enforce_mpp=1;", + "Plan": null, + "Warn": null + }, + { + "SQL": "set @@tidb_partition_prune_mode=static;", + "Plan": null, + "Warn": null + }, + { + "SQL": "EXPLAIN SELECT count(*) from t where a=1; -- 12. static partition prune", + "Plan": [ + "StreamAgg_31 1.00 root funcs:count(Column#6)->Column#4", + "└─TableReader_32 1.00 root data:StreamAgg_12", + " └─StreamAgg_12 1.00 batchCop[tiflash] funcs:count(1)->Column#6", + " └─Selection_30 10.00 batchCop[tiflash] eq(test.t.a, 1)", + " └─TableFullScan_29 10000.00 batchCop[tiflash] table:t, partition:p0 keep order:false, stats:pseudo" + ], + "Warn": [ + "MPP mode may be blocked because table `t`is a partition table which is not supported when `@@tidb_partition_prune_mode=static`." + ] + }, + { + "SQL": "set @@tidb_partition_prune_mode=dynamic;", + "Plan": null, + "Warn": null + } + ] + }, + { + "Name": "TestEnforceMPPWarning3", + "Cases": [ + { + "SQL": "set @@tidb_allow_mpp=1;set @@tidb_enforce_mpp=1;", + "Plan": null, + "Warn": null + }, + { + "SQL": "cmd: enable-new-collation", + "Plan": null, + "Warn": null + }, + { + "SQL": "EXPLAIN SELECT count(*) from t group by b; -- 13. new collation FIXME", + "Plan": [ + "HashAgg_23 8000.00 root group by:test.t.b, funcs:count(Column#7)->Column#4", + "└─TableReader_25 8000.00 root data:ExchangeSender_24", + " └─ExchangeSender_24 8000.00 batchCop[tiflash] ExchangeType: PassThrough", + " └─HashAgg_10 8000.00 batchCop[tiflash] group by:test.t.b, funcs:count(1)->Column#7", + " └─TableFullScan_20 10000.00 batchCop[tiflash] table:t keep order:false, stats:pseudo" + ], + "Warn": [ + "MPP mode may be blocked because when `new_collation_enabled` is true, HashJoin or HashAgg with string key is not supported now.", + "MPP mode may be blocked because when `new_collation_enabled` is true, HashJoin or HashAgg with string key is not supported now." + ] + }, + { + "SQL": "EXPLAIN SELECT * from t t1 join t t2 on t1.b=t2.b; -- 13. 
new collation FIXME", + "Plan": [ + "TableReader_18 12487.50 root data:ExchangeSender_17", + "└─ExchangeSender_17 12487.50 cop[tiflash] ExchangeType: PassThrough", + " └─HashJoin_8 12487.50 cop[tiflash] inner join, equal:[eq(test.t.b, test.t.b)]", + " ├─ExchangeReceiver_14(Build) 9990.00 cop[tiflash] ", + " │ └─ExchangeSender_13 9990.00 cop[tiflash] ExchangeType: Broadcast", + " │ └─Selection_12 9990.00 cop[tiflash] not(isnull(test.t.b))", + " │ └─TableFullScan_11 10000.00 cop[tiflash] table:t1 keep order:false, stats:pseudo", + " └─Selection_16(Probe) 9990.00 cop[tiflash] not(isnull(test.t.b))", + " └─TableFullScan_15 10000.00 cop[tiflash] table:t2 keep order:false, stats:pseudo" + ], + "Warn": null + } + ] + } +] diff --git a/planner/property/physical_property.go b/planner/property/physical_property.go index 8ddb1a6212437..51cf72572fc9d 100644 --- a/planner/property/physical_property.go +++ b/planner/property/physical_property.go @@ -30,12 +30,12 @@ type SortItem struct { Desc bool } -// PartitionType is the way to partition during mpp data exchanging. -type PartitionType int +// MPPPartitionType is the way to partition during mpp data exchanging. +type MPPPartitionType int const ( // AnyType will not require any special partition types. - AnyType PartitionType = iota + AnyType MPPPartitionType = iota // BroadcastType requires current task to broadcast its data. BroadcastType // HashType requires current task to shuffle its data according to some columns. @@ -70,10 +70,10 @@ type PhysicalProperty struct { CanAddEnforcer bool // If the partition type is hash, the data should be reshuffled by partition cols. - PartitionCols []*expression.Column + MPPPartitionCols []*expression.Column // which types the exchange sender belongs to, only take effects when it's a mpp task. - PartitionTp PartitionType + MPPPartitionTp MPPPartitionType } // NewPhysicalProperty builds property from columns. @@ -97,11 +97,11 @@ func SortItemsFromCols(cols []*expression.Column, desc bool) []SortItem { // IsSubsetOf check if the keys can match the needs of partition. func (p *PhysicalProperty) IsSubsetOf(keys []*expression.Column) []int { - if len(p.PartitionCols) > len(keys) { + if len(p.MPPPartitionCols) > len(keys) { return nil } matches := make([]int, 0, len(keys)) - for _, partCol := range p.PartitionCols { + for _, partCol := range p.MPPPartitionCols { found := false for i, key := range keys { if partCol.Equal(nil, key) { @@ -183,8 +183,8 @@ func (p *PhysicalProperty) HashCode() []byte { } } if p.TaskTp == MppTaskType { - p.hashcode = codec.EncodeInt(p.hashcode, int64(p.PartitionTp)) - for _, col := range p.PartitionCols { + p.hashcode = codec.EncodeInt(p.hashcode, int64(p.MPPPartitionTp)) + for _, col := range p.MPPPartitionCols { p.hashcode = append(p.hashcode, col.HashCode(nil)...) } } @@ -200,11 +200,11 @@ func (p *PhysicalProperty) String() string { // property, specifically, `CanAddEnforcer` should not be included. 
func (p *PhysicalProperty) CloneEssentialFields() *PhysicalProperty { prop := &PhysicalProperty{ - SortItems: p.SortItems, - TaskTp: p.TaskTp, - ExpectedCnt: p.ExpectedCnt, - PartitionTp: p.PartitionTp, - PartitionCols: p.PartitionCols, + SortItems: p.SortItems, + TaskTp: p.TaskTp, + ExpectedCnt: p.ExpectedCnt, + MPPPartitionTp: p.MPPPartitionTp, + MPPPartitionCols: p.MPPPartitionCols, } return prop } diff --git a/privilege/privileges/privileges_test.go b/privilege/privileges/privileges_test.go index 1bb69be14c826..4d0b42a6d37ff 100644 --- a/privilege/privileges/privileges_test.go +++ b/privilege/privileges/privileges_test.go @@ -468,6 +468,28 @@ func (s *testPrivilegeSuite) TestSetPasswdStmt(c *C) { c.Assert(err, NotNil) } +func (s *testPrivilegeSuite) TestAlterUserStmt(c *C) { + se := newSession(c, s.store, s.dbName) + + // high privileged user setting password for other user (passes) + mustExec(c, se, "CREATE USER 'superuser2'") + mustExec(c, se, "CREATE USER 'nobodyuser2'") + mustExec(c, se, "CREATE USER 'nobodyuser3'") + mustExec(c, se, "GRANT ALL ON *.* TO 'superuser2'") + mustExec(c, se, "GRANT CREATE USER ON *.* TO 'nobodyuser2'") + + c.Assert(se.Auth(&auth.UserIdentity{Username: "superuser2", Hostname: "localhost", AuthUsername: "superuser2", AuthHostname: "%"}, nil, nil), IsTrue) + mustExec(c, se, "ALTER USER 'nobodyuser2' IDENTIFIED BY 'newpassword'") + mustExec(c, se, "ALTER USER 'nobodyuser2' IDENTIFIED BY ''") + + // low privileged user trying to set password for other user (fails) + c.Assert(se.Auth(&auth.UserIdentity{Username: "nobodyuser2", Hostname: "localhost", AuthUsername: "nobodyuser2", AuthHostname: "%"}, nil, nil), IsTrue) + mustExec(c, se, "ALTER USER 'nobodyuser2' IDENTIFIED BY 'newpassword'") + mustExec(c, se, "ALTER USER 'nobodyuser2' IDENTIFIED BY ''") + _, err := se.ExecuteInternal(context.Background(), "ALTER USER 'superuser2' IDENTIFIED BY 'newpassword'") + c.Assert(err, NotNil) +} + func (s *testPrivilegeSuite) TestSelectViewSecurity(c *C) { se := newSession(c, s.store, s.dbName) ctx, _ := se.(sessionctx.Context) diff --git a/server/http_handler_test.go b/server/http_handler_test.go index abb466f897080..fbfce82624021 100644 --- a/server/http_handler_test.go +++ b/server/http_handler_test.go @@ -1238,6 +1238,7 @@ func (ts *HTTPHandlerTestSerialSuite) TestPostSettings(c *C) { form = make(url.Values) form.Set("tidb_deadlock_history_capacity", "5") resp, err = ts.formStatus("/settings", form) + c.Assert(err, IsNil) c.Assert(len(deadlockhistory.GlobalDeadlockHistory.GetAll()), Equals, 5) c.Assert(deadlockhistory.GlobalDeadlockHistory.GetAll()[0].ID, Equals, uint64(6)) c.Assert(deadlockhistory.GlobalDeadlockHistory.GetAll()[4].ID, Equals, uint64(10)) @@ -1248,6 +1249,7 @@ func (ts *HTTPHandlerTestSerialSuite) TestPostSettings(c *C) { form = make(url.Values) form.Set("tidb_deadlock_history_capacity", "6") resp, err = ts.formStatus("/settings", form) + c.Assert(err, IsNil) deadlockhistory.GlobalDeadlockHistory.Push(dummyRecord()) c.Assert(len(deadlockhistory.GlobalDeadlockHistory.GetAll()), Equals, 6) c.Assert(deadlockhistory.GlobalDeadlockHistory.GetAll()[0].ID, Equals, uint64(7)) diff --git a/sessionctx/variable/session.go b/sessionctx/variable/session.go index 772882153a134..2556c2a37c736 100644 --- a/sessionctx/variable/session.go +++ b/sessionctx/variable/session.go @@ -28,6 +28,7 @@ import ( "sync/atomic" "time" + "github.com/pingcap/errors" "github.com/pingcap/parser" "github.com/pingcap/parser/ast" "github.com/pingcap/parser/auth" @@ -508,9 +509,16 @@ type 
SessionVars struct {
 	// Value set to 2 means to force to send batch cop for any query. Value set to 0 means never use batch cop.
 	AllowBatchCop int
-	// AllowMPPExecution means if we should use mpp way to execute query. Default value is "ON", means to be determined by the optimizer.
-	// Value set to "ENFORCE" means to use mpp whenever possible. Value set to means never use mpp.
-	allowMPPExecution string
+	// allowMPPExecution means whether we should use the MPP way to execute queries.
+	// The default value is `true`, which leaves the choice to the optimizer.
+	// Setting it to `false` means MPP is never used.
+	allowMPPExecution bool
+
+	// enforceMPPExecution means whether we should enforce the MPP way to execute queries.
+	// The default value is `false`, which means the decision is left to the variable `allowMPPExecution`.
+	// Setting it to `true` means MPP execution is enforced.
+	// Note that to set `enforceMPPExecution` to `true`, you must set `allowMPPExecution` to `true` first.
+	enforceMPPExecution bool
 
 	// TiDBAllowAutoRandExplicitInsert indicates whether explicit insertion on auto_random column is allowed.
 	AllowAutoRandExplicitInsert bool
@@ -860,12 +868,21 @@ func (s *SessionVars) AllocMPPTaskID(startTS uint64) int64 {
 
 // IsMPPAllowed returns whether mpp execution is allowed.
 func (s *SessionVars) IsMPPAllowed() bool {
-	return s.allowMPPExecution != "OFF"
+	return s.allowMPPExecution
 }
 
 // IsMPPEnforced returns whether mpp execution is enforced.
 func (s *SessionVars) IsMPPEnforced() bool {
-	return s.allowMPPExecution == "ENFORCE"
+	return s.allowMPPExecution && s.enforceMPPExecution
+}
+
+// RaiseWarningWhenMPPEnforced appends a warning when MPP mode is enforced and an EXPLAIN statement is being executed.
+// TODO: Confirm whether this function will be inlined, so that the overhead of
+// constructing the warning string is avoided when the condition is false.
+func (s *SessionVars) RaiseWarningWhenMPPEnforced(warning string) {
+	if s.IsMPPEnforced() && s.StmtCtx.InExplainStmt {
+		s.StmtCtx.AppendWarning(errors.New(warning))
+	}
 }
 
 // CheckAndGetTxnScope will return the transaction scope we should use in the current session.
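The two new session flags compose as follows: MPP is considered only when allowMPPExecution is true, and IsMPPEnforced additionally requires enforceMPPExecution. A minimal sketch (not part of this patch; chooseMPPExecution and its warning text are hypothetical) of how a planner-side call site is expected to consult the new helpers:

package example

import "github.com/pingcap/tidb/sessionctx/variable"

// chooseMPPExecution illustrates the intended use of the new helpers: bail out
// when MPP is disallowed, and surface a warning for unsupported operators only
// when MPP is enforced and the statement is an EXPLAIN.
func chooseMPPExecution(sv *variable.SessionVars, operatorSupported bool) bool {
	if !sv.IsMPPAllowed() {
		return false // tidb_allow_mpp = 0: never choose the MPP path
	}
	if !operatorSupported {
		// Appended only when tidb_enforce_mpp = 1 and the statement is an EXPLAIN.
		sv.RaiseWarningWhenMPPEnforced("MPP mode may be blocked because the operator is not supported")
		return false
	}
	return true
}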
@@ -1096,6 +1113,7 @@ func NewSessionVars() *SessionVars {
 	vars.AllowBatchCop = DefTiDBAllowBatchCop
 	vars.allowMPPExecution = DefTiDBAllowMPPExecution
+	vars.enforceMPPExecution = DefTiDBEnforceMPPExecution
 
 	var enableChunkRPC string
 	if config.GetGlobalConfig().TiKVClient.EnableChunkRPC {
diff --git a/sessionctx/variable/sysvar.go b/sessionctx/variable/sysvar.go
index ff8650317dfa8..841fabdbd7238 100644
--- a/sessionctx/variable/sysvar.go
+++ b/sessionctx/variable/sysvar.go
@@ -836,8 +836,17 @@ var defaultSysVars = []*SysVar{
 		}
 		return normalizedValue, nil
 	}},
-	{Scope: ScopeGlobal | ScopeSession, Name: TiDBAllowMPPExecution, Value: On, Type: TypeEnum, PossibleValues: []string{"OFF", "ON", "ENFORCE"}, SetSession: func(s *SessionVars, val string) error {
-		s.allowMPPExecution = val
+	{Scope: ScopeGlobal | ScopeSession, Name: TiDBAllowMPPExecution, Type: TypeBool, Value: BoolToOnOff(DefTiDBAllowMPPExecution), SetSession: func(s *SessionVars, val string) error {
+		s.allowMPPExecution = TiDBOptOn(val)
+		return nil
+	}},
+	{Scope: ScopeGlobal | ScopeSession, Name: TiDBEnforceMPPExecution, Type: TypeBool, Value: BoolToOnOff(DefTiDBEnforceMPPExecution), Validation: func(vars *SessionVars, normalizedValue string, originalValue string, scope ScopeFlag) (string, error) {
+		if TiDBOptOn(normalizedValue) && !vars.allowMPPExecution {
+			return normalizedValue, ErrWrongValueForVar.GenWithStackByArgs("tidb_enforce_mpp", "1' but tidb_allow_mpp is 0, please activate tidb_allow_mpp first.")
+		}
+		return normalizedValue, nil
+	}, SetSession: func(s *SessionVars, val string) error {
+		s.enforceMPPExecution = TiDBOptOn(val)
 		return nil
 	}},
 	{Scope: ScopeGlobal | ScopeSession, Name: TiDBBCJThresholdCount, Value: strconv.Itoa(DefBroadcastJoinThresholdCount), Type: TypeInt, MinValue: 0, MaxValue: math.MaxInt64, SetSession: func(s *SessionVars, val string) error {
diff --git a/sessionctx/variable/tidb_vars.go b/sessionctx/variable/tidb_vars.go
index 2a5fd6360cacc..1bc9b3ebaa33f 100644
--- a/sessionctx/variable/tidb_vars.go
+++ b/sessionctx/variable/tidb_vars.go
@@ -299,10 +299,17 @@ const (
 	// The default value is 0
 	TiDBAllowBatchCop = "tidb_allow_batch_cop"
 
-	// TiDBAllowMPPExecution means if we should use mpp way to execute query. Default value is 1 (or 'ON'), means to be determined by the optimizer.
-	// Value set to 2 (or 'ENFORCE') which means to use mpp whenever possible. Value set to 2 (or 'OFF') means never use mpp.
+	// TiDBAllowMPPExecution means whether we should use the MPP way to execute queries or not.
+	// The default value is `true`, which leaves the choice to the optimizer.
+	// Setting it to `false` means MPP is never used.
 	TiDBAllowMPPExecution = "tidb_allow_mpp"
 
+	// TiDBEnforceMPPExecution means whether we should enforce the MPP way to execute queries or not.
+	// The default value is `false`, which means the decision is left to the variable `tidb_allow_mpp`.
+	// Setting it to `true` means MPP execution is enforced.
+	// Note that to set `tidb_enforce_mpp` to `true`, you must set `tidb_allow_mpp` to `true` first.
+	TiDBEnforceMPPExecution = "tidb_enforce_mpp"
+
 	// TiDBInitChunkSize is used to control the init chunk size during query execution.
TiDBInitChunkSize = "tidb_init_chunk_size" @@ -639,7 +646,8 @@ const ( DefBroadcastJoinThresholdCount = 10 * 1024 DefTiDBOptimizerSelectivityLevel = 0 DefTiDBAllowBatchCop = 1 - DefTiDBAllowMPPExecution = "ON" + DefTiDBAllowMPPExecution = true + DefTiDBEnforceMPPExecution = false DefTiDBTxnMode = "" DefTiDBRowFormatV1 = 1 DefTiDBRowFormatV2 = 2 diff --git a/store/gcworker/gc_worker.go b/store/gcworker/gc_worker.go index 1dbb3cde03880..155c84cc8c303 100644 --- a/store/gcworker/gc_worker.go +++ b/store/gcworker/gc_worker.go @@ -1897,7 +1897,7 @@ func (w *GCWorker) doGCPlacementRules(dr util.DelRangeTask) (pid int64, err erro return } // Notify PD to drop the placement rules, even if there may be no placement rules. - bundles := []*placement.Bundle{placement.BuildPlacementDropBundle(pid)} + bundles := []*placement.Bundle{placement.NewBundle(pid)} err = infosync.PutRuleBundles(context.TODO(), bundles) return } diff --git a/store/tikv/locate/region_request_test.go b/store/tikv/locate/region_request_test.go index 2ae06ef652620..ac67115928637 100644 --- a/store/tikv/locate/region_request_test.go +++ b/store/tikv/locate/region_request_test.go @@ -864,6 +864,7 @@ func (s *testRegionRequestToThreeStoresSuite) TestReplicaSelector(c *C) { region.lastAccess = time.Now().Unix() replicaSelector, err = newReplicaSelector(cache, regionLoc.Region) + c.Assert(err, IsNil) c.Assert(replicaSelector, NotNil) cache.testingKnobs.mockRequestLiveness = func(s *Store, bo *retry.Backoffer) livenessState { return reachable @@ -945,6 +946,7 @@ func (s *testRegionRequestToThreeStoresSuite) TestReplicaSelector(c *C) { replicaSelector, _ = newReplicaSelector(cache, regionLoc.Region) replicaSelector.next(s.bo) rpcCtx, err = replicaSelector.next(s.bo) + c.Assert(err, IsNil) replicaSelector.OnSendSuccess() // Verify the regionStore is updated and the workTiKVIdx points to the leader. leaderStore, leaderPeer, _, _ = region.WorkStorePeer(region.getStore()) diff --git a/structure/hash.go b/structure/hash.go index 2c8ce77520b01..1110eb5244440 100644 --- a/structure/hash.go +++ b/structure/hash.go @@ -16,7 +16,6 @@ package structure import ( "bytes" "context" - "encoding/binary" "strconv" "github.com/pingcap/errors" @@ -29,20 +28,6 @@ type HashPair struct { Value []byte } -type hashMeta struct { - FieldCount int64 -} - -func (meta hashMeta) Value() []byte { - buf := make([]byte, 8) - binary.BigEndian.PutUint64(buf[0:8], uint64(meta.FieldCount)) - return buf -} - -func (meta hashMeta) IsEmpty() bool { - return meta.FieldCount <= 0 -} - // HSet sets the string value of a hash field. func (t *TxStructure) HSet(key []byte, field []byte, value []byte) error { if t.readWriter == nil { @@ -127,48 +112,19 @@ func (t *TxStructure) updateHash(key []byte, field []byte, fn func(oldValue []by return errors.Trace(err) } - metaKey := t.encodeHashMetaKey(key) - meta, err := t.loadHashMeta(metaKey) - if err != nil { - return errors.Trace(err) - } - - if oldValue == nil { - meta.FieldCount++ - if err = t.readWriter.Set(metaKey, meta.Value()); err != nil { - return errors.Trace(err) - } - } - return nil } -// HLen gets the number of fields in a hash. -func (t *TxStructure) HLen(key []byte) (int64, error) { - metaKey := t.encodeHashMetaKey(key) - meta, err := t.loadHashMeta(metaKey) - if err != nil { - return 0, errors.Trace(err) - } - return meta.FieldCount, nil -} - // HDel deletes one or more hash fields. 
func (t *TxStructure) HDel(key []byte, fields ...[]byte) error { if t.readWriter == nil { return ErrWriteOnSnapshot } - metaKey := t.encodeHashMetaKey(key) - meta, err := t.loadHashMeta(metaKey) - if err != nil || meta.IsEmpty() { - return errors.Trace(err) - } - var value []byte for _, field := range fields { dataKey := t.encodeHashDataKey(key, field) - value, err = t.loadHashValue(dataKey) + value, err := t.loadHashValue(dataKey) if err != nil { return errors.Trace(err) } @@ -177,18 +133,10 @@ func (t *TxStructure) HDel(key []byte, fields ...[]byte) error { if err = t.readWriter.Delete(dataKey); err != nil { return errors.Trace(err) } - - meta.FieldCount-- } } - if meta.IsEmpty() { - err = t.readWriter.Delete(metaKey) - } else { - err = t.readWriter.Set(metaKey, meta.Value()) - } - - return errors.Trace(err) + return nil } // HKeys gets all the fields in a hash. @@ -236,13 +184,7 @@ func (t *TxStructure) HGetLastN(key []byte, num int) ([]HashPair, error) { // HClear removes the hash value of the key. func (t *TxStructure) HClear(key []byte) error { - metaKey := t.encodeHashMetaKey(key) - meta, err := t.loadHashMeta(metaKey) - if err != nil || meta.IsEmpty() { - return errors.Trace(err) - } - - err = t.iterateHash(key, func(field []byte, value []byte) error { + err := t.iterateHash(key, func(field []byte, value []byte) error { k := t.encodeHashDataKey(key, field) return errors.Trace(t.readWriter.Delete(k)) }) @@ -251,7 +193,7 @@ func (t *TxStructure) HClear(key []byte) error { return errors.Trace(err) } - return errors.Trace(t.readWriter.Delete(metaKey)) + return nil } func (t *TxStructure) iterateHash(key []byte, fn func(k []byte, v []byte) error) error { @@ -378,28 +320,6 @@ func (t *TxStructure) iterReverseHash(key []byte, fn func(k []byte, v []byte) (b return nil } -func (t *TxStructure) loadHashMeta(metaKey []byte) (hashMeta, error) { - v, err := t.reader.Get(context.TODO(), metaKey) - if kv.ErrNotExist.Equal(err) { - err = nil - } - if err != nil { - return hashMeta{}, errors.Trace(err) - } - - meta := hashMeta{FieldCount: 0} - if v == nil { - return meta, nil - } - - if len(v) != 8 { - return meta, ErrInvalidListMetaData - } - - meta.FieldCount = int64(binary.BigEndian.Uint64(v[0:8])) - return meta, nil -} - func (t *TxStructure) loadHashValue(dataKey []byte) ([]byte, error) { v, err := t.reader.Get(context.TODO(), dataKey) if kv.ErrNotExist.Equal(err) { diff --git a/structure/structure_test.go b/structure/structure_test.go index 000ed6e611154..6781fddd32ca0 100644 --- a/structure/structure_test.go +++ b/structure/structure_test.go @@ -221,10 +221,6 @@ func (s *testTxStructureSuite) TestHash(c *C) { err = tx.HSet(key, []byte("2"), []byte("2")) c.Assert(err, IsNil) - l, err := tx.HLen(key) - c.Assert(err, IsNil) - c.Assert(l, Equals, int64(2)) - value, err := tx.HGet(key, []byte("1")) c.Assert(err, IsNil) c.Assert(value, DeepEquals, []byte("1")) @@ -261,18 +257,10 @@ func (s *testTxStructureSuite) TestHash(c *C) { c.Assert(err, IsNil) c.Assert(value, IsNil) - l, err = tx.HLen(key) - c.Assert(err, IsNil) - c.Assert(l, Equals, int64(1)) - n, err := tx.HInc(key, []byte("1"), 1) c.Assert(err, IsNil) c.Assert(n, Equals, int64(1)) - l, err = tx.HLen(key) - c.Assert(err, IsNil) - c.Assert(l, Equals, int64(2)) - // Test set new value which equals to old value. 
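With hashMeta and HLen removed above, the field count of a hash is no longer maintained in a separate meta key. A caller that still needs the count has to derive it from the data keys; a minimal sketch under that assumption (hashLen is a hypothetical helper, not part of this patch), built on the existing HKeys API:

package example

import "github.com/pingcap/tidb/structure"

// hashLen derives the number of fields in a hash by listing its data keys,
// standing in for the removed meta-based HLen.
func hashLen(t *structure.TxStructure, key []byte) (int64, error) {
	fields, err := t.HKeys(key)
	if err != nil {
		return 0, err
	}
	return int64(len(fields)), nil
}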
value, err = tx.HGet(key, []byte("1")) c.Assert(err, IsNil) @@ -285,41 +273,21 @@ func (s *testTxStructureSuite) TestHash(c *C) { c.Assert(err, IsNil) c.Assert(value, DeepEquals, []byte("1")) - l, err = tx.HLen(key) - c.Assert(err, IsNil) - c.Assert(l, Equals, int64(2)) - n, err = tx.HInc(key, []byte("1"), 1) c.Assert(err, IsNil) c.Assert(n, Equals, int64(2)) - l, err = tx.HLen(key) - c.Assert(err, IsNil) - c.Assert(l, Equals, int64(2)) - n, err = tx.HInc(key, []byte("1"), 1) c.Assert(err, IsNil) c.Assert(n, Equals, int64(3)) - l, err = tx.HLen(key) - c.Assert(err, IsNil) - c.Assert(l, Equals, int64(2)) - n, err = tx.HGetInt64(key, []byte("1")) c.Assert(err, IsNil) c.Assert(n, Equals, int64(3)) - l, err = tx.HLen(key) - c.Assert(err, IsNil) - c.Assert(l, Equals, int64(2)) - err = tx.HClear(key) c.Assert(err, IsNil) - l, err = tx.HLen(key) - c.Assert(err, IsNil) - c.Assert(l, Equals, int64(0)) - err = tx.HDel(key, []byte("fake_key")) c.Assert(err, IsNil) @@ -328,24 +296,12 @@ func (s *testTxStructureSuite) TestHash(c *C) { c.Assert(err, IsNil) c.Assert(value, IsNil) - l, err = tx.HLen(key) - c.Assert(err, IsNil) - c.Assert(l, Equals, int64(0)) - err = tx.HSet(key, []byte("nil_key"), nil) c.Assert(err, IsNil) - l, err = tx.HLen(key) - c.Assert(err, IsNil) - c.Assert(l, Equals, int64(0)) - err = tx.HSet(key, []byte("nil_key"), []byte("1")) c.Assert(err, IsNil) - l, err = tx.HLen(key) - c.Assert(err, IsNil) - c.Assert(l, Equals, int64(1)) - value, err = tx.HGet(key, []byte("nil_key")) c.Assert(err, IsNil) c.Assert(value, DeepEquals, []byte("1")) @@ -353,10 +309,6 @@ func (s *testTxStructureSuite) TestHash(c *C) { err = tx.HSet(key, []byte("nil_key"), nil) c.Assert(err, NotNil) - l, err = tx.HLen(key) - c.Assert(err, IsNil) - c.Assert(l, Equals, int64(1)) - value, err = tx.HGet(key, []byte("nil_key")) c.Assert(err, IsNil) c.Assert(value, DeepEquals, []byte("1")) @@ -364,10 +316,6 @@ func (s *testTxStructureSuite) TestHash(c *C) { err = tx.HSet(key, []byte("nil_key"), []byte("2")) c.Assert(err, IsNil) - l, err = tx.HLen(key) - c.Assert(err, IsNil) - c.Assert(l, Equals, int64(1)) - value, err = tx.HGet(key, []byte("nil_key")) c.Assert(err, IsNil) c.Assert(value, DeepEquals, []byte("2")) diff --git a/telemetry/data_slow_query.go b/telemetry/data_slow_query.go index b2408b3223a05..dd73097293e6c 100644 --- a/telemetry/data_slow_query.go +++ b/telemetry/data_slow_query.go @@ -22,6 +22,7 @@ import ( "time" pingcapErrors "github.com/pingcap/errors" + "github.com/pingcap/parser/mysql" "github.com/pingcap/tidb/domain/infosync" "github.com/pingcap/tidb/sessionctx" "github.com/pingcap/tidb/util/logutil" @@ -141,7 +142,9 @@ func init() { lastSQBInfo["+Inf"] = 0 currentSQBInfo["+Inf"] = 0 - logutil.BgLogger().Info("Telemetry slow query stats initialized", zap.String("currentSQBInfo", currentSQBInfo.String()), zap.String("lastSQBInfo", lastSQBInfo.String())) + if mysql.TiDBReleaseVersion != "None" { + logutil.BgLogger().Info("Telemetry slow query stats initialized", zap.String("currentSQBInfo", currentSQBInfo.String()), zap.String("lastSQBInfo", lastSQBInfo.String())) + } } // postReportSlowQueryStats copy currentSQBInfo to lastSQBInfo to be ready for next report diff --git a/util/stmtsummary/evicted.go b/util/stmtsummary/evicted.go index d3cf1ff0abc29..24ca577f01956 100644 --- a/util/stmtsummary/evicted.go +++ b/util/stmtsummary/evicted.go @@ -15,6 +15,8 @@ package stmtsummary import ( "container/list" + "math" + "sync" "time" "github.com/pingcap/parser/mysql" @@ -23,6 +25,7 @@ import ( // 
stmtSummaryByDigestEvicted contents digests evicted from stmtSummaryByDigestMap type stmtSummaryByDigestEvicted struct { + sync.Mutex // record evicted data in intervals // latest history data is Back() history *list.List @@ -34,8 +37,10 @@ type stmtSummaryByDigestEvictedElement struct { beginTime int64 // endTime is the end time of current interval endTime int64 - // *Kinds* of digest being evicted + // digestKeyMap contains *Kinds* of digest being evicted digestKeyMap map[string]struct{} + // otherSummary contains summed up information of evicted elements + otherSummary *stmtSummaryByDigestElement } // spawn a new pointer to stmtSummaryByDigestEvicted @@ -51,6 +56,14 @@ func newStmtSummaryByDigestEvictedElement(beginTime int64, endTime int64) *stmtS beginTime: beginTime, endTime: endTime, digestKeyMap: make(map[string]struct{}), + otherSummary: &stmtSummaryByDigestElement{ + beginTime: beginTime, + endTime: endTime, + authUsers: make(map[string]struct{}), + minLatency: time.Duration(math.MaxInt64), + backoffTypes: make(map[string]int), + firstSeen: time.Unix(endTime, 0), + }, } } @@ -63,6 +76,9 @@ func (ssbde *stmtSummaryByDigestEvicted) AddEvicted(evictedKey *stmtSummaryByDig evictedValue.Lock() defer evictedValue.Unlock() + ssbde.Lock() + defer ssbde.Unlock() + if evictedValue.history == nil { return } @@ -130,6 +146,8 @@ func (ssbde *stmtSummaryByDigestEvicted) AddEvicted(evictedKey *stmtSummaryByDig // Clear up all records in stmtSummaryByDigestEvicted func (ssbde *stmtSummaryByDigestEvicted) Clear() { + ssbde.Lock() + defer ssbde.Unlock() ssbde.history.Init() } @@ -137,6 +155,7 @@ func (ssbde *stmtSummaryByDigestEvicted) Clear() { func (seElement *stmtSummaryByDigestEvictedElement) addEvicted(digestKey *stmtSummaryByDigestKey, digestValue *stmtSummaryByDigestElement) { if digestKey != nil { seElement.digestKeyMap[string(digestKey.Hash())] = struct{}{} + addInfo(seElement.otherSummary, digestValue) } } @@ -190,3 +209,211 @@ func (seElement *stmtSummaryByDigestEvictedElement) toEvictedCountDatum() []type func (ssMap *stmtSummaryByDigestMap) ToEvictedCountDatum() [][]types.Datum { return ssMap.other.ToEvictedCountDatum() } + +func (ssbde *stmtSummaryByDigestEvicted) toCurrentDatum() []types.Datum { + var seElement *stmtSummaryByDigestEvictedElement + + ssbde.Lock() + if ssbde.history.Len() > 0 { + seElement = ssbde.history.Back().Value.(*stmtSummaryByDigestEvictedElement) + } + ssbde.Unlock() + + if seElement == nil { + return nil + } + + return seElement.toDatum() +} + +func (ssbde *stmtSummaryByDigestEvicted) toHistoryDatum(historySize int) [][]types.Datum { + // Collect all history summaries to an array. + ssbde.Lock() + seElements := ssbde.collectHistorySummaries(historySize) + ssbde.Unlock() + rows := make([][]types.Datum, 0, len(seElements)) + + for _, seElement := range seElements { + rows = append(rows, seElement.toDatum()) + } + return rows +} + +func (ssbde *stmtSummaryByDigestEvicted) collectHistorySummaries(historySize int) []*stmtSummaryByDigestEvictedElement { + lst := make([]*stmtSummaryByDigestEvictedElement, 0, ssbde.history.Len()) + for element := ssbde.history.Front(); element != nil && len(lst) < historySize; element = element.Next() { + seElement := element.Value.(*stmtSummaryByDigestEvictedElement) + lst = append(lst, seElement) + } + return lst +} + +func (seElement *stmtSummaryByDigestEvictedElement) toDatum() []types.Datum { + return seElement.otherSummary.toDatum(new(stmtSummaryByDigest)) +} + +// addInfo adds information in addWith into addTo. 
+func addInfo(addTo *stmtSummaryByDigestElement, addWith *stmtSummaryByDigestElement) { + addTo.Lock() + defer addTo.Unlock() + + // user + for user := range addWith.authUsers { + addTo.authUsers[user] = struct{}{} + } + + // execCount and sumWarnings + addTo.execCount += addWith.execCount + addTo.sumWarnings += addWith.sumWarnings + + // latency + addTo.sumLatency += addWith.sumLatency + if addTo.maxLatency < addWith.maxLatency { + addTo.maxLatency = addWith.maxLatency + } + if addTo.minLatency > addWith.minLatency { + addTo.minLatency = addWith.minLatency + } + addTo.sumParseLatency += addWith.sumParseLatency + if addTo.maxParseLatency < addWith.maxParseLatency { + addTo.maxParseLatency = addWith.maxParseLatency + } + addTo.sumCompileLatency += addWith.sumCompileLatency + if addTo.maxCompileLatency < addWith.maxCompileLatency { + addTo.maxCompileLatency = addWith.maxCompileLatency + } + + // coprocessor + addTo.sumNumCopTasks += addWith.sumNumCopTasks + if addTo.maxCopProcessTime < addWith.maxCopProcessTime { + addTo.maxCopProcessTime = addWith.maxCopProcessTime + addTo.maxCopProcessAddress = addWith.maxCopProcessAddress + } + if addTo.maxCopWaitTime < addWith.maxCopWaitTime { + addTo.maxCopWaitTime = addWith.maxCopWaitTime + addTo.maxCopWaitAddress = addWith.maxCopWaitAddress + } + + // TiKV + addTo.sumProcessTime += addWith.sumProcessTime + if addTo.maxProcessTime < addWith.maxProcessTime { + addTo.maxProcessTime = addWith.maxProcessTime + } + addTo.sumWaitTime += addWith.sumWaitTime + if addTo.maxWaitTime < addWith.maxWaitTime { + addTo.maxWaitTime = addWith.maxWaitTime + } + addTo.sumBackoffTime += addWith.sumBackoffTime + if addTo.maxBackoffTime < addWith.maxBackoffTime { + addTo.maxBackoffTime = addWith.maxBackoffTime + } + + addTo.sumTotalKeys += addWith.sumTotalKeys + if addTo.maxTotalKeys < addWith.maxTotalKeys { + addTo.maxTotalKeys = addWith.maxTotalKeys + } + addTo.sumProcessedKeys += addWith.sumProcessedKeys + if addTo.maxProcessedKeys < addWith.maxProcessedKeys { + addTo.maxProcessedKeys = addWith.maxProcessedKeys + } + addTo.sumRocksdbDeleteSkippedCount += addWith.sumRocksdbDeleteSkippedCount + if addTo.maxRocksdbDeleteSkippedCount < addWith.maxRocksdbDeleteSkippedCount { + addTo.maxRocksdbDeleteSkippedCount = addWith.maxRocksdbDeleteSkippedCount + } + addTo.sumRocksdbKeySkippedCount += addWith.sumRocksdbKeySkippedCount + if addTo.maxRocksdbKeySkippedCount < addWith.maxRocksdbKeySkippedCount { + addTo.maxRocksdbKeySkippedCount = addWith.maxRocksdbKeySkippedCount + } + addTo.sumRocksdbBlockCacheHitCount += addWith.sumRocksdbBlockCacheHitCount + if addTo.maxRocksdbBlockCacheHitCount < addWith.maxRocksdbBlockCacheHitCount { + addTo.maxRocksdbBlockCacheHitCount = addWith.maxRocksdbBlockCacheHitCount + } + addTo.sumRocksdbBlockReadCount += addWith.sumRocksdbBlockReadCount + if addTo.maxRocksdbBlockReadCount < addWith.maxRocksdbBlockReadCount { + addTo.maxRocksdbBlockReadCount = addWith.maxRocksdbBlockReadCount + } + addTo.sumRocksdbBlockReadByte += addWith.sumRocksdbBlockReadByte + if addTo.maxRocksdbBlockReadByte < addWith.maxRocksdbBlockReadByte { + addTo.maxRocksdbBlockReadByte = addWith.maxRocksdbBlockReadByte + } + + // txn + addTo.commitCount += addWith.commitCount + addTo.sumPrewriteTime += addWith.sumPrewriteTime + if addTo.maxPrewriteTime < addWith.maxPrewriteTime { + addTo.maxPrewriteTime = addWith.maxPrewriteTime + } + addTo.sumCommitTime += addWith.sumCommitTime + if addTo.maxCommitTime < addWith.maxCommitTime { + addTo.maxCommitTime = addWith.maxCommitTime + } + 
addTo.sumGetCommitTsTime += addWith.sumGetCommitTsTime + if addTo.maxGetCommitTsTime < addWith.maxGetCommitTsTime { + addTo.maxGetCommitTsTime = addWith.maxGetCommitTsTime + } + addTo.sumCommitBackoffTime += addWith.sumCommitBackoffTime + if addTo.maxCommitBackoffTime < addWith.maxCommitBackoffTime { + addTo.maxCommitBackoffTime = addWith.maxCommitBackoffTime + } + addTo.sumResolveLockTime += addWith.sumResolveLockTime + if addTo.maxResolveLockTime < addWith.maxResolveLockTime { + addTo.maxResolveLockTime = addWith.maxResolveLockTime + } + addTo.sumLocalLatchTime += addWith.sumLocalLatchTime + if addTo.maxLocalLatchTime < addWith.maxLocalLatchTime { + addTo.maxLocalLatchTime = addWith.maxLocalLatchTime + } + addTo.sumWriteKeys += addWith.sumWriteKeys + if addTo.maxWriteKeys < addWith.maxWriteKeys { + addTo.maxWriteKeys = addWith.maxWriteKeys + } + addTo.sumWriteSize += addWith.sumWriteSize + if addTo.maxWriteSize < addWith.maxWriteSize { + addTo.maxWriteSize = addWith.maxWriteSize + } + addTo.sumPrewriteRegionNum += addWith.sumPrewriteRegionNum + if addTo.maxPrewriteRegionNum < addWith.maxPrewriteRegionNum { + addTo.maxPrewriteRegionNum = addWith.maxPrewriteRegionNum + } + addTo.sumTxnRetry += addWith.sumTxnRetry + if addTo.maxTxnRetry < addWith.maxTxnRetry { + addTo.maxTxnRetry = addWith.maxTxnRetry + } + addTo.sumBackoffTimes += addWith.sumBackoffTimes + for backoffType, backoffValue := range addWith.backoffTypes { + _, ok := addTo.backoffTypes[backoffType] + if ok { + addTo.backoffTypes[backoffType] += backoffValue + } else { + addTo.backoffTypes[backoffType] = backoffValue + } + } + + // plan cache + addTo.planCacheHits += addWith.planCacheHits + + // other + addTo.sumAffectedRows += addWith.sumAffectedRows + addTo.sumMem += addWith.sumMem + if addTo.maxMem < addWith.maxMem { + addTo.maxMem = addWith.maxMem + } + addTo.sumDisk += addWith.sumDisk + if addTo.maxDisk < addWith.maxDisk { + addTo.maxDisk = addWith.maxDisk + } + if addTo.firstSeen.After(addWith.firstSeen) { + addTo.firstSeen = addWith.firstSeen + } + if addTo.lastSeen.Before(addWith.lastSeen) { + addTo.lastSeen = addWith.lastSeen + } + addTo.execRetryCount += addWith.execRetryCount + addTo.execRetryTime += addWith.execRetryTime + addTo.sumKVTotal += addWith.sumKVTotal + addTo.sumPDTotal += addWith.sumPDTotal + addTo.sumBackoffTotal += addWith.sumBackoffTotal + addTo.sumWriteSQLRespTotal += addWith.sumWriteSQLRespTotal + + addTo.sumErrors += addWith.sumErrors +} diff --git a/util/stmtsummary/evicted_test.go b/util/stmtsummary/evicted_test.go index 36861eb4cfd1e..07c4749385d1b 100644 --- a/util/stmtsummary/evicted_test.go +++ b/util/stmtsummary/evicted_test.go @@ -17,6 +17,7 @@ import ( "bytes" "container/list" "fmt" + "reflect" "time" . "github.com/pingcap/check" @@ -25,6 +26,7 @@ import ( "github.com/pingcap/tidb/types" ) +// fake a stmtSummaryByDigest func newInduceSsbd(beginTime int64, endTime int64) *stmtSummaryByDigest { newSsbd := &stmtSummaryByDigest{ history: list.New(), @@ -32,6 +34,8 @@ func newInduceSsbd(beginTime int64, endTime int64) *stmtSummaryByDigest { newSsbd.history.PushBack(newInduceSsbde(beginTime, endTime)) return newSsbd } + +// fake a stmtSummaryByDigestElement func newInduceSsbde(beginTime int64, endTime int64) *stmtSummaryByDigestElement { newSsbde := &stmtSummaryByDigestElement{ beginTime: beginTime, @@ -58,7 +62,7 @@ func (s *testStmtSummarySuite) TestMapToEvictedCountDatum(c *C) { interval := ssMap.refreshInterval() ssMap.beginTimeForCurInterval = now + interval - // set summaryMap capacity to 1. 
+ // set summaryMap's capacity to 1. err := ssMap.summaryMap.SetCapacity(1) if err != nil { log.Fatal(err.Error()) @@ -94,7 +98,7 @@ func (s *testStmtSummarySuite) TestMapToEvictedCountDatum(c *C) { c.Assert(err, IsNil) ssMap.beginTimeForCurInterval = now + interval - // insert one statement every other interval. + // insert one statement per interval. for i := 0; i < 50; i++ { ssMap.AddStatement(generateAnyExecInfo()) ssMap.beginTimeForCurInterval += interval * 2 @@ -239,7 +243,7 @@ func (s *testStmtSummarySuite) TestStmtSummaryByDigestEvictedElement(c *C) { } // test stmtSummaryByDigestEvicted.addEvicted -// test evicted count's detail +// test stmtSummaryByDigestEvicted.toEvictedCountDatum (single and multiple intervals) func (s *testStmtSummarySuite) TestEvictedCountDetailed(c *C) { ssMap := newStmtSummaryByDigestMap() ssMap.Clear() @@ -304,6 +308,12 @@ func (s *testStmtSummarySuite) TestEvictedCountDetailed(c *C) { c.Assert(other.history.Len(), Equals, 0) } +func (s *testStmtSummarySuite) TestEvictedElementToDatum(c *C) { + seElement := newStmtSummaryByDigestEvictedElement(0, 1) + datum0 := seElement.toDatum() + c.Assert(datum0, NotNil) +} + func (s *testStmtSummarySuite) TestNewStmtSummaryByDigestEvictedElement(c *C) { now := time.Now().Unix() end := now + 60 @@ -318,6 +328,300 @@ func (s *testStmtSummarySuite) TestStmtSummaryByDigestEvicted(c *C) { c.Assert(stmtEvicted.history.Len(), Equals, 0) } +// test addInfo function +func (s *testStmtSummarySuite) TestAddInfo(c *C) { + now := time.Now().Unix() + addTo := stmtSummaryByDigestElement{ + // user + authUsers: map[string]struct{}{"a": {}}, + + // execCount and sumWarnings + execCount: 3, + sumWarnings: 8, + + // latency + sumLatency: 8, + maxLatency: 5, + minLatency: 1, + sumParseLatency: 3, + maxParseLatency: 2, + sumCompileLatency: 3, + maxCompileLatency: 2, + + // coprocessor + sumNumCopTasks: 4, + maxCopProcessTime: 4, + maxCopProcessAddress: "19.19.8.10", + maxCopWaitTime: 4, + maxCopWaitAddress: "19.19.8.10", + + // TiKV + sumProcessTime: 1, + maxProcessTime: 1, + sumWaitTime: 2, + maxWaitTime: 1, + sumBackoffTime: 2, + maxBackoffTime: 2, + + sumTotalKeys: 3, + maxTotalKeys: 2, + sumProcessedKeys: 8, + maxProcessedKeys: 4, + sumRocksdbDeleteSkippedCount: 8, + maxRocksdbDeleteSkippedCount: 2, + + sumRocksdbKeySkippedCount: 8, + maxRocksdbKeySkippedCount: 3, + sumRocksdbBlockCacheHitCount: 8, + maxRocksdbBlockCacheHitCount: 3, + sumRocksdbBlockReadCount: 3, + maxRocksdbBlockReadCount: 3, + sumRocksdbBlockReadByte: 4, + maxRocksdbBlockReadByte: 4, + + // txn + commitCount: 8, + sumPrewriteTime: 3, + maxPrewriteTime: 3, + sumCommitTime: 8, + maxCommitTime: 5, + sumGetCommitTsTime: 8, + maxGetCommitTsTime: 8, + sumCommitBackoffTime: 8, + maxCommitBackoffTime: 8, + + sumResolveLockTime: 8, + maxResolveLockTime: 8, + sumLocalLatchTime: 8, + maxLocalLatchTime: 8, + sumWriteKeys: 8, + maxWriteKeys: 8, + sumWriteSize: 8, + maxWriteSize: 8, + sumPrewriteRegionNum: 8, + maxPrewriteRegionNum: 8, + sumTxnRetry: 8, + maxTxnRetry: 8, + sumBackoffTimes: 8, + backoffTypes: map[string]int{}, + + // plan cache + planCacheHits: 8, + + // other + sumAffectedRows: 8, + sumMem: 8, + maxMem: 8, + sumDisk: 8, + maxDisk: 8, + firstSeen: time.Unix(now-10, 0), + lastSeen: time.Unix(now-8, 0), + execRetryCount: 8, + execRetryTime: 8, + sumKVTotal: 2, + sumPDTotal: 2, + sumBackoffTotal: 2, + sumWriteSQLRespTotal: 100, + sumErrors: 8, + } + + addWith := stmtSummaryByDigestElement{ + // user + authUsers: map[string]struct{}{"a": {}}, + + // execCount and 
sumWarnings + execCount: 3, + sumWarnings: 8, + + // latency + sumLatency: 8, + maxLatency: 5, + minLatency: 1, + sumParseLatency: 3, + maxParseLatency: 2, + sumCompileLatency: 3, + maxCompileLatency: 2, + + // coprocessor + sumNumCopTasks: 4, + maxCopProcessTime: 4, + maxCopProcessAddress: "19.19.8.10", + maxCopWaitTime: 4, + maxCopWaitAddress: "19.19.8.10", + + // TiKV + sumProcessTime: 1, + maxProcessTime: 1, + sumWaitTime: 2, + maxWaitTime: 1, + sumBackoffTime: 2, + maxBackoffTime: 2, + + sumTotalKeys: 3, + maxTotalKeys: 2, + sumProcessedKeys: 8, + maxProcessedKeys: 4, + sumRocksdbDeleteSkippedCount: 8, + maxRocksdbDeleteSkippedCount: 2, + + sumRocksdbKeySkippedCount: 8, + maxRocksdbKeySkippedCount: 3, + sumRocksdbBlockCacheHitCount: 8, + maxRocksdbBlockCacheHitCount: 3, + sumRocksdbBlockReadCount: 3, + maxRocksdbBlockReadCount: 3, + sumRocksdbBlockReadByte: 4, + maxRocksdbBlockReadByte: 4, + + // txn + commitCount: 8, + sumPrewriteTime: 3, + maxPrewriteTime: 3, + sumCommitTime: 8, + maxCommitTime: 5, + sumGetCommitTsTime: 8, + maxGetCommitTsTime: 8, + sumCommitBackoffTime: 8, + maxCommitBackoffTime: 8, + + sumResolveLockTime: 8, + maxResolveLockTime: 8, + sumLocalLatchTime: 8, + maxLocalLatchTime: 8, + sumWriteKeys: 8, + maxWriteKeys: 8, + sumWriteSize: 8, + maxWriteSize: 8, + sumPrewriteRegionNum: 8, + maxPrewriteRegionNum: 8, + sumTxnRetry: 8, + maxTxnRetry: 8, + sumBackoffTimes: 8, + backoffTypes: map[string]int{}, + + // plan cache + planCacheHits: 8, + + // other + sumAffectedRows: 8, + sumMem: 8, + maxMem: 8, + sumDisk: 8, + maxDisk: 8, + firstSeen: time.Unix(now-10, 0), + lastSeen: time.Unix(now-8, 0), + execRetryCount: 8, + execRetryTime: 8, + sumKVTotal: 2, + sumPDTotal: 2, + sumBackoffTotal: 2, + sumWriteSQLRespTotal: 100, + sumErrors: 8, + } + addWith.authUsers["b"] = struct{}{} + addWith.maxCopProcessTime = 15 + addWith.maxCopProcessAddress = "1.14.5.14" + addWith.firstSeen = time.Unix(now-20, 0) + addWith.lastSeen = time.Unix(now, 0) + + addInfo(&addTo, &addWith) + + expectedSum := stmtSummaryByDigestElement{ + // user + authUsers: map[string]struct{}{"a": {}, "b": {}}, + + // execCount and sumWarnings + execCount: 6, + sumWarnings: 16, + + // latency + sumLatency: 16, + maxLatency: 5, + minLatency: 1, + sumParseLatency: 6, + maxParseLatency: 2, + sumCompileLatency: 6, + maxCompileLatency: 2, + + // coprocessor + sumNumCopTasks: 8, + maxCopProcessTime: 15, + maxCopProcessAddress: "1.14.5.14", + maxCopWaitTime: 4, + maxCopWaitAddress: "19.19.8.10", + + // TiKV + sumProcessTime: 2, + maxProcessTime: 1, + sumWaitTime: 4, + maxWaitTime: 1, + sumBackoffTime: 4, + maxBackoffTime: 2, + + sumTotalKeys: 6, + maxTotalKeys: 2, + sumProcessedKeys: 16, + maxProcessedKeys: 4, + sumRocksdbDeleteSkippedCount: 16, + maxRocksdbDeleteSkippedCount: 2, + + sumRocksdbKeySkippedCount: 16, + maxRocksdbKeySkippedCount: 3, + sumRocksdbBlockCacheHitCount: 16, + maxRocksdbBlockCacheHitCount: 3, + sumRocksdbBlockReadCount: 6, + maxRocksdbBlockReadCount: 3, + sumRocksdbBlockReadByte: 8, + maxRocksdbBlockReadByte: 4, + + // txn + commitCount: 16, + sumPrewriteTime: 6, + maxPrewriteTime: 3, + sumCommitTime: 16, + maxCommitTime: 5, + sumGetCommitTsTime: 16, + maxGetCommitTsTime: 8, + sumCommitBackoffTime: 16, + maxCommitBackoffTime: 8, + + sumResolveLockTime: 16, + maxResolveLockTime: 8, + sumLocalLatchTime: 16, + maxLocalLatchTime: 8, + sumWriteKeys: 16, + maxWriteKeys: 8, + sumWriteSize: 16, + maxWriteSize: 8, + sumPrewriteRegionNum: 16, + maxPrewriteRegionNum: 8, + sumTxnRetry: 16, + maxTxnRetry: 8, + 
sumBackoffTimes: 16, + backoffTypes: map[string]int{}, + + // plan cache + planCacheHits: 16, + + // other + sumAffectedRows: 16, + sumMem: 16, + maxMem: 8, + sumDisk: 16, + maxDisk: 8, + firstSeen: time.Unix(now-20, 0), + lastSeen: time.Unix(now, 0), + execRetryCount: 16, + execRetryTime: 16, + sumKVTotal: 4, + sumPDTotal: 4, + sumBackoffTotal: 4, + sumWriteSQLRespTotal: 200, + sumErrors: 16, + } + c.Assert(reflect.DeepEqual(&addTo, &expectedSum), Equals, true) +} + func getAllEvicted(ssdbe *stmtSummaryByDigestEvicted) string { buf := bytes.NewBuffer(nil) for e := ssdbe.history.Back(); e != nil; e = e.Prev() { diff --git a/util/stmtsummary/statement_summary.go b/util/stmtsummary/statement_summary.go index 149196e0a4c31..df0c7c87f5f13 100644 --- a/util/stmtsummary/statement_summary.go +++ b/util/stmtsummary/statement_summary.go @@ -340,6 +340,7 @@ func (ssMap *stmtSummaryByDigestMap) ToCurrentDatum(user *auth.UserIdentity, isS ssMap.Lock() values := ssMap.summaryMap.Values() beginTime := ssMap.beginTimeForCurInterval + other := ssMap.other ssMap.Unlock() rows := make([][]types.Datum, 0, len(values)) @@ -349,21 +350,29 @@ func (ssMap *stmtSummaryByDigestMap) ToCurrentDatum(user *auth.UserIdentity, isS rows = append(rows, record) } } + if otherDatum := other.toCurrentDatum(); otherDatum != nil { + rows = append(rows, otherDatum) + } return rows } // ToHistoryDatum converts history statements summaries to datum. func (ssMap *stmtSummaryByDigestMap) ToHistoryDatum(user *auth.UserIdentity, isSuper bool) [][]types.Datum { + historySize := ssMap.historySize() + ssMap.Lock() values := ssMap.summaryMap.Values() + other := ssMap.other ssMap.Unlock() - historySize := ssMap.historySize() rows := make([][]types.Datum, 0, len(values)*historySize) for _, value := range values { records := value.(*stmtSummaryByDigest).toHistoryDatum(historySize, user, isSuper) rows = append(rows, records...) } + + otherDatum := other.toHistoryDatum(historySize) + rows = append(rows, otherDatum...) return rows } @@ -884,8 +893,9 @@ func (ssElement *stmtSummaryByDigestElement) toDatum(ssbd *stmtSummaryByDigest) types.NewTime(types.FromGoTime(time.Unix(ssElement.beginTime, 0)), mysql.TypeTimestamp, 0), types.NewTime(types.FromGoTime(time.Unix(ssElement.endTime, 0)), mysql.TypeTimestamp, 0), ssbd.stmtType, - ssbd.schemaName, - ssbd.digest, + // This behaviour follow MySQL. see more in https://dev.mysql.com/doc/refman/5.7/en/performance-schema-statement-digests.html + convertEmptyToNil(ssbd.schemaName), + convertEmptyToNil(ssbd.digest), ssbd.normalizedSQL, convertEmptyToNil(ssbd.tableNames), convertEmptyToNil(strings.Join(ssElement.indexNames, ",")), diff --git a/util/stmtsummary/statement_summary_test.go b/util/stmtsummary/statement_summary_test.go index f09398df68423..5e595f20a6d50 100644 --- a/util/stmtsummary/statement_summary_test.go +++ b/util/stmtsummary/statement_summary_test.go @@ -671,6 +671,52 @@ func (s *testStmtSummarySuite) TestToDatum(c *C) { datums = s.ssMap.ToHistoryDatum(nil, true) c.Assert(len(datums), Equals, 1) match(c, datums[0], expectedDatum...) 
+ + // test evict + err := s.ssMap.SetMaxStmtCount("1", false) + defer func() { + // clean up + err = s.ssMap.SetMaxStmtCount("", false) + c.Assert(err, IsNil) + }() + + c.Assert(err, IsNil) + stmtExecInfo2 := stmtExecInfo1 + stmtExecInfo2.Digest = "bandit sei" + s.ssMap.AddStatement(stmtExecInfo2) + c.Assert(s.ssMap.summaryMap.Size(), Equals, 1) + datums = s.ssMap.ToCurrentDatum(nil, true) + expectedEvictedDatum := []interface{}{n, e, "", "", "", "", + "", "", stmtExecInfo1.User, 1, 0, 0, int64(stmtExecInfo1.TotalLatency), + int64(stmtExecInfo1.TotalLatency), int64(stmtExecInfo1.TotalLatency), int64(stmtExecInfo1.TotalLatency), + int64(stmtExecInfo1.ParseLatency), int64(stmtExecInfo1.ParseLatency), int64(stmtExecInfo1.CompileLatency), + int64(stmtExecInfo1.CompileLatency), stmtExecInfo1.CopTasks.NumCopTasks, int64(stmtExecInfo1.CopTasks.MaxProcessTime), + stmtExecInfo1.CopTasks.MaxProcessAddress, int64(stmtExecInfo1.CopTasks.MaxWaitTime), + stmtExecInfo1.CopTasks.MaxWaitAddress, int64(stmtExecInfo1.ExecDetail.TimeDetail.ProcessTime), int64(stmtExecInfo1.ExecDetail.TimeDetail.ProcessTime), + int64(stmtExecInfo1.ExecDetail.TimeDetail.WaitTime), int64(stmtExecInfo1.ExecDetail.TimeDetail.WaitTime), int64(stmtExecInfo1.ExecDetail.BackoffTime), + int64(stmtExecInfo1.ExecDetail.BackoffTime), stmtExecInfo1.ExecDetail.ScanDetail.TotalKeys, stmtExecInfo1.ExecDetail.ScanDetail.TotalKeys, + stmtExecInfo1.ExecDetail.ScanDetail.ProcessedKeys, stmtExecInfo1.ExecDetail.ScanDetail.ProcessedKeys, + int64(stmtExecInfo1.ExecDetail.ScanDetail.RocksdbDeleteSkippedCount), int64(stmtExecInfo1.ExecDetail.ScanDetail.RocksdbDeleteSkippedCount), + int64(stmtExecInfo1.ExecDetail.ScanDetail.RocksdbKeySkippedCount), int64(stmtExecInfo1.ExecDetail.ScanDetail.RocksdbKeySkippedCount), + int64(stmtExecInfo1.ExecDetail.ScanDetail.RocksdbBlockCacheHitCount), int64(stmtExecInfo1.ExecDetail.ScanDetail.RocksdbBlockCacheHitCount), + int64(stmtExecInfo1.ExecDetail.ScanDetail.RocksdbBlockReadCount), int64(stmtExecInfo1.ExecDetail.ScanDetail.RocksdbBlockReadCount), + int64(stmtExecInfo1.ExecDetail.ScanDetail.RocksdbBlockReadByte), int64(stmtExecInfo1.ExecDetail.ScanDetail.RocksdbBlockReadByte), + int64(stmtExecInfo1.ExecDetail.CommitDetail.PrewriteTime), int64(stmtExecInfo1.ExecDetail.CommitDetail.PrewriteTime), + int64(stmtExecInfo1.ExecDetail.CommitDetail.CommitTime), int64(stmtExecInfo1.ExecDetail.CommitDetail.CommitTime), + int64(stmtExecInfo1.ExecDetail.CommitDetail.GetCommitTsTime), int64(stmtExecInfo1.ExecDetail.CommitDetail.GetCommitTsTime), + stmtExecInfo1.ExecDetail.CommitDetail.Mu.CommitBackoffTime, stmtExecInfo1.ExecDetail.CommitDetail.Mu.CommitBackoffTime, + stmtExecInfo1.ExecDetail.CommitDetail.ResolveLockTime, stmtExecInfo1.ExecDetail.CommitDetail.ResolveLockTime, + int64(stmtExecInfo1.ExecDetail.CommitDetail.LocalLatchTime), int64(stmtExecInfo1.ExecDetail.CommitDetail.LocalLatchTime), + stmtExecInfo1.ExecDetail.CommitDetail.WriteKeys, stmtExecInfo1.ExecDetail.CommitDetail.WriteKeys, + stmtExecInfo1.ExecDetail.CommitDetail.WriteSize, stmtExecInfo1.ExecDetail.CommitDetail.WriteSize, + stmtExecInfo1.ExecDetail.CommitDetail.PrewriteRegionNum, stmtExecInfo1.ExecDetail.CommitDetail.PrewriteRegionNum, + stmtExecInfo1.ExecDetail.CommitDetail.TxnRetry, stmtExecInfo1.ExecDetail.CommitDetail.TxnRetry, 0, 0, 1, + fmt.Sprintf("%s:1", boTxnLockName), stmtExecInfo1.MemMax, stmtExecInfo1.MemMax, stmtExecInfo1.DiskMax, stmtExecInfo1.DiskMax, + 0, 0, 0, 0, 0, stmtExecInfo1.StmtCtx.AffectedRows(), + t, t, 0, 0, 0, "", "", "", ""} + 
expectedDatum[4] = stmtExecInfo2.Digest
+	match(c, datums[0], expectedDatum...)
+	match(c, datums[1], expectedEvictedDatum...)
 }
 
 // Test AddStatement and ToDatum parallel.
@@ -849,7 +895,8 @@ func (s *testStmtSummarySuite) TestSetMaxStmtCountParallel(c *C) {
 	wg.Wait()
 
 	datums := s.ssMap.ToCurrentDatum(nil, true)
-	c.Assert(len(datums), Equals, 1)
+	// Because evictions happened in the cache, an additional aggregated record is appended to the table.
+	c.Assert(len(datums), Equals, 2)
 }
 
 // Test setting EnableStmtSummary to 0.
@@ -1071,6 +1118,32 @@ func (s *testStmtSummarySuite) TestSummaryHistory(c *C) {
 	c.Assert(err, IsNil)
 	datum = s.ssMap.ToHistoryDatum(nil, true)
 	c.Assert(len(datum), Equals, 5)
+
+	// test eviction
+	s.ssMap.Clear()
+	err = s.ssMap.SetMaxStmtCount("1", false)
+	c.Assert(err, IsNil)
+	defer func() {
+		err := s.ssMap.SetMaxStmtCount("", false)
+		c.Assert(err, IsNil)
+	}()
+	// insert the first digest
+	for i := 0; i < 6; i++ {
+		s.ssMap.beginTimeForCurInterval = now + int64(i)*10
+		s.ssMap.AddStatement(stmtExecInfo1)
+		c.Assert(s.ssMap.summaryMap.Size(), Equals, 1)
+		c.Assert(s.ssMap.other.history.Len(), Equals, 0)
+	}
+	// insert another digest to evict the first one
+	stmtExecInfo2 := stmtExecInfo1
+	stmtExecInfo2.Digest = "bandit digest"
+	s.ssMap.AddStatement(stmtExecInfo2)
+	c.Assert(s.ssMap.summaryMap.Size(), Equals, 1)
+	// The length of `other` should never be longer than historySize.
+	c.Assert(s.ssMap.other.history.Len(), Equals, 5)
+	datum = s.ssMap.ToHistoryDatum(nil, true)
+	// length of STATEMENT_SUMMARY_HISTORY == (history in cache) + (history evicted)
+	c.Assert(len(datum), Equals, 6)
 }
 
 // Test summary when PrevSQL is not empty.
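Taken together, the statement-summary changes make evicted digests visible as a single aggregated row. A short test-style sketch (assuming the helpers already used in this package's tests, such as newStmtSummaryByDigestMap and generateAnyExecInfo; exampleEvictedRow itself is hypothetical) of how that row surfaces once the capacity is exceeded:

package stmtsummary

// exampleEvictedRow shows the aggregated "other" record appearing in the
// current-interval datums once the summary map capacity is exceeded.
func exampleEvictedRow() int {
	ssMap := newStmtSummaryByDigestMap()
	_ = ssMap.SetMaxStmtCount("1", false) // keep at most one digest in the cache

	ssMap.AddStatement(generateAnyExecInfo()) // first digest
	info := generateAnyExecInfo()
	info.Digest = "another digest"
	ssMap.AddStatement(info) // evicts the first digest into ssMap.other

	rows := ssMap.ToCurrentDatum(nil, true)
	// rows holds the surviving digest plus one aggregated row for everything
	// evicted, so len(rows) == 2.
	return len(rows)
}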