diff --git a/OWNERS b/OWNERS new file mode 100644 index 00000000000..5911dfd3b66 --- /dev/null +++ b/OWNERS @@ -0,0 +1,26 @@ +# See the OWNERS docs at https://go.k8s.io/owners +approvers: + - AndreMouche + - binshi-bing + - bufferflies + - CabinfeverB + - Connor1996 + - disksing + - huachaohuang + - HunDunDM + - HuSharp + - JmPotato + - lhy1024 + - nolouch + - overvenus + - qiuyesuifeng + - rleungx + - siddontang + - Yisaer + - zhouqiang-cl +reviewers: + - BusyJay + - howardlau1999 + - Luffbee + - shafreeck + - xhebox diff --git a/OWNERS_ALIASES b/OWNERS_ALIASES new file mode 100644 index 00000000000..516a466c91e --- /dev/null +++ b/OWNERS_ALIASES @@ -0,0 +1,6 @@ +# Sort the member alphabetically. +aliases: + sig-critical-approvers-config: + - easonn7 + - kevin-xianliu + - niubell diff --git a/client/resource_group/controller/OWNERS b/client/resource_group/controller/OWNERS new file mode 100644 index 00000000000..aa02465dbd9 --- /dev/null +++ b/client/resource_group/controller/OWNERS @@ -0,0 +1,7 @@ +# See the OWNERS docs at https://go.k8s.io/owners +options: + no_parent_owners: true +filters: + "(OWNERS|config\\.go)$": + approvers: + - sig-critical-approvers-config diff --git a/client/tlsutil/OWNERS b/client/tlsutil/OWNERS new file mode 100644 index 00000000000..211db06feee --- /dev/null +++ b/client/tlsutil/OWNERS @@ -0,0 +1,7 @@ +# See the OWNERS docs at https://go.k8s.io/owners +options: + no_parent_owners: true +filters: + "(OWNERS|tlsconfig\\.go)$": + approvers: + - sig-critical-approvers-config diff --git a/conf/OWNERS b/conf/OWNERS new file mode 100644 index 00000000000..1a435c49089 --- /dev/null +++ b/conf/OWNERS @@ -0,0 +1,7 @@ +# See the OWNERS docs at https://go.k8s.io/owners +options: + no_parent_owners: true +filters: + "(OWNERS|config\\.toml)$": + approvers: + - sig-critical-approvers-config diff --git a/pkg/encryption/OWNERS b/pkg/encryption/OWNERS new file mode 100644 index 00000000000..aa02465dbd9 --- /dev/null +++ b/pkg/encryption/OWNERS @@ -0,0 +1,7 @@ +# See the OWNERS docs at https://go.k8s.io/owners +options: + no_parent_owners: true +filters: + "(OWNERS|config\\.go)$": + approvers: + - sig-critical-approvers-config diff --git a/pkg/mcs/resourcemanager/server/OWNERS b/pkg/mcs/resourcemanager/server/OWNERS new file mode 100644 index 00000000000..aa02465dbd9 --- /dev/null +++ b/pkg/mcs/resourcemanager/server/OWNERS @@ -0,0 +1,7 @@ +# See the OWNERS docs at https://go.k8s.io/owners +options: + no_parent_owners: true +filters: + "(OWNERS|config\\.go)$": + approvers: + - sig-critical-approvers-config diff --git a/pkg/mcs/scheduling/server/config/OWNERS b/pkg/mcs/scheduling/server/config/OWNERS new file mode 100644 index 00000000000..aa02465dbd9 --- /dev/null +++ b/pkg/mcs/scheduling/server/config/OWNERS @@ -0,0 +1,7 @@ +# See the OWNERS docs at https://go.k8s.io/owners +options: + no_parent_owners: true +filters: + "(OWNERS|config\\.go)$": + approvers: + - sig-critical-approvers-config diff --git a/pkg/mcs/tso/server/OWNERS b/pkg/mcs/tso/server/OWNERS new file mode 100644 index 00000000000..aa02465dbd9 --- /dev/null +++ b/pkg/mcs/tso/server/OWNERS @@ -0,0 +1,7 @@ +# See the OWNERS docs at https://go.k8s.io/owners +options: + no_parent_owners: true +filters: + "(OWNERS|config\\.go)$": + approvers: + - sig-critical-approvers-config diff --git a/pkg/mock/mockcluster/mockcluster.go b/pkg/mock/mockcluster/mockcluster.go index a285e4a8cbf..b2f5bbae66b 100644 --- a/pkg/mock/mockcluster/mockcluster.go +++ b/pkg/mock/mockcluster/mockcluster.go @@ -188,7 +188,7 @@ func (mc 
*Cluster) AllocPeer(storeID uint64) (*metapb.Peer, error) { func (mc *Cluster) initRuleManager() { if mc.RuleManager == nil { mc.RuleManager = placement.NewRuleManager(storage.NewStorageWithMemoryBackend(), mc, mc.GetOpts()) - mc.RuleManager.Initialize(int(mc.GetReplicationConfig().MaxReplicas), mc.GetReplicationConfig().LocationLabels) + mc.RuleManager.Initialize(int(mc.GetReplicationConfig().MaxReplicas), mc.GetReplicationConfig().LocationLabels, mc.GetReplicationConfig().IsolationLevel) } } diff --git a/pkg/schedule/config/OWNERS b/pkg/schedule/config/OWNERS new file mode 100644 index 00000000000..ce5d15ddc19 --- /dev/null +++ b/pkg/schedule/config/OWNERS @@ -0,0 +1,7 @@ +# See the OWNERS docs at https://go.k8s.io/owners +options: + no_parent_owners: true +filters: + "(OWNERS|(config|store_config)\\.go)$": + approvers: + - sig-critical-approvers-config diff --git a/pkg/schedule/schedulers/OWNERS b/pkg/schedule/schedulers/OWNERS new file mode 100644 index 00000000000..ae96e4f1f42 --- /dev/null +++ b/pkg/schedule/schedulers/OWNERS @@ -0,0 +1,7 @@ +# See the OWNERS docs at https://go.k8s.io/owners +options: + no_parent_owners: true +filters: + "(OWNERS|hot_region_config\\.go)$": + approvers: + - sig-critical-approvers-config diff --git a/pkg/typeutil/size_test.go b/pkg/typeutil/size_test.go index eae092cdb5c..5d66f43d5a4 100644 --- a/pkg/typeutil/size_test.go +++ b/pkg/typeutil/size_test.go @@ -17,6 +17,7 @@ package typeutil import ( "encoding/json" + "github.com/docker/go-units" . "github.com/pingcap/check" ) @@ -41,23 +42,31 @@ func (s *testSizeSuite) TestJSON(c *C) { } func (s *testSizeSuite) TestParseMbFromText(c *C) { + const defaultValue = 2 + testdata := []struct { body []string size uint64 }{{ body: []string{"10Mib", "10MiB", "10M", "10MB"}, - size: uint64(10), + size: 10, }, { body: []string{"10GiB", "10Gib", "10G", "10GB"}, - size: uint64(10 * 1024), + size: 10 * units.GiB / units.MiB, + }, { + body: []string{"1024KiB", "1048576"}, + size: 1, + }, { + body: []string{"100KiB", "1023KiB", "1048575", "0"}, + size: 0, }, { body: []string{"10yiB", "10aib"}, - size: uint64(1), + size: defaultValue, }} for _, t := range testdata { for _, b := range t.body { - c.Assert(int(ParseMBFromText(b, 1)), Equals, int(t.size)) + c.Assert(ParseMBFromText(b, defaultValue), Equals, t.size) } } } diff --git a/server/api/label_test.go b/server/api/label_test.go index 9acddae8436..2fc2e5eef8f 100644 --- a/server/api/label_test.go +++ b/server/api/label_test.go @@ -17,6 +17,7 @@ package api import ( "context" "fmt" + "strings" . 
"github.com/pingcap/check" "github.com/pingcap/kvproto/pkg/metapb" @@ -24,6 +25,7 @@ import ( tu "github.com/tikv/pd/pkg/testutil" "github.com/tikv/pd/server" "github.com/tikv/pd/server/config" + "github.com/tikv/pd/server/core" ) var _ = Suite(&testLabelsStoreSuite{}) @@ -264,6 +266,30 @@ func (s *testStrictlyLabelsStoreSuite) TestStoreMatch(c *C) { valid: false, expectError: "key matching the label was not found", }, + { + store: &metapb.Store{ + Id: 3, + Address: "tiflash1", + State: metapb.StoreState_Up, + Labels: []*metapb.StoreLabel{ + { + Key: "zone", + Value: "us-west-1", + }, + { + Key: "disk", + Value: "ssd", + }, + { + Key: core.EngineKey, + Value: core.EngineTiFlash, + }, + }, + Version: "3.0.0", + }, + valid: true, + expectError: "placement rules is disabled", + }, } for _, t := range cases { @@ -271,12 +297,16 @@ func (s *testStrictlyLabelsStoreSuite) TestStoreMatch(c *C) { Header: &pdpb.RequestHeader{ClusterId: s.svr.ClusterID()}, Store: &metapb.Store{ Id: t.store.Id, - Address: fmt.Sprintf("tikv%d", t.store.Id), + Address: t.store.Address, State: t.store.State, Labels: t.store.Labels, Version: t.store.Version, }, }) + if t.store.Address == "tiflash1" { + c.Assert(strings.Contains(resp.GetHeader().GetError().String(), t.expectError), IsTrue) + continue + } if t.valid { c.Assert(err, IsNil) } else { @@ -291,12 +321,13 @@ func (s *testStrictlyLabelsStoreSuite) TestStoreMatch(c *C) { Header: &pdpb.RequestHeader{ClusterId: s.svr.ClusterID()}, Store: &metapb.Store{ Id: t.store.Id, - Address: fmt.Sprintf("tikv%d", t.store.Id), + Address: t.store.Address, State: t.store.State, Labels: t.store.Labels, Version: t.store.Version, }, }) + if t.valid { c.Assert(err, IsNil) } else { diff --git a/server/api/operator_test.go b/server/api/operator_test.go index 86d99c5e726..9f5d3167476 100644 --- a/server/api/operator_test.go +++ b/server/api/operator_test.go @@ -364,7 +364,9 @@ func (s *testTransferRegionOperatorSuite) TestTransferRegionWithPlacementRule(c if tc.placementRuleEnable { err := s.svr.GetRaftCluster().GetRuleManager().Initialize( s.svr.GetRaftCluster().GetOpts().GetMaxReplicas(), - s.svr.GetRaftCluster().GetOpts().GetLocationLabels()) + s.svr.GetRaftCluster().GetOpts().GetLocationLabels(), + s.svr.GetRaftCluster().GetOpts().GetIsolationLevel(), + ) c.Assert(err, IsNil) } if len(tc.rules) > 0 { diff --git a/server/cluster/cluster.go b/server/cluster/cluster.go index 66953caac3c..367f7c1d9c9 100644 --- a/server/cluster/cluster.go +++ b/server/cluster/cluster.go @@ -253,7 +253,7 @@ func (c *RaftCluster) Start(s Server) error { c.ruleManager = placement.NewRuleManager(c.storage, c, c.GetOpts()) if c.opt.IsPlacementRulesEnabled() { - err = c.ruleManager.Initialize(c.opt.GetMaxReplicas(), c.opt.GetLocationLabels()) + err = c.ruleManager.Initialize(c.opt.GetMaxReplicas(), c.opt.GetLocationLabels(), c.opt.GetIsolationLevel()) if err != nil { return err } @@ -1140,6 +1140,9 @@ func (c *RaftCluster) checkStoreLabels(s *core.StoreInfo) error { } for _, label := range s.GetLabels() { key := label.GetKey() + if key == core.EngineKey { + continue + } if _, ok := keysSet[key]; !ok { log.Warn("not found the key match with the store label", zap.Stringer("store", s.GetMeta()), diff --git a/server/cluster/cluster_test.go b/server/cluster/cluster_test.go index be631a0f49a..a94474e5159 100644 --- a/server/cluster/cluster_test.go +++ b/server/cluster/cluster_test.go @@ -225,7 +225,7 @@ func (s *testClusterInfoSuite) TestSetOfflineStore(c *C) { cluster.coordinator = newCoordinator(s.ctx, cluster, nil) 
cluster.ruleManager = placement.NewRuleManager(storage.NewStorageWithMemoryBackend(), cluster, cluster.GetOpts()) if opt.IsPlacementRulesEnabled() { - err := cluster.ruleManager.Initialize(opt.GetMaxReplicas(), opt.GetLocationLabels()) + err := cluster.ruleManager.Initialize(opt.GetMaxReplicas(), opt.GetLocationLabels(), opt.GetIsolationLevel()) if err != nil { panic(err) } @@ -396,7 +396,7 @@ func (s *testClusterInfoSuite) TestUpStore(c *C) { cluster.coordinator = newCoordinator(s.ctx, cluster, nil) cluster.ruleManager = placement.NewRuleManager(storage.NewStorageWithMemoryBackend(), cluster, cluster.GetOpts()) if opt.IsPlacementRulesEnabled() { - err := cluster.ruleManager.Initialize(opt.GetMaxReplicas(), opt.GetLocationLabels()) + err := cluster.ruleManager.Initialize(opt.GetMaxReplicas(), opt.GetLocationLabels(), opt.GetIsolationLevel()) if err != nil { panic(err) } @@ -491,7 +491,7 @@ func (s *testClusterInfoSuite) TestDeleteStoreUpdatesClusterVersion(c *C) { cluster.coordinator = newCoordinator(s.ctx, cluster, nil) cluster.ruleManager = placement.NewRuleManager(storage.NewStorageWithMemoryBackend(), cluster, cluster.GetOpts()) if opt.IsPlacementRulesEnabled() { - err := cluster.ruleManager.Initialize(opt.GetMaxReplicas(), opt.GetLocationLabels()) + err := cluster.ruleManager.Initialize(opt.GetMaxReplicas(), opt.GetLocationLabels(), opt.GetIsolationLevel()) if err != nil { panic(err) } @@ -1121,7 +1121,7 @@ func (s *testClusterInfoSuite) TestOfflineAndMerge(c *C) { cluster.coordinator = newCoordinator(s.ctx, cluster, nil) cluster.ruleManager = placement.NewRuleManager(storage.NewStorageWithMemoryBackend(), cluster, cluster.GetOpts()) if opt.IsPlacementRulesEnabled() { - err := cluster.ruleManager.Initialize(opt.GetMaxReplicas(), opt.GetLocationLabels()) + err := cluster.ruleManager.Initialize(opt.GetMaxReplicas(), opt.GetLocationLabels(), opt.GetIsolationLevel()) if err != nil { panic(err) } @@ -1742,7 +1742,7 @@ func newTestRaftCluster( rc.InitCluster(id, opt, s, basicCluster) rc.ruleManager = placement.NewRuleManager(storage.NewStorageWithMemoryBackend(), rc, opt) if opt.IsPlacementRulesEnabled() { - err := rc.ruleManager.Initialize(opt.GetMaxReplicas(), opt.GetLocationLabels()) + err := rc.ruleManager.Initialize(opt.GetMaxReplicas(), opt.GetLocationLabels(), opt.GetIsolationLevel()) if err != nil { panic(err) } diff --git a/server/config/OWNERS b/server/config/OWNERS new file mode 100644 index 00000000000..179de4843e6 --- /dev/null +++ b/server/config/OWNERS @@ -0,0 +1,7 @@ +# See the OWNERS docs at https://go.k8s.io/owners +options: + no_parent_owners: true +filters: + "(OWNERS|(config|service_middleware_config)\\.go)$": + approvers: + - sig-critical-approvers-config diff --git a/server/config/persist_options.go b/server/config/persist_options.go index fe7203722c2..fc7835e6ae1 100644 --- a/server/config/persist_options.go +++ b/server/config/persist_options.go @@ -260,6 +260,13 @@ func (o *PersistOptions) SetSplitMergeInterval(splitMergeInterval time.Duration) o.SetScheduleConfig(v) } +// SetMaxStoreDownTime to set the max store down time. It's only used to test. +func (o *PersistOptions) SetMaxStoreDownTime(time time.Duration) { + v := o.GetScheduleConfig().Clone() + v.MaxStoreDownTime = typeutil.NewDuration(time) + o.SetScheduleConfig(v) +} + // SetMaxMergeRegionSize sets the max merge region size. 
func (o *PersistOptions) SetMaxMergeRegionSize(maxMergeRegionSize uint64) { v := o.GetScheduleConfig().Clone() diff --git a/server/config/store_config.go b/server/config/store_config.go index 81a13a7e734..e66d8eb0459 100644 --- a/server/config/store_config.go +++ b/server/config/store_config.go @@ -134,9 +134,14 @@ func (c *StoreConfig) CheckRegionSize(size, mergeSize uint64) error { if size < c.GetRegionMaxSize() { return nil } - + // This could happen when the region split size is set to a value less than 1MiB, + // which is a very extreme case, we just pass the check here to prevent panic. + regionSplitSize := c.GetRegionSplitSize() + if regionSplitSize == 0 { + return nil + } // the smallest of the split regions can not be merge again, so it's size should less merge size. - if smallSize := size % c.GetRegionSplitSize(); smallSize <= mergeSize && smallSize != 0 { + if smallSize := size % regionSplitSize; smallSize <= mergeSize && smallSize != 0 { log.Debug("region size is too small", zap.Uint64("size", size), zap.Uint64("merge-size", mergeSize), zap.Uint64("small-size", smallSize)) return errs.ErrCheckerMergeAgain.FastGenByArgs("the smallest region of the split regions is less than max-merge-region-size, " + "it will be merged again") diff --git a/server/config/store_config_test.go b/server/config/store_config_test.go index bf8f3cc7644..c2697a8ffa0 100644 --- a/server/config/store_config_test.go +++ b/server/config/store_config_test.go @@ -148,4 +148,8 @@ func (t *testTiKVConfigSuite) TestMergeCheck(c *C) { c.Assert(config.CheckRegionKeys(v.keys, v.mergeKeys), NotNil) } } + // Test CheckRegionSize when the region split size is 0. + config.RegionSplitSize = "100KiB" + c.Assert(config.GetRegionSplitSize(), Equals, uint64(0)) + c.Assert(config.CheckRegionSize(defaultRegionMaxSize, 50), IsNil) } diff --git a/server/election/lease.go b/server/election/lease.go index 4e368b98361..f3c00f47089 100644 --- a/server/election/lease.go +++ b/server/election/lease.go @@ -152,8 +152,11 @@ func (l *lease) keepAliveWorker(ctx context.Context, interval time.Duration) <-c expire := start.Add(time.Duration(res.TTL) * time.Second) select { case ch <- expire: - case <-ctx1.Done(): + // Here we don't use `ctx1.Done()` because we want to make sure if the keep alive success, we can update the expire time. + case <-ctx.Done(): } + } else { + log.Error("keep alive response ttl is zero", zap.String("purpose", l.Purpose)) } }() diff --git a/server/election/lease_test.go b/server/election/lease_test.go index 0c0aa3c1687..0161e7f3d27 100644 --- a/server/election/lease_test.go +++ b/server/election/lease_test.go @@ -16,9 +16,11 @@ package election import ( "context" + "testing" "time" . "github.com/pingcap/check" + "github.com/stretchr/testify/require" "github.com/tikv/pd/pkg/etcdutil" "go.etcd.io/etcd/clientv3" "go.etcd.io/etcd/embed" @@ -104,3 +106,34 @@ func (s *testLeaseSuite) TestLease(c *C) { time.Sleep((defaultLeaseTimeout + 1) * time.Second) c.Check(lease1.IsExpired(), IsTrue) } + +func TestLeaseKeepAlive(t *testing.T) { + re := require.New(t) + cfg := etcdutil.NewTestSingleConfig() + etcd, err := embed.StartEtcd(cfg) + defer func() { + etcd.Close() + }() + re.NoError(err) + + ep := cfg.LCUrls[0].String() + client, err := clientv3.New(clientv3.Config{ + Endpoints: []string{ep}, + }) + re.NoError(err) + + <-etcd.Server.ReadyNotify() + + // Create the lease. 
+ lease := &lease{ + Purpose: "test_lease", + client: client, + lease: clientv3.NewLease(client), + } + + re.NoError(lease.Grant(defaultLeaseTimeout)) + ch := lease.keepAliveWorker(context.Background(), 2*time.Second) + time.Sleep(2 * time.Second) + <-ch + re.NoError(lease.Close()) +} diff --git a/server/schedule/checker/rule_checker.go b/server/schedule/checker/rule_checker.go index ea665c1faa9..ac24aaea50b 100644 --- a/server/schedule/checker/rule_checker.go +++ b/server/schedule/checker/rule_checker.go @@ -102,6 +102,11 @@ func (c *RuleChecker) CheckWithFit(region *core.RegionInfo, fit *placement.Regio panic("cached should be used") }) + // the placement rule is disabled + if fit == nil { + return + } + // If the fit is calculated by FitRegion, which means we get a new fit result, thus we should // invalid the cache if it exists c.ruleManager.InvalidCache(region.GetID()) diff --git a/server/schedule/placement/rule_manager.go b/server/schedule/placement/rule_manager.go index 04b2d96d9f1..ac5585fdbe0 100644 --- a/server/schedule/placement/rule_manager.go +++ b/server/schedule/placement/rule_manager.go @@ -63,7 +63,7 @@ func NewRuleManager(storage endpoint.RuleStorage, storeSetInformer core.StoreSet // Initialize loads rules from storage. If Placement Rules feature is never enabled, it creates default rule that is // compatible with previous configuration. -func (m *RuleManager) Initialize(maxReplica int, locationLabels []string) error { +func (m *RuleManager) Initialize(maxReplica int, locationLabels []string, isolationLevel string) error { m.Lock() defer m.Unlock() if m.initialized { @@ -84,6 +84,7 @@ func (m *RuleManager) Initialize(maxReplica int, locationLabels []string) error Role: Voter, Count: maxReplica, LocationLabels: locationLabels, + IsolationLevel: isolationLevel, } if err := m.storage.SaveRule(defaultRule.StoreKey(), defaultRule); err != nil { return err diff --git a/server/schedule/placement/rule_manager_test.go b/server/schedule/placement/rule_manager_test.go index ae750fe5f9b..7e19942e488 100644 --- a/server/schedule/placement/rule_manager_test.go +++ b/server/schedule/placement/rule_manager_test.go @@ -36,7 +36,7 @@ func (s *testManagerSuite) SetUpTest(c *C) { s.store = storage.NewStorageWithMemoryBackend() var err error s.manager = NewRuleManager(s.store, nil, nil) - err = s.manager.Initialize(3, []string{"zone", "rack", "host"}) + err = s.manager.Initialize(3, []string{"zone", "rack", "host"}, "") c.Assert(err, IsNil) } @@ -113,7 +113,7 @@ func (s *testManagerSuite) TestSaveLoad(c *C) { } m2 := NewRuleManager(s.store, nil, nil) - err := m2.Initialize(3, []string{"no", "labels"}) + err := m2.Initialize(3, []string{"no", "labels"}, "") c.Assert(err, IsNil) c.Assert(m2.GetAllRules(), HasLen, 3) c.Assert(m2.GetRule("pd", "default").String(), Equals, rules[0].String()) @@ -128,7 +128,7 @@ func (s *testManagerSuite) TestSetAfterGet(c *C) { s.manager.SetRule(rule) m2 := NewRuleManager(s.store, nil, nil) - err := m2.Initialize(100, []string{}) + err := m2.Initialize(100, []string{}, "") c.Assert(err, IsNil) rule = m2.GetRule("pd", "default") c.Assert(rule.Count, Equals, 1) diff --git a/server/server.go b/server/server.go index 3463b3c6e4a..43bdc458494 100644 --- a/server/server.go +++ b/server/server.go @@ -897,7 +897,7 @@ func (s *Server) SetReplicationConfig(cfg config.ReplicationConfig) error { } if cfg.EnablePlacementRules { // initialize rule manager. 
- if err := rc.GetRuleManager().Initialize(int(cfg.MaxReplicas), cfg.LocationLabels); err != nil { + if err := rc.GetRuleManager().Initialize(int(cfg.MaxReplicas), cfg.LocationLabels, cfg.IsolationLevel); err != nil { return err } } else { @@ -920,19 +920,19 @@ func (s *Server) SetReplicationConfig(cfg config.ReplicationConfig) error { defaultRule := rc.GetRuleManager().GetRule("pd", "default") CheckInDefaultRule := func() error { - // replication config won't work when placement rule is enabled and exceeds one default rule + // replication config won't work when placement rule is enabled and exceeds one default rule if !(defaultRule != nil && len(defaultRule.StartKey) == 0 && len(defaultRule.EndKey) == 0) { - return errors.New("cannot update MaxReplicas or LocationLabels when placement rules feature is enabled and not only default rule exists, please update rule instead") + return errors.New("cannot update MaxReplicas, LocationLabels or IsolationLevel when placement rules feature is enabled and not only default rule exists, please update rule instead") } - if !(defaultRule.Count == int(old.MaxReplicas) && typeutil.StringsEqual(defaultRule.LocationLabels, []string(old.LocationLabels))) { + if !(defaultRule.Count == int(old.MaxReplicas) && typeutil.StringsEqual(defaultRule.LocationLabels, []string(old.LocationLabels)) && defaultRule.IsolationLevel == old.IsolationLevel) { return errors.New("cannot to update replication config, the default rules do not consistent with replication config, please update rule instead") } return nil } - if !(cfg.MaxReplicas == old.MaxReplicas && typeutil.StringsEqual(cfg.LocationLabels, old.LocationLabels)) { + if !(cfg.MaxReplicas == old.MaxReplicas && typeutil.StringsEqual(cfg.LocationLabels, old.LocationLabels) && cfg.IsolationLevel == old.IsolationLevel) { if err := CheckInDefaultRule(); err != nil { return err } @@ -943,6 +943,7 @@ func (s *Server) SetReplicationConfig(cfg config.ReplicationConfig) error { if rule != nil { rule.Count = int(cfg.MaxReplicas) rule.LocationLabels = cfg.LocationLabels + rule.IsolationLevel = cfg.IsolationLevel rc := s.GetRaftCluster() if rc == nil { return errs.ErrNotBootstrapped.GenWithStackByArgs() diff --git a/server/statistics/region_collection_test.go b/server/statistics/region_collection_test.go index eb100e958fd..f9d1193d700 100644 --- a/server/statistics/region_collection_test.go +++ b/server/statistics/region_collection_test.go @@ -42,7 +42,7 @@ func (t *testRegionStatisticsSuite) SetUpTest(c *C) { t.store = storage.NewStorageWithMemoryBackend() var err error t.manager = placement.NewRuleManager(t.store, nil, nil) - err = t.manager.Initialize(3, []string{"zone", "rack", "host"}) + err = t.manager.Initialize(3, []string{"zone", "rack", "host"}, "") c.Assert(err, IsNil) } diff --git a/tests/pdctl/config/config_test.go b/tests/pdctl/config/config_test.go index 297cc538606..b15bfd5e7d9 100644 --- a/tests/pdctl/config/config_test.go +++ b/tests/pdctl/config/config_test.go @@ -637,7 +637,7 @@ func (s *configTestSuite) TestUpdateDefaultReplicaConfig(c *C) { c.Assert(replicationCfg.MaxReplicas, Equals, expect) } - checkLocaltionLabels := func(expect int) { + checkLocationLabels := func(expect int) { args := []string{"-u", pdAddr, "config", "show", "replication"} output, err := pdctl.ExecuteCommand(cmd, args...) 
c.Assert(err, IsNil) @@ -646,6 +646,15 @@ func (s *configTestSuite) TestUpdateDefaultReplicaConfig(c *C) { c.Assert(replicationCfg.LocationLabels, HasLen, expect) } + checkIsolationLevel := func(expect string) { + args := []string{"-u", pdAddr, "config", "show", "replication"} + output, err := pdctl.ExecuteCommand(cmd, args...) + c.Assert(err, IsNil) + replicationCfg := config.ReplicationConfig{} + c.Assert(json.Unmarshal(output, &replicationCfg), IsNil) + c.Assert(replicationCfg.IsolationLevel, Equals, expect) + } + checkRuleCount := func(expect int) { args := []string{"-u", pdAddr, "config", "placement-rules", "show", "--group", "pd", "--id", "default"} output, err := pdctl.ExecuteCommand(cmd, args...) @@ -664,6 +673,15 @@ func (s *configTestSuite) TestUpdateDefaultReplicaConfig(c *C) { c.Assert(rule.LocationLabels, HasLen, expect) } + checkRuleIsolationLevel := func(expect string) { + args := []string{"-u", pdAddr, "config", "placement-rules", "show", "--group", "pd", "--id", "default"} + output, err := pdctl.ExecuteCommand(cmd, args...) + c.Assert(err, IsNil) + rule := placement.Rule{} + c.Assert(json.Unmarshal(output, &rule), IsNil) + c.Assert(rule.IsolationLevel, Equals, expect) + } + // update successfully when placement rules is not enabled. output, err := pdctl.ExecuteCommand(cmd, "-u", pdAddr, "config", "set", "max-replicas", "2") c.Assert(err, IsNil) @@ -672,8 +690,13 @@ func (s *configTestSuite) TestUpdateDefaultReplicaConfig(c *C) { output, err = pdctl.ExecuteCommand(cmd, "-u", pdAddr, "config", "set", "location-labels", "zone,host") c.Assert(err, IsNil) c.Assert(strings.Contains(string(output), "Success!"), IsTrue) - checkLocaltionLabels(2) + output, err = pdctl.ExecuteCommand(cmd, "-u", pdAddr, "config", "set", "isolation-level", "zone") + c.Assert(err, IsNil) + c.Assert(strings.Contains(string(output), "Success!"), IsTrue) + checkLocationLabels(2) checkRuleLocationLabels(2) + checkIsolationLevel("zone") + checkRuleIsolationLevel("zone") // update successfully when only one default rule exists. output, err = pdctl.ExecuteCommand(cmd, "-u", pdAddr, "config", "placement-rules", "enable") @@ -686,11 +709,18 @@ func (s *configTestSuite) TestUpdateDefaultReplicaConfig(c *C) { checkMaxReplicas(3) checkRuleCount(3) + // We need to change isolation first because we will validate + // if the location label contains the isolation level when setting location labels. + output, err = pdctl.ExecuteCommand(cmd, "-u", pdAddr, "config", "set", "isolation-level", "host") + c.Assert(err, IsNil) + c.Assert(strings.Contains(string(output), "Success!"), IsTrue) output, err = pdctl.ExecuteCommand(cmd, "-u", pdAddr, "config", "set", "location-labels", "host") c.Assert(err, IsNil) c.Assert(strings.Contains(string(output), "Success!"), IsTrue) - checkLocaltionLabels(1) + checkLocationLabels(1) checkRuleLocationLabels(1) + checkIsolationLevel("host") + checkRuleIsolationLevel("host") // update unsuccessfully when many rule exists. f, _ := os.CreateTemp("/tmp", "pd_tests") @@ -720,8 +750,10 @@ func (s *configTestSuite) TestUpdateDefaultReplicaConfig(c *C) { c.Assert(err, IsNil) checkMaxReplicas(4) checkRuleCount(4) - checkLocaltionLabels(1) + checkLocationLabels(1) checkRuleLocationLabels(1) + checkIsolationLevel("host") + checkRuleIsolationLevel("host") } func (s *configTestSuite) TestPDServerConfig(c *C) {
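
The sketches below illustrate a few of the changes in this patch; unless an identifier also appears in the diff above, the names and types in them are illustrative stand-ins, not pd's real API.

The new OWNERS files all follow the same pattern: `no_parent_owners: true` plus a `filters` entry whose regular expression routes changes to the critical config files (and to the OWNERS file itself) to the `sig-critical-approvers-config` alias. A quick, assumption-laden illustration of how such a filter regexp behaves follows; it only demonstrates the Go regexp itself, not the exact path prow matches it against.

```go
package main

import (
	"fmt"
	"regexp"
)

func main() {
	// The most common filter expression added in this patch.
	filter := regexp.MustCompile(`(OWNERS|config\.go)$`)
	for _, name := range []string{
		"config.go",       // matched: routed to sig-critical-approvers-config
		"OWNERS",          // matched: the OWNERS file guards itself
		"store_config.go", // matched as well, since it ends in "config.go"
		"crypter.go",      // not matched: the normal review flow applies
	} {
		fmt.Printf("%-16s %v\n", name, filter.MatchString(name))
	}
}
```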
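
The central change of the patch is that `RuleManager.Initialize` now receives the replication config's `IsolationLevel` and stores it on the default placement rule, and `SetReplicationConfig` keeps the two in sync before allowing in-place edits. A minimal sketch of that relationship, using simplified stand-in types rather than pd's `config.ReplicationConfig` and `placement.Rule`:

```go
package main

import "fmt"

// Simplified stand-ins for pd's config.ReplicationConfig and placement.Rule.
// Field names mirror the patch; the types themselves are illustrative only.
type ReplicationConfig struct {
	MaxReplicas    uint64
	LocationLabels []string
	IsolationLevel string
}

type Rule struct {
	GroupID        string
	ID             string
	Role           string
	Count          int
	LocationLabels []string
	IsolationLevel string
}

// buildDefaultRule shows what Initialize does after this patch: the default
// rule now carries IsolationLevel in addition to Count and LocationLabels,
// so a rule derived from the replication config stays consistent with it.
func buildDefaultRule(cfg ReplicationConfig) Rule {
	return Rule{
		GroupID:        "pd",
		ID:             "default",
		Role:           "voter",
		Count:          int(cfg.MaxReplicas),
		LocationLabels: cfg.LocationLabels,
		IsolationLevel: cfg.IsolationLevel,
	}
}

// defaultRuleMatches mirrors the consistency check SetReplicationConfig now
// performs: the default rule must agree with the old config on all three
// mirrored fields before the replication config may be edited in place.
func defaultRuleMatches(r Rule, old ReplicationConfig) bool {
	if r.Count != int(old.MaxReplicas) || r.IsolationLevel != old.IsolationLevel {
		return false
	}
	if len(r.LocationLabels) != len(old.LocationLabels) {
		return false
	}
	for i := range r.LocationLabels {
		if r.LocationLabels[i] != old.LocationLabels[i] {
			return false
		}
	}
	return true
}

func main() {
	cfg := ReplicationConfig{
		MaxReplicas:    3,
		LocationLabels: []string{"zone", "rack", "host"},
		IsolationLevel: "zone",
	}
	rule := buildDefaultRule(cfg)
	fmt.Println(defaultRuleMatches(rule, cfg)) // true
}
```

This is also why every `Initialize(...)` call site in the tests gains a third argument, and why the pdctl test changes `isolation-level` before shrinking `location-labels`: the default rule and the replication config are required to stay consistent field by field.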
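
The new cases in size_test.go pin down how `ParseMBFromText` rounds: values that convert to whole MiB keep their value (1024KiB becomes 1), sub-MiB values truncate to 0, and unparsable strings fall back to the caller-supplied default. Below is a sketch of a helper with the same observable behavior, built on `github.com/docker/go-units` (which the test now imports); pd's actual implementation may differ in detail.

```go
package main

import (
	"fmt"

	"github.com/docker/go-units"
)

// parseMBFromText sketches the semantics the test asserts: parse a
// human-readable size and return it in MiB, truncating sub-MiB values to 0
// and falling back to defaultValue when parsing fails.
func parseMBFromText(text string, defaultValue uint64) uint64 {
	b, err := units.RAMInBytes(text) // accepts "10MiB", "10GB", "1048576", ...
	if err != nil || b < 0 {
		return defaultValue
	}
	return uint64(b) / units.MiB // integer division truncates, so 1023KiB -> 0
}

func main() {
	fmt.Println(parseMBFromText("10GiB", 2))   // 10240
	fmt.Println(parseMBFromText("1024KiB", 2)) // 1
	fmt.Println(parseMBFromText("1023KiB", 2)) // 0
	fmt.Println(parseMBFromText("10yiB", 2))   // 2 (falls back to the default)
}
```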
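
The cluster.go hunk makes the strict store-label check skip the `engine` key, so TiFlash stores (which always carry an `engine: tiflash` label) are no longer rejected when `strictly-match-label` is enabled; the label_test.go addition covers exactly that store. A sketch of the key-set check only, using hypothetical constants in place of `core.EngineKey` and `core.EngineTiFlash`; the real function also chooses between warning and rejecting based on the strictly-match-label setting.

```go
package main

import "fmt"

const (
	engineKey     = "engine"  // stand-in for core.EngineKey
	engineTiFlash = "tiflash" // stand-in for core.EngineTiFlash
)

// checkStoreLabels sketches the strict label check after the patch: every
// store label key must be one of the configured location labels, except the
// special engine key, which marks the store type (e.g. TiFlash) rather than
// its location and is now skipped.
func checkStoreLabels(storeLabels map[string]string, locationLabels []string) error {
	keysSet := make(map[string]struct{}, len(locationLabels))
	for _, k := range locationLabels {
		keysSet[k] = struct{}{}
	}
	for k := range storeLabels {
		if k == engineKey {
			continue
		}
		if _, ok := keysSet[k]; !ok {
			return fmt.Errorf("key %q does not match any location label", k)
		}
	}
	return nil
}

func main() {
	locationLabels := []string{"zone", "disk"}
	tiflash := map[string]string{"zone": "us-west-1", "disk": "ssd", engineKey: engineTiFlash}
	fmt.Println(checkStoreLabels(tiflash, locationLabels)) // <nil>: the engine label no longer trips the check
	unknown := map[string]string{"region": "us-west"}
	fmt.Println(checkStoreLabels(unknown, locationLabels)) // error: key "region" does not match any location label
}
```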
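
store_config.go now returns early from `CheckRegionSize` when `GetRegionSplitSize()` is 0, which happens when `region-split-size` is configured below 1 MiB and the MiB conversion truncates to zero (the "100KiB" case added to store_config_test.go). Without the guard, `size % regionSplitSize` would panic with an integer divide by zero. A minimal sketch of the guarded check, with illustrative parameter names rather than pd's real method signature:

```go
package main

import (
	"errors"
	"fmt"
)

// checkRegionSize mirrors the shape of StoreConfig.CheckRegionSize after the
// patch. All sizes are in MB; the names here are illustrative only.
func checkRegionSize(size, mergeSize, regionMaxSize, regionSplitSize uint64) error {
	if size < regionMaxSize {
		return nil
	}
	// A region-split-size below 1MiB parses to 0 MB; without this guard the
	// modulo below would panic with an integer divide by zero.
	if regionSplitSize == 0 {
		return nil
	}
	// The smallest of the split regions must not be mergeable again, so its
	// size has to exceed the merge size.
	if smallSize := size % regionSplitSize; smallSize <= mergeSize && smallSize != 0 {
		return errors.New("the smallest region of the split regions is less than max-merge-region-size, it will be merged again")
	}
	return nil
}

func main() {
	// Split size collapsed to 0 MB: the check passes instead of panicking.
	fmt.Println(checkRegionSize(200, 50, 144, 0)) // <nil>
	// Normal case: splitting 200MB by 96MB leaves an 8MB remainder <= 50MB merge size.
	fmt.Println(checkRegionSize(200, 50, 144, 96)) // error
}
```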
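
Finally, the lease.go hunk switches the inner select from the per-attempt timeout context (`ctx1.Done()`) to the worker's outer `ctx.Done()`, so that a keep-alive that succeeds just as its own deadline fires still delivers the refreshed expire time, and it logs when the response TTL is zero instead of silently dropping it; TestLeaseKeepAlive exercises this path against an embedded etcd. A simplified sketch of the worker pattern, assuming a hypothetical `keepAliveOnce` callback in place of pd's etcd lease client:

```go
package main

import (
	"context"
	"log"
	"time"
)

// keepAliveOnce is a hypothetical stand-in for one etcd KeepAliveOnce round
// trip; it returns the granted TTL in seconds (0 means the lease is gone).
type keepAliveOnce func(ctx context.Context) (ttl int64, err error)

// keepAliveWorker sends refreshed expire times on the returned channel. The
// inner select intentionally waits on the outer ctx, not the per-attempt
// timeout ctx1: if the keep-alive succeeded, the receiver should still get
// the new expire time even if this attempt's own deadline has passed.
func keepAliveWorker(ctx context.Context, interval, timeout time.Duration, once keepAliveOnce) <-chan time.Time {
	ch := make(chan time.Time)
	go func() {
		ticker := time.NewTicker(interval)
		defer ticker.Stop()
		for {
			go func() {
				start := time.Now()
				ctx1, cancel := context.WithTimeout(ctx, timeout)
				defer cancel()
				ttl, err := once(ctx1)
				if err != nil {
					log.Println("lease keep alive failed:", err)
					return
				}
				if ttl > 0 {
					expire := start.Add(time.Duration(ttl) * time.Second)
					select {
					case ch <- expire:
					// Wait on the outer ctx so a successful keep-alive can
					// always report its expire time (the fix in this patch).
					case <-ctx.Done():
					}
				} else {
					log.Println("keep alive response ttl is zero")
				}
			}()
			select {
			case <-ctx.Done():
				return
			case <-ticker.C:
			}
		}
	}()
	return ch
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
	defer cancel()
	ch := keepAliveWorker(ctx, time.Second, time.Second,
		func(context.Context) (int64, error) { return 3, nil })
	for i := 0; i < 2; i++ {
		log.Println("lease expires at", <-ch)
	}
}
```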