diff --git a/.github/workflows/check.yaml b/.github/workflows/check.yaml
index cfc113cfbd4..bc2ea54fde0 100644
--- a/.github/workflows/check.yaml
+++ b/.github/workflows/check.yaml
@@ -4,22 +4,11 @@ jobs:
   statics:
     runs-on: ubuntu-latest
     steps:
-      - uses: actions/setup-go@v2
-        with:
-          go-version: 1.16
       - name: Checkout code
-        uses: actions/checkout@v2
-      - name: Restore cache
-        uses: actions/cache@v2
+        uses: actions/checkout@v4
+      - uses: actions/setup-go@v5
         with:
-          path: |
-            ~/go/pkg/mod
-            ~/.cache/go-build
-            **/.tools
-            **/.dashboard_download_cache
-          key: ${{ runner.os }}-golang-${{ hashFiles('**/go.sum') }}
-          restore-keys: |
-            ${{ runner.os }}-golang
+          go-version: 1.16
       - name: Make Check
         run: |
           make build
diff --git a/.github/workflows/label.yaml b/.github/workflows/label.yaml
index 5ff2b895528..00438d26b63 100644
--- a/.github/workflows/label.yaml
+++ b/.github/workflows/label.yaml
@@ -7,7 +7,7 @@ jobs:
   add_labels:
     runs-on: ubuntu-latest
     steps:
-      - uses: actions/github-script@v4
+      - uses: actions/github-script@v7
        name: Add labels
        with:
          script: |
diff --git a/.github/workflows/pd-tests.yaml b/.github/workflows/pd-tests.yaml
index c29a68ec11a..4ae61db6d38 100644
--- a/.github/workflows/pd-tests.yaml
+++ b/.github/workflows/pd-tests.yaml
@@ -20,23 +20,11 @@ jobs:
     outputs:
       job-total: ${{ strategy.job-total }}
     steps:
-      - uses: actions/setup-go@v2
-        with:
-          go-version: 1.16
       - name: Checkout code
-        uses: actions/checkout@v2
-      - name: Restore cache
-        uses: actions/cache@v2
+        uses: actions/checkout@v4
+      - uses: actions/setup-go@v5
         with:
-          path: |
-            ~/go/pkg/mod
-            ~/.cache/go-build
-            **/.tools
-            **/.dashboard_download_cache
-          key: ${{ runner.os }}-go-${{ matrix.worker_id }}-${{ hashFiles('**/go.sum') }}
-          restore-keys: |
-            ${{ runner.os }}-go-${{ matrix.worker_id }}
-            ${{ runner.os }}-go-
+          go-version: 1.16
       - name: Dispatch Packages
         id: packages-units
         env:
@@ -62,20 +50,21 @@ jobs:
           mv covprofile covprofile_$WORKER_ID
           sed -i "/failpoint_binding/d" covprofile_$WORKER_ID
       - name: Upload coverage result ${{ matrix.worker_id }}
-        uses: actions/upload-artifact@v2
+        uses: actions/upload-artifact@v4
         with:
-          name: cover-reports
+          name: cover-reports-${{ matrix.worker_id }}
           path: covprofile_${{ matrix.worker_id }}
   report-coverage:
     needs: chunks
     runs-on: ubuntu-latest
     steps:
       - name: Checkout code
-        uses: actions/checkout@v2
+        uses: actions/checkout@v4
       - name: Download chunk report
-        uses: actions/download-artifact@v2
+        uses: actions/download-artifact@v4
         with:
-          name: cover-reports
+          pattern: cover-reports-*
+          merge-multiple: true
       - name: Merge
         env:
           TOTAL_JOBS: ${{needs.chunks.outputs.job-total}}
diff --git a/.github/workflows/tso-consistency-test.yaml b/.github/workflows/tso-consistency-test.yaml
index a3e1de811df..9b55282db27 100644
--- a/.github/workflows/tso-consistency-test.yaml
+++ b/.github/workflows/tso-consistency-test.yaml
@@ -8,10 +8,10 @@ jobs:
   tso-consistency-test:
     runs-on: ubuntu-latest
     steps:
-      - uses: actions/setup-go@v2
+      - name: Checkout code
+        uses: actions/checkout@v4
+      - uses: actions/setup-go@v5
         with:
           go-version: 1.16
-      - name: Checkout code
-        uses: actions/checkout@v2
       - name: Make TSO Consistency Test
         run: make test-tso-consistency
diff --git a/.github/workflows/tso-function-test.yaml b/.github/workflows/tso-function-test.yaml
index fd8c14f88f6..c9512ab747c 100644
--- a/.github/workflows/tso-function-test.yaml
+++ b/.github/workflows/tso-function-test.yaml
@@ -12,10 +12,10 @@ jobs:
   tso-function-test:
     runs-on: ubuntu-latest
     steps:
-      - uses: actions/setup-go@v2
+      - name: Checkout code
+        uses: actions/checkout@v4
+      - uses: actions/setup-go@v5
         with:
           go-version: 1.16
-      - name: Checkout code
-        uses: actions/checkout@v2
       - name: Make TSO Function Test
         run: make test-tso-function
diff --git a/OWNERS b/OWNERS
new file mode 100644
index 00000000000..0c8b972be1e
--- /dev/null
+++ b/OWNERS
@@ -0,0 +1,27 @@
+# See the OWNERS docs at https://go.k8s.io/owners
+approvers:
+  - AndreMouche
+  - binshi-bing
+  - bufferflies
+  - CabinfeverB
+  - Connor1996
+  - disksing
+  - huachaohuang
+  - HunDunDM
+  - HuSharp
+  - JmPotato
+  - lhy1024
+  - nolouch
+  - overvenus
+  - qiuyesuifeng
+  - rleungx
+  - siddontang
+  - Yisaer
+  - zhouqiang-cl
+reviewers:
+  - BusyJay
+  - howardlau1999
+  - Luffbee
+  - okJiang
+  - shafreeck
+  - xhebox
diff --git a/pkg/mock/mockcluster/mockcluster.go b/pkg/mock/mockcluster/mockcluster.go
index c1318868aef..8ec968c6614 100644
--- a/pkg/mock/mockcluster/mockcluster.go
+++ b/pkg/mock/mockcluster/mockcluster.go
@@ -166,7 +166,7 @@ func (mc *Cluster) AllocPeer(storeID uint64) (*metapb.Peer, error) {
 func (mc *Cluster) initRuleManager() {
 	if mc.RuleManager == nil {
 		mc.RuleManager = placement.NewRuleManager(core.NewStorage(kv.NewMemoryKV()), mc, mc.GetOpts())
-		mc.RuleManager.Initialize(int(mc.GetReplicationConfig().MaxReplicas), mc.GetReplicationConfig().LocationLabels)
+		mc.RuleManager.Initialize(int(mc.GetReplicationConfig().MaxReplicas), mc.GetReplicationConfig().LocationLabels, mc.GetReplicationConfig().IsolationLevel)
 	}
 }
diff --git a/server/api/label_test.go b/server/api/label_test.go
index 91902f14c40..cbb2d213d32 100644
--- a/server/api/label_test.go
+++ b/server/api/label_test.go
@@ -24,6 +24,7 @@ import (
 	"github.com/pingcap/kvproto/pkg/pdpb"
 	"github.com/tikv/pd/server"
 	"github.com/tikv/pd/server/config"
+	"github.com/tikv/pd/server/core"
 )
 
 var _ = Suite(&testLabelsStoreSuite{})
@@ -260,6 +261,30 @@ func (s *testStrictlyLabelsStoreSuite) TestStoreMatch(c *C) {
 			valid:       false,
 			expectError: "key matching the label was not found",
 		},
+		{
+			store: &metapb.Store{
+				Id:      3,
+				Address: "tiflash1",
+				State:   metapb.StoreState_Up,
+				Labels: []*metapb.StoreLabel{
+					{
+						Key:   "zone",
+						Value: "us-west-1",
+					},
+					{
+						Key:   "disk",
+						Value: "ssd",
+					},
+					{
+						Key:   core.EngineKey,
+						Value: core.EngineTiFlash,
+					},
+				},
+				Version: "3.0.0",
+			},
+			valid:       true,
+			expectError: "placement rules is disabled",
+		},
 	}
 
 	for _, t := range cases {
@@ -267,12 +292,16 @@ func (s *testStrictlyLabelsStoreSuite) TestStoreMatch(c *C) {
 			Header: &pdpb.RequestHeader{ClusterId: s.svr.ClusterID()},
 			Store: &metapb.Store{
 				Id:      t.store.Id,
-				Address: fmt.Sprintf("tikv%d", t.store.Id),
+				Address: t.store.Address,
 				State:   t.store.State,
 				Labels:  t.store.Labels,
 				Version: t.store.Version,
 			},
 		})
+		if t.store.Address == "tiflash1" {
+			c.Assert(strings.Contains(err.Error(), t.expectError), IsTrue)
+			continue
+		}
 		if t.valid {
 			c.Assert(err, IsNil)
 		} else {
@@ -287,7 +316,7 @@ func (s *testStrictlyLabelsStoreSuite) TestStoreMatch(c *C) {
 			Header: &pdpb.RequestHeader{ClusterId: s.svr.ClusterID()},
 			Store: &metapb.Store{
 				Id:      t.store.Id,
-				Address: fmt.Sprintf("tikv%d", t.store.Id),
+				Address: t.store.Address,
 				State:   t.store.State,
 				Labels:  t.store.Labels,
 				Version: t.store.Version,
diff --git a/server/api/operator_test.go b/server/api/operator_test.go
index f05b7e2ede2..e8d54db8ca1 100644
--- a/server/api/operator_test.go
+++ b/server/api/operator_test.go
@@ -350,7 +350,9 @@ func (s *testTransferRegionOperatorSuite) TestTransferRegionWithPlacementRule(c
 		if tc.placementRuleEnable {
 			err := s.svr.GetRaftCluster().GetRuleManager().Initialize(
 				s.svr.GetRaftCluster().GetOpts().GetMaxReplicas(),
-				s.svr.GetRaftCluster().GetOpts().GetLocationLabels())
+				s.svr.GetRaftCluster().GetOpts().GetLocationLabels(),
+				s.svr.GetRaftCluster().GetOpts().GetIsolationLevel(),
+			)
 			c.Assert(err, IsNil)
 		}
 		if len(tc.rules) > 0 {
diff --git a/server/cluster/cluster.go b/server/cluster/cluster.go
index 57963a48d9b..c557794b1c2 100644
--- a/server/cluster/cluster.go
+++ b/server/cluster/cluster.go
@@ -240,7 +240,7 @@ func (c *RaftCluster) Start(s Server) error {
 
 	c.ruleManager = placement.NewRuleManager(c.storage, c, c.GetOpts())
 	if c.opt.IsPlacementRulesEnabled() {
-		err = c.ruleManager.Initialize(c.opt.GetMaxReplicas(), c.opt.GetLocationLabels())
+		err = c.ruleManager.Initialize(c.opt.GetMaxReplicas(), c.opt.GetLocationLabels(), c.opt.GetIsolationLevel())
 		if err != nil {
 			return err
 		}
@@ -1027,6 +1027,9 @@ func (c *RaftCluster) checkStoreLabels(s *core.StoreInfo) error {
 	}
 	for _, label := range s.GetLabels() {
 		key := label.GetKey()
+		if key == core.EngineKey {
+			continue
+		}
 		if _, ok := keysSet[key]; !ok {
 			log.Warn("not found the key match with the store label",
 				zap.Stringer("store", s.GetMeta()),
diff --git a/server/cluster/cluster_test.go b/server/cluster/cluster_test.go
index d2ffea4931e..b6eb5e7c77f 100644
--- a/server/cluster/cluster_test.go
+++ b/server/cluster/cluster_test.go
@@ -850,7 +850,7 @@ func (s *testClusterInfoSuite) TestOfflineAndMerge(c *C) {
 	storage := core.NewStorage(kv.NewMemoryKV())
 	cluster.ruleManager = placement.NewRuleManager(storage, cluster, cluster.GetOpts())
 	if opt.IsPlacementRulesEnabled() {
-		err := cluster.ruleManager.Initialize(opt.GetMaxReplicas(), opt.GetLocationLabels())
+		err := cluster.ruleManager.Initialize(opt.GetMaxReplicas(), opt.GetLocationLabels(), opt.GetIsolationLevel())
 		if err != nil {
 			panic(err)
 		}
@@ -1146,7 +1146,7 @@ func newTestCluster(ctx context.Context, opt *config.PersistOptions) *testCluste
 	rc := newTestRaftCluster(ctx, mockid.NewIDAllocator(), opt, storage, core.NewBasicCluster())
 	rc.ruleManager = placement.NewRuleManager(storage, rc, rc.GetOpts())
 	if opt.IsPlacementRulesEnabled() {
-		err := rc.ruleManager.Initialize(opt.GetMaxReplicas(), opt.GetLocationLabels())
+		err := rc.ruleManager.Initialize(opt.GetMaxReplicas(), opt.GetLocationLabels(), opt.GetIsolationLevel())
 		if err != nil {
 			panic(err)
 		}
diff --git a/server/config/persist_options.go b/server/config/persist_options.go
index 0b1412f7109..f2eab891220 100644
--- a/server/config/persist_options.go
+++ b/server/config/persist_options.go
@@ -252,6 +252,13 @@ func (o *PersistOptions) SetSplitMergeInterval(splitMergeInterval time.Duration)
 	o.SetScheduleConfig(v)
 }
 
+// SetMaxStoreDownTime to set the max store down time. It's only used to test.
+func (o *PersistOptions) SetMaxStoreDownTime(time time.Duration) {
+	v := o.GetScheduleConfig().Clone()
+	v.MaxStoreDownTime = typeutil.NewDuration(time)
+	o.SetScheduleConfig(v)
+}
+
 // SetStoreLimit sets a store limit for a given type and rate.
 func (o *PersistOptions) SetStoreLimit(storeID uint64, typ storelimit.Type, ratePerMin float64) {
 	v := o.GetScheduleConfig().Clone()
diff --git a/server/election/lease.go b/server/election/lease.go
index e68286b4509..306291b855e 100644
--- a/server/election/lease.go
+++ b/server/election/lease.go
@@ -143,8 +143,11 @@ func (l *lease) keepAliveWorker(ctx context.Context, interval time.Duration) <-c
 					expire := start.Add(time.Duration(res.TTL) * time.Second)
 					select {
 					case ch <- expire:
-					case <-ctx1.Done():
+					// Here we don't use `ctx1.Done()` because we want to make sure if the keep alive success, we can update the expire time.
+					case <-ctx.Done():
 					}
+				} else {
+					log.Error("keep alive response ttl is zero", zap.String("purpose", l.Purpose))
 				}
 			}()
diff --git a/server/election/lease_test.go b/server/election/lease_test.go
index 0c0aa3c1687..0161e7f3d27 100644
--- a/server/election/lease_test.go
+++ b/server/election/lease_test.go
@@ -16,9 +16,11 @@ package election
 
 import (
 	"context"
+	"testing"
 	"time"
 
 	. "github.com/pingcap/check"
+	"github.com/stretchr/testify/require"
 	"github.com/tikv/pd/pkg/etcdutil"
 	"go.etcd.io/etcd/clientv3"
 	"go.etcd.io/etcd/embed"
@@ -104,3 +106,34 @@ func (s *testLeaseSuite) TestLease(c *C) {
 	time.Sleep((defaultLeaseTimeout + 1) * time.Second)
 	c.Check(lease1.IsExpired(), IsTrue)
 }
+
+func TestLeaseKeepAlive(t *testing.T) {
+	re := require.New(t)
+	cfg := etcdutil.NewTestSingleConfig()
+	etcd, err := embed.StartEtcd(cfg)
+	defer func() {
+		etcd.Close()
+	}()
+	re.NoError(err)
+
+	ep := cfg.LCUrls[0].String()
+	client, err := clientv3.New(clientv3.Config{
+		Endpoints: []string{ep},
+	})
+	re.NoError(err)
+
+	<-etcd.Server.ReadyNotify()
+
+	// Create the lease.
+	lease := &lease{
+		Purpose: "test_lease",
+		client:  client,
+		lease:   clientv3.NewLease(client),
+	}
+
+	re.NoError(lease.Grant(defaultLeaseTimeout))
+	ch := lease.keepAliveWorker(context.Background(), 2*time.Second)
+	time.Sleep(2 * time.Second)
+	<-ch
+	re.NoError(lease.Close())
+}
diff --git a/server/schedule/placement/rule_manager.go b/server/schedule/placement/rule_manager.go
index 66ec8f87754..48cb5d6ca8f 100644
--- a/server/schedule/placement/rule_manager.go
+++ b/server/schedule/placement/rule_manager.go
@@ -62,7 +62,7 @@ func NewRuleManager(storage *core.Storage, storeSetInformer core.StoreSetInforme
 
 // Initialize loads rules from storage. If Placement Rules feature is never enabled, it creates default rule that is
 // compatible with previous configuration.
-func (m *RuleManager) Initialize(maxReplica int, locationLabels []string) error {
+func (m *RuleManager) Initialize(maxReplica int, locationLabels []string, isolationLevel string) error {
 	m.Lock()
 	defer m.Unlock()
 	if m.initialized {
@@ -83,6 +83,7 @@ func (m *RuleManager) Initialize(maxReplica int, locationLabels []string) error
 			Role:           Voter,
 			Count:          maxReplica,
 			LocationLabels: locationLabels,
+			IsolationLevel: isolationLevel,
 		}
 		if err := m.storage.SaveRule(defaultRule.StoreKey(), defaultRule); err != nil {
 			return err
diff --git a/server/schedule/placement/rule_manager_test.go b/server/schedule/placement/rule_manager_test.go
index 1e82c1d7e40..9af1a65df79 100644
--- a/server/schedule/placement/rule_manager_test.go
+++ b/server/schedule/placement/rule_manager_test.go
@@ -16,6 +16,7 @@ package placement
 
 import (
 	"encoding/hex"
+
"github.com/pingcap/check" "github.com/pingcap/kvproto/pkg/metapb" "github.com/tikv/pd/pkg/codec" @@ -34,7 +35,7 @@ func (s *testManagerSuite) SetUpTest(c *C) { s.store = core.NewStorage(kv.NewMemoryKV()) var err error s.manager = NewRuleManager(s.store, nil, nil) - err = s.manager.Initialize(3, []string{"zone", "rack", "host"}) + err = s.manager.Initialize(3, []string{"zone", "rack", "host"}, "") c.Assert(err, IsNil) } @@ -111,7 +112,7 @@ func (s *testManagerSuite) TestSaveLoad(c *C) { } m2 := NewRuleManager(s.store, nil, nil) - err := m2.Initialize(3, []string{"no", "labels"}) + err := m2.Initialize(3, []string{"no", "labels"}, "") c.Assert(err, IsNil) c.Assert(m2.GetAllRules(), HasLen, 3) c.Assert(m2.GetRule("pd", "default").String(), Equals, rules[0].String()) @@ -126,7 +127,7 @@ func (s *testManagerSuite) TestSetAfterGet(c *C) { s.manager.SetRule(rule) m2 := NewRuleManager(s.store, nil, nil) - err := m2.Initialize(100, []string{}) + err := m2.Initialize(100, []string{}, "") c.Assert(err, IsNil) rule = m2.GetRule("pd", "default") c.Assert(rule.Count, Equals, 1) diff --git a/server/server.go b/server/server.go index 88647b89dd3..bd41b0eca0c 100644 --- a/server/server.go +++ b/server/server.go @@ -859,7 +859,7 @@ func (s *Server) SetReplicationConfig(cfg config.ReplicationConfig) error { } if cfg.EnablePlacementRules { // initialize rule manager. - if err := raftCluster.GetRuleManager().Initialize(int(cfg.MaxReplicas), cfg.LocationLabels); err != nil { + if err := raftCluster.GetRuleManager().Initialize(int(cfg.MaxReplicas), cfg.LocationLabels, cfg.IsolationLevel); err != nil { return err } } else { @@ -878,19 +878,19 @@ func (s *Server) SetReplicationConfig(cfg config.ReplicationConfig) error { defaultRule := s.GetRaftCluster().GetRuleManager().GetRule("pd", "default") CheckInDefaultRule := func() error { - // replication config won't work when placement rule is enabled and exceeds one default rule + // replication config won't work when placement rule is enabled and exceeds one default rule if !(defaultRule != nil && len(defaultRule.StartKey) == 0 && len(defaultRule.EndKey) == 0) { - return errors.New("cannot update MaxReplicas or LocationLabels when placement rules feature is enabled and not only default rule exists, please update rule instead") + return errors.New("cannot update MaxReplicas, LocationLabels or IsolationLevel when placement rules feature is enabled and not only default rule exists, please update rule instead") } - if !(defaultRule.Count == int(old.MaxReplicas) && typeutil.StringsEqual(defaultRule.LocationLabels, []string(old.LocationLabels))) { + if !(defaultRule.Count == int(old.MaxReplicas) && typeutil.StringsEqual(defaultRule.LocationLabels, []string(old.LocationLabels)) && defaultRule.IsolationLevel == old.IsolationLevel) { return errors.New("cannot to update replication config, the default rules do not consistent with replication config, please update rule instead") } return nil } - if !(cfg.MaxReplicas == old.MaxReplicas && typeutil.StringsEqual(cfg.LocationLabels, old.LocationLabels)) { + if !(cfg.MaxReplicas == old.MaxReplicas && typeutil.StringsEqual(cfg.LocationLabels, old.LocationLabels) && cfg.IsolationLevel == old.IsolationLevel) { if err := CheckInDefaultRule(); err != nil { return err } @@ -901,6 +901,7 @@ func (s *Server) SetReplicationConfig(cfg config.ReplicationConfig) error { if rule != nil { rule.Count = int(cfg.MaxReplicas) rule.LocationLabels = cfg.LocationLabels + rule.IsolationLevel = cfg.IsolationLevel if err := 
 			if err := s.GetRaftCluster().GetRuleManager().SetRule(rule); err != nil {
 				log.Error("failed to update rule count",
 					errs.ZapError(err))
diff --git a/server/statistics/region_collection_test.go b/server/statistics/region_collection_test.go
index 7ec069e810d..7ba08674027 100644
--- a/server/statistics/region_collection_test.go
+++ b/server/statistics/region_collection_test.go
@@ -41,7 +41,7 @@ func (t *testRegionStatisticsSuite) SetUpTest(c *C) {
 	t.store = core.NewStorage(kv.NewMemoryKV())
 	var err error
 	t.manager = placement.NewRuleManager(t.store, nil, nil)
-	err = t.manager.Initialize(3, []string{"zone", "rack", "host"})
+	err = t.manager.Initialize(3, []string{"zone", "rack", "host"}, "")
 	c.Assert(err, IsNil)
 }
diff --git a/tests/pdctl/config/config_test.go b/tests/pdctl/config/config_test.go
index 799a9e47151..0bbf2054095 100644
--- a/tests/pdctl/config/config_test.go
+++ b/tests/pdctl/config/config_test.go
@@ -620,7 +620,7 @@ func (s *configTestSuite) TestUpdateDefaultReplicaConfig(c *C) {
 		c.Assert(replicationCfg.MaxReplicas, Equals, expect)
 	}
 
-	checkLocaltionLabels := func(expect int) {
+	checkLocationLabels := func(expect int) {
 		args := []string{"-u", pdAddr, "config", "show", "replication"}
 		output, err := pdctl.ExecuteCommand(cmd, args...)
 		c.Assert(err, IsNil)
@@ -629,6 +629,15 @@ func (s *configTestSuite) TestUpdateDefaultReplicaConfig(c *C) {
 		c.Assert(replicationCfg.LocationLabels, HasLen, expect)
 	}
 
+	checkIsolationLevel := func(expect string) {
+		args := []string{"-u", pdAddr, "config", "show", "replication"}
+		output, err := pdctl.ExecuteCommand(cmd, args...)
+		c.Assert(err, IsNil)
+		replicationCfg := config.ReplicationConfig{}
+		c.Assert(json.Unmarshal(output, &replicationCfg), IsNil)
+		c.Assert(replicationCfg.IsolationLevel, Equals, expect)
+	}
+
 	checkRuleCount := func(expect int) {
 		args := []string{"-u", pdAddr, "config", "placement-rules", "show", "--group", "pd", "--id", "default"}
 		output, err := pdctl.ExecuteCommand(cmd, args...)
@@ -647,6 +656,15 @@ func (s *configTestSuite) TestUpdateDefaultReplicaConfig(c *C) {
 		c.Assert(rule.LocationLabels, HasLen, expect)
 	}
 
+	checkRuleIsolationLevel := func(expect string) {
+		args := []string{"-u", pdAddr, "config", "placement-rules", "show", "--group", "pd", "--id", "default"}
+		output, err := pdctl.ExecuteCommand(cmd, args...)
+		c.Assert(err, IsNil)
+		rule := placement.Rule{}
+		c.Assert(json.Unmarshal(output, &rule), IsNil)
+		c.Assert(rule.IsolationLevel, Equals, expect)
+	}
+
 	// update successfully when placement rules is not enabled.
 	output, err := pdctl.ExecuteCommand(cmd, "-u", pdAddr, "config", "set", "max-replicas", "2")
 	c.Assert(err, IsNil)
@@ -655,8 +673,13 @@ func (s *configTestSuite) TestUpdateDefaultReplicaConfig(c *C) {
 	output, err = pdctl.ExecuteCommand(cmd, "-u", pdAddr, "config", "set", "location-labels", "zone,host")
 	c.Assert(err, IsNil)
 	c.Assert(strings.Contains(string(output), "Success!"), IsTrue)
-	checkLocaltionLabels(2)
+	output, err = pdctl.ExecuteCommand(cmd, "-u", pdAddr, "config", "set", "isolation-level", "zone")
+	c.Assert(err, IsNil)
+	c.Assert(strings.Contains(string(output), "Success!"), IsTrue)
+	checkLocationLabels(2)
 	checkRuleLocationLabels(2)
+	checkIsolationLevel("zone")
+	checkRuleIsolationLevel("zone")
 
 	// update successfully when only one default rule exists.
 	output, err = pdctl.ExecuteCommand(cmd, "-u", pdAddr, "config", "placement-rules", "enable")
@@ -669,11 +692,18 @@ func (s *configTestSuite) TestUpdateDefaultReplicaConfig(c *C) {
 	checkMaxReplicas(3)
 	checkRuleCount(3)
 
+	// We need to change isolation first because we will validate
+	// if the location label contains the isolation level when setting location labels.
+	output, err = pdctl.ExecuteCommand(cmd, "-u", pdAddr, "config", "set", "isolation-level", "host")
+	c.Assert(err, IsNil)
+	c.Assert(strings.Contains(string(output), "Success!"), IsTrue)
 	output, err = pdctl.ExecuteCommand(cmd, "-u", pdAddr, "config", "set", "location-labels", "host")
 	c.Assert(err, IsNil)
 	c.Assert(strings.Contains(string(output), "Success!"), IsTrue)
-	checkLocaltionLabels(1)
+	checkLocationLabels(1)
 	checkRuleLocationLabels(1)
+	checkIsolationLevel("host")
+	checkRuleIsolationLevel("host")
 
 	// update unsuccessfully when many rule exists.
 	f, _ := os.CreateTemp("/tmp", "pd_tests")
@@ -703,8 +733,10 @@ func (s *configTestSuite) TestUpdateDefaultReplicaConfig(c *C) {
 	c.Assert(err, IsNil)
 	checkMaxReplicas(4)
 	checkRuleCount(4)
-	checkLocaltionLabels(1)
+	checkLocationLabels(1)
 	checkRuleLocationLabels(1)
+	checkIsolationLevel("host")
+	checkRuleIsolationLevel("host")
 }
 
 func (s *configTestSuite) TestPDServerConfig(c *C) {