From 715a78d895522ad8e6c0c15c43f03d0edbb32dfa Mon Sep 17 00:00:00 2001 From: Ti Chi Robot Date: Fri, 21 Apr 2023 14:43:21 +0800 Subject: [PATCH] storelimit: impl fead back by snapshot report (#6161) (#6349) close tikv/pd#6147, ref tikv/pd#6161 Signed-off-by: bufferflies <1045931706@qq.com> Co-authored-by: buffer <1045931706@qq.com> Co-authored-by: Ti Chi Robot --- pkg/core/constant/kind.go | 15 ++++ pkg/core/store.go | 18 ++++- pkg/core/storelimit/limit.go | 11 +++ pkg/core/storelimit/limit_test.go | 98 +++++++++++++++++++++++++-- pkg/core/storelimit/sliding_window.go | 63 +++++++++++++---- pkg/core/storelimit/store_limit.go | 8 +++ pkg/schedule/config/config.go | 1 + pkg/schedule/filter/filters_test.go | 3 +- pkg/schedule/operator/influence.go | 2 +- pkg/statistics/store_collection.go | 12 ++++ server/cluster/cluster.go | 39 ++++++++++- server/cluster/cluster_test.go | 4 ++ server/config/config.go | 12 ++++ server/config/persist_options.go | 5 ++ tests/pdctl/config/config_test.go | 10 +++ 15 files changed, 273 insertions(+), 28 deletions(-) diff --git a/pkg/core/constant/kind.go b/pkg/core/constant/kind.go index 20e9675775e..d9e0938885c 100644 --- a/pkg/core/constant/kind.go +++ b/pkg/core/constant/kind.go @@ -27,6 +27,21 @@ const ( PriorityLevelLen ) +func (p PriorityLevel) String() string { + switch p { + case Low: + return "low" + case Medium: + return "medium" + case High: + return "high" + case Urgent: + return "urgent" + default: + return "unknown" + } +} + // ScheduleKind distinguishes resources and schedule policy. type ScheduleKind struct { Resource ResourceKind diff --git a/pkg/core/store.go b/pkg/core/store.go index 776f4222fe4..979f70afa9e 100644 --- a/pkg/core/store.go +++ b/pkg/core/store.go @@ -106,11 +106,27 @@ func (s *StoreInfo) Clone(opts ...StoreCreateOption) *StoreInfo { store := *s store.meta = typeutil.DeepClone(s.meta, StoreFactory) for _, opt := range opts { - opt(&store) + if opt != nil { + opt(&store) + } } return &store } +// LimitVersion returns the limit version of the store. +func (s *StoreInfo) LimitVersion() string { + s.mu.RLock() + defer s.mu.RUnlock() + return s.limiter.Version() +} + +// Feedback is used to update the store's limit. +func (s *StoreInfo) Feedback(e float64) { + if limit := s.limiter; limit != nil { + limit.Feedback(e) + } +} + // ShallowClone creates a copy of current StoreInfo, but not clone 'meta'. func (s *StoreInfo) ShallowClone(opts ...StoreCreateOption) *StoreInfo { store := *s diff --git a/pkg/core/storelimit/limit.go b/pkg/core/storelimit/limit.go index 1ecd99102ad..b598daf6db7 100644 --- a/pkg/core/storelimit/limit.go +++ b/pkg/core/storelimit/limit.go @@ -32,6 +32,13 @@ const ( storeLimitTypeLen ) +const ( + // VersionV1 represents the rate limit version of the store limit + VersionV1 = "v1" + // VersionV2 represents the sliding window version of the store limit + VersionV2 = "v2" +) + // StoreLimit is an interface to control the operator rate of store // TODO: add a method to control the rate of store // the normal control flow is: @@ -48,7 +55,11 @@ type StoreLimit interface { Take(count int64, typ Type, level constant.PriorityLevel) bool // Reset resets the store limit Reset(rate float64, typ Type) + // Feedback update limit capacity by auto-tuning. + Feedback(e float64) // Ack put back the cost into the limit for the next waiting operator after the operator is finished. // only snapshot type can use this method. 
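	// A rough usage sketch of this interface (the variable names are
	// illustrative and not part of this patch): a caller checks Available
	// before scheduling, Takes the cost when the operator starts, Acks it
	// back once the snapshot finishes, and the heartbeat path reports the
	// measured error via Feedback so a v2 limiter can retune its capacity.
	//
	//	if lim.Available(size, SendSnapshot, constant.Low) && lim.Take(size, SendSnapshot, constant.Low) {
	//		// ... send the snapshot ...
	//		lim.Ack(size, SendSnapshot)
	//	}
	//	lim.Feedback(e) // positive e pushes the capacity up, negative e pushes it down; a no-op for v1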
Ack(cost int64, typ Type) + // Version returns the version of the store limit + Version() string } diff --git a/pkg/core/storelimit/limit_test.go b/pkg/core/storelimit/limit_test.go index 03cc06d8a17..60580fc795c 100644 --- a/pkg/core/storelimit/limit_test.go +++ b/pkg/core/storelimit/limit_test.go @@ -15,7 +15,11 @@ package storelimit import ( + "container/list" + "context" + "math/rand" "testing" + "time" "github.com/stretchr/testify/require" "github.com/tikv/pd/pkg/core/constant" @@ -41,8 +45,8 @@ func TestStoreLimit(t *testing.T) { func TestSlidingWindow(t *testing.T) { t.Parallel() re := require.New(t) - capacity := int64(10) - s := NewSlidingWindows(float64(capacity)) + capacity := int64(defaultWindowSize) + s := NewSlidingWindows() re.Len(s.windows, int(constant.PriorityLevelLen)) // capacity:[10, 10, 10, 10] for i, v := range s.windows { @@ -61,23 +65,23 @@ func TestSlidingWindow(t *testing.T) { // case 1: it will occupy the normal window size not the core.High window. re.True(s.Take(capacity, SendSnapshot, constant.High)) - re.EqualValues(capacity, s.GetUsed()) + re.EqualValues([]int64{capacity, 0, 0, 0}, s.GetUsed()) re.EqualValues(0, s.windows[constant.High].getUsed()) s.Ack(capacity, SendSnapshot) - re.EqualValues(s.GetUsed(), 0) + re.EqualValues([]int64{0, 0, 0, 0}, s.GetUsed()) // case 2: it will occupy the core.High window size if the normal window is full. capacity = 2000 - s.Reset(float64(capacity), SendSnapshot) + s.set(float64(capacity), SendSnapshot) re.True(s.Take(capacity-minSnapSize, SendSnapshot, constant.Low)) re.True(s.Take(capacity-minSnapSize, SendSnapshot, constant.Low)) re.False(s.Take(capacity, SendSnapshot, constant.Low)) re.True(s.Take(capacity-minSnapSize, SendSnapshot, constant.Medium)) re.False(s.Take(capacity-minSnapSize, SendSnapshot, constant.Medium)) - re.EqualValues(s.GetUsed(), capacity+capacity+capacity-minSnapSize*3) + re.EqualValues([]int64{capacity + capacity - minSnapSize*2, capacity - minSnapSize, 0, 0}, s.GetUsed()) s.Ack(capacity-minSnapSize, SendSnapshot) s.Ack(capacity-minSnapSize, SendSnapshot) - re.Equal(s.GetUsed(), capacity-minSnapSize) + re.Equal([]int64{capacity - minSnapSize, 0, 0, 0}, s.GetUsed()) // case 3: skip the type is not the SendSnapshot for i := 0; i < 10; i++ { @@ -108,3 +112,83 @@ func TestWindow(t *testing.T) { re.EqualValues(s.ack(minSnapSize*2), minSnapSize) re.EqualValues(s.getUsed(), 0) } + +func TestFeedback(t *testing.T) { + s := NewSlidingWindows() + re := require.New(t) + type SnapshotStats struct { + total int64 + remaining int64 + size int64 + start int64 + } + // region size is 10GB, snapshot write limit is 100MB/s and the snapshot concurrency is 3. + // the best strategy is that the tikv executing queue equals the wait. + regionSize, limit, wait := int64(10000), int64(100), int64(4) + iter := 100 + ops := make(chan int64, 10) + ctx, cancel := context.WithCancel(context.Background()) + + // generate the operator + go func() { + for { + if s.Available(regionSize, SendSnapshot, constant.Low) && iter > 0 { + iter-- + size := regionSize - rand.Int63n(regionSize/10) + s.Take(size, SendSnapshot, constant.Low) + ops <- size + } + if iter == 0 { + cancel() + return + } + } + }() + + // receive the operator + queue := list.List{} + interval := time.Microsecond * 100 + ticker := time.NewTicker(interval) + defer ticker.Stop() + + // tick is the time that the snapshot has been executed. 
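	// A worked example of the error signal produced below (using the constants
	// above; the concrete numbers are illustrative): with limit=100, a snapshot
	// of size 10000 needs total = 10000/100 = 100 ticks of pure execution, and
	// with wait=4 the target end-to-end cost is exec*wait = 400 ticks. If the
	// measured cost (tick - start, i.e. queueing plus execution) turns out to be
	// 500 ticks, the value fed into Feedback is 400 - 500 = -100, pushing the
	// window capacity down; a cost of 300 ticks yields +100, pushing it up.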
+ tick := int64(0) + for { + select { + case op := <-ops: + stats := &SnapshotStats{ + total: op / limit, + remaining: op, + size: op, + start: tick, + } + queue.PushBack(stats) + case <-ctx.Done(): + return + case <-ticker.C: + tick++ + first := queue.Front() + if first == nil { + continue + } + stats := first.Value.(*SnapshotStats) + if stats.remaining > 0 { + stats.remaining -= limit + continue + } + cost := tick - stats.start + exec := stats.total + if exec < 5 { + exec = 5 + } + err := exec*wait - cost + queue.Remove(first) + s.Feedback(float64(err)) + if iter < 5 { + re.Greater(float64(s.GetCap()), float64(regionSize*(wait-2))) + re.Less(float64(s.GetCap()), float64(regionSize*wait)) + } + s.Ack(stats.size, SendSnapshot) + } + } +} diff --git a/pkg/core/storelimit/sliding_window.go b/pkg/core/storelimit/sliding_window.go index 5fded60e124..0a70eb548d0 100644 --- a/pkg/core/storelimit/sliding_window.go +++ b/pkg/core/storelimit/sliding_window.go @@ -22,6 +22,11 @@ import ( const ( // minSnapSize is the min value to check the windows has enough size. minSnapSize = 10 + // defaultWindowSize is the default window size. + defaultWindowSize = 100 + + defaultProportion = 20 + defaultIntegral = 10 ) var _ StoreLimit = &SlidingWindows{} @@ -30,30 +35,53 @@ var _ StoreLimit = &SlidingWindows{} type SlidingWindows struct { mu syncutil.RWMutex windows []*window + lastSum float64 } // NewSlidingWindows is the construct of SlidingWindows. -func NewSlidingWindows(cap float64) *SlidingWindows { - if cap < 0 { - cap = minSnapSize - } +func NewSlidingWindows() *SlidingWindows { windows := make([]*window, constant.PriorityLevelLen) for i := 0; i < int(constant.PriorityLevelLen); i++ { - windows[i] = newWindow(int64(cap) >> i) + windows[i] = newWindow(int64(defaultWindowSize) >> i) } return &SlidingWindows{ windows: windows, } } -// Reset resets the capacity of the sliding windows. -// It doesn't clear all the used, only set the capacity. -func (s *SlidingWindows) Reset(cap float64, typ Type) { +// Version returns v2 +func (s *SlidingWindows) Version() string { + return VersionV2 +} + +// Feedback is used to update the capacity of the sliding windows. +func (s *SlidingWindows) Feedback(e float64) { + s.mu.Lock() + defer s.mu.Unlock() + // If the limiter is available, we don't need to update the capacity. + if s.windows[constant.Low].available() { + return + } + s.lastSum += e + // There are two constants to control the proportion of the sum and the current error. + // The sum of the error is used to ensure the capacity is more stable even if the error is zero. + // In the final scene, the sum of the error should be stable and the current error should be zero. + cap := defaultProportion*e + defaultIntegral*s.lastSum + // The capacity should be at least the default window size. + if cap < defaultWindowSize { + cap = defaultWindowSize + } + s.set(cap, SendSnapshot) +} + +// Reset does nothing because the capacity depends on the feedback. +func (s *SlidingWindows) Reset(_ float64, _ Type) { +} + +func (s *SlidingWindows) set(cap float64, typ Type) { if typ != SendSnapshot { return } - s.mu.Lock() - defer s.mu.Unlock() if cap < 0 { cap = minSnapSize } @@ -62,13 +90,20 @@ func (s *SlidingWindows) Reset(cap float64, typ Type) { } } +// GetCap returns the capacity of the sliding windows. +func (s *SlidingWindows) GetCap() int64 { + s.mu.RLock() + defer s.mu.RUnlock() + return s.windows[0].capacity +} + // GetUsed returns the used size in the sliding windows. 
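// A worked example of the Feedback update above (using the defaults
// defaultProportion=20, defaultIntegral=10 and defaultWindowSize=100; the
// numbers themselves are illustrative): suppose the low-priority window is
// currently not available, the accumulated sum is 100, and a new error e=50
// arrives. Then lastSum becomes 150 and the capacity is set to
// 20*50 + 10*150 = 2500. With e=-20 instead, lastSum becomes 80 and the
// result is 20*(-20) + 10*80 = 400. Whenever the formula yields less than
// defaultWindowSize, the capacity is clamped to 100.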
-func (s *SlidingWindows) GetUsed() int64 { +func (s *SlidingWindows) GetUsed() []int64 { s.mu.RLock() defer s.mu.RUnlock() - used := int64(0) - for _, v := range s.windows { - used += v.getUsed() + used := make([]int64, len(s.windows)) + for i, v := range s.windows { + used[i] = v.getUsed() } return used } diff --git a/pkg/core/storelimit/store_limit.go b/pkg/core/storelimit/store_limit.go index 5de17aecb30..79dc07245a2 100644 --- a/pkg/core/storelimit/store_limit.go +++ b/pkg/core/storelimit/store_limit.go @@ -84,6 +84,14 @@ func NewStoreRateLimit(ratePerSec float64) StoreLimit { // Ack does nothing. func (l *StoreRateLimit) Ack(_ int64, _ Type) {} +// Version returns v1 +func (l *StoreRateLimit) Version() string { + return VersionV1 +} + +// Feedback does nothing. +func (l *StoreRateLimit) Feedback(_ float64) {} + // Available returns the number of available tokens. // notice that the priority level is not used. func (l *StoreRateLimit) Available(cost int64, typ Type, _ constant.PriorityLevel) bool { diff --git a/pkg/schedule/config/config.go b/pkg/schedule/config/config.go index 47900511902..06ac209d57a 100644 --- a/pkg/schedule/config/config.go +++ b/pkg/schedule/config/config.go @@ -80,6 +80,7 @@ type Config interface { CheckLabelProperty(string, []*metapb.StoreLabel) bool IsDebugMetricsEnabled() bool GetClusterVersion() *semver.Version + GetStoreLimitVersion() string // for test purpose SetPlacementRuleEnabled(bool) SetSplitMergeInterval(time.Duration) diff --git a/pkg/schedule/filter/filters_test.go b/pkg/schedule/filter/filters_test.go index bdf88177c42..bd727f5b463 100644 --- a/pkg/schedule/filter/filters_test.go +++ b/pkg/schedule/filter/filters_test.go @@ -152,9 +152,8 @@ func TestRuleFitFilter(t *testing.T) { func TestSendStateFilter(t *testing.T) { re := require.New(t) - store := core.NewStoreInfoWithLabel(1, map[string]string{}).Clone(core.SetStoreLimit(storelimit.NewSlidingWindows(1000))) + store := core.NewStoreInfoWithLabel(1, map[string]string{}).Clone(core.SetStoreLimit(storelimit.NewSlidingWindows())) region := core.NewTestRegionInfo(1, 1, []byte(""), []byte("")) - snapshotFilter := NewSnapshotSendFilter([]*core.StoreInfo{store}, constant.Medium) re.NotNil(SelectOneRegion([]*core.RegionInfo{region}, nil, snapshotFilter)) re.True(store.GetStoreLimit().Take(1000, storelimit.SendSnapshot, constant.Medium)) diff --git a/pkg/schedule/operator/influence.go b/pkg/schedule/operator/influence.go index fed259d99f0..2b42ffa3516 100644 --- a/pkg/schedule/operator/influence.go +++ b/pkg/schedule/operator/influence.go @@ -33,7 +33,7 @@ func NewOpInfluence() *OpInfluence { } // Add adds another influence. 
-func (m OpInfluence) Add(other *OpInfluence) { +func (m *OpInfluence) Add(other *OpInfluence) { for id, v := range other.StoresInfluence { m.GetStoreInfluence(id).add(v) } diff --git a/pkg/statistics/store_collection.go b/pkg/statistics/store_collection.go index 98f6832c05c..bea2df04381 100644 --- a/pkg/statistics/store_collection.go +++ b/pkg/statistics/store_collection.go @@ -20,6 +20,8 @@ import ( "github.com/pingcap/kvproto/pkg/metapb" "github.com/tikv/pd/pkg/core" + "github.com/tikv/pd/pkg/core/constant" + "github.com/tikv/pd/pkg/core/storelimit" "github.com/tikv/pd/server/config" ) @@ -111,6 +113,16 @@ func (s *storeStatistics) Observe(store *core.StoreInfo, stats *StoresStats) { s.RegionCount += store.GetRegionCount() s.LeaderCount += store.GetLeaderCount() s.WitnessCount += store.GetWitnessCount() + limit, ok := store.GetStoreLimit().(*storelimit.SlidingWindows) + if ok { + cap := limit.GetCap() + storeStatusGauge.WithLabelValues(storeAddress, id, "windows_size").Set(float64(cap)) + for i, use := range limit.GetUsed() { + priority := constant.PriorityLevel(i).String() + storeStatusGauge.WithLabelValues(storeAddress, id, "windows_used_level_"+priority).Set(float64(use)) + } + } + // TODO: pre-allocate gauge metrics storeStatusGauge.WithLabelValues(storeAddress, id, "region_score").Set(store.RegionScore(s.opt.GetRegionScoreFormulaVersion(), s.opt.GetHighSpaceRatio(), s.opt.GetLowSpaceRatio(), 0)) storeStatusGauge.WithLabelValues(storeAddress, id, "leader_score").Set(store.LeaderScore(s.opt.GetLeaderSchedulePolicy(), 0)) diff --git a/server/cluster/cluster.go b/server/cluster/cluster.go index 92ad05176ab..3fcd3efdf28 100644 --- a/server/cluster/cluster.go +++ b/server/cluster/cluster.go @@ -95,6 +95,10 @@ const ( removingAction = "removing" preparingAction = "preparing" gcTunerCheckCfgInterval = 10 * time.Second + + // minSnapshotDurationSec is the minimum duration that a store can tolerate. + // It should enlarge the limiter if the snapshot's duration is less than this value. + minSnapshotDurationSec = 5 ) // Server is the interface for cluster. @@ -848,17 +852,29 @@ func (c *RaftCluster) HandleStoreHeartbeat(heartbeat *pdpb.StoreHeartbeatRequest return errors.Errorf("store %v not found", storeID) } + limit := store.GetStoreLimit() + version := c.opt.GetStoreLimitVersion() + var opt core.StoreCreateOption + if limit == nil || limit.Version() != version { + if version == storelimit.VersionV2 { + limit = storelimit.NewSlidingWindows() + } else { + limit = storelimit.NewStoreRateLimit(0.0) + } + opt = core.SetStoreLimit(limit) + } + nowTime := time.Now() var newStore *core.StoreInfo // If this cluster has slow stores, we should awaken hibernated regions in other stores. 
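	// The limiter swap above ties into two earlier hunks of this patch:
	// StoreInfo.Clone now skips nil options, so the possibly-nil `opt` can be
	// passed to Clone below unconditionally, and flipping store-limit-version
	// at runtime simply rebuilds the limiter on the next heartbeat. A rough
	// sketch of the dispatch (the helper name is illustrative, not part of
	// this patch):
	//
	//	func newLimiterFor(version string) storelimit.StoreLimit {
	//		if version == storelimit.VersionV2 {
	//			return storelimit.NewSlidingWindows()
	//		}
	//		return storelimit.NewStoreRateLimit(0.0)
	//	}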
 	if needAwaken, slowStoreIDs := c.NeedAwakenAllRegionsInStore(storeID); needAwaken {
 		log.Info("forcely awaken hibernated regions", zap.Uint64("store-id", storeID), zap.Uint64s("slow-stores", slowStoreIDs))
-		newStore = store.Clone(core.SetStoreStats(stats), core.SetLastHeartbeatTS(nowTime), core.SetLastAwakenTime(nowTime))
+		newStore = store.Clone(core.SetStoreStats(stats), core.SetLastHeartbeatTS(nowTime), core.SetLastAwakenTime(nowTime), opt)
 		resp.AwakenRegions = &pdpb.AwakenRegions{
 			AbnormalStores: slowStoreIDs,
 		}
 	} else {
-		newStore = store.Clone(core.SetStoreStats(stats), core.SetLastHeartbeatTS(nowTime))
+		newStore = store.Clone(core.SetStoreStats(stats), core.SetLastHeartbeatTS(nowTime), opt)
 	}
 
 	if newStore.IsLowSpace(c.opt.GetLowSpaceRatio()) {
@@ -919,7 +935,24 @@ func (c *RaftCluster) HandleStoreHeartbeat(heartbeat *pdpb.StoreHeartbeatRequest
 		peerInfo := core.NewPeerInfo(peer, loads, interval)
 		c.hotStat.CheckReadAsync(statistics.NewCheckPeerTask(peerInfo, region))
 	}
-
+	for _, stat := range stats.GetSnapshotStats() {
+		// The executing duration of a snapshot is the sum of its generating and sending durations.
+		// Notice: to enlarge the limit promptly, we raise the executing duration to minSnapshotDurationSec when it is less than that.
+		dur := stat.GetSendDurationSec() + stat.GetGenerateDurationSec()
+		if dur < minSnapshotDurationSec {
+			dur = minSnapshotDurationSec
+		}
+		// The error is the difference between the executing duration and the waiting duration.
+		// The waiting duration is the total duration minus the executing duration,
+		// so e = executing_duration - waiting_duration = executing_duration - (total_duration - executing_duration) = 2*executing_duration - total_duration.
+		// E.g. the total duration is 20s and the executing duration is 10s: the error is 0s.
+		// E.g. the total duration is 20s and the executing duration is 8s: the error is -4s.
+		// E.g. the total duration is 20s and the executing duration is 12s: the error is 4s.
+		// If the error is positive, most of the time was spent executing, so PD can send more snapshots to this TiKV.
+		// If the error is negative, most of the time was spent waiting, so PD should send fewer snapshots to this TiKV.
+		e := int64(dur)*2 - int64(stat.GetTotalDurationSec())
+		store.Feedback(float64(e))
+	}
 	// Here we will compare the reported regions with the previous hot peers to decide if it is still hot.
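	// A worked end-to-end example of the feedback loop above (the numbers are
	// illustrative): a report with generate=2s, send=1s and total=20s has an
	// executing duration of 3s, which is raised to minSnapshotDurationSec=5s,
	// so e = 2*5 - 20 = -10 and store.Feedback(-10) pushes the sliding-window
	// capacity down. A report with generate=8s, send=4s and total=16s gives
	// e = 2*12 - 16 = 8, pushing the capacity up. With the v1 rate limiter,
	// Feedback is a no-op.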
c.hotStat.CheckReadAsync(statistics.NewCollectUnReportedPeerTask(storeID, regions, interval)) return nil diff --git a/server/cluster/cluster_test.go b/server/cluster/cluster_test.go index e6165eac07c..5f84282e554 100644 --- a/server/cluster/cluster_test.go +++ b/server/cluster/cluster_test.go @@ -50,6 +50,7 @@ func TestStoreHeartbeat(t *testing.T) { defer cancel() _, opt, err := newTestScheduleConfig() + opt.GetScheduleConfig().StoreLimitVersion = "v2" re.NoError(err) cluster := newTestRaftCluster(ctx, mockid.NewIDAllocator(), opt, storage.NewStorageWithMemoryBackend(), core.NewBasicCluster()) @@ -84,6 +85,7 @@ func TestStoreHeartbeat(t *testing.T) { s := cluster.GetStore(store.GetID()) re.NotEqual(int64(0), s.GetLastHeartbeatTS().UnixNano()) re.Equal(req.GetStats(), s.GetStoreStats()) + re.Equal("v2", cluster.GetStore(1).GetStoreLimit().Version()) storeMetasAfterHeartbeat = append(storeMetasAfterHeartbeat, s.GetMeta()) } @@ -129,9 +131,11 @@ func TestStoreHeartbeat(t *testing.T) { }, PeerStats: []*pdpb.PeerStat{}, } + cluster.opt.GetScheduleConfig().StoreLimitVersion = "v1" re.NoError(cluster.HandleStoreHeartbeat(hotReq, hotResp)) re.NoError(cluster.HandleStoreHeartbeat(hotReq, hotResp)) re.NoError(cluster.HandleStoreHeartbeat(hotReq, hotResp)) + re.Equal("v1", cluster.GetStore(1).GetStoreLimit().Version()) time.Sleep(20 * time.Millisecond) storeStats := cluster.hotStat.RegionStats(statistics.Read, 3) re.Len(storeStats[1], 1) diff --git a/server/config/config.go b/server/config/config.go index 420222c2f23..92042c73187 100644 --- a/server/config/config.go +++ b/server/config/config.go @@ -672,6 +672,11 @@ type ScheduleConfig struct { // SlowStoreEvictingAffectedStoreRatioThreshold is the affected ratio threshold when judging a store is slow // A store's slowness must affected more than `store-count * SlowStoreEvictingAffectedStoreRatioThreshold` to trigger evicting. SlowStoreEvictingAffectedStoreRatioThreshold float64 `toml:"slow-store-evicting-affected-store-ratio-threshold" json:"slow-store-evicting-affected-store-ratio-threshold,omitempty"` + + // StoreLimitVersion is the version of store limit. + // v1: which is based on the region count by rate limit. + // v2: which is based on region size by window size. + StoreLimitVersion string `toml:"store-limit-version" json:"store-limit-version,omitempty"` } // Clone returns a cloned scheduling configuration. @@ -726,6 +731,8 @@ const ( defaultMaxStorePreparingTime = 48 * time.Hour // When a slow store affected more than 30% of total stores, it will trigger evicting. 
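	// A minimal configuration sketch for the new field (assuming the usual
	// [schedule] section of the PD config file; "v2" here is only an example,
	// the default defined below is "v1"):
	//
	//	[schedule]
	//	store-limit-version = "v2"
	//
	// The same switch can also be flipped at runtime with
	// `pd-ctl config set store-limit-version v2`, as exercised by the pdctl
	// test at the end of this patch.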
defaultSlowStoreEvictingAffectedStoreRatioThreshold = 0.3 + + defaultStoreLimitVersion = "v1" ) func (c *ScheduleConfig) adjust(meta *configutil.ConfigMetaData, reloading bool) error { @@ -777,6 +784,11 @@ func (c *ScheduleConfig) adjust(meta *configutil.ConfigMetaData, reloading bool) if !meta.IsDefined("store-limit-mode") { configutil.AdjustString(&c.StoreLimitMode, defaultStoreLimitMode) } + + if !meta.IsDefined("store-limit-version") { + configutil.AdjustString(&c.StoreLimitVersion, defaultStoreLimitVersion) + } + if !meta.IsDefined("enable-joint-consensus") { c.EnableJointConsensus = defaultEnableJointConsensus } diff --git a/server/config/persist_options.go b/server/config/persist_options.go index bc4d9e53d6c..c01c79a3d46 100644 --- a/server/config/persist_options.go +++ b/server/config/persist_options.go @@ -510,6 +510,11 @@ func (o *PersistOptions) GetStoreLimitMode() string { return o.GetScheduleConfig().StoreLimitMode } +// GetStoreLimitVersion returns the limit version of store. +func (o *PersistOptions) GetStoreLimitVersion() string { + return o.GetScheduleConfig().StoreLimitVersion +} + // GetTolerantSizeRatio gets the tolerant size ratio. func (o *PersistOptions) GetTolerantSizeRatio() float64 { return o.GetScheduleConfig().TolerantSizeRatio diff --git a/tests/pdctl/config/config_test.go b/tests/pdctl/config/config_test.go index 8995d71493b..9634737e18b 100644 --- a/tests/pdctl/config/config_test.go +++ b/tests/pdctl/config/config_test.go @@ -136,6 +136,16 @@ func TestConfig(t *testing.T) { re.Equal(20*10000, int(svr.GetScheduleConfig().MaxMergeRegionKeys)) re.Equal(20*10000, int(svr.GetScheduleConfig().GetMaxMergeRegionKeys())) + // set store limit v2 + args = []string{"-u", pdAddr, "config", "set", "store-limit-version", "v2"} + _, err = pdctl.ExecuteCommand(cmd, args...) + re.NoError(err) + re.Equal("v2", svr.GetScheduleConfig().StoreLimitVersion) + args = []string{"-u", pdAddr, "config", "set", "store-limit-version", "v1"} + _, err = pdctl.ExecuteCommand(cmd, args...) + re.NoError(err) + re.Equal("v1", svr.GetScheduleConfig().StoreLimitVersion) + // config show replication args = []string{"-u", pdAddr, "config", "show", "replication"} output, err = pdctl.ExecuteCommand(cmd, args...)
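// Expected behavior after `config set store-limit-version v2` above, mirroring
// the server/cluster test earlier in this patch: the next store heartbeat
// rebuilds each store's limiter with the configured version, so an assertion
// along the following lines holds (here `cluster` is the RaftCluster, as in
// server/cluster/cluster_test.go; the handles are illustrative).
//
//	re.Equal("v2", cluster.GetStore(1).GetStoreLimit().Version())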