
Commit

config: refactor StoreConfig into PersistOptions to ensure the persistence (#6902)

ref #5839

`StoreConfig` is needed by the scheduling service. To ensure it can be watched by that service, this PR:

- Refactor `StoreConfig` into `PersistOptions`.
- Remove `StoreConfigManager`.
- Persist the store config after each sync.

Signed-off-by: JmPotato <ghzpotato@gmail.com>
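
For readers skimming the diff below, here is a minimal, self-contained sketch of the access pattern the refactor standardizes on: the store config lives inside the options object and is read and swapped as a whole. The type and field names are illustrative stand-ins, not the actual PD definitions.

```go
package main

import (
	"fmt"
	"sync/atomic"
)

// StoreConfig is a cut-down stand-in for the TiKV store config that PD caches
// (illustrative fields only).
type StoreConfig struct {
	RegionMaxSize uint64 // MB
	RegionMaxKeys uint64
}

// PersistOptions is a toy version of the options object: the store config is
// kept behind an atomic slot so readers never see a half-updated value.
type PersistOptions struct {
	storeConfig atomic.Value // holds *StoreConfig
}

// GetStoreConfig returns the currently cached store config.
func (o *PersistOptions) GetStoreConfig() *StoreConfig {
	return o.storeConfig.Load().(*StoreConfig)
}

// SetStoreConfig swaps in a freshly synced store config; per the commit
// message, the real code would also persist it at this point.
func (o *PersistOptions) SetStoreConfig(cfg *StoreConfig) {
	o.storeConfig.Store(cfg)
}

func main() {
	opts := &PersistOptions{}
	opts.SetStoreConfig(&StoreConfig{RegionMaxSize: 144, RegionMaxKeys: 1440000})
	fmt.Println(opts.GetStoreConfig().RegionMaxSize) // 144
}
```

Keeping the config behind a single slot is what lets each sync simply swap in, and now also persist, the latest value without a separate manager object.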
JmPotato authored Aug 8, 2023
1 parent 2764bb2 commit c088cd1
Showing 12 changed files with 326 additions and 364 deletions.
2 changes: 1 addition & 1 deletion pkg/mock/mockcluster/config.go
@@ -182,7 +182,7 @@ func (mc *Cluster) SetRegionSizeMB(v uint64) {
}

func (mc *Cluster) updateStoreConfig(f func(*config.StoreConfig)) {
- r := mc.StoreConfigManager.GetStoreConfig().Clone()
+ r := mc.PersistOptions.GetStoreConfig().Clone()
f(r)
mc.SetStoreConfig(r)
}
20 changes: 9 additions & 11 deletions pkg/mock/mockcluster/mockcluster.go
@@ -57,23 +57,21 @@ type Cluster struct {
*config.PersistOptions
ID uint64
suspectRegions map[uint64]struct{}
- *config.StoreConfigManager
*buckets.HotBucketCache
storage.Storage
}

// NewCluster creates a new Cluster
func NewCluster(ctx context.Context, opts *config.PersistOptions) *Cluster {
c := &Cluster{
- ctx: ctx,
- BasicCluster: core.NewBasicCluster(),
- IDAllocator: mockid.NewIDAllocator(),
- HotStat: statistics.NewHotStat(ctx),
- HotBucketCache: buckets.NewBucketsCache(ctx),
- PersistOptions: opts,
- suspectRegions: map[uint64]struct{}{},
- StoreConfigManager: config.NewTestStoreConfigManager(nil),
- Storage: storage.NewStorageWithMemoryBackend(),
+ ctx: ctx,
+ BasicCluster: core.NewBasicCluster(),
+ IDAllocator: mockid.NewIDAllocator(),
+ HotStat: statistics.NewHotStat(ctx),
+ HotBucketCache: buckets.NewBucketsCache(ctx),
+ PersistOptions: opts,
+ suspectRegions: map[uint64]struct{}{},
+ Storage: storage.NewStorageWithMemoryBackend(),
}
if c.PersistOptions.GetReplicationConfig().EnablePlacementRules {
c.initRuleManager()
@@ -86,7 +84,7 @@ func NewCluster(ctx context.Context, opts *config.PersistOptions) *Cluster {

// GetStoreConfig returns the store config.
func (mc *Cluster) GetStoreConfig() sc.StoreConfigProvider {
- return mc.StoreConfigManager.GetStoreConfig()
+ return mc
}

// GetCheckerConfig returns the checker config.
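
The `return mc` change above works because `Cluster` embeds `*config.PersistOptions` (see the first hunk of this file), so the promoted getters make the mock cluster itself satisfy `sc.StoreConfigProvider`. A self-contained sketch of that embedding pattern, using toy types rather than the PD ones:

```go
package main

import "fmt"

// StoreConfigProvider is a cut-down version of the scheduling-side interface.
type StoreConfigProvider interface {
	GetRegionMaxSize() uint64
}

// Options plays the role of PersistOptions and implements the provider method.
type Options struct {
	regionMaxSize uint64
}

func (o *Options) GetRegionMaxSize() uint64 { return o.regionMaxSize }

// Cluster plays the role of mockcluster.Cluster: it embeds *Options, so the
// promoted methods make Cluster itself a StoreConfigProvider.
type Cluster struct {
	*Options
}

// GetStoreConfig can simply return the cluster, mirroring the diff above.
func (c *Cluster) GetStoreConfig() StoreConfigProvider { return c }

func main() {
	c := &Cluster{Options: &Options{regionMaxSize: 144}}
	fmt.Println(c.GetStoreConfig().GetRegionMaxSize()) // 144
}
```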
4 changes: 3 additions & 1 deletion pkg/schedule/config/config_provider.go
@@ -73,6 +73,7 @@ type SchedulerConfigProvider interface {
// CheckerConfigProvider is the interface for checker configurations.
type CheckerConfigProvider interface {
SharedConfigProvider
+ StoreConfigProvider

GetSwitchWitnessInterval() time.Duration
IsRemoveExtraReplicaEnabled() bool
@@ -120,6 +121,7 @@ type SharedConfigProvider interface {
type ConfProvider interface {
SchedulerConfigProvider
CheckerConfigProvider
+ StoreConfigProvider
// for test purpose
SetPlacementRuleEnabled(bool)
SetSplitMergeInterval(time.Duration)
@@ -132,10 +134,10 @@ type ConfProvider interface {
// StoreConfigProvider is the interface that wraps the StoreConfigProvider related methods.
type StoreConfigProvider interface {
GetRegionMaxSize() uint64
GetRegionMaxKeys() uint64
CheckRegionSize(uint64, uint64) error
CheckRegionKeys(uint64, uint64) error
IsEnableRegionBucket() bool
IsRaftKV2() bool
// for test purpose
SetRegionBucketEnabled(bool)
}
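
With `StoreConfigProvider` now embedded in `CheckerConfigProvider` and `ConfProvider`, any concrete options type handed to the checkers must also expose the store-config getters. A common way to catch a missing method at compile time is a blank-identifier assertion; the snippet below uses reduced toy interfaces and a stand-in `PersistOptions` with illustrative return values, not the real PD types:

```go
package main

// Reduced versions of the composed configuration interfaces.
type StoreConfigProvider interface {
	GetRegionMaxSize() uint64
	GetRegionMaxKeys() uint64
}

type CheckerConfigProvider interface {
	StoreConfigProvider // embedded: checkers can read store limits directly
	IsRemoveExtraReplicaEnabled() bool
}

// PersistOptions stands in for the real options type.
type PersistOptions struct{}

func (*PersistOptions) GetRegionMaxSize() uint64          { return 144 }
func (*PersistOptions) GetRegionMaxKeys() uint64          { return 1440000 }
func (*PersistOptions) IsRemoveExtraReplicaEnabled() bool { return true }

// Compile-time checks: if a method is missing, the build fails here.
var (
	_ StoreConfigProvider   = (*PersistOptions)(nil)
	_ CheckerConfigProvider = (*PersistOptions)(nil)
)

func main() {}
```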
30 changes: 13 additions & 17 deletions pkg/statistics/region_collection.go
@@ -22,7 +22,6 @@ import (
"github.com/tikv/pd/pkg/core"
sc "github.com/tikv/pd/pkg/schedule/config"
"github.com/tikv/pd/pkg/schedule/placement"
"github.com/tikv/pd/server/config"
)

// RegionInfoProvider is an interface to provide the region information.
@@ -87,28 +86,25 @@ type RegionInfoWithTS struct {
// RegionStatistics is used to record the status of regions.
type RegionStatistics struct {
sync.RWMutex
- rip RegionInfoProvider
- conf sc.CheckerConfigProvider
- stats map[RegionStatisticType]map[uint64]*RegionInfoWithTS
- index map[uint64]RegionStatisticType
- ruleManager *placement.RuleManager
- storeConfigManager *config.StoreConfigManager
+ rip RegionInfoProvider
+ conf sc.CheckerConfigProvider
+ stats map[RegionStatisticType]map[uint64]*RegionInfoWithTS
+ index map[uint64]RegionStatisticType
+ ruleManager *placement.RuleManager
}

// NewRegionStatistics creates a new RegionStatistics.
func NewRegionStatistics(
rip RegionInfoProvider,
conf sc.CheckerConfigProvider,
ruleManager *placement.RuleManager,
- storeConfigManager *config.StoreConfigManager,
) *RegionStatistics {
r := &RegionStatistics{
- rip: rip,
- conf: conf,
- ruleManager: ruleManager,
- storeConfigManager: storeConfigManager,
- stats: make(map[RegionStatisticType]map[uint64]*RegionInfoWithTS),
- index: make(map[uint64]RegionStatisticType),
+ rip: rip,
+ conf: conf,
+ ruleManager: ruleManager,
+ stats: make(map[RegionStatisticType]map[uint64]*RegionInfoWithTS),
+ index: make(map[uint64]RegionStatisticType),
}
for _, typ := range regionStatisticTypes {
r.stats[typ] = make(map[uint64]*RegionInfoWithTS)
@@ -149,7 +145,7 @@ func (r *RegionStatistics) deleteEntry(deleteIndex RegionStatisticType, regionID
func (r *RegionStatistics) RegionStatsNeedUpdate(region *core.RegionInfo) bool {
regionID := region.GetID()
if r.IsRegionStatsType(regionID, OversizedRegion) !=
- region.IsOversized(int64(r.storeConfigManager.GetStoreConfig().GetRegionMaxSize()), int64(r.storeConfigManager.GetStoreConfig().GetRegionMaxKeys())) {
+ region.IsOversized(int64(r.conf.GetRegionMaxSize()), int64(r.conf.GetRegionMaxKeys())) {
return true
}
return r.IsRegionStatsType(regionID, UndersizedRegion) !=
@@ -204,8 +200,8 @@ func (r *RegionStatistics) Observe(region *core.RegionInfo, stores []*core.Store
LearnerPeer: len(region.GetLearners()) > 0,
EmptyRegion: region.GetApproximateSize() <= core.EmptyRegionApproximateSize,
OversizedRegion: region.IsOversized(
- int64(r.storeConfigManager.GetStoreConfig().GetRegionMaxSize()),
- int64(r.storeConfigManager.GetStoreConfig().GetRegionMaxKeys()),
+ int64(r.conf.GetRegionMaxSize()),
+ int64(r.conf.GetRegionMaxKeys()),
),
UndersizedRegion: region.NeedMerge(
int64(r.conf.GetMaxMergeRegionSize()),
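
With the manager gone, `RegionStatistics` reads the size and key limits straight from its checker config, as the hunks above show. Roughly speaking, a region counts as oversized once either limit is exceeded; the helper below is an illustrative reconstruction of that check under that assumption, not the actual `IsOversized` implementation, and the config values are the usual TiKV defaults.

```go
package main

import "fmt"

// checkerConfig is a toy subset of sc.CheckerConfigProvider.
type checkerConfig interface {
	GetRegionMaxSize() uint64 // MB
	GetRegionMaxKeys() uint64
}

type opts struct{}

func (opts) GetRegionMaxSize() uint64 { return 144 }
func (opts) GetRegionMaxKeys() uint64 { return 1440000 }

// isOversized approximates the region.IsOversized call in the diff:
// a region is flagged once its approximate size or key count passes the limit.
func isOversized(conf checkerConfig, approximateSize, approximateKeys int64) bool {
	return approximateSize >= int64(conf.GetRegionMaxSize()) ||
		approximateKeys >= int64(conf.GetRegionMaxKeys())
}

func main() {
	fmt.Println(isOversized(opts{}, 200, 100))      // true: size over 144 MB
	fmt.Println(isOversized(opts{}, 96, 100))       // false
	fmt.Println(isOversized(opts{}, 96, 2_000_000)) // true: keys over the limit
}
```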
4 changes: 2 additions & 2 deletions pkg/statistics/region_collection_test.go
@@ -65,7 +65,7 @@ func TestRegionStatistics(t *testing.T) {
r2 := &metapb.Region{Id: 2, Peers: peers[0:2], StartKey: []byte("cc"), EndKey: []byte("dd")}
region1 := core.NewRegionInfo(r1, peers[0])
region2 := core.NewRegionInfo(r2, peers[0])
- regionStats := NewRegionStatistics(nil, opt, manager, nil)
+ regionStats := NewRegionStatistics(nil, opt, manager)
regionStats.Observe(region1, stores)
re.Len(regionStats.stats[ExtraPeer], 1)
re.Len(regionStats.stats[LearnerPeer], 1)
@@ -150,7 +150,7 @@ func TestRegionStatisticsWithPlacementRule(t *testing.T) {
region3 := core.NewRegionInfo(r3, peers[0])
region4 := core.NewRegionInfo(r4, peers[0])
region5 := core.NewRegionInfo(r5, peers[4])
- regionStats := NewRegionStatistics(nil, opt, manager, nil)
+ regionStats := NewRegionStatistics(nil, opt, manager)
// r2 didn't match the rules
regionStats.Observe(region2, stores)
re.Len(regionStats.stats[MissPeer], 1)
17 changes: 8 additions & 9 deletions pkg/statistics/store_collection.go
@@ -33,7 +33,6 @@ const (

type storeStatistics struct {
opt *config.PersistOptions
- storeConfig *config.StoreConfig
Up int
Disconnect int
Unhealthy int
@@ -55,10 +54,9 @@ type storeStatistics struct {
Removed int
}

- func newStoreStatistics(opt *config.PersistOptions, storeConfig *config.StoreConfig) *storeStatistics {
+ func newStoreStatistics(opt *config.PersistOptions) *storeStatistics {
return &storeStatistics{
opt: opt,
- storeConfig: storeConfig,
LabelCounter: make(map[string]int),
}
}
@@ -220,10 +218,11 @@ func (s *storeStatistics) Collect() {
configs["max-snapshot-count"] = float64(s.opt.GetMaxSnapshotCount())
configs["max-merge-region-size"] = float64(s.opt.GetMaxMergeRegionSize())
configs["max-merge-region-keys"] = float64(s.opt.GetMaxMergeRegionKeys())
configs["region-max-size"] = float64(s.storeConfig.GetRegionMaxSize())
configs["region-split-size"] = float64(s.storeConfig.GetRegionSplitSize())
configs["region-split-keys"] = float64(s.storeConfig.GetRegionSplitKeys())
configs["region-max-keys"] = float64(s.storeConfig.GetRegionMaxKeys())
storeConfig := s.opt.GetStoreConfig()
configs["region-max-size"] = float64(storeConfig.GetRegionMaxSize())
configs["region-split-size"] = float64(storeConfig.GetRegionSplitSize())
configs["region-split-keys"] = float64(storeConfig.GetRegionSplitKeys())
configs["region-max-keys"] = float64(storeConfig.GetRegionMaxKeys())

var enableMakeUpReplica, enableRemoveDownReplica, enableRemoveExtraReplica, enableReplaceOfflineReplica float64
if s.opt.IsMakeUpReplicaEnabled() {
@@ -292,10 +291,10 @@ type storeStatisticsMap struct {
}

// NewStoreStatisticsMap creates a new storeStatisticsMap.
- func NewStoreStatisticsMap(opt *config.PersistOptions, storeConfig *config.StoreConfig) *storeStatisticsMap {
+ func NewStoreStatisticsMap(opt *config.PersistOptions) *storeStatisticsMap {
return &storeStatisticsMap{
opt: opt,
- stats: newStoreStatistics(opt, storeConfig),
+ stats: newStoreStatistics(opt),
}
}

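
`Collect` now asks the options for the store config instead of receiving a separate `*config.StoreConfig`, and copies the region limits into the `configs` map shown above (in PD these values then feed config gauges). A small sketch of that copying step, with toy types and illustrative default values:

```go
package main

import "fmt"

// storeConfig is a toy stand-in for the object returned by opt.GetStoreConfig().
type storeConfig struct {
	regionMaxSize, regionSplitSize uint64
	regionMaxKeys, regionSplitKeys uint64
}

func (c *storeConfig) GetRegionMaxSize() uint64   { return c.regionMaxSize }
func (c *storeConfig) GetRegionSplitSize() uint64 { return c.regionSplitSize }
func (c *storeConfig) GetRegionMaxKeys() uint64   { return c.regionMaxKeys }
func (c *storeConfig) GetRegionSplitKeys() uint64 { return c.regionSplitKeys }

// collectStoreConfig fills the same keys the diff writes into the configs map.
func collectStoreConfig(cfg *storeConfig, configs map[string]float64) {
	configs["region-max-size"] = float64(cfg.GetRegionMaxSize())
	configs["region-split-size"] = float64(cfg.GetRegionSplitSize())
	configs["region-split-keys"] = float64(cfg.GetRegionSplitKeys())
	configs["region-max-keys"] = float64(cfg.GetRegionMaxKeys())
}

func main() {
	configs := make(map[string]float64)
	cfg := &storeConfig{regionMaxSize: 144, regionSplitSize: 96, regionMaxKeys: 1440000, regionSplitKeys: 960000}
	collectStoreConfig(cfg, configs)
	fmt.Println(configs["region-max-size"], configs["region-split-keys"])
}
```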
2 changes: 1 addition & 1 deletion pkg/statistics/store_collection_test.go
@@ -65,7 +65,7 @@ func TestStoreStatistics(t *testing.T) {
UsedSize: 0,
}))
stores[5] = store5
- storeStats := NewStoreStatisticsMap(opt, nil)
+ storeStats := NewStoreStatisticsMap(opt)
for _, store := range stores {
storeStats.Observe(store, storesStats)
}