diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS index f2a8157d0c4..4c952e2f1ea 100644 --- a/.github/CODEOWNERS +++ b/.github/CODEOWNERS @@ -1,5 +1,5 @@ # Require review from domain experts when the PR modified significant config files. /server/config/config.go @tikv/pd-configuration-reviewer -/server/schedulers/hot_region_config.go @tikv/pd-configuration-reviewer +/server/schedule/schedulers/hot_region_config.go @tikv/pd-configuration-reviewer /conf/config.toml @tikv/pd-configuration-reviewer /metrics/grafana/pd.json @tikv/pd-configuration-reviewer diff --git a/cmd/pd-server/main.go b/cmd/pd-server/main.go index f655166a177..5d83791c65a 100644 --- a/cmd/pd-server/main.go +++ b/cmd/pd-server/main.go @@ -38,7 +38,7 @@ import ( "github.com/tikv/pd/server/apiv2" "github.com/tikv/pd/server/config" "github.com/tikv/pd/server/join" - "github.com/tikv/pd/server/schedulers" + "github.com/tikv/pd/server/schedule/schedulers" "go.uber.org/zap" ) diff --git a/pkg/autoscaling/calculation_test.go b/pkg/autoscaling/calculation_test.go index a40d81a2042..de3be68d68c 100644 --- a/pkg/autoscaling/calculation_test.go +++ b/pkg/autoscaling/calculation_test.go @@ -25,7 +25,7 @@ import ( "github.com/stretchr/testify/require" "github.com/tikv/pd/pkg/core" "github.com/tikv/pd/pkg/mock/mockcluster" - "github.com/tikv/pd/server/config" + "github.com/tikv/pd/pkg/mock/mockconfig" ) func TestGetScaledTiKVGroups(t *testing.T) { @@ -34,7 +34,7 @@ func TestGetScaledTiKVGroups(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() // case1 indicates the tikv cluster with not any group existed - case1 := mockcluster.NewCluster(ctx, config.NewTestOptions()) + case1 := mockcluster.NewCluster(ctx, mockconfig.NewTestOptions()) case1.AddLabelsStore(1, 1, map[string]string{}) case1.AddLabelsStore(2, 1, map[string]string{ "foo": "bar", @@ -44,7 +44,7 @@ func TestGetScaledTiKVGroups(t *testing.T) { }) // case2 indicates the tikv cluster with 1 auto-scaling group existed - case2 := mockcluster.NewCluster(ctx, config.NewTestOptions()) + case2 := mockcluster.NewCluster(ctx, mockconfig.NewTestOptions()) case2.AddLabelsStore(1, 1, map[string]string{}) case2.AddLabelsStore(2, 1, map[string]string{ groupLabelKey: fmt.Sprintf("%s-%s-0", autoScalingGroupLabelKeyPrefix, TiKV.String()), @@ -56,7 +56,7 @@ func TestGetScaledTiKVGroups(t *testing.T) { }) // case3 indicates the tikv cluster with other group existed - case3 := mockcluster.NewCluster(ctx, config.NewTestOptions()) + case3 := mockcluster.NewCluster(ctx, mockconfig.NewTestOptions()) case3.AddLabelsStore(1, 1, map[string]string{}) case3.AddLabelsStore(2, 1, map[string]string{ groupLabelKey: "foo", @@ -331,7 +331,7 @@ func TestStrategyChangeCount(t *testing.T) { // tikv cluster with 1 auto-scaling group existed ctx, cancel := context.WithCancel(context.Background()) defer cancel() - cluster := mockcluster.NewCluster(ctx, config.NewTestOptions()) + cluster := mockcluster.NewCluster(ctx, mockconfig.NewTestOptions()) cluster.AddLabelsStore(1, 1, map[string]string{}) cluster.AddLabelsStore(2, 1, map[string]string{ groupLabelKey: fmt.Sprintf("%s-%s-0", autoScalingGroupLabelKeyPrefix, TiKV.String()), diff --git a/pkg/mock/mockcluster/config.go b/pkg/mock/mockcluster/config.go index 1f85a66507c..ae37b8b3c05 100644 --- a/pkg/mock/mockcluster/config.go +++ b/pkg/mock/mockcluster/config.go @@ -169,3 +169,19 @@ func (mc *Cluster) SetMaxReplicasWithLabel(enablePlacementRules bool, num int, l mc.SetLocationLabels(labels) } } + +// SetRegionMaxSize sets the region 
max size. +func (mc *Cluster) SetRegionMaxSize(v string) { + mc.updateStoreConfig(func(r *config.StoreConfig) { r.RegionMaxSize = v }) +} + +// SetRegionSizeMB sets the region max size in MB. +func (mc *Cluster) SetRegionSizeMB(v uint64) { + mc.updateStoreConfig(func(r *config.StoreConfig) { r.RegionMaxSizeMB = v }) +} + +func (mc *Cluster) updateStoreConfig(f func(*config.StoreConfig)) { + r := mc.StoreConfigManager.GetStoreConfig().Clone() + f(r) + mc.SetStoreConfig(r) +} diff --git a/pkg/mock/mockcluster/mockcluster.go b/pkg/mock/mockcluster/mockcluster.go index f03e99bbd32..a07713331fe 100644 --- a/pkg/mock/mockcluster/mockcluster.go +++ b/pkg/mock/mockcluster/mockcluster.go @@ -33,6 +33,7 @@ import ( "github.com/tikv/pd/pkg/utils/typeutil" "github.com/tikv/pd/pkg/versioninfo" "github.com/tikv/pd/server/config" + sc "github.com/tikv/pd/server/schedule/config" "github.com/tikv/pd/server/schedule/labeler" "github.com/tikv/pd/server/schedule/placement" "github.com/tikv/pd/server/statistics" @@ -81,12 +82,12 @@ func NewCluster(ctx context.Context, opts *config.PersistOptions) *Cluster { } // GetStoreConfig returns the store config. -func (mc *Cluster) GetStoreConfig() *config.StoreConfig { +func (mc *Cluster) GetStoreConfig() sc.StoreConfig { return mc.StoreConfigManager.GetStoreConfig() } // GetOpts returns the cluster configuration. -func (mc *Cluster) GetOpts() *config.PersistOptions { +func (mc *Cluster) GetOpts() sc.Config { return mc.PersistOptions } diff --git a/pkg/mock/mockconfig/mockconfig.go b/pkg/mock/mockconfig/mockconfig.go new file mode 100644 index 00000000000..b373fe02ad7 --- /dev/null +++ b/pkg/mock/mockconfig/mockconfig.go @@ -0,0 +1,31 @@ +// Copyright 2023 TiKV Project Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package mockconfig + +import ( + "github.com/tikv/pd/server/config" + sc "github.com/tikv/pd/server/schedule/config" +) + +// NewTestOptions creates default options for testing. +func NewTestOptions() *config.PersistOptions { + // register default schedulers in case config check fails.
+ for _, d := range config.DefaultSchedulers { + sc.RegisterScheduler(d.Type) + } + c := config.NewConfig() + c.Adjust(nil, false) + return config.NewPersistOptions(c) +} diff --git a/pkg/mock/mockhbstream/mockhbstream_test.go b/pkg/mock/mockhbstream/mockhbstream_test.go index 49c631fdeb2..d9a83cdc3d4 100644 --- a/pkg/mock/mockhbstream/mockhbstream_test.go +++ b/pkg/mock/mockhbstream/mockhbstream_test.go @@ -24,9 +24,9 @@ import ( "github.com/stretchr/testify/require" "github.com/tikv/pd/pkg/core" "github.com/tikv/pd/pkg/mock/mockcluster" + "github.com/tikv/pd/pkg/mock/mockconfig" "github.com/tikv/pd/pkg/utils/testutil" "github.com/tikv/pd/pkg/utils/typeutil" - "github.com/tikv/pd/server/config" "github.com/tikv/pd/server/schedule/hbstream" ) @@ -36,7 +36,7 @@ func TestActivity(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - cluster := mockcluster.NewCluster(ctx, config.NewTestOptions()) + cluster := mockcluster.NewCluster(ctx, mockconfig.NewTestOptions()) cluster.AddRegionStore(1, 1) cluster.AddRegionStore(2, 0) cluster.AddLeaderRegion(1, 1) diff --git a/plugin/scheduler_example/evict_leader.go b/plugin/scheduler_example/evict_leader.go index 5960ff99901..9c198233e62 100644 --- a/plugin/scheduler_example/evict_leader.go +++ b/plugin/scheduler_example/evict_leader.go @@ -31,7 +31,7 @@ import ( "github.com/tikv/pd/server/schedule/filter" "github.com/tikv/pd/server/schedule/operator" "github.com/tikv/pd/server/schedule/plan" - "github.com/tikv/pd/server/schedulers" + "github.com/tikv/pd/server/schedule/schedulers" "github.com/unrolled/render" ) diff --git a/server/api/diagnostic_test.go b/server/api/diagnostic_test.go index fe1dfce04a4..13d078a692c 100644 --- a/server/api/diagnostic_test.go +++ b/server/api/diagnostic_test.go @@ -28,7 +28,7 @@ import ( "github.com/tikv/pd/server" "github.com/tikv/pd/server/cluster" "github.com/tikv/pd/server/config" - "github.com/tikv/pd/server/schedulers" + "github.com/tikv/pd/server/schedule/schedulers" ) type diagnosticTestSuite struct { diff --git a/server/api/min_resolved_ts.go b/server/api/min_resolved_ts.go index a348877e094..2e03646c809 100644 --- a/server/api/min_resolved_ts.go +++ b/server/api/min_resolved_ts.go @@ -50,7 +50,7 @@ type minResolvedTS struct { func (h *minResolvedTSHandler) GetMinResolvedTS(w http.ResponseWriter, r *http.Request) { c := h.svr.GetRaftCluster() value := c.GetMinResolvedTS() - persistInterval := c.GetOpts().GetPDServerConfig().MinResolvedTSPersistenceInterval + persistInterval := c.GetPDServerConfig().MinResolvedTSPersistenceInterval h.rd.JSON(w, http.StatusOK, minResolvedTS{ MinResolvedTS: value, PersistInterval: persistInterval, diff --git a/server/api/min_resolved_ts_test.go b/server/api/min_resolved_ts_test.go index 96c2a778f46..e0d18ba4989 100644 --- a/server/api/min_resolved_ts_test.go +++ b/server/api/min_resolved_ts_test.go @@ -65,7 +65,7 @@ func (suite *minResolvedTSTestSuite) TearDownSuite() { func (suite *minResolvedTSTestSuite) TestMinResolvedTS() { // case1: default run job - interval := suite.svr.GetRaftCluster().GetOpts().GetPDServerConfig().MinResolvedTSPersistenceInterval + interval := suite.svr.GetRaftCluster().GetPDServerConfig().MinResolvedTSPersistenceInterval suite.checkMinResolvedTS(&minResolvedTS{ MinResolvedTS: 0, IsRealTime: true, @@ -83,7 +83,7 @@ func (suite *minResolvedTSTestSuite) TestMinResolvedTS() { interval = typeutil.Duration{Duration: suite.defaultInterval} suite.setMinResolvedTSPersistenceInterval(interval) suite.Eventually(func() bool { - 
return interval == suite.svr.GetRaftCluster().GetOpts().GetPDServerConfig().MinResolvedTSPersistenceInterval + return interval == suite.svr.GetRaftCluster().GetPDServerConfig().MinResolvedTSPersistenceInterval }, time.Second*10, time.Millisecond*20) suite.checkMinResolvedTS(&minResolvedTS{ MinResolvedTS: 0, @@ -116,9 +116,9 @@ func (suite *minResolvedTSTestSuite) TestMinResolvedTS() { } func (suite *minResolvedTSTestSuite) setMinResolvedTSPersistenceInterval(duration typeutil.Duration) { - cfg := suite.svr.GetRaftCluster().GetOpts().GetPDServerConfig().Clone() + cfg := suite.svr.GetRaftCluster().GetPDServerConfig().Clone() cfg.MinResolvedTSPersistenceInterval = duration - suite.svr.GetRaftCluster().GetOpts().SetPDServerConfig(cfg) + suite.svr.GetRaftCluster().SetPDServerConfig(cfg) } func (suite *minResolvedTSTestSuite) checkMinResolvedTS(expect *minResolvedTS) { diff --git a/server/api/rule_test.go b/server/api/rule_test.go index 72ae480b5fc..2ae910b028f 100644 --- a/server/api/rule_test.go +++ b/server/api/rule_test.go @@ -966,7 +966,7 @@ func (suite *regionRuleTestSuite) TestRegionPlacementRule() { re, "invalid region id")) suite.NoError(err) - suite.svr.GetRaftCluster().GetOpts().GetReplicationConfig().EnablePlacementRules = false + suite.svr.GetRaftCluster().GetReplicationConfig().EnablePlacementRules = false url = fmt.Sprintf("%s/config/rules/region/%d/detail", suite.urlPrefix, 1) err = tu.CheckGetJSON(testDialClient, url, nil, tu.Status(re, http.StatusPreconditionFailed), tu.StringContain( re, "placement rules feature is disabled")) diff --git a/server/api/scheduler.go b/server/api/scheduler.go index 757af7b9b7e..6aeaede4eee 100644 --- a/server/api/scheduler.go +++ b/server/api/scheduler.go @@ -25,7 +25,7 @@ import ( "github.com/tikv/pd/pkg/errs" "github.com/tikv/pd/pkg/utils/apiutil" "github.com/tikv/pd/server" - "github.com/tikv/pd/server/schedulers" + "github.com/tikv/pd/server/schedule/schedulers" "github.com/unrolled/render" ) diff --git a/server/api/scheduler_test.go b/server/api/scheduler_test.go index 45f33da1a91..ec3e5565f4c 100644 --- a/server/api/scheduler_test.go +++ b/server/api/scheduler_test.go @@ -78,13 +78,13 @@ func (suite *scheduleTestSuite) TestOriginAPI() { input1["store_id"] = 2 body, err = json.Marshal(input1) suite.NoError(err) - suite.NoError(failpoint.Enable("github.com/tikv/pd/server/schedulers/persistFail", "return(true)")) + suite.NoError(failpoint.Enable("github.com/tikv/pd/server/schedule/schedulers/persistFail", "return(true)")) suite.NoError(tu.CheckPostJSON(testDialClient, addURL, body, tu.StatusNotOK(re))) suite.Len(rc.GetSchedulers(), 1) resp = make(map[string]interface{}) suite.NoError(tu.ReadGetJSON(re, testDialClient, listURL, &resp)) suite.Len(resp["store-id-ranges"], 1) - suite.NoError(failpoint.Disable("github.com/tikv/pd/server/schedulers/persistFail")) + suite.NoError(failpoint.Disable("github.com/tikv/pd/server/schedule/schedulers/persistFail")) suite.NoError(tu.CheckPostJSON(testDialClient, addURL, body, tu.StatusOK(re))) suite.Len(rc.GetSchedulers(), 1) resp = make(map[string]interface{}) diff --git a/server/cluster/cluster.go b/server/cluster/cluster.go index 832d5e3111d..30a91593aa4 100644 --- a/server/cluster/cluster.go +++ b/server/cluster/cluster.go @@ -52,10 +52,11 @@ import ( "github.com/tikv/pd/server/replication" "github.com/tikv/pd/server/schedule" "github.com/tikv/pd/server/schedule/checker" + sc "github.com/tikv/pd/server/schedule/config" "github.com/tikv/pd/server/schedule/hbstream" 
"github.com/tikv/pd/server/schedule/labeler" "github.com/tikv/pd/server/schedule/placement" - "github.com/tikv/pd/server/schedulers" + "github.com/tikv/pd/server/schedule/schedulers" "github.com/tikv/pd/server/statistics" "github.com/tikv/pd/server/statistics/buckets" "go.etcd.io/etcd/clientv3" @@ -178,7 +179,7 @@ func NewRaftCluster(ctx context.Context, clusterID uint64, regionSyncer *syncer. } // GetStoreConfig returns the store config. -func (c *RaftCluster) GetStoreConfig() *config.StoreConfig { +func (c *RaftCluster) GetStoreConfig() sc.StoreConfig { return c.storeConfigManager.GetStoreConfig() } @@ -737,10 +738,35 @@ func (c *RaftCluster) SetStorage(s storage.Storage) { // GetOpts returns cluster's configuration. // There is no need a lock since it won't changed. -func (c *RaftCluster) GetOpts() *config.PersistOptions { +func (c *RaftCluster) GetOpts() sc.Config { return c.opt } +// GetScheduleConfig returns scheduling configurations. +func (c *RaftCluster) GetScheduleConfig() *config.ScheduleConfig { + return c.opt.GetScheduleConfig() +} + +// SetScheduleConfig sets the PD scheduling configuration. +func (c *RaftCluster) SetScheduleConfig(cfg *config.ScheduleConfig) { + c.opt.SetScheduleConfig(cfg) +} + +// GetReplicationConfig returns replication configurations. +func (c *RaftCluster) GetReplicationConfig() *config.ReplicationConfig { + return c.opt.GetReplicationConfig() +} + +// GetPDServerConfig returns pd server configurations. +func (c *RaftCluster) GetPDServerConfig() *config.PDServerConfig { + return c.opt.GetPDServerConfig() +} + +// SetPDServerConfig sets the PD configuration. +func (c *RaftCluster) SetPDServerConfig(cfg *config.PDServerConfig) { + c.opt.SetPDServerConfig(cfg) +} + // AddSuspectRegions adds regions to suspect list. func (c *RaftCluster) AddSuspectRegions(regionIDs ...uint64) { c.coordinator.checkers.AddSuspectRegions(regionIDs...) 
diff --git a/server/cluster/cluster_test.go b/server/cluster/cluster_test.go index c1d10714d8c..f16f3f34750 100644 --- a/server/cluster/cluster_test.go +++ b/server/cluster/cluster_test.go @@ -40,7 +40,7 @@ import ( "github.com/tikv/pd/server/schedule/filter" "github.com/tikv/pd/server/schedule/labeler" "github.com/tikv/pd/server/schedule/placement" - "github.com/tikv/pd/server/schedulers" + "github.com/tikv/pd/server/schedule/schedulers" "github.com/tikv/pd/server/statistics" ) diff --git a/server/cluster/coordinator.go b/server/cluster/coordinator.go index 3e151c94d36..5378e95d4fb 100644 --- a/server/cluster/coordinator.go +++ b/server/cluster/coordinator.go @@ -38,7 +38,7 @@ import ( "github.com/tikv/pd/server/schedule/hbstream" "github.com/tikv/pd/server/schedule/operator" "github.com/tikv/pd/server/schedule/plan" - "github.com/tikv/pd/server/schedulers" + "github.com/tikv/pd/server/schedule/schedulers" "github.com/tikv/pd/server/statistics" "go.uber.org/zap" ) @@ -93,7 +93,7 @@ func newCoordinator(ctx context.Context, cluster *RaftCluster, hbStreams *hbstre cancel: cancel, cluster: cluster, prepareChecker: newPrepareChecker(), - checkers: checker.NewController(ctx, cluster, cluster.ruleManager, cluster.regionLabeler, opController), + checkers: checker.NewController(ctx, cluster, cluster.opt, cluster.ruleManager, cluster.regionLabeler, opController), regionScatterer: schedule.NewRegionScatterer(ctx, cluster, opController), regionSplitter: schedule.NewRegionSplitter(cluster, schedule.NewSplitRegionsHandler(cluster, opController)), schedulers: schedulers, @@ -768,7 +768,7 @@ func (c *coordinator) isSchedulerDisabled(name string) (bool, error) { return false, errs.ErrSchedulerNotFound.FastGenByArgs() } t := s.GetType() - scheduleConfig := c.cluster.GetOpts().GetScheduleConfig() + scheduleConfig := c.cluster.GetScheduleConfig() for _, s := range scheduleConfig.Schedulers { if t == s.Type { return s.Disable, nil diff --git a/server/cluster/coordinator_test.go b/server/cluster/coordinator_test.go index 5985f0dc0ef..0ce548792ce 100644 --- a/server/cluster/coordinator_test.go +++ b/server/cluster/coordinator_test.go @@ -39,7 +39,7 @@ import ( "github.com/tikv/pd/server/schedule/hbstream" "github.com/tikv/pd/server/schedule/labeler" "github.com/tikv/pd/server/schedule/operator" - "github.com/tikv/pd/server/schedulers" + "github.com/tikv/pd/server/schedule/schedulers" "github.com/tikv/pd/server/statistics" ) @@ -536,10 +536,9 @@ func TestCheckCache(t *testing.T) { re.Len(co.checkers.GetWaitingRegions(), 1) // cancel the replica-schedule-limit restriction - opt := tc.GetOpts() - cfg := opt.GetScheduleConfig() + cfg := tc.GetScheduleConfig() cfg.ReplicaScheduleLimit = 10 - tc.GetOpts().SetScheduleConfig(cfg) + tc.SetScheduleConfig(cfg) co.wg.Add(1) co.patrolRegions() oc := co.opController diff --git a/server/cluster/diagnostic_manager.go b/server/cluster/diagnostic_manager.go index 2d6f41aa504..1e741742e7c 100644 --- a/server/cluster/diagnostic_manager.go +++ b/server/cluster/diagnostic_manager.go @@ -24,7 +24,7 @@ import ( "github.com/tikv/pd/pkg/movingaverage" "github.com/tikv/pd/server/schedule/operator" "github.com/tikv/pd/server/schedule/plan" - "github.com/tikv/pd/server/schedulers" + "github.com/tikv/pd/server/schedule/schedulers" "go.uber.org/zap" ) diff --git a/server/cluster/store_limiter_test.go b/server/cluster/store_limiter_test.go index d7561a2c64f..ab5fd3d88bd 100644 --- a/server/cluster/store_limiter_test.go +++ b/server/cluster/store_limiter_test.go @@ -20,13 +20,13 @@ import ( 
"github.com/pingcap/kvproto/pkg/pdpb" "github.com/stretchr/testify/require" "github.com/tikv/pd/pkg/core/storelimit" - "github.com/tikv/pd/server/config" + "github.com/tikv/pd/pkg/mock/mockconfig" ) func TestCollect(t *testing.T) { re := require.New(t) - limiter := NewStoreLimiter(config.NewTestOptions()) + limiter := NewStoreLimiter(mockconfig.NewTestOptions()) limiter.Collect(&pdpb.StoreStats{}) re.Equal(int64(1), limiter.state.cst.total) @@ -35,7 +35,7 @@ func TestCollect(t *testing.T) { func TestStoreLimitScene(t *testing.T) { re := require.New(t) - limiter := NewStoreLimiter(config.NewTestOptions()) + limiter := NewStoreLimiter(mockconfig.NewTestOptions()) re.Equal(storelimit.DefaultScene(storelimit.AddPeer), limiter.scene[storelimit.AddPeer]) re.Equal(storelimit.DefaultScene(storelimit.RemovePeer), limiter.scene[storelimit.RemovePeer]) } @@ -43,7 +43,7 @@ func TestStoreLimitScene(t *testing.T) { func TestReplaceStoreLimitScene(t *testing.T) { re := require.New(t) - limiter := NewStoreLimiter(config.NewTestOptions()) + limiter := NewStoreLimiter(mockconfig.NewTestOptions()) sceneAddPeer := &storelimit.Scene{Idle: 4, Low: 3, Normal: 2, High: 1} limiter.ReplaceStoreLimitScene(sceneAddPeer, storelimit.AddPeer) diff --git a/server/config/config.go b/server/config/config.go index e0ca63767ec..e15fc301d39 100644 --- a/server/config/config.go +++ b/server/config/config.go @@ -907,11 +907,6 @@ func (c *ScheduleConfig) Validate() error { if c.LeaderSchedulePolicy != "count" && c.LeaderSchedulePolicy != "size" { return errors.Errorf("leader-schedule-policy %v is invalid", c.LeaderSchedulePolicy) } - for _, scheduleConfig := range c.Schedulers { - if !IsSchedulerRegistered(scheduleConfig.Type) { - return errors.Errorf("create func of %v is not registered, maybe misspelled", scheduleConfig.Type) - } - } if c.SlowStoreEvictingAffectedStoreRatioThreshold == 0 { return errors.Errorf("slow-store-evicting-affected-store-ratio-threshold is not set") } @@ -1212,10 +1207,6 @@ type StoreLabel struct { Value string `toml:"value" json:"value"` } -// RejectLeader is the label property type that suggests a store should not -// have any region leaders. -const RejectLeader = "reject-leader" - // LabelPropertyConfig is the config section to set properties to store labels. // NOTE: This type is exported by HTTP API. Please pay more attention when modifying it. 
type LabelPropertyConfig map[string][]StoreLabel diff --git a/server/config/config_test.go b/server/config/config_test.go index ee6c9e22287..b8025a396b8 100644 --- a/server/config/config_test.go +++ b/server/config/config_test.go @@ -53,8 +53,6 @@ func TestBadFormatJoinAddr(t *testing.T) { func TestReloadConfig(t *testing.T) { re := require.New(t) - registerDefaultSchedulers() - RegisterScheduler("shuffle-leader") opt, err := newTestScheduleOption() re.NoError(err) storage := storage.NewStorageWithMemoryBackend() @@ -64,22 +62,10 @@ func TestReloadConfig(t *testing.T) { opt.GetPDServerConfig().UseRegionStorage = true re.NoError(opt.Persist(storage)) - // Add a new default enable scheduler "shuffle-leader" - DefaultSchedulers = append(DefaultSchedulers, SchedulerConfig{Type: "shuffle-leader"}) - defer func() { - DefaultSchedulers = DefaultSchedulers[:len(DefaultSchedulers)-1] - }() - newOpt, err := newTestScheduleOption() re.NoError(err) re.NoError(newOpt.Reload(storage)) - schedulers := newOpt.GetSchedulers() - re.Len(schedulers, len(DefaultSchedulers)) - re.True(newOpt.IsUseRegionStorage()) - for i, s := range schedulers { - re.Equal(DefaultSchedulers[i].Type, s.Type) - re.False(s.Disable) - } + re.Equal(5, newOpt.GetMaxReplicas()) re.Equal(uint64(10), newOpt.GetMaxSnapshotCount()) re.Equal(int64(512), newOpt.GetMaxMovableHotPeerSize()) @@ -87,7 +73,6 @@ func TestReloadConfig(t *testing.T) { func TestReloadUpgrade(t *testing.T) { re := require.New(t) - registerDefaultSchedulers() opt, err := newTestScheduleOption() re.NoError(err) @@ -111,7 +96,6 @@ func TestReloadUpgrade(t *testing.T) { func TestReloadUpgrade2(t *testing.T) { re := require.New(t) - registerDefaultSchedulers() opt, err := newTestScheduleOption() re.NoError(err) @@ -133,7 +117,6 @@ func TestReloadUpgrade2(t *testing.T) { func TestValidation(t *testing.T) { re := require.New(t) - registerDefaultSchedulers() cfg := NewConfig() re.NoError(cfg.Adjust(nil, false)) @@ -163,8 +146,6 @@ func TestValidation(t *testing.T) { func TestAdjust(t *testing.T) { re := require.New(t) - registerDefaultSchedulers() - RegisterScheduler("random-merge") cfgData := ` name = "" lease = 0 @@ -226,32 +207,6 @@ max-merge-region-keys = 400000 re.NoError(err) re.Contains(cfg.WarningMsgs[0], "Config contains undefined item") re.Equal(40*10000, int(cfg.Schedule.GetMaxMergeRegionKeys())) - // Check misspelled schedulers name - cfgData = ` -name = "" -lease = 0 - -[[schedule.schedulers]] -type = "random-merge-schedulers" -` - cfg = NewConfig() - meta, err = toml.Decode(cfgData, &cfg) - re.NoError(err) - err = cfg.Adjust(&meta, false) - re.Error(err) - // Check correct schedulers name - cfgData = ` -name = "" -lease = 0 - -[[schedule.schedulers]] -type = "random-merge" -` - cfg = NewConfig() - meta, err = toml.Decode(cfgData, &cfg) - re.NoError(err) - err = cfg.Adjust(&meta, false) - re.NoError(err) cfgData = ` [metric] @@ -293,7 +248,6 @@ tso-update-physical-interval = "15s" func TestMigrateFlags(t *testing.T) { re := require.New(t) - registerDefaultSchedulers() load := func(s string) (*Config, error) { cfg := NewConfig() meta, err := toml.Decode(s, &cfg) @@ -331,7 +285,6 @@ disable-make-up-replica = false func TestPDServerConfig(t *testing.T) { re := require.New(t) - registerDefaultSchedulers() tests := []struct { cfgData string hasErr bool @@ -398,7 +351,6 @@ dashboard-address = "foo" func TestDashboardConfig(t *testing.T) { re := require.New(t) - registerDefaultSchedulers() cfgData := ` [dashboard] tidb-cacert-path = "/path/ca.pem" @@ -438,7 +390,6 @@ 
tidb-cert-path = "/path/client.pem" func TestReplicationMode(t *testing.T) { re := require.New(t) - registerDefaultSchedulers() cfgData := ` [replication-mode] replication-mode = "dr-auto-sync" @@ -474,7 +425,6 @@ wait-store-timeout = "120s" func TestHotHistoryRegionConfig(t *testing.T) { re := require.New(t) - registerDefaultSchedulers() cfgData := ` [schedule] hot-regions-reserved-days= 30 @@ -497,7 +447,6 @@ hot-regions-write-interval= "30m" func TestConfigClone(t *testing.T) { re := require.New(t) - registerDefaultSchedulers() cfg := &Config{} cfg.Adjust(nil, false) re.Equal(cfg, cfg.Clone()) @@ -529,9 +478,3 @@ func newTestScheduleOption() (*PersistOptions, error) { opt := NewPersistOptions(cfg) return opt, nil } - -func registerDefaultSchedulers() { - for _, d := range DefaultSchedulers { - RegisterScheduler(d.Type) - } -} diff --git a/server/config/store_config.go b/server/config/store_config.go index 3856179b40c..a3cd8e54650 100644 --- a/server/config/store_config.go +++ b/server/config/store_config.go @@ -120,6 +120,14 @@ func (c *StoreConfig) IsEnableRegionBucket() bool { return c.Coprocessor.EnableRegionBucket } +// SetRegionBucketEnabled sets if the region bucket is enabled. +func (c *StoreConfig) SetRegionBucketEnabled(enabled bool) { + if c == nil { + return + } + c.Coprocessor.EnableRegionBucket = enabled +} + // GetRegionBucketSize returns region bucket size if enable region buckets. func (c *StoreConfig) GetRegionBucketSize() uint64 { if c == nil || !c.Coprocessor.EnableRegionBucket { @@ -160,6 +168,12 @@ func (c *StoreConfig) CheckRegionKeys(keys, mergeKeys uint64) error { return nil } +// Clone makes a deep copy of the config. +func (c *StoreConfig) Clone() *StoreConfig { + cfg := *c + return &cfg +} + // StoreConfigManager is used to manage the store config. type StoreConfigManager struct { config atomic.Value @@ -219,6 +233,14 @@ func (m *StoreConfigManager) GetStoreConfig() *StoreConfig { return config.(*StoreConfig) } +// SetStoreConfig sets the store configuration. +func (m *StoreConfigManager) SetStoreConfig(cfg *StoreConfig) { + if m == nil { + return + } + m.config.Store(cfg) +} + // Source is used to get the store config. type Source interface { GetConfig(statusAddress string) (*StoreConfig, error) diff --git a/server/config/util.go b/server/config/util.go index ed85be7c70a..cb1e947bf8d 100644 --- a/server/config/util.go +++ b/server/config/util.go @@ -18,7 +18,6 @@ import ( "net/url" "regexp" "strings" - "sync" "github.com/pingcap/errors" "github.com/pingcap/kvproto/pkg/metapb" @@ -72,30 +71,6 @@ func ValidateURLWithScheme(rawURL string) error { return nil } -var schedulerMap sync.Map - -// RegisterScheduler registers the scheduler type. -func RegisterScheduler(typ string) { - schedulerMap.Store(typ, struct{}{}) -} - -// IsSchedulerRegistered checks if the named scheduler type is registered. -func IsSchedulerRegistered(name string) bool { - _, ok := schedulerMap.Load(name) - return ok -} - -// NewTestOptions creates default options for testing. -func NewTestOptions() *PersistOptions { - // register default schedulers in case config check fail. - for _, d := range DefaultSchedulers { - RegisterScheduler(d.Type) - } - c := NewConfig() - c.Adjust(nil, false) - return NewPersistOptions(c) -} - // parseUrls parse a string into multiple urls. 
func parseUrls(s string) ([]url.URL, error) { items := strings.Split(s, ",") diff --git a/server/handler.go b/server/handler.go index c81f4ace6a3..9b28b498cb7 100644 --- a/server/handler.go +++ b/server/handler.go @@ -43,7 +43,7 @@ import ( "github.com/tikv/pd/server/schedule/filter" "github.com/tikv/pd/server/schedule/operator" "github.com/tikv/pd/server/schedule/placement" - "github.com/tikv/pd/server/schedulers" + "github.com/tikv/pd/server/schedule/schedulers" "github.com/tikv/pd/server/statistics" "go.uber.org/zap" ) diff --git a/server/replication/replication_mode_test.go b/server/replication/replication_mode_test.go index cf049301803..499c40b0412 100644 --- a/server/replication/replication_mode_test.go +++ b/server/replication/replication_mode_test.go @@ -26,6 +26,7 @@ import ( "github.com/stretchr/testify/require" "github.com/tikv/pd/pkg/core" "github.com/tikv/pd/pkg/mock/mockcluster" + "github.com/tikv/pd/pkg/mock/mockconfig" "github.com/tikv/pd/pkg/storage" "github.com/tikv/pd/pkg/utils/typeutil" "github.com/tikv/pd/server/config" @@ -37,7 +38,7 @@ func TestInitial(t *testing.T) { defer cancel() store := storage.NewStorageWithMemoryBackend() conf := config.ReplicationModeConfig{ReplicationMode: modeMajority} - cluster := mockcluster.NewCluster(ctx, config.NewTestOptions()) + cluster := mockcluster.NewCluster(ctx, mockconfig.NewTestOptions()) rep, err := NewReplicationModeManager(conf, store, cluster, newMockReplicator([]uint64{1})) re.NoError(err) re.Equal(&pb.ReplicationStatus{Mode: pb.ReplicationMode_MAJORITY}, rep.GetReplicationStatus()) @@ -71,7 +72,7 @@ func TestStatus(t *testing.T) { conf := config.ReplicationModeConfig{ReplicationMode: modeDRAutoSync, DRAutoSync: config.DRAutoSyncReplicationConfig{ LabelKey: "dr-label", }} - cluster := mockcluster.NewCluster(ctx, config.NewTestOptions()) + cluster := mockcluster.NewCluster(ctx, mockconfig.NewTestOptions()) rep, err := NewReplicationModeManager(conf, store, cluster, newMockReplicator([]uint64{1})) re.NoError(err) re.Equal(&pb.ReplicationStatus{ @@ -170,7 +171,7 @@ func TestStateSwitch(t *testing.T) { DRReplicas: 1, WaitStoreTimeout: typeutil.Duration{Duration: time.Minute}, }} - cluster := mockcluster.NewCluster(ctx, config.NewTestOptions()) + cluster := mockcluster.NewCluster(ctx, mockconfig.NewTestOptions()) replicator := newMockReplicator([]uint64{1}) rep, err := NewReplicationModeManager(conf, store, cluster, replicator) re.NoError(err) @@ -356,7 +357,7 @@ func TestReplicateState(t *testing.T) { DRReplicas: 1, WaitStoreTimeout: typeutil.Duration{Duration: time.Minute}, }} - cluster := mockcluster.NewCluster(ctx, config.NewTestOptions()) + cluster := mockcluster.NewCluster(ctx, mockconfig.NewTestOptions()) replicator := newMockReplicator([]uint64{1}) rep, err := NewReplicationModeManager(conf, store, cluster, replicator) re.NoError(err) @@ -398,7 +399,7 @@ func TestAsynctimeout(t *testing.T) { DRReplicas: 1, WaitStoreTimeout: typeutil.Duration{Duration: time.Minute}, }} - cluster := mockcluster.NewCluster(ctx, config.NewTestOptions()) + cluster := mockcluster.NewCluster(ctx, mockconfig.NewTestOptions()) var replicator mockFileReplicator rep, err := NewReplicationModeManager(conf, store, cluster, &replicator) re.NoError(err) @@ -441,7 +442,7 @@ func TestRecoverProgress(t *testing.T) { DRReplicas: 1, WaitStoreTimeout: typeutil.Duration{Duration: time.Minute}, }} - cluster := mockcluster.NewCluster(ctx, config.NewTestOptions()) + cluster := mockcluster.NewCluster(ctx, mockconfig.NewTestOptions()) cluster.AddLabelsStore(1, 
1, map[string]string{}) rep, err := NewReplicationModeManager(conf, store, cluster, newMockReplicator([]uint64{1})) re.NoError(err) @@ -503,7 +504,7 @@ func TestRecoverProgressWithSplitAndMerge(t *testing.T) { DRReplicas: 1, WaitStoreTimeout: typeutil.Duration{Duration: time.Minute}, }} - cluster := mockcluster.NewCluster(ctx, config.NewTestOptions()) + cluster := mockcluster.NewCluster(ctx, mockconfig.NewTestOptions()) cluster.AddLabelsStore(1, 1, map[string]string{}) rep, err := NewReplicationModeManager(conf, store, cluster, newMockReplicator([]uint64{1})) re.NoError(err) diff --git a/server/schedule/checker/checker_controller.go b/server/schedule/checker/checker_controller.go index 3db876c545e..4a50227d4cc 100644 --- a/server/schedule/checker/checker_controller.go +++ b/server/schedule/checker/checker_controller.go @@ -23,8 +23,8 @@ import ( "github.com/tikv/pd/pkg/core" "github.com/tikv/pd/pkg/errs" "github.com/tikv/pd/pkg/utils/keyutil" - "github.com/tikv/pd/server/config" "github.com/tikv/pd/server/schedule" + "github.com/tikv/pd/server/schedule/config" "github.com/tikv/pd/server/schedule/labeler" "github.com/tikv/pd/server/schedule/operator" "github.com/tikv/pd/server/schedule/placement" @@ -36,7 +36,7 @@ const DefaultCacheSize = 1000 // Controller is used to manage all checkers. type Controller struct { cluster schedule.Cluster - opts *config.PersistOptions + conf config.Config opController *schedule.OperatorController learnerChecker *LearnerChecker replicaChecker *ReplicaChecker @@ -51,19 +51,19 @@ type Controller struct { } // NewController create a new Controller. -func NewController(ctx context.Context, cluster schedule.Cluster, ruleManager *placement.RuleManager, labeler *labeler.RegionLabeler, opController *schedule.OperatorController) *Controller { +func NewController(ctx context.Context, cluster schedule.Cluster, conf config.Config, ruleManager *placement.RuleManager, labeler *labeler.RegionLabeler, opController *schedule.OperatorController) *Controller { regionWaitingList := cache.NewDefaultCache(DefaultCacheSize) return &Controller{ cluster: cluster, - opts: cluster.GetOpts(), + conf: conf, opController: opController, learnerChecker: NewLearnerChecker(cluster), - replicaChecker: NewReplicaChecker(cluster, regionWaitingList), + replicaChecker: NewReplicaChecker(cluster, conf, regionWaitingList), ruleChecker: NewRuleChecker(ctx, cluster, ruleManager, regionWaitingList), splitChecker: NewSplitChecker(cluster, ruleManager, labeler), - mergeChecker: NewMergeChecker(ctx, cluster), + mergeChecker: NewMergeChecker(ctx, cluster, conf), jointStateChecker: NewJointStateChecker(cluster), - priorityInspector: NewPriorityInspector(cluster), + priorityInspector: NewPriorityInspector(cluster, conf), regionWaitingList: regionWaitingList, suspectRegions: cache.NewIDTTL(ctx, time.Minute, 3*time.Minute), suspectKeyRanges: cache.NewStringTTL(ctx, time.Minute, 3*time.Minute), @@ -91,7 +91,7 @@ func (c *Controller) CheckRegion(region *core.RegionInfo) []*operator.Operator { return []*operator.Operator{op} } - if c.opts.IsPlacementRulesEnabled() { + if c.conf.IsPlacementRulesEnabled() { skipRuleCheck := c.cluster.GetOpts().IsPlacementRulesCacheEnabled() && c.cluster.GetRuleManager().IsRegionFitCached(c.cluster, region) if skipRuleCheck { @@ -106,7 +106,7 @@ func (c *Controller) CheckRegion(region *core.RegionInfo) []*operator.Operator { }) fit := c.priorityInspector.Inspect(region) if op := c.ruleChecker.CheckWithFit(region, fit); op != nil { - if opController.OperatorCount(operator.OpReplica) 
< c.opts.GetReplicaScheduleLimit() { + if opController.OperatorCount(operator.OpReplica) < c.conf.GetReplicaScheduleLimit() { return []*operator.Operator{op} } operator.OperatorLimitCounter.WithLabelValues(c.ruleChecker.GetType(), operator.OpReplica.String()).Inc() @@ -118,7 +118,7 @@ func (c *Controller) CheckRegion(region *core.RegionInfo) []*operator.Operator { return []*operator.Operator{op} } if op := c.replicaChecker.Check(region); op != nil { - if opController.OperatorCount(operator.OpReplica) < c.opts.GetReplicaScheduleLimit() { + if opController.OperatorCount(operator.OpReplica) < c.conf.GetReplicaScheduleLimit() { return []*operator.Operator{op} } operator.OperatorLimitCounter.WithLabelValues(c.replicaChecker.GetType(), operator.OpReplica.String()).Inc() @@ -127,7 +127,7 @@ func (c *Controller) CheckRegion(region *core.RegionInfo) []*operator.Operator { } if c.mergeChecker != nil { - allowed := opController.OperatorCount(operator.OpMerge) < c.opts.GetMergeScheduleLimit() + allowed := opController.OperatorCount(operator.OpMerge) < c.conf.GetMergeScheduleLimit() if !allowed { operator.OperatorLimitCounter.WithLabelValues(c.mergeChecker.GetType(), operator.OpMerge.String()).Inc() } else if ops := c.mergeChecker.Check(region); ops != nil { diff --git a/server/schedule/checker/joint_state_checker_test.go b/server/schedule/checker/joint_state_checker_test.go index 1ea0ddd21f7..9fd6194f49a 100644 --- a/server/schedule/checker/joint_state_checker_test.go +++ b/server/schedule/checker/joint_state_checker_test.go @@ -22,7 +22,7 @@ import ( "github.com/stretchr/testify/require" "github.com/tikv/pd/pkg/core" "github.com/tikv/pd/pkg/mock/mockcluster" - "github.com/tikv/pd/server/config" + "github.com/tikv/pd/pkg/mock/mockconfig" "github.com/tikv/pd/server/schedule/operator" ) @@ -30,7 +30,7 @@ func TestLeaveJointState(t *testing.T) { re := require.New(t) ctx, cancel := context.WithCancel(context.Background()) defer cancel() - cluster := mockcluster.NewCluster(ctx, config.NewTestOptions()) + cluster := mockcluster.NewCluster(ctx, mockconfig.NewTestOptions()) jsc := NewJointStateChecker(cluster) for id := uint64(1); id <= 10; id++ { cluster.PutStoreWithLabels(id) diff --git a/server/schedule/checker/learner_checker_test.go b/server/schedule/checker/learner_checker_test.go index d5fdf25f40c..a70c952475c 100644 --- a/server/schedule/checker/learner_checker_test.go +++ b/server/schedule/checker/learner_checker_test.go @@ -22,8 +22,8 @@ import ( "github.com/stretchr/testify/require" "github.com/tikv/pd/pkg/core" "github.com/tikv/pd/pkg/mock/mockcluster" + "github.com/tikv/pd/pkg/mock/mockconfig" "github.com/tikv/pd/pkg/versioninfo" - "github.com/tikv/pd/server/config" "github.com/tikv/pd/server/schedule/operator" ) @@ -31,7 +31,7 @@ func TestPromoteLearner(t *testing.T) { re := require.New(t) ctx, cancel := context.WithCancel(context.Background()) defer cancel() - cluster := mockcluster.NewCluster(ctx, config.NewTestOptions()) + cluster := mockcluster.NewCluster(ctx, mockconfig.NewTestOptions()) cluster.SetClusterVersion(versioninfo.MinSupportedVersion(versioninfo.Version4_0)) lc := NewLearnerChecker(cluster) for id := uint64(1); id <= 10; id++ { diff --git a/server/schedule/checker/merge_checker.go b/server/schedule/checker/merge_checker.go index d429ccbd44b..e5a1b9d3d0a 100644 --- a/server/schedule/checker/merge_checker.go +++ b/server/schedule/checker/merge_checker.go @@ -25,8 +25,8 @@ import ( "github.com/tikv/pd/pkg/core" "github.com/tikv/pd/pkg/errs" "github.com/tikv/pd/pkg/utils/logutil" - 
"github.com/tikv/pd/server/config" "github.com/tikv/pd/server/schedule" + "github.com/tikv/pd/server/schedule/config" "github.com/tikv/pd/server/schedule/filter" "github.com/tikv/pd/server/schedule/labeler" "github.com/tikv/pd/server/schedule/operator" @@ -76,18 +76,17 @@ var ( type MergeChecker struct { PauseController cluster schedule.Cluster - opts *config.PersistOptions + conf config.Config splitCache *cache.TTLUint64 startTime time.Time // it's used to judge whether server recently start. } // NewMergeChecker creates a merge checker. -func NewMergeChecker(ctx context.Context, cluster schedule.Cluster) *MergeChecker { - opts := cluster.GetOpts() - splitCache := cache.NewIDTTL(ctx, time.Minute, opts.GetSplitMergeInterval()) +func NewMergeChecker(ctx context.Context, cluster schedule.Cluster, conf config.Config) *MergeChecker { + splitCache := cache.NewIDTTL(ctx, time.Minute, conf.GetSplitMergeInterval()) return &MergeChecker{ cluster: cluster, - opts: opts, + conf: conf, splitCache: splitCache, startTime: time.Now(), } @@ -102,7 +101,7 @@ func (m *MergeChecker) GetType() string { // will skip check it for a while. func (m *MergeChecker) RecordRegionSplit(regionIDs []uint64) { for _, regionID := range regionIDs { - m.splitCache.PutWithTTL(regionID, nil, m.opts.GetSplitMergeInterval()) + m.splitCache.PutWithTTL(regionID, nil, m.conf.GetSplitMergeInterval()) } } @@ -115,13 +114,13 @@ func (m *MergeChecker) Check(region *core.RegionInfo) []*operator.Operator { return nil } - expireTime := m.startTime.Add(m.opts.GetSplitMergeInterval()) + expireTime := m.startTime.Add(m.conf.GetSplitMergeInterval()) if time.Now().Before(expireTime) { mergeCheckerRecentlyStartCounter.Inc() return nil } - m.splitCache.UpdateTTL(m.opts.GetSplitMergeInterval()) + m.splitCache.UpdateTTL(m.conf.GetSplitMergeInterval()) if m.splitCache.Exists(region.GetID()) { mergeCheckerRecentlySplitCounter.Inc() return nil @@ -134,7 +133,7 @@ func (m *MergeChecker) Check(region *core.RegionInfo) []*operator.Operator { } // region is not small enough - if !region.NeedMerge(int64(m.opts.GetMaxMergeRegionSize()), int64(m.opts.GetMaxMergeRegionKeys())) { + if !region.NeedMerge(int64(m.conf.GetMaxMergeRegionSize()), int64(m.conf.GetMaxMergeRegionKeys())) { mergeCheckerNoNeedCounter.Inc() return nil } @@ -162,7 +161,7 @@ func (m *MergeChecker) Check(region *core.RegionInfo) []*operator.Operator { if m.checkTarget(region, next) { target = next } - if !m.opts.IsOneWayMergeEnabled() && m.checkTarget(region, prev) { // allow a region can be merged by two ways. + if !m.conf.IsOneWayMergeEnabled() && m.checkTarget(region, prev) { // allow a region can be merged by two ways. 
if target == nil || prev.GetApproximateSize() < next.GetApproximateSize() { // pick smaller target = prev } @@ -183,13 +182,13 @@ func (m *MergeChecker) Check(region *core.RegionInfo) []*operator.Operator { return nil } if err := m.cluster.GetStoreConfig().CheckRegionSize(uint64(target.GetApproximateSize()+region.GetApproximateSize()), - m.opts.GetMaxMergeRegionSize()); err != nil { + m.conf.GetMaxMergeRegionSize()); err != nil { mergeCheckerSplitSizeAfterMergeCounter.Inc() return nil } if err := m.cluster.GetStoreConfig().CheckRegionKeys(uint64(target.GetApproximateKeys()+region.GetApproximateKeys()), - m.opts.GetMaxMergeRegionKeys()); err != nil { + m.conf.GetMaxMergeRegionKeys()); err != nil { mergeCheckerSplitKeysAfterMergeCounter.Inc() return nil } diff --git a/server/schedule/checker/merge_checker_test.go b/server/schedule/checker/merge_checker_test.go index df3a61477d9..44e029cf992 100644 --- a/server/schedule/checker/merge_checker_test.go +++ b/server/schedule/checker/merge_checker_test.go @@ -25,10 +25,11 @@ import ( "github.com/tikv/pd/pkg/core" "github.com/tikv/pd/pkg/core/storelimit" "github.com/tikv/pd/pkg/mock/mockcluster" + "github.com/tikv/pd/pkg/mock/mockconfig" "github.com/tikv/pd/pkg/utils/testutil" "github.com/tikv/pd/pkg/versioninfo" - "github.com/tikv/pd/server/config" "github.com/tikv/pd/server/schedule" + "github.com/tikv/pd/server/schedule/config" "github.com/tikv/pd/server/schedule/hbstream" "github.com/tikv/pd/server/schedule/labeler" "github.com/tikv/pd/server/schedule/operator" @@ -54,14 +55,12 @@ func TestMergeCheckerTestSuite(t *testing.T) { } func (suite *mergeCheckerTestSuite) SetupTest() { - cfg := config.NewTestOptions() + cfg := mockconfig.NewTestOptions() suite.ctx, suite.cancel = context.WithCancel(context.Background()) suite.cluster = mockcluster.NewCluster(suite.ctx, cfg) suite.cluster.SetMaxMergeRegionSize(2) suite.cluster.SetMaxMergeRegionKeys(2) - suite.cluster.SetLabelPropertyConfig(config.LabelPropertyConfig{ - config.RejectLeader: {{Key: "reject", Value: "leader"}}, - }) + suite.cluster.SetLabelProperty(config.RejectLeader, "reject", "leader") suite.cluster.SetClusterVersion(versioninfo.MinSupportedVersion(versioninfo.Version4_0)) stores := map[uint64][]string{ 1: {}, 2: {}, 3: {}, 4: {}, 5: {}, 6: {}, @@ -81,7 +80,7 @@ func (suite *mergeCheckerTestSuite) SetupTest() { for _, region := range suite.regions { suite.cluster.PutRegion(region) } - suite.mc = NewMergeChecker(suite.ctx, suite.cluster) + suite.mc = NewMergeChecker(suite.ctx, suite.cluster, suite.cluster.GetOpts()) } func (suite *mergeCheckerTestSuite) TearDownTest() { @@ -103,13 +102,12 @@ func (suite *mergeCheckerTestSuite) TestBasic() { suite.Nil(ops) // it can merge if the max region size of the store is greater than the target region size. 
- config := suite.cluster.GetStoreConfig() - config.RegionMaxSize = "144MiB" - config.RegionMaxSizeMB = 10 * 1024 + suite.cluster.SetRegionMaxSize("144MiB") + suite.cluster.SetRegionSizeMB(1024) ops = suite.mc.Check(suite.regions[2]) suite.NotNil(ops) - config.RegionMaxSizeMB = 144 + suite.cluster.SetRegionSizeMB(144) ops = suite.mc.Check(suite.regions[2]) suite.Nil(ops) // change the size back @@ -443,7 +441,7 @@ func (suite *mergeCheckerTestSuite) TestMatchPeers() { } func (suite *mergeCheckerTestSuite) TestStoreLimitWithMerge() { - cfg := config.NewTestOptions() + cfg := mockconfig.NewTestOptions() tc := mockcluster.NewCluster(suite.ctx, cfg) tc.SetMaxMergeRegionSize(2) tc.SetMaxMergeRegionKeys(2) @@ -463,7 +461,7 @@ func (suite *mergeCheckerTestSuite) TestStoreLimitWithMerge() { tc.PutRegion(region) } - mc := NewMergeChecker(suite.ctx, tc) + mc := NewMergeChecker(suite.ctx, tc, tc.GetOpts()) stream := hbstream.NewTestHeartbeatStreams(suite.ctx, tc.ID, tc, false /* no need to run */) oc := schedule.NewOperatorController(suite.ctx, tc, stream) @@ -511,7 +509,7 @@ func (suite *mergeCheckerTestSuite) TestStoreLimitWithMerge() { } func (suite *mergeCheckerTestSuite) TestCache() { - cfg := config.NewTestOptions() + cfg := mockconfig.NewTestOptions() suite.cluster = mockcluster.NewCluster(suite.ctx, cfg) suite.cluster.SetMaxMergeRegionSize(2) suite.cluster.SetMaxMergeRegionKeys(2) @@ -532,7 +530,7 @@ func (suite *mergeCheckerTestSuite) TestCache() { suite.cluster.PutRegion(region) } - suite.mc = NewMergeChecker(suite.ctx, suite.cluster) + suite.mc = NewMergeChecker(suite.ctx, suite.cluster, suite.cluster.GetOpts()) ops := suite.mc.Check(suite.regions[1]) suite.Nil(ops) diff --git a/server/schedule/checker/priority_inspector.go b/server/schedule/checker/priority_inspector.go index 5c5d2c0b377..2bb240fe24e 100644 --- a/server/schedule/checker/priority_inspector.go +++ b/server/schedule/checker/priority_inspector.go @@ -19,8 +19,8 @@ import ( "github.com/tikv/pd/pkg/cache" "github.com/tikv/pd/pkg/core" - "github.com/tikv/pd/server/config" "github.com/tikv/pd/server/schedule" + "github.com/tikv/pd/server/schedule/config" "github.com/tikv/pd/server/schedule/placement" ) @@ -30,15 +30,15 @@ const defaultPriorityQueueSize = 1280 // PriorityInspector ensures high priority region should run first type PriorityInspector struct { cluster schedule.Cluster - opts *config.PersistOptions + conf config.Config queue *cache.PriorityQueue } // NewPriorityInspector creates a priority inspector. -func NewPriorityInspector(cluster schedule.Cluster) *PriorityInspector { +func NewPriorityInspector(cluster schedule.Cluster, conf config.Config) *PriorityInspector { return &PriorityInspector{ cluster: cluster, - opts: cluster.GetOpts(), + conf: conf, queue: cache.NewPriorityQueue(defaultPriorityQueueSize), } } @@ -63,7 +63,7 @@ func NewRegionEntry(regionID uint64) *RegionPriorityEntry { // Inspect inspects region's replicas, it will put into priority queue if the region lack of replicas. 
func (p *PriorityInspector) Inspect(region *core.RegionInfo) (fit *placement.RegionFit) { var makeupCount int - if p.opts.IsPlacementRulesEnabled() { + if p.conf.IsPlacementRulesEnabled() { makeupCount, fit = p.inspectRegionInPlacementRule(region) } else { makeupCount = p.inspectRegionInReplica(region) @@ -92,7 +92,7 @@ func (p *PriorityInspector) inspectRegionInPlacementRule(region *core.RegionInfo // inspectReplicas inspects region in replica mode func (p *PriorityInspector) inspectRegionInReplica(region *core.RegionInfo) (makeupCount int) { - return p.opts.GetMaxReplicas() - len(region.GetPeers()) + return p.conf.GetMaxReplicas() - len(region.GetPeers()) } // addOrRemoveRegion add or remove region from queue @@ -119,7 +119,7 @@ func (p *PriorityInspector) GetPriorityRegions() (ids []uint64) { re := e.Value.(*RegionPriorityEntry) // avoid to some priority region occupy checker, region don't need check on next check interval // the next run time is : last_time+retry*10*patrol_region_interval - if t := re.Last.Add(time.Duration(re.Attempt*10) * p.opts.GetPatrolRegionInterval()); t.Before(time.Now()) { + if t := re.Last.Add(time.Duration(re.Attempt*10) * p.conf.GetPatrolRegionInterval()); t.Before(time.Now()) { ids = append(ids, re.regionID) } } diff --git a/server/schedule/checker/priority_inspector_test.go b/server/schedule/checker/priority_inspector_test.go index b736ecc8097..fc88b3e428f 100644 --- a/server/schedule/checker/priority_inspector_test.go +++ b/server/schedule/checker/priority_inspector_test.go @@ -21,12 +21,12 @@ import ( "github.com/stretchr/testify/require" "github.com/tikv/pd/pkg/mock/mockcluster" - "github.com/tikv/pd/server/config" + "github.com/tikv/pd/pkg/mock/mockconfig" ) func TestCheckPriorityRegions(t *testing.T) { re := require.New(t) - opt := config.NewTestOptions() + opt := mockconfig.NewTestOptions() ctx, cancel := context.WithCancel(context.Background()) defer cancel() tc := mockcluster.NewCluster(ctx, opt) @@ -37,7 +37,7 @@ func TestCheckPriorityRegions(t *testing.T) { tc.AddLeaderRegion(2, 2, 3) tc.AddLeaderRegion(3, 2) - pc := NewPriorityInspector(tc) + pc := NewPriorityInspector(tc, tc.GetOpts()) checkPriorityRegionTest(re, pc, tc) opt.SetPlacementRuleEnabled(true) re.True(opt.IsPlacementRulesEnabled()) diff --git a/server/schedule/checker/replica_checker.go b/server/schedule/checker/replica_checker.go index aa1445ab80b..f36eb4bac18 100644 --- a/server/schedule/checker/replica_checker.go +++ b/server/schedule/checker/replica_checker.go @@ -22,8 +22,8 @@ import ( "github.com/tikv/pd/pkg/cache" "github.com/tikv/pd/pkg/core" "github.com/tikv/pd/pkg/errs" - "github.com/tikv/pd/server/config" "github.com/tikv/pd/server/schedule" + "github.com/tikv/pd/server/schedule/config" "github.com/tikv/pd/server/schedule/operator" "go.uber.org/zap" ) @@ -61,15 +61,15 @@ var ( type ReplicaChecker struct { PauseController cluster schedule.Cluster - opts *config.PersistOptions + conf config.Config regionWaitingList cache.Cache } // NewReplicaChecker creates a replica checker. 
-func NewReplicaChecker(cluster schedule.Cluster, regionWaitingList cache.Cache) *ReplicaChecker { +func NewReplicaChecker(cluster schedule.Cluster, conf config.Config, regionWaitingList cache.Cache) *ReplicaChecker { return &ReplicaChecker{ cluster: cluster, - opts: cluster.GetOpts(), + conf: conf, regionWaitingList: regionWaitingList, } } @@ -113,7 +113,7 @@ func (r *ReplicaChecker) Check(region *core.RegionInfo) *operator.Operator { } func (r *ReplicaChecker) checkDownPeer(region *core.RegionInfo) *operator.Operator { - if !r.opts.IsRemoveDownReplicaEnabled() { + if !r.conf.IsRemoveDownReplicaEnabled() { return nil } @@ -129,7 +129,7 @@ func (r *ReplicaChecker) checkDownPeer(region *core.RegionInfo) *operator.Operat return nil } // Only consider the state of the Store, not `stats.DownSeconds`. - if store.DownTime() < r.opts.GetMaxStoreDownTime() { + if store.DownTime() < r.conf.GetMaxStoreDownTime() { continue } return r.fixPeer(region, storeID, downStatus) @@ -138,7 +138,7 @@ func (r *ReplicaChecker) checkDownPeer(region *core.RegionInfo) *operator.Operat } func (r *ReplicaChecker) checkOfflinePeer(region *core.RegionInfo) *operator.Operator { - if !r.opts.IsReplaceOfflineReplicaEnabled() { + if !r.conf.IsReplaceOfflineReplicaEnabled() { return nil } @@ -165,10 +165,10 @@ func (r *ReplicaChecker) checkOfflinePeer(region *core.RegionInfo) *operator.Ope } func (r *ReplicaChecker) checkMakeUpReplica(region *core.RegionInfo) *operator.Operator { - if !r.opts.IsMakeUpReplicaEnabled() { + if !r.conf.IsMakeUpReplicaEnabled() { return nil } - if len(region.GetPeers()) >= r.opts.GetMaxReplicas() { + if len(region.GetPeers()) >= r.conf.GetMaxReplicas() { return nil } log.Debug("region has fewer than max replicas", zap.Uint64("region-id", region.GetID()), zap.Int("peers", len(region.GetPeers()))) @@ -192,12 +192,12 @@ func (r *ReplicaChecker) checkMakeUpReplica(region *core.RegionInfo) *operator.O } func (r *ReplicaChecker) checkRemoveExtraReplica(region *core.RegionInfo) *operator.Operator { - if !r.opts.IsRemoveExtraReplicaEnabled() { + if !r.conf.IsRemoveExtraReplicaEnabled() { return nil } // when add learner peer, the number of peer will exceed max replicas for a while, // just comparing the the number of voters to avoid too many cancel add operator log. - if len(region.GetVoters()) <= r.opts.GetMaxReplicas() { + if len(region.GetVoters()) <= r.conf.GetMaxReplicas() { return nil } log.Debug("region has more than max replicas", zap.Uint64("region-id", region.GetID()), zap.Int("peers", len(region.GetPeers()))) @@ -217,7 +217,7 @@ func (r *ReplicaChecker) checkRemoveExtraReplica(region *core.RegionInfo) *opera } func (r *ReplicaChecker) checkLocationReplacement(region *core.RegionInfo) *operator.Operator { - if !r.opts.IsLocationReplacementEnabled() { + if !r.conf.IsLocationReplacementEnabled() { return nil } @@ -246,7 +246,7 @@ func (r *ReplicaChecker) checkLocationReplacement(region *core.RegionInfo) *oper func (r *ReplicaChecker) fixPeer(region *core.RegionInfo, storeID uint64, status string) *operator.Operator { // Check the number of replicas first. 
- if len(region.GetVoters()) > r.opts.GetMaxReplicas() { + if len(region.GetVoters()) > r.conf.GetMaxReplicas() { removeExtra := fmt.Sprintf("remove-extra-%s-replica", status) op, err := operator.CreateRemovePeerOperator(removeExtra, r.cluster, operator.OpReplica, region, storeID) if err != nil { @@ -292,8 +292,8 @@ func (r *ReplicaChecker) strategy(region *core.RegionInfo) *ReplicaStrategy { return &ReplicaStrategy{ checkerName: replicaCheckerName, cluster: r.cluster, - locationLabels: r.opts.GetLocationLabels(), - isolationLevel: r.opts.GetIsolationLevel(), + locationLabels: r.conf.GetLocationLabels(), + isolationLevel: r.conf.GetIsolationLevel(), region: region, } } diff --git a/server/schedule/checker/replica_checker_test.go b/server/schedule/checker/replica_checker_test.go index 5e8157ac0c7..814a22537cd 100644 --- a/server/schedule/checker/replica_checker_test.go +++ b/server/schedule/checker/replica_checker_test.go @@ -26,9 +26,10 @@ import ( "github.com/tikv/pd/pkg/cache" "github.com/tikv/pd/pkg/core" "github.com/tikv/pd/pkg/mock/mockcluster" + "github.com/tikv/pd/pkg/mock/mockconfig" "github.com/tikv/pd/pkg/utils/testutil" "github.com/tikv/pd/pkg/versioninfo" - "github.com/tikv/pd/server/config" + "github.com/tikv/pd/server/schedule/config" "github.com/tikv/pd/server/schedule/operator" ) @@ -45,11 +46,11 @@ func TestReplicaCheckerTestSuite(t *testing.T) { } func (suite *replicaCheckerTestSuite) SetupTest() { - cfg := config.NewTestOptions() + cfg := mockconfig.NewTestOptions() suite.ctx, suite.cancel = context.WithCancel(context.Background()) suite.cluster = mockcluster.NewCluster(suite.ctx, cfg) suite.cluster.SetClusterVersion(versioninfo.MinSupportedVersion(versioninfo.Version4_0)) - suite.rc = NewReplicaChecker(suite.cluster, cache.NewDefaultCache(10)) + suite.rc = NewReplicaChecker(suite.cluster, suite.cluster.GetOpts(), cache.NewDefaultCache(10)) stats := &pdpb.StoreStats{ Capacity: 100, Available: 100, @@ -115,9 +116,7 @@ func (suite *replicaCheckerTestSuite) TestReplacePendingPeer() { } func (suite *replicaCheckerTestSuite) TestReplaceOfflinePeer() { - suite.cluster.SetLabelPropertyConfig(config.LabelPropertyConfig{ - config.RejectLeader: {{Key: "noleader", Value: "true"}}, - }) + suite.cluster.SetLabelProperty(config.RejectLeader, "noleader", "true") peers := []*metapb.Peer{ { Id: 4, @@ -204,11 +203,11 @@ func (suite *replicaCheckerTestSuite) downPeerAndCheck(aliveRole metapb.PeerRole } func (suite *replicaCheckerTestSuite) TestBasic() { - opt := config.NewTestOptions() + opt := mockconfig.NewTestOptions() tc := mockcluster.NewCluster(suite.ctx, opt) tc.SetMaxSnapshotCount(2) tc.SetClusterVersion(versioninfo.MinSupportedVersion(versioninfo.Version4_0)) - rc := NewReplicaChecker(tc, cache.NewDefaultCache(10)) + rc := NewReplicaChecker(tc, tc.GetOpts(), cache.NewDefaultCache(10)) // Add stores 1,2,3,4. 
tc.AddRegionStore(1, 4) @@ -277,14 +276,14 @@ func (suite *replicaCheckerTestSuite) TestBasic() { } func (suite *replicaCheckerTestSuite) TestLostStore() { - opt := config.NewTestOptions() + opt := mockconfig.NewTestOptions() tc := mockcluster.NewCluster(suite.ctx, opt) tc.SetClusterVersion(versioninfo.MinSupportedVersion(versioninfo.Version4_0)) tc.AddRegionStore(1, 1) tc.AddRegionStore(2, 1) - rc := NewReplicaChecker(tc, cache.NewDefaultCache(10)) + rc := NewReplicaChecker(tc, tc.GetOpts(), cache.NewDefaultCache(10)) // now region peer in store 1,2,3.but we just have store 1,2 // This happens only in recovering the PD tc @@ -296,14 +295,13 @@ func (suite *replicaCheckerTestSuite) TestLostStore() { } func (suite *replicaCheckerTestSuite) TestOffline() { - opt := config.NewTestOptions() + opt := mockconfig.NewTestOptions() tc := mockcluster.NewCluster(suite.ctx, opt) tc.SetClusterVersion(versioninfo.MinSupportedVersion(versioninfo.Version4_0)) tc.SetMaxReplicas(3) tc.SetLocationLabels([]string{"zone", "rack", "host"}) - rc := NewReplicaChecker(tc, cache.NewDefaultCache(10)) - + rc := NewReplicaChecker(tc, tc.GetOpts(), cache.NewDefaultCache(10)) tc.AddLabelsStore(1, 1, map[string]string{"zone": "z1", "rack": "r1", "host": "h1"}) tc.AddLabelsStore(2, 2, map[string]string{"zone": "z2", "rack": "r1", "host": "h1"}) tc.AddLabelsStore(3, 3, map[string]string{"zone": "z3", "rack": "r1", "host": "h1"}) @@ -348,13 +346,13 @@ func (suite *replicaCheckerTestSuite) TestOffline() { } func (suite *replicaCheckerTestSuite) TestDistinctScore() { - opt := config.NewTestOptions() + opt := mockconfig.NewTestOptions() tc := mockcluster.NewCluster(suite.ctx, opt) tc.SetClusterVersion(versioninfo.MinSupportedVersion(versioninfo.Version4_0)) tc.SetMaxReplicas(3) tc.SetLocationLabels([]string{"zone", "rack", "host"}) - rc := NewReplicaChecker(tc, cache.NewDefaultCache(10)) + rc := NewReplicaChecker(tc, tc.GetOpts(), cache.NewDefaultCache(10)) tc.AddLabelsStore(1, 9, map[string]string{"zone": "z1", "rack": "r1", "host": "h1"}) tc.AddLabelsStore(2, 8, map[string]string{"zone": "z1", "rack": "r1", "host": "h1"}) @@ -427,13 +425,13 @@ func (suite *replicaCheckerTestSuite) TestDistinctScore() { } func (suite *replicaCheckerTestSuite) TestDistinctScore2() { - opt := config.NewTestOptions() + opt := mockconfig.NewTestOptions() tc := mockcluster.NewCluster(suite.ctx, opt) tc.SetClusterVersion(versioninfo.MinSupportedVersion(versioninfo.Version4_0)) tc.SetMaxReplicas(5) tc.SetLocationLabels([]string{"zone", "host"}) - rc := NewReplicaChecker(tc, cache.NewDefaultCache(10)) + rc := NewReplicaChecker(tc, tc.GetOpts(), cache.NewDefaultCache(10)) tc.AddLabelsStore(1, 1, map[string]string{"zone": "z1", "host": "h1"}) tc.AddLabelsStore(2, 1, map[string]string{"zone": "z1", "host": "h2"}) @@ -457,11 +455,11 @@ func (suite *replicaCheckerTestSuite) TestDistinctScore2() { } func (suite *replicaCheckerTestSuite) TestStorageThreshold() { - opt := config.NewTestOptions() + opt := mockconfig.NewTestOptions() tc := mockcluster.NewCluster(suite.ctx, opt) tc.SetLocationLabels([]string{"zone"}) tc.SetClusterVersion(versioninfo.MinSupportedVersion(versioninfo.Version4_0)) - rc := NewReplicaChecker(tc, cache.NewDefaultCache(10)) + rc := NewReplicaChecker(tc, tc.GetOpts(), cache.NewDefaultCache(10)) tc.AddLabelsStore(1, 1, map[string]string{"zone": "z1"}) tc.UpdateStorageRatio(1, 0.5, 0.5) @@ -493,10 +491,10 @@ func (suite *replicaCheckerTestSuite) TestStorageThreshold() { } func (suite *replicaCheckerTestSuite) TestOpts() { - opt := 
config.NewTestOptions() + opt := mockconfig.NewTestOptions() tc := mockcluster.NewCluster(suite.ctx, opt) tc.SetClusterVersion(versioninfo.MinSupportedVersion(versioninfo.Version4_0)) - rc := NewReplicaChecker(tc, cache.NewDefaultCache(10)) + rc := NewReplicaChecker(tc, tc.GetOpts(), cache.NewDefaultCache(10)) tc.AddRegionStore(1, 100) tc.AddRegionStore(2, 100) @@ -524,11 +522,11 @@ func (suite *replicaCheckerTestSuite) TestOpts() { // See issue: https://github.com/tikv/pd/issues/3705 func (suite *replicaCheckerTestSuite) TestFixDownPeer() { - opt := config.NewTestOptions() + opt := mockconfig.NewTestOptions() tc := mockcluster.NewCluster(suite.ctx, opt) tc.SetClusterVersion(versioninfo.MinSupportedVersion(versioninfo.Version4_0)) tc.SetLocationLabels([]string{"zone"}) - rc := NewReplicaChecker(tc, cache.NewDefaultCache(10)) + rc := NewReplicaChecker(tc, tc.GetOpts(), cache.NewDefaultCache(10)) tc.AddLabelsStore(1, 1, map[string]string{"zone": "z1"}) tc.AddLabelsStore(2, 1, map[string]string{"zone": "z1"}) @@ -555,11 +553,11 @@ func (suite *replicaCheckerTestSuite) TestFixDownPeer() { // See issue: https://github.com/tikv/pd/issues/3705 func (suite *replicaCheckerTestSuite) TestFixOfflinePeer() { - opt := config.NewTestOptions() + opt := mockconfig.NewTestOptions() tc := mockcluster.NewCluster(suite.ctx, opt) tc.SetClusterVersion(versioninfo.MinSupportedVersion(versioninfo.Version4_0)) tc.SetLocationLabels([]string{"zone"}) - rc := NewReplicaChecker(tc, cache.NewDefaultCache(10)) + rc := NewReplicaChecker(tc, tc.GetOpts(), cache.NewDefaultCache(10)) tc.AddLabelsStore(1, 1, map[string]string{"zone": "z1"}) tc.AddLabelsStore(2, 1, map[string]string{"zone": "z1"}) diff --git a/server/schedule/checker/rule_checker_test.go b/server/schedule/checker/rule_checker_test.go index 52cf9be7c27..21d7d008dd6 100644 --- a/server/schedule/checker/rule_checker_test.go +++ b/server/schedule/checker/rule_checker_test.go @@ -28,9 +28,9 @@ import ( "github.com/tikv/pd/pkg/core" "github.com/tikv/pd/pkg/errs" "github.com/tikv/pd/pkg/mock/mockcluster" + "github.com/tikv/pd/pkg/mock/mockconfig" "github.com/tikv/pd/pkg/utils/testutil" "github.com/tikv/pd/pkg/versioninfo" - "github.com/tikv/pd/server/config" "github.com/tikv/pd/server/schedule/operator" "github.com/tikv/pd/server/schedule/placement" ) @@ -49,7 +49,7 @@ type ruleCheckerTestSuite struct { } func (suite *ruleCheckerTestSuite) SetupTest() { - cfg := config.NewTestOptions() + cfg := mockconfig.NewTestOptions() suite.ctx, suite.cancel = context.WithCancel(context.Background()) suite.cluster = mockcluster.NewCluster(suite.ctx, cfg) suite.cluster.SetClusterVersion(versioninfo.MinSupportedVersion(versioninfo.SwitchWitness)) diff --git a/server/schedule/checker/split_checker_test.go b/server/schedule/checker/split_checker_test.go index 957ca87bc07..3445e7cd9b9 100644 --- a/server/schedule/checker/split_checker_test.go +++ b/server/schedule/checker/split_checker_test.go @@ -21,7 +21,7 @@ import ( "github.com/stretchr/testify/require" "github.com/tikv/pd/pkg/mock/mockcluster" - "github.com/tikv/pd/server/config" + "github.com/tikv/pd/pkg/mock/mockconfig" "github.com/tikv/pd/server/schedule/labeler" "github.com/tikv/pd/server/schedule/operator" "github.com/tikv/pd/server/schedule/placement" @@ -29,7 +29,7 @@ import ( func TestSplit(t *testing.T) { re := require.New(t) - cfg := config.NewTestOptions() + cfg := mockconfig.NewTestOptions() cfg.GetReplicationConfig().EnablePlacementRules = true ctx, cancel := context.WithCancel(context.Background()) defer cancel() 
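
A minimal sketch of the test-side wiring these hunks converge on, placed here before the new server/schedule/config package is introduced below. Only the test name is made up for illustration; every call mirrors one that already appears in this diff (mockconfig.NewTestOptions, SetLabelProperty, and the extra tc.GetOpts() argument to NewReplicaChecker):

```go
package checker_test

import (
	"context"
	"testing"

	"github.com/tikv/pd/pkg/cache"
	"github.com/tikv/pd/pkg/mock/mockcluster"
	"github.com/tikv/pd/pkg/mock/mockconfig"
	"github.com/tikv/pd/pkg/versioninfo"
	"github.com/tikv/pd/server/schedule/checker"
	"github.com/tikv/pd/server/schedule/config"
)

// Illustrative wiring only: it strings together the calls this diff migrates
// tests to, without asserting any scheduling behaviour.
func TestReplicaCheckerWiring(t *testing.T) {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	// Test options now come from pkg/mock/mockconfig instead of server/config.
	opt := mockconfig.NewTestOptions()
	tc := mockcluster.NewCluster(ctx, opt)
	tc.SetClusterVersion(versioninfo.MinSupportedVersion(versioninfo.Version4_0))

	// Label properties are set through the mock cluster helper instead of
	// building a config.LabelPropertyConfig literal.
	tc.SetLabelProperty(config.RejectLeader, "noleader", "true")

	// The checker takes the schedule config explicitly; tc.GetOpts() now
	// returns the config.Config interface rather than *config.PersistOptions.
	rc := checker.NewReplicaChecker(tc, tc.GetOpts(), cache.NewDefaultCache(10))
	_ = rc
}
```

The effect of the extra parameter is that checkers and filters are programmed against the config.Config interface defined in server/schedule/config (shown next) instead of the concrete *config.PersistOptions from server/config.
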
diff --git a/server/schedule/config/config.go b/server/schedule/config/config.go new file mode 100644 index 00000000000..4f952d2c5bd --- /dev/null +++ b/server/schedule/config/config.go @@ -0,0 +1,98 @@ +package config + +import ( + "sync" + "time" + + "github.com/coreos/go-semver/semver" + "github.com/pingcap/kvproto/pkg/metapb" + "github.com/tikv/pd/pkg/core" + "github.com/tikv/pd/pkg/core/storelimit" +) + +// RejectLeader is the label property type that suggests a store should not +// have any region leaders. +const RejectLeader = "reject-leader" + +var schedulerMap sync.Map + +// RegisterScheduler registers the scheduler type. +func RegisterScheduler(typ string) { + schedulerMap.Store(typ, struct{}{}) +} + +// IsSchedulerRegistered checks if the named scheduler type is registered. +func IsSchedulerRegistered(name string) bool { + _, ok := schedulerMap.Load(name) + return ok +} + +// Config is the interface that wraps the Config related methods. +type Config interface { + GetReplicaScheduleLimit() uint64 + GetRegionScheduleLimit() uint64 + GetMergeScheduleLimit() uint64 + GetLeaderScheduleLimit() uint64 + GetHotRegionScheduleLimit() uint64 + GetWitnessScheduleLimit() uint64 + + GetHotRegionCacheHitsThreshold() int + GetMaxMovableHotPeerSize() int64 + IsTraceRegionFlow() bool + + GetSplitMergeInterval() time.Duration + GetMaxMergeRegionSize() uint64 + GetMaxMergeRegionKeys() uint64 + GetKeyType() core.KeyType + IsOneWayMergeEnabled() bool + IsCrossTableMergeEnabled() bool + + IsPlacementRulesEnabled() bool + IsPlacementRulesCacheEnabled() bool + + GetMaxReplicas() int + GetPatrolRegionInterval() time.Duration + GetMaxStoreDownTime() time.Duration + GetLocationLabels() []string + GetIsolationLevel() string + IsReplaceOfflineReplicaEnabled() bool + IsMakeUpReplicaEnabled() bool + IsRemoveExtraReplicaEnabled() bool + IsLocationReplacementEnabled() bool + IsRemoveDownReplicaEnabled() bool + + GetSwitchWitnessInterval() time.Duration + IsWitnessAllowed() bool + + GetLowSpaceRatio() float64 + GetHighSpaceRatio() float64 + GetTolerantSizeRatio() float64 + GetLeaderSchedulePolicy() core.SchedulePolicy + GetRegionScoreFormulaVersion() string + + GetMaxSnapshotCount() uint64 + GetMaxPendingPeerCount() uint64 + GetSchedulerMaxWaitingOperator() uint64 + GetStoreLimitByType(uint64, storelimit.Type) float64 + SetAllStoresLimit(storelimit.Type, float64) + GetSlowStoreEvictingAffectedStoreRatioThreshold() float64 + IsUseJointConsensus() bool + CheckLabelProperty(string, []*metapb.StoreLabel) bool + IsDebugMetricsEnabled() bool + GetClusterVersion() *semver.Version + // for test purpose + SetPlacementRuleEnabled(bool) + SetSplitMergeInterval(time.Duration) + SetMaxReplicas(int) + SetPlacementRulesCacheEnabled(bool) +} + +// StoreConfig is the interface that wraps the StoreConfig related methods. 
+type StoreConfig interface { + GetRegionMaxSize() uint64 + CheckRegionSize(uint64, uint64) error + CheckRegionKeys(uint64, uint64) error + IsEnableRegionBucket() bool + // for test purpose + SetRegionBucketEnabled(bool) +} diff --git a/server/schedule/filter/candidates.go b/server/schedule/filter/candidates.go index 2e6f5fade43..aa4486dc255 100644 --- a/server/schedule/filter/candidates.go +++ b/server/schedule/filter/candidates.go @@ -19,7 +19,7 @@ import ( "sort" "github.com/tikv/pd/pkg/core" - "github.com/tikv/pd/server/config" + "github.com/tikv/pd/server/schedule/config" "github.com/tikv/pd/server/schedule/plan" ) @@ -35,14 +35,14 @@ func NewCandidates(stores []*core.StoreInfo) *StoreCandidates { } // FilterSource keeps stores that can pass all source filters. -func (c *StoreCandidates) FilterSource(opt *config.PersistOptions, collector *plan.Collector, counter *Counter, filters ...Filter) *StoreCandidates { - c.Stores = SelectSourceStores(c.Stores, filters, opt, collector, counter) +func (c *StoreCandidates) FilterSource(conf config.Config, collector *plan.Collector, counter *Counter, filters ...Filter) *StoreCandidates { + c.Stores = SelectSourceStores(c.Stores, filters, conf, collector, counter) return c } // FilterTarget keeps stores that can pass all target filters. -func (c *StoreCandidates) FilterTarget(opt *config.PersistOptions, collector *plan.Collector, counter *Counter, filters ...Filter) *StoreCandidates { - c.Stores = SelectTargetStores(c.Stores, filters, opt, collector, counter) +func (c *StoreCandidates) FilterTarget(conf config.Config, collector *plan.Collector, counter *Counter, filters ...Filter) *StoreCandidates { + c.Stores = SelectTargetStores(c.Stores, filters, conf, collector, counter) return c } diff --git a/server/schedule/filter/candidates_test.go b/server/schedule/filter/candidates_test.go index d2d94862d00..6dd6ebf1461 100644 --- a/server/schedule/filter/candidates_test.go +++ b/server/schedule/filter/candidates_test.go @@ -20,7 +20,7 @@ import ( "github.com/pingcap/kvproto/pkg/metapb" "github.com/stretchr/testify/require" "github.com/tikv/pd/pkg/core" - "github.com/tikv/pd/server/config" + "github.com/tikv/pd/server/schedule/config" "github.com/tikv/pd/server/schedule/plan" ) @@ -50,7 +50,7 @@ type idFilter func(uint64) bool func (f idFilter) Scope() string { return "idFilter" } func (f idFilter) Type() filterType { return filterType(0) } -func (f idFilter) Source(opt *config.PersistOptions, store *core.StoreInfo) *plan.Status { +func (f idFilter) Source(conf config.Config, store *core.StoreInfo) *plan.Status { if f(store.GetID()) { return statusOK } @@ -58,7 +58,7 @@ func (f idFilter) Source(opt *config.PersistOptions, store *core.StoreInfo) *pla return statusStoreScoreDisallowed } -func (f idFilter) Target(opt *config.PersistOptions, store *core.StoreInfo) *plan.Status { +func (f idFilter) Target(conf config.Config, store *core.StoreInfo) *plan.Status { if f(store.GetID()) { return statusOK } diff --git a/server/schedule/filter/comparer.go b/server/schedule/filter/comparer.go index a4068bb8d63..36db4627aa6 100644 --- a/server/schedule/filter/comparer.go +++ b/server/schedule/filter/comparer.go @@ -16,7 +16,7 @@ package filter import ( "github.com/tikv/pd/pkg/core" - "github.com/tikv/pd/server/config" + "github.com/tikv/pd/server/schedule/config" ) // StoreComparer compares 2 stores. 
Often used for StoreCandidates to @@ -25,10 +25,10 @@ type StoreComparer func(a, b *core.StoreInfo) int // RegionScoreComparer creates a StoreComparer to sort store by region // score. -func RegionScoreComparer(opt *config.PersistOptions) StoreComparer { +func RegionScoreComparer(conf config.Config) StoreComparer { return func(a, b *core.StoreInfo) int { - sa := a.RegionScore(opt.GetRegionScoreFormulaVersion(), opt.GetHighSpaceRatio(), opt.GetLowSpaceRatio(), 0) - sb := b.RegionScore(opt.GetRegionScoreFormulaVersion(), opt.GetHighSpaceRatio(), opt.GetLowSpaceRatio(), 0) + sa := a.RegionScore(conf.GetRegionScoreFormulaVersion(), conf.GetHighSpaceRatio(), conf.GetLowSpaceRatio(), 0) + sb := b.RegionScore(conf.GetRegionScoreFormulaVersion(), conf.GetHighSpaceRatio(), conf.GetLowSpaceRatio(), 0) switch { case sa > sb: return 1 diff --git a/server/schedule/filter/filters.go b/server/schedule/filter/filters.go index aa2ffdbaf67..52f76a20df6 100644 --- a/server/schedule/filter/filters.go +++ b/server/schedule/filter/filters.go @@ -23,18 +23,18 @@ import ( "github.com/tikv/pd/pkg/core/storelimit" "github.com/tikv/pd/pkg/slice" "github.com/tikv/pd/pkg/utils/typeutil" - "github.com/tikv/pd/server/config" + "github.com/tikv/pd/server/schedule/config" "github.com/tikv/pd/server/schedule/placement" "github.com/tikv/pd/server/schedule/plan" "go.uber.org/zap" ) // SelectSourceStores selects stores that be selected as source store from the list. -func SelectSourceStores(stores []*core.StoreInfo, filters []Filter, opt *config.PersistOptions, collector *plan.Collector, +func SelectSourceStores(stores []*core.StoreInfo, filters []Filter, conf config.Config, collector *plan.Collector, counter *Counter) []*core.StoreInfo { return filterStoresBy(stores, func(s *core.StoreInfo) bool { return slice.AllOf(filters, func(i int) bool { - status := filters[i].Source(opt, s) + status := filters[i].Source(conf, s) if !status.IsOK() { if counter != nil { counter.inc(source, filters[i].Type(), s.GetID(), 0) @@ -54,12 +54,12 @@ func SelectSourceStores(stores []*core.StoreInfo, filters []Filter, opt *config. } // SelectUnavailableTargetStores selects unavailable stores that can't be selected as target store from the list. -func SelectUnavailableTargetStores(stores []*core.StoreInfo, filters []Filter, opt *config.PersistOptions, +func SelectUnavailableTargetStores(stores []*core.StoreInfo, filters []Filter, conf config.Config, collector *plan.Collector, counter *Counter) []*core.StoreInfo { return filterStoresBy(stores, func(s *core.StoreInfo) bool { targetID := strconv.FormatUint(s.GetID(), 10) return slice.AnyOf(filters, func(i int) bool { - status := filters[i].Target(opt, s) + status := filters[i].Target(conf, s) if !status.IsOK() { cfilter, ok := filters[i].(comparingFilter) sourceID := uint64(0) @@ -84,7 +84,7 @@ func SelectUnavailableTargetStores(stores []*core.StoreInfo, filters []Filter, o } // SelectTargetStores selects stores that be selected as target store from the list. -func SelectTargetStores(stores []*core.StoreInfo, filters []Filter, opt *config.PersistOptions, collector *plan.Collector, +func SelectTargetStores(stores []*core.StoreInfo, filters []Filter, conf config.Config, collector *plan.Collector, counter *Counter) []*core.StoreInfo { if len(filters) == 0 { return stores @@ -93,7 +93,7 @@ func SelectTargetStores(stores []*core.StoreInfo, filters []Filter, opt *config. 
return filterStoresBy(stores, func(s *core.StoreInfo) bool { return slice.AllOf(filters, func(i int) bool { filter := filters[i] - status := filter.Target(opt, s) + status := filter.Target(conf, s) if !status.IsOK() { cfilter, ok := filter.(comparingFilter) sourceID := uint64(0) @@ -132,9 +132,9 @@ type Filter interface { Scope() string Type() filterType // Return plan.Status to show whether be filtered as source - Source(opt *config.PersistOptions, store *core.StoreInfo) *plan.Status + Source(conf config.Config, store *core.StoreInfo) *plan.Status // Return plan.Status to show whether be filtered as target - Target(opt *config.PersistOptions, store *core.StoreInfo) *plan.Status + Target(conf config.Config, store *core.StoreInfo) *plan.Status } // comparingFilter is an interface to filter target store by comparing source and target stores @@ -145,10 +145,10 @@ type comparingFilter interface { } // Target checks if store can pass all Filters as target store. -func Target(opt *config.PersistOptions, store *core.StoreInfo, filters []Filter) bool { +func Target(conf config.Config, store *core.StoreInfo, filters []Filter) bool { storeID := strconv.FormatUint(store.GetID(), 10) for _, filter := range filters { - status := filter.Target(opt, store) + status := filter.Target(conf, store) if !status.IsOK() { if status != statusStoreRemoved { cfilter, ok := filter.(comparingFilter) @@ -188,14 +188,14 @@ func (f *excludedFilter) Type() filterType { return excluded } -func (f *excludedFilter) Source(opt *config.PersistOptions, store *core.StoreInfo) *plan.Status { +func (f *excludedFilter) Source(conf config.Config, store *core.StoreInfo) *plan.Status { if _, ok := f.sources[store.GetID()]; ok { return statusStoreAlreadyHasPeer } return statusOK } -func (f *excludedFilter) Target(opt *config.PersistOptions, store *core.StoreInfo) *plan.Status { +func (f *excludedFilter) Target(conf config.Config, store *core.StoreInfo) *plan.Status { if _, ok := f.targets[store.GetID()]; ok { return statusStoreAlreadyHasPeer } @@ -218,12 +218,12 @@ func (f *storageThresholdFilter) Type() filterType { return storageThreshold } -func (f *storageThresholdFilter) Source(opt *config.PersistOptions, store *core.StoreInfo) *plan.Status { +func (f *storageThresholdFilter) Source(conf config.Config, store *core.StoreInfo) *plan.Status { return statusOK } -func (f *storageThresholdFilter) Target(opt *config.PersistOptions, store *core.StoreInfo) *plan.Status { - if !store.IsLowSpace(opt.GetLowSpaceRatio()) { +func (f *storageThresholdFilter) Target(conf config.Config, store *core.StoreInfo) *plan.Status { + if !store.IsLowSpace(conf.GetLowSpaceRatio()) { return statusOK } return statusStoreLowSpace @@ -286,11 +286,11 @@ func (f *distinctScoreFilter) Type() filterType { return distinctScore } -func (f *distinctScoreFilter) Source(_ *config.PersistOptions, _ *core.StoreInfo) *plan.Status { +func (f *distinctScoreFilter) Source(_ config.Config, _ *core.StoreInfo) *plan.Status { return statusOK } -func (f *distinctScoreFilter) Target(_ *config.PersistOptions, store *core.StoreInfo) *plan.Status { +func (f *distinctScoreFilter) Target(_ config.Config, store *core.StoreInfo) *plan.Status { score := core.DistinctScore(f.labels, f.stores, store) switch f.policy { case locationSafeguard: @@ -341,9 +341,9 @@ func (f *StoreStateFilter) Type() filterType { // conditionFunc defines condition to determine a store should be selected. // It should consider if the filter allows temporary states. 
-type conditionFunc func(*config.PersistOptions, *core.StoreInfo) *plan.Status +type conditionFunc func(config.Config, *core.StoreInfo) *plan.Status -func (f *StoreStateFilter) isRemoved(_ *config.PersistOptions, store *core.StoreInfo) *plan.Status { +func (f *StoreStateFilter) isRemoved(_ config.Config, store *core.StoreInfo) *plan.Status { if store.IsRemoved() { f.Reason = storeStateTombstone return statusStoreRemoved @@ -352,8 +352,8 @@ func (f *StoreStateFilter) isRemoved(_ *config.PersistOptions, store *core.Store return statusOK } -func (f *StoreStateFilter) isDown(opt *config.PersistOptions, store *core.StoreInfo) *plan.Status { - if store.DownTime() > opt.GetMaxStoreDownTime() { +func (f *StoreStateFilter) isDown(conf config.Config, store *core.StoreInfo) *plan.Status { + if store.DownTime() > conf.GetMaxStoreDownTime() { f.Reason = storeStateDown return statusStoreDown } @@ -362,7 +362,7 @@ func (f *StoreStateFilter) isDown(opt *config.PersistOptions, store *core.StoreI return statusOK } -func (f *StoreStateFilter) isRemoving(_ *config.PersistOptions, store *core.StoreInfo) *plan.Status { +func (f *StoreStateFilter) isRemoving(_ config.Config, store *core.StoreInfo) *plan.Status { if store.IsRemoving() { f.Reason = storeStateOffline return statusStoresRemoving @@ -371,7 +371,7 @@ func (f *StoreStateFilter) isRemoving(_ *config.PersistOptions, store *core.Stor return statusOK } -func (f *StoreStateFilter) pauseLeaderTransfer(_ *config.PersistOptions, store *core.StoreInfo) *plan.Status { +func (f *StoreStateFilter) pauseLeaderTransfer(_ config.Config, store *core.StoreInfo) *plan.Status { if !store.AllowLeaderTransfer() { f.Reason = storeStatePauseLeader return statusStoreRejectLeader @@ -380,7 +380,7 @@ func (f *StoreStateFilter) pauseLeaderTransfer(_ *config.PersistOptions, store * return statusOK } -func (f *StoreStateFilter) slowStoreEvicted(opt *config.PersistOptions, store *core.StoreInfo) *plan.Status { +func (f *StoreStateFilter) slowStoreEvicted(conf config.Config, store *core.StoreInfo) *plan.Status { if store.EvictedAsSlowStore() { f.Reason = storeStateSlow return statusStoreRejectLeader @@ -389,7 +389,7 @@ func (f *StoreStateFilter) slowStoreEvicted(opt *config.PersistOptions, store *c return statusOK } -func (f *StoreStateFilter) slowTrendEvicted(opt *config.PersistOptions, store *core.StoreInfo) *plan.Status { +func (f *StoreStateFilter) slowTrendEvicted(_ config.Config, store *core.StoreInfo) *plan.Status { if store.IsEvictedAsSlowTrend() { f.Reason = storeStateSlowTrend return statusStoreRejectLeader @@ -398,7 +398,7 @@ func (f *StoreStateFilter) slowTrendEvicted(opt *config.PersistOptions, store *c return statusOK } -func (f *StoreStateFilter) isDisconnected(_ *config.PersistOptions, store *core.StoreInfo) *plan.Status { +func (f *StoreStateFilter) isDisconnected(_ config.Config, store *core.StoreInfo) *plan.Status { if !f.AllowTemporaryStates && store.IsDisconnected() { f.Reason = storeStateDisconnected return statusStoreDisconnected @@ -407,7 +407,7 @@ func (f *StoreStateFilter) isDisconnected(_ *config.PersistOptions, store *core. 
return statusOK } -func (f *StoreStateFilter) isBusy(_ *config.PersistOptions, store *core.StoreInfo) *plan.Status { +func (f *StoreStateFilter) isBusy(_ config.Config, store *core.StoreInfo) *plan.Status { if !f.AllowTemporaryStates && store.IsBusy() { f.Reason = storeStateBusy return statusStoreBusy @@ -416,7 +416,7 @@ func (f *StoreStateFilter) isBusy(_ *config.PersistOptions, store *core.StoreInf return statusOK } -func (f *StoreStateFilter) exceedRemoveLimit(_ *config.PersistOptions, store *core.StoreInfo) *plan.Status { +func (f *StoreStateFilter) exceedRemoveLimit(_ config.Config, store *core.StoreInfo) *plan.Status { if !f.AllowTemporaryStates && !store.IsAvailable(storelimit.RemovePeer) { f.Reason = storeStateExceedRemoveLimit return statusStoreRemoveLimit @@ -425,7 +425,7 @@ func (f *StoreStateFilter) exceedRemoveLimit(_ *config.PersistOptions, store *co return statusOK } -func (f *StoreStateFilter) exceedAddLimit(_ *config.PersistOptions, store *core.StoreInfo) *plan.Status { +func (f *StoreStateFilter) exceedAddLimit(_ config.Config, store *core.StoreInfo) *plan.Status { if !f.AllowTemporaryStates && !store.IsAvailable(storelimit.AddPeer) { f.Reason = storeStateExceedAddLimit return statusStoreAddLimit @@ -434,9 +434,9 @@ func (f *StoreStateFilter) exceedAddLimit(_ *config.PersistOptions, store *core. return statusOK } -func (f *StoreStateFilter) tooManySnapshots(opt *config.PersistOptions, store *core.StoreInfo) *plan.Status { - if !f.AllowTemporaryStates && (uint64(store.GetSendingSnapCount()) > opt.GetMaxSnapshotCount() || - uint64(store.GetReceivingSnapCount()) > opt.GetMaxSnapshotCount()) { +func (f *StoreStateFilter) tooManySnapshots(conf config.Config, store *core.StoreInfo) *plan.Status { + if !f.AllowTemporaryStates && (uint64(store.GetSendingSnapCount()) > conf.GetMaxSnapshotCount() || + uint64(store.GetReceivingSnapCount()) > conf.GetMaxSnapshotCount()) { f.Reason = storeStateTooManySnapshot return statusStoreSnapshotThrottled } @@ -444,10 +444,10 @@ func (f *StoreStateFilter) tooManySnapshots(opt *config.PersistOptions, store *c return statusOK } -func (f *StoreStateFilter) tooManyPendingPeers(opt *config.PersistOptions, store *core.StoreInfo) *plan.Status { +func (f *StoreStateFilter) tooManyPendingPeers(conf config.Config, store *core.StoreInfo) *plan.Status { if !f.AllowTemporaryStates && - opt.GetMaxPendingPeerCount() > 0 && - store.GetPendingPeerCount() > int(opt.GetMaxPendingPeerCount()) { + conf.GetMaxPendingPeerCount() > 0 && + store.GetPendingPeerCount() > int(conf.GetMaxPendingPeerCount()) { f.Reason = storeStateTooManyPendingPeer return statusStorePendingPeerThrottled } @@ -455,8 +455,8 @@ func (f *StoreStateFilter) tooManyPendingPeers(opt *config.PersistOptions, store return statusOK } -func (f *StoreStateFilter) hasRejectLeaderProperty(opts *config.PersistOptions, store *core.StoreInfo) *plan.Status { - if opts.CheckLabelProperty(config.RejectLeader, store.GetLabels()) { +func (f *StoreStateFilter) hasRejectLeaderProperty(conf config.Config, store *core.StoreInfo) *plan.Status { + if conf.CheckLabelProperty(config.RejectLeader, store.GetLabels()) { f.Reason = storeStateRejectLeader return statusStoreRejectLeader } @@ -488,7 +488,7 @@ const ( fastFailoverTarget ) -func (f *StoreStateFilter) anyConditionMatch(typ int, opt *config.PersistOptions, store *core.StoreInfo) *plan.Status { +func (f *StoreStateFilter) anyConditionMatch(typ int, conf config.Config, store *core.StoreInfo) *plan.Status { var funcs []conditionFunc switch typ { case leaderSource: @@ 
-511,7 +511,7 @@ func (f *StoreStateFilter) anyConditionMatch(typ int, opt *config.PersistOptions funcs = []conditionFunc{f.isRemoved, f.isRemoving, f.isDown, f.isDisconnected, f.isBusy} } for _, cf := range funcs { - if status := cf(opt, store); !status.IsOK() { + if status := cf(conf, store); !status.IsOK() { return status } } @@ -520,14 +520,14 @@ func (f *StoreStateFilter) anyConditionMatch(typ int, opt *config.PersistOptions // Source returns true when the store can be selected as the schedule // source. -func (f *StoreStateFilter) Source(opts *config.PersistOptions, store *core.StoreInfo) (status *plan.Status) { +func (f *StoreStateFilter) Source(conf config.Config, store *core.StoreInfo) (status *plan.Status) { if f.TransferLeader { - if status = f.anyConditionMatch(leaderSource, opts, store); !status.IsOK() { + if status = f.anyConditionMatch(leaderSource, conf, store); !status.IsOK() { return } } if f.MoveRegion { - if status = f.anyConditionMatch(regionSource, opts, store); !status.IsOK() { + if status = f.anyConditionMatch(regionSource, conf, store); !status.IsOK() { return } } @@ -536,22 +536,22 @@ func (f *StoreStateFilter) Source(opts *config.PersistOptions, store *core.Store // Target returns true when the store can be selected as the schedule // target. -func (f *StoreStateFilter) Target(opts *config.PersistOptions, store *core.StoreInfo) (status *plan.Status) { +func (f *StoreStateFilter) Target(conf config.Config, store *core.StoreInfo) (status *plan.Status) { if f.TransferLeader { - if status = f.anyConditionMatch(leaderTarget, opts, store); !status.IsOK() { + if status = f.anyConditionMatch(leaderTarget, conf, store); !status.IsOK() { return } } if f.MoveRegion && f.AllowFastFailover { - return f.anyConditionMatch(fastFailoverTarget, opts, store) + return f.anyConditionMatch(fastFailoverTarget, conf, store) } if f.MoveRegion && f.ScatterRegion { - if status = f.anyConditionMatch(scatterRegionTarget, opts, store); !status.IsOK() { + if status = f.anyConditionMatch(scatterRegionTarget, conf, store); !status.IsOK() { return } } if f.MoveRegion && !f.ScatterRegion { - if status = f.anyConditionMatch(regionTarget, opts, store); !status.IsOK() { + if status = f.anyConditionMatch(regionTarget, conf, store); !status.IsOK() { return } } @@ -580,7 +580,7 @@ func (f labelConstraintFilter) Type() filterType { } // Source filters stores when select them as schedule source. -func (f labelConstraintFilter) Source(opt *config.PersistOptions, store *core.StoreInfo) *plan.Status { +func (f labelConstraintFilter) Source(conf config.Config, store *core.StoreInfo) *plan.Status { if placement.MatchLabelConstraints(store, f.constraints) { return statusOK } @@ -588,7 +588,7 @@ func (f labelConstraintFilter) Source(opt *config.PersistOptions, store *core.St } // Target filters stores when select them as schedule target. 
-func (f labelConstraintFilter) Target(_ *config.PersistOptions, store *core.StoreInfo) *plan.Status { +func (f labelConstraintFilter) Target(_ config.Config, store *core.StoreInfo) *plan.Status { if placement.MatchLabelConstraints(store, f.constraints) { return statusOK } @@ -630,7 +630,7 @@ func (f *ruleFitFilter) Type() filterType { return ruleFit } -func (f *ruleFitFilter) Source(_ *config.PersistOptions, _ *core.StoreInfo) *plan.Status { +func (f *ruleFitFilter) Source(_ config.Config, _ *core.StoreInfo) *plan.Status { return statusOK } @@ -639,7 +639,7 @@ func (f *ruleFitFilter) Source(_ *config.PersistOptions, _ *core.StoreInfo) *pla // the replaced store can match the source rule. // RegionA:[1,2,3], move peer1 --> peer2 will not allow, because it's count not match the rule. // but transfer role peer1 --> peer2, it will support. -func (f *ruleFitFilter) Target(options *config.PersistOptions, store *core.StoreInfo) *plan.Status { +func (f *ruleFitFilter) Target(options config.Config, store *core.StoreInfo) *plan.Status { if f.oldFit.Replace(f.srcStore, store) { return statusOK } @@ -683,11 +683,11 @@ func (f *ruleLeaderFitFilter) Type() filterType { return ruleLeader } -func (f *ruleLeaderFitFilter) Source(_ *config.PersistOptions, _ *core.StoreInfo) *plan.Status { +func (f *ruleLeaderFitFilter) Source(_ config.Config, _ *core.StoreInfo) *plan.Status { return statusOK } -func (f *ruleLeaderFitFilter) Target(_ *config.PersistOptions, store *core.StoreInfo) *plan.Status { +func (f *ruleLeaderFitFilter) Target(_ config.Config, store *core.StoreInfo) *plan.Status { targetStoreID := store.GetID() targetPeer := f.region.GetStorePeer(targetStoreID) if targetPeer == nil && !f.allowMoveLeader { @@ -739,11 +739,11 @@ func (f *ruleWitnessFitFilter) Type() filterType { return ruleFit } -func (f *ruleWitnessFitFilter) Source(_ *config.PersistOptions, _ *core.StoreInfo) *plan.Status { +func (f *ruleWitnessFitFilter) Source(_ config.Config, _ *core.StoreInfo) *plan.Status { return statusOK } -func (f *ruleWitnessFitFilter) Target(options *config.PersistOptions, store *core.StoreInfo) *plan.Status { +func (f *ruleWitnessFitFilter) Target(_ config.Config, store *core.StoreInfo) *plan.Status { targetStoreID := store.GetID() targetPeer := f.region.GetStorePeer(targetStoreID) if targetPeer == nil { @@ -761,19 +761,19 @@ func (f *ruleWitnessFitFilter) Target(options *config.PersistOptions, store *cor // NewPlacementSafeguard creates a filter that ensures after replace a peer with new // peer, the placement restriction will not become worse. -func NewPlacementSafeguard(scope string, opt *config.PersistOptions, cluster *core.BasicCluster, ruleManager *placement.RuleManager, +func NewPlacementSafeguard(scope string, conf config.Config, cluster *core.BasicCluster, ruleManager *placement.RuleManager, region *core.RegionInfo, sourceStore *core.StoreInfo, oldFit *placement.RegionFit) Filter { - if opt.IsPlacementRulesEnabled() { + if conf.IsPlacementRulesEnabled() { return newRuleFitFilter(scope, cluster, ruleManager, region, oldFit, sourceStore.GetID()) } - return NewLocationSafeguard(scope, opt.GetLocationLabels(), cluster.GetRegionStores(region), sourceStore) + return NewLocationSafeguard(scope, conf.GetLocationLabels(), cluster.GetRegionStores(region), sourceStore) } // NewPlacementLeaderSafeguard creates a filter that ensures after transfer a leader with // existed peer, the placement restriction will not become worse. 
// Note that it only worked when PlacementRules enabled otherwise it will always permit the sourceStore. -func NewPlacementLeaderSafeguard(scope string, opt *config.PersistOptions, cluster *core.BasicCluster, ruleManager *placement.RuleManager, region *core.RegionInfo, sourceStore *core.StoreInfo, allowMoveLeader bool) Filter { - if opt.IsPlacementRulesEnabled() { +func NewPlacementLeaderSafeguard(scope string, conf config.Config, cluster *core.BasicCluster, ruleManager *placement.RuleManager, region *core.RegionInfo, sourceStore *core.StoreInfo, allowMoveLeader bool) Filter { + if conf.IsPlacementRulesEnabled() { return newRuleLeaderFitFilter(scope, cluster, ruleManager, region, sourceStore.GetID(), allowMoveLeader) } return nil @@ -782,9 +782,9 @@ func NewPlacementLeaderSafeguard(scope string, opt *config.PersistOptions, clust // NewPlacementWitnessSafeguard creates a filter that ensures after transfer a witness with // existed peer, the placement restriction will not become worse. // Note that it only worked when PlacementRules enabled otherwise it will always permit the sourceStore. -func NewPlacementWitnessSafeguard(scope string, opt *config.PersistOptions, cluster *core.BasicCluster, ruleManager *placement.RuleManager, +func NewPlacementWitnessSafeguard(scope string, conf config.Config, cluster *core.BasicCluster, ruleManager *placement.RuleManager, region *core.RegionInfo, sourceStore *core.StoreInfo, oldFit *placement.RegionFit) Filter { - if opt.IsPlacementRulesEnabled() { + if conf.IsPlacementRulesEnabled() { return newRuleWitnessFitFilter(scope, cluster, ruleManager, region, oldFit, sourceStore.GetID()) } return nil @@ -811,14 +811,14 @@ func (f *engineFilter) Type() filterType { return engine } -func (f *engineFilter) Source(_ *config.PersistOptions, store *core.StoreInfo) *plan.Status { +func (f *engineFilter) Source(_ config.Config, store *core.StoreInfo) *plan.Status { if f.constraint.MatchStore(store) { return statusOK } return statusStoreNotMatchRule } -func (f *engineFilter) Target(_ *config.PersistOptions, store *core.StoreInfo) *plan.Status { +func (f *engineFilter) Target(_ config.Config, store *core.StoreInfo) *plan.Status { if f.constraint.MatchStore(store) { return statusOK } @@ -854,14 +854,14 @@ func (f *specialUseFilter) Type() filterType { return specialUse } -func (f *specialUseFilter) Source(opt *config.PersistOptions, store *core.StoreInfo) *plan.Status { - if store.IsLowSpace(opt.GetLowSpaceRatio()) || !f.constraint.MatchStore(store) { +func (f *specialUseFilter) Source(conf config.Config, store *core.StoreInfo) *plan.Status { + if store.IsLowSpace(conf.GetLowSpaceRatio()) || !f.constraint.MatchStore(store) { return statusOK } return statusStoreNotMatchRule } -func (f *specialUseFilter) Target(opt *config.PersistOptions, store *core.StoreInfo) *plan.Status { +func (f *specialUseFilter) Target(conf config.Config, store *core.StoreInfo) *plan.Status { if !f.constraint.MatchStore(store) { return statusOK } @@ -928,11 +928,11 @@ func (f *isolationFilter) Type() filterType { return isolation } -func (f *isolationFilter) Source(opt *config.PersistOptions, store *core.StoreInfo) *plan.Status { +func (f *isolationFilter) Source(conf config.Config, store *core.StoreInfo) *plan.Status { return statusOK } -func (f *isolationFilter) Target(_ *config.PersistOptions, store *core.StoreInfo) *plan.Status { +func (f *isolationFilter) Target(_ config.Config, store *core.StoreInfo) *plan.Status { // No isolation constraint to fit if len(f.constraintSet) == 0 { return 
statusStoreNotMatchIsolation @@ -953,7 +953,7 @@ func (f *isolationFilter) Target(_ *config.PersistOptions, store *core.StoreInfo // createRegionForRuleFit is used to create a clone region with RegionCreateOptions which is only used for // FitRegion in filter func createRegionForRuleFit(startKey, endKey []byte, - peers []*metapb.Peer, leader *metapb.Peer, opts ...core.RegionCreateOption) *core.RegionInfo { + peers []*metapb.Peer, leader *metapb.Peer, config ...core.RegionCreateOption) *core.RegionInfo { copyLeader := typeutil.DeepClone(leader, core.RegionPeerFactory) copyPeers := make([]*metapb.Peer, 0, len(peers)) for _, p := range peers { @@ -968,6 +968,6 @@ func createRegionForRuleFit(startKey, endKey []byte, StartKey: startKey, EndKey: endKey, Peers: copyPeers, - }, copyLeader, opts...) + }, copyLeader, config...) return cloneRegion } diff --git a/server/schedule/filter/filters_test.go b/server/schedule/filter/filters_test.go index 076e17184f8..b6c607c5fa0 100644 --- a/server/schedule/filter/filters_test.go +++ b/server/schedule/filter/filters_test.go @@ -24,7 +24,7 @@ import ( "github.com/stretchr/testify/require" "github.com/tikv/pd/pkg/core" "github.com/tikv/pd/pkg/mock/mockcluster" - "github.com/tikv/pd/server/config" + "github.com/tikv/pd/pkg/mock/mockconfig" "github.com/tikv/pd/server/schedule/placement" "github.com/tikv/pd/server/schedule/plan" ) @@ -59,8 +59,8 @@ func TestDistinctScoreFilter(t *testing.T) { } ls := NewLocationSafeguard("", labels, stores, allStores[testCase.source-1]) li := NewLocationImprover("", labels, stores, allStores[testCase.source-1]) - re.Equal(testCase.safeGuardRes, ls.Target(config.NewTestOptions(), allStores[testCase.target-1]).StatusCode) - re.Equal(testCase.improverRes, li.Target(config.NewTestOptions(), allStores[testCase.target-1]).StatusCode) + re.Equal(testCase.safeGuardRes, ls.Target(mockconfig.NewTestOptions(), allStores[testCase.target-1]).StatusCode) + re.Equal(testCase.improverRes, li.Target(mockconfig.NewTestOptions(), allStores[testCase.target-1]).StatusCode) } } @@ -69,7 +69,7 @@ func TestLabelConstraintsFilter(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - opt := config.NewTestOptions() + opt := mockconfig.NewTestOptions() testCluster := mockcluster.NewCluster(ctx, opt) store := core.NewStoreInfoWithLabel(1, map[string]string{"id": "1"}) @@ -100,7 +100,7 @@ func TestRuleFitFilter(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - opt := config.NewTestOptions() + opt := mockconfig.NewTestOptions() opt.SetPlacementRuleEnabled(false) testCluster := mockcluster.NewCluster(ctx, opt) testCluster.SetLocationLabels([]string{"zone"}) @@ -156,7 +156,7 @@ func TestStoreStateFilter(t *testing.T) { &StoreStateFilter{TransferLeader: true, MoveRegion: true}, &StoreStateFilter{MoveRegion: true, AllowTemporaryStates: true}, } - opt := config.NewTestOptions() + opt := mockconfig.NewTestOptions() store := core.NewStoreInfoWithLabel(1, map[string]string{}) type testCase struct { @@ -208,7 +208,7 @@ func TestStoreStateFilterReason(t *testing.T) { &StoreStateFilter{TransferLeader: true, MoveRegion: true}, &StoreStateFilter{MoveRegion: true, AllowTemporaryStates: true}, } - opt := config.NewTestOptions() + opt := mockconfig.NewTestOptions() store := core.NewStoreInfoWithLabel(1, map[string]string{}) type testCase struct { @@ -260,7 +260,7 @@ func TestIsolationFilter(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - opt := 
config.NewTestOptions() + opt := mockconfig.NewTestOptions() testCluster := mockcluster.NewCluster(ctx, opt) testCluster.SetLocationLabels([]string{"zone", "rack", "host"}) allStores := []struct { @@ -331,7 +331,7 @@ func TestPlacementGuard(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - opt := config.NewTestOptions() + opt := mockconfig.NewTestOptions() opt.SetPlacementRuleEnabled(false) testCluster := mockcluster.NewCluster(ctx, opt) testCluster.SetLocationLabels([]string{"zone"}) @@ -359,7 +359,7 @@ func TestSpecialUseFilter(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - opt := config.NewTestOptions() + opt := mockconfig.NewTestOptions() testCluster := mockcluster.NewCluster(ctx, opt) testCases := []struct { diff --git a/server/schedule/filter/healthy.go b/server/schedule/filter/healthy.go index ce9dcee793b..deb8c22a07e 100644 --- a/server/schedule/filter/healthy.go +++ b/server/schedule/filter/healthy.go @@ -16,7 +16,7 @@ package filter import ( "github.com/tikv/pd/pkg/core" - "github.com/tikv/pd/server/config" + "github.com/tikv/pd/server/schedule/config" "github.com/tikv/pd/server/schedule/placement" ) @@ -67,6 +67,6 @@ func ReplicatedRegion(cluster regionHealthCluster) func(*core.RegionInfo) bool { type regionHealthCluster interface { core.StoreSetInformer core.RegionSetInformer - GetOpts() *config.PersistOptions + GetOpts() config.Config GetRuleManager() *placement.RuleManager } diff --git a/server/schedule/filter/healthy_test.go b/server/schedule/filter/healthy_test.go index 82598e7832f..15588352554 100644 --- a/server/schedule/filter/healthy_test.go +++ b/server/schedule/filter/healthy_test.go @@ -23,7 +23,7 @@ import ( "github.com/stretchr/testify/require" "github.com/tikv/pd/pkg/core" "github.com/tikv/pd/pkg/mock/mockcluster" - "github.com/tikv/pd/server/config" + "github.com/tikv/pd/pkg/mock/mockconfig" ) func TestIsRegionHealthy(t *testing.T) { @@ -68,7 +68,7 @@ func TestIsRegionHealthy(t *testing.T) { {region(peers(1, 2, 3, 4), core.WithLearners(peers(1))), true, true, false, true, true, false}, } - opt := config.NewTestOptions() + opt := mockconfig.NewTestOptions() tc := mockcluster.NewCluster(ctx, opt) tc.AddRegionStore(1, 1) tc.AddRegionStore(2, 1) diff --git a/server/schedule/filter/region_filters_test.go b/server/schedule/filter/region_filters_test.go index 493262a6717..f7ad224ade3 100644 --- a/server/schedule/filter/region_filters_test.go +++ b/server/schedule/filter/region_filters_test.go @@ -24,7 +24,7 @@ import ( "github.com/stretchr/testify/require" "github.com/tikv/pd/pkg/core" "github.com/tikv/pd/pkg/mock/mockcluster" - "github.com/tikv/pd/server/config" + "github.com/tikv/pd/pkg/mock/mockconfig" ) func TestRegionPendingFilter(t *testing.T) { @@ -63,7 +63,7 @@ func TestRegionReplicatedFilter(t *testing.T) { re := require.New(t) ctx, cancel := context.WithCancel(context.Background()) defer cancel() - opt := config.NewTestOptions() + opt := mockconfig.NewTestOptions() opt.SetPlacementRuleEnabled(false) opt.SetMaxReplicas(3) testCluster := mockcluster.NewCluster(ctx, opt) @@ -85,7 +85,7 @@ func TestRegionEmptyFilter(t *testing.T) { re := require.New(t) ctx, cancel := context.WithCancel(context.Background()) defer cancel() - opt := config.NewTestOptions() + opt := mockconfig.NewTestOptions() opt.SetPlacementRuleEnabled(false) opt.SetMaxReplicas(3) testCluster := mockcluster.NewCluster(ctx, opt) diff --git a/server/schedule/operator/builder.go b/server/schedule/operator/builder.go index 
41a0bcef83e..5e1bb85048b 100644 --- a/server/schedule/operator/builder.go +++ b/server/schedule/operator/builder.go @@ -24,7 +24,7 @@ import ( "github.com/tikv/pd/pkg/id" "github.com/tikv/pd/pkg/utils/typeutil" "github.com/tikv/pd/pkg/versioninfo" - "github.com/tikv/pd/server/config" + "github.com/tikv/pd/server/schedule/config" "github.com/tikv/pd/server/schedule/filter" "github.com/tikv/pd/server/schedule/placement" ) @@ -32,8 +32,8 @@ import ( // ClusterInformer provides the necessary information for building operator. type ClusterInformer interface { GetBasicCluster() *core.BasicCluster - GetOpts() *config.PersistOptions - GetStoreConfig() *config.StoreConfig + GetOpts() config.Config + GetStoreConfig() config.StoreConfig GetRuleManager() *placement.RuleManager GetAllocator() id.Allocator } diff --git a/server/schedule/operator/builder_test.go b/server/schedule/operator/builder_test.go index 87993e6eac0..b017511e9ad 100644 --- a/server/schedule/operator/builder_test.go +++ b/server/schedule/operator/builder_test.go @@ -23,7 +23,8 @@ import ( "github.com/stretchr/testify/suite" "github.com/tikv/pd/pkg/core" "github.com/tikv/pd/pkg/mock/mockcluster" - "github.com/tikv/pd/server/config" + "github.com/tikv/pd/pkg/mock/mockconfig" + "github.com/tikv/pd/server/schedule/config" ) type operatorBuilderTestSuite struct { @@ -39,12 +40,10 @@ func TestOperatorBuilderTestSuite(t *testing.T) { } func (suite *operatorBuilderTestSuite) SetupTest() { - opts := config.NewTestOptions() + opts := mockconfig.NewTestOptions() suite.ctx, suite.cancel = context.WithCancel(context.Background()) suite.cluster = mockcluster.NewCluster(suite.ctx, opts) - suite.cluster.SetLabelPropertyConfig(config.LabelPropertyConfig{ - config.RejectLeader: {{Key: "noleader", Value: "true"}}, - }) + suite.cluster.SetLabelProperty(config.RejectLeader, "noleader", "true") suite.cluster.SetLocationLabels([]string{"zone", "host"}) suite.cluster.AddLabelsStore(1, 0, map[string]string{"zone": "z1", "host": "h1"}) suite.cluster.AddLabelsStore(2, 0, map[string]string{"zone": "z1", "host": "h1"}) diff --git a/server/schedule/operator/create_operator_test.go b/server/schedule/operator/create_operator_test.go index eadfe50d466..9a7c692cc4a 100644 --- a/server/schedule/operator/create_operator_test.go +++ b/server/schedule/operator/create_operator_test.go @@ -26,8 +26,9 @@ import ( "github.com/stretchr/testify/suite" "github.com/tikv/pd/pkg/core" "github.com/tikv/pd/pkg/mock/mockcluster" + "github.com/tikv/pd/pkg/mock/mockconfig" "github.com/tikv/pd/pkg/versioninfo" - "github.com/tikv/pd/server/config" + "github.com/tikv/pd/server/schedule/config" "github.com/tikv/pd/server/schedule/placement" ) @@ -44,12 +45,10 @@ func TestCreateOperatorTestSuite(t *testing.T) { } func (suite *createOperatorTestSuite) SetupTest() { - opts := config.NewTestOptions() + opts := mockconfig.NewTestOptions() suite.ctx, suite.cancel = context.WithCancel(context.Background()) suite.cluster = mockcluster.NewCluster(suite.ctx, opts) - suite.cluster.SetLabelPropertyConfig(config.LabelPropertyConfig{ - config.RejectLeader: {{Key: "noleader", Value: "true"}}, - }) + suite.cluster.SetLabelProperty(config.RejectLeader, "noleader", "true") suite.cluster.SetLocationLabels([]string{"zone", "host"}) suite.cluster.AddLabelsStore(1, 0, map[string]string{"zone": "z1", "host": "h1"}) suite.cluster.AddLabelsStore(2, 0, map[string]string{"zone": "z1", "host": "h1"}) @@ -1142,7 +1141,7 @@ func TestCreateLeaveJointStateOperatorWithoutFitRules(t *testing.T) { ctx, cancel := 
context.WithCancel(context.Background()) defer cancel() - opts := config.NewTestOptions() + opts := mockconfig.NewTestOptions() cluster := mockcluster.NewCluster(ctx, opts) re.NoError(cluster.SetRules([]*placement.Rule{ { diff --git a/server/schedule/operator/operator_test.go b/server/schedule/operator/operator_test.go index acc48dbfe0e..897d30c7f1d 100644 --- a/server/schedule/operator/operator_test.go +++ b/server/schedule/operator/operator_test.go @@ -27,7 +27,8 @@ import ( "github.com/tikv/pd/pkg/core" "github.com/tikv/pd/pkg/core/storelimit" "github.com/tikv/pd/pkg/mock/mockcluster" - "github.com/tikv/pd/server/config" + "github.com/tikv/pd/pkg/mock/mockconfig" + "github.com/tikv/pd/server/schedule/config" ) type operatorTestSuite struct { @@ -43,14 +44,12 @@ func TestOperatorTestSuite(t *testing.T) { } func (suite *operatorTestSuite) SetupTest() { - cfg := config.NewTestOptions() + cfg := mockconfig.NewTestOptions() suite.ctx, suite.cancel = context.WithCancel(context.Background()) suite.cluster = mockcluster.NewCluster(suite.ctx, cfg) suite.cluster.SetMaxMergeRegionSize(2) suite.cluster.SetMaxMergeRegionKeys(2) - suite.cluster.SetLabelPropertyConfig(config.LabelPropertyConfig{ - config.RejectLeader: {{Key: "reject", Value: "leader"}}, - }) + suite.cluster.SetLabelProperty(config.RejectLeader, "reject", "leader") stores := map[uint64][]string{ 1: {}, 2: {}, 3: {}, 4: {}, 5: {}, 6: {}, 7: {"reject", "leader"}, diff --git a/server/schedule/operator/step_test.go b/server/schedule/operator/step_test.go index bfcd35dd673..3b797575149 100644 --- a/server/schedule/operator/step_test.go +++ b/server/schedule/operator/step_test.go @@ -22,7 +22,7 @@ import ( "github.com/stretchr/testify/suite" "github.com/tikv/pd/pkg/core" "github.com/tikv/pd/pkg/mock/mockcluster" - "github.com/tikv/pd/server/config" + "github.com/tikv/pd/pkg/mock/mockconfig" ) type operatorStepTestSuite struct { @@ -43,7 +43,7 @@ type testCase struct { } func (suite *operatorStepTestSuite) SetupTest() { - suite.cluster = mockcluster.NewCluster(context.Background(), config.NewTestOptions()) + suite.cluster = mockcluster.NewCluster(context.Background(), mockconfig.NewTestOptions()) for i := 1; i <= 10; i++ { suite.cluster.PutStoreWithLabels(uint64(i)) } diff --git a/server/schedule/operator_controller_test.go b/server/schedule/operator_controller_test.go index 2417e697fc0..55089b6db7d 100644 --- a/server/schedule/operator_controller_test.go +++ b/server/schedule/operator_controller_test.go @@ -30,7 +30,7 @@ import ( "github.com/tikv/pd/pkg/core" "github.com/tikv/pd/pkg/core/storelimit" "github.com/tikv/pd/pkg/mock/mockcluster" - "github.com/tikv/pd/server/config" + "github.com/tikv/pd/pkg/mock/mockconfig" "github.com/tikv/pd/server/schedule/hbstream" "github.com/tikv/pd/server/schedule/labeler" "github.com/tikv/pd/server/schedule/operator" @@ -58,7 +58,7 @@ func (suite *operatorControllerTestSuite) TearDownSuite() { // issue #1338 func (suite *operatorControllerTestSuite) TestGetOpInfluence() { - opt := config.NewTestOptions() + opt := mockconfig.NewTestOptions() tc := mockcluster.NewCluster(suite.ctx, opt) oc := NewOperatorController(suite.ctx, tc, nil) tc.AddLeaderStore(2, 1) @@ -100,7 +100,7 @@ func (suite *operatorControllerTestSuite) TestGetOpInfluence() { } func (suite *operatorControllerTestSuite) TestOperatorStatus() { - opt := config.NewTestOptions() + opt := mockconfig.NewTestOptions() tc := mockcluster.NewCluster(suite.ctx, opt) stream := hbstream.NewTestHeartbeatStreams(suite.ctx, tc.ID, tc, false /* no need to run 
*/) oc := NewOperatorController(suite.ctx, tc, stream) @@ -135,7 +135,7 @@ func (suite *operatorControllerTestSuite) TestOperatorStatus() { } func (suite *operatorControllerTestSuite) TestFastFailOperator() { - opt := config.NewTestOptions() + opt := mockconfig.NewTestOptions() tc := mockcluster.NewCluster(suite.ctx, opt) stream := hbstream.NewTestHeartbeatStreams(suite.ctx, tc.ID, tc, false /* no need to run */) oc := NewOperatorController(suite.ctx, tc, stream) @@ -169,7 +169,7 @@ func (suite *operatorControllerTestSuite) TestFastFailOperator() { // Issue 3353 func (suite *operatorControllerTestSuite) TestFastFailWithUnhealthyStore() { - opt := config.NewTestOptions() + opt := mockconfig.NewTestOptions() tc := mockcluster.NewCluster(suite.ctx, opt) stream := hbstream.NewTestHeartbeatStreams(suite.ctx, tc.ID, tc, false /* no need to run */) oc := NewOperatorController(suite.ctx, tc, stream) @@ -189,7 +189,7 @@ func (suite *operatorControllerTestSuite) TestFastFailWithUnhealthyStore() { func (suite *operatorControllerTestSuite) TestCheckAddUnexpectedStatus() { suite.NoError(failpoint.Disable("github.com/tikv/pd/server/schedule/unexpectedOperator")) - opt := config.NewTestOptions() + opt := mockconfig.NewTestOptions() tc := mockcluster.NewCluster(suite.ctx, opt) stream := hbstream.NewTestHeartbeatStreams(suite.ctx, tc.ID, tc, false /* no need to run */) oc := NewOperatorController(suite.ctx, tc, stream) @@ -254,7 +254,7 @@ func (suite *operatorControllerTestSuite) TestCheckAddUnexpectedStatus() { // issue #1716 func (suite *operatorControllerTestSuite) TestConcurrentRemoveOperator() { - opt := config.NewTestOptions() + opt := mockconfig.NewTestOptions() tc := mockcluster.NewCluster(suite.ctx, opt) stream := hbstream.NewTestHeartbeatStreams(suite.ctx, tc.ID, tc, false /* no need to run */) oc := NewOperatorController(suite.ctx, tc, stream) @@ -295,7 +295,7 @@ func (suite *operatorControllerTestSuite) TestConcurrentRemoveOperator() { } func (suite *operatorControllerTestSuite) TestPollDispatchRegion() { - opt := config.NewTestOptions() + opt := mockconfig.NewTestOptions() tc := mockcluster.NewCluster(suite.ctx, opt) stream := hbstream.NewTestHeartbeatStreams(suite.ctx, tc.ID, tc, false /* no need to run */) oc := NewOperatorController(suite.ctx, tc, stream) @@ -368,7 +368,7 @@ func (suite *operatorControllerTestSuite) TestPollDispatchRegion() { } func (suite *operatorControllerTestSuite) TestStoreLimit() { - opt := config.NewTestOptions() + opt := mockconfig.NewTestOptions() tc := mockcluster.NewCluster(suite.ctx, opt) stream := hbstream.NewTestHeartbeatStreams(suite.ctx, tc.ID, tc, false /* no need to run */) oc := NewOperatorController(suite.ctx, tc, stream) @@ -436,7 +436,7 @@ func (suite *operatorControllerTestSuite) TestStoreLimit() { // #1652 func (suite *operatorControllerTestSuite) TestDispatchOutdatedRegion() { - cluster := mockcluster.NewCluster(suite.ctx, config.NewTestOptions()) + cluster := mockcluster.NewCluster(suite.ctx, mockconfig.NewTestOptions()) stream := hbstream.NewTestHeartbeatStreams(suite.ctx, cluster.ID, cluster, false /* no need to run */) controller := NewOperatorController(suite.ctx, cluster, stream) @@ -486,7 +486,7 @@ func (suite *operatorControllerTestSuite) TestDispatchOutdatedRegion() { } func (suite *operatorControllerTestSuite) TestCalcInfluence() { - cluster := mockcluster.NewCluster(suite.ctx, config.NewTestOptions()) + cluster := mockcluster.NewCluster(suite.ctx, mockconfig.NewTestOptions()) stream := hbstream.NewTestHeartbeatStreams(suite.ctx, 
cluster.ID, cluster, false /* no need to run */) controller := NewOperatorController(suite.ctx, cluster, stream) @@ -563,7 +563,7 @@ func (suite *operatorControllerTestSuite) TestCalcInfluence() { } func (suite *operatorControllerTestSuite) TestDispatchUnfinishedStep() { - cluster := mockcluster.NewCluster(suite.ctx, config.NewTestOptions()) + cluster := mockcluster.NewCluster(suite.ctx, mockconfig.NewTestOptions()) stream := hbstream.NewTestHeartbeatStreams(suite.ctx, cluster.ID, cluster, false /* no need to run */) controller := NewOperatorController(suite.ctx, cluster, stream) @@ -699,7 +699,7 @@ func (suite *operatorControllerTestSuite) checkRemoveOperatorSuccess(oc *Operato } func (suite *operatorControllerTestSuite) TestAddWaitingOperator() { - opts := config.NewTestOptions() + opts := mockconfig.NewTestOptions() cluster := mockcluster.NewCluster(suite.ctx, opts) stream := hbstream.NewTestHeartbeatStreams(suite.ctx, cluster.ID, cluster, false /* no need to run */) controller := NewOperatorController(suite.ctx, cluster, stream) @@ -783,7 +783,7 @@ func (suite *operatorControllerTestSuite) TestAddWaitingOperator() { // issue #5279 func (suite *operatorControllerTestSuite) TestInvalidStoreId() { - opt := config.NewTestOptions() + opt := mockconfig.NewTestOptions() tc := mockcluster.NewCluster(suite.ctx, opt) stream := hbstream.NewTestHeartbeatStreams(suite.ctx, tc.ID, tc, false /* no need to run */) oc := NewOperatorController(suite.ctx, tc, stream) diff --git a/server/schedule/placement/rule_manager.go b/server/schedule/placement/rule_manager.go index 38a7f4cb465..3417eb5c32c 100644 --- a/server/schedule/placement/rule_manager.go +++ b/server/schedule/placement/rule_manager.go @@ -31,7 +31,7 @@ import ( "github.com/tikv/pd/pkg/slice" "github.com/tikv/pd/pkg/storage/endpoint" "github.com/tikv/pd/pkg/utils/syncutil" - "github.com/tikv/pd/server/config" + "github.com/tikv/pd/server/schedule/config" "go.uber.org/zap" "golang.org/x/exp/slices" ) @@ -49,15 +49,15 @@ type RuleManager struct { keyType string storeSetInformer core.StoreSetInformer cache *RegionRuleFitCacheManager - opt *config.PersistOptions + conf config.Config } // NewRuleManager creates a RuleManager instance. 
-func NewRuleManager(storage endpoint.RuleStorage, storeSetInformer core.StoreSetInformer, opt *config.PersistOptions) *RuleManager { +func NewRuleManager(storage endpoint.RuleStorage, storeSetInformer core.StoreSetInformer, conf config.Config) *RuleManager { return &RuleManager{ storage: storage, storeSetInformer: storeSetInformer, - opt: opt, + conf: conf, ruleConfig: newRuleConfig(), cache: NewRegionRuleFitCacheManager(), } @@ -338,12 +338,12 @@ func (m *RuleManager) FitRegion(storeSet StoreSet, region *core.RegionInfo) (fit regionStores := getStoresByRegion(storeSet, region) rules := m.GetRulesForApplyRegion(region) var isCached bool - if m.opt.IsPlacementRulesCacheEnabled() { + if m.conf.IsPlacementRulesCacheEnabled() { if isCached, fit = m.cache.CheckAndGetCache(region, rules, regionStores); isCached && fit != nil { return fit } } - fit = fitRegion(regionStores, region, rules, m.opt.IsWitnessAllowed()) + fit = fitRegion(regionStores, region, rules, m.conf.IsWitnessAllowed()) fit.regionStores = regionStores fit.rules = rules if isCached { diff --git a/server/schedule/placement/rule_manager_test.go b/server/schedule/placement/rule_manager_test.go index 8a62116af8c..1ceeb61655f 100644 --- a/server/schedule/placement/rule_manager_test.go +++ b/server/schedule/placement/rule_manager_test.go @@ -22,16 +22,16 @@ import ( "github.com/stretchr/testify/require" "github.com/tikv/pd/pkg/codec" "github.com/tikv/pd/pkg/core" + "github.com/tikv/pd/pkg/mock/mockconfig" "github.com/tikv/pd/pkg/storage/endpoint" "github.com/tikv/pd/pkg/storage/kv" - "github.com/tikv/pd/server/config" ) func newTestManager(t *testing.T) (endpoint.RuleStorage, *RuleManager) { re := require.New(t) store := endpoint.NewStorageEndpoint(kv.NewMemoryKV(), nil) var err error - manager := NewRuleManager(store, nil, config.NewTestOptions()) + manager := NewRuleManager(store, nil, mockconfig.NewTestOptions()) err = manager.Initialize(3, []string{"zone", "rack", "host"}) re.NoError(err) return store, manager @@ -425,7 +425,7 @@ func TestCheckApplyRules(t *testing.T) { func TestCacheManager(t *testing.T) { re := require.New(t) _, manager := newTestManager(t) - manager.opt.SetPlacementRulesCacheEnabled(true) + manager.conf.SetPlacementRulesCacheEnabled(true) rules := addExtraRules(0) re.NoError(manager.SetRules(rules)) stores := makeStores() diff --git a/server/schedule/region_scatterer_test.go b/server/schedule/region_scatterer_test.go index 132264a577c..552f396658b 100644 --- a/server/schedule/region_scatterer_test.go +++ b/server/schedule/region_scatterer_test.go @@ -29,8 +29,8 @@ import ( "github.com/stretchr/testify/require" "github.com/tikv/pd/pkg/core" "github.com/tikv/pd/pkg/mock/mockcluster" + "github.com/tikv/pd/pkg/mock/mockconfig" "github.com/tikv/pd/pkg/versioninfo" - "github.com/tikv/pd/server/config" "github.com/tikv/pd/server/schedule/hbstream" "github.com/tikv/pd/server/schedule/operator" "github.com/tikv/pd/server/schedule/placement" @@ -88,7 +88,7 @@ func checkOperator(re *require.Assertions, op *operator.Operator) { func scatter(re *require.Assertions, numStores, numRegions uint64, useRules bool) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - opt := config.NewTestOptions() + opt := mockconfig.NewTestOptions() tc := mockcluster.NewCluster(ctx, opt) stream := hbstream.NewTestHeartbeatStreams(ctx, tc.ID, tc, false) oc := NewOperatorController(ctx, tc, stream) @@ -145,7 +145,7 @@ func scatter(re *require.Assertions, numStores, numRegions uint64, useRules bool func scatterSpecial(re 
*require.Assertions, numOrdinaryStores, numSpecialStores, numRegions uint64) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - opt := config.NewTestOptions() + opt := mockconfig.NewTestOptions() tc := mockcluster.NewCluster(ctx, opt) stream := hbstream.NewTestHeartbeatStreams(ctx, tc.ID, tc, false) oc := NewOperatorController(ctx, tc, stream) @@ -223,7 +223,7 @@ func TestStoreLimit(t *testing.T) { re := require.New(t) ctx, cancel := context.WithCancel(context.Background()) defer cancel() - opt := config.NewTestOptions() + opt := mockconfig.NewTestOptions() tc := mockcluster.NewCluster(ctx, opt) stream := hbstream.NewTestHeartbeatStreams(ctx, tc.ID, tc, false) oc := NewOperatorController(ctx, tc, stream) @@ -255,7 +255,7 @@ func TestScatterCheck(t *testing.T) { re := require.New(t) ctx, cancel := context.WithCancel(context.Background()) defer cancel() - opt := config.NewTestOptions() + opt := mockconfig.NewTestOptions() tc := mockcluster.NewCluster(ctx, opt) stream := hbstream.NewTestHeartbeatStreams(ctx, tc.ID, tc, false) oc := NewOperatorController(ctx, tc, stream) @@ -304,7 +304,7 @@ func TestSomeStoresFilteredScatterGroupInConcurrency(t *testing.T) { re := require.New(t) ctx, cancel := context.WithCancel(context.Background()) defer cancel() - opt := config.NewTestOptions() + opt := mockconfig.NewTestOptions() tc := mockcluster.NewCluster(ctx, opt) stream := hbstream.NewTestHeartbeatStreams(ctx, tc.ID, tc, false) oc := NewOperatorController(ctx, tc, stream) @@ -349,7 +349,7 @@ func TestScatterGroupInConcurrency(t *testing.T) { re := require.New(t) ctx, cancel := context.WithCancel(context.Background()) defer cancel() - opt := config.NewTestOptions() + opt := mockconfig.NewTestOptions() tc := mockcluster.NewCluster(ctx, opt) stream := hbstream.NewTestHeartbeatStreams(ctx, tc.ID, tc, false) oc := NewOperatorController(ctx, tc, stream) @@ -421,7 +421,7 @@ func TestScatterForManyRegion(t *testing.T) { re := require.New(t) ctx, cancel := context.WithCancel(context.Background()) defer cancel() - opt := config.NewTestOptions() + opt := mockconfig.NewTestOptions() tc := mockcluster.NewCluster(ctx, opt) stream := hbstream.NewTestHeartbeatStreams(ctx, tc.ID, tc, false) oc := NewOperatorController(ctx, tc, stream) @@ -449,7 +449,7 @@ func TestScattersGroup(t *testing.T) { re := require.New(t) ctx, cancel := context.WithCancel(context.Background()) defer cancel() - opt := config.NewTestOptions() + opt := mockconfig.NewTestOptions() tc := mockcluster.NewCluster(ctx, opt) stream := hbstream.NewTestHeartbeatStreams(ctx, tc.ID, tc, false) oc := NewOperatorController(ctx, tc, stream) @@ -538,7 +538,7 @@ func TestRegionFromDifferentGroups(t *testing.T) { re := require.New(t) ctx, cancel := context.WithCancel(context.Background()) defer cancel() - opt := config.NewTestOptions() + opt := mockconfig.NewTestOptions() tc := mockcluster.NewCluster(ctx, opt) stream := hbstream.NewTestHeartbeatStreams(ctx, tc.ID, tc, false) oc := NewOperatorController(ctx, tc, stream) @@ -574,7 +574,7 @@ func TestRegionHasLearner(t *testing.T) { re := require.New(t) ctx, cancel := context.WithCancel(context.Background()) defer cancel() - opt := config.NewTestOptions() + opt := mockconfig.NewTestOptions() tc := mockcluster.NewCluster(ctx, opt) stream := hbstream.NewTestHeartbeatStreams(ctx, tc.ID, tc, false) oc := NewOperatorController(ctx, tc, stream) @@ -662,7 +662,7 @@ func TestSelectedStoresTooFewPeers(t *testing.T) { re := require.New(t) ctx, cancel := context.WithCancel(context.Background()) defer 
cancel() - opt := config.NewTestOptions() + opt := mockconfig.NewTestOptions() tc := mockcluster.NewCluster(ctx, opt) stream := hbstream.NewTestHeartbeatStreams(ctx, tc.ID, tc, false) oc := NewOperatorController(ctx, tc, stream) @@ -699,7 +699,7 @@ func TestSelectedStoresTooManyPeers(t *testing.T) { re := require.New(t) ctx, cancel := context.WithCancel(context.Background()) defer cancel() - opt := config.NewTestOptions() + opt := mockconfig.NewTestOptions() tc := mockcluster.NewCluster(ctx, opt) stream := hbstream.NewTestHeartbeatStreams(ctx, tc.ID, tc, false) oc := NewOperatorController(ctx, tc, stream) @@ -736,7 +736,7 @@ func TestBalanceRegion(t *testing.T) { re := require.New(t) ctx, cancel := context.WithCancel(context.Background()) defer cancel() - opt := config.NewTestOptions() + opt := mockconfig.NewTestOptions() opt.SetLocationLabels([]string{"host"}) tc := mockcluster.NewCluster(ctx, opt) stream := hbstream.NewTestHeartbeatStreams(ctx, tc.ID, tc, false) diff --git a/server/schedule/region_splitter_test.go b/server/schedule/region_splitter_test.go index 10bf81411fe..eb91421d342 100644 --- a/server/schedule/region_splitter_test.go +++ b/server/schedule/region_splitter_test.go @@ -22,7 +22,7 @@ import ( "github.com/stretchr/testify/suite" "github.com/tikv/pd/pkg/core" "github.com/tikv/pd/pkg/mock/mockcluster" - "github.com/tikv/pd/server/config" + "github.com/tikv/pd/pkg/mock/mockconfig" ) type mockSplitRegionsHandler struct { @@ -81,7 +81,7 @@ func (suite *regionSplitterTestSuite) TearDownTest() { } func (suite *regionSplitterTestSuite) TestRegionSplitter() { - opt := config.NewTestOptions() + opt := mockconfig.NewTestOptions() opt.SetPlacementRuleEnabled(false) tc := mockcluster.NewCluster(suite.ctx, opt) handler := newMockSplitRegionsHandler() @@ -108,7 +108,7 @@ func (suite *regionSplitterTestSuite) TestRegionSplitter() { } func (suite *regionSplitterTestSuite) TestGroupKeysByRegion() { - opt := config.NewTestOptions() + opt := mockconfig.NewTestOptions() opt.SetPlacementRuleEnabled(false) tc := mockcluster.NewCluster(suite.ctx, opt) handler := newMockSplitRegionsHandler() diff --git a/server/schedule/scheduler.go b/server/schedule/scheduler.go index e974041fa69..f14b7ee79fb 100644 --- a/server/schedule/scheduler.go +++ b/server/schedule/scheduler.go @@ -24,7 +24,7 @@ import ( "github.com/pingcap/log" "github.com/tikv/pd/pkg/errs" "github.com/tikv/pd/pkg/storage/endpoint" - "github.com/tikv/pd/server/config" + "github.com/tikv/pd/server/schedule/config" "github.com/tikv/pd/server/schedule/operator" "github.com/tikv/pd/server/schedule/plan" "go.uber.org/zap" diff --git a/server/schedulers/balance_benchmark_test.go b/server/schedule/schedulers/balance_benchmark_test.go similarity index 94% rename from server/schedulers/balance_benchmark_test.go rename to server/schedule/schedulers/balance_benchmark_test.go index 7b2c31f8377..dbf736c5421 100644 --- a/server/schedulers/balance_benchmark_test.go +++ b/server/schedule/schedulers/balance_benchmark_test.go @@ -21,6 +21,7 @@ import ( "github.com/pingcap/kvproto/pkg/metapb" "github.com/stretchr/testify/assert" "github.com/tikv/pd/pkg/mock/mockcluster" + "github.com/tikv/pd/pkg/mock/mockconfig" "github.com/tikv/pd/server/schedule" "github.com/tikv/pd/server/schedule/operator" "github.com/tikv/pd/server/schedule/placement" @@ -42,7 +43,11 @@ var ( // the tolerate define storeCount that store can elect candidate but not should balance // so the case bench the worst scene func newBenchCluster(ruleEnable, labelEnable bool, tombstoneEnable 
bool) (context.CancelFunc, *mockcluster.Cluster, *schedule.OperatorController) { - cancel, opt, tc, oc := prepareSchedulersTest() + Register() + ctx, cancel := context.WithCancel(context.Background()) + opt := mockconfig.NewTestOptions() + tc := mockcluster.NewCluster(ctx, opt) + oc := schedule.NewOperatorController(ctx, tc, nil) opt.GetScheduleConfig().TolerantSizeRatio = float64(storeCount) opt.SetPlacementRuleEnabled(ruleEnable) @@ -87,7 +92,11 @@ func newBenchCluster(ruleEnable, labelEnable bool, tombstoneEnable bool) (contex } func newBenchBigCluster(storeNumInOneRack, regionNum int) (context.CancelFunc, *mockcluster.Cluster, *schedule.OperatorController) { - cancel, opt, tc, oc := prepareSchedulersTest() + Register() + ctx, cancel := context.WithCancel(context.Background()) + opt := mockconfig.NewTestOptions() + tc := mockcluster.NewCluster(ctx, opt) + oc := schedule.NewOperatorController(ctx, tc, nil) opt.GetScheduleConfig().TolerantSizeRatio = float64(storeCount) opt.SetPlacementRuleEnabled(true) diff --git a/server/schedulers/balance_leader.go b/server/schedule/schedulers/balance_leader.go similarity index 98% rename from server/schedulers/balance_leader.go rename to server/schedule/schedulers/balance_leader.go index d451897baa6..46fe1dced11 100644 --- a/server/schedulers/balance_leader.go +++ b/server/schedule/schedulers/balance_leader.go @@ -451,12 +451,12 @@ func (l *balanceLeaderScheduler) transferLeaderOut(solver *solver, collector *pl defer func() { solver.step-- }() targets := solver.GetFollowerStores(solver.region) finalFilters := l.filters - opts := solver.GetOpts() - if leaderFilter := filter.NewPlacementLeaderSafeguard(l.GetName(), opts, solver.GetBasicCluster(), solver.GetRuleManager(), solver.region, solver.source, false /*allowMoveLeader*/); leaderFilter != nil { + conf := solver.GetOpts() + if leaderFilter := filter.NewPlacementLeaderSafeguard(l.GetName(), conf, solver.GetBasicCluster(), solver.GetRuleManager(), solver.region, solver.source, false /*allowMoveLeader*/); leaderFilter != nil { finalFilters = append(l.filters, leaderFilter) } - targets = filter.SelectTargetStores(targets, finalFilters, opts, collector, l.filterCounter) - leaderSchedulePolicy := opts.GetLeaderSchedulePolicy() + targets = filter.SelectTargetStores(targets, finalFilters, conf, collector, l.filterCounter) + leaderSchedulePolicy := conf.GetLeaderSchedulePolicy() sort.Slice(targets, func(i, j int) bool { iOp := solver.GetOpInfluence(targets[i].GetID()) jOp := solver.GetOpInfluence(targets[j].GetID()) @@ -500,12 +500,12 @@ func (l *balanceLeaderScheduler) transferLeaderIn(solver *solver, collector *pla return nil } finalFilters := l.filters - opts := solver.GetOpts() - if leaderFilter := filter.NewPlacementLeaderSafeguard(l.GetName(), opts, solver.GetBasicCluster(), solver.GetRuleManager(), solver.region, solver.source, false /*allowMoveLeader*/); leaderFilter != nil { + conf := solver.GetOpts() + if leaderFilter := filter.NewPlacementLeaderSafeguard(l.GetName(), conf, solver.GetBasicCluster(), solver.GetRuleManager(), solver.region, solver.source, false /*allowMoveLeader*/); leaderFilter != nil { finalFilters = append(l.filters, leaderFilter) } target := filter.NewCandidates([]*core.StoreInfo{solver.target}). - FilterTarget(opts, nil, l.filterCounter, finalFilters...). + FilterTarget(conf, nil, l.filterCounter, finalFilters...). 
PickFirst() if target == nil { log.Debug("region has no target store", zap.String("scheduler", l.GetName()), zap.Uint64("region-id", solver.region.GetID())) diff --git a/server/schedulers/balance_leader_test.go b/server/schedule/schedulers/balance_leader_test.go similarity index 100% rename from server/schedulers/balance_leader_test.go rename to server/schedule/schedulers/balance_leader_test.go diff --git a/server/schedulers/balance_plan.go b/server/schedule/schedulers/balance_plan.go similarity index 100% rename from server/schedulers/balance_plan.go rename to server/schedule/schedulers/balance_plan.go diff --git a/server/schedulers/balance_plan_test.go b/server/schedule/schedulers/balance_plan_test.go similarity index 100% rename from server/schedulers/balance_plan_test.go rename to server/schedule/schedulers/balance_plan_test.go diff --git a/server/schedulers/balance_region.go b/server/schedule/schedulers/balance_region.go similarity index 100% rename from server/schedulers/balance_region.go rename to server/schedule/schedulers/balance_region.go diff --git a/server/schedulers/balance_test.go b/server/schedule/schedulers/balance_test.go similarity index 99% rename from server/schedulers/balance_test.go rename to server/schedule/schedulers/balance_test.go index 99bd181072c..418d74630a5 100644 --- a/server/schedulers/balance_test.go +++ b/server/schedule/schedulers/balance_test.go @@ -30,8 +30,8 @@ import ( "github.com/tikv/pd/pkg/storage" "github.com/tikv/pd/pkg/utils/testutil" "github.com/tikv/pd/pkg/versioninfo" - "github.com/tikv/pd/server/config" "github.com/tikv/pd/server/schedule" + "github.com/tikv/pd/server/schedule/config" "github.com/tikv/pd/server/schedule/operator" "github.com/tikv/pd/server/schedule/plan" ) @@ -229,7 +229,7 @@ type balanceLeaderSchedulerTestSuite struct { tc *mockcluster.Cluster lb schedule.Scheduler oc *schedule.OperatorController - opt *config.PersistOptions + conf config.Config } func TestBalanceLeaderSchedulerTestSuite(t *testing.T) { @@ -237,7 +237,7 @@ func TestBalanceLeaderSchedulerTestSuite(t *testing.T) { } func (suite *balanceLeaderSchedulerTestSuite) SetupTest() { - suite.cancel, suite.opt, suite.tc, suite.oc = prepareSchedulersTest() + suite.cancel, suite.conf, suite.tc, suite.oc = prepareSchedulersTest() lb, err := schedule.CreateScheduler(BalanceLeaderType, suite.oc, storage.NewStorageWithMemoryBackend(), schedule.ConfigSliceDecoder(BalanceLeaderType, []string{"", ""})) suite.NoError(err) suite.lb = lb diff --git a/server/schedulers/balance_witness.go b/server/schedule/schedulers/balance_witness.go similarity index 100% rename from server/schedulers/balance_witness.go rename to server/schedule/schedulers/balance_witness.go diff --git a/server/schedulers/balance_witness_test.go b/server/schedule/schedulers/balance_witness_test.go similarity index 96% rename from server/schedulers/balance_witness_test.go rename to server/schedule/schedulers/balance_witness_test.go index 2baea5cf115..73453cd0478 100644 --- a/server/schedulers/balance_witness_test.go +++ b/server/schedule/schedulers/balance_witness_test.go @@ -21,8 +21,8 @@ import ( "github.com/stretchr/testify/suite" "github.com/tikv/pd/pkg/mock/mockcluster" "github.com/tikv/pd/pkg/storage" - "github.com/tikv/pd/server/config" "github.com/tikv/pd/server/schedule" + "github.com/tikv/pd/server/schedule/config" "github.com/tikv/pd/server/schedule/operator" "github.com/tikv/pd/server/schedule/placement" ) @@ -37,11 +37,11 @@ type balanceWitnessSchedulerTestSuite struct { tc *mockcluster.Cluster lb 
schedule.Scheduler oc *schedule.OperatorController - opt *config.PersistOptions + conf config.Config } func (suite *balanceWitnessSchedulerTestSuite) SetupTest() { - suite.cancel, suite.opt, suite.tc, suite.oc = prepareSchedulersTest() + suite.cancel, suite.conf, suite.tc, suite.oc = prepareSchedulersTest() suite.tc.RuleManager.SetRules([]*placement.Rule{ { GroupID: "pd", diff --git a/server/schedulers/base_scheduler.go b/server/schedule/schedulers/base_scheduler.go similarity index 100% rename from server/schedulers/base_scheduler.go rename to server/schedule/schedulers/base_scheduler.go diff --git a/server/schedulers/evict_leader.go b/server/schedule/schedulers/evict_leader.go similarity index 100% rename from server/schedulers/evict_leader.go rename to server/schedule/schedulers/evict_leader.go diff --git a/server/schedulers/evict_leader_test.go b/server/schedule/schedulers/evict_leader_test.go similarity index 100% rename from server/schedulers/evict_leader_test.go rename to server/schedule/schedulers/evict_leader_test.go diff --git a/server/schedulers/evict_slow_store.go b/server/schedule/schedulers/evict_slow_store.go similarity index 100% rename from server/schedulers/evict_slow_store.go rename to server/schedule/schedulers/evict_slow_store.go diff --git a/server/schedulers/evict_slow_store_test.go b/server/schedule/schedulers/evict_slow_store_test.go similarity index 98% rename from server/schedulers/evict_slow_store_test.go rename to server/schedule/schedulers/evict_slow_store_test.go index 9f5f9c290f6..c7617da6369 100644 --- a/server/schedulers/evict_slow_store_test.go +++ b/server/schedule/schedulers/evict_slow_store_test.go @@ -130,7 +130,7 @@ func (suite *evictSlowStoreTestSuite) TestEvictSlowStorePrepare() { } func (suite *evictSlowStoreTestSuite) TestEvictSlowStorePersistFail() { - persisFail := "github.com/tikv/pd/server/schedulers/persistFail" + persisFail := "github.com/tikv/pd/server/schedule/schedulers/persistFail" suite.NoError(failpoint.Enable(persisFail, "return(true)")) storeInfo := suite.tc.GetStore(1) diff --git a/server/schedulers/evict_slow_trend.go b/server/schedule/schedulers/evict_slow_trend.go similarity index 100% rename from server/schedulers/evict_slow_trend.go rename to server/schedule/schedulers/evict_slow_trend.go diff --git a/server/schedulers/evict_slow_trend_test.go b/server/schedule/schedulers/evict_slow_trend_test.go similarity index 100% rename from server/schedulers/evict_slow_trend_test.go rename to server/schedule/schedulers/evict_slow_trend_test.go diff --git a/server/schedulers/grant_hot_region.go b/server/schedule/schedulers/grant_hot_region.go similarity index 100% rename from server/schedulers/grant_hot_region.go rename to server/schedule/schedulers/grant_hot_region.go diff --git a/server/schedulers/grant_leader.go b/server/schedule/schedulers/grant_leader.go similarity index 100% rename from server/schedulers/grant_leader.go rename to server/schedule/schedulers/grant_leader.go diff --git a/server/schedulers/hot_region.go b/server/schedule/schedulers/hot_region.go similarity index 100% rename from server/schedulers/hot_region.go rename to server/schedule/schedulers/hot_region.go diff --git a/server/schedulers/hot_region_config.go b/server/schedule/schedulers/hot_region_config.go similarity index 100% rename from server/schedulers/hot_region_config.go rename to server/schedule/schedulers/hot_region_config.go diff --git a/server/schedulers/hot_region_test.go b/server/schedule/schedulers/hot_region_test.go similarity index 99% rename from 
server/schedulers/hot_region_test.go rename to server/schedule/schedulers/hot_region_test.go index a3d60bc041b..a0521b1bbe2 100644 --- a/server/schedulers/hot_region_test.go +++ b/server/schedule/schedulers/hot_region_test.go @@ -279,7 +279,7 @@ func checkHotWriteRegionScheduleByteRateOnly(re *require.Assertions, enablePlace tc.SetHotRegionScheduleLimit(0) re.False(hb.IsScheduleAllowed(tc)) clearPendingInfluence(hb.(*hotScheduler)) - tc.SetHotRegionScheduleLimit(int(opt.GetScheduleConfig().HotRegionScheduleLimit)) + tc.SetHotRegionScheduleLimit(int(opt.GetHotRegionScheduleLimit())) for i := 0; i < 20; i++ { ops, _ := hb.Schedule(tc, false) @@ -546,9 +546,9 @@ func TestHotWriteRegionScheduleByteRateOnlyWithTiFlash(t *testing.T) { hb.stLoadInfos[writePeer][8].LoadPred.Expect.Loads, []float64{regionBytesSum / aliveTiFlashCount, regionKeysSum / aliveTiFlashCount, 0})) // check IsTraceRegionFlow == false - pdServerCfg := tc.GetOpts().GetPDServerConfig() + pdServerCfg := tc.GetPDServerConfig() pdServerCfg.FlowRoundByDigit = 8 - tc.GetOpts().SetPDServerConfig(pdServerCfg) + tc.SetPDServerConfig(pdServerCfg) clearPendingInfluence(hb) ops, _ = hb.Schedule(tc, false) re.NotEmpty(ops) @@ -558,7 +558,7 @@ func TestHotWriteRegionScheduleByteRateOnlyWithTiFlash(t *testing.T) { []float64{hotRegionBytesSum / aliveTiFlashCount, hotRegionKeysSum / aliveTiFlashCount, 0})) // revert pdServerCfg.FlowRoundByDigit = 3 - tc.GetOpts().SetPDServerConfig(pdServerCfg) + tc.SetPDServerConfig(pdServerCfg) } // Will transfer a hot region from store 1, because the total count of peers // which is hot for store 1 is larger than other stores. @@ -1446,7 +1446,6 @@ func TestHotCacheUpdateCache(t *testing.T) { func TestHotCacheKeyThresholds(t *testing.T) { re := require.New(t) - statistics.ThresholdsUpdateInterval = 0 defer func() { statistics.ThresholdsUpdateInterval = 8 * time.Second diff --git a/server/schedulers/hot_region_v2.go b/server/schedule/schedulers/hot_region_v2.go similarity index 100% rename from server/schedulers/hot_region_v2.go rename to server/schedule/schedulers/hot_region_v2.go diff --git a/server/schedulers/hot_region_v2_test.go b/server/schedule/schedulers/hot_region_v2_test.go similarity index 100% rename from server/schedulers/hot_region_v2_test.go rename to server/schedule/schedulers/hot_region_v2_test.go diff --git a/server/schedulers/init.go b/server/schedule/schedulers/init.go similarity index 100% rename from server/schedulers/init.go rename to server/schedule/schedulers/init.go diff --git a/server/schedulers/label.go b/server/schedule/schedulers/label.go similarity index 99% rename from server/schedulers/label.go rename to server/schedule/schedulers/label.go index 2f85021b624..5a2c0c8144d 100644 --- a/server/schedulers/label.go +++ b/server/schedule/schedulers/label.go @@ -18,8 +18,8 @@ import ( "github.com/pingcap/log" "github.com/tikv/pd/pkg/core" "github.com/tikv/pd/pkg/errs" - "github.com/tikv/pd/server/config" "github.com/tikv/pd/server/schedule" + "github.com/tikv/pd/server/schedule/config" "github.com/tikv/pd/server/schedule/filter" "github.com/tikv/pd/server/schedule/operator" "github.com/tikv/pd/server/schedule/plan" diff --git a/server/schedulers/metrics.go b/server/schedule/schedulers/metrics.go similarity index 100% rename from server/schedulers/metrics.go rename to server/schedule/schedulers/metrics.go diff --git a/server/schedulers/random_merge.go b/server/schedule/schedulers/random_merge.go similarity index 100% rename from server/schedulers/random_merge.go rename to 
server/schedule/schedulers/random_merge.go diff --git a/server/schedulers/scatter_range.go b/server/schedule/schedulers/scatter_range.go similarity index 100% rename from server/schedulers/scatter_range.go rename to server/schedule/schedulers/scatter_range.go diff --git a/server/schedulers/scheduler_test.go b/server/schedule/schedulers/scheduler_test.go similarity index 98% rename from server/schedulers/scheduler_test.go rename to server/schedule/schedulers/scheduler_test.go index 10ca1f50a4c..d3c715a10b3 100644 --- a/server/schedulers/scheduler_test.go +++ b/server/schedule/schedulers/scheduler_test.go @@ -23,21 +23,22 @@ import ( "github.com/stretchr/testify/require" "github.com/tikv/pd/pkg/core" "github.com/tikv/pd/pkg/mock/mockcluster" + "github.com/tikv/pd/pkg/mock/mockconfig" "github.com/tikv/pd/pkg/storage" "github.com/tikv/pd/pkg/utils/testutil" "github.com/tikv/pd/pkg/versioninfo" - "github.com/tikv/pd/server/config" "github.com/tikv/pd/server/schedule" + "github.com/tikv/pd/server/schedule/config" "github.com/tikv/pd/server/schedule/hbstream" "github.com/tikv/pd/server/schedule/operator" "github.com/tikv/pd/server/schedule/placement" "github.com/tikv/pd/server/statistics" ) -func prepareSchedulersTest(needToRunStream ...bool) (context.CancelFunc, *config.PersistOptions, *mockcluster.Cluster, *schedule.OperatorController) { +func prepareSchedulersTest(needToRunStream ...bool) (context.CancelFunc, config.Config, *mockcluster.Cluster, *schedule.OperatorController) { Register() ctx, cancel := context.WithCancel(context.Background()) - opt := config.NewTestOptions() + opt := mockconfig.NewTestOptions() tc := mockcluster.NewCluster(ctx, opt) var stream *hbstream.HeartbeatStreams if len(needToRunStream) == 0 { @@ -79,12 +80,9 @@ func TestShuffleLeader(t *testing.T) { func TestRejectLeader(t *testing.T) { re := require.New(t) - cancel, opt, tc, oc := prepareSchedulersTest() + cancel, _, tc, oc := prepareSchedulersTest() defer cancel() - opt.SetLabelPropertyConfig(config.LabelPropertyConfig{ - config.RejectLeader: {{Key: "noleader", Value: "true"}}, - }) - + tc.SetLabelProperty(config.RejectLeader, "noleader", "true") // Add 3 stores 1,2,3. 
tc.AddLabelsStore(1, 1, map[string]string{"noleader": "true"}) tc.UpdateLeaderCount(1, 1) diff --git a/server/schedulers/shuffle_hot_region.go b/server/schedule/schedulers/shuffle_hot_region.go similarity index 100% rename from server/schedulers/shuffle_hot_region.go rename to server/schedule/schedulers/shuffle_hot_region.go diff --git a/server/schedulers/shuffle_leader.go b/server/schedule/schedulers/shuffle_leader.go similarity index 100% rename from server/schedulers/shuffle_leader.go rename to server/schedule/schedulers/shuffle_leader.go diff --git a/server/schedulers/shuffle_region.go b/server/schedule/schedulers/shuffle_region.go similarity index 100% rename from server/schedulers/shuffle_region.go rename to server/schedule/schedulers/shuffle_region.go diff --git a/server/schedulers/shuffle_region_config.go b/server/schedule/schedulers/shuffle_region_config.go similarity index 100% rename from server/schedulers/shuffle_region_config.go rename to server/schedule/schedulers/shuffle_region_config.go diff --git a/server/schedulers/split_bucket.go b/server/schedule/schedulers/split_bucket.go similarity index 100% rename from server/schedulers/split_bucket.go rename to server/schedule/schedulers/split_bucket.go diff --git a/server/schedulers/split_bucket_test.go b/server/schedule/schedulers/split_bucket_test.go similarity index 100% rename from server/schedulers/split_bucket_test.go rename to server/schedule/schedulers/split_bucket_test.go diff --git a/server/schedulers/transfer_witness_leader.go b/server/schedule/schedulers/transfer_witness_leader.go similarity index 100% rename from server/schedulers/transfer_witness_leader.go rename to server/schedule/schedulers/transfer_witness_leader.go diff --git a/server/schedulers/transfer_witness_leader_test.go b/server/schedule/schedulers/transfer_witness_leader_test.go similarity index 99% rename from server/schedulers/transfer_witness_leader_test.go rename to server/schedule/schedulers/transfer_witness_leader_test.go index d258a2a6a34..9966e7cc085 100644 --- a/server/schedulers/transfer_witness_leader_test.go +++ b/server/schedule/schedulers/transfer_witness_leader_test.go @@ -53,6 +53,7 @@ func TestTransferWitnessLeaderWithUnhealthyPeer(t *testing.T) { re := require.New(t) cancel, _, tc, oc := prepareSchedulersTest() defer cancel() + sl, err := schedule.CreateScheduler(TransferWitnessLeaderType, oc, storage.NewStorageWithMemoryBackend(), nil) re.NoError(err) diff --git a/server/schedulers/utils.go b/server/schedule/schedulers/utils.go similarity index 100% rename from server/schedulers/utils.go rename to server/schedule/schedulers/utils.go diff --git a/server/schedulers/utils_test.go b/server/schedule/schedulers/utils_test.go similarity index 100% rename from server/schedulers/utils_test.go rename to server/schedule/schedulers/utils_test.go diff --git a/server/statistics/region_collection.go b/server/statistics/region_collection.go index 5f5d1748466..0fbab064646 100644 --- a/server/statistics/region_collection.go +++ b/server/statistics/region_collection.go @@ -21,6 +21,7 @@ import ( "github.com/pingcap/log" "github.com/tikv/pd/pkg/core" "github.com/tikv/pd/server/config" + sc "github.com/tikv/pd/server/schedule/config" "github.com/tikv/pd/server/schedule/placement" ) @@ -73,7 +74,7 @@ type RegionInfo struct { // RegionStatistics is used to record the status of regions. 
type RegionStatistics struct { sync.RWMutex - opt *config.PersistOptions + conf sc.Config stats map[RegionStatisticType]map[uint64]*RegionInfo offlineStats map[RegionStatisticType]map[uint64]*core.RegionInfo index map[uint64]RegionStatisticType @@ -83,9 +84,9 @@ type RegionStatistics struct { } // NewRegionStatistics creates a new RegionStatistics. -func NewRegionStatistics(opt *config.PersistOptions, ruleManager *placement.RuleManager, storeConfigManager *config.StoreConfigManager) *RegionStatistics { +func NewRegionStatistics(conf sc.Config, ruleManager *placement.RuleManager, storeConfigManager *config.StoreConfigManager) *RegionStatistics { r := &RegionStatistics{ - opt: opt, + conf: conf, ruleManager: ruleManager, storeConfigManager: storeConfigManager, stats: make(map[RegionStatisticType]map[uint64]*RegionInfo), @@ -167,7 +168,7 @@ func (r *RegionStatistics) RegionStatsNeedUpdate(region *core.RegionInfo) bool { return true } return r.IsRegionStatsType(regionID, UndersizedRegion) != - region.NeedMerge(int64(r.opt.GetMaxMergeRegionSize()), int64(r.opt.GetMaxMergeRegionKeys())) + region.NeedMerge(int64(r.conf.GetMaxMergeRegionSize()), int64(r.conf.GetMaxMergeRegionKeys())) } // Observe records the current regions' status. @@ -181,9 +182,9 @@ func (r *RegionStatistics) Observe(region *core.RegionInfo, stores []*core.Store offlinePeerTypeIndex RegionStatisticType deleteIndex RegionStatisticType ) - desiredReplicas := r.opt.GetMaxReplicas() + desiredReplicas := r.conf.GetMaxReplicas() desiredVoters := desiredReplicas - if r.opt.IsPlacementRulesEnabled() { + if r.conf.IsPlacementRulesEnabled() { if !r.ruleManager.IsInitialized() { log.Warn("ruleManager haven't been initialized") return @@ -226,8 +227,8 @@ func (r *RegionStatistics) Observe(region *core.RegionInfo, stores []*core.Store int64(r.storeConfigManager.GetStoreConfig().GetRegionMaxKeys()), ), UndersizedRegion: region.NeedMerge( - int64(r.opt.GetMaxMergeRegionSize()), - int64(r.opt.GetMaxMergeRegionKeys()), + int64(r.conf.GetMaxMergeRegionSize()), + int64(r.conf.GetMaxMergeRegionKeys()), ), WitnessLeader: region.GetLeader().GetIsWitness(), } diff --git a/server/statistics/region_collection_test.go b/server/statistics/region_collection_test.go index c7c8e42fea0..2f8067f62e0 100644 --- a/server/statistics/region_collection_test.go +++ b/server/statistics/region_collection_test.go @@ -21,8 +21,8 @@ import ( "github.com/pingcap/kvproto/pkg/pdpb" "github.com/stretchr/testify/require" "github.com/tikv/pd/pkg/core" + "github.com/tikv/pd/pkg/mock/mockconfig" "github.com/tikv/pd/pkg/storage" - "github.com/tikv/pd/server/config" "github.com/tikv/pd/server/schedule/placement" ) @@ -32,7 +32,7 @@ func TestRegionStatistics(t *testing.T) { manager := placement.NewRuleManager(store, nil, nil) err := manager.Initialize(3, []string{"zone", "rack", "host"}) re.NoError(err) - opt := config.NewTestOptions() + opt := mockconfig.NewTestOptions() opt.SetPlacementRuleEnabled(false) peers := []*metapb.Peer{ {Id: 5, StoreId: 1}, @@ -137,7 +137,7 @@ func TestRegionStatisticsWithPlacementRule(t *testing.T) { manager := placement.NewRuleManager(store, nil, nil) err := manager.Initialize(3, []string{"zone", "rack", "host"}) re.NoError(err) - opt := config.NewTestOptions() + opt := mockconfig.NewTestOptions() opt.SetPlacementRuleEnabled(true) peers := []*metapb.Peer{ {Id: 5, StoreId: 1}, diff --git a/server/statistics/store_collection_test.go b/server/statistics/store_collection_test.go index 63ffc5b52cd..878ebf65627 100644 --- 
a/server/statistics/store_collection_test.go +++ b/server/statistics/store_collection_test.go @@ -21,12 +21,12 @@ import ( "github.com/pingcap/kvproto/pkg/metapb" "github.com/stretchr/testify/require" "github.com/tikv/pd/pkg/core" - "github.com/tikv/pd/server/config" + "github.com/tikv/pd/pkg/mock/mockconfig" ) func TestStoreStatistics(t *testing.T) { re := require.New(t) - opt := config.NewTestOptions() + opt := mockconfig.NewTestOptions() rep := opt.GetReplicationConfig().Clone() rep.LocationLabels = []string{"zone", "host"} opt.SetReplicationConfig(rep) diff --git a/server/testutil.go b/server/testutil.go index df24267dac8..30c84ee3a91 100644 --- a/server/testutil.go +++ b/server/testutil.go @@ -32,7 +32,7 @@ import ( "github.com/tikv/pd/pkg/utils/testutil" "github.com/tikv/pd/pkg/utils/typeutil" "github.com/tikv/pd/server/config" - "github.com/tikv/pd/server/schedulers" + "github.com/tikv/pd/server/schedule/schedulers" "go.etcd.io/etcd/embed" ) diff --git a/tests/client/client_test.go b/tests/client/client_test.go index 1e8fde87473..dd1dfc0d379 100644 --- a/tests/client/client_test.go +++ b/tests/client/client_test.go @@ -824,8 +824,7 @@ func (suite *clientTestSuite) SetupSuite() { }, }) } - config := cluster.GetStoreConfig() - config.EnableRegionBucket = true + cluster.GetStoreConfig().SetRegionBucketEnabled(true) } func (suite *clientTestSuite) TearDownSuite() { @@ -956,8 +955,8 @@ func (suite *clientTestSuite) TestGetRegion() { } return r.Buckets != nil }) - config := suite.srv.GetRaftCluster().GetStoreConfig() - config.EnableRegionBucket = false + suite.srv.GetRaftCluster().GetStoreConfig().SetRegionBucketEnabled(false) + testutil.Eventually(re, func() bool { r, err := suite.client.GetRegion(context.Background(), []byte("a"), pd.WithBuckets()) suite.NoError(err) @@ -966,7 +965,7 @@ func (suite *clientTestSuite) TestGetRegion() { } return r.Buckets == nil }) - config.EnableRegionBucket = true + suite.srv.GetRaftCluster().GetStoreConfig().SetRegionBucketEnabled(true) suite.NoError(failpoint.Enable("github.com/tikv/pd/server/grpcClientClosed", `return(true)`)) suite.NoError(failpoint.Enable("github.com/tikv/pd/server/useForwardRequest", `return(true)`)) diff --git a/tests/cluster.go b/tests/cluster.go index b60f44d2291..a788ac6bb3a 100644 --- a/tests/cluster.go +++ b/tests/cluster.go @@ -43,7 +43,7 @@ import ( "github.com/tikv/pd/server/config" "github.com/tikv/pd/server/join" "github.com/tikv/pd/server/keyspace" - "github.com/tikv/pd/server/schedulers" + "github.com/tikv/pd/server/schedule/schedulers" "go.etcd.io/etcd/clientv3" ) diff --git a/tests/server/cluster/cluster_test.go b/tests/server/cluster/cluster_test.go index 527f54c6fcc..f729d6e13c2 100644 --- a/tests/server/cluster/cluster_test.go +++ b/tests/server/cluster/cluster_test.go @@ -820,7 +820,6 @@ func TestLoadClusterInfo(t *testing.T) { testStorage := rc.GetStorage() basicCluster := rc.GetBasicCluster() - opt := rc.GetOpts() // Save meta, stores and regions. 
n := 10 meta := &metapb.Cluster{Id: 123} @@ -852,7 +851,7 @@ func TestLoadClusterInfo(t *testing.T) { re.NoError(testStorage.Flush()) raftCluster = cluster.NewRaftCluster(ctx, svr.ClusterID(), syncer.NewRegionSyncer(svr), svr.GetClient(), svr.GetHTTPClient()) - raftCluster.InitCluster(mockid.NewIDAllocator(), opt, testStorage, basicCluster) + raftCluster.InitCluster(mockid.NewIDAllocator(), svr.GetPersistOptions(), testStorage, basicCluster) raftCluster, err = raftCluster.LoadClusterInfo() re.NoError(err) re.NotNil(raftCluster) @@ -1171,8 +1170,7 @@ func TestUpgradeStoreLimit(t *testing.T) { // restart PD // Here we use an empty storelimit to simulate the upgrade progress. - opt := rc.GetOpts() - scheduleCfg := opt.GetScheduleConfig().Clone() + scheduleCfg := rc.GetScheduleConfig().Clone() scheduleCfg.StoreLimit = map[uint64]config.StoreLimitConfig{} re.NoError(leaderServer.GetServer().SetScheduleConfig(*scheduleCfg)) err = leaderServer.Stop() @@ -1315,7 +1313,7 @@ func checkMinResolvedTSFromStorage(re *require.Assertions, rc *cluster.RaftClust } func setMinResolvedTSPersistenceInterval(re *require.Assertions, rc *cluster.RaftCluster, svr *server.Server, interval time.Duration) { - cfg := rc.GetOpts().GetPDServerConfig().Clone() + cfg := rc.GetPDServerConfig().Clone() cfg.MinResolvedTSPersistenceInterval = typeutil.NewDuration(interval) err := svr.SetPDServerConfig(*cfg) re.NoError(err) @@ -1367,9 +1365,9 @@ func TestMinResolvedTS(t *testing.T) { } // default run job - re.NotEqual(rc.GetOpts().GetMinResolvedTSPersistenceInterval(), 0) + re.NotEqual(rc.GetPDServerConfig().MinResolvedTSPersistenceInterval.Duration, 0) setMinResolvedTSPersistenceInterval(re, rc, svr, 0) - re.Equal(time.Duration(0), rc.GetOpts().GetMinResolvedTSPersistenceInterval()) + re.Equal(time.Duration(0), rc.GetPDServerConfig().MinResolvedTSPersistenceInterval.Duration) // case1: cluster is no initialized // min resolved ts should be not available diff --git a/tools/pd-simulator/main.go b/tools/pd-simulator/main.go index 312a13a86a9..78b9742ccd7 100644 --- a/tools/pd-simulator/main.go +++ b/tools/pd-simulator/main.go @@ -32,7 +32,7 @@ import ( "github.com/tikv/pd/server" "github.com/tikv/pd/server/api" "github.com/tikv/pd/server/config" - "github.com/tikv/pd/server/schedulers" + "github.com/tikv/pd/server/schedule/schedulers" "github.com/tikv/pd/server/statistics" "github.com/tikv/pd/tools/pd-analysis/analysis" "github.com/tikv/pd/tools/pd-simulator/simulator"
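Taken together, the rule_manager.go, balance_leader.go, hot_region_test.go, and region_collection.go hunks above swap the concrete *config.PersistOptions dependency for the new server/schedule/config.Config abstraction. As a rough sketch of that boundary, the hypothetical interface below lists only the getters actually exercised through `conf`/`opt` in this diff; the real interface in server/schedule/config may declare more methods, and the return types here are assumptions.

// Hypothetical sketch only: a narrowed, read-only configuration view covering
// the calls made in the hunks above. Not the actual interface definition.
package config

type Config interface {
	IsPlacementRulesEnabled() bool
	IsPlacementRulesCacheEnabled() bool
	IsWitnessAllowed() bool
	GetMaxReplicas() int
	GetMaxMergeRegionSize() uint64
	GetMaxMergeRegionKeys() uint64
	GetHotRegionScheduleLimit() uint64
	// ...plus the test-facing setters seen in the diff, e.g.
	// SetPlacementRulesCacheEnabled and SetPlacementRuleEnabled.
}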
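The rule_manager_test.go hunk shows the payoff on the test side: placement tests can now be wired up entirely from pkg/mock instead of server/config. A minimal, self-contained sketch of that pattern follows; the test name is invented for illustration, while every constructor call appears verbatim in the diff.

package placement_test

import (
	"testing"

	"github.com/stretchr/testify/require"
	"github.com/tikv/pd/pkg/mock/mockconfig"
	"github.com/tikv/pd/pkg/storage/endpoint"
	"github.com/tikv/pd/pkg/storage/kv"
	"github.com/tikv/pd/server/schedule/placement"
)

func TestNewRuleManagerWithMockConfig(t *testing.T) {
	re := require.New(t)
	// In-memory storage backend, as used by newTestManager in the diff.
	store := endpoint.NewStorageEndpoint(kv.NewMemoryKV(), nil)
	// mockconfig.NewTestOptions now satisfies the config.Config parameter,
	// so the test no longer imports server/config.
	manager := placement.NewRuleManager(store, nil, mockconfig.NewTestOptions())
	re.NoError(manager.Initialize(3, []string{"zone", "rack", "host"}))
}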
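The scheduler benchmarks and tests follow the same pattern: fixtures are built from mockconfig and mockcluster plus an operator controller. Below is a condensed sketch of that setup under the identifiers shown in the balance_benchmark_test.go and scheduler_test.go hunks; the helper name is invented, and the real prepareSchedulersTest additionally returns the config and may attach a heartbeat stream conditionally.

package schedulers

import (
	"context"

	"github.com/tikv/pd/pkg/mock/mockcluster"
	"github.com/tikv/pd/pkg/mock/mockconfig"
	"github.com/tikv/pd/server/schedule"
	"github.com/tikv/pd/server/schedule/hbstream"
)

// newSketchCluster wires a mock cluster and an operator controller without
// touching server/config. Illustration only; not part of the diff.
func newSketchCluster() (context.CancelFunc, *mockcluster.Cluster, *schedule.OperatorController) {
	Register() // register all schedulers, as the benchmark helpers do
	ctx, cancel := context.WithCancel(context.Background())
	opt := mockconfig.NewTestOptions()
	tc := mockcluster.NewCluster(ctx, opt)
	stream := hbstream.NewTestHeartbeatStreams(ctx, tc.ID, tc, false)
	oc := schedule.NewOperatorController(ctx, tc, stream)
	return cancel, tc, oc
}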