From d694360c01a518c0e08374cd9298dc431004d01c Mon Sep 17 00:00:00 2001 From: Ryan Leung Date: Mon, 26 Oct 2020 11:43:14 +0800 Subject: [PATCH] cherry pick #2946 to release-4.0 Signed-off-by: ti-srebot --- plugin/scheduler_example/evict_leader.go | 6 + server/schedule/checker/replica_strategy.go | 128 +++++++++++++++ server/schedule/checker/rule_checker.go | 5 + server/schedule/filter/filters.go | 117 +++++++++++++- server/schedule/filter/filters_test.go | 167 ++++++++++++++++++++ server/schedule/operator/builder.go | 14 ++ server/schedule/region_scatterer.go | 7 +- server/schedulers/balance_leader.go | 2 +- server/schedulers/balance_region.go | 17 +- server/schedulers/evict_leader.go | 7 + server/schedulers/hot_region.go | 6 +- server/schedulers/label.go | 7 + server/schedulers/random_merge.go | 6 + server/schedulers/shuffle_hot_region.go | 2 +- server/schedulers/shuffle_leader.go | 2 +- server/schedulers/shuffle_region.go | 2 +- 16 files changed, 485 insertions(+), 10 deletions(-) create mode 100644 server/schedule/checker/replica_strategy.go diff --git a/plugin/scheduler_example/evict_leader.go b/plugin/scheduler_example/evict_leader.go index 96e989bfcfef..7ae9bba2877c 100644 --- a/plugin/scheduler_example/evict_leader.go +++ b/plugin/scheduler_example/evict_leader.go @@ -225,7 +225,13 @@ func (s *evictLeaderScheduler) Schedule(cluster opt.Cluster) []*operator.Operato if region == nil { continue } +<<<<<<< HEAD target := s.selector.SelectTarget(cluster, cluster.GetFollowerStores(region)) +======= + target := filter.NewCandidates(cluster.GetFollowerStores(region)). + FilterTarget(cluster.GetOpts(), &filter.StoreStateFilter{ActionScope: EvictLeaderName, TransferLeader: true}). + RandomPick() +>>>>>>> 6bf18ce0... metrics: optimize the store state filter metrics (#2946) if target == nil { continue } diff --git a/server/schedule/checker/replica_strategy.go b/server/schedule/checker/replica_strategy.go new file mode 100644 index 000000000000..52cc24cad4c3 --- /dev/null +++ b/server/schedule/checker/replica_strategy.go @@ -0,0 +1,128 @@ +// Copyright 2020 TiKV Project Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package checker + +import ( + "github.com/pingcap/log" + "github.com/tikv/pd/server/core" + "github.com/tikv/pd/server/schedule/filter" + "github.com/tikv/pd/server/schedule/opt" + "go.uber.org/zap" +) + +// ReplicaStrategy collects some utilities to manipulate region peers. It +// exists to allow replica_checker and rule_checker to reuse common logics. +type ReplicaStrategy struct { + checkerName string // replica-checker / rule-checker + cluster opt.Cluster + locationLabels []string + isolationLevel string + region *core.RegionInfo + extraFilters []filter.Filter +} + +// SelectStoreToAdd returns the store to add a replica to a region. +// `coLocationStores` are the stores used to compare location with target +// store. +// `extraFilters` is used to set up more filters based on the context that +// calling this method. 
+// +// For example, to select a target store to replace a region's peer, we can use +// the peer list with the peer removed as `coLocationStores`. +// Meanwhile, we need to provide more constraints to ensure that the isolation +// level cannot be reduced after replacement. +func (s *ReplicaStrategy) SelectStoreToAdd(coLocationStores []*core.StoreInfo, extraFilters ...filter.Filter) uint64 { + // The selection process uses a two-stage fashion. The first stage + // ignores the temporary state of the stores and selects the stores + // with the highest score according to the location label. The second + // stage considers all temporary states and capacity factors to select + // the most suitable target. + // + // The reason for it is to prevent the non-optimal replica placement due + // to the short-term state, resulting in redundant scheduling. + filters := []filter.Filter{ + filter.NewExcludedFilter(s.checkerName, nil, s.region.GetStoreIds()), + filter.NewStorageThresholdFilter(s.checkerName), + filter.NewSpecialUseFilter(s.checkerName), + &filter.StoreStateFilter{ActionScope: s.checkerName, MoveRegion: true, AllowTemporaryStates: true}, + } + if len(s.locationLabels) > 0 && s.isolationLevel != "" { + filters = append(filters, filter.NewIsolationFilter(s.checkerName, s.isolationLevel, s.locationLabels, coLocationStores)) + } + if len(extraFilters) > 0 { + filters = append(filters, extraFilters...) + } + if len(s.extraFilters) > 0 { + filters = append(filters, s.extraFilters...) + } + + isolationComparer := filter.IsolationComparer(s.locationLabels, coLocationStores) + strictStateFilter := &filter.StoreStateFilter{ActionScope: s.checkerName, MoveRegion: true} + target := filter.NewCandidates(s.cluster.GetStores()). + FilterTarget(s.cluster.GetOpts(), filters...). + Sort(isolationComparer).Reverse().Top(isolationComparer). // greater isolation score is better + Sort(filter.RegionScoreComparer(s.cluster.GetOpts())). // less region score is better + FilterTarget(s.cluster.GetOpts(), strictStateFilter).PickFirst() // the filter does not ignore temp states + if target == nil { + return 0 + } + return target.GetID() +} + +// SelectStoreToReplace returns a store to replace oldStore. The location +// placement after scheduling should be not worse than original. +func (s *ReplicaStrategy) SelectStoreToReplace(coLocationStores []*core.StoreInfo, old uint64) uint64 { + // trick to avoid creating a slice with `old` removed. + s.swapStoreToFirst(coLocationStores, old) + safeGuard := filter.NewLocationSafeguard(s.checkerName, s.locationLabels, coLocationStores, s.cluster.GetStore(old)) + return s.SelectStoreToAdd(coLocationStores[1:], safeGuard) +} + +// SelectStoreToImprove returns a store to replace oldStore. The location +// placement after scheduling should be better than original. +func (s *ReplicaStrategy) SelectStoreToImprove(coLocationStores []*core.StoreInfo, old uint64) uint64 { + // trick to avoid creating a slice with `old` removed. + s.swapStoreToFirst(coLocationStores, old) + filters := []filter.Filter{ + filter.NewLocationImprover(s.checkerName, s.locationLabels, coLocationStores, s.cluster.GetStore(old)), + } + if len(s.locationLabels) > 0 && s.isolationLevel != "" { + filters = append(filters, filter.NewIsolationFilter(s.checkerName, s.isolationLevel, s.locationLabels, coLocationStores[1:])) + } + return s.SelectStoreToAdd(coLocationStores[1:], filters...) 
+} + +func (s *ReplicaStrategy) swapStoreToFirst(stores []*core.StoreInfo, id uint64) { + for i, s := range stores { + if s.GetID() == id { + stores[0], stores[i] = stores[i], stores[0] + return + } + } +} + +// SelectStoreToRemove returns the best option to remove from the region. +func (s *ReplicaStrategy) SelectStoreToRemove(coLocationStores []*core.StoreInfo) uint64 { + isolationComparer := filter.IsolationComparer(s.locationLabels, coLocationStores) + source := filter.NewCandidates(coLocationStores). + FilterSource(s.cluster.GetOpts(), &filter.StoreStateFilter{ActionScope: replicaCheckerName, MoveRegion: true}). + Sort(isolationComparer).Top(isolationComparer). + Sort(filter.RegionScoreComparer(s.cluster.GetOpts())).Reverse(). + PickFirst() + if source == nil { + log.Debug("no removable store", zap.Uint64("region-id", s.region.GetID())) + return 0 + } + return source.GetID() +} diff --git a/server/schedule/checker/rule_checker.go b/server/schedule/checker/rule_checker.go index e4c8af4cc989..a9c9e8de6154 100644 --- a/server/schedule/checker/rule_checker.go +++ b/server/schedule/checker/rule_checker.go @@ -158,8 +158,13 @@ func (c *RuleChecker) allowLeader(fit *placement.RegionFit, peer *metapb.Peer) b if s == nil { return false } +<<<<<<< HEAD stateFilter := filter.StoreStateFilter{ActionScope: "rule-checker", TransferLeader: true} if !stateFilter.Target(c.cluster, s) { +======= + stateFilter := &filter.StoreStateFilter{ActionScope: "rule-checker", TransferLeader: true} + if !stateFilter.Target(c.cluster.GetOpts(), s) { +>>>>>>> 6bf18ce0... metrics: optimize the store state filter metrics (#2946) return false } for _, rf := range fit.RuleFits { diff --git a/server/schedule/filter/filters.go b/server/schedule/filter/filters.go index 4e5fa4ca76a7..339a0246f1bc 100644 --- a/server/schedule/filter/filters.go +++ b/server/schedule/filter/filters.go @@ -361,16 +361,23 @@ type StoreStateFilter struct { TransferLeader bool // Set true if the schedule involves any move region operation. MoveRegion bool +<<<<<<< HEAD +======= + // Set true if allows temporary states. + AllowTemporaryStates bool + // Reason is used to distinguish the reason of store state filter + Reason string +>>>>>>> 6bf18ce0... metrics: optimize the store state filter metrics (#2946) } // Scope returns the scheduler or the checker which the filter acts on. -func (f StoreStateFilter) Scope() string { +func (f *StoreStateFilter) Scope() string { return f.ActionScope } // Type returns the type of the Filter. 
-func (f StoreStateFilter) Type() string { - return "store-state-filter" +func (f *StoreStateFilter) Type() string { + return fmt.Sprintf("store-state-%s-filter", f.Reason) } // Source returns true when the store can be selected as the schedule @@ -384,6 +391,7 @@ func (f StoreStateFilter) Source(opt opt.Options, store *core.StoreInfo) bool { return false } +<<<<<<< HEAD if f.MoveRegion && !f.filterMoveRegion(opt, true, store) { return false } @@ -461,6 +469,65 @@ func NewBlacklistStoreFilter(scope string, typ BlacklistType) *BlacklistStoreFil blacklist: make(map[uint64]struct{}), flag: typ, } +======= +func (f *StoreStateFilter) isTombstone(opt *config.PersistOptions, store *core.StoreInfo) bool { + f.Reason = "tombstone" + return store.IsTombstone() +} + +func (f *StoreStateFilter) isDown(opt *config.PersistOptions, store *core.StoreInfo) bool { + f.Reason = "down" + return store.DownTime() > opt.GetMaxStoreDownTime() +} + +func (f *StoreStateFilter) isOffline(opt *config.PersistOptions, store *core.StoreInfo) bool { + f.Reason = "offline" + return store.IsOffline() +} + +func (f *StoreStateFilter) pauseLeaderTransfer(opt *config.PersistOptions, store *core.StoreInfo) bool { + f.Reason = "pause-leader" + return !store.AllowLeaderTransfer() +} + +func (f *StoreStateFilter) isDisconnected(opt *config.PersistOptions, store *core.StoreInfo) bool { + f.Reason = "disconnected" + return !f.AllowTemporaryStates && store.IsDisconnected() +} + +func (f *StoreStateFilter) isBusy(opt *config.PersistOptions, store *core.StoreInfo) bool { + f.Reason = "busy" + return !f.AllowTemporaryStates && store.IsBusy() +} + +func (f *StoreStateFilter) exceedRemoveLimit(opt *config.PersistOptions, store *core.StoreInfo) bool { + f.Reason = "exceed-remove-limit" + return !f.AllowTemporaryStates && !store.IsAvailable(storelimit.RemovePeer) +} + +func (f *StoreStateFilter) exceedAddLimit(opt *config.PersistOptions, store *core.StoreInfo) bool { + f.Reason = "exceed-add-limit" + return !f.AllowTemporaryStates && !store.IsAvailable(storelimit.AddPeer) +} + +func (f *StoreStateFilter) tooManySnapshots(opt *config.PersistOptions, store *core.StoreInfo) bool { + f.Reason = "too-many-snapshot" + return !f.AllowTemporaryStates && (uint64(store.GetSendingSnapCount()) > opt.GetMaxSnapshotCount() || + uint64(store.GetReceivingSnapCount()) > opt.GetMaxSnapshotCount() || + uint64(store.GetApplyingSnapCount()) > opt.GetMaxSnapshotCount()) +} + +func (f *StoreStateFilter) tooManyPendingPeers(opt *config.PersistOptions, store *core.StoreInfo) bool { + f.Reason = "too-many-pending-peer" + return !f.AllowTemporaryStates && + opt.GetMaxPendingPeerCount() > 0 && + store.GetPendingPeerCount() > int(opt.GetMaxPendingPeerCount()) +} + +func (f *StoreStateFilter) hasRejectLeaderProperty(opts *config.PersistOptions, store *core.StoreInfo) bool { + f.Reason = "reject-leader" + return opts.CheckLabelProperty(opt.RejectLeader, store.GetLabels()) +>>>>>>> 6bf18ce0... metrics: optimize the store state filter metrics (#2946) } // Scope returns the scheduler or the checker which the filter acts on. @@ -473,14 +540,36 @@ func (f *BlacklistStoreFilter) Type() string { return "blacklist-store-filter" } +<<<<<<< HEAD // Source implements the Filter. 
func (f *BlacklistStoreFilter) Source(opt opt.Options, store *core.StoreInfo) bool { if f.flag&BlacklistSource != BlacklistSource { return true +======= +func (f *StoreStateFilter) anyConditionMatch(typ int, opt *config.PersistOptions, store *core.StoreInfo) bool { + var funcs []conditionFunc + switch typ { + case leaderSource: + funcs = []conditionFunc{f.isTombstone, f.isDown, f.pauseLeaderTransfer, f.isDisconnected} + case regionSource: + funcs = []conditionFunc{f.isBusy, f.exceedRemoveLimit, f.tooManySnapshots} + case leaderTarget: + funcs = []conditionFunc{f.isTombstone, f.isOffline, f.isDown, f.pauseLeaderTransfer, + f.isDisconnected, f.isBusy, f.hasRejectLeaderProperty} + case regionTarget: + funcs = []conditionFunc{f.isTombstone, f.isOffline, f.isDown, f.isDisconnected, f.isBusy, + f.exceedAddLimit, f.tooManySnapshots, f.tooManyPendingPeers} + } + for _, cf := range funcs { + if cf(opt, store) { + return true + } +>>>>>>> 6bf18ce0... metrics: optimize the store state filter metrics (#2946) } return f.filter(store) } +<<<<<<< HEAD // Add adds the store to the blacklist. func (f *BlacklistStoreFilter) Add(storeID uint64) { f.blacklist[storeID] = struct{}{} @@ -490,6 +579,28 @@ func (f *BlacklistStoreFilter) Add(storeID uint64) { func (f *BlacklistStoreFilter) Target(opt opt.Options, store *core.StoreInfo) bool { if f.flag&BlacklistTarget != BlacklistTarget { return true +======= +// Source returns true when the store can be selected as the schedule +// source. +func (f *StoreStateFilter) Source(opts *config.PersistOptions, store *core.StoreInfo) bool { + if f.TransferLeader && f.anyConditionMatch(leaderSource, opts, store) { + return false + } + if f.MoveRegion && f.anyConditionMatch(regionSource, opts, store) { + return false + } + return true +} + +// Target returns true when the store can be selected as the schedule +// target. +func (f *StoreStateFilter) Target(opts *config.PersistOptions, store *core.StoreInfo) bool { + if f.TransferLeader && f.anyConditionMatch(leaderTarget, opts, store) { + return false + } + if f.MoveRegion && f.anyConditionMatch(regionTarget, opts, store) { + return false +>>>>>>> 6bf18ce0... 
metrics: optimize the store state filter metrics (#2946) } return f.filter(store) } diff --git a/server/schedule/filter/filters_test.go b/server/schedule/filter/filters_test.go index 716efccefd43..9f053b6f315b 100644 --- a/server/schedule/filter/filters_test.go +++ b/server/schedule/filter/filters_test.go @@ -60,6 +60,7 @@ func (s *testFiltersSuite) TestLabelConstraintsFilter(c *C) { } func (s *testFiltersSuite) TestRuleFitFilter(c *C) { +<<<<<<< HEAD opt := mockoption.NewScheduleOptions() opt.EnablePlacementRules = true opt.LocationLabels = []string{"zone"} @@ -69,6 +70,172 @@ func (s *testFiltersSuite) TestRuleFitFilter(c *C) { tc.AddLabelsStore(3, 1, map[string]string{"zone": "z2"}) tc.AddLabelsStore(4, 1, map[string]string{"zone": "z2"}) tc.AddLabelsStore(5, 1, map[string]string{"zone": "z3"}) +======= + opt := config.NewTestOptions() + opt.SetPlacementRuleEnabled(false) + testCluster := mockcluster.NewCluster(opt) + testCluster.SetLocationLabels([]string{"zone"}) + testCluster.SetEnablePlacementRules(true) + region := core.NewRegionInfo(&metapb.Region{Peers: []*metapb.Peer{ + {StoreId: 1, Id: 1}, + {StoreId: 3, Id: 3}, + {StoreId: 5, Id: 5}, + }}, &metapb.Peer{StoreId: 1, Id: 1}) + + testCases := []struct { + storeID uint64 + regionCount int + labels map[string]string + sourceRes bool + targetRes bool + }{ + {1, 1, map[string]string{"zone": "z1"}, true, true}, + {2, 1, map[string]string{"zone": "z1"}, true, true}, + {3, 1, map[string]string{"zone": "z2"}, true, false}, + {4, 1, map[string]string{"zone": "z2"}, true, false}, + {5, 1, map[string]string{"zone": "z3"}, true, false}, + {6, 1, map[string]string{"zone": "z4"}, true, true}, + } + // Init cluster + for _, tc := range testCases { + testCluster.AddLabelsStore(tc.storeID, tc.regionCount, tc.labels) + } + for _, tc := range testCases { + filter := newRuleFitFilter("", testCluster, region, 1) + c.Assert(filter.Source(testCluster.GetOpts(), testCluster.GetStore(tc.storeID)), Equals, tc.sourceRes) + c.Assert(filter.Target(testCluster.GetOpts(), testCluster.GetStore(tc.storeID)), Equals, tc.targetRes) + } +} + +func (s *testFiltersSuite) TestStoreStateFilter(c *C) { + filters := []Filter{ + &StoreStateFilter{TransferLeader: true}, + &StoreStateFilter{MoveRegion: true}, + &StoreStateFilter{TransferLeader: true, MoveRegion: true}, + &StoreStateFilter{MoveRegion: true, AllowTemporaryStates: true}, + } + opt := config.NewTestOptions() + store := core.NewStoreInfoWithLabel(1, 0, map[string]string{}) + + type testCase struct { + filterIdx int + sourceRes bool + targetRes bool + } + + check := func(store *core.StoreInfo, testCases []testCase) { + for _, tc := range testCases { + c.Assert(filters[tc.filterIdx].Source(opt, store), Equals, tc.sourceRes) + c.Assert(filters[tc.filterIdx].Target(opt, store), Equals, tc.targetRes) + } + } + + store = store.Clone(core.SetLastHeartbeatTS(time.Now())) + testCases := []testCase{ + {2, true, true}, + } + check(store, testCases) + + // Disconn + store = store.Clone(core.SetLastHeartbeatTS(time.Now().Add(-5 * time.Minute))) + testCases = []testCase{ + {0, false, false}, + {1, true, false}, + {2, false, false}, + {3, true, true}, + } + check(store, testCases) + + // Busy + store = store.Clone(core.SetLastHeartbeatTS(time.Now())). 
+ Clone(core.SetStoreStats(&pdpb.StoreStats{IsBusy: true})) + testCases = []testCase{ + {0, true, false}, + {1, false, false}, + {2, false, false}, + {3, true, true}, + } + check(store, testCases) +} + +func (s *testFiltersSuite) TestIsolationFilter(c *C) { + opt := config.NewTestOptions() + testCluster := mockcluster.NewCluster(opt) + testCluster.SetLocationLabels([]string{"zone", "rack", "host"}) + allStores := []struct { + storeID uint64 + regionCount int + labels map[string]string + }{ + {1, 1, map[string]string{"zone": "z1", "rack": "r1", "host": "h1"}}, + {2, 1, map[string]string{"zone": "z1", "rack": "r1", "host": "h1"}}, + {3, 1, map[string]string{"zone": "z1", "rack": "r1", "host": "h2"}}, + {4, 1, map[string]string{"zone": "z1", "rack": "r2", "host": "h1"}}, + {5, 1, map[string]string{"zone": "z1", "rack": "r3", "host": "h1"}}, + {6, 1, map[string]string{"zone": "z2", "rack": "r1", "host": "h1"}}, + {7, 1, map[string]string{"zone": "z3", "rack": "r3", "host": "h1"}}, + } + for _, store := range allStores { + testCluster.AddLabelsStore(store.storeID, store.regionCount, store.labels) + } + + testCases := []struct { + region *core.RegionInfo + isolationLevel string + sourceRes []bool + targetRes []bool + }{ + { + core.NewRegionInfo(&metapb.Region{Peers: []*metapb.Peer{ + {Id: 1, StoreId: 1}, + {Id: 2, StoreId: 6}, + }}, &metapb.Peer{StoreId: 1, Id: 1}), + "zone", + []bool{true, true, true, true, true, true, true}, + []bool{false, false, false, false, false, false, true}, + }, + { + core.NewRegionInfo(&metapb.Region{Peers: []*metapb.Peer{ + {Id: 1, StoreId: 1}, + {Id: 2, StoreId: 4}, + {Id: 3, StoreId: 7}, + }}, &metapb.Peer{StoreId: 1, Id: 1}), + "rack", + []bool{true, true, true, true, true, true, true}, + []bool{false, false, false, false, true, true, false}, + }, + { + core.NewRegionInfo(&metapb.Region{Peers: []*metapb.Peer{ + {Id: 1, StoreId: 1}, + {Id: 2, StoreId: 4}, + {Id: 3, StoreId: 6}, + }}, &metapb.Peer{StoreId: 1, Id: 1}), + "host", + []bool{true, true, true, true, true, true, true}, + []bool{false, false, true, false, true, false, true}, + }, + } + + for _, tc := range testCases { + filter := NewIsolationFilter("", tc.isolationLevel, testCluster.GetLocationLabels(), testCluster.GetRegionStores(tc.region)) + for idx, store := range allStores { + c.Assert(filter.Source(testCluster.GetOpts(), testCluster.GetStore(store.storeID)), Equals, tc.sourceRes[idx]) + c.Assert(filter.Target(testCluster.GetOpts(), testCluster.GetStore(store.storeID)), Equals, tc.targetRes[idx]) + } + } +} + +func (s *testFiltersSuite) TestPlacementGuard(c *C) { + opt := config.NewTestOptions() + opt.SetPlacementRuleEnabled(false) + testCluster := mockcluster.NewCluster(opt) + testCluster.SetLocationLabels([]string{"zone"}) + testCluster.AddLabelsStore(1, 1, map[string]string{"zone": "z1"}) + testCluster.AddLabelsStore(2, 1, map[string]string{"zone": "z1"}) + testCluster.AddLabelsStore(3, 1, map[string]string{"zone": "z2"}) + testCluster.AddLabelsStore(4, 1, map[string]string{"zone": "z2"}) + testCluster.AddLabelsStore(5, 1, map[string]string{"zone": "z3"}) +>>>>>>> 6bf18ce0... 
metrics: optimize the store state filter metrics (#2946) region := core.NewRegionInfo(&metapb.Region{Peers: []*metapb.Peer{ {StoreId: 1, Id: 1}, {StoreId: 3, Id: 3}, diff --git a/server/schedule/operator/builder.go b/server/schedule/operator/builder.go index 540628be39f9..b98d7c1f50dc 100644 --- a/server/schedule/operator/builder.go +++ b/server/schedule/operator/builder.go @@ -344,7 +344,21 @@ func (b *Builder) allowLeader(peer *metapb.Peer) bool { if peer.GetStoreId() == b.currentLeader { return true } +<<<<<<< HEAD if peer.GetIsLearner() { +======= + // Leave + b.steps = append(b.steps, ChangePeerV2Leave(step)) +} + +var stateFilter = &filter.StoreStateFilter{ActionScope: "operator-builder", TransferLeader: true} + +// check if the peer is allowed to become the leader. +func (b *Builder) allowLeader(peer *metapb.Peer, ignoreClusterLimit bool) bool { + // these peer roles are not allowed to become leader. + switch peer.GetRole() { + case metapb.PeerRole_Learner, metapb.PeerRole_DemotingVoter: +>>>>>>> 6bf18ce0... metrics: optimize the store state filter metrics (#2946) return false } store := b.cluster.GetStore(peer.GetStoreId()) diff --git a/server/schedule/region_scatterer.go b/server/schedule/region_scatterer.go index 313a2ee72e18..1e3e37a26407 100644 --- a/server/schedule/region_scatterer.go +++ b/server/schedule/region_scatterer.go @@ -136,8 +136,13 @@ type engineContext struct { selectedLeader *selectedStores } +<<<<<<< HEAD func newEngineContext(filters ...filter.Filter) engineContext { filters = append(filters, filter.StoreStateFilter{ActionScope: regionScatterName}) +======= +func newEngineContext(ctx context.Context, filters ...filter.Filter) engineContext { + filters = append(filters, &filter.StoreStateFilter{ActionScope: regionScatterName}) +>>>>>>> 6bf18ce0... metrics: optimize the store state filter metrics (#2946) return engineContext{ filters: filters, selectedPeer: newSelectedStores(true), @@ -278,7 +283,7 @@ func (r *RegionScatterer) selectPeerToReplace(group string, stores map[uint64]*c func (r *RegionScatterer) collectAvailableStores(group string, region *core.RegionInfo, context engineContext) map[uint64]*core.StoreInfo { filters := []filter.Filter{ filter.NewExcludedFilter(r.name, nil, region.GetStoreIds()), - filter.StoreStateFilter{ActionScope: r.name, MoveRegion: true}, + &filter.StoreStateFilter{ActionScope: r.name, MoveRegion: true}, } filters = append(filters, context.filters...) filters = append(filters, context.selectedPeer.newFilters(r.name, group)...) 
diff --git a/server/schedulers/balance_leader.go b/server/schedulers/balance_leader.go index da2a727e947f..e1ce1f67ed72 100644 --- a/server/schedulers/balance_leader.go +++ b/server/schedulers/balance_leader.go @@ -91,7 +91,7 @@ func newBalanceLeaderScheduler(opController *schedule.OperatorController, conf * option(s) } s.filters = []filter.Filter{ - filter.StoreStateFilter{ActionScope: s.GetName(), TransferLeader: true}, + &filter.StoreStateFilter{ActionScope: s.GetName(), TransferLeader: true}, filter.NewSpecialUseFilter(s.GetName()), } return s diff --git a/server/schedulers/balance_region.go b/server/schedulers/balance_region.go index f597897c3506..0a46395b09a1 100644 --- a/server/schedulers/balance_region.go +++ b/server/schedulers/balance_region.go @@ -91,7 +91,7 @@ func newBalanceRegionScheduler(opController *schedule.OperatorController, conf * setOption(scheduler) } scheduler.filters = []filter.Filter{ - filter.StoreStateFilter{ActionScope: scheduler.GetName(), MoveRegion: true}, + &filter.StoreStateFilter{ActionScope: scheduler.GetName(), MoveRegion: true}, filter.NewSpecialUseFilter(scheduler.GetName()), } return scheduler @@ -227,6 +227,21 @@ func (s *balanceRegionScheduler) transferPeer(cluster opt.Cluster, region *core. } exclude[target.GetID()] = struct{}{} // exclude next round. +<<<<<<< HEAD +======= + filters := []filter.Filter{ + filter.NewExcludedFilter(s.GetName(), nil, region.GetStoreIds()), + filter.NewPlacementSafeguard(s.GetName(), cluster, region, source), + filter.NewSpecialUseFilter(s.GetName()), + &filter.StoreStateFilter{ActionScope: s.GetName(), MoveRegion: true}, + } + + candidates := filter.NewCandidates(cluster.GetStores()). + FilterTarget(cluster.GetOpts(), filters...). + Sort(filter.RegionScoreComparer(cluster.GetOpts())) + + for _, target := range candidates.Stores { +>>>>>>> 6bf18ce0... metrics: optimize the store state filter metrics (#2946) regionID := region.GetID() sourceID := source.GetID() targetID := target.GetID() diff --git a/server/schedulers/evict_leader.go b/server/schedulers/evict_leader.go index d3ac87977a3c..6ab98f0bad71 100644 --- a/server/schedulers/evict_leader.go +++ b/server/schedulers/evict_leader.go @@ -227,7 +227,14 @@ func (s *evictLeaderScheduler) scheduleOnce(cluster opt.Cluster) []*operator.Ope schedulerCounter.WithLabelValues(s.GetName(), "no-leader").Inc() continue } +<<<<<<< HEAD target := s.selector.SelectTarget(cluster, cluster.GetFollowerStores(region)) +======= + + target := filter.NewCandidates(cluster.GetFollowerStores(region)). + FilterTarget(cluster.GetOpts(), &filter.StoreStateFilter{ActionScope: EvictLeaderName, TransferLeader: true}). + RandomPick() +>>>>>>> 6bf18ce0... 
metrics: optimize the store state filter metrics (#2946) if target == nil { schedulerCounter.WithLabelValues(s.GetName(), "no-target-store").Inc() continue diff --git a/server/schedulers/hot_region.go b/server/schedulers/hot_region.go index f8aa2ec3904c..542d5accc0bb 100644 --- a/server/schedulers/hot_region.go +++ b/server/schedulers/hot_region.go @@ -777,7 +777,7 @@ func (bs *balanceSolver) filterDstStores() map[uint64]*storeLoadDetail { } filters = []filter.Filter{ - filter.StoreStateFilter{ActionScope: bs.sche.GetName(), MoveRegion: true}, + &filter.StoreStateFilter{ActionScope: bs.sche.GetName(), MoveRegion: true}, filter.NewExcludedFilter(bs.sche.GetName(), bs.cur.region.GetStoreIds(), bs.cur.region.GetStoreIds()), filter.NewConnectedFilter(bs.sche.GetName()), filter.NewSpecialUseFilter(bs.sche.GetName(), filter.SpecialUseHotRegion), @@ -788,8 +788,12 @@ func (bs *balanceSolver) filterDstStores() map[uint64]*storeLoadDetail { case transferLeader: filters = []filter.Filter{ +<<<<<<< HEAD filter.StoreStateFilter{ActionScope: bs.sche.GetName(), TransferLeader: true}, filter.NewConnectedFilter(bs.sche.GetName()), +======= + &filter.StoreStateFilter{ActionScope: bs.sche.GetName(), TransferLeader: true}, +>>>>>>> 6bf18ce0... metrics: optimize the store state filter metrics (#2946) filter.NewSpecialUseFilter(bs.sche.GetName(), filter.SpecialUseHotRegion), } diff --git a/server/schedulers/label.go b/server/schedulers/label.go index ff7945dbd763..2e9e09454e07 100644 --- a/server/schedulers/label.go +++ b/server/schedulers/label.go @@ -125,7 +125,14 @@ func (s *labelScheduler) Schedule(cluster opt.Cluster) []*operator.Operator { excludeStores[p.GetStoreId()] = struct{}{} } f := filter.NewExcludedFilter(s.GetName(), nil, excludeStores) +<<<<<<< HEAD target := s.selector.SelectTarget(cluster, cluster.GetFollowerStores(region), f) +======= + + target := filter.NewCandidates(cluster.GetFollowerStores(region)). + FilterTarget(cluster.GetOpts(), &filter.StoreStateFilter{ActionScope: LabelName, TransferLeader: true}, f). + RandomPick() +>>>>>>> 6bf18ce0... metrics: optimize the store state filter metrics (#2946) if target == nil { log.Debug("label scheduler no target found for region", zap.Uint64("region-id", region.GetID())) schedulerCounter.WithLabelValues(s.GetName(), "no-target").Inc() diff --git a/server/schedulers/random_merge.go b/server/schedulers/random_merge.go index 447748d7d675..5b8cf8f1a6e0 100644 --- a/server/schedulers/random_merge.go +++ b/server/schedulers/random_merge.go @@ -103,8 +103,14 @@ func (s *randomMergeScheduler) IsScheduleAllowed(cluster opt.Cluster) bool { func (s *randomMergeScheduler) Schedule(cluster opt.Cluster) []*operator.Operator { schedulerCounter.WithLabelValues(s.GetName(), "schedule").Inc() +<<<<<<< HEAD stores := cluster.GetStores() store := s.selector.SelectSource(cluster, stores) +======= + store := filter.NewCandidates(cluster.GetStores()). + FilterSource(cluster.GetOpts(), &filter.StoreStateFilter{ActionScope: s.conf.Name, MoveRegion: true}). + RandomPick() +>>>>>>> 6bf18ce0... 
metrics: optimize the store state filter metrics (#2946) if store == nil { schedulerCounter.WithLabelValues(s.GetName(), "no-source-store").Inc() return nil diff --git a/server/schedulers/shuffle_hot_region.go b/server/schedulers/shuffle_hot_region.go index 75dfa7d2f5e0..c92fde9549f8 100644 --- a/server/schedulers/shuffle_hot_region.go +++ b/server/schedulers/shuffle_hot_region.go @@ -177,7 +177,7 @@ func (s *shuffleHotRegionScheduler) randomSchedule(cluster opt.Cluster, loadDeta } filters := []filter.Filter{ - filter.StoreStateFilter{ActionScope: s.GetName(), MoveRegion: true}, + &filter.StoreStateFilter{ActionScope: s.GetName(), MoveRegion: true}, filter.NewExcludedFilter(s.GetName(), srcRegion.GetStoreIds(), srcRegion.GetStoreIds()), scoreGuard, } diff --git a/server/schedulers/shuffle_leader.go b/server/schedulers/shuffle_leader.go index e3e935fa8b8e..9cad0eac01b9 100644 --- a/server/schedulers/shuffle_leader.go +++ b/server/schedulers/shuffle_leader.go @@ -72,7 +72,7 @@ type shuffleLeaderScheduler struct { // between stores. func newShuffleLeaderScheduler(opController *schedule.OperatorController, conf *shuffleLeaderSchedulerConfig) schedule.Scheduler { filters := []filter.Filter{ - filter.StoreStateFilter{ActionScope: conf.Name, TransferLeader: true}, + &filter.StoreStateFilter{ActionScope: conf.Name, TransferLeader: true}, filter.NewSpecialUseFilter(conf.Name), } base := NewBaseScheduler(opController) diff --git a/server/schedulers/shuffle_region.go b/server/schedulers/shuffle_region.go index aad6b4728eb0..2112cf458343 100644 --- a/server/schedulers/shuffle_region.go +++ b/server/schedulers/shuffle_region.go @@ -68,7 +68,7 @@ type shuffleRegionScheduler struct { // between stores. func newShuffleRegionScheduler(opController *schedule.OperatorController, conf *shuffleRegionSchedulerConfig) schedule.Scheduler { filters := []filter.Filter{ - filter.StoreStateFilter{ActionScope: ShuffleRegionName, MoveRegion: true}, + &filter.StoreStateFilter{ActionScope: ShuffleRegionName, MoveRegion: true}, filter.NewSpecialUseFilter(ShuffleRegionName), } base := NewBaseScheduler(opController)
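
The core of #2946 is that StoreStateFilter now records why it rejected a store: every condition function sets a Reason before evaluating its check, the checks run through anyConditionMatch, and Type() embeds the recorded reason in the filter name. A minimal, self-contained sketch of that pattern follows; the storeState type and its fields are stand-ins invented for the example, not PD types.

package main

import "fmt"

// storeState stands in for core.StoreInfo; its fields are invented for this
// sketch and are not PD types.
type storeState struct {
	tombstone   bool
	downSeconds int
	busy        bool
}

// stateFilter mirrors the shape StoreStateFilter takes in this patch: each
// condition records a Reason before evaluating, so when a store is rejected
// the filter name produced by Type() says which condition failed.
type stateFilter struct {
	AllowTemporaryStates bool
	Reason               string
}

func (f *stateFilter) isTombstone(s *storeState) bool {
	f.Reason = "tombstone"
	return s.tombstone
}

func (f *stateFilter) isDown(s *storeState) bool {
	f.Reason = "down"
	return s.downSeconds > 1800
}

func (f *stateFilter) isBusy(s *storeState) bool {
	f.Reason = "busy"
	return !f.AllowTemporaryStates && s.busy
}

// Target returns false as soon as one condition matches; Reason is only
// meaningful to the caller when the result is false.
func (f *stateFilter) Target(s *storeState) bool {
	for _, cond := range []func(*storeState) bool{f.isTombstone, f.isDown, f.isBusy} {
		if cond(s) {
			return false
		}
	}
	return true
}

// Type includes the rejection reason, so each cause becomes its own metric
// label instead of the single "store-state-filter" used before.
func (f *stateFilter) Type() string {
	return fmt.Sprintf("store-state-%s-filter", f.Reason)
}

func main() {
	f := &stateFilter{}
	fmt.Println(f.Target(&storeState{busy: true}), f.Type()) // false store-state-busy-filter
}

The pointer receiver matters here: Reason is mutated during the check, so the filter can no longer be passed by value, which is why every filter.StoreStateFilter{...} literal across the schedulers in this patch gains a leading &.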
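
The Reason only pays off once it reaches a metric. That wiring is not part of this diff, so the counter below is hypothetical — the metric name, labels, and registration are invented for illustration — but it shows how a per-reason Type() turns one opaque "store-state-filter" series into one series per rejection cause.

package main

import (
	"fmt"

	"github.com/prometheus/client_golang/prometheus"
	dto "github.com/prometheus/client_model/go"
)

// filterCounter is hypothetical; PD's real metric names and label sets are
// not shown in this diff.
var filterCounter = prometheus.NewCounterVec(
	prometheus.CounterOpts{
		Name: "example_filter_store_total",
		Help: "Stores rejected by filters, labelled by scope and filter type.",
	},
	[]string{"scope", "type"},
)

func main() {
	prometheus.MustRegister(filterCounter)

	// With Reason embedded in Type(), rejections by the same StoreStateFilter
	// split into distinct series such as "store-state-busy-filter" and
	// "store-state-down-filter".
	filterCounter.WithLabelValues("balance-region-scheduler", "store-state-busy-filter").Inc()
	filterCounter.WithLabelValues("balance-region-scheduler", "store-state-down-filter").Inc()

	var m dto.Metric
	_ = filterCounter.WithLabelValues("balance-region-scheduler", "store-state-busy-filter").Write(&m)
	fmt.Println(m.GetCounter().GetValue()) // 1
}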
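
Outside filters.go, most hunks swap the old Selector calls for the fluent filter.Candidates pipeline (NewCandidates, then FilterTarget or FilterSource, Sort, and RandomPick or PickFirst), and SelectStoreToAdd layers it into two stages: permissive filters plus sorting first, then a strict StoreStateFilter before PickFirst. The sketch below imitates that pipeline on plain slices; the candidates type and its methods are simplified stand-ins, not the real filter package API.

package main

import (
	"fmt"
	"math/rand"
	"sort"
)

// store is a stand-in for *core.StoreInfo; the fields are invented so the
// sketch stays self-contained.
type store struct {
	id           uint64
	regionScore  float64
	healthy      bool
	sendingSnaps int
}

// candidates imitates the fluent filter.Candidates pipeline this patch
// switches the schedulers to; the method names are simplified and do not
// match the real filter package signatures.
type candidates struct{ stores []store }

func newCandidates(ss []store) *candidates {
	return &candidates{stores: append([]store(nil), ss...)}
}

func (c *candidates) filterTarget(keep func(store) bool) *candidates {
	kept := make([]store, 0, len(c.stores))
	for _, s := range c.stores {
		if keep(s) {
			kept = append(kept, s)
		}
	}
	c.stores = kept
	return c
}

func (c *candidates) sortBy(less func(a, b store) bool) *candidates {
	sort.Slice(c.stores, func(i, j int) bool { return less(c.stores[i], c.stores[j]) })
	return c
}

func (c *candidates) pickFirst() *store {
	if len(c.stores) == 0 {
		return nil
	}
	return &c.stores[0]
}

func (c *candidates) randomPick() *store {
	if len(c.stores) == 0 {
		return nil
	}
	return &c.stores[rand.Intn(len(c.stores))]
}

func main() {
	stores := []store{
		{id: 1, regionScore: 30, healthy: true},
		{id: 2, regionScore: 10, healthy: false},
		{id: 3, regionScore: 20, healthy: true, sendingSnaps: 5},
	}

	// Two stages in the spirit of SelectStoreToAdd: permissive filtering and
	// sorting first, then a strict filter that also considers temporary states.
	target := newCandidates(stores).
		filterTarget(func(s store) bool { return s.healthy }).
		sortBy(func(a, b store) bool { return a.regionScore < b.regionScore }).
		filterTarget(func(s store) bool { return s.sendingSnaps <= 3 }).
		pickFirst()
	fmt.Println(target.id) // 1: store 3 scores better but is busy with snapshots

	// Schedulers such as evict-leader and label pick a random survivor instead.
	if t := newCandidates(stores).filterTarget(func(s store) bool { return s.healthy }).randomPick(); t != nil {
		fmt.Println(t.id)
	}
}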