Skip to content

Commit

Permalink
This is an automated cherry-pick of #3834
Browse files Browse the repository at this point in the history
Signed-off-by: HunDunDM <hundundm@gmail.com>
  • Loading branch information
HunDunDM committed Jul 16, 2021
1 parent 11497a4 commit fba249d
Show file tree
Hide file tree
Showing 3 changed files with 6 additions and 36 deletions.
29 changes: 1 addition & 28 deletions server/schedulers/hot_region.go
Original file line number Diff line number Diff line change
Expand Up @@ -159,22 +159,6 @@ func (h *hotScheduler) GetNextInterval(interval time.Duration) time.Duration {
}

func (h *hotScheduler) IsScheduleAllowed(cluster opt.Cluster) bool {
return h.allowBalanceLeader(cluster) || h.allowBalanceRegion(cluster)
}

func (h *hotScheduler) allowBalanceLeader(cluster opt.Cluster) bool {
hotRegionAllowed := h.OpController.OperatorCount(operator.OpHotRegion) < cluster.GetHotRegionScheduleLimit()
leaderAllowed := h.OpController.OperatorCount(operator.OpLeader) < cluster.GetLeaderScheduleLimit()
if !hotRegionAllowed {
operator.OperatorLimitCounter.WithLabelValues(h.GetType(), operator.OpHotRegion.String()).Inc()
}
if !leaderAllowed {
operator.OperatorLimitCounter.WithLabelValues(h.GetType(), operator.OpLeader.String()).Inc()
}
return hotRegionAllowed && leaderAllowed
}

func (h *hotScheduler) allowBalanceRegion(cluster opt.Cluster) bool {
allowed := h.OpController.OperatorCount(operator.OpHotRegion) < cluster.GetHotRegionScheduleLimit()
if !allowed {
operator.OperatorLimitCounter.WithLabelValues(h.GetType(), operator.OpHotRegion.String()).Inc()
Expand Down Expand Up @@ -581,7 +565,7 @@ func (bs *balanceSolver) isValid() bool {
}

func (bs *balanceSolver) solve() []*operator.Operator {
if !bs.isValid() || !bs.allowBalance() {
if !bs.isValid() {
return nil
}
bs.cur = &solution{}
Expand Down Expand Up @@ -624,17 +608,6 @@ func (bs *balanceSolver) solve() []*operator.Operator {
return ops
}

func (bs *balanceSolver) allowBalance() bool {
switch bs.opTy {
case movePeer:
return bs.sche.allowBalanceRegion(bs.cluster)
case transferLeader:
return bs.sche.allowBalanceLeader(bs.cluster)
default:
return false
}
}

func (bs *balanceSolver) filterSrcStores() map[uint64]*storeLoadDetail {
ret := make(map[uint64]*storeLoadDetail)
for id, detail := range bs.stLoadDetail {
Expand Down
9 changes: 3 additions & 6 deletions server/schedulers/hot_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -196,12 +196,10 @@ func (s *testHotWriteRegionSchedulerSuite) checkByteRateOnly(c *C, tc *mockclust

// hot region scheduler is restricted by `hot-region-schedule-limit`.
opt.HotRegionScheduleLimit = 0
c.Assert(hb.Schedule(tc), HasLen, 0)
c.Assert(hb.IsScheduleAllowed(tc), IsFalse)
hb.(*hotScheduler).clearPendingInfluence()
opt.HotRegionScheduleLimit = mockoption.NewScheduleOptions().HotRegionScheduleLimit

// hot region scheduler is restricted by schedule limit.
opt.LeaderScheduleLimit = 0
for i := 0; i < 20; i++ {
op := hb.Schedule(tc)[0]
hb.(*hotScheduler).clearPendingInfluence()
Expand All @@ -214,7 +212,6 @@ func (s *testHotWriteRegionSchedulerSuite) checkByteRateOnly(c *C, tc *mockclust
testutil.CheckTransferPeerWithLeaderTransfer(c, op, operator.OpHotRegion, 1, 6)
}
}
opt.LeaderScheduleLimit = mockoption.NewScheduleOptions().LeaderScheduleLimit

// hot region scheduler is not affected by `balance-region-schedule-limit`.
opt.RegionScheduleLimit = 0
Expand Down Expand Up @@ -265,7 +262,7 @@ func (s *testHotWriteRegionSchedulerSuite) checkByteRateOnly(c *C, tc *mockclust
// Region 1 and 2 are the same, cannot move peer to store 5 due to the label.
// Region 3 can only move peer to store 5.
// Region 5 can only move peer to store 6.
opt.LeaderScheduleLimit = 0
opt.HotRegionScheduleLimit = 0
for i := 0; i < 30; i++ {
op := hb.Schedule(tc)[0]
hb.(*hotScheduler).clearPendingInfluence()
Expand Down Expand Up @@ -443,7 +440,7 @@ func (s *testHotWriteRegionSchedulerSuite) TestWithPendingInfluence(c *C) {
hb, err := schedule.CreateScheduler(HotWriteRegionType, schedule.NewOperatorController(ctx, nil, nil), core.NewStorage(kv.NewMemoryKV()), nil)
c.Assert(err, IsNil)
opt.HotRegionCacheHitsThreshold = 0
opt.LeaderScheduleLimit = 0
opt.HotRegionScheduleLimit = 0
for i := 0; i < 2; i++ {
// 0: byte rate
// 1: key rate
Expand Down
4 changes: 2 additions & 2 deletions server/schedulers/scheduler_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -311,7 +311,7 @@ func (s *testHotRegionSchedulerSuite) TestAbnormalReplica(c *C) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
opt := mockoption.NewScheduleOptions()
opt.LeaderScheduleLimit = 0
opt.HotRegionScheduleLimit = 0
tc := mockcluster.NewCluster(opt)
hb, err := schedule.CreateScheduler(HotReadRegionType, schedule.NewOperatorController(ctx, nil, nil), core.NewStorage(kv.NewMemoryKV()), nil)
c.Assert(err, IsNil)
Expand All @@ -330,7 +330,7 @@ func (s *testHotRegionSchedulerSuite) TestAbnormalReplica(c *C) {
tc.AddLeaderRegionWithReadInfo(3, 1, 512*KB*statistics.RegionHeartBeatReportInterval, 0, statistics.RegionHeartBeatReportInterval, []uint64{2, 3})
opt.HotRegionCacheHitsThreshold = 0
c.Assert(tc.IsRegionHot(tc.GetRegion(1)), IsTrue)
c.Assert(hb.Schedule(tc), IsNil)
c.Assert(hb.IsScheduleAllowed(tc), IsFalse)
}

var _ = Suite(&testEvictLeaderSuite{})
Expand Down

0 comments on commit fba249d

Please sign in to comment.