From 2a4fe6daed680c0ec89a9dcee500a6bae3221dbf Mon Sep 17 00:00:00 2001
From: qiuyesuifeng
Date: Thu, 30 Jun 2016 21:30:46 +0800
Subject: [PATCH] server: add max one peer count balance support.

---
 server/balancer.go      | 21 ++++++++++++++++-----
 server/balancer_test.go | 45 +++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 61 insertions(+), 5 deletions(-)

diff --git a/server/balancer.go b/server/balancer.go
index b63fc6abf8b..6ba91c308de 100644
--- a/server/balancer.go
+++ b/server/balancer.go
@@ -247,6 +247,22 @@ func (rb *resourceBalancer) selectRemovePeer(cluster *clusterInfo, peers map[uin
 }
 
 func (rb *resourceBalancer) doLeaderBalance(cluster *clusterInfo, stores []*storeInfo, region *metapb.Region, leader *metapb.Peer, newPeer *metapb.Peer) (*balanceOperator, error) {
+	if !rb.checkScore(cluster, leader, newPeer) {
+		return nil, nil
+	}
+
+	regionID := region.GetId()
+
+	// If cluster max peer count config is 1, we cannot do leader transfer,
+	// only need to add new peer and remove leader peer.
+	meta := cluster.getMeta()
+	if meta.GetMaxPeerCount() == 1 {
+		addPeerOperator := newAddPeerOperator(regionID, newPeer)
+		removePeerOperator := newRemovePeerOperator(regionID, leader)
+
+		return newBalanceOperator(region, addPeerOperator, removePeerOperator), nil
+	}
+
 	followerPeers, _ := getFollowerPeers(region, leader)
 	newLeader := rb.selectNewLeaderPeer(cluster, followerPeers)
 	if newLeader == nil {
@@ -254,11 +270,6 @@ func (rb *resourceBalancer) doLeaderBalance(cluster *clusterInfo, stores []*stor
 		return nil, nil
 	}
 
-	if !rb.checkScore(cluster, leader, newPeer) {
-		return nil, nil
-	}
-
-	regionID := region.GetId()
 	leaderTransferOperator := newTransferLeaderOperator(regionID, leader, newLeader, maxWaitCount)
 	addPeerOperator := newAddPeerOperator(regionID, newPeer)
 	removePeerOperator := newRemovePeerOperator(regionID, leader)
diff --git a/server/balancer_test.go b/server/balancer_test.go
index cc992a68b4d..c4da649400a 100644
--- a/server/balancer_test.go
+++ b/server/balancer_test.go
@@ -330,4 +330,49 @@ func (s *testBalancerSuite) TestResourceBalancer(c *C) {
 	bop, err = cb.Balance(clusterInfo)
 	c.Assert(err, IsNil)
 	c.Assert(bop, IsNil)
+
+	// If cluster max peer count config is 1, we can only add peer and remove leader peer.
+	s.updateStore(c, clusterInfo, 1, 100, 60, 0, 0)
+	s.updateStore(c, clusterInfo, 2, 100, 70, 0, 0)
+	s.updateStore(c, clusterInfo, 3, 100, 80, 0, 0)
+	s.updateStore(c, clusterInfo, 4, 100, 90, 0, 0)
+
+	// Set cluster config.
+	oldMeta := clusterInfo.getMeta()
+	meta := &metapb.Cluster{
+		Id:           proto.Uint64(0),
+		MaxPeerCount: proto.Uint32(1),
+	}
+	clusterInfo.setMeta(meta)
+
+	testCfg.MinCapacityUsedRatio = 0.3
+	testCfg.MaxCapacityUsedRatio = 0.9
+	cb = newResourceBalancer(testCfg)
+	bop, err = cb.Balance(clusterInfo)
+	c.Assert(err, IsNil)
+	c.Assert(bop, IsNil)
+
+	// Set region peers to one peer.
+	peers := region.GetPeers()
+	region.Peers = []*metapb.Peer{leaderPeer}
+	clusterInfo.regions.updateRegion(region)
+
+	cb = newResourceBalancer(testCfg)
+	bop, err = cb.Balance(clusterInfo)
+	c.Assert(err, IsNil)
+	c.Assert(bop, NotNil)
+
+	newOp1 = bop.Ops[0].(*changePeerOperator)
+	c.Assert(newOp1.ChangePeer.GetChangeType(), Equals, raftpb.ConfChangeType_AddNode)
+	c.Assert(newOp1.ChangePeer.GetPeer().GetStoreId(), Equals, uint64(4))
+
+	newOp2 = bop.Ops[1].(*changePeerOperator)
+	c.Assert(newOp2.ChangePeer.GetChangeType(), Equals, raftpb.ConfChangeType_RemoveNode)
+	c.Assert(newOp2.ChangePeer.GetPeer().GetStoreId(), Equals, uint64(1))
+
+	// Reset cluster config and region peers.
+	clusterInfo.setMeta(oldMeta)
+
+	region.Peers = peers
+	clusterInfo.regions.updateRegion(region)
 }