*: remove functions which are not useful anymore (#4110)
Signed-off-by: Ryan Leung <rleungx@gmail.com>

Co-authored-by: Ti Chi Robot <ti-community-prow-bot@tidb.io>
rleungx and ti-chi-bot authored Sep 13, 2021
1 parent 3f53f1e commit 5b1896e
Showing 13 changed files with 20 additions and 57 deletions.
2 changes: 1 addition & 1 deletion pkg/cache/cache_test.go
@@ -324,7 +324,7 @@ func (s *testRegionCacheSuite) TestPriorityQueue(c *C) {
c.Assert(pq.Get(4), IsNil)
c.Assert(pq.Len(), Equals, 3)

- // case1 test getAll ,the highest element should be the first
+ // case1 test getAll, the highest element should be the first
entries := pq.Elems()
c.Assert(len(entries), Equals, 3)
c.Assert(entries[0].Priority, Equals, 1)
8 changes: 4 additions & 4 deletions server/cluster/cluster.go
@@ -1422,15 +1422,15 @@ func (c *RaftCluster) IsFeatureSupported(f versioninfo.Feature) bool {
return versioninfo.IsCompatible(minSupportVersion, clusterVersion)
}

- // GetConfig gets config from cluster.
- func (c *RaftCluster) GetConfig() *metapb.Cluster {
+ // GetMetaCluster gets meta cluster.
+ func (c *RaftCluster) GetMetaCluster() *metapb.Cluster {
c.RLock()
defer c.RUnlock()
return proto.Clone(c.meta).(*metapb.Cluster)
}

- // PutConfig puts config into cluster.
- func (c *RaftCluster) PutConfig(meta *metapb.Cluster) error {
+ // PutMetaCluster puts meta cluster.
+ func (c *RaftCluster) PutMetaCluster(meta *metapb.Cluster) error {
c.Lock()
defer c.Unlock()
if meta.GetId() != c.clusterID {
2 changes: 1 addition & 1 deletion server/core/basic_cluster.go
@@ -368,7 +368,7 @@ func (bc *BasicCluster) PutRegion(region *RegionInfo) []*RegionInfo {
return bc.Regions.SetRegion(region)
}

- // CheckAndPutRegion checks if the region is valid to put,if valid then put.
+ // CheckAndPutRegion checks if the region is valid to put, if valid then put.
func (bc *BasicCluster) CheckAndPutRegion(region *RegionInfo) []*RegionInfo {
origin, err := bc.PreCheckPutRegion(region)
if err != nil {
2 changes: 1 addition & 1 deletion server/core/hot_region_storage.go
@@ -311,7 +311,7 @@ type HotRegionStorageIterator struct {

// Next moves the iterator to the next key/value pair.
// And return historyHotRegion which it is now pointing to.
- // it will return (nil,nil),if there is no more historyHotRegion.
+ // it will return (nil, nil), if there is no more historyHotRegion.
func (it *HotRegionStorageIterator) Next() (*HistoryHotRegion, error) {
iter := it.iters[0]
for !iter.Next() {
4 changes: 2 additions & 2 deletions server/grpc_service.go
@@ -925,7 +925,7 @@ func (s *Server) GetClusterConfig(ctx context.Context, request *pdpb.GetClusterC
}
return &pdpb.GetClusterConfigResponse{
Header: s.header(),
- Cluster: rc.GetConfig(),
+ Cluster: rc.GetMetaCluster(),
}, nil
}

@@ -950,7 +950,7 @@ func (s *Server) PutClusterConfig(ctx context.Context, request *pdpb.PutClusterC
return &pdpb.PutClusterConfigResponse{Header: s.notBootstrappedHeader()}, nil
}
conf := request.GetCluster()
- if err := rc.PutConfig(conf); err != nil {
+ if err := rc.PutMetaCluster(conf); err != nil {
return nil, status.Errorf(codes.Unknown, err.Error())
}

2 changes: 1 addition & 1 deletion server/schedule/checker/priority_checker.go
@@ -100,7 +100,7 @@ func (p *PriorityChecker) checkRegionInReplica(region *core.RegionInfo) (makeupC
return p.opts.GetMaxReplicas() - len(region.GetPeers())
}

- // addOrRemoveRegion add or remove region from queue
+ // addOrRemoveRegion add or remove region from queue
// it will remove if region's priority equal 0
// it's Attempt will increase if region's priority equal last
func (p *PriorityChecker) addOrRemoveRegion(priority int, regionID uint64) {
2 changes: 1 addition & 1 deletion server/schedule/checker/replica_checker_test.go
@@ -340,7 +340,7 @@ func (s *testReplicaCheckerSuite) TestOffline(c *C) {
// Transfer peer to store 4.
testutil.CheckTransferPeer(c, rc.Check(region), operator.OpReplica, 3, 4)

- // Store 5 has a same label score with store 4,but the region score smaller than store 4, we will choose store 5.
+ // Store 5 has a same label score with store 4, but the region score smaller than store 4, we will choose store 5.
tc.AddLabelsStore(5, 3, map[string]string{"zone": "z4", "rack": "r1", "host": "h1"})
testutil.CheckTransferPeer(c, rc.Check(region), operator.OpReplica, 3, 5)
// Store 5 has too many snapshots, choose store 4
8 changes: 0 additions & 8 deletions server/schedule/operator_controller.go
@@ -957,11 +957,3 @@ func (oc *OperatorController) getOrCreateStoreLimit(storeID uint64, limitType st
}
return oc.storesLimit[storeID][limitType]
}
-
- // GetLeaderSchedulePolicy is to get leader schedule policy.
- func (oc *OperatorController) GetLeaderSchedulePolicy() core.SchedulePolicy {
- if oc.cluster == nil {
- return core.ByCount
- }
- return oc.cluster.GetOpts().GetLeaderSchedulePolicy()
- }
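
With OperatorController.GetLeaderSchedulePolicy removed, callers now read the policy from the cluster options directly, as the balance_leader.go hunks below do. A minimal sketch of the replacement pattern, assuming a hypothetical fetchLeaderSchedulePolicy helper (not part of this commit) and a cluster value that implements opt.Cluster:

package schedulers // sketch only, not part of this commit

import (
	"github.com/tikv/pd/server/core"
	"github.com/tikv/pd/server/schedule/opt"
)

// fetchLeaderSchedulePolicy mirrors what the removed wrapper did: read the
// leader schedule policy from the cluster options, falling back to ByCount
// when no cluster is attached.
func fetchLeaderSchedulePolicy(cluster opt.Cluster) core.SchedulePolicy {
	if cluster == nil {
		return core.ByCount
	}
	return cluster.GetOpts().GetLeaderSchedulePolicy()
}
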
4 changes: 2 additions & 2 deletions server/schedulers/balance_leader.go
@@ -140,7 +140,7 @@ func (l *balanceLeaderScheduler) IsScheduleAllowed(cluster opt.Cluster) bool {
func (l *balanceLeaderScheduler) Schedule(cluster opt.Cluster) []*operator.Operator {
schedulerCounter.WithLabelValues(l.GetName(), "schedule").Inc()

- leaderSchedulePolicy := l.opController.GetLeaderSchedulePolicy()
+ leaderSchedulePolicy := cluster.GetOpts().GetLeaderSchedulePolicy()
opInfluence := l.opController.GetOpInfluence(cluster)
kind := core.NewScheduleKind(core.LeaderKind, leaderSchedulePolicy)
plan := newBalancePlan(kind, cluster, opInfluence)
@@ -215,7 +215,7 @@ func (l *balanceLeaderScheduler) transferLeaderOut(plan *balancePlan) []*operato
finalFilters = append(l.filters, leaderFilter)
}
targets = filter.SelectTargetStores(targets, finalFilters, plan.cluster.GetOpts())
- leaderSchedulePolicy := l.opController.GetLeaderSchedulePolicy()
+ leaderSchedulePolicy := plan.cluster.GetOpts().GetLeaderSchedulePolicy()
sort.Slice(targets, func(i, j int) bool {
iOp := plan.GetOpInfluence(targets[i].GetID())
jOp := plan.GetOpInfluence(targets[j].GetID())
16 changes: 0 additions & 16 deletions server/schedulers/balance_test.go
@@ -17,7 +17,6 @@ package schedulers
import (
"context"
"fmt"
"math"
"math/rand"

. "github.com/pingcap/check"
@@ -146,21 +145,6 @@ func (s *testBalanceSuite) TestShouldBalance(c *C) {
}
}

- func (s *testBalanceSuite) TestBalanceLimit(c *C) {
- opt := config.NewTestOptions()
- tc := mockcluster.NewCluster(s.ctx, opt)
- tc.AddLeaderStore(1, 10)
- tc.AddLeaderStore(2, 20)
- tc.AddLeaderStore(3, 30)
-
- // StandDeviation is sqrt((10^2+0+10^2)/3).
- c.Assert(adjustBalanceLimit(tc, core.LeaderKind), Equals, uint64(math.Sqrt(200.0/3.0)))
-
- tc.SetStoreOffline(1)
- // StandDeviation is sqrt((5^2+5^2)/2).
- c.Assert(adjustBalanceLimit(tc, core.LeaderKind), Equals, uint64(math.Sqrt(50.0/2.0)))
- }

func (s *testBalanceSuite) TestTolerantRatio(c *C) {
opt := config.NewTestOptions()
tc := mockcluster.NewCluster(s.ctx, opt)
11 changes: 6 additions & 5 deletions server/schedulers/hot_region.go
@@ -78,11 +78,12 @@ const (
maxHotScheduleInterval = 20 * time.Second
)

- // schedulePeerPr the probability of schedule the hot peer.
- var schedulePeerPr = 0.66
-
- // pendingAmpFactor will amplify the impact of pending influence, making scheduling slower or even serial when two stores are close together
- var pendingAmpFactor = 2.0
+ var (
+ // schedulePeerPr the probability of schedule the hot peer.
+ schedulePeerPr = 0.66
+ // pendingAmpFactor will amplify the impact of pending influence, making scheduling slower or even serial when two stores are close together
+ pendingAmpFactor = 2.0
+ )

type hotScheduler struct {
name string
14 changes: 0 additions & 14 deletions server/schedulers/utils.go
@@ -21,10 +21,8 @@ import (
"strconv"
"time"

"github.com/montanaflynn/stats"
"github.com/pingcap/log"
"github.com/tikv/pd/pkg/errs"
"github.com/tikv/pd/pkg/typeutil"
"github.com/tikv/pd/server/core"
"github.com/tikv/pd/server/schedule/operator"
"github.com/tikv/pd/server/schedule/opt"
@@ -176,18 +174,6 @@ func adjustTolerantRatio(cluster opt.Cluster, kind core.ScheduleKind) float64 {
return tolerantSizeRatio
}

- func adjustBalanceLimit(cluster opt.Cluster, kind core.ResourceKind) uint64 {
- stores := cluster.GetStores()
- counts := make([]float64, 0, len(stores))
- for _, s := range stores {
- if s.IsUp() {
- counts = append(counts, float64(s.ResourceCount(kind)))
- }
- }
- limit, _ := stats.StandardDeviation(counts)
- return typeutil.MaxUint64(1, uint64(limit))
- }

func getKeyRanges(args []string) ([]core.KeyRange, error) {
var ranges []core.KeyRange
for len(args) > 1 {
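
For reference, the removed adjustBalanceLimit (exercised by the TestBalanceLimit case deleted from balance_test.go above) took the population standard deviation of the per-store resource counts and floored the result at 1. A self-contained sketch of that arithmetic with the leader counts from the deleted test (10, 20, 30), where the expected limit is uint64(math.Sqrt(200.0/3.0)) = 8:

package main

import (
	"fmt"
	"math"
)

func main() {
	// Leader counts per store, as in the deleted TestBalanceLimit.
	counts := []float64{10, 20, 30}

	mean := 0.0
	for _, c := range counts {
		mean += c
	}
	mean /= float64(len(counts))

	// Population standard deviation: sqrt(((10-20)^2 + 0 + (30-20)^2) / 3).
	sum := 0.0
	for _, c := range counts {
		sum += (c - mean) * (c - mean)
	}
	stddev := math.Sqrt(sum / float64(len(counts)))

	// The removed helper returned the larger of 1 and the standard deviation.
	limit := uint64(math.Max(1, stddev))
	fmt.Println(limit) // 8
}
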
2 changes: 1 addition & 1 deletion tests/server/cluster/cluster_test.go
@@ -702,7 +702,7 @@ func (s *clusterTestSuite) TestLoadClusterInfo(c *C) {
c.Assert(raftCluster, NotNil)

// Check meta, stores, and regions.
- c.Assert(raftCluster.GetConfig(), DeepEquals, meta)
+ c.Assert(raftCluster.GetMetaCluster(), DeepEquals, meta)
c.Assert(raftCluster.GetStoreCount(), Equals, n)
for _, store := range raftCluster.GetMetaStores() {
c.Assert(store, DeepEquals, stores[store.GetId()])
