Commit

Merge branch 'release-6.1' into cherry-pick-6925-to-release-6.1
rleungx authored Sep 11, 2024
2 parents 7332050 + 5c8d3f3 commit 0c7d082
Showing 47 changed files with 834 additions and 226 deletions.
26 changes: 26 additions & 0 deletions OWNERS
@@ -0,0 +1,26 @@
# See the OWNERS docs at https://go.k8s.io/owners
approvers:
- AndreMouche
- binshi-bing
- bufferflies
- CabinfeverB
- Connor1996
- disksing
- huachaohuang
- HunDunDM
- HuSharp
- JmPotato
- lhy1024
- nolouch
- overvenus
- qiuyesuifeng
- rleungx
- siddontang
- Yisaer
- zhouqiang-cl
reviewers:
- BusyJay
- howardlau1999
- Luffbee
- shafreeck
- xhebox
6 changes: 6 additions & 0 deletions OWNERS_ALIASES
@@ -0,0 +1,6 @@
# Sort the members alphabetically.
aliases:
  sig-critical-approvers-config:
    - easonn7
    - kevin-xianliu
    - niubell
7 changes: 7 additions & 0 deletions client/resource_group/controller/OWNERS
@@ -0,0 +1,7 @@
# See the OWNERS docs at https://go.k8s.io/owners
options:
  no_parent_owners: true
filters:
  "(OWNERS|config\\.go)$":
    approvers:
      - sig-critical-approvers-config
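
Note: each of the OWNERS files below uses the same guard: a filter whose key is a regular expression over changed file paths, routing approval for matching files to the sig-critical-approvers-config alias. A small self-contained sketch of how such a pattern matches (standard Go regexp semantics; the file paths are illustrative):

```go
package main

import (
	"fmt"
	"regexp"
)

func main() {
	// The filter key is a regular expression matched against changed paths;
	// "$" anchors it to the end, so only OWNERS and config.go files hit it.
	re := regexp.MustCompile(`(OWNERS|config\.go)$`)
	for _, path := range []string{
		"client/resource_group/controller/config.go",  // matches
		"client/resource_group/controller/OWNERS",     // matches
		"client/resource_group/controller/limiter.go", // no match
	} {
		fmt.Printf("%-46s %v\n", path, re.MatchString(path))
	}
}
```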
7 changes: 7 additions & 0 deletions client/tlsutil/OWNERS
@@ -0,0 +1,7 @@
# See the OWNERS docs at https://go.k8s.io/owners
options:
  no_parent_owners: true
filters:
  "(OWNERS|tlsconfig\\.go)$":
    approvers:
      - sig-critical-approvers-config
7 changes: 7 additions & 0 deletions conf/OWNERS
@@ -0,0 +1,7 @@
# See the OWNERS docs at https://go.k8s.io/owners
options:
  no_parent_owners: true
filters:
  "(OWNERS|config\\.toml)$":
    approvers:
      - sig-critical-approvers-config
7 changes: 7 additions & 0 deletions pkg/encryption/OWNERS
@@ -0,0 +1,7 @@
# See the OWNERS docs at https://go.k8s.io/owners
options:
  no_parent_owners: true
filters:
  "(OWNERS|config\\.go)$":
    approvers:
      - sig-critical-approvers-config
7 changes: 7 additions & 0 deletions pkg/mcs/resourcemanager/server/OWNERS
@@ -0,0 +1,7 @@
# See the OWNERS docs at https://go.k8s.io/owners
options:
  no_parent_owners: true
filters:
  "(OWNERS|config\\.go)$":
    approvers:
      - sig-critical-approvers-config
7 changes: 7 additions & 0 deletions pkg/mcs/scheduling/server/config/OWNERS
@@ -0,0 +1,7 @@
# See the OWNERS docs at https://go.k8s.io/owners
options:
  no_parent_owners: true
filters:
  "(OWNERS|config\\.go)$":
    approvers:
      - sig-critical-approvers-config
7 changes: 7 additions & 0 deletions pkg/mcs/tso/server/OWNERS
@@ -0,0 +1,7 @@
# See the OWNERS docs at https://go.k8s.io/owners
options:
  no_parent_owners: true
filters:
  "(OWNERS|config\\.go)$":
    approvers:
      - sig-critical-approvers-config
2 changes: 1 addition & 1 deletion pkg/mock/mockcluster/mockcluster.go
@@ -188,7 +188,7 @@ func (mc *Cluster) AllocPeer(storeID uint64) (*metapb.Peer, error) {
 func (mc *Cluster) initRuleManager() {
 	if mc.RuleManager == nil {
 		mc.RuleManager = placement.NewRuleManager(storage.NewStorageWithMemoryBackend(), mc, mc.GetOpts())
-		mc.RuleManager.Initialize(int(mc.GetReplicationConfig().MaxReplicas), mc.GetReplicationConfig().LocationLabels)
+		mc.RuleManager.Initialize(int(mc.GetReplicationConfig().MaxReplicas), mc.GetReplicationConfig().LocationLabels, mc.GetReplicationConfig().IsolationLevel)
 	}
 }

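Note: this call site, and the matching ones in server/api, server/cluster, and the tests below, all thread a new isolation-level argument through RuleManager.Initialize. The updated signature is therefore assumed to be roughly the following (inferred from the call sites; the real method lives in the placement package and is not part of this diff):

```go
package placement

// Assumed shape of the updated method, inferred from the call sites in this
// commit; the isolation level presumably ends up on the default placement
// rule next to the location labels.
type ruleManagerInitializer interface {
	Initialize(maxReplicas int, locationLabels []string, isolationLevel string) error
}
```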
7 changes: 7 additions & 0 deletions pkg/schedule/config/OWNERS
@@ -0,0 +1,7 @@
# See the OWNERS docs at https://go.k8s.io/owners
options:
  no_parent_owners: true
filters:
  "(OWNERS|(config|store_config)\\.go)$":
    approvers:
      - sig-critical-approvers-config
7 changes: 7 additions & 0 deletions pkg/schedule/schedulers/OWNERS
@@ -0,0 +1,7 @@
# See the OWNERS docs at https://go.k8s.io/owners
options:
  no_parent_owners: true
filters:
  "(OWNERS|hot_region_config\\.go)$":
    approvers:
      - sig-critical-approvers-config
17 changes: 13 additions & 4 deletions pkg/typeutil/size_test.go
@@ -17,6 +17,7 @@ package typeutil
 import (
 	"encoding/json"
 
+	"github.com/docker/go-units"
 	. "github.com/pingcap/check"
 )

@@ -41,23 +42,31 @@ func (s *testSizeSuite) TestJSON(c *C) {
 }
 
 func (s *testSizeSuite) TestParseMbFromText(c *C) {
+	const defaultValue = 2
+
 	testdata := []struct {
 		body []string
 		size uint64
 	}{{
 		body: []string{"10Mib", "10MiB", "10M", "10MB"},
-		size: uint64(10),
+		size: 10,
 	}, {
 		body: []string{"10GiB", "10Gib", "10G", "10GB"},
-		size: uint64(10 * 1024),
+		size: 10 * units.GiB / units.MiB,
 	}, {
+		body: []string{"1024KiB", "1048576"},
+		size: 1,
+	}, {
+		body: []string{"100KiB", "1023KiB", "1048575", "0"},
+		size: 0,
+	}, {
 		body: []string{"10yiB", "10aib"},
-		size: uint64(1),
+		size: defaultValue,
 	}}
 
 	for _, t := range testdata {
 		for _, b := range t.body {
-			c.Assert(int(ParseMBFromText(b, 1)), Equals, int(t.size))
+			c.Assert(ParseMBFromText(b, defaultValue), Equals, t.size)
 		}
 	}
 }
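
Note: the new cases pin down ParseMBFromText's contract: sizes are parsed as binary units, truncated to whole mebibytes, and unparseable input falls back to the caller's default. A minimal re-implementation consistent with these cases (an illustrative sketch built on docker/go-units, not the actual pkg/typeutil code):

```go
package main

import (
	"fmt"

	"github.com/docker/go-units"
)

// parseMBFromText mirrors the behavior the test expects: parse a
// human-readable size, truncate to whole MiB, and fall back to
// defaultValue when the input cannot be parsed.
func parseMBFromText(s string, defaultValue uint64) uint64 {
	b, err := units.RAMInBytes(s) // accepts "10M", "10MiB", "1048576", ...
	if err != nil || b < 0 {
		return defaultValue
	}
	return uint64(b) / units.MiB
}

func main() {
	fmt.Println(parseMBFromText("10GB", 2))    // 10240
	fmt.Println(parseMBFromText("1023KiB", 2)) // 0 (truncated)
	fmt.Println(parseMBFromText("10yiB", 2))   // 2 (default)
}
```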
35 changes: 33 additions & 2 deletions server/api/label_test.go
@@ -17,13 +17,15 @@ package api
 import (
 	"context"
 	"fmt"
+	"strings"
 
 	. "github.com/pingcap/check"
 	"github.com/pingcap/kvproto/pkg/metapb"
 	"github.com/pingcap/kvproto/pkg/pdpb"
 	tu "github.com/tikv/pd/pkg/testutil"
 	"github.com/tikv/pd/server"
 	"github.com/tikv/pd/server/config"
+	"github.com/tikv/pd/server/core"
 )
 
 var _ = Suite(&testLabelsStoreSuite{})
@@ -264,19 +266,47 @@ func (s *testStrictlyLabelsStoreSuite) TestStoreMatch(c *C) {
 			valid:       false,
 			expectError: "key matching the label was not found",
 		},
+		{
+			store: &metapb.Store{
+				Id:      3,
+				Address: "tiflash1",
+				State:   metapb.StoreState_Up,
+				Labels: []*metapb.StoreLabel{
+					{
+						Key:   "zone",
+						Value: "us-west-1",
+					},
+					{
+						Key:   "disk",
+						Value: "ssd",
+					},
+					{
+						Key:   core.EngineKey,
+						Value: core.EngineTiFlash,
+					},
+				},
+				Version: "3.0.0",
+			},
+			valid:       true,
+			expectError: "placement rules is disabled",
+		},
 	}
 
 	for _, t := range cases {
 		resp, err := s.grpcSvr.PutStore(context.Background(), &pdpb.PutStoreRequest{
 			Header: &pdpb.RequestHeader{ClusterId: s.svr.ClusterID()},
 			Store: &metapb.Store{
 				Id:      t.store.Id,
-				Address: fmt.Sprintf("tikv%d", t.store.Id),
+				Address: t.store.Address,
 				State:   t.store.State,
 				Labels:  t.store.Labels,
 				Version: t.store.Version,
 			},
 		})
+		if t.store.Address == "tiflash1" {
+			c.Assert(strings.Contains(resp.GetHeader().GetError().String(), t.expectError), IsTrue)
+			continue
+		}
 		if t.valid {
 			c.Assert(err, IsNil)
 		} else {
@@ -291,12 +321,13 @@ func (s *testStrictlyLabelsStoreSuite) TestStoreMatch(c *C) {
 			Header: &pdpb.RequestHeader{ClusterId: s.svr.ClusterID()},
 			Store: &metapb.Store{
 				Id:      t.store.Id,
-				Address: fmt.Sprintf("tikv%d", t.store.Id),
+				Address: t.store.Address,
 				State:   t.store.State,
 				Labels:  t.store.Labels,
 				Version: t.store.Version,
 			},
 		})
+
 		if t.valid {
 			c.Assert(err, IsNil)
 		} else {
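Note: the new test case relies on the engine label that the cluster change below (in checkStoreLabels) starts to skip. The constants are assumed to be defined in server/core along these lines (not shown in this diff):

```go
package core

// Assumed definitions, inferred from their use above: stores labeled
// engine=tiflash are special-cased instead of being matched against the
// user-configured location labels.
const (
	EngineKey     = "engine"
	EngineTiFlash = "tiflash"
)
```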
2 changes: 1 addition & 1 deletion server/api/min_resolved_ts.go
@@ -48,7 +48,7 @@ type minResolvedTS struct {
 // @Failure 500 {string} string "PD server failed to proceed the request."
 // @Router /min-resolved-ts [get]
 func (h *minResolvedTSHandler) GetMinResolvedTS(w http.ResponseWriter, r *http.Request) {
-	c := h.svr.GetRaftCluster()
+	c := getCluster(r)
 	value := c.GetMinResolvedTS()
 	persistInterval := c.GetOpts().GetPDServerConfig().MinResolvedTSPersistenceInterval
 	h.rd.JSON(w, http.StatusOK, minResolvedTS{
4 changes: 3 additions & 1 deletion server/api/operator_test.go
@@ -364,7 +364,9 @@ func (s *testTransferRegionOperatorSuite) TestTransferRegionWithPlacementRule(c
 		if tc.placementRuleEnable {
 			err := s.svr.GetRaftCluster().GetRuleManager().Initialize(
 				s.svr.GetRaftCluster().GetOpts().GetMaxReplicas(),
-				s.svr.GetRaftCluster().GetOpts().GetLocationLabels())
+				s.svr.GetRaftCluster().GetOpts().GetLocationLabels(),
+				s.svr.GetRaftCluster().GetOpts().GetIsolationLevel(),
+			)
 			c.Assert(err, IsNil)
 		}
 		if len(tc.rules) > 0 {
2 changes: 1 addition & 1 deletion server/api/router.go
@@ -327,7 +327,7 @@ func createRouter(prefix string, svr *server.Server) *mux.Router {
 
 	// min resolved ts API
 	minResolvedTSHandler := newMinResolvedTSHandler(svr, rd)
-	registerFunc(apiRouter, "/min-resolved-ts", minResolvedTSHandler.GetMinResolvedTS, setMethods("GET"))
+	registerFunc(clusterRouter, "/min-resolved-ts", minResolvedTSHandler.GetMinResolvedTS, setMethods("GET"))
 
 	// unsafe admin operation API
 	unsafeOperationHandler := newUnsafeOperationHandler(svr, rd)
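Note: this route move pairs with the min_resolved_ts.go change above: handlers registered on clusterRouter can read the cluster from the request context via getCluster(r) instead of reaching through the server. A hypothetical sketch of that middleware pattern (names are illustrative, not necessarily pd's):

```go
package api

import (
	"context"
	"net/http"
)

type clusterCtxKey struct{}

// clusterMiddleware is a sketch: it refuses requests until the cluster is
// available, then stores it in the request context for handlers to read.
func clusterMiddleware(getRaftCluster func() interface{}, next http.Handler) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		c := getRaftCluster()
		if c == nil {
			http.Error(w, "cluster is not bootstrapped", http.StatusInternalServerError)
			return
		}
		next.ServeHTTP(w, r.WithContext(context.WithValue(r.Context(), clusterCtxKey{}, c)))
	})
}

// getCluster retrieves what the middleware stored; handlers like
// GetMinResolvedTS above are assumed to use an equivalent helper.
func getCluster(r *http.Request) interface{} {
	return r.Context().Value(clusterCtxKey{})
}
```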
9 changes: 7 additions & 2 deletions server/cluster/cluster.go
@@ -63,6 +63,8 @@ import (
 var (
 	// DefaultMinResolvedTSPersistenceInterval is the default value of min resolved ts persistence interval.
 	DefaultMinResolvedTSPersistenceInterval = 10 * time.Second
+
+	denySchedulersByLabelerCounter = schedule.LabelerEventCounter.WithLabelValues("schedulers", "deny")
 )
 
 // regionLabelGCInterval is the interval to run region-label's GC work.
@@ -251,7 +253,7 @@ func (c *RaftCluster) Start(s Server) error {
 
 	c.ruleManager = placement.NewRuleManager(c.storage, c, c.GetOpts())
 	if c.opt.IsPlacementRulesEnabled() {
-		err = c.ruleManager.Initialize(c.opt.GetMaxReplicas(), c.opt.GetLocationLabels())
+		err = c.ruleManager.Initialize(c.opt.GetMaxReplicas(), c.opt.GetLocationLabels(), c.opt.GetIsolationLevel())
 		if err != nil {
 			return err
 		}
@@ -1128,6 +1130,9 @@ func (c *RaftCluster) checkStoreLabels(s *core.StoreInfo) error {
 	}
 	for _, label := range s.GetLabels() {
 		key := label.GetKey()
+		if key == core.EngineKey {
+			continue
+		}
 		if _, ok := keysSet[key]; !ok {
 			log.Warn("not found the key match with the store label",
 				zap.Stringer("store", s.GetMeta()),
@@ -1451,8 +1456,8 @@ func (c *RaftCluster) checkStores() {
 			}
 		} else if c.IsPrepared() {
 			threshold := c.getThreshold(stores, store)
-			log.Debug("store serving threshold", zap.Uint64("store-id", storeID), zap.Float64("threshold", threshold))
 			regionSize := float64(store.GetRegionSize())
+			log.Debug("store serving threshold", zap.Uint64("store-id", storeID), zap.Float64("threshold", threshold), zap.Float64("region-size", regionSize))
 			if regionSize >= threshold {
 				if err := c.ReadyToServe(storeID); err != nil {
 					log.Error("change store to serving failed",
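Note: denySchedulersByLabelerCounter is a child of a labeled counter vector. The underlying metric is assumed to follow the usual Prometheus client pattern, along these lines (the real definition lives in the schedule package, outside this diff):

```go
package schedule

import "github.com/prometheus/client_golang/prometheus"

// Assumed definition of the counter vector that WithLabelValues("schedulers",
// "deny") selects a child from; the label names here are illustrative.
var LabelerEventCounter = prometheus.NewCounterVec(
	prometheus.CounterOpts{
		Namespace: "pd",
		Subsystem: "schedule",
		Name:      "labeler_event_counter",
		Help:      "Counter of region labeler events.",
	}, []string{"type", "event"})

func init() {
	prometheus.MustRegister(LabelerEventCounter)
}
```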
10 changes: 5 additions & 5 deletions server/cluster/cluster_test.go
@@ -222,7 +222,7 @@ func (s *testClusterInfoSuite) TestSetOfflineStore(c *C) {
 	cluster.coordinator = newCoordinator(s.ctx, cluster, nil)
 	cluster.ruleManager = placement.NewRuleManager(storage.NewStorageWithMemoryBackend(), cluster, cluster.GetOpts())
 	if opt.IsPlacementRulesEnabled() {
-		err := cluster.ruleManager.Initialize(opt.GetMaxReplicas(), opt.GetLocationLabels())
+		err := cluster.ruleManager.Initialize(opt.GetMaxReplicas(), opt.GetLocationLabels(), opt.GetIsolationLevel())
 		if err != nil {
 			panic(err)
 		}
@@ -393,7 +393,7 @@ func (s *testClusterInfoSuite) TestUpStore(c *C) {
 	cluster.coordinator = newCoordinator(s.ctx, cluster, nil)
 	cluster.ruleManager = placement.NewRuleManager(storage.NewStorageWithMemoryBackend(), cluster, cluster.GetOpts())
 	if opt.IsPlacementRulesEnabled() {
-		err := cluster.ruleManager.Initialize(opt.GetMaxReplicas(), opt.GetLocationLabels())
+		err := cluster.ruleManager.Initialize(opt.GetMaxReplicas(), opt.GetLocationLabels(), opt.GetIsolationLevel())
 		if err != nil {
 			panic(err)
 		}
@@ -488,7 +488,7 @@ func (s *testClusterInfoSuite) TestDeleteStoreUpdatesClusterVersion(c *C) {
 	cluster.coordinator = newCoordinator(s.ctx, cluster, nil)
 	cluster.ruleManager = placement.NewRuleManager(storage.NewStorageWithMemoryBackend(), cluster, cluster.GetOpts())
 	if opt.IsPlacementRulesEnabled() {
-		err := cluster.ruleManager.Initialize(opt.GetMaxReplicas(), opt.GetLocationLabels())
+		err := cluster.ruleManager.Initialize(opt.GetMaxReplicas(), opt.GetLocationLabels(), opt.GetIsolationLevel())
 		if err != nil {
 			panic(err)
 		}
@@ -1118,7 +1118,7 @@ func (s *testClusterInfoSuite) TestOfflineAndMerge(c *C) {
 	cluster.coordinator = newCoordinator(s.ctx, cluster, nil)
 	cluster.ruleManager = placement.NewRuleManager(storage.NewStorageWithMemoryBackend(), cluster, cluster.GetOpts())
 	if opt.IsPlacementRulesEnabled() {
-		err := cluster.ruleManager.Initialize(opt.GetMaxReplicas(), opt.GetLocationLabels())
+		err := cluster.ruleManager.Initialize(opt.GetMaxReplicas(), opt.GetLocationLabels(), opt.GetIsolationLevel())
 		if err != nil {
 			panic(err)
 		}
@@ -1705,7 +1705,7 @@ func newTestRaftCluster(
 	rc.InitCluster(id, opt, s, basicCluster)
 	rc.ruleManager = placement.NewRuleManager(storage.NewStorageWithMemoryBackend(), rc, opt)
 	if opt.IsPlacementRulesEnabled() {
-		err := rc.ruleManager.Initialize(opt.GetMaxReplicas(), opt.GetLocationLabels())
+		err := rc.ruleManager.Initialize(opt.GetMaxReplicas(), opt.GetLocationLabels(), opt.GetIsolationLevel())
 		if err != nil {
 			panic(err)
 		}
21 changes: 20 additions & 1 deletion server/cluster/coordinator.go
@@ -641,6 +641,10 @@ func (c *coordinator) addScheduler(scheduler schedule.Scheduler, args ...string)
 	c.wg.Add(1)
 	go c.runScheduler(s)
 	c.schedulers[s.GetName()] = s
+	if err := schedule.SaveSchedulerConfig(c.cluster.storage, scheduler); err != nil {
+		log.Error("can not save scheduler config", zap.String("scheduler-name", scheduler.GetName()), errs.ZapError(err))
+		return err
+	}
 	c.cluster.opt.AddSchedulerCfg(s.GetType(), args)
 	return nil
 }
@@ -889,8 +893,23 @@ func (s *scheduleController) Schedule() []*operator.Operator {
 		}
 		cacheCluster := newCacheCluster(s.cluster)
 		// If we have schedule, reset interval to the minimal interval.
-		if ops := s.Scheduler.Schedule(cacheCluster); len(ops) > 0 {
+		ops := s.Scheduler.Schedule(cacheCluster)
+		foundDisabled := false
+		for _, op := range ops {
+			if labelMgr := s.cluster.GetRegionLabeler(); labelMgr != nil {
+				if labelMgr.ScheduleDisabled(s.cluster.GetRegion(op.RegionID())) {
+					denySchedulersByLabelerCounter.Inc()
+					foundDisabled = true
+					break
+				}
+			}
+		}
+		if len(ops) > 0 {
 			s.nextInterval = s.Scheduler.GetMinInterval()
+			// try regenerating operators
+			if foundDisabled {
+				continue
+			}
 			return ops
 		}
 	}
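Note: ScheduleDisabled is understood to key on a region-label rule that attaches schedule=deny to a key range; with this change, a scheduler whose operators touch such a region increments the deny counter and regenerates instead of returning them. A hypothetical example of installing such a rule over HTTP (endpoint and field names follow a common reading of pd's region-label API and may differ by version):

```go
package main

import (
	"bytes"
	"encoding/json"
	"fmt"
	"net/http"
)

func main() {
	// Hypothetical rule: regions whose keys fall in [start_key, end_key)
	// get the label schedule=deny, which ScheduleDisabled checks for.
	rule := map[string]interface{}{
		"id":        "deny-schedule-example",
		"labels":    []map[string]string{{"key": "schedule", "value": "deny"}},
		"rule_type": "key-range",
		"data": []map[string]string{
			{"start_key": "7480000000000000ff0a", "end_key": "7480000000000000ff0b"},
		},
	}
	body, err := json.Marshal(rule)
	if err != nil {
		panic(err)
	}
	resp, err := http.Post("http://127.0.0.1:2379/pd/api/v1/config/region-label/rule",
		"application/json", bytes.NewReader(body))
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	fmt.Println(resp.Status)
}
```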
(The remaining changed files in this commit are not shown here.)
