Merge branch 'release-6.1' into cherry-pick-6919-to-release-6.1
ti-chi-bot[bot] authored Sep 13, 2024
2 parents be8d521 + d76f55d commit 10136bc
Showing 29 changed files with 269 additions and 31 deletions.
26 changes: 26 additions & 0 deletions OWNERS
@@ -0,0 +1,26 @@
# See the OWNERS docs at https://go.k8s.io/owners
approvers:
- AndreMouche
- binshi-bing
- bufferflies
- CabinfeverB
- Connor1996
- disksing
- huachaohuang
- HunDunDM
- HuSharp
- JmPotato
- lhy1024
- nolouch
- overvenus
- qiuyesuifeng
- rleungx
- siddontang
- Yisaer
- zhouqiang-cl
reviewers:
- BusyJay
- howardlau1999
- Luffbee
- shafreeck
- xhebox
6 changes: 6 additions & 0 deletions OWNERS_ALIASES
@@ -0,0 +1,6 @@
# Sort the members alphabetically.
aliases:
sig-critical-approvers-config:
- easonn7
- kevin-xianliu
- niubell
7 changes: 7 additions & 0 deletions client/resource_group/controller/OWNERS
@@ -0,0 +1,7 @@
# See the OWNERS docs at https://go.k8s.io/owners
options:
no_parent_owners: true
filters:
"(OWNERS|config\\.go)$":
approvers:
- sig-critical-approvers-config
7 changes: 7 additions & 0 deletions client/tlsutil/OWNERS
@@ -0,0 +1,7 @@
# See the OWNERS docs at https://go.k8s.io/owners
options:
no_parent_owners: true
filters:
"(OWNERS|tlsconfig\\.go)$":
approvers:
- sig-critical-approvers-config
7 changes: 7 additions & 0 deletions conf/OWNERS
@@ -0,0 +1,7 @@
# See the OWNERS docs at https://go.k8s.io/owners
options:
no_parent_owners: true
filters:
"(OWNERS|config\\.toml)$":
approvers:
- sig-critical-approvers-config
7 changes: 7 additions & 0 deletions pkg/encryption/OWNERS
@@ -0,0 +1,7 @@
# See the OWNERS docs at https://go.k8s.io/owners
options:
no_parent_owners: true
filters:
"(OWNERS|config\\.go)$":
approvers:
- sig-critical-approvers-config
7 changes: 7 additions & 0 deletions pkg/mcs/resourcemanager/server/OWNERS
@@ -0,0 +1,7 @@
# See the OWNERS docs at https://go.k8s.io/owners
options:
no_parent_owners: true
filters:
"(OWNERS|config\\.go)$":
approvers:
- sig-critical-approvers-config
7 changes: 7 additions & 0 deletions pkg/mcs/scheduling/server/config/OWNERS
@@ -0,0 +1,7 @@
# See the OWNERS docs at https://go.k8s.io/owners
options:
no_parent_owners: true
filters:
"(OWNERS|config\\.go)$":
approvers:
- sig-critical-approvers-config
7 changes: 7 additions & 0 deletions pkg/mcs/tso/server/OWNERS
@@ -0,0 +1,7 @@
# See the OWNERS docs at https://go.k8s.io/owners
options:
no_parent_owners: true
filters:
"(OWNERS|config\\.go)$":
approvers:
- sig-critical-approvers-config
2 changes: 1 addition & 1 deletion pkg/mock/mockcluster/mockcluster.go
@@ -188,7 +188,7 @@ func (mc *Cluster) AllocPeer(storeID uint64) (*metapb.Peer, error) {
func (mc *Cluster) initRuleManager() {
if mc.RuleManager == nil {
mc.RuleManager = placement.NewRuleManager(storage.NewStorageWithMemoryBackend(), mc, mc.GetOpts())
-		mc.RuleManager.Initialize(int(mc.GetReplicationConfig().MaxReplicas), mc.GetReplicationConfig().LocationLabels)
+		mc.RuleManager.Initialize(int(mc.GetReplicationConfig().MaxReplicas), mc.GetReplicationConfig().LocationLabels, mc.GetReplicationConfig().IsolationLevel)
}
}

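The same one-line change recurs throughout this commit: every `placement.RuleManager.Initialize` call site gains a third argument. A minimal sketch of the inferred new signature, using a stub type for illustration (the real method lives in the placement package and does much more):

```go
package main

import "fmt"

// Stub standing in for placement.RuleManager; only the signature matters here.
type RuleManager struct{ defaultRule string }

// Initialize now also receives the isolation level (third parameter), as
// inferred from the updated call sites in this diff; the real method builds
// the default placement rule from these values.
func (m *RuleManager) Initialize(maxReplica int, locationLabels []string, isolationLevel string) error {
	m.defaultRule = fmt.Sprintf("replicas=%d location-labels=%v isolation-level=%q",
		maxReplica, locationLabels, isolationLevel)
	return nil
}

func main() {
	m := &RuleManager{}
	if err := m.Initialize(3, []string{"zone", "rack"}, "zone"); err != nil {
		panic(err)
	}
	fmt.Println(m.defaultRule)
}
```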
7 changes: 7 additions & 0 deletions pkg/schedule/config/OWNERS
@@ -0,0 +1,7 @@
# See the OWNERS docs at https://go.k8s.io/owners
options:
no_parent_owners: true
filters:
"(OWNERS|(config|store_config)\\.go)$":
approvers:
- sig-critical-approvers-config
7 changes: 7 additions & 0 deletions pkg/schedule/schedulers/OWNERS
@@ -0,0 +1,7 @@
# See the OWNERS docs at https://go.k8s.io/owners
options:
no_parent_owners: true
filters:
"(OWNERS|hot_region_config\\.go)$":
approvers:
- sig-critical-approvers-config
17 changes: 13 additions & 4 deletions pkg/typeutil/size_test.go
@@ -17,6 +17,7 @@ package typeutil
import (
"encoding/json"

"github.com/docker/go-units"
. "github.com/pingcap/check"
)

@@ -41,23 +42,31 @@ func (s *testSizeSuite) TestJSON(c *C) {
}

func (s *testSizeSuite) TestParseMbFromText(c *C) {
+	const defaultValue = 2
+
testdata := []struct {
body []string
size uint64
}{{
body: []string{"10Mib", "10MiB", "10M", "10MB"},
-		size: uint64(10),
+		size: 10,
}, {
body: []string{"10GiB", "10Gib", "10G", "10GB"},
-		size: uint64(10 * 1024),
+		size: 10 * units.GiB / units.MiB,
+	}, {
+		body: []string{"1024KiB", "1048576"},
+		size: 1,
+	}, {
+		body: []string{"100KiB", "1023KiB", "1048575", "0"},
+		size: 0,
}, {
body: []string{"10yiB", "10aib"},
-		size: uint64(1),
+		size: defaultValue,
}}

for _, t := range testdata {
for _, b := range t.body {
-			c.Assert(int(ParseMBFromText(b, 1)), Equals, int(t.size))
+			c.Assert(ParseMBFromText(b, defaultValue), Equals, t.size)
}
}
}
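For context, a minimal sketch of the behavior this table pins down, assuming `ParseMBFromText` parses a human-readable size with go-units, truncates to MiB, and returns the caller's default on unparsable input. The helper below is illustrative, not PD's actual implementation:

```go
package main

import (
	"fmt"

	"github.com/docker/go-units"
)

// parseMBFromText mirrors the semantics exercised above: parse a size string,
// convert to MiB with integer truncation, and fall back to `value` on error.
func parseMBFromText(text string, value uint64) uint64 {
	b, err := units.RAMInBytes(text) // accepts "10MiB", "10M", "1048576", ...
	if err != nil {
		return value // unparsable input such as "10yiB" keeps the default
	}
	return uint64(b) / units.MiB // anything under 1MiB truncates to 0
}

func main() {
	fmt.Println(parseMBFromText("10GiB", 2))   // 10240
	fmt.Println(parseMBFromText("1023KiB", 2)) // 0
	fmt.Println(parseMBFromText("10yiB", 2))   // 2
}
```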
35 changes: 33 additions & 2 deletions server/api/label_test.go
@@ -17,13 +17,15 @@ package api
import (
"context"
"fmt"
"strings"

. "github.com/pingcap/check"
"github.com/pingcap/kvproto/pkg/metapb"
"github.com/pingcap/kvproto/pkg/pdpb"
tu "github.com/tikv/pd/pkg/testutil"
"github.com/tikv/pd/server"
"github.com/tikv/pd/server/config"
"github.com/tikv/pd/server/core"
)

var _ = Suite(&testLabelsStoreSuite{})
Expand Down Expand Up @@ -264,19 +266,47 @@ func (s *testStrictlyLabelsStoreSuite) TestStoreMatch(c *C) {
valid: false,
expectError: "key matching the label was not found",
},
+		{
+			store: &metapb.Store{
+				Id:      3,
+				Address: "tiflash1",
+				State:   metapb.StoreState_Up,
+				Labels: []*metapb.StoreLabel{
+					{
+						Key:   "zone",
+						Value: "us-west-1",
+					},
+					{
+						Key:   "disk",
+						Value: "ssd",
+					},
+					{
+						Key:   core.EngineKey,
+						Value: core.EngineTiFlash,
+					},
+				},
+				Version: "3.0.0",
+			},
+			valid:       true,
+			expectError: "placement rules is disabled",
+		},
}

for _, t := range cases {
resp, err := s.grpcSvr.PutStore(context.Background(), &pdpb.PutStoreRequest{
Header: &pdpb.RequestHeader{ClusterId: s.svr.ClusterID()},
Store: &metapb.Store{
Id: t.store.Id,
-				Address: fmt.Sprintf("tikv%d", t.store.Id),
+				Address: t.store.Address,
State: t.store.State,
Labels: t.store.Labels,
Version: t.store.Version,
},
})
+		if t.store.Address == "tiflash1" {
+			c.Assert(strings.Contains(resp.GetHeader().GetError().String(), t.expectError), IsTrue)
+			continue
+		}
if t.valid {
c.Assert(err, IsNil)
} else {
@@ -291,12 +321,13 @@ func (s *testStrictlyLabelsStoreSuite) TestStoreMatch(c *C) {
Header: &pdpb.RequestHeader{ClusterId: s.svr.ClusterID()},
Store: &metapb.Store{
Id: t.store.Id,
-				Address: fmt.Sprintf("tikv%d", t.store.Id),
+				Address: t.store.Address,
State: t.store.State,
Labels: t.store.Labels,
Version: t.store.Version,
},
})

if t.valid {
c.Assert(err, IsNil)
} else {
4 changes: 3 additions & 1 deletion server/api/operator_test.go
@@ -364,7 +364,9 @@ func (s *testTransferRegionOperatorSuite) TestTransferRegionWithPlacementRule(c
if tc.placementRuleEnable {
err := s.svr.GetRaftCluster().GetRuleManager().Initialize(
s.svr.GetRaftCluster().GetOpts().GetMaxReplicas(),
-			s.svr.GetRaftCluster().GetOpts().GetLocationLabels())
+			s.svr.GetRaftCluster().GetOpts().GetLocationLabels(),
+			s.svr.GetRaftCluster().GetOpts().GetIsolationLevel(),
+		)
c.Assert(err, IsNil)
}
if len(tc.rules) > 0 {
5 changes: 4 additions & 1 deletion server/cluster/cluster.go
@@ -253,7 +253,7 @@ func (c *RaftCluster) Start(s Server) error {

c.ruleManager = placement.NewRuleManager(c.storage, c, c.GetOpts())
if c.opt.IsPlacementRulesEnabled() {
-		err = c.ruleManager.Initialize(c.opt.GetMaxReplicas(), c.opt.GetLocationLabels())
+		err = c.ruleManager.Initialize(c.opt.GetMaxReplicas(), c.opt.GetLocationLabels(), c.opt.GetIsolationLevel())
if err != nil {
return err
}
@@ -1140,6 +1140,9 @@ func (c *RaftCluster) checkStoreLabels(s *core.StoreInfo) error {
}
for _, label := range s.GetLabels() {
key := label.GetKey()
+		if key == core.EngineKey {
+			continue
+		}
if _, ok := keysSet[key]; !ok {
log.Warn("not found the key match with the store label",
zap.Stringer("store", s.GetMeta()),
10 changes: 5 additions & 5 deletions server/cluster/cluster_test.go
@@ -225,7 +225,7 @@ func (s *testClusterInfoSuite) TestSetOfflineStore(c *C) {
cluster.coordinator = newCoordinator(s.ctx, cluster, nil)
cluster.ruleManager = placement.NewRuleManager(storage.NewStorageWithMemoryBackend(), cluster, cluster.GetOpts())
if opt.IsPlacementRulesEnabled() {
-		err := cluster.ruleManager.Initialize(opt.GetMaxReplicas(), opt.GetLocationLabels())
+		err := cluster.ruleManager.Initialize(opt.GetMaxReplicas(), opt.GetLocationLabels(), opt.GetIsolationLevel())
if err != nil {
panic(err)
}
@@ -396,7 +396,7 @@ func (s *testClusterInfoSuite) TestUpStore(c *C) {
cluster.coordinator = newCoordinator(s.ctx, cluster, nil)
cluster.ruleManager = placement.NewRuleManager(storage.NewStorageWithMemoryBackend(), cluster, cluster.GetOpts())
if opt.IsPlacementRulesEnabled() {
-		err := cluster.ruleManager.Initialize(opt.GetMaxReplicas(), opt.GetLocationLabels())
+		err := cluster.ruleManager.Initialize(opt.GetMaxReplicas(), opt.GetLocationLabels(), opt.GetIsolationLevel())
if err != nil {
panic(err)
}
@@ -491,7 +491,7 @@ func (s *testClusterInfoSuite) TestDeleteStoreUpdatesClusterVersion(c *C) {
cluster.coordinator = newCoordinator(s.ctx, cluster, nil)
cluster.ruleManager = placement.NewRuleManager(storage.NewStorageWithMemoryBackend(), cluster, cluster.GetOpts())
if opt.IsPlacementRulesEnabled() {
-		err := cluster.ruleManager.Initialize(opt.GetMaxReplicas(), opt.GetLocationLabels())
+		err := cluster.ruleManager.Initialize(opt.GetMaxReplicas(), opt.GetLocationLabels(), opt.GetIsolationLevel())
if err != nil {
panic(err)
}
@@ -1121,7 +1121,7 @@ func (s *testClusterInfoSuite) TestOfflineAndMerge(c *C) {
cluster.coordinator = newCoordinator(s.ctx, cluster, nil)
cluster.ruleManager = placement.NewRuleManager(storage.NewStorageWithMemoryBackend(), cluster, cluster.GetOpts())
if opt.IsPlacementRulesEnabled() {
-		err := cluster.ruleManager.Initialize(opt.GetMaxReplicas(), opt.GetLocationLabels())
+		err := cluster.ruleManager.Initialize(opt.GetMaxReplicas(), opt.GetLocationLabels(), opt.GetIsolationLevel())
if err != nil {
panic(err)
}
@@ -1742,7 +1742,7 @@ func newTestRaftCluster(
rc.InitCluster(id, opt, s, basicCluster)
rc.ruleManager = placement.NewRuleManager(storage.NewStorageWithMemoryBackend(), rc, opt)
if opt.IsPlacementRulesEnabled() {
-		err := rc.ruleManager.Initialize(opt.GetMaxReplicas(), opt.GetLocationLabels())
+		err := rc.ruleManager.Initialize(opt.GetMaxReplicas(), opt.GetLocationLabels(), opt.GetIsolationLevel())
if err != nil {
panic(err)
}
7 changes: 7 additions & 0 deletions server/config/OWNERS
@@ -0,0 +1,7 @@
# See the OWNERS docs at https://go.k8s.io/owners
options:
no_parent_owners: true
filters:
"(OWNERS|(config|service_middleware_config)\\.go)$":
approvers:
- sig-critical-approvers-config
7 changes: 7 additions & 0 deletions server/config/persist_options.go
@@ -260,6 +260,13 @@ func (o *PersistOptions) SetSplitMergeInterval(splitMergeInterval time.Duration)
o.SetScheduleConfig(v)
}

+// SetMaxStoreDownTime sets the max store down time. It's only used for tests.
+func (o *PersistOptions) SetMaxStoreDownTime(time time.Duration) {
+	v := o.GetScheduleConfig().Clone()
+	v.MaxStoreDownTime = typeutil.NewDuration(time)
+	o.SetScheduleConfig(v)
+}

// SetMaxMergeRegionSize sets the max merge region size.
func (o *PersistOptions) SetMaxMergeRegionSize(maxMergeRegionSize uint64) {
v := o.GetScheduleConfig().Clone()
9 changes: 7 additions & 2 deletions server/config/store_config.go
@@ -134,9 +134,14 @@ func (c *StoreConfig) CheckRegionSize(size, mergeSize uint64) error {
if size < c.GetRegionMaxSize() {
return nil
}

+	// This could happen when the region split size is set to a value less than 1MiB,
+	// which is a very extreme case; we just pass the check here to prevent a panic.
+	regionSplitSize := c.GetRegionSplitSize()
+	if regionSplitSize == 0 {
+		return nil
+	}
	// the smallest of the split regions cannot be merged again, so its size should be less than the merge size.
-	if smallSize := size % c.GetRegionSplitSize(); smallSize <= mergeSize && smallSize != 0 {
+	if smallSize := size % regionSplitSize; smallSize <= mergeSize && smallSize != 0 {
log.Debug("region size is too small", zap.Uint64("size", size), zap.Uint64("merge-size", mergeSize), zap.Uint64("small-size", smallSize))
return errs.ErrCheckerMergeAgain.FastGenByArgs("the smallest region of the split regions is less than max-merge-region-size, " +
"it will be merged again")
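A toy, self-contained illustration of the guard just added, with made-up numbers: the modulo computes the leftover region a split would produce, and a split size that rounds down to 0 MiB would previously crash the check with a division by zero.

```go
package main

import "fmt"

// checkRegionSize is a simplified stand-in for StoreConfig.CheckRegionSize:
// all sizes are in MiB and the real error types are replaced with strings.
func checkRegionSize(size, mergeSize, regionMaxSize, regionSplitSize uint64) string {
	if size < regionMaxSize {
		return "ok: no split needed"
	}
	// The guard from this commit: a split size below 1MiB truncates to 0,
	// and without this check the modulo below would panic (divide by zero).
	if regionSplitSize == 0 {
		return "ok: split size is 0, skip the check"
	}
	if smallSize := size % regionSplitSize; smallSize <= mergeSize && smallSize != 0 {
		return fmt.Sprintf("merge again: leftover %dMiB <= max-merge-region-size", smallSize)
	}
	return "ok"
}

func main() {
	fmt.Println(checkRegionSize(1000, 20, 144, 96)) // leftover 40MiB > 20 -> ok
	fmt.Println(checkRegionSize(970, 20, 144, 96))  // leftover 10MiB <= 20 -> merge again
	fmt.Println(checkRegionSize(1000, 20, 144, 0))  // guard: split size 0 -> ok
}
```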
4 changes: 4 additions & 0 deletions server/config/store_config_test.go
@@ -148,4 +148,8 @@ func (t *testTiKVConfigSuite) TestMergeCheck(c *C) {
c.Assert(config.CheckRegionKeys(v.keys, v.mergeKeys), NotNil)
}
}
+	// Test CheckRegionSize when the region split size is 0.
+	config.RegionSplitSize = "100KiB"
+	c.Assert(config.GetRegionSplitSize(), Equals, uint64(0))
+	c.Assert(config.CheckRegionSize(defaultRegionMaxSize, 50), IsNil)
}
5 changes: 4 additions & 1 deletion server/election/lease.go
@@ -152,8 +152,11 @@ func (l *lease) keepAliveWorker(ctx context.Context, interval time.Duration) <-c
expire := start.Add(time.Duration(res.TTL) * time.Second)
select {
case ch <- expire:
-			case <-ctx1.Done():
+			// Here we don't use `ctx1.Done()` because we want to make sure that if the keep-alive succeeds, the expire time is updated.
+			case <-ctx.Done():
}
} else {
log.Error("keep alive response ttl is zero", zap.String("purpose", l.Purpose))
}
}()

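The comment in this hunk is the whole fix. A minimal runnable sketch of the pattern, with made-up names: `ctx` is the worker's lifetime context, `ctx1` the per-request timeout context derived from it, which may already be done even though the keep-alive RPC succeeded.

```go
package main

import (
	"context"
	"fmt"
	"time"
)

// deliverExpire mimics the fixed select above: we deliberately do not select
// on ctx1, because its deadline can fire while a slow-but-successful
// keep-alive is in flight, and selecting on it could randomly drop the
// refreshed expire time.
func deliverExpire(ctx, ctx1 context.Context, ch chan<- time.Time, expire time.Time) bool {
	_ = ctx1 // intentionally unused, see comment above
	select {
	case ch <- expire:
		return true
	case <-ctx.Done(): // give up only when the whole worker shuts down
		return false
	}
}

func main() {
	ctx := context.Background()
	ctx1, cancel := context.WithTimeout(ctx, time.Millisecond)
	defer cancel()
	time.Sleep(2 * time.Millisecond) // simulate a slow but successful keep-alive

	ch := make(chan time.Time, 1)
	ok := deliverExpire(ctx, ctx1, ch, time.Now().Add(3*time.Second))
	fmt.Println("expire delivered:", ok) // true, even though ctx1 has expired
}
```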