Merge branch 'fix_pv_patch_problem' of https://github.com/Yisaer/tidb-operator into fix_pv_patch_problem

Song Gao committed Jun 19, 2020
2 parents 5f3f66a + 5c40cc4 commit 201d514
Showing 14 changed files with 165 additions and 52 deletions.
8 changes: 8 additions & 0 deletions pkg/apis/pingcap/v1alpha1/tidbcluster.go
@@ -194,10 +194,18 @@ func (tc *TidbCluster) PDUpgrading() bool {
return tc.Status.PD.Phase == UpgradePhase
}

func (tc *TidbCluster) PDScaling() bool {
return tc.Status.PD.Phase == ScalePhase
}

func (tc *TidbCluster) TiKVUpgrading() bool {
return tc.Status.TiKV.Phase == UpgradePhase
}

func (tc *TidbCluster) TiKVScaling() bool {
return tc.Status.TiKV.Phase == ScalePhase
}

func (tc *TidbCluster) TiDBUpgrading() bool {
return tc.Status.TiDB.Phase == UpgradePhase
}
2 changes: 2 additions & 0 deletions pkg/apis/pingcap/v1alpha1/types.go
@@ -61,6 +61,8 @@ const (
NormalPhase MemberPhase = "Normal"
// UpgradePhase represents the upgrade state of TiDB cluster.
UpgradePhase MemberPhase = "Upgrade"
// ScalePhase represents the scaling state of TiDB cluster.
ScalePhase MemberPhase = "Scale"
)

// ConfigUpdateStrategy represents the strategy to update configuration
2 changes: 2 additions & 0 deletions pkg/manager/member/pd_member_manager.go
@@ -315,6 +315,8 @@ func (pmm *pdMemberManager) syncTidbClusterStatus(tc *v1alpha1.TidbCluster, set
}
if upgrading {
tc.Status.PD.Phase = v1alpha1.UpgradePhase
} else if tc.PDStsDesiredReplicas() != *set.Spec.Replicas {
tc.Status.PD.Phase = v1alpha1.ScalePhase
} else {
tc.Status.PD.Phase = v1alpha1.NormalPhase
}
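Below is a minimal, self-contained Go sketch (not part of this diff) of the phase transition this hunk and the matching tikv_member_manager.go hunk introduce; the stand-in types replace the real v1alpha1 package and StatefulSet status purely for illustration.

package main

import "fmt"

// Stand-ins for v1alpha1.MemberPhase and its constants.
type MemberPhase string

const (
	NormalPhase  MemberPhase = "Normal"
	UpgradePhase MemberPhase = "Upgrade"
	ScalePhase   MemberPhase = "Scale" // newly added by this commit
)

// syncPhase mirrors the updated syncTidbClusterStatus logic: an in-flight
// upgrade wins, otherwise a mismatch between desired and observed replicas
// marks the member as scaling, and only a settled member is Normal.
func syncPhase(upgrading bool, desiredReplicas, stsReplicas int32) MemberPhase {
	if upgrading {
		return UpgradePhase
	}
	if desiredReplicas != stsReplicas {
		return ScalePhase
	}
	return NormalPhase
}

func main() {
	fmt.Println(syncPhase(false, 4, 3)) // Scale: spec asks for 4, StatefulSet has 3
	fmt.Println(syncPhase(true, 3, 3))  // Upgrade
	fmt.Println(syncPhase(false, 3, 3)) // Normal
}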
4 changes: 2 additions & 2 deletions pkg/manager/member/pd_member_manager_test.go
@@ -317,7 +317,7 @@ func TestPDMemberManagerSyncUpdate(t *testing.T) {
},
expectTidbClusterFn: func(g *GomegaWithT, tc *v1alpha1.TidbCluster) {
g.Expect(tc.Status.ClusterID).To(Equal("1"))
g.Expect(tc.Status.PD.Phase).To(Equal(v1alpha1.NormalPhase))
g.Expect(tc.Status.PD.Phase).To(Equal(v1alpha1.ScalePhase))
g.Expect(tc.Status.PD.StatefulSet.ObservedGeneration).To(Equal(int64(1)))
g.Expect(len(tc.Status.PD.Members)).To(Equal(3))
g.Expect(tc.Status.PD.Members["pd1"].Health).To(Equal(true))
@@ -727,7 +727,7 @@ func TestPDMemberManagerSyncPDSts(t *testing.T) {
g.Expect(*set.Spec.UpdateStrategy.RollingUpdate.Partition).To(Equal(int32(3)))
},
expectTidbClusterFn: func(g *GomegaWithT, tc *v1alpha1.TidbCluster) {
g.Expect(tc.Status.PD.Phase).To(Equal(v1alpha1.NormalPhase))
g.Expect(tc.Status.PD.Phase).To(Equal(v1alpha1.ScalePhase))
},
},
}
10 changes: 10 additions & 0 deletions pkg/manager/member/pd_upgrader.go
@@ -52,6 +52,16 @@ func (pu *pdUpgrader) gracefulUpgrade(tc *v1alpha1.TidbCluster, oldSet *apps.Sta
if !tc.Status.PD.Synced {
return fmt.Errorf("tidbcluster: [%s/%s]'s pd status sync failed,can not to be upgraded", ns, tcName)
}
if tc.PDScaling() {
klog.Infof("TidbCluster: [%s/%s]'s pd is scaling, can not upgrade pd",
ns, tcName)
_, podSpec, err := GetLastAppliedConfig(oldSet)
if err != nil {
return err
}
newSet.Spec.Template.Spec = *podSpec
return nil
}

tc.Status.PD.Phase = v1alpha1.UpgradePhase
if !templateEqual(newSet, oldSet) {
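For orientation, here is a hedged sketch of the guard added above: when the cluster reports ScalePhase, the upgrader does not advance the rolling update but restores the previously applied pod template instead. The lastAppliedSpec helper and the annotation key below are simplified stand-ins, not the operator's real GetLastAppliedConfig.

package main

import (
	"encoding/json"
	"fmt"
)

// Simplified stand-in for the pod template carried in the StatefulSet.
type PodSpec struct {
	Image string `json:"image"`
}

// lastAppliedSpec imitates GetLastAppliedConfig: recover the pod spec that was
// applied last time from an annotation on the old StatefulSet.
func lastAppliedSpec(annotations map[string]string) (*PodSpec, error) {
	spec := &PodSpec{}
	if err := json.Unmarshal([]byte(annotations["last-applied-podspec"]), spec); err != nil {
		return nil, err
	}
	return spec, nil
}

func main() {
	oldAnnotations := map[string]string{
		"last-applied-podspec": `{"image":"pd-test-image:old"}`,
	}
	newSpec := &PodSpec{Image: "pd-test-image:new"} // what the upgrade would roll out
	scaling := true                                 // plays the role of tc.PDScaling()

	if scaling {
		restored, err := lastAppliedSpec(oldAnnotations)
		if err != nil {
			panic(err)
		}
		newSpec = restored // keep the old template; do not upgrade while scaling
	}
	fmt.Println(newSpec.Image) // pd-test-image:old
}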
19 changes: 19 additions & 0 deletions pkg/manager/member/pd_upgrader_test.go
@@ -162,6 +162,25 @@ func TestPDUpgraderUpgrade(t *testing.T) {
g.Expect(newSet.Spec.UpdateStrategy.RollingUpdate.Partition).To(Equal(controller.Int32Ptr(3)))
},
},
{
name: "pd scaling",
changeFn: func(tc *v1alpha1.TidbCluster) {
tc.Status.PD.Synced = true
tc.Status.PD.Phase = v1alpha1.ScalePhase
},
changePods: nil,
changeOldSet: func(set *apps.StatefulSet) {
set.Spec.Template.Spec.Containers[0].Image = "pd-test-image:old"
},
transferLeaderErr: false,
errExpectFn: func(g *GomegaWithT, err error) {
g.Expect(err).NotTo(HaveOccurred())
},
expectFn: func(g *GomegaWithT, tc *v1alpha1.TidbCluster, newSet *apps.StatefulSet) {
g.Expect(tc.Status.PD.Phase).To(Equal(v1alpha1.ScalePhase))
g.Expect(newSet.Spec.UpdateStrategy.RollingUpdate.Partition).To(Equal(controller.Int32Ptr(3)))
},
},
{
name: "update revision equals current revision",
changeFn: func(tc *v1alpha1.TidbCluster) {
2 changes: 2 additions & 0 deletions pkg/manager/member/tikv_member_manager.go
@@ -633,6 +633,8 @@ func (tkmm *tikvMemberManager) syncTidbClusterStatus(tc *v1alpha1.TidbCluster, s
}
if upgrading && tc.Status.PD.Phase != v1alpha1.UpgradePhase {
tc.Status.TiKV.Phase = v1alpha1.UpgradePhase
} else if tc.TiKVStsDesiredReplicas() != *set.Spec.Replicas {
tc.Status.TiKV.Phase = v1alpha1.ScalePhase
} else {
tc.Status.TiKV.Phase = v1alpha1.NormalPhase
}
40 changes: 40 additions & 0 deletions pkg/manager/member/tikv_member_manager_test.go
@@ -799,11 +799,15 @@ func TestTiKVMemberManagerSyncTidbClusterStatus(t *testing.T) {
status := apps.StatefulSetStatus{
Replicas: int32(3),
}
spec := apps.StatefulSetSpec{
Replicas: pointer.Int32Ptr(3),
}
now := metav1.Time{Time: time.Now()}
testFn := func(test *testcase, t *testing.T) {
tc := newTidbClusterForPD()
tc.Status.PD.Phase = v1alpha1.NormalPhase
set := &apps.StatefulSet{
Spec: spec,
Status: status,
}
if test.updateTC != nil {
@@ -910,6 +914,42 @@ func TestTiKVMemberManagerSyncTidbClusterStatus(t *testing.T) {
g.Expect(tc.Status.TiKV.Phase).To(Equal(v1alpha1.NormalPhase))
},
},
{
name: "statefulset is scaling out",
updateTC: func(tc *v1alpha1.TidbCluster) {
tc.Spec.TiKV.Replicas = 4
},
upgradingFn: func(lister corelisters.PodLister, controlInterface pdapi.PDControlInterface, set *apps.StatefulSet, cluster *v1alpha1.TidbCluster) (bool, error) {
return false, nil
},
errWhenGetStores: false,
storeInfo: nil,
errWhenGetTombstoneStores: false,
tombstoneStoreInfo: nil,
errExpectFn: nil,
tcExpectFn: func(g *GomegaWithT, tc *v1alpha1.TidbCluster) {
g.Expect(tc.Status.TiKV.StatefulSet.Replicas).To(Equal(int32(3)))
g.Expect(tc.Status.TiKV.Phase).To(Equal(v1alpha1.ScalePhase))
},
},
{
name: "statefulset is scaling in",
updateTC: func(tc *v1alpha1.TidbCluster) {
tc.Spec.TiKV.Replicas = 2
},
upgradingFn: func(lister corelisters.PodLister, controlInterface pdapi.PDControlInterface, set *apps.StatefulSet, cluster *v1alpha1.TidbCluster) (bool, error) {
return false, nil
},
errWhenGetStores: false,
storeInfo: nil,
errWhenGetTombstoneStores: false,
tombstoneStoreInfo: nil,
errExpectFn: nil,
tcExpectFn: func(g *GomegaWithT, tc *v1alpha1.TidbCluster) {
g.Expect(tc.Status.TiKV.StatefulSet.Replicas).To(Equal(int32(3)))
g.Expect(tc.Status.TiKV.Phase).To(Equal(v1alpha1.ScalePhase))
},
},
{
name: "get stores failed",
updateTC: nil,
4 changes: 3 additions & 1 deletion pkg/manager/member/tikv_scaler.go
@@ -58,6 +58,8 @@ func (tsd *tikvScaler) ScaleOut(tc *v1alpha1.TidbCluster, oldSet *apps.StatefulS
_, ordinal, replicas, deleteSlots := scaleOne(oldSet, newSet)
resetReplicas(newSet, oldSet)
if tc.TiKVUpgrading() {
klog.Infof("TidbCluster: [%s/%s]'s tikv is upgrading, can not scale out until the upgrade completed",
tc.Namespace, tc.Name)
return nil
}

@@ -81,7 +83,7 @@ func (tsd *tikvScaler) ScaleIn(tc *v1alpha1.TidbCluster, oldSet *apps.StatefulSe

// tikv can not scale in when it is upgrading
if tc.TiKVUpgrading() {
klog.Infof("the TidbCluster: [%s/%s]'s tikv is upgrading,can not scale in until upgrade have completed",
klog.Infof("TidbCluster: [%s/%s]'s tikv is upgrading, can not scale in until upgrade completed",
ns, tcName)
return nil
}
5 changes: 4 additions & 1 deletion pkg/manager/member/tikv_upgrader.go
@@ -55,7 +55,10 @@ func NewTiKVUpgrader(pdControl pdapi.PDControlInterface,
func (tku *tikvUpgrader) Upgrade(tc *v1alpha1.TidbCluster, oldSet *apps.StatefulSet, newSet *apps.StatefulSet) error {
ns := tc.GetNamespace()
tcName := tc.GetName()
if tc.Status.PD.Phase == v1alpha1.UpgradePhase {

if tc.Status.PD.Phase == v1alpha1.UpgradePhase || tc.TiKVScaling() {
klog.Infof("TidbCluster: [%s/%s]'s pd status is %v, tikv status is %v, can not upgrade tikv",
ns, tcName, tc.Status.PD.Phase, tc.Status.TiKV.Phase)
_, podSpec, err := GetLastAppliedConfig(oldSet)
if err != nil {
return err
22 changes: 22 additions & 0 deletions pkg/manager/member/tikv_upgrader_test.go
@@ -290,6 +290,28 @@ func TestTiKVUpgraderUpgrade(t *testing.T) {
g.Expect(*newSet.Spec.UpdateStrategy.RollingUpdate.Partition).To(Equal(int32(3)))
},
},
{
name: "tikv can not upgrade when it is scaling",
changeFn: func(tc *v1alpha1.TidbCluster) {
tc.Status.PD.Phase = v1alpha1.NormalPhase
tc.Status.TiKV.Phase = v1alpha1.ScalePhase
tc.Status.TiKV.Synced = true
},
changeOldSet: func(oldSet *apps.StatefulSet) {
SetStatefulSetLastAppliedConfigAnnotation(oldSet)
},
changePods: nil,
beginEvictLeaderErr: false,
endEvictLeaderErr: false,
updatePodErr: false,
errExpectFn: func(g *GomegaWithT, err error) {
g.Expect(err).NotTo(HaveOccurred())
},
expectFn: func(g *GomegaWithT, tc *v1alpha1.TidbCluster, newSet *apps.StatefulSet, pods map[string]*corev1.Pod) {
g.Expect(tc.Status.TiKV.Phase).To(Equal(v1alpha1.ScalePhase))
g.Expect(*newSet.Spec.UpdateStrategy.RollingUpdate.Partition).To(Equal(int32(3)))
},
},
{
name: "get last apply config error",
changeFn: func(tc *v1alpha1.TidbCluster) {
48 changes: 44 additions & 4 deletions pkg/pdapi/pdapi.go
@@ -36,9 +36,15 @@ import (
)

const (
DefaultTimeout = 5 * time.Second
DefaultTimeout = 5 * time.Second
evictSchedulerLeader = "evict-leader-scheduler"
)

// Payload only used to unmarshal the data from pdapi
type Payload struct {
StoreIdRanges map[string]interface{} `json:"store-id-ranges"`
}

// Namespace is a newtype of a string
type Namespace string

@@ -630,18 +636,52 @@ func (pc *pdClient) GetEvictLeaderSchedulers() ([]string, error) {
if err != nil {
return nil, err
}
schedulers := []string{}
var schedulers []string
err = json.Unmarshal(body, &schedulers)
if err != nil {
return nil, err
}
evicts := []string{}
var evicts []string
for _, scheduler := range schedulers {
if strings.HasPrefix(scheduler, "evict-leader-scheduler") {
evicts = append(evicts, scheduler)
}
}
return evicts, nil
evictSchedulers, err := pc.filterLeaderEvictScheduler(evicts)
if err != nil {
return nil, err
}
return evictSchedulers, nil
}

// This method is to make compatible between old pdapi version and 4.0 pdapi version.
// To get more detail, see: https://github.com/pingcap/tidb-operator/pull/1831
func (pc *pdClient) filterLeaderEvictScheduler(evictLeaderSchedulers []string) ([]string, error) {
var schedulerIds []string
if len(evictLeaderSchedulers) == 1 && evictLeaderSchedulers[0] == evictSchedulerLeader {
c, err := pc.GetConfig()
if err != nil {
return nil, err
}
if c.Schedule != nil && c.Schedule.SchedulersPayload != nil {
v, ok := c.Schedule.SchedulersPayload[evictSchedulerLeader]
if ok {
payload := &Payload{}
err := json.Unmarshal([]byte(v), payload)
if err != nil {
return nil, err
}
for k := range payload.StoreIdRanges {
schedulerIds = append(schedulerIds, fmt.Sprintf("%s-%v", evictSchedulerLeader, k))
}
}
}
} else {
for _, s := range evictLeaderSchedulers {
schedulerIds = append(schedulerIds, s)
}
}
return schedulerIds, nil
}

func (pc *pdClient) GetPDLeader() (*pdpb.Member, error) {
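A small runnable sketch of the compatibility rule the new filterLeaderEvictScheduler encodes: pre-4.0 PD lists one evict-leader scheduler per store (e.g. evict-leader-scheduler-1), while PD 4.0 returns a single evict-leader-scheduler entry whose store IDs sit in the schedulers payload. The JSON and helper name below are illustrative, not a captured PD response or the operator's API.

package main

import (
	"encoding/json"
	"fmt"
)

// Payload mirrors the struct added to pdapi.go.
type Payload struct {
	StoreIdRanges map[string]interface{} `json:"store-id-ranges"`
}

// expandSchedulers derives per-store scheduler names from a 4.0-style payload.
func expandSchedulers(rawPayload string) ([]string, error) {
	payload := &Payload{}
	if err := json.Unmarshal([]byte(rawPayload), payload); err != nil {
		return nil, err
	}
	var schedulerIds []string
	for k := range payload.StoreIdRanges {
		schedulerIds = append(schedulerIds, fmt.Sprintf("evict-leader-scheduler-%v", k))
	}
	return schedulerIds, nil
}

func main() {
	ids, err := expandSchedulers(`{"store-id-ranges":{"1":[],"4":[]}}`)
	if err != nil {
		panic(err)
	}
	fmt.Println(ids) // e.g. [evict-leader-scheduler-1 evict-leader-scheduler-4]
}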
47 changes: 5 additions & 42 deletions pkg/webhook/pod/tikv_creater.go
@@ -14,7 +14,6 @@
package pod

import (
"encoding/json"
"fmt"
"strings"

@@ -28,15 +27,9 @@ import (
)

const (
tikvNotBootstrapped = `TiKV cluster not bootstrapped, please start TiKV first"`
evictSchedulerLeader = "evict-leader-scheduler"
tikvNotBootstrapped = `TiKV cluster not bootstrapped, please start TiKV first"`
)

// Payload only used to unmarshal the data from pdapi
type Payload struct {
StoreIdRanges map[string]interface{} `json:"store-id-ranges"`
}

func (pc *PodAdmissionControl) admitCreateTiKVPod(pod *core.Pod, tc *v1alpha1.TidbCluster, pdClient pdapi.PDClient) *admission.AdmissionResponse {

name := pod.Name
@@ -67,9 +60,10 @@ func (pc *PodAdmissionControl) admitCreateTiKVPod(pod *core.Pod, tc *v1alpha1.Ti
return util.ARSuccess()
}

schedulerIds, err := filterLeaderEvictScheduler(evictLeaderSchedulers, pdClient)
if err != nil {
return util.ARFail(err)
schedulerIds := sets.String{}
for _, s := range evictLeaderSchedulers {
id := strings.Split(s, "-")[3]
schedulerIds.Insert(id)
}

// if the pod which is going to be created already have a store and was in evictLeaderSchedulers,
Expand All @@ -89,34 +83,3 @@ func (pc *PodAdmissionControl) admitCreateTiKVPod(pod *core.Pod, tc *v1alpha1.Ti

return util.ARSuccess()
}

// This method is to make compatible between old pdapi version and 4.0 pdapi version.
// To get more detail, see: https://github.com/pingcap/tidb-operator/pull/1831
func filterLeaderEvictScheduler(evictLeaderSchedulers []string, pdClient pdapi.PDClient) (sets.String, error) {
schedulerIds := sets.String{}
if len(evictLeaderSchedulers) == 1 && evictLeaderSchedulers[0] == evictSchedulerLeader {
c, err := pdClient.GetConfig()
if err != nil {
return schedulerIds, err
}
if c.Schedule != nil && c.Schedule.SchedulersPayload != nil {
v, ok := c.Schedule.SchedulersPayload[evictSchedulerLeader]
if ok {
payload := &Payload{}
err := json.Unmarshal([]byte(v), payload)
if err != nil {
return schedulerIds, err
}
for k := range payload.StoreIdRanges {
schedulerIds.Insert(k)
}
}
}
} else {
for _, s := range evictLeaderSchedulers {
id := strings.Split(s, "-")[3]
schedulerIds.Insert(id)
}
}
return schedulerIds, nil
}
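With the version handling moved into pdClient.GetEvictLeaderSchedulers, the webhook can assume per-store scheduler names; this tiny sketch shows only the remaining parsing step, with made-up names for illustration.

package main

import (
	"fmt"
	"strings"
)

func main() {
	schedulers := []string{"evict-leader-scheduler-1", "evict-leader-scheduler-4"}
	ids := map[string]struct{}{}
	for _, s := range schedulers {
		// The store ID is the fourth dash-separated token, as in the webhook above.
		ids[strings.Split(s, "-")[3]] = struct{}{}
	}
	fmt.Println(ids) // map[1:{} 4:{}]
}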
4 changes: 2 additions & 2 deletions tests/e2e/util/image/image.go
@@ -27,8 +27,8 @@ import (
)

const (
TiDBV3Version = "v3.0.8"
TiDBV3UpgradeVersion = "v3.0.9"
TiDBV3Version = "v3.1.1"
TiDBV3UpgradeVersion = "v3.1.2"
TiDBV4Version = "v4.0.0-rc.2"
TiDBV4UpgradeVersion = "v4.0.0"
PrometheusImage = "prom/prometheus"
