From d29d1adcdf8910a50a29c1d7a015203d1ca9fec1 Mon Sep 17 00:00:00 2001 From: xiaojingchen Date: Mon, 24 Dec 2018 11:51:44 +0800 Subject: [PATCH 1/3] fix pd and tikv concurrent update (#234) * fix pd and tikv upgrading at same time * add unit test --- pkg/manager/member/tidb_member_manager.go | 2 +- .../member/tidb_member_manager_test.go | 32 +++++++++++++++++++ pkg/manager/member/tikv_member_manager.go | 2 +- .../member/tikv_member_manager_test.go | 19 +++++++++++ 4 files changed, 53 insertions(+), 2 deletions(-) diff --git a/pkg/manager/member/tidb_member_manager.go b/pkg/manager/member/tidb_member_manager.go index e67a32b98e5..3b74381dbde 100644 --- a/pkg/manager/member/tidb_member_manager.go +++ b/pkg/manager/member/tidb_member_manager.go @@ -331,7 +331,7 @@ func (tmm *tidbMemberManager) syncTidbClusterStatus(tc *v1alpha1.TidbCluster, se if err != nil { return err } - if upgrading { + if upgrading && tc.Status.TiKV.Phase != v1alpha1.UpgradePhase && tc.Status.PD.Phase != v1alpha1.UpgradePhase { tc.Status.TiDB.Phase = v1alpha1.UpgradePhase } else { tc.Status.TiDB.Phase = v1alpha1.NormalPhase diff --git a/pkg/manager/member/tidb_member_manager_test.go b/pkg/manager/member/tidb_member_manager_test.go index 38a62b96822..e75288a265f 100644 --- a/pkg/manager/member/tidb_member_manager_test.go +++ b/pkg/manager/member/tidb_member_manager_test.go @@ -332,6 +332,8 @@ func TestTiDBMemberManagerSyncTidbClusterStatus(t *testing.T) { now := metav1.Time{Time: time.Now()} testFn := func(test *testcase, t *testing.T) { tc := newTidbClusterForPD() + tc.Status.PD.Phase = v1alpha1.NormalPhase + tc.Status.TiKV.Phase = v1alpha1.NormalPhase set := &apps.StatefulSet{ Status: status, } @@ -384,6 +386,36 @@ func TestTiDBMemberManagerSyncTidbClusterStatus(t *testing.T) { g.Expect(tc.Status.TiDB.Phase).To(Equal(v1alpha1.UpgradePhase)) }, }, + { + name: "statefulset is upgrading but pd is upgrading", + updateTC: func(tc *v1alpha1.TidbCluster) { + tc.Status.PD.Phase = v1alpha1.UpgradePhase + 
}, + upgradingFn: func(lister corelisters.PodLister, set *apps.StatefulSet, cluster *v1alpha1.TidbCluster) (bool, error) { + return true, nil + }, + healthInfo: map[string]bool{}, + errExpectFn: nil, + tcExpectFn: func(g *GomegaWithT, tc *v1alpha1.TidbCluster) { + g.Expect(tc.Status.TiDB.StatefulSet.Replicas).To(Equal(int32(3))) + g.Expect(tc.Status.TiDB.Phase).To(Equal(v1alpha1.NormalPhase)) + }, + }, + { + name: "statefulset is upgrading but tikv is upgrading", + updateTC: func(tc *v1alpha1.TidbCluster) { + tc.Status.TiKV.Phase = v1alpha1.UpgradePhase + }, + upgradingFn: func(lister corelisters.PodLister, set *apps.StatefulSet, cluster *v1alpha1.TidbCluster) (bool, error) { + return true, nil + }, + healthInfo: map[string]bool{}, + errExpectFn: nil, + tcExpectFn: func(g *GomegaWithT, tc *v1alpha1.TidbCluster) { + g.Expect(tc.Status.TiDB.StatefulSet.Replicas).To(Equal(int32(3))) + g.Expect(tc.Status.TiDB.Phase).To(Equal(v1alpha1.NormalPhase)) + }, + }, { name: "statefulset is not upgrading", updateTC: nil, diff --git a/pkg/manager/member/tikv_member_manager.go b/pkg/manager/member/tikv_member_manager.go index 320343d23df..8fe442a2bfe 100644 --- a/pkg/manager/member/tikv_member_manager.go +++ b/pkg/manager/member/tikv_member_manager.go @@ -446,7 +446,7 @@ func (tkmm *tikvMemberManager) syncTidbClusterStatus(tc *v1alpha1.TidbCluster, s if err != nil { return err } - if upgrading { + if upgrading && tc.Status.PD.Phase != v1alpha1.UpgradePhase { tc.Status.TiKV.Phase = v1alpha1.UpgradePhase } else { tc.Status.TiKV.Phase = v1alpha1.NormalPhase diff --git a/pkg/manager/member/tikv_member_manager_test.go b/pkg/manager/member/tikv_member_manager_test.go index a7723c6b58c..981174f6ee6 100644 --- a/pkg/manager/member/tikv_member_manager_test.go +++ b/pkg/manager/member/tikv_member_manager_test.go @@ -866,6 +866,7 @@ func TestTiKVMemberManagerSyncTidbClusterStatus(t *testing.T) { now := metav1.Time{Time: time.Now()} testFn := func(test *testcase, t *testing.T) { tc := 
newTidbClusterForPD() + tc.Status.PD.Phase = v1alpha1.NormalPhase set := &apps.StatefulSet{ Status: status, } @@ -939,6 +940,24 @@ func TestTiKVMemberManagerSyncTidbClusterStatus(t *testing.T) { g.Expect(tc.Status.TiKV.Phase).To(Equal(v1alpha1.UpgradePhase)) }, }, + { + name: "statefulset is upgrading but pd is upgrading", + updateTC: func(tc *v1alpha1.TidbCluster) { + tc.Status.PD.Phase = v1alpha1.UpgradePhase + }, + upgradingFn: func(lister corelisters.PodLister, controlInterface controller.PDControlInterface, set *apps.StatefulSet, cluster *v1alpha1.TidbCluster) (bool, error) { + return true, nil + }, + errWhenGetStores: false, + storeInfo: nil, + errWhenGetTombstoneStores: false, + tombstoneStoreInfo: nil, + errExpectFn: nil, + tcExpectFn: func(g *GomegaWithT, tc *v1alpha1.TidbCluster) { + g.Expect(tc.Status.TiKV.StatefulSet.Replicas).To(Equal(int32(3))) + g.Expect(tc.Status.TiKV.Phase).To(Equal(v1alpha1.NormalPhase)) + }, + }, { name: "statefulset is not upgrading", updateTC: nil, From ebd499e54cd466e976696020413ac40d612ee0b3 Mon Sep 17 00:00:00 2001 From: xiaojingchen Date: Tue, 25 Dec 2018 15:21:43 +0800 Subject: [PATCH 2/3] fix pd control bugs (#245) --- pkg/controller/pd_control.go | 46 ++++- pkg/controller/pd_control_test.go | 221 ++++++++++++++++++++----- pkg/manager/member/pd_failover.go | 4 +- pkg/manager/member/pd_failover_test.go | 14 +- 4 files changed, 236 insertions(+), 49 deletions(-) diff --git a/pkg/controller/pd_control.go b/pkg/controller/pd_control.go index b43a30f81e8..a630f9816a5 100644 --- a/pkg/controller/pd_control.go +++ b/pkg/controller/pd_control.go @@ -304,6 +304,20 @@ func (pc *pdClient) GetStore(storeID uint64) (*StoreInfo, error) { } func (pc *pdClient) DeleteStore(storeID uint64) error { + var exist bool + stores, err := pc.GetStores() + if err != nil { + return err + } + for _, store := range stores.Stores { + if store.Store.GetId() == storeID { + exist = true + break + } + } + if !exist { + return nil + } apiURL := 
fmt.Sprintf("%s/%s/%d", pc.url, storePrefix, storeID) req, err := http.NewRequest("DELETE", apiURL, nil) if err != nil { @@ -316,7 +330,7 @@ func (pc *pdClient) DeleteStore(storeID uint64) error { defer DeferClose(res.Body, &err) // Remove an offline store should returns http.StatusOK - if res.StatusCode == http.StatusOK { + if res.StatusCode == http.StatusOK || res.StatusCode == http.StatusNotFound { return nil } body, err := ioutil.ReadAll(res.Body) @@ -328,6 +342,20 @@ func (pc *pdClient) DeleteStore(storeID uint64) error { } func (pc *pdClient) DeleteMemberByID(memberID uint64) error { + var exist bool + members, err := pc.GetMembers() + if err != nil { + return err + } + for _, member := range members.Members { + if member.MemberId == memberID { + exist = true + break + } + } + if !exist { + return nil + } apiURL := fmt.Sprintf("%s/%s/id/%d", pc.url, membersPrefix, memberID) req, err := http.NewRequest("DELETE", apiURL, nil) if err != nil { @@ -338,7 +366,7 @@ func (pc *pdClient) DeleteMemberByID(memberID uint64) error { return err } defer DeferClose(res.Body, &err) - if res.StatusCode == http.StatusOK { + if res.StatusCode == http.StatusOK || res.StatusCode == http.StatusNotFound { return nil } err2 := readErrorBody(res.Body) @@ -346,6 +374,20 @@ func (pc *pdClient) DeleteMemberByID(memberID uint64) error { } func (pc *pdClient) DeleteMember(name string) error { + var exist bool + members, err := pc.GetMembers() + if err != nil { + return err + } + for _, member := range members.Members { + if member.Name == name { + exist = true + break + } + } + if !exist { + return nil + } apiURL := fmt.Sprintf("%s/%s/name/%s", pc.url, membersPrefix, name) req, err := http.NewRequest("DELETE", apiURL, nil) if err != nil { diff --git a/pkg/controller/pd_control_test.go b/pkg/controller/pd_control_test.go index bc81f4f2ac8..0d7a614ee9a 100644 --- a/pkg/controller/pd_control_test.go +++ b/pkg/controller/pd_control_test.go @@ -359,29 +359,78 @@ func TestSetStoreLabels(t 
*testing.T) { func TestDeleteMember(t *testing.T) { g := NewGomegaWithT(t) name := "testMember" + member := &pdpb.Member{Name: name, MemberId: uint64(1)} + membersExist := &MembersInfo{ + Members: []*pdpb.Member{ + member, + }, + Leader: member, + EtcdLeader: member, + } + membersExistBytes, err := json.Marshal(membersExist) + g.Expect(err).NotTo(HaveOccurred()) + + membersNotExist := &MembersInfo{ + Members: []*pdpb.Member{}, + } + membersNotExistBytes, err := json.Marshal(membersNotExist) + g.Expect(err).NotTo(HaveOccurred()) + tcs := []struct { - caseName string - path string - method string - want bool + caseName string + prePath string + preMethod string + preResp []byte + exist bool + path string + method string + want bool }{{ - caseName: "success_DeleteMember", - path: fmt.Sprintf("/%s/name/%s", membersPrefix, name), - method: "DELETE", - want: true, + caseName: "success_DeleteMember", + prePath: fmt.Sprintf("/%s", membersPrefix), + preMethod: "GET", + preResp: membersExistBytes, + exist: true, + path: fmt.Sprintf("/%s/name/%s", membersPrefix, name), + method: "DELETE", + want: true, }, { - caseName: "failed_DeleteMember", - path: fmt.Sprintf("/%s/name/%s", membersPrefix, name), - method: "DELETE", - want: false, + caseName: "failed_DeleteMember", + prePath: fmt.Sprintf("/%s", membersPrefix), + preMethod: "GET", + preResp: membersExistBytes, + exist: true, + path: fmt.Sprintf("/%s/name/%s", membersPrefix, name), + method: "DELETE", + want: false, + }, { + caseName: "delete_not_exist_member", + prePath: fmt.Sprintf("/%s", membersPrefix), + preMethod: "GET", + preResp: membersNotExistBytes, + exist: false, + path: fmt.Sprintf("/%s/name/%s", membersPrefix, name), + method: "DELETE", + want: true, }, } for _, tc := range tcs { + count := 1 svc := getClientServer(func(w http.ResponseWriter, request *http.Request) { + if count == 1 { + g.Expect(request.Method).To(Equal(tc.preMethod), "check method") + g.Expect(request.URL.Path).To(Equal(tc.prePath), "check url") 
+ w.Header().Set("Content-Type", ContentTypeJSON) + w.WriteHeader(http.StatusOK) + w.Write(tc.preResp) + count++ + return + } + + g.Expect(tc.exist).To(BeTrue()) g.Expect(request.Method).To(Equal(tc.method), "check method") g.Expect(request.URL.Path).To(Equal(tc.path), "check url") - w.Header().Set("Content-Type", ContentTypeJSON) if tc.want { w.WriteHeader(http.StatusOK) @@ -404,29 +453,78 @@ func TestDeleteMemberByID(t *testing.T) { g := NewGomegaWithT(t) id := uint64(1) + member := &pdpb.Member{Name: "test", MemberId: id} + membersExist := &MembersInfo{ + Members: []*pdpb.Member{ + member, + }, + Leader: member, + EtcdLeader: member, + } + membersExistBytes, err := json.Marshal(membersExist) + g.Expect(err).NotTo(HaveOccurred()) + + membersNotExist := &MembersInfo{ + Members: []*pdpb.Member{}, + } + membersNotExistBytes, err := json.Marshal(membersNotExist) + g.Expect(err).NotTo(HaveOccurred()) + tcs := []struct { - caseName string - path string - method string - want bool + caseName string + prePath string + preMethod string + preResp []byte + exist bool + path string + method string + want bool }{{ - caseName: "success_DeleteMemberByID", - path: fmt.Sprintf("/%s/id/%d", membersPrefix, id), - method: "DELETE", - want: true, + caseName: "success_DeleteMemberByID", + prePath: fmt.Sprintf("/%s", membersPrefix), + preMethod: "GET", + preResp: membersExistBytes, + exist: true, + path: fmt.Sprintf("/%s/id/%d", membersPrefix, id), + method: "DELETE", + want: true, }, { - caseName: "failed_DeleteMemberByID", - path: fmt.Sprintf("/%s/id/%d", membersPrefix, id), - method: "DELETE", - want: false, + caseName: "failed_DeleteMemberByID", + prePath: fmt.Sprintf("/%s", membersPrefix), + preMethod: "GET", + preResp: membersExistBytes, + exist: true, + path: fmt.Sprintf("/%s/id/%d", membersPrefix, id), + method: "DELETE", + want: false, + }, { + caseName: "delete_not_exist_member", + prePath: fmt.Sprintf("/%s", membersPrefix), + preMethod:
"GET", + preResp: membersNotExistBytes, + exist: false, + path: fmt.Sprintf("/%s/id/%d", membersPrefix, id), + method: "DELETE", + want: true, }, } for _, tc := range tcs { + count := 1 svc := getClientServer(func(w http.ResponseWriter, request *http.Request) { + if count == 1 { + g.Expect(request.Method).To(Equal(tc.preMethod), "check method") + g.Expect(request.URL.Path).To(Equal(tc.prePath), "check url") + w.Header().Set("Content-Type", ContentTypeJSON) + w.WriteHeader(http.StatusOK) + w.Write(tc.preResp) + count++ + return + } + + g.Expect(tc.exist).To(BeTrue()) g.Expect(request.Method).To(Equal(tc.method), "check method") g.Expect(request.URL.Path).To(Equal(tc.path), "check url") - w.Header().Set("Content-Type", ContentTypeJSON) if tc.want { w.WriteHeader(http.StatusOK) @@ -449,26 +547,73 @@ func TestDeleteMemberByID(t *testing.T) { func TestDeleteStore(t *testing.T) { g := NewGomegaWithT(t) storeID := uint64(1) + store := &StoreInfo{ + Store: &MetaStore{Store: &metapb.Store{Id: storeID, State: metapb.StoreState_Up}}, + Status: &StoreStatus{}, + } + stores := &StoresInfo{ + Count: 1, + Stores: []*StoreInfo{ + store, + }, + } + + storesBytes, err := json.Marshal(stores) + g.Expect(err).NotTo(HaveOccurred()) + tcs := []struct { - caseName string - path string - method string - want bool + caseName string + prePath string + preMethod string + preResp []byte + exist bool + path string + method string + want bool }{{ - caseName: "success_DeleteStore", - path: fmt.Sprintf("/%s/%d", storePrefix, storeID), - method: "DELETE", - want: true, + caseName: "success_DeleteStore", + prePath: fmt.Sprintf("/%s", storesPrefix), + preMethod: "GET", + preResp: storesBytes, + exist: true, + path: fmt.Sprintf("/%s/%d", storePrefix, storeID), + method: "DELETE", + want: true, }, { - caseName: "failed_DeleteStore", - path: fmt.Sprintf("/%s/%d", storePrefix, storeID), - method: "DELETE", - want: false, + caseName: "failed_DeleteStore", + prePath: fmt.Sprintf("/%s", storesPrefix), + 
preMethod: "GET", + preResp: storesBytes, + exist: true, + path: fmt.Sprintf("/%s/%d", storePrefix, storeID), + method: "DELETE", + want: false, + }, { + caseName: "delete_not_exist_store", + prePath: fmt.Sprintf("/%s", storesPrefix), + preMethod: "GET", + preResp: storesBytes, + exist: true, + path: fmt.Sprintf("/%s/%d", storePrefix, storeID), + method: "DELETE", + want: true, }, } for _, tc := range tcs { + count := 1 svc := getClientServer(func(w http.ResponseWriter, request *http.Request) { + if count == 1 { + g.Expect(request.Method).To(Equal(tc.preMethod), "check method") + g.Expect(request.URL.Path).To(Equal(tc.prePath), "check url") + w.Header().Set("Content-Type", ContentTypeJSON) + w.WriteHeader(http.StatusOK) + w.Write(tc.preResp) + count++ + return + } + + g.Expect(tc.exist).To(BeTrue()) g.Expect(request.Method).To(Equal(tc.method), "check method") g.Expect(request.URL.Path).To(Equal(tc.path), "check url") diff --git a/pkg/manager/member/pd_failover.go b/pkg/manager/member/pd_failover.go index e6c0e74f532..9ccfd8969fc 100644 --- a/pkg/manager/member/pd_failover.go +++ b/pkg/manager/member/pd_failover.go @@ -157,12 +157,12 @@ func (pf *pdFailover) tryToDeleteAFailureMember(tc *v1alpha1.TidbCluster) error return nil } - memberID, err := strconv.Atoi(failureMember.MemberID) + memberID, err := strconv.ParseUint(failureMember.MemberID, 10, 64) if err != nil { return err } // invoke deleteMember api to delete a member from the pd cluster - err = pf.pdControl.GetPDClient(tc).DeleteMemberByID(uint64(memberID)) + err = pf.pdControl.GetPDClient(tc).DeleteMemberByID(memberID) if err != nil { return err } diff --git a/pkg/manager/member/pd_failover_test.go b/pkg/manager/member/pd_failover_test.go index 1e3e2bb2106..278e06cbb8e 100644 --- a/pkg/manager/member/pd_failover_test.go +++ b/pkg/manager/member/pd_failover_test.go @@ -243,7 +243,7 @@ func TestPDFailoverFailover(t *testing.T) { g.Expect(int(tc.Spec.PD.Replicas)).To(Equal(3)) 
g.Expect(len(tc.Status.PD.FailureMembers)).To(Equal(1)) g.Expect(tc.Status.PD.FailureMembers).To(Equal(map[string]v1alpha1.PDFailureMember{ - "test-pd-1": {PodName: "test-pd-1", MemberID: "1", PVCUID: "pvc-1-uid", MemberDeleted: false}, + "test-pd-1": {PodName: "test-pd-1", MemberID: "12891273174085095651", PVCUID: "pvc-1-uid", MemberDeleted: false}, })) }, }, @@ -554,7 +554,7 @@ func oneFailureMember(tc *v1alpha1.TidbCluster) { pd2: {Name: pd2, ID: "2", Health: true}, } tc.Status.PD.FailureMembers = map[string]v1alpha1.PDFailureMember{ - pd1: {PodName: pd1, PVCUID: "pvc-1-uid", MemberID: "1"}, + pd1: {PodName: pd1, PVCUID: "pvc-1-uid", MemberID: "12891273174085095651"}, } } @@ -577,7 +577,7 @@ func oneNotReadyMember(tc *v1alpha1.TidbCluster) { pd2 := ordinalPodName(v1alpha1.PDMemberType, tc.GetName(), 2) tc.Status.PD.Members = map[string]v1alpha1.PDMember{ pd0: {Name: pd0, ID: "0", Health: true}, - pd1: {Name: pd1, ID: "1", Health: false, LastTransitionTime: metav1.Time{Time: time.Now().Add(-10 * time.Minute)}}, + pd1: {Name: pd1, ID: "12891273174085095651", Health: false, LastTransitionTime: metav1.Time{Time: time.Now().Add(-10 * time.Minute)}}, pd2: {Name: pd2, ID: "2", Health: true}, } } @@ -588,11 +588,11 @@ func oneNotReadyMemberAndAFailureMember(tc *v1alpha1.TidbCluster) { pd2 := ordinalPodName(v1alpha1.PDMemberType, tc.GetName(), 2) tc.Status.PD.Members = map[string]v1alpha1.PDMember{ pd0: {Name: pd0, ID: "0", Health: true}, - pd1: {Name: pd1, ID: "1", Health: false, LastTransitionTime: metav1.Time{Time: time.Now().Add(-10 * time.Minute)}}, + pd1: {Name: pd1, ID: "12891273174085095651", Health: false, LastTransitionTime: metav1.Time{Time: time.Now().Add(-10 * time.Minute)}}, pd2: {Name: pd2, ID: "2", Health: true}, } tc.Status.PD.FailureMembers = map[string]v1alpha1.PDFailureMember{ - pd1: {PodName: pd1, PVCUID: "pvc-1-uid", MemberID: "1"}, + pd1: {PodName: pd1, PVCUID: "pvc-1-uid", MemberID: "12891273174085095651"}, } } @@ -602,7 +602,7 @@ func 
allMembersReady(tc *v1alpha1.TidbCluster) { pd2 := ordinalPodName(v1alpha1.PDMemberType, tc.GetName(), 2) tc.Status.PD.Members = map[string]v1alpha1.PDMember{ pd0: {Name: pd0, ID: "0", Health: true}, - pd1: {Name: pd1, ID: "1", Health: true}, + pd1: {Name: pd1, ID: "12891273174085095651", Health: true}, pd2: {Name: pd2, ID: "2", Health: true}, } } @@ -613,7 +613,7 @@ func twoMembersNotReady(tc *v1alpha1.TidbCluster) { pd2 := ordinalPodName(v1alpha1.PDMemberType, tc.GetName(), 2) tc.Status.PD.Members = map[string]v1alpha1.PDMember{ pd0: {Name: pd0, ID: "0", Health: false}, - pd1: {Name: pd1, ID: "1", Health: false}, + pd1: {Name: pd1, ID: "12891273174085095651", Health: false}, pd2: {Name: pd2, ID: "2", Health: true}, } } From e7dfebfb0b1967cf163c90e05115107d3a60b538 Mon Sep 17 00:00:00 2001 From: xiaojingchen Date: Wed, 26 Dec 2018 11:36:44 +0800 Subject: [PATCH 3/3] fix backup restore script (#251) * fix script bug * fix --- charts/tidb-backup/templates/scripts/_start_backup.sh.tpl | 2 +- charts/tidb-backup/templates/scripts/_start_restore.sh.tpl | 2 +- .../templates/scripts/_start_scheduled_backup.sh.tpl | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/charts/tidb-backup/templates/scripts/_start_backup.sh.tpl b/charts/tidb-backup/templates/scripts/_start_backup.sh.tpl index e9b61704a02..a899174563f 100644 --- a/charts/tidb-backup/templates/scripts/_start_backup.sh.tpl +++ b/charts/tidb-backup/templates/scripts/_start_backup.sh.tpl @@ -1,6 +1,6 @@ set -euo pipefail dirname=backup-`date +%Y-%m-%dT%H%M%S`-${MY_POD_NAME} -host=`echo {{ .Values.clusterName }}_TIDB_SERVICE_HOST | tr '[a-z]' '[A-Z]'` +host=`echo {{ .Values.clusterName }}_TIDB_SERVICE_HOST | tr '[a-z]' '[A-Z]' | tr '-' '_'` mkdir -p /data/${dirname}/ cp /savepoint-dir/savepoint /data/${dirname}/ diff --git a/charts/tidb-backup/templates/scripts/_start_restore.sh.tpl b/charts/tidb-backup/templates/scripts/_start_restore.sh.tpl index ace3c7c630d..e0ba6b0f582 100644 ---
a/charts/tidb-backup/templates/scripts/_start_restore.sh.tpl +++ b/charts/tidb-backup/templates/scripts/_start_restore.sh.tpl @@ -3,7 +3,7 @@ set -euo pipefail dirname=restore-`date +%Y-%m-%dT%H%M%S`-${MY_POD_NAME} dataDir=/data/${dirname} mkdir -p ${dataDir}/ -host=`echo {{ .Values.clusterName }}_TIDB_SERVICE_HOST | tr '[a-z]' '[A-Z]'` +host=`echo {{ .Values.clusterName }}_TIDB_SERVICE_HOST | tr '[a-z]' '[A-Z]' | tr '-' '_'` {{- if .Values.restore.gcp }} downloader \ diff --git a/charts/tidb-cluster/templates/scripts/_start_scheduled_backup.sh.tpl b/charts/tidb-cluster/templates/scripts/_start_scheduled_backup.sh.tpl index ebc9eff4fc3..ce2d764eae8 100644 --- a/charts/tidb-cluster/templates/scripts/_start_scheduled_backup.sh.tpl +++ b/charts/tidb-cluster/templates/scripts/_start_scheduled_backup.sh.tpl @@ -1,6 +1,6 @@ set -euo pipefail dirname=scheduled-backup-`date +%Y-%m-%dT%H%M%S`-${MY_POD_NAME} -host=`echo {{ .Values.clusterName }}_TIDB_SERVICE_HOST | tr '[a-z]' '[A-Z]'` +host=`echo {{ .Values.clusterName }}_TIDB_SERVICE_HOST | tr '[a-z]' '[A-Z]' | tr '-' '_'` mkdir -p /data/${dirname}/ cp /savepoint-dir/savepoint /data/${dirname}/