diff --git a/Makefile b/Makefile
index 30a4408739..bdb9f4f859 100644
--- a/Makefile
+++ b/Makefile
@@ -39,6 +39,7 @@ else
 docker: build
 endif
     docker build --tag "${DOCKER_REGISTRY}/pingcap/tidb-operator:${IMAGE_TAG}" images/tidb-operator
+    docker build --tag "${DOCKER_REGISTRY}/pingcap/tidb-backup-manager:${IMAGE_TAG}" images/tidb-backup-manager
 
 build: controller-manager scheduler discovery admission-webhook apiserver backup-manager
 
diff --git a/hack/e2e.sh b/hack/e2e.sh
index 478974c09f..30cc4af4aa 100755
--- a/hack/e2e.sh
+++ b/hack/e2e.sh
@@ -509,6 +509,7 @@ export IMAGE_TAG
 export SKIP_GINKGO
 export SKIP_IMAGE_LOAD
 export TIDB_OPERATOR_IMAGE=$DOCKER_REGISTRY/pingcap/tidb-operator:${IMAGE_TAG}
+export TIDB_BACKUP_MANAGER_IMAGE=$DOCKER_REGISTRY/pingcap/tidb-backup-manager:${IMAGE_TAG}
 export E2E_IMAGE=$DOCKER_REGISTRY/pingcap/tidb-operator-e2e:${IMAGE_TAG}
 export PATH=$OUTPUT_BIN:$PATH
 
diff --git a/hack/run-e2e.sh b/hack/run-e2e.sh
index 69914dc0fd..0258eb1e39 100755
--- a/hack/run-e2e.sh
+++ b/hack/run-e2e.sh
@@ -32,6 +32,7 @@ GCP_SDK=${GCP_SDK:-/google-cloud-sdk}
 IMAGE_TAG=${IMAGE_TAG:-}
 SKIP_IMAGE_LOAD=${SKIP_IMAGE_LOAD:-}
 TIDB_OPERATOR_IMAGE=${TIDB_OPERATOR_IMAGE:-localhost:5000/pingcap/tidb-operator:latest}
+TIDB_BACKUP_MANAGER_IMAGE=${TIDB_BACKUP_MANAGER_IMAGE:-localhost:5000/pingcap/tidb-backup-manager:latest}
 E2E_IMAGE=${E2E_IMAGE:-localhost:5000/pingcap/tidb-operator-e2e:latest}
 KUBECONFIG=${KUBECONFIG:-$HOME/.kube/config}
 KUBECONTEXT=${KUBECONTEXT:-}
@@ -51,6 +52,7 @@ if [ -z "$KUBECONFIG" ]; then
 fi
 
 echo "TIDB_OPERATOR_IMAGE: $TIDB_OPERATOR_IMAGE"
+echo "TIDB_BACKUP_MANAGER_IMAGE: $TIDB_BACKUP_MANAGER_IMAGE"
 echo "E2E_IMAGE: $E2E_IMAGE"
 echo "KUBECONFIG: $KUBECONFIG"
 echo "KUBECONTEXT: $KUBECONTEXT"
@@ -212,6 +214,7 @@ function e2e::setup_helm_server() {
 function e2e::image_load() {
     local images=(
         $TIDB_OPERATOR_IMAGE
+        $TIDB_BACKUP_MANAGER_IMAGE
         $E2E_IMAGE
     )
     if [ "$PROVIDER" == "kind" ]; then
@@ -224,17 +227,22 @@ function e2e::image_load() {
         unset DOCKER_CONFIG # We don't need this and it may be read-only and fail the command to fail
         gcloud auth configure-docker
         GCP_TIDB_OPERATOR_IMAGE=gcr.io/$GCP_PROJECT/tidb-operator:$CLUSTER-$IMAGE_TAG
+        GCP_TIDB_BACKUP_MANAGER_IMAGE=gcr.io/$GCP_PROJECT/tidb-backup-image:$CLUSTER-$IMAGE_TAG
         GCP_E2E_IMAGE=gcr.io/$GCP_PROJECT/tidb-operator-e2e:$CLUSTER-$IMAGE_TAG
         docker tag $TIDB_OPERATOR_IMAGE $GCP_TIDB_OPERATOR_IMAGE
         docker tag $E2E_IMAGE $GCP_E2E_IMAGE
+        docker tag $TIDB_BACKUP_MANAGER_IMAGE $GCP_TIDB_BACKUP_MANAGER_IMAGE
         echo "info: pushing $GCP_TIDB_OPERATOR_IMAGE"
         docker push $GCP_TIDB_OPERATOR_IMAGE
         echo "info: pushing $GCP_E2E_IMAGE"
         docker push $GCP_E2E_IMAGE
+        echo "info: pushing $GCP_TIDB_BACKUP_MANAGER_IMAGE"
+        docker push $GCP_TIDB_BACKUP_MANAGER_IMAGE
         TIDB_OPERATOR_IMAGE=$GCP_TIDB_OPERATOR_IMAGE
         E2E_IMAGE=$GCP_E2E_IMAGE
+        TIDB_BACKUP_MANAGER_IMAGE=$GCP_TIDB_BACKUP_MANAGER_IMAGE
     elif [ "$PROVIDER" == "eks" ]; then
-        for repoName in e2e/tidb-operator e2e/tidb-operator-e2e; do
+        for repoName in e2e/tidb-operator e2e/tidb-operator-e2e e2e/tidb-backup-manager; do
             local ret=0
             aws ecr describe-repositories --repository-names $repoName || ret=$?
             if [ $ret -ne 0 ]; then
@@ -246,13 +254,18 @@ function e2e::image_load() {
         echo "info: logging in $ecrURL"
         aws ecr get-login-password | docker login --username AWS --password-stdin $ecrURL
         AWS_TIDB_OPERATOR_IMAGE=$ecrURL/e2e/tidb-operator:$CLUSTER-$IMAGE_TAG
+        AWS_TIDB_BACKUP_MANAGER_IMAGE=$ecrURL/e2e/tidb-backup-manager:$CLUSTER-$IMAGE_TAG
         AWS_E2E_IMAGE=$ecrURL/e2e/tidb-operator-e2e:$CLUSTER-$IMAGE_TAG
         docker tag $TIDB_OPERATOR_IMAGE $AWS_TIDB_OPERATOR_IMAGE
+        docker tag $TIDB_BACKUP_MANAGER_IMAGE $AWS_TIDB_BACKUP_MANAGER_IMAGE
         docker tag $E2E_IMAGE $AWS_E2E_IMAGE
         echo "info: pushing $AWS_TIDB_OPERATOR_IMAGE"
         docker push $AWS_TIDB_OPERATOR_IMAGE
+        echo "info: pushing $AWS_TIDB_BACKUP_MANAGER_IMAGE"
+        docker push $AWS_TIDB_BACKUP_MANAGER_IMAGE
         echo "info: pushing $AWS_E2E_IMAGE"
         docker push $AWS_E2E_IMAGE
+        TIDB_BACKUP_MANAGER_IMAGE=$AWS_TIDB_BACKUP_MANAGER_IMAGE
         TIDB_OPERATOR_IMAGE=$AWS_TIDB_OPERATOR_IMAGE
         E2E_IMAGE=$AWS_E2E_IMAGE
     else
@@ -329,6 +342,7 @@ e2e_args=(
     # tidb-operator e2e flags
     --operator-tag=e2e
     --operator-image="${TIDB_OPERATOR_IMAGE}"
+    --backup-image="${TIDB_BACKUP_MANAGER_IMAGE}"
    --e2e-image="${E2E_IMAGE}"
     # two tidb versions can be configuraed: ,
     --tidb-versions=v3.0.7,v3.0.8
diff --git a/tests/actions.go b/tests/actions.go
index 6bd33923da..a5c797a6fa 100644
--- a/tests/actions.go
+++ b/tests/actions.go
@@ -236,6 +236,7 @@ type OperatorActions interface {
     CheckInitSQLOrDie(info *TidbClusterConfig)
     DeployAndCheckPump(tc *TidbClusterConfig) error
     WaitForTidbClusterReady(tc *v1alpha1.TidbCluster, timeout, pollInterval time.Duration) error
+    DataIsTheSameAs(from, to *TidbClusterConfig) (bool, error)
 }
 
 type operatorActions struct {
@@ -292,6 +293,7 @@ type OperatorConfig struct {
     DefaultingEnabled  bool
     ValidatingEnabled  bool
     Cabundle           string
+    BackupImage        string
 }
 
 type TidbClusterConfig struct {
@@ -405,6 +407,7 @@ func (tc *TidbClusterConfig) TidbClusterHelmSetString(m map[string]string) strin
 
 func (oi *OperatorConfig) OperatorHelmSetString(m map[string]string) string {
     set := map[string]string{
         "operatorImage":                  oi.Image,
+        "tidbBackupManagerImage":         oi.BackupImage,
         "controllerManager.autoFailover": "true",
         "scheduler.logLevel":             "4",
         "testMode":                       strconv.FormatBool(oi.TestMode),
diff --git a/tests/config.go b/tests/config.go
index 8b704e6395..5cc4875e11 100644
--- a/tests/config.go
+++ b/tests/config.go
@@ -44,6 +44,7 @@ type Config struct {
     InstallOperator      bool              `yaml:"install_opeartor" json:"install_opeartor"`
     OperatorTag          string            `yaml:"operator_tag" json:"operator_tag"`
     OperatorImage        string            `yaml:"operator_image" json:"operator_image"`
+    BackupImage          string            `yaml:"backup_image" json:"backup_image"`
     OperatorFeatures     map[string]bool   `yaml:"operator_features" json:"operator_features"`
     UpgradeOperatorTag   string            `yaml:"upgrade_operator_tag" json:"upgrade_operator_tag"`
     UpgradeOperatorImage string            `yaml:"upgrade_operator_image" json:"upgrade_operator_image"`
diff --git a/tests/e2e/config/config.go b/tests/e2e/config/config.go
index a931a82392..73cf85028c 100644
--- a/tests/e2e/config/config.go
+++ b/tests/e2e/config/config.go
@@ -45,6 +45,7 @@ func RegisterTiDBOperatorFlags(flags *flag.FlagSet) {
     flags.StringVar(&TestConfig.OperatorRepoUrl, "operator-repo-url", "https://github.com/pingcap/tidb-operator.git", "tidb-operator repo url used")
     flags.StringVar(&TestConfig.ChartDir, "chart-dir", "", "chart dir")
     flags.BoolVar(&TestConfig.PreloadImages, "preload-images", false, "if set, preload images in the bootstrap of e2e process")
+    flags.StringVar(&TestConfig.BackupImage, "backup-image", "", "backup image")
 }
 
 func AfterReadingAllFlags() error {
@@ -104,6 +105,7 @@ func NewDefaultOperatorConfig(cfg *tests.Config) *tests.OperatorConfig {
         StsWebhookEnabled: true,
         PodWebhookEnabled: false,
         Cabundle:          "",
+        BackupImage:       cfg.BackupImage,
     }
 }
 
diff --git a/tests/e2e/tidbcluster/tidbcluster.go b/tests/e2e/tidbcluster/tidbcluster.go
index 57210b933c..cc47d2ac39 100644
--- a/tests/e2e/tidbcluster/tidbcluster.go
+++ b/tests/e2e/tidbcluster/tidbcluster.go
@@ -15,12 +15,17 @@ package tidbcluster
 
 import (
     "context"
+    nerrors "errors"
     "fmt"
     _ "net/http/pprof"
     "strconv"
     "strings"
     "time"
 
+    "github.com/aws/aws-sdk-go/aws"
+    "github.com/aws/aws-sdk-go/aws/credentials"
+    "github.com/aws/aws-sdk-go/aws/session"
+    "github.com/aws/aws-sdk-go/service/s3"
     "github.com/onsi/ginkgo"
     "github.com/onsi/gomega"
     "github.com/pingcap/advanced-statefulset/pkg/apis/apps/v1/helper"
@@ -40,6 +45,7 @@ import (
     utilimage "github.com/pingcap/tidb-operator/tests/e2e/util/image"
     utilpod "github.com/pingcap/tidb-operator/tests/e2e/util/pod"
     "github.com/pingcap/tidb-operator/tests/e2e/util/portforward"
+    utiltidb "github.com/pingcap/tidb-operator/tests/e2e/util/tidb"
     "github.com/pingcap/tidb-operator/tests/pkg/apimachinery"
     "github.com/pingcap/tidb-operator/tests/pkg/blockwriter"
     "github.com/pingcap/tidb-operator/tests/pkg/fixture"
@@ -287,6 +293,150 @@ var _ = ginkgo.Describe("[tidb-operator] TiDBCluster", func() {
         oa.StopInsertDataTo(&clusterA)
     })
 
+    ginkgo.It("Adhoc backup and restore with BR CRD", func() {
+        if framework.TestContext.Provider != "aws" {
+            framework.Skipf("provider is not aws, skipping")
+        }
+        tcNameFrom := "backup"
+        tcNameTo := "restore"
+        serviceAccountName := "tidb-backup-manager"
+        backupFolder := time.Now().Format(time.RFC3339)
+
+        // create backup cluster
+        tcFrom := fixture.GetTidbCluster(ns, tcNameFrom, utilimage.TiDBBRVersion)
+        tcFrom.Spec.PD.Replicas = 1
+        tcFrom.Spec.TiKV.Replicas = 1
+        tcFrom.Spec.TiDB.Replicas = 1
+        err := genericCli.Create(context.TODO(), tcFrom)
+        framework.ExpectNoError(err)
+        err = oa.WaitForTidbClusterReady(tcFrom, 30*time.Minute, 15*time.Second)
+        framework.ExpectNoError(err)
+        clusterFrom := newTidbClusterConfig(e2econfig.TestConfig, ns, tcNameFrom, "", "")
+
+        // create restore cluster
+        tcTo := fixture.GetTidbCluster(ns, tcNameTo, utilimage.TiDBBRVersion)
+        tcTo.Spec.PD.Replicas = 1
+        tcTo.Spec.TiKV.Replicas = 1
+        tcTo.Spec.TiDB.Replicas = 1
+        err = genericCli.Create(context.TODO(), tcTo)
+        framework.ExpectNoError(err)
+        err = oa.WaitForTidbClusterReady(tcTo, 30*time.Minute, 15*time.Second)
+        framework.ExpectNoError(err)
+        clusterTo := newTidbClusterConfig(e2econfig.TestConfig, ns, tcNameTo, "", "")
+
+        // insert some data into the source cluster with blockwriter
+        ginkgo.By(fmt.Sprintf("Begin inserting data into cluster %q", clusterFrom.ClusterName))
+        oa.BeginInsertDataToOrDie(&clusterFrom)
+        err = wait.PollImmediate(time.Second*5, time.Minute*5, utiltidb.TiDBIsInserted(fw, tcFrom.GetNamespace(), tcFrom.GetName(), "root", "", "test", "block_writer"))
+        framework.ExpectNoError(err)
+        ginkgo.By(fmt.Sprintf("Stop inserting data into cluster %q", clusterFrom.ClusterName))
+        oa.StopInsertDataTo(&clusterFrom)
+
+        // prepare RBAC and secrets for the Backup/Restore CRs
+        backupRole := fixture.GetBackupRole(tcFrom, serviceAccountName)
+        _, err = c.RbacV1beta1().Roles(ns).Create(backupRole)
+        framework.ExpectNoError(err)
+        backupServiceAccount := fixture.GetBackupServiceAccount(tcFrom, serviceAccountName)
+        _, err = c.CoreV1().ServiceAccounts(ns).Create(backupServiceAccount)
+        framework.ExpectNoError(err)
+        backupRoleBinding := fixture.GetBackupRoleBing(tcFrom, serviceAccountName)
+        _, err = c.RbacV1beta1().RoleBindings(ns).Create(backupRoleBinding)
+        framework.ExpectNoError(err)
+        backupSecret := fixture.GetBackupSecret(tcFrom, "")
+        _, err = c.CoreV1().Secrets(ns).Create(backupSecret)
+        framework.ExpectNoError(err)
+        restoreSecret := fixture.GetBackupSecret(tcTo, "")
+        _, err = c.CoreV1().Secrets(ns).Create(restoreSecret)
+        framework.ExpectNoError(err)
+        cred := credentials.NewSharedCredentials("", "default")
+        val, err := cred.Get()
+        framework.ExpectNoError(err)
+        backupS3Secret := fixture.GetS3Secret(tcFrom, val.AccessKeyID, val.SecretAccessKey)
+        _, err = c.CoreV1().Secrets(ns).Create(backupS3Secret)
+        framework.ExpectNoError(err)
+
+        ginkgo.By(fmt.Sprintf("Begin to back up data from cluster %q", clusterFrom.ClusterName))
+        // create the Backup CR to start the backup
+        backup := fixture.GetBackupCRDWithBR(tcFrom, backupFolder)
+        _, err = cli.PingcapV1alpha1().Backups(ns).Create(backup)
+        framework.ExpectNoError(err)
+
+        // wait until the backup succeeds
+        err = wait.PollImmediate(5*time.Second, 10*time.Minute, func() (bool, error) {
+            tmpBackup, err := cli.PingcapV1alpha1().Backups(ns).Get(backup.Name, metav1.GetOptions{})
+            if err != nil {
+                return false, err
+            }
+            // Check the status conditions one by one;
+            // anything other than Complete or Failed means the backup is still running.
+            for _, condition := range tmpBackup.Status.Conditions {
+                if condition.Type == v1alpha1.BackupComplete {
+                    return true, nil
+                } else if condition.Type == v1alpha1.BackupFailed {
+                    return false, errors.NewInternalError(nerrors.New(condition.Reason))
+                }
+            }
+            return false, nil
+        })
+        framework.ExpectNoError(err)
+
+        ginkgo.By(fmt.Sprintf("Begin to restore data into cluster %q", clusterTo.ClusterName))
+        // create the Restore CR to start the restore
+        restore := fixture.GetRestoreCRDWithBR(tcTo, backupFolder)
+        _, err = cli.PingcapV1alpha1().Restores(ns).Create(restore)
+        framework.ExpectNoError(err)
+
+        // wait until the restore succeeds
+        err = wait.PollImmediate(5*time.Second, 10*time.Minute, func() (bool, error) {
+            tmpRestore, err := cli.PingcapV1alpha1().Restores(ns).Get(restore.Name, metav1.GetOptions{})
+            if err != nil {
+                return false, err
+            }
+            // Check the status conditions one by one;
+            // anything other than Complete or Failed means the restore is still running.
+            for _, condition := range tmpRestore.Status.Conditions {
+                if condition.Type == v1alpha1.RestoreComplete {
+                    return true, nil
+                } else if condition.Type == v1alpha1.RestoreFailed {
+                    return false, errors.NewInternalError(nerrors.New(condition.Reason))
+                }
+            }
+            return false, nil
+        })
+        framework.ExpectNoError(err)
+
+        ginkgo.By(fmt.Sprintf("Check the correctness of cluster %q and %q", clusterFrom.ClusterName, clusterTo.ClusterName))
+        isSame, err := oa.DataIsTheSameAs(&clusterFrom, &clusterTo)
+        framework.ExpectNoError(err)
+        if !isSame {
+            framework.ExpectNoError(nerrors.New("backup database and restore database are not the same"))
+        }
+
+        // delete the backup data in S3
+        err = cli.PingcapV1alpha1().Backups(ns).Delete(backup.Name, &metav1.DeleteOptions{})
+        framework.ExpectNoError(err)
+
+        err = wait.PollImmediate(5*time.Second, 5*time.Minute, func() (bool, error) {
+            awsConfig := aws.NewConfig().
+                WithRegion(backup.Spec.S3.Region).
+                WithCredentials(cred)
+            svc := s3.New(session.Must(session.NewSession(awsConfig)))
+            input := &s3.ListObjectsV2Input{
+                Bucket: aws.String(backup.Spec.S3.Bucket),
+                Prefix: aws.String(backup.Spec.S3.Prefix),
+            }
+            result, err := svc.ListObjectsV2(input)
+            if err != nil {
+                return false, err
+            }
+            if *result.KeyCount != 0 {
+                return false, nil
+            }
+            return true, nil
+        })
+        framework.ExpectNoError(err)
+    })
+
     ginkgo.It("Test aggregated apiserver", func() {
         ginkgo.By(fmt.Sprintf("Starting to test apiserver, test apiserver image: %s", cfg.E2EImage))
         framework.Logf("config: %v", config)
diff --git a/tests/e2e/util/image/image.go b/tests/e2e/util/image/image.go
index 4444496079..a6ab2c4fb5 100644
--- a/tests/e2e/util/image/image.go
+++ b/tests/e2e/util/image/image.go
@@ -32,6 +32,7 @@ const (
     TiDBV3UpgradeVersion = "v3.0.9"
     TiDBTLSVersion       = TiDBV3Version // must >= 3.0.5
     TiDBV2Version        = "v2.1.19"
+    TiDBBRVersion        = "v4.0.0-beta.1"
 )
 
 func ListImages() []string {
diff --git a/tests/e2e/util/tidb/tidb.go b/tests/e2e/util/tidb/tidb.go
index aba1656dad..7d301cf3c8 100644
--- a/tests/e2e/util/tidb/tidb.go
+++ b/tests/e2e/util/tidb/tidb.go
@@ -55,3 +55,50 @@ func TiDBIsConnectable(fw portforward.PortForward, ns, tc, user, password string
         return true, nil
     }
 }
+
+// TiDBIsInserted checks whether the tidb cluster has had some data inserted.
+func TiDBIsInserted(fw portforward.PortForward, ns, tc, user, password, dbName, tableName string) wait.ConditionFunc {
+    return func() (bool, error) {
+        var db *sql.DB
+        dsn, cancel, err := GetTiDBDSN(fw, ns, tc, user, password, dbName)
+        if err != nil {
+            return false, err
+        }
+
+        defer cancel()
+        if db, err = sql.Open("mysql", dsn); err != nil {
+            return false, err
+        }
+
+        defer db.Close()
+        if err := db.Ping(); err != nil {
+            return false, err
+        }
+
+        getCntFn := func(db *sql.DB, tableName string) (int, error) {
+            var cnt int
+            rows, err := db.Query(fmt.Sprintf("SELECT count(*) FROM %s", tableName))
+            if err != nil {
+                return cnt, fmt.Errorf("failed to select count(*) from %s, %v", tableName, err)
+            }
+            for rows.Next() {
+                err := rows.Scan(&cnt)
+                if err != nil {
+                    return cnt, fmt.Errorf("failed to scan count from %s, %v", tableName, err)
+                }
+                return cnt, nil
+            }
+            return cnt, fmt.Errorf("cannot find count of table %s", tableName)
+        }
+
+        cnt, err := getCntFn(db, tableName)
+        if err != nil {
+            return false, err
+        }
+        if cnt == 0 {
+            return false, nil
+        }
+
+        return true, nil
+    }
+}
diff --git a/tests/pkg/fixture/fixture.go b/tests/pkg/fixture/fixture.go
index 2e78d03c30..ea2b810fcf 100644
--- a/tests/pkg/fixture/fixture.go
+++ b/tests/pkg/fixture/fixture.go
@@ -14,8 +14,12 @@
 package fixture
 
 import (
+    "fmt"
+
     "github.com/pingcap/tidb-operator/pkg/apis/pingcap/v1alpha1"
+    "github.com/pingcap/tidb-operator/pkg/label"
     corev1 "k8s.io/api/core/v1"
+    rbacv1beta1 "k8s.io/api/rbac/v1beta1"
     "k8s.io/apimachinery/pkg/api/resource"
     metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     "k8s.io/utils/pointer"
@@ -43,6 +47,11 @@ var (
             corev1.ResourceMemory: resource.MustParse("4Gi"),
         },
     }
+    // hard-coded region and s3 bucket in our aws account for e2e testing
+    // TODO create s3 bucket in current region dynamically
+    AWSRegion = "us-west-2"
+    Bucket    = "backup.e2e.us-west-2.tidbcloud.com"
+    S3Secret  = "s3-secret"
 )
 
 func WithStorage(r corev1.ResourceRequirements, size string) corev1.ResourceRequirements {
@@ -197,3 +206,148 @@ func NewTidbMonitor(name, namespace string, tc *v1alpha1.TidbCluster, grafanaEna
     }
     return monitor
 }
+
+func GetBackupRole(tc *v1alpha1.TidbCluster, serviceAccountName string) *rbacv1beta1.Role {
+    return &rbacv1beta1.Role{
+        ObjectMeta: metav1.ObjectMeta{
+            Name:      serviceAccountName,
+            Namespace: tc.GetNamespace(),
+            Labels:    map[string]string{label.ComponentLabelKey: serviceAccountName},
+        },
+        Rules: []rbacv1beta1.PolicyRule{
+            {
+                APIGroups: []string{""},
+                Resources: []string{"events"},
+                Verbs:     []string{"*"},
+            },
+            {
+                APIGroups: []string{"pingcap.com"},
+                Resources: []string{"backups", "restores"},
+                Verbs:     []string{"get", "watch", "list", "update"},
+            },
+        },
+    }
+}
+
+func GetBackupServiceAccount(tc *v1alpha1.TidbCluster, serviceAccountName string) *corev1.ServiceAccount {
+    return &corev1.ServiceAccount{
+        ObjectMeta: metav1.ObjectMeta{
+            Name:      serviceAccountName,
+            Namespace: tc.GetNamespace(),
+        },
+    }
+}
+
+func GetBackupRoleBing(tc *v1alpha1.TidbCluster, serviceAccountName string) *rbacv1beta1.RoleBinding {
+    return &rbacv1beta1.RoleBinding{
+        ObjectMeta: metav1.ObjectMeta{
+            Name:      serviceAccountName,
+            Namespace: tc.GetNamespace(),
+            Labels:    map[string]string{label.ComponentLabelKey: serviceAccountName},
+        },
+        Subjects: []rbacv1beta1.Subject{
+            {
+                Kind: rbacv1beta1.ServiceAccountKind,
+                Name: serviceAccountName,
+            },
+        },
+        RoleRef: rbacv1beta1.RoleRef{
+            APIGroup: "rbac.authorization.k8s.io",
+            Kind:     "Role",
+            Name:     serviceAccountName,
+        },
+    }
+}
+
+func GetBackupSecret(tc *v1alpha1.TidbCluster, password string) *corev1.Secret {
+    return &corev1.Secret{
+        ObjectMeta: metav1.ObjectMeta{
+            Name:      fmt.Sprintf("%s-backup-secret", tc.GetName()),
+            Namespace: tc.GetNamespace(),
+        },
+        Data: map[string][]byte{
+            "password": []byte(password),
+        },
+        Type: corev1.SecretTypeOpaque,
+    }
+}
+
+func GetS3Secret(tc *v1alpha1.TidbCluster, accessKey, secretKey string) *corev1.Secret {
+    return &corev1.Secret{
+        ObjectMeta: metav1.ObjectMeta{
+            Name:      S3Secret,
+            Namespace: tc.GetNamespace(),
+        },
+        Data: map[string][]byte{
+            "access_key": []byte(accessKey),
+            "secret_key": []byte(secretKey),
+        },
+        Type: corev1.SecretTypeOpaque,
+    }
+}
+
+func GetBackupCRDWithBR(tc *v1alpha1.TidbCluster, backupFolder string) *v1alpha1.Backup {
+    sendCredToTikv := true
+    return &v1alpha1.Backup{
+        ObjectMeta: metav1.ObjectMeta{
+            Name:      fmt.Sprintf("%s-backup", tc.GetName()),
+            Namespace: tc.GetNamespace(),
+        },
+        Spec: v1alpha1.BackupSpec{
+            Type: v1alpha1.BackupTypeFull,
+            StorageProvider: v1alpha1.StorageProvider{
+                S3: &v1alpha1.S3StorageProvider{
+                    Provider:   v1alpha1.S3StorageProviderTypeAWS,
+                    Region:     AWSRegion,
+                    Bucket:     Bucket,
+                    Prefix:     backupFolder,
+                    SecretName: S3Secret,
+                },
+            },
+            From: v1alpha1.TiDBAccessConfig{
+                Host:       fmt.Sprintf("%s-tidb.%s", tc.GetName(), tc.GetNamespace()),
+                SecretName: fmt.Sprintf("%s-backup-secret", tc.GetName()),
+                Port:       4000,
+                User:       "root",
+            },
+            BR: &v1alpha1.BRConfig{
+                Cluster:          tc.GetName(),
+                ClusterNamespace: tc.GetNamespace(),
+                SendCredToTikv:   &sendCredToTikv,
+            },
+        },
+    }
+}
+
+func GetRestoreCRDWithBR(tc *v1alpha1.TidbCluster, backupFolder string) *v1alpha1.Restore {
+    sendCredToTikv := true
+    return &v1alpha1.Restore{
+        ObjectMeta: metav1.ObjectMeta{
+            Name:      fmt.Sprintf("%s-restore", tc.GetName()),
+            Namespace: tc.GetNamespace(),
+        },
+        Spec: v1alpha1.RestoreSpec{
+            Type: v1alpha1.BackupTypeFull,
+            StorageProvider: v1alpha1.StorageProvider{
+                S3: &v1alpha1.S3StorageProvider{
+                    Provider:   v1alpha1.S3StorageProviderTypeAWS,
+                    Region:     AWSRegion,
+                    Bucket:     Bucket,
+                    Prefix:     backupFolder,
+                    SecretName: S3Secret,
+                },
+            },
+            To: v1alpha1.TiDBAccessConfig{
+                Host:       fmt.Sprintf("%s-tidb.%s", tc.GetName(), tc.GetNamespace()),
+                SecretName: fmt.Sprintf("%s-backup-secret", tc.GetName()),
+                Port:       4000,
+                User:       "root",
+            },
+            BR: &v1alpha1.BRConfig{
+                Cluster:          tc.GetName(),
+                ClusterNamespace: tc.GetNamespace(),
+                SendCredToTikv:   &sendCredToTikv,
+            },
+        },
+    }
+}
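
Taken together, the plumbing above can be exercised locally roughly as sketched below. This is an illustration rather than a documented workflow: it assumes the Makefile's DOCKER_REGISTRY/IMAGE_TAG variables and that hack/e2e.sh forwards the arguments after "--" to the ginkgo-based e2e runner, the same path run-e2e.sh uses to pass --operator-image and the new --backup-image.

# Build both the operator image and the new backup-manager image (new Makefile rule above).
DOCKER_REGISTRY=localhost:5000 IMAGE_TAG=latest make docker

# Run only the new case; it self-skips unless the framework provider is "aws",
# so on a kind cluster it is expected to be skipped.
./hack/e2e.sh -- --ginkgo.focus='Adhoc backup and restore with BR CRD'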
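The test feeds the local default AWS profile into the cluster through fixture.GetS3Secret, which stores the key pair under the access_key/secret_key keys of the s3-secret secret referenced by the Backup and Restore CRs. A hand-rolled equivalent, assuming the aws CLI is configured with that same default profile and NAMESPACE is a hypothetical placeholder for the test namespace, would be roughly:

# Sketch only: mirrors what fixture.GetS3Secret builds from ~/.aws/credentials.
kubectl -n "$NAMESPACE" create secret generic s3-secret \
  --from-literal=access_key="$(aws configure get aws_access_key_id)" \
  --from-literal=secret_key="$(aws configure get aws_secret_access_key)"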