diff --git a/tests/actions.go b/tests/actions.go
index 61b50b11c3..35b54b3cbf 100644
--- a/tests/actions.go
+++ b/tests/actions.go
@@ -24,11 +24,13 @@ import (
 	_ "github.com/go-sql-driver/mysql"
 	"github.com/golang/glog"
+	"github.com/pingcap/errors"
 	"github.com/pingcap/kvproto/pkg/metapb"
 	"github.com/pingcap/tidb-operator/pkg/apis/pingcap.com/v1alpha1"
 	"github.com/pingcap/tidb-operator/pkg/client/clientset/versioned"
 	"github.com/pingcap/tidb-operator/pkg/controller"
 	"github.com/pingcap/tidb-operator/pkg/label"
+	"k8s.io/api/apps/v1beta1"
 	batchv1 "k8s.io/api/batch/v1"
 	corev1 "k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -103,6 +105,8 @@ type operatorActions struct {
 	pdControl controller.PDControlInterface
 }
 
+var _ = OperatorActions(&operatorActions{})
+
 type OperatorInfo struct {
 	Namespace   string
 	ReleaseName string
@@ -122,7 +126,7 @@ type TidbClusterInfo struct {
 	StorageClassName string
 	Password         string
 	RecordCount      string
-	InsertBetchSize  string
+	InsertBatchSize  string
 	Resources        map[string]string
 	Args             map[string]string
 }
@@ -344,6 +348,7 @@ func (oa *operatorActions) CheckTidbClusterStatus(info *TidbClusterInfo) error {
 		return true, nil
 	}); err != nil {
-		return fmt.Errorf("failed to waiting for tidbcluster %s/%s ready in 10 minutes", ns, tcName)
+		glog.Infof("check tidb cluster status failed: %s", err.Error())
+		return fmt.Errorf("failed to wait for tidbcluster %s/%s to be ready in 10 minutes", ns, tcName)
 	}
@@ -358,10 +363,44 @@ func (oa *operatorActions) StopInsertDataTo(info *TidbClusterInfo) error {
 	return nil
 }
 
-func (oa *operatorActions) ScaleTidbCluster(info *TidbClusterInfo) error { return nil }
-func (oa *operatorActions) UpgradeTidbCluster(info *TidbClusterInfo) error { return nil }
-func (oa *operatorActions) DeployMonitor(info *TidbClusterInfo) error { return nil }
-func (oa *operatorActions) CleanMonitor(info *TidbClusterInfo) error { return nil }
+func chartPath(name string, tag string) string {
+	return "/charts/" + tag + "/" + name
+}
+
+func (oa *operatorActions) ScaleTidbCluster(info *TidbClusterInfo) error {
+	cmd := fmt.Sprintf("helm upgrade %s %s --set-string %s",
+		info.ClusterName, chartPath("tidb-cluster", info.OperatorTag), info.HelmSetString())
+	glog.Info("[SCALE] " + cmd)
+	res, err := exec.Command("/bin/sh", "-c", cmd).CombinedOutput()
+	if err != nil {
+		return errors.Wrapf(err, "failed to scale tidb cluster: %s", string(res))
+	}
+	return nil
+}
+
+func (oa *operatorActions) UpgradeTidbCluster(info *TidbClusterInfo) error {
+	cmd := fmt.Sprintf("helm upgrade %s %s --set-string %s",
+		info.ClusterName, chartPath("tidb-cluster", info.OperatorTag), info.HelmSetString())
+	glog.Info("[UPGRADE] " + cmd)
+	res, err := exec.Command("/bin/sh", "-c", cmd).CombinedOutput()
+	if err != nil {
+		return errors.Wrapf(err, "failed to upgrade tidb cluster: %s", string(res))
+	}
+	return nil
+}
+
+func (oa *operatorActions) DeployMonitor(info *TidbClusterInfo) error { return nil }
+func (oa *operatorActions) CleanMonitor(info *TidbClusterInfo) error  { return nil }
+
+func getComponentContainer(set *v1beta1.StatefulSet) (corev1.Container, bool) {
+	name := set.Labels[label.ComponentLabelKey]
+	for _, c := range set.Spec.Template.Spec.Containers {
+		if c.Name == name {
+			return c, true
+		}
+	}
+	return corev1.Container{}, false
+}
 
 func (oa *operatorActions) pdMembersReadyFn(tc *v1alpha1.TidbCluster) (bool, error) {
 	tcName := tc.GetName()
@@ -400,6 +439,11 @@ func (oa *operatorActions) pdMembersReadyFn(tc *v1alpha1.TidbCluster) (bool, err
 			ns, pdSetName, pdSet.Status.ReadyReplicas, pdSet.Status.Replicas)
 		return false, nil
 	}
+	if c, ok := getComponentContainer(pdSet); !ok || tc.Spec.PD.Image != c.Image {
+		glog.Infof("statefulset: %s/%s .spec.template.spec.containers[name=pd].image(%s) != %s",
+			ns, pdSetName, c.Image, tc.Spec.PD.Image)
+		return false, nil
+	}
 
 	for _, member := range tc.Status.PD.Members {
 		if !member.Health {
@@ -460,6 +504,11 @@ func (oa *operatorActions) tikvMembersReadyFn(tc *v1alpha1.TidbCluster) (bool, e
 			ns, tikvSetName, tikvSet.Status.ReadyReplicas, tikvSet.Status.Replicas)
 		return false, nil
 	}
+	if c, ok := getComponentContainer(tikvSet); !ok || tc.Spec.TiKV.Image != c.Image {
+		glog.Infof("statefulset: %s/%s .spec.template.spec.containers[name=tikv].image(%s) != %s",
+			ns, tikvSetName, c.Image, tc.Spec.TiKV.Image)
+		return false, nil
+	}
 
 	for _, store := range tc.Status.TiKV.Stores {
 		if store.State != v1alpha1.TiKVStateUp {
@@ -509,6 +558,11 @@ func (oa *operatorActions) tidbMembersReadyFn(tc *v1alpha1.TidbCluster) (bool, e
 			ns, tidbSetName, tidbSet.Status.ReadyReplicas, tidbSet.Status.Replicas)
 		return false, nil
 	}
+	if c, ok := getComponentContainer(tidbSet); !ok || tc.Spec.TiDB.Image != c.Image {
+		glog.Infof("statefulset: %s/%s .spec.template.spec.containers[name=tidb].image(%s) != %s",
+			ns, tidbSetName, c.Image, tc.Spec.TiDB.Image)
+		return false, nil
+	}
 
 	_, err = oa.kubeCli.CoreV1().Services(ns).Get(tidbSetName, metav1.GetOptions{})
 	if err != nil {
@@ -577,8 +631,9 @@ outerLoop:
 	for _, pod := range podList.Items {
 		podName := pod.GetName()
 		if pod.Labels[label.ClusterIDLabelKey] != clusterID {
-			return false, fmt.Errorf("tidbcluster %s/%s's pod %s's label %s not equals %s ",
+			glog.Infof("tidbcluster %s/%s's pod %s's label %s does not equal %s",
 				ns, tcName, podName, label.ClusterIDLabelKey, clusterID)
+			return false, nil
 		}
 
 		component := pod.Labels[label.ComponentLabelKey]
diff --git a/tests/cluster_info.go b/tests/cluster_info.go
new file mode 100644
index 0000000000..332be46759
--- /dev/null
+++ b/tests/cluster_info.go
@@ -0,0 +1,50 @@
+package tests
+
+import "strconv"
+
+func (tc *TidbClusterInfo) set(name string, value string) (string, bool) {
+	// NOTE: not thread-safe, maybe make info struct immutable
+	if tc.Args == nil {
+		tc.Args = make(map[string]string)
+	}
+	origVal, ok := tc.Args[name]
+	tc.Args[name] = value
+	return origVal, ok
+}
+
+func (tc *TidbClusterInfo) ScalePD(replicas uint) *TidbClusterInfo {
+	tc.set("pd.replicas", strconv.Itoa(int(replicas)))
+	return tc
+}
+
+func (tc *TidbClusterInfo) ScaleTiKV(replicas uint) *TidbClusterInfo {
+	tc.set("tikv.replicas", strconv.Itoa(int(replicas)))
+	return tc
+}
+
+func (tc *TidbClusterInfo) ScaleTiDB(replicas uint) *TidbClusterInfo {
+	tc.set("tidb.replicas", strconv.Itoa(int(replicas)))
+	return tc
+}
+
+func (tc *TidbClusterInfo) UpgradePD(image string) *TidbClusterInfo {
+	tc.PDImage = image
+	return tc
+}
+
+func (tc *TidbClusterInfo) UpgradeTiKV(image string) *TidbClusterInfo {
+	tc.TiKVImage = image
+	return tc
+}
+
+func (tc *TidbClusterInfo) UpgradeTiDB(image string) *TidbClusterInfo {
+	tc.TiDBImage = image
+	return tc
+}
+
+func (tc *TidbClusterInfo) UpgradeAll(tag string) *TidbClusterInfo {
+	return tc.
+		UpgradePD("pingcap/pd:" + tag).
+		UpgradeTiKV("pingcap/tikv:" + tag).
+ UpgradeTiDB("pingcap/tidb:" + tag) +} diff --git a/tests/cmd/e2e/main.go b/tests/cmd/e2e/main.go index 607ee90b55..9f8214ed3b 100644 --- a/tests/cmd/e2e/main.go +++ b/tests/cmd/e2e/main.go @@ -25,6 +25,12 @@ import ( "k8s.io/client-go/rest" ) +func perror(err error) { + if err != nil { + glog.Fatal(err) + } +} + func main() { flag.Parse() logs.InitLogs() @@ -53,13 +59,8 @@ func main() { SchedulerImage: "gcr.io/google-containers/hyperkube:v1.12.1", LogLevel: "2", } - - if err := oa.CleanOperator(operatorInfo); err != nil { - glog.Fatal(err) - } - if err := oa.DeployOperator(operatorInfo); err != nil { - glog.Fatal(err) - } + perror(oa.CleanOperator(operatorInfo)) + perror(oa.DeployOperator(operatorInfo)) clusterInfo := &tests.TidbClusterInfo{ Namespace: "tidb", @@ -70,22 +71,34 @@ func main() { TiDBImage: "pingcap/tidb:v2.1.3", StorageClassName: "local-storage", Password: "admin", - Args: map[string]string{}, + Resources: map[string]string{ + "pd.resources.limits.cpu": "1000m", + "pd.resources.limits.memory": "2Gi", + "pd.resources.requests.cpu": "200m", + "pd.resources.requests.memory": "1Gi", + "tikv.resources.limits.cpu": "2000m", + "tikv.resources.limits.memory": "4Gi", + "tikv.resources.requests.cpu": "1000m", + "tikv.resources.requests.memory": "2Gi", + "tidb.resources.limits.cpu": "2000m", + "tidb.resources.limits.memory": "4Gi", + "tidb.resources.requests.cpu": "500m", + "tidb.resources.requests.memory": "1Gi", + }, + Args: map[string]string{}, } - if err := oa.CreateSecret(clusterInfo); err != nil { - glog.Fatal(err) - } + perror(oa.CleanTidbCluster(clusterInfo)) + perror(oa.DeployTidbCluster(clusterInfo)) + perror(oa.CheckTidbClusterStatus(clusterInfo)) - if err := oa.CleanTidbCluster(clusterInfo); err != nil { - glog.Fatal(err) - } - if err := oa.DeployTidbCluster(clusterInfo); err != nil { - glog.Fatal(err) - } - if err := oa.CheckTidbClusterStatus(clusterInfo); err != nil { - glog.Fatal(err) - } + clusterInfo = clusterInfo.ScaleTiDB(3) + perror(oa.ScaleTidbCluster(clusterInfo)) + perror(oa.CheckTidbClusterStatus(clusterInfo)) + + clusterInfo = clusterInfo.UpgradeAll("v2.1.4") + perror(oa.UpgradeTidbCluster(clusterInfo)) + perror(oa.CheckTidbClusterStatus(clusterInfo)) restoreClusterInfo := &tests.TidbClusterInfo{ Namespace: "tidb", @@ -96,18 +109,26 @@ func main() { TiDBImage: "pingcap/tidb:v2.1.3", StorageClassName: "local-storage", Password: "admin", - Args: map[string]string{}, + Resources: map[string]string{ + "pd.resources.limits.cpu": "1000m", + "pd.resources.limits.memory": "2Gi", + "pd.resources.requests.cpu": "200m", + "pd.resources.requests.memory": "1Gi", + "tikv.resources.limits.cpu": "2000m", + "tikv.resources.limits.memory": "4Gi", + "tikv.resources.requests.cpu": "1000m", + "tikv.resources.requests.memory": "2Gi", + "tidb.resources.limits.cpu": "2000m", + "tidb.resources.limits.memory": "4Gi", + "tidb.resources.requests.cpu": "500m", + "tidb.resources.requests.memory": "1Gi", + }, + Args: map[string]string{}, } - if err := oa.CleanTidbCluster(restoreClusterInfo); err != nil { - glog.Fatal(err) - } - if err := oa.DeployTidbCluster(restoreClusterInfo); err != nil { - glog.Fatal(err) - } - if err := oa.CheckTidbClusterStatus(restoreClusterInfo); err != nil { - glog.Fatal(err) - } + perror(oa.CleanTidbCluster(restoreClusterInfo)) + perror(oa.DeployTidbCluster(restoreClusterInfo)) + perror(oa.CheckTidbClusterStatus(restoreClusterInfo)) backupCase := backup.NewBackupCase(oa, clusterInfo, restoreClusterInfo)