diff --git a/tests/cmd/stability/main.go b/tests/cmd/stability/main.go
index 8adcb065a49..44d530ed3f0 100644
--- a/tests/cmd/stability/main.go
+++ b/tests/cmd/stability/main.go
@@ -22,6 +22,7 @@ import (
 	"os"
 	"time"
 
+	"github.com/jinzhu/copier"
 	"k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/util/wait"
@@ -39,16 +40,13 @@ func main() {
 	logs.InitLogs()
 	defer logs.FlushLogs()
 
-	conf := tests.NewConfig()
-	err := conf.Parse()
-	if err != nil {
-		glog.Fatalf("failed to parse config: %v", err)
-	}
-
 	go func() {
 		glog.Info(http.ListenAndServe("localhost:6060", nil))
 	}()
 
+	conf := tests.NewConfig()
+	conf.ParseOrDie()
+
 	// TODO read these args from config
 	beginTidbVersion := "v2.1.0"
 	toTidbVersion := "v2.1.4"
diff --git a/tests/config.go b/tests/config.go
index df7bfffeafd..30c49b69d2e 100644
--- a/tests/config.go
+++ b/tests/config.go
@@ -59,6 +59,13 @@ func (c *Config) Parse() error {
 	return nil
 }
 
+func (c *Config) ParseOrDie() {
+	err := c.Parse()
+	if err != nil {
+		panic(err)
+	}
+}
+
 func (c *Config) configFromFile(path string) error {
 	data, err := ioutil.ReadFile(path)
 	if err != nil {
diff --git a/tests/e2e/create.go b/tests/e2e/create.go
deleted file mode 100644
index f9d1f4b6ce5..00000000000
--- a/tests/e2e/create.go
+++ /dev/null
@@ -1,612 +0,0 @@
-// Copyright 2018 PingCAP, Inc.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// See the License for the specific language governing permissions and
-// limitations under the License.package spec
-
-package e2e
-
-import (
-	"database/sql"
-	"encoding/json"
-	"fmt"
-	"io/ioutil"
-	"net/http"
-	"net/url"
-	"strconv"
-	"strings"
-	"time"
-
-	_ "github.com/go-sql-driver/mysql" // init mysql driver
-	. "github.com/onsi/ginkgo" // revive:disable:dot-imports
-	.
"github.com/onsi/gomega" // revive:disable:dot-imports - "github.com/pingcap/tidb-operator/pkg/apis/pingcap.com/v1alpha1" - "github.com/pingcap/tidb-operator/pkg/controller" - "github.com/pingcap/tidb-operator/pkg/label" - corev1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/apimachinery/pkg/util/wait" -) - -const ( - username = "admin" - password = "admin" -) - -type Result struct { - Metric struct { - Job string `json:"job"` - } `json:"metric"` - Values []interface{} `json:"values"` -} - -type Response struct { - Status string `json:"status"` - Data struct { - ResultType string `json:"resultType"` - Result []Result `json:"result"` - } -} - -func testCreate(spec clusterSpec) { - ns, clusterName := spec.ns, spec.clusterName - By(fmt.Sprintf("When create the TiDB cluster: %s/%s", ns, clusterName)) - instanceName := getInstanceName(ns, clusterName) - - cmdStr := fmt.Sprintf("helm install /charts/tidb-cluster -f /tidb-cluster-values.yaml"+ - " -n %s --namespace=%s --set %s", - instanceName, ns, buildSetFlag(spec)) - _, err := execCmd(cmdStr) - Expect(err).NotTo(HaveOccurred()) - - err = createSecret(ns, clusterName) - Expect(err).NotTo(HaveOccurred()) - - By("Then all members should running") - err = wait.Poll(5*time.Second, 5*time.Minute, func() (bool, error) { - return allMembersRunning(ns, clusterName) - }) - Expect(err).NotTo(HaveOccurred()) - - By("And password is set correctly") - err = wait.Poll(5*time.Second, 5*time.Minute, func() (bool, error) { - return passwordIsSet(ns, clusterName) - }) - Expect(err).NotTo(HaveOccurred()) - - By("And scheduling policy is correct") - nodeMap, err := getNodeMap(ns, clusterName, label.PDLabelVal) - Expect(err).NotTo(HaveOccurred()) - for _, podNamesArr := range nodeMap { - Expect(len(podNamesArr)).To(Equal(1)) - } - nodeMap, err = getNodeMap(ns, clusterName, label.TiKVLabelVal) - Expect(err).NotTo(HaveOccurred()) - for _, podNamesArr := range nodeMap { - Expect(len(podNamesArr)).To(Equal(1)) - } - - By("When create a table and add some data to this table") - err = wait.Poll(5*time.Second, 5*time.Minute, func() (bool, error) { - return addDataToCluster(ns, clusterName) - }) - Expect(err).NotTo(HaveOccurred()) - - By("Then the data is correct") - err = wait.Poll(5*time.Second, 5*time.Minute, func() (bool, error) { - return dataIsCorrect(ns, clusterName) - }) - Expect(err).NotTo(HaveOccurred()) -} - -func allMembersRunning(ns, clusterName string) (bool, error) { - tc, err := cli.PingcapV1alpha1().TidbClusters(ns).Get(clusterName, metav1.GetOptions{}) - if err != nil { - return false, nil - } - - running, err := pdMemberRunning(tc) - if err != nil || !running { - return false, nil - } - - running, err = tikvMemberRunning(tc) - if err != nil || !running { - return false, nil - } - - running, err = tidbMemberRunning(tc) - if err != nil || !running { - return false, nil - } - - synced, err := reclaimPolicySynced(tc) - if err != nil || !synced { - return false, nil - } - - synced, err = metaSynced(tc) - if err != nil || !synced { - return false, nil - } - - running, err = monitorMemberRunning(tc) - if err != nil || !running { - return false, nil - } - - return true, nil -} - -func addDataToCluster(ns, clusterName string) (bool, error) { - db, err := sql.Open("mysql", getDSN(ns, clusterName)) - if err != nil { - logf("can't open connection to mysql: %v", err) - return false, nil - } - defer db.Close() - - _, err = db.Exec(fmt.Sprintf("CREATE TABLE %s (clusterName VARCHAR(64))", 
testTableName)) - if err != nil { - logf("can't create table to mysql: %v", err) - return false, nil - } - - _, err = db.Exec(fmt.Sprintf("INSERT INTO %s VALUES (?)", testTableName), testTableVal) - if err != nil { - logf("can't insert data to mysql: %v", err) - return false, nil - } - - return true, nil -} - -func dataIsCorrect(ns, clusterName string) (bool, error) { - db, err := sql.Open("mysql", getDSN(ns, clusterName)) - if err != nil { - return false, nil - } - - rows, err := db.Query(fmt.Sprintf("SELECT * FROM %s", testTableName)) - if err != nil { - logf(err.Error()) - return false, nil - } - - for rows.Next() { - var v string - err := rows.Scan(&v) - if err != nil { - logf(err.Error()) - } - - if v == testTableVal { - return true, nil - } - - return true, fmt.Errorf("val should equal: %s", testTableVal) - } - - return false, nil -} - -func pdMemberRunning(tc *v1alpha1.TidbCluster) (bool, error) { - ns := tc.GetNamespace() - tcName := tc.GetName() - pdSetName := controller.PDMemberName(tcName) - pdSet, err := kubeCli.AppsV1beta1().StatefulSets(ns).Get(pdSetName, metav1.GetOptions{}) - if err != nil { - logf(err.Error()) - return false, nil - } - - logf("pdSet.Status: %+v", pdSet.Status) - - if tc.Status.PD.StatefulSet == nil { - logf("tc.Status.PD.StatefulSet is nil") - return false, nil - } - - if *pdSet.Spec.Replicas != tc.Spec.PD.Replicas { - logf("pdSet.Spec.Replicas(%d) != tc.Spec.PD.Replicas(%d)", - *pdSet.Spec.Replicas, tc.Spec.PD.Replicas) - return false, nil - } - - if pdSet.Status.ReadyReplicas != tc.Spec.PD.Replicas { - logf("pdSet.Status.ReadyReplicas(%d) != %d", - pdSet.Status.ReadyReplicas, tc.Spec.PD.Replicas) - return false, nil - } - - if len(tc.Status.PD.Members) != int(tc.Spec.PD.Replicas) { - logf("tc.Status.PD.Members count(%d) != %d", - len(tc.Status.PD.Members), tc.Spec.PD.Replicas) - return false, nil - } - - if pdSet.Status.ReadyReplicas != pdSet.Status.Replicas { - logf("pdSet.Status.ReadyReplicas(%d) != pdSet.Status.Replicas(%d)", - pdSet.Status.ReadyReplicas, pdSet.Status.Replicas) - return false, nil - } - - for _, member := range tc.Status.PD.Members { - if !member.Health { - logf("pd member(%s) is not health", member.ID) - return false, nil - } - } - - if tc.Status.ClusterID == "" { - logf("tc.Status.ClusterID is nil") - return false, nil - } - - _, err = kubeCli.CoreV1().Services(ns).Get(controller.PDMemberName(tcName), metav1.GetOptions{}) - if err != nil { - logf(err.Error()) - return false, nil - } - _, err = kubeCli.CoreV1().Services(ns).Get(controller.PDPeerMemberName(tcName), metav1.GetOptions{}) - if err != nil { - logf(err.Error()) - return false, nil - } - - return true, nil -} - -func tikvMemberRunning(tc *v1alpha1.TidbCluster) (bool, error) { - ns := tc.GetNamespace() - tcName := tc.GetName() - tikvSetName := controller.TiKVMemberName(tcName) - - tikvSet, err := kubeCli.AppsV1beta1().StatefulSets(ns).Get(tikvSetName, metav1.GetOptions{}) - if err != nil { - logf(err.Error()) - return false, nil - } - - logf("tikvSet.Status: %+v", tikvSet.Status) - - if tc.Status.TiKV.StatefulSet == nil { - logf("tc.Status.TiKV.StatefulSet is nil") - return false, nil - } - - if *tikvSet.Spec.Replicas != tc.Spec.TiKV.Replicas { - logf("tikvSet.Spec.Replicas(%d) != tc.Spec.TiKV.Replicas(%d)", - *tikvSet.Spec.Replicas, tc.Spec.TiKV.Replicas) - return false, nil - } - - if tikvSet.Status.ReadyReplicas != tc.Spec.TiKV.Replicas { - logf("tikvSet.Status.ReadyReplicas(%d) != %d", - tikvSet.Status.ReadyReplicas, tc.Spec.TiKV.Replicas) - return false, nil - } - - if 
len(tc.Status.TiKV.Stores) != int(tc.Spec.TiKV.Replicas) { - logf("tc.Status.TiKV.Stores.count(%d) != %d", - len(tc.Status.TiKV.Stores), tc.Spec.TiKV.Replicas) - return false, nil - } - - if tikvSet.Status.ReadyReplicas != tikvSet.Status.Replicas { - logf("tikvSet.Status.ReadyReplicas(%d) != tikvSet.Status.Replicas(%d)", - tikvSet.Status.ReadyReplicas, tikvSet.Status.Replicas) - return false, nil - } - - for _, store := range tc.Status.TiKV.Stores { - if store.State != v1alpha1.TiKVStateUp { - logf("store(%s) state != %s", store.ID, v1alpha1.TiKVStateUp) - return false, nil - } - } - - _, err = kubeCli.CoreV1().Services(ns).Get(controller.TiKVPeerMemberName(tcName), metav1.GetOptions{}) - if err != nil { - logf(err.Error()) - return false, nil - } - - return true, nil -} - -func tidbMemberRunning(tc *v1alpha1.TidbCluster) (bool, error) { - ns := tc.GetNamespace() - tcName := tc.GetName() - tidbSetName := controller.TiDBMemberName(tcName) - tidbSet, err := kubeCli.AppsV1beta1().StatefulSets(ns).Get(tidbSetName, metav1.GetOptions{}) - if err != nil { - logf(err.Error()) - return false, nil - } - - logf("tidbSet.Status: %+v", tidbSet.Status) - - if tc.Status.TiDB.StatefulSet == nil { - logf("tc.Status.TiDB.StatefulSet is nil") - return false, nil - } - - if *tidbSet.Spec.Replicas != tc.Spec.TiDB.Replicas { - logf("tidbSet.Spec.Replicas(%d) != tc.Spec.TiDB.Replicas(%d)", - *tidbSet.Spec.Replicas, tc.Spec.TiDB.Replicas) - return false, nil - } - - if tidbSet.Status.ReadyReplicas != tc.Spec.TiDB.Replicas { - logf("tidbSet.Status.ReadyReplicas(%d) != %d", - tidbSet.Status.ReadyReplicas, tc.Spec.TiDB.Replicas) - return false, nil - } - - if tidbSet.Status.ReadyReplicas != tidbSet.Status.Replicas { - logf("tidbSet.Status.ReadyReplicas(%d) != tidbSet.Status.Replicas(%d)", - tidbSet.Status.ReadyReplicas, tidbSet.Status.Replicas) - return false, nil - } - - _, err = kubeCli.CoreV1().Services(ns).Get(controller.TiDBMemberName(tcName), metav1.GetOptions{}) - if err != nil { - logf(err.Error()) - return false, nil - } - - return true, nil -} - -func monitorMemberRunning(tc *v1alpha1.TidbCluster) (bool, error) { - ns := tc.GetNamespace() - tcName := tc.GetName() - deployName := fmt.Sprintf("%s-monitor", tcName) - deploy, err := kubeCli.AppsV1beta1().Deployments(ns).Get(deployName, metav1.GetOptions{}) - if err != nil { - logf(err.Error()) - return false, nil - } - if deploy.Status.ReadyReplicas < 1 { - logf("monitor ready replicas %d < 1", deploy.Status.ReadyReplicas) - return false, nil - } - if err := checkGrafanaData(tc); err != nil { - logf("can't get grafana data: %v", err) - return false, nil - } - return true, nil -} - -func checkGrafanaData(tc *v1alpha1.TidbCluster) error { - ns := tc.GetNamespace() - tcName := tc.GetName() - svcName := fmt.Sprintf("%s-grafana", tcName) - end := time.Now() - start := end.Add(-time.Minute) - values := url.Values{} - values.Set("query", `sum(tikv_pd_heartbeat_tick_total{type="leader"}) by (job)`) - values.Set("start", fmt.Sprintf("%d", start.Unix())) - values.Set("end", fmt.Sprintf("%d", end.Unix())) - values.Set("step", "30") - u := fmt.Sprintf("http://%s.%s.svc.cluster.local:3000/api/datasources/proxy/1/api/v1/query_range?%s", svcName, ns, values.Encode()) - req, err := http.NewRequest(http.MethodGet, u, nil) - if err != nil { - return err - } - req.SetBasicAuth(username, password) - client := &http.Client{} - resp, err := client.Do(req) - if err != nil { - return err - } - defer resp.Body.Close() - buf, err := ioutil.ReadAll(resp.Body) - if err != nil { - return 
err - } - data := &Response{} - if err := json.Unmarshal(buf, data); err != nil { - return err - } - if data.Status != "success" || len(data.Data.Result) < 1 { - return fmt.Errorf("invalid response: status: %s, result: %v", data.Status, data.Data.Result) - } - return nil -} - -func reclaimPolicySynced(tc *v1alpha1.TidbCluster) (bool, error) { - ns := tc.GetNamespace() - instanceName := tc.GetLabels()[label.InstanceLabelKey] - labelSelector := label.New().Instance(instanceName) - pvcList, err := kubeCli.CoreV1().PersistentVolumeClaims(ns).List( - metav1.ListOptions{ - LabelSelector: labels.SelectorFromSet( - labelSelector.Labels(), - ).String(), - }, - ) - if err != nil { - logf(err.Error()) - return false, nil - } - - for _, pvc := range pvcList.Items { - pv, err := kubeCli.CoreV1().PersistentVolumes().Get(pvc.Spec.VolumeName, metav1.GetOptions{}) - if err != nil { - logf(err.Error()) - return false, nil - } - - logf("pv: %s's persistentVolumeReclaimPolicy is %s", pv.GetName(), pv.Spec.PersistentVolumeReclaimPolicy) - if pv.Spec.PersistentVolumeReclaimPolicy != tc.Spec.PVReclaimPolicy { - return false, nil - } - } - - return true, nil -} - -func passwordIsSet(ns, clusterName string) (bool, error) { - jobName := clusterName + "-tidb-initializer" - job, err := kubeCli.BatchV1().Jobs(ns).Get(jobName, metav1.GetOptions{}) - if err != nil { - return false, nil - } - if job.Status.Succeeded < 1 { - logf("password setter job not finished") - return false, nil - } - - db, err := sql.Open("mysql", getDSN(ns, clusterName)) - if err != nil { - logf("can't open connection to mysql: %v", err) - return false, nil - } - defer db.Close() - - if err = db.Ping(); err != nil { - logf("can't connect to tidb: %s/%s-tidb with password %s", ns, clusterName, password) - return false, nil - } - return true, nil -} - -func getDSN(ns, clusterName string) string { - return fmt.Sprintf("root:%s@(%s-tidb.%s:4000)/test?charset=utf8", password, clusterName, ns) -} - -func metaSynced(tc *v1alpha1.TidbCluster) (bool, error) { - ns := tc.GetNamespace() - instanceName := tc.GetLabels()[label.InstanceLabelKey] - - pdControl := controller.NewDefaultPDControl() - pdCli := pdControl.GetPDClient(tc) - cluster, err := pdCli.GetCluster() - if err != nil { - logf(err.Error()) - return false, nil - } - clusterID := strconv.FormatUint(cluster.Id, 10) - - labelSelector := label.New().Instance(instanceName) - podList, err := kubeCli.CoreV1().Pods(ns).List( - metav1.ListOptions{ - LabelSelector: labels.SelectorFromSet( - labelSelector.Labels(), - ).String(), - }, - ) - if err != nil { - logf(err.Error()) - return false, nil - } - -outerLoop: - for _, pod := range podList.Items { - podName := pod.GetName() - Expect(pod.Labels[label.ClusterIDLabelKey]).To(Equal(clusterID)) - - component := pod.Labels[label.ComponentLabelKey] - switch component { - case label.PDLabelVal: - var memberID string - members, err := pdCli.GetMembers() - if err != nil { - logf(err.Error()) - return false, nil - } - for _, member := range members.Members { - if member.Name == podName { - memberID = strconv.FormatUint(member.GetMemberId(), 10) - break - } - } - Expect(memberID).NotTo(BeEmpty()) - Expect(pod.Labels[label.MemberIDLabelKey]).To(Equal(memberID)) - case label.TiKVLabelVal: - var storeID string - stores, err := pdCli.GetStores() - if err != nil { - logf(err.Error()) - return false, nil - } - for _, store := range stores.Stores { - addr := store.Store.GetAddress() - if strings.Split(addr, ".")[0] == podName { - storeID = 
strconv.FormatUint(store.Store.GetId(), 10) - break - } - } - Expect(storeID).NotTo(BeEmpty()) - Expect(pod.Labels[label.StoreIDLabelKey]).To(Equal(storeID)) - case label.TiDBLabelVal: - continue outerLoop - } - - var pvcName string - for _, vol := range pod.Spec.Volumes { - if vol.PersistentVolumeClaim != nil { - pvcName = vol.PersistentVolumeClaim.ClaimName - break - } - } - if pvcName == "" { - logf("pod: %s/%s's pvcName is empty", ns, podName) - return false, nil - } - - pvc, err := kubeCli.CoreV1().PersistentVolumeClaims(ns).Get(pvcName, metav1.GetOptions{}) - if err != nil { - logf(err.Error()) - return false, nil - } - Expect(pvc.Labels[label.ClusterIDLabelKey]).To(Equal(clusterID)) - Expect(pvc.Labels[label.MemberIDLabelKey]).To(Equal(pod.Labels[label.MemberIDLabelKey])) - Expect(pvc.Labels[label.StoreIDLabelKey]).To(Equal(pod.Labels[label.StoreIDLabelKey])) - Expect(pvc.Annotations[label.AnnPodNameKey]).To(Equal(podName)) - - pvName := pvc.Spec.VolumeName - pv, err := kubeCli.CoreV1().PersistentVolumes().Get(pvName, metav1.GetOptions{}) - if err != nil { - logf(err.Error()) - return false, nil - } - Expect(pv.Labels[label.NamespaceLabelKey]).To(Equal(ns)) - Expect(pv.Labels[label.ComponentLabelKey]).To(Equal(pod.Labels[label.ComponentLabelKey])) - Expect(pv.Labels[label.NameLabelKey]).To(Equal(pod.Labels[label.NameLabelKey])) - Expect(pv.Labels[label.ManagedByLabelKey]).To(Equal(pod.Labels[label.ManagedByLabelKey])) - Expect(pv.Labels[label.InstanceLabelKey]).To(Equal(pod.Labels[label.InstanceLabelKey])) - Expect(pv.Labels[label.ClusterIDLabelKey]).To(Equal(clusterID)) - Expect(pv.Labels[label.MemberIDLabelKey]).To(Equal(pod.Labels[label.MemberIDLabelKey])) - Expect(pv.Labels[label.StoreIDLabelKey]).To(Equal(pod.Labels[label.StoreIDLabelKey])) - Expect(pv.Annotations[label.AnnPodNameKey]).To(Equal(podName)) - } - - return true, nil -} - -func createSecret(ns, clusterName string) error { - secretName := ns + "-" + clusterName - secret := corev1.Secret{ - ObjectMeta: metav1.ObjectMeta{ - Name: secretName, - Namespace: ns, - }, - Data: map[string][]byte{ - "root": []byte(password), - }, - Type: corev1.SecretTypeOpaque, - } - _, err := kubeCli.CoreV1().Secrets(ns).Create(&secret) - return err -} diff --git a/tests/e2e/e2e_test.go b/tests/e2e/e2e_test.go deleted file mode 100644 index ebca488c839..00000000000 --- a/tests/e2e/e2e_test.go +++ /dev/null @@ -1,65 +0,0 @@ -// Copyright 2018 PingCAP, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// See the License for the specific language governing permissions and -// limitations under the License.package spec - -package e2e - -import ( - "fmt" - "testing" - - . "github.com/onsi/ginkgo" // revive:disable:dot-imports - . 
"github.com/onsi/gomega" // revive:disable:dot-imports - "github.com/pingcap/tidb-operator/pkg/client/clientset/versioned" - "k8s.io/client-go/kubernetes" - "k8s.io/client-go/rest" -) - -func TestE2E(t *testing.T) { - RegisterFailHandler(Fail) - - cfg, err := rest.InClusterConfig() - if err != nil { - panic(err) - } - cli, err = versioned.NewForConfig(cfg) - if err != nil { - panic(err) - } - kubeCli, err = kubernetes.NewForConfig(cfg) - if err != nil { - panic(err) - } - - RunSpecs(t, "TiDB Operator Smoke tests") -} - -var _ = SynchronizedBeforeSuite(func() []byte { - By("Clearing old TiDB Operator") - Expect(clearOperator()).NotTo(HaveOccurred()) - - By("Bootstrapping new TiDB Operator") - Expect(installOperator()).NotTo(HaveOccurred()) - - return nil -}, func(data []byte) {}) - -var _ = Describe("Smoke", func() { - for i := 0; i < len(fixtures); i++ { - fixture := fixtures[i] - It(fmt.Sprintf("Namespace: %s, clusterName: %s", fixture.ns, fixture.clusterName), func() { - for _, testCase := range fixture.cases { - testCase(fixture.clusterSpec) - } - }) - } -}) diff --git a/tests/e2e/scale.go b/tests/e2e/scale.go deleted file mode 100644 index 33824b6a4c9..00000000000 --- a/tests/e2e/scale.go +++ /dev/null @@ -1,279 +0,0 @@ -// Copyright 2018 PingCAP, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// See the License for the specific language governing permissions and -// limitations under the License.package spec - -package e2e - -import ( - "fmt" - "time" - - . "github.com/onsi/ginkgo" // revive:disable:dot-imports - . 
"github.com/onsi/gomega" // revive:disable:dot-imports - "github.com/pingcap/tidb-operator/pkg/controller" - "github.com/pingcap/tidb-operator/pkg/label" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/types" - "k8s.io/apimachinery/pkg/util/wait" -) - -const ( - pdScaleOutTo = 5 - tikvScaleOutTo = 5 - tidbScaleOutTo = 3 - pdScaleInTo = 3 - tikvScaleInTo = 3 - tidbScaleInTo = 2 -) - -var podUIDsBeforeScale map[string]types.UID - -func testScale(spec clusterSpec) { - ns, clusterName := spec.ns, spec.clusterName - instanceName := getInstanceName(ns, clusterName) - By(fmt.Sprintf("When scale out TiDB cluster: pd ==> [%d], tikv ==> [%d], tidb ==> [%d]", pdScaleOutTo, tikvScaleOutTo, tidbScaleOutTo)) - err := wait.Poll(5*time.Second, 5*time.Minute, func() (bool, error) { - return scaleOut(ns, clusterName) - }) - Expect(err).NotTo(HaveOccurred()) - - By("Then TiDB cluster should scale out successfully") - err = wait.Poll(5*time.Second, 5*time.Minute, func() (bool, error) { - return scaled(ns, clusterName) - }) - Expect(err).NotTo(HaveOccurred()) - - By("And should scaled out correctly") - err = wait.Poll(5*time.Second, 5*time.Minute, func() (bool, error) { - return scaledCorrectly(ns, clusterName) - }) - Expect(err).NotTo(HaveOccurred()) - - By("And scheduling policy is correct") - nodeMap, err := getNodeMap(ns, clusterName, label.PDLabelVal) - Expect(err).NotTo(HaveOccurred()) - for nodeName, podNamesArr := range nodeMap { - if len(podNamesArr) > 2 { - Fail(fmt.Sprintf("node: %s has %d pods", nodeName, len(podNamesArr))) - } - } - nodeMap, err = getNodeMap(ns, clusterName, label.TiKVLabelVal) - Expect(err).NotTo(HaveOccurred()) - for nodeName, podNamesArr := range nodeMap { - if len(podNamesArr) > 2 { - Fail(fmt.Sprintf("node: %s has %d pods", nodeName, len(podNamesArr))) - } - } - - By("And the data is correct") - err = wait.Poll(5*time.Second, 5*time.Minute, func() (bool, error) { - return dataIsCorrect(ns, clusterName) - }) - Expect(err).NotTo(HaveOccurred()) - - By(fmt.Sprintf("When scale in TiDB cluster: pd ==> [%d], tikv ==> [%d], tidb ==> [%d]", pdScaleInTo, tikvScaleInTo, tidbScaleInTo)) - err = wait.Poll(5*time.Second, 5*time.Minute, func() (bool, error) { - return scaleIn(ns, clusterName) - }) - Expect(err).NotTo(HaveOccurred()) - - By("Then TiDB cluster scale in securely") - err = wait.Poll(5*time.Second, 5*time.Minute, func() (bool, error) { - return scaleInSafely(ns, clusterName) - }) - Expect(err).NotTo(HaveOccurred()) - - By("And should scale in successfully") - err = wait.Poll(5*time.Second, 5*time.Minute, func() (bool, error) { - return scaled(ns, clusterName) - }) - Expect(err).NotTo(HaveOccurred()) - - By("And should be scaled in correctly") - err = wait.Poll(5*time.Second, 5*time.Minute, func() (bool, error) { - return scaledCorrectly(ns, clusterName) - }) - Expect(err).NotTo(HaveOccurred()) - - By("And the data is correct") - err = wait.Poll(5*time.Second, 5*time.Minute, func() (bool, error) { - return dataIsCorrect(ns, clusterName) - }) - Expect(err).NotTo(HaveOccurred()) - - By(fmt.Sprintf("When scale out TiDB cluster one more time: pd ==> [%d], tikv ==> [%d], tidb ==> [%d]", pdScaleOutTo, tikvScaleOutTo, tidbScaleOutTo)) - err = wait.Poll(5*time.Second, 5*time.Minute, func() (bool, error) { - return scaleOut(ns, clusterName) - }) - Expect(err).NotTo(HaveOccurred()) - - By("Then TiDB cluster should scale out successfully") - err = wait.Poll(5*time.Second, 5*time.Minute, func() (bool, error) { - return scaled(ns, clusterName) - }) - 
Expect(err).NotTo(HaveOccurred()) - - By("And should scaled out correctly") - err = wait.Poll(5*time.Second, 5*time.Minute, func() (bool, error) { - return scaledCorrectly(ns, instanceName) - }) - Expect(err).NotTo(HaveOccurred()) - - By("And the data is correct") - err = wait.Poll(5*time.Second, 5*time.Minute, func() (bool, error) { - return dataIsCorrect(ns, clusterName) - }) - Expect(err).NotTo(HaveOccurred()) - -} - -func scaleOut(ns, clusterName string) (bool, error) { - tc, err := cli.PingcapV1alpha1().TidbClusters(ns).Get(clusterName, metav1.GetOptions{}) - if err != nil { - logf("failed to get tidbcluster when scale out tidbcluster, error: %v", err) - return false, nil - } - instanceName := tc.GetLabels()[label.InstanceLabelKey] - podUIDsBeforeScale, err = getPodsUID(ns, instanceName) - if err != nil { - return false, nil - } - - tc.Spec.PD.Replicas = pdScaleOutTo - tc.Spec.TiKV.Replicas = tikvScaleOutTo - tc.Spec.TiDB.Replicas = tidbScaleOutTo - - tc, err = cli.PingcapV1alpha1().TidbClusters(ns).Update(tc) - if err != nil { - logf("failed to update tidbcluster when scale out tidbcluster, error: %v", err) - return false, nil - } - logf("Replicas after scaled out: PD: %d , TiKV: %d, TiDB: %d", tc.Spec.PD.Replicas, tc.Spec.TiKV.Replicas, tc.Spec.TiDB.Replicas) - - return true, nil -} - -func scaled(ns, clusterName string) (bool, error) { - return allMembersRunning(ns, clusterName) -} - -func scaleIn(ns, clusterName string) (bool, error) { - tc, err := cli.PingcapV1alpha1().TidbClusters(ns).Get(clusterName, metav1.GetOptions{}) - if err != nil { - logf("failed to get tidbcluster when scale in tidbcluster, error: %v", err) - return false, nil - } - - if tc.Spec.PD.Replicas <= pdScaleInTo { - return true, fmt.Errorf("the tidbcluster's pd replicas less then pdScaleInTo: [%d]", pdScaleInTo) - } - if tc.Spec.TiKV.Replicas <= tikvScaleInTo { - return true, fmt.Errorf("the tidbcluster's tikv replicas less then tikvScaleInTo: [%d]", tikvScaleInTo) - } - if tc.Spec.TiDB.Replicas <= tidbScaleInTo { - return true, fmt.Errorf("the tidbcluster's tidb replicas less then tidbScaleInTo: [%d]", tidbScaleInTo) - } - - instanceName := tc.GetLabels()[label.InstanceLabelKey] - podUIDsBeforeScale, err = getPodsUID(ns, instanceName) - if err != nil { - return false, nil - } - if err != nil { - return false, nil - } - - tc.Spec.PD.Replicas = pdScaleInTo - tc.Spec.TiKV.Replicas = tikvScaleInTo - tc.Spec.TiDB.Replicas = tidbScaleInTo - - tc, err = cli.PingcapV1alpha1().TidbClusters(ns).Update(tc) - if err != nil { - logf("failed to update tidbcluster when scale in tidbcluster, error: %v", err) - return false, nil - } - logf("Replicas after scaled in: PD: %d , TiKV: %d, TiDB: %d", tc.Spec.PD.Replicas, tc.Spec.TiKV.Replicas, tc.Spec.TiDB.Replicas) - - return true, nil -} - -func scaledCorrectly(ns, instanceName string) (bool, error) { - podUIDs, err := getPodsUID(ns, instanceName) - if err != nil { - logf("failed to get pd pods's uid, error: %v", err) - return false, nil - } - - if len(podUIDsBeforeScale) == len(podUIDs) { - return false, fmt.Errorf("the length of pods before scale equals the length of pods after scale") - } - - for podName, uidAfter := range podUIDs { - if uidBefore, ok := podUIDsBeforeScale[podName]; ok && uidBefore != uidAfter { - return false, fmt.Errorf("pod: [%s] have be recreated", podName) - } - } - - return true, nil -} - -// scaleInSafely confirms member scale in safely -func scaleInSafely(ns, clusterName string) (bool, error) { - tc, err := 
cli.PingcapV1alpha1().TidbClusters(ns).Get(clusterName, metav1.GetOptions{}) - if err != nil { - logf("failed to get tidbcluster when scale in tidbcluster, error: %v", err) - return false, nil - } - - tikvSetName := controller.TiKVMemberName(clusterName) - tikvSet, err := kubeCli.AppsV1beta1().StatefulSets(ns).Get(tikvSetName, metav1.GetOptions{}) - if err != nil { - logf("failed to get tikvSet statefulset: [%s], error: %v", tikvSetName, err) - return false, nil - } - - pdClient := controller.NewDefaultPDControl().GetPDClient(tc) - stores, err := pdClient.GetStores() - if err != nil { - logf("pdClient.GetStores failed,error: %v", err) - return false, nil - } - if len(stores.Stores) > int(*tikvSet.Spec.Replicas) { - logf("stores.Stores: %v", stores.Stores) - logf("tikvSet.Spec.Replicas: %d", *tikvSet.Spec.Replicas) - return false, fmt.Errorf("the tikvSet.Spec.Replicas may reduce before tikv complete offline") - } - - if *tikvSet.Spec.Replicas == tc.Spec.TiKV.Replicas { - return true, nil - } - - return false, nil -} - -func getPodsUID(ns, instanceName string) (map[string]types.UID, error) { - result := map[string]types.UID{} - - selector, err := label.New().Instance(instanceName).Selector() - if err != nil { - return nil, err - } - pods, err := kubeCli.CoreV1().Pods(ns).List(metav1.ListOptions{LabelSelector: selector.String()}) - if err != nil { - return nil, err - } - for _, pod := range pods.Items { - result[pod.GetName()] = pod.GetUID() - } - - return result, nil -} diff --git a/tests/e2e/test_helper.go b/tests/e2e/test_helper.go deleted file mode 100644 index e02d6dd0c3a..00000000000 --- a/tests/e2e/test_helper.go +++ /dev/null @@ -1,273 +0,0 @@ -// Copyright 2018 PingCAP, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// See the License for the specific language governing permissions and -// limitations under the License.package spec - -package e2e - -import ( - "bytes" - "fmt" - "os/exec" - "sort" - "strings" - "time" - - . 
"github.com/onsi/ginkgo" // revive:disable:dot-imports - "github.com/pingcap/tidb-operator/pkg/client/clientset/versioned" - "github.com/pingcap/tidb-operator/pkg/label" - apierrs "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/apimachinery/pkg/util/wait" - "k8s.io/client-go/kubernetes" -) - -const ( - operatorNs = "tidb-operator-e2e" - operatorHelmName = "tidb-operator-e2e" - testTableName = "demo_table" - testTableVal = "demo_val" -) - -var ( - cli versioned.Interface - kubeCli kubernetes.Interface -) - -type clusterSpec struct { - ns string - clusterName string - - // values override the specified helm values - values map[string]string -} - -type clusterFixture struct { - clusterSpec - - cases []testCase -} - -type testCase func(cluster clusterSpec) - -var fixtures = []clusterFixture{ - { - clusterSpec: clusterSpec{ - ns: "ns-1", - clusterName: "cluster-name-1", - }, - cases: []testCase{ - testCreate, - testScale, - testUpgrade, - }, - }, - { - clusterSpec: clusterSpec{ - ns: "ns-1", - clusterName: "cluster-name-2", - values: map[string]string{ - "tidb.separateSlowLog": "true", - }, - }, - cases: []testCase{ - testCreate, - testUpgrade, - testScale, - }, - }, - { - clusterSpec: clusterSpec{ - ns: "ns-2", - clusterName: "cluster-name-1", - values: map[string]string{ - "tidb.separateSlowLog": "false", - }, - }, - cases: []testCase{ - testCreate, - testScale, - testUpgrade, - }, - }, - // { // decrease a cluster due to CI machine resource limits - // ns: "ns-2", - // clusterName: "cluster-name-2", - // cases: []testCase{ - // testCreate, - // testUpgrade, - // testScale, - // }, - // }, -} - -func buildSetFlag(spec clusterSpec) string { - var buffer bytes.Buffer - buffer.WriteString(fmt.Sprintf("clusterName=%s,tidb.passwordSecretName=%s", spec.clusterName, spec.ns+"-"+spec.clusterName)) - for k, v := range spec.values { - buffer.WriteString(fmt.Sprintf(",%s=%s", k, v)) - } - return buffer.String() -} - -func clearOperator() error { - for _, fixture := range fixtures { - _, err := execCmd(fmt.Sprintf("helm del --purge %s", fmt.Sprintf("%s-%s", fixture.ns, fixture.clusterName))) - if err != nil && isNotFound(err) { - return err - } - - _, err = execCmd(fmt.Sprintf("kubectl delete pvc -n %s --all", fixture.ns)) - if err != nil { - return err - } - err = kubeCli.CoreV1().Secrets(fixture.ns).Delete(fixture.ns+"-"+fixture.clusterName, nil) - if err != nil && !apierrs.IsNotFound(err) { - return err - } - } - - _, err := execCmd(fmt.Sprintf("helm del --purge %s", operatorHelmName)) - if err != nil && isNotFound(err) { - return err - } - - for _, fixture := range fixtures { - err = wait.Poll(5*time.Second, 5*time.Minute, func() (bool, error) { - result, err := execCmd(fmt.Sprintf("kubectl get po --output=name -n %s", fixture.ns)) - if err != nil || result != "" { - return false, nil - } - _, err = execCmd(fmt.Sprintf(`kubectl get pv -l %s=%s,%s=%s --output=name | xargs -I {} \ - kubectl patch {} -p '{"spec":{"persistentVolumeReclaimPolicy":"Delete"}}'`, - label.NamespaceLabelKey, fixture.ns, label.InstanceLabelKey, getInstanceName(fixture.ns, fixture.clusterName))) - if err != nil { - logf(err.Error()) - } - result, _ = execCmd(fmt.Sprintf("kubectl get pv -l %s=%s,%s=%s 2>/dev/null|grep Released", - label.NamespaceLabelKey, fixture.ns, label.InstanceLabelKey, getInstanceName(fixture.ns, fixture.clusterName))) - if result != "" { - return false, nil - } - - return true, nil - }) - if err != nil { - return err - } - } - 
- return nil -} - -func installOperator() error { - _, err := execCmd(fmt.Sprintf( - "helm install /charts/tidb-operator -f /tidb-operator-values.yaml -n %s --namespace=%s", - operatorHelmName, - operatorNs)) - if err != nil { - return err - } - - monitorRestartCount() - return nil -} - -func monitorRestartCount() { - maxRestartCount := int32(3) - - go func() { - defer GinkgoRecover() - for { - select { - case <-time.After(5 * time.Second): - podList, err := kubeCli.CoreV1().Pods(metav1.NamespaceAll).List( - metav1.ListOptions{ - LabelSelector: labels.SelectorFromSet( - label.New().Labels(), - ).String(), - }, - ) - if err != nil { - continue - } - - for _, pod := range podList.Items { - for _, cs := range pod.Status.ContainerStatuses { - if cs.RestartCount > maxRestartCount { - Fail(fmt.Sprintf("POD: %s/%s's container: %s's restartCount is greater than: %d", - pod.GetNamespace(), pod.GetName(), cs.Name, maxRestartCount)) - return - } - } - } - } - } - }() -} - -func execCmd(cmdStr string) (string, error) { - logf(fmt.Sprintf("$ %s\n", cmdStr)) - result, err := exec.Command("/bin/sh", "-c", cmdStr).CombinedOutput() - resultStr := string(result) - logf(resultStr) - if err != nil { - logf(err.Error()) - return resultStr, err - } - - return resultStr, nil -} - -func nowStamp() string { - return time.Now().Format(time.StampMilli) -} - -func log(level string, format string, args ...interface{}) { - fmt.Fprintf(GinkgoWriter, nowStamp()+": "+level+": "+format+"\n", args...) -} - -// logf log a message in INFO format -func logf(format string, args ...interface{}) { - log("INFO", format, args...) -} - -func isNotFound(err error) bool { - return strings.Contains(err.Error(), "not found") -} - -func getNodeMap(ns, clusterName, component string) (map[string][]string, error) { - instanceName := getInstanceName(ns, clusterName) - nodeMap := make(map[string][]string) - selector := label.New().Instance(instanceName).Component(component).Labels() - podList, err := kubeCli.CoreV1().Pods(ns).List(metav1.ListOptions{ - LabelSelector: labels.SelectorFromSet(selector).String(), - }) - if err != nil { - return nil, err - } - - for _, pod := range podList.Items { - nodeName := pod.Spec.NodeName - if len(nodeMap[nodeName]) == 0 { - nodeMap[nodeName] = make([]string, 0) - } - nodeMap[nodeName] = append(nodeMap[nodeName], pod.GetName()) - sort.Strings(nodeMap[nodeName]) - } - - return nodeMap, nil -} - -func getInstanceName(ns, clusterName string) string { - return fmt.Sprintf("%s-%s", ns, clusterName) -} diff --git a/tests/e2e/upgrade.go b/tests/e2e/upgrade.go deleted file mode 100644 index ad5e3ed9e91..00000000000 --- a/tests/e2e/upgrade.go +++ /dev/null @@ -1,208 +0,0 @@ -// Copyright 2018 PingCAP, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// See the License for the specific language governing permissions and -// limitations under the License.package spec - -package e2e - -import ( - "strings" - "time" - - . "github.com/onsi/ginkgo" // revive:disable:dot-imports - . 
"github.com/onsi/gomega" // revive:disable:dot-imports - "github.com/pingcap/tidb-operator/pkg/apis/pingcap.com/v1alpha1" - "github.com/pingcap/tidb-operator/pkg/controller" - "github.com/pingcap/tidb-operator/pkg/label" - apps "k8s.io/api/apps/v1beta1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/util/wait" -) - -const ( - // TODO: the base version is also v2.1.0 now, so no upgrade right now - // change to later version when TiDB released - upgradeVersion = "v2.1.0" -) - -func testUpgrade(spec clusterSpec) { - ns, clusterName := spec.ns, spec.clusterName - pdNodeMap, err := getNodeMap(ns, clusterName, label.PDLabelVal) - Expect(err).NotTo(HaveOccurred()) - tikvNodeMap, err := getNodeMap(ns, clusterName, label.TiKVLabelVal) - Expect(err).NotTo(HaveOccurred()) - - By("When upgrade TiDB cluster to newer version") - err = wait.Poll(5*time.Second, 10*time.Minute, func() (bool, error) { - return upgrade(ns, clusterName) - }) - Expect(err).NotTo(HaveOccurred()) - - By("Then members should be upgrade in order: pd ==> tikv ==> tidb") - err = wait.Poll(5*time.Second, 10*time.Minute, func() (bool, error) { - return memberUpgraded(ns, clusterName) - }) - Expect(err).NotTo(HaveOccurred()) - - By("Then all members should running") - err = wait.Poll(5*time.Second, 5*time.Minute, func() (bool, error) { - return allMembersRunning(ns, clusterName) - }) - Expect(err).NotTo(HaveOccurred()) - - By("And scheduling policy is correct") - pdNodeMap1, err := getNodeMap(ns, clusterName, label.PDLabelVal) - Expect(err).NotTo(HaveOccurred()) - tikvNodeMap1, err := getNodeMap(ns, clusterName, label.TiKVLabelVal) - Expect(err).NotTo(HaveOccurred()) - - Expect(pdNodeMap).To(Equal(pdNodeMap1)) - Expect(tikvNodeMap).To(Equal(tikvNodeMap1)) - - By("And the data is correct") - err = wait.Poll(5*time.Second, 5*time.Minute, func() (bool, error) { - return dataIsCorrect(ns, clusterName) - }) - Expect(err).NotTo(HaveOccurred()) -} - -func upgrade(ns, clusterName string) (bool, error) { - tc, err := cli.PingcapV1alpha1().TidbClusters(ns).Get(clusterName, metav1.GetOptions{}) - if err != nil { - logf("failed to get tidbcluster, error: %v", err) - return false, nil - } - - tc.Spec.PD.Image = strings.Replace(tc.Spec.PD.Image, getImageTag(tc.Spec.PD.Image), upgradeVersion, -1) - tc.Spec.TiKV.Image = strings.Replace(tc.Spec.TiKV.Image, getImageTag(tc.Spec.TiKV.Image), upgradeVersion, -1) - tc.Spec.TiDB.Image = strings.Replace(tc.Spec.TiDB.Image, getImageTag(tc.Spec.TiDB.Image), upgradeVersion, -1) - - tc, err = cli.PingcapV1alpha1().TidbClusters(ns).Update(tc) - if err != nil { - logf("failed to update tidbcluster, error: %v", err) - return false, nil - } - logf("Images after upgraded: PD: %s, TiKV: %s, TiDB: %s", tc.Spec.PD.Image, tc.Spec.TiKV.Image, tc.Spec.TiDB.Image) - - return true, nil -} - -func memberUpgraded(ns, clusterName string) (bool, error) { - tc, err := cli.PingcapV1alpha1().TidbClusters(ns).Get(clusterName, metav1.GetOptions{}) - if err != nil { - logf("failed to get tidbcluster: [%s], error: %v", clusterName, err) - return false, nil - } - - pdSetName := controller.PDMemberName(clusterName) - pdSet, err := kubeCli.AppsV1beta1().StatefulSets(ns).Get(pdSetName, metav1.GetOptions{}) - if err != nil { - logf("failed to get pd statefulset: [%s], error: %v", pdSetName, err) - return false, nil - } - - tikvSetName := controller.TiKVMemberName(clusterName) - tikvSet, err := kubeCli.AppsV1beta1().StatefulSets(ns).Get(tikvSetName, metav1.GetOptions{}) - if err != nil { - logf("failed to get 
tikvSet statefulset: [%s], error: %v", tikvSetName, err) - return false, nil - } - - tidbSetName := controller.TiDBMemberName(clusterName) - tidbSet, err := kubeCli.AppsV1beta1().StatefulSets(ns).Get(tidbSetName, metav1.GetOptions{}) - if err != nil { - logf("failed to get tidbSet statefulset: [%s], error: %v", tidbSetName, err) - return false, nil - } - - if !imageUpgraded(tc, v1alpha1.PDMemberType, pdSet) { - return false, nil - } - if tc.Status.PD.Phase == v1alpha1.UpgradePhase { - logf("pd is upgrading") - Expect(tc.Status.TiKV.Phase).NotTo(Equal(v1alpha1.UpgradePhase)) - Expect(tc.Status.TiDB.Phase).NotTo(Equal(v1alpha1.UpgradePhase)) - Expect(imageUpgraded(tc, v1alpha1.PDMemberType, pdSet)).To(BeTrue()) - if !podsUpgraded(pdSet) { - Expect(imageUpgraded(tc, v1alpha1.TiKVMemberType, tikvSet)).To(BeFalse()) - Expect(imageUpgraded(tc, v1alpha1.TiDBMemberType, tidbSet)).To(BeFalse()) - } - return false, nil - } else if tc.Status.TiKV.Phase == v1alpha1.UpgradePhase { - logf("tikv is upgrading") - Expect(tc.Status.TiDB.Phase).NotTo(Equal(v1alpha1.UpgradePhase)) - Expect(imageUpgraded(tc, v1alpha1.PDMemberType, pdSet)).To(BeTrue()) - Expect(podsUpgraded(pdSet)).To(BeTrue()) - Expect(imageUpgraded(tc, v1alpha1.TiKVMemberType, tikvSet)).To(BeTrue()) - if !podsUpgraded(tikvSet) { - Expect(imageUpgraded(tc, v1alpha1.TiDBMemberType, tidbSet)).To(BeFalse()) - } - return false, nil - } else if tc.Status.TiDB.Phase == v1alpha1.UpgradePhase { - logf("tidb is upgrading") - Expect(imageUpgraded(tc, v1alpha1.PDMemberType, pdSet)).To(BeTrue()) - Expect(podsUpgraded(pdSet)).To(BeTrue()) - Expect(imageUpgraded(tc, v1alpha1.TiKVMemberType, tikvSet)).To(BeTrue()) - Expect(podsUpgraded(tikvSet)).To(BeTrue()) - Expect(imageUpgraded(tc, v1alpha1.TiDBMemberType, tidbSet)).To(BeTrue()) - return false, nil - } - if !imageUpgraded(tc, v1alpha1.PDMemberType, pdSet) { - return false, nil - } - if !podsUpgraded(pdSet) { - return false, nil - } - if !imageUpgraded(tc, v1alpha1.TiKVMemberType, tikvSet) { - return false, nil - } - if !podsUpgraded(tikvSet) { - return false, nil - } - if !imageUpgraded(tc, v1alpha1.TiDBMemberType, tidbSet) { - return false, nil - } - return podsUpgraded(tidbSet), nil - -} - -func imageUpgraded(tc *v1alpha1.TidbCluster, memberType v1alpha1.MemberType, set *apps.StatefulSet) bool { - for _, container := range set.Spec.Template.Spec.Containers { - if container.Name == memberType.String() { - if container.Image == getImage(tc, memberType) { - return true - } - } - } - return false -} - -func podsUpgraded(set *apps.StatefulSet) bool { - return set.Generation <= *set.Status.ObservedGeneration && set.Status.CurrentRevision == set.Status.UpdateRevision -} - -func getImage(tc *v1alpha1.TidbCluster, memberType v1alpha1.MemberType) string { - switch memberType { - case v1alpha1.PDMemberType: - return tc.Spec.PD.Image - case v1alpha1.TiKVMemberType: - return tc.Spec.TiKV.Image - case v1alpha1.TiDBMemberType: - return tc.Spec.TiDB.Image - default: - return "" - } -} - -func getImageTag(image string) string { - strs := strings.Split(image, ":") - return strs[len(strs)-1] -} diff --git a/tests/manifests/e2e-configmap.yaml b/tests/manifests/e2e-configmap.yaml deleted file mode 100644 index 3f4d76e07fa..00000000000 --- a/tests/manifests/e2e-configmap.yaml +++ /dev/null @@ -1,38 +0,0 @@ -apiVersion: v1 -kind: ConfigMap -metadata: - namespace: tidb-operator-e2e - name: tidb-operator-e2e-config -data: - e2e-config: |- - nodes: - - physical_node: 172.16.4.39 - nodes: - - 172.16.4.171 - - 172.16.4.172 - - 
172.16.4.173
-    - physical_node: 172.16.4.40
-      nodes:
-      - 172.16.4.174
-      - 172.16.4.175
-      - 172.16.4.176
-    etcds:
-    - physical_node: 172.16.4.39
-      nodes:
-      - 172.16.4.171
-      - 172.16.4.172
-      - 172.16.4.173
-    apiservers:
-    - physical_node: 172.16.4.39
-      nodes:
-      - 172.16.4.171
-      - 172.16.4.172
-      - 172.16.4.173
-    controller_manager:
-    - physical_node: 172.16.4.39
-      nodes:
-      - 172.16.4.171
-      - 172.16.4.172
-      - 172.16.4.173
-
-
diff --git a/tests/manifests/e2e.yaml b/tests/manifests/e2e.yaml
deleted file mode 100644
index 913dceb17b7..00000000000
--- a/tests/manifests/e2e.yaml
+++ /dev/null
@@ -1,50 +0,0 @@
----
-kind: ClusterRoleBinding
-apiVersion: rbac.authorization.k8s.io/v1beta1
-metadata:
-  name: tidb-operator-e2e
-subjects:
-- kind: ServiceAccount
-  namespace: tidb-operator-e2e
-  name: tidb-operator-e2e
-roleRef:
-  kind: ClusterRole
-  name: cluster-admin
-  apiGroup: rbac.authorization.k8s.io
----
-kind: ServiceAccount
-apiVersion: v1
-metadata:
-  namespace: tidb-operator-e2e
-  name: tidb-operator-e2e
----
-apiVersion: v1
-kind: Pod
-metadata:
-  namespace: tidb-operator-e2e
-  name: tidb-operator-e2e
-spec:
-  serviceAccount: tidb-operator-e2e
-  containers:
-  - name: tidb-operator-e2e
-    image: ""
-    imagePullPolicy: Always
-    command: ["sh", "-c", "/usr/local/bin/e2e"]
-    volumeMounts:
-    - mountPath: /logDir
-      name: logdir
-    - name: config
-      readOnly: true
-      mountPath: /etc/e2e
-  volumes:
-  - name: logdir
-    hostPath:
-      path: /var/log
-      type: Directory
-  - name: config
-    configMap:
-      name: tidb-operator-e2e-config
-      items:
-      - key: e2e-config
-        path: config.yaml
-  restartPolicy: Never
diff --git a/tests/manifests/fault-trigger/fault-trigger.service b/tests/manifests/fault-trigger/fault-trigger.service
new file mode 100644
index 00000000000..fb3a4fb439c
--- /dev/null
+++ b/tests/manifests/fault-trigger/fault-trigger.service
@@ -0,0 +1,12 @@
+[Unit]
+Description=TiDB Operator fault trigger
+After=network.target
+
+[Service]
+User=root
+ExecStart=/usr/local/bin/fault-trigger
+Restart=always
+RestartSec=10s
+
+[Install]
+WantedBy=multi-user.target
\ No newline at end of file
diff --git a/tests/manifests/stability.yaml b/tests/manifests/stability.yaml
deleted file mode 100644
index 11715573544..00000000000
--- a/tests/manifests/stability.yaml
+++ /dev/null
@@ -1,55 +0,0 @@
----
-kind: ClusterRoleBinding
-apiVersion: rbac.authorization.k8s.io/v1beta1
-metadata:
-  name: tidb-operator-e2e
-subjects:
-- kind: ServiceAccount
-  namespace: tidb-operator-e2e
-  name: tidb-operator-e2e
-roleRef:
-  kind: ClusterRole
-  name: cluster-admin
-  apiGroup: rbac.authorization.k8s.io
----
-kind: ServiceAccount
-apiVersion: v1
-metadata:
-  namespace: tidb-operator-e2e
-  name: tidb-operator-e2e
----
-apiVersion: v1
-kind: Pod
-metadata:
-  namespace: tidb-operator-e2e
-  name: tidb-operator-stability-test
-spec:
-  serviceAccount: tidb-operator-e2e
-  containers:
-  - name: tidb-operator-e2e
-    image: ""
-    imagePullPolicy: Always
-    command: ["sh", "-c", "/usr/local/bin/stability-test"]
-    volumeMounts:
-    - mountPath: /logDir
-      name: logdir
-    - name: config
-      readOnly: true
-      mountPath: /etc/e2e
-    env:
-    - name: MY_NODE_NAME
-      valueFrom:
-        fieldRef:
-          fieldPath: spec.nodeName
-  volumes:
-  - name: logdir
-    hostPath:
-      path: /var/log
-      type: Directory
-  - name: config
-    configMap:
-      name: tidb-operator-e2e-config
-      items:
-      - key: e2e-config
-        path: config.yaml
-  restartPolicy: Never
diff --git a/tests/manifests/stability/stability-configmap.yaml b/tests/manifests/stability/stability-configmap.yaml
index 0af85753bb2..9b7a201e3d3 100644
--- a/tests/manifests/stability/stability-configmap.yaml
+++ b/tests/manifests/stability/stability-configmap.yaml
@@ -1,10 +1,10 @@
-apiVersion: v1 
+apiVersion: v1
 kind: ConfigMap
 metadata:
-  namespace: tidb-operator-e2e
-  name: tidb-operator-e2e-config
+  namespace: tidb-operator-stability
+  name: tidb-operator-stability-config
 data:
-  e2e-config: |-
+  config: |-
     nodes:
     - physical_node: 172.16.4.39
       nodes:
@@ -16,17 +16,15 @@ data:
       - 172.16.4.174
       - 172.16.4.175
       - 172.16.4.176
-    etcds: 
+    etcds:
     - physical_node: 172.16.4.39
       nodes:
       - 172.16.4.171
       - 172.16.4.172
       - 172.16.4.173
-    apiservers: 
+    apiservers:
     - physical_node: 172.16.4.39
       nodes:
       - 172.16.4.171
       - 172.16.4.172
       - 172.16.4.173
-
-
diff --git a/tests/manifests/stability/stability.yaml b/tests/manifests/stability/stability.yaml
index 7c8eb32c079..86e36648207 100644
--- a/tests/manifests/stability/stability.yaml
+++ b/tests/manifests/stability/stability.yaml
@@ -29,15 +29,15 @@ spec:
   - name: tidb-operator-stability
     image: ""
     imagePullPolicy: Always
-    command: ["sh", "-c", "/usr/local/bin/stability"]
+    command: ["sh", "-c", "/usr/local/bin/stability-test"]
     args:
-    - --config=/etc/e2e/config.yaml
+    - --config=/etc/tidb-operator-stability/config.yaml
     volumeMounts:
     - mountPath: /logDir
       name: logdir
     - name: config
       readOnly: true
-      mountPath: /etc/e2e
+      mountPath: /etc/tidb-operator-stability
   volumes:
   - name: logdir
     hostPath:
@@ -45,8 +45,8 @@ spec:
       type: Directory
   - name: config
     configMap:
-      name: tidb-operator-e2e-config
+      name: tidb-operator-stability-config
       items:
-      - key: e2e-config
+      - key: config
         path: config.yaml
   restartPolicy: Never