diff --git a/go.mod b/go.mod
index d74812053ba..9455a6a0797 100644
--- a/go.mod
+++ b/go.mod
@@ -75,7 +75,7 @@ require (
 	github.com/prometheus/common v0.0.0-20180801064454-c7de2306084e // indirect
 	github.com/prometheus/procfs v0.0.0-20180725123919-05ee40e3a273 // indirect
 	github.com/renstrom/dedent v1.1.0 // indirect
-	github.com/robfig/cron v1.1.0 // indirect
+	github.com/robfig/cron v1.1.0
 	github.com/russross/blackfriday v1.5.2+incompatible // indirect
 	github.com/shurcooL/sanitized_anchor_name v1.0.0 // indirect
 	github.com/sirupsen/logrus v1.0.6
@@ -87,6 +87,7 @@ require (
 	github.com/uber/jaeger-lib v2.0.0+incompatible // indirect
 	github.com/ugorji/go v1.1.1 // indirect
 	github.com/unrolled/render v0.0.0-20180807193321-4206df6ff701 // indirect
+	github.com/urfave/negroni v1.0.0 // indirect
 	github.com/xiang90/probing v0.0.0-20160813154853-07dd2e8dfe18 // indirect
 	go.uber.org/atomic v1.3.2 // indirect
 	go.uber.org/multierr v1.1.0 // indirect
diff --git a/go.sum b/go.sum
index 5b6d45c8435..5ce9be8c346 100644
--- a/go.sum
+++ b/go.sum
@@ -206,6 +206,7 @@ github.com/ugorji/go v1.1.1 h1:gmervu+jDMvXTbcHQ0pd2wee85nEoE0BsVyEuzkfK8w=
 github.com/ugorji/go v1.1.1/go.mod h1:hnLbHMwcvSihnDhEfx2/BzKp2xb0Y+ErdfYcrs9tkJQ=
 github.com/unrolled/render v0.0.0-20180807193321-4206df6ff701 h1:BJ/T25enw0WcbWqV132hGXRQdqCqe9XBzqh4AWVH7Bc=
 github.com/unrolled/render v0.0.0-20180807193321-4206df6ff701/go.mod h1:tu82oB5W2ykJRVioYsB+IQKcft7ryBr7w12qMBUPyXg=
+github.com/urfave/negroni v1.0.0/go.mod h1:Meg73S6kFm/4PpbYdq35yYWoCZ9mS/YSx+lKnmiohz4=
 github.com/xiang90/probing v0.0.0-20160813154853-07dd2e8dfe18 h1:MPPkRncZLN9Kh4MEFmbnK4h3BD7AUmskWv2+EeZJCCs=
 github.com/xiang90/probing v0.0.0-20160813154853-07dd2e8dfe18/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU=
 go.uber.org/atomic v1.3.2 h1:2Oa65PReHzfn29GpvgsYwloV9AVFHPDk8tYxt2c2tr4=
diff --git a/tests/actions.go b/tests/actions.go
index cbd88e1f9d9..9dcba1314f4 100644
--- a/tests/actions.go
+++ b/tests/actions.go
@@ -163,6 +163,12 @@ type OperatorActions interface {
 	EmitEvent(info *TidbClusterConfig, msg string)
 	BackupRestore(from, to *TidbClusterConfig) error
 	BackupRestoreOrDie(from, to *TidbClusterConfig)
+	LabelNodes() error
+	LabelNodesOrDie()
+	CheckDisasterTolerance(info *TidbClusterConfig) error
+	CheckDisasterToleranceOrDie(info *TidbClusterConfig)
+	CheckDataRegionDisasterTolerance(info *TidbClusterConfig) error
+	CheckDataRegionDisasterToleranceOrDie(info *TidbClusterConfig)
 	GetTidbMemberAssignedNodes(info *TidbClusterConfig) (map[string]string, error)
 	GetTidbMemberAssignedNodesOrDie(info *TidbClusterConfig) map[string]string
 	CheckTidbMemberAssignedNodes(info *TidbClusterConfig, oldAssignedNodes map[string]string) error
@@ -219,7 +225,6 @@ type TidbClusterConfig struct {
 	TiDBImage        string
 	StorageClassName string
 	Password         string
-	InitSQL          string
 	RecordCount      string
 	InsertBatchSize  string
 	Resources        map[string]string
@@ -242,6 +247,7 @@ type TidbClusterConfig struct {
 	BlockWriteConfig blockwriter.Config
 	GrafanaClient    *metrics.Client
+	SubValues        string
 }
 
 func (oi *OperatorConfig) ConfigTLS() *tls.Config {
@@ -292,7 +298,6 @@ func (tc *TidbClusterConfig) TidbClusterHelmSetString(m map[string]string) strin
 		"tikv.image":              tc.TiKVImage,
 		"tidb.image":              tc.TiDBImage,
 		"tidb.passwordSecretName": tc.InitSecretName,
-		"tidb.initSql":            tc.InitSQL,
 		"monitor.create":          strconv.FormatBool(tc.Monitor),
 		"enableConfigMapRollout":  strconv.FormatBool(tc.EnableConfigMapRollout),
 		"pd.preStartScript":       tc.PDPreStartScript,
@@ -456,6 +461,7 @@ func (oa *operatorActions) UpgradeOperator(info *OperatorConfig) error {
 		--set operatorImage=%s`,
 		info.ReleaseName, oa.operatorChartPath(info.Tag), info.Image)
+
 	res, err := exec.Command("/bin/sh", "-c", cmd).CombinedOutput()
 	if err != nil {
 		return fmt.Errorf("failed to upgrade operator to: %s, %v, %s", info.Image, err, string(res))
 	}
@@ -484,7 +490,22 @@ func (oa *operatorActions) DeployTidbCluster(info *TidbClusterConfig) error {
 	cmd := fmt.Sprintf("helm install %s --name %s --namespace %s --set-string %s",
 		oa.tidbClusterChartPath(info.OperatorTag), info.ClusterName, info.Namespace, info.TidbClusterHelmSetString(nil))
-	glog.Infof(cmd)
+	if strings.TrimSpace(info.SubValues) != "" {
+		subValuesPath := fmt.Sprintf("%s/%s.yaml", oa.tidbClusterChartPath(info.OperatorTag), info.ClusterName)
+		svFile, err := os.Create(subValuesPath)
+		if err != nil {
+			return err
+		}
+		defer svFile.Close()
+		_, err = svFile.WriteString(info.SubValues)
+		if err != nil {
+			return err
+		}
+
+		cmd = fmt.Sprintf("%s --values %s", cmd, subValuesPath)
+	}
+	glog.Info(cmd)
+
 	if res, err := exec.Command("/bin/sh", "-c", cmd).CombinedOutput(); err != nil {
 		return fmt.Errorf("failed to deploy tidbcluster: %s/%s, %v, %s",
 			info.Namespace, info.ClusterName, err, string(res))
@@ -762,8 +783,7 @@ func (oa *operatorActions) ScaleTidbCluster(info *TidbClusterConfig) error {
 	oa.EmitEvent(info, fmt.Sprintf("ScaleTidbCluster to pd: %s, tikv: %s, tidb: %s",
 		info.Args["pd.replicas"], info.Args["tikv.replicas"], info.Args["tidb.replicas"]))
 
-	cmd := fmt.Sprintf("helm upgrade %s %s --set-string %s",
-		info.ClusterName, oa.tidbClusterChartPath(info.OperatorTag), info.TidbClusterHelmSetString(nil))
+	cmd := oa.getHelmUpgradeClusterCmd(info, nil)
 	glog.Info("[SCALE] " + cmd)
 	res, err := exec.Command("/bin/sh", "-c", cmd).CombinedOutput()
 	if err != nil {
@@ -868,8 +888,7 @@ func (oa *operatorActions) UpgradeTidbCluster(info *TidbClusterConfig) error {
 		return pingcapErrors.Wrapf(err, "failed to add annotation to [%s/%s]", info.Namespace, info.ClusterName)
 	}
 
-	cmd := fmt.Sprintf("helm upgrade %s %s --set-string %s",
-		info.ClusterName, oa.tidbClusterChartPath(info.OperatorTag), info.TidbClusterHelmSetString(nil))
+	cmd := oa.getHelmUpgradeClusterCmd(info, nil)
 	glog.Info("[UPGRADE] " + cmd)
 	res, err := exec.Command("/bin/sh", "-c", cmd).CombinedOutput()
 	if err != nil {
@@ -1875,10 +1894,7 @@ func (oa *operatorActions) DeployScheduledBackup(info *TidbClusterConfig) error
 		"scheduledBackup.secretName": info.BackupSecretName,
 	}
 
-	setString := info.TidbClusterHelmSetString(sets)
-
-	cmd := fmt.Sprintf("helm upgrade %s %s --set-string %s",
-		info.ClusterName, oa.tidbClusterChartPath(info.OperatorTag), setString)
+	cmd := oa.getHelmUpgradeClusterCmd(info, sets)
 
 	glog.Infof("scheduled-backup delploy [%s]", cmd)
 	res, err := exec.Command("/bin/sh", "-c", cmd).CombinedOutput()
@@ -1896,10 +1912,7 @@ func (oa *operatorActions) disableScheduledBackup(info *TidbClusterConfig) error
 		"scheduledBackup.create": "false",
 	}
 
-	setString := info.TidbClusterHelmSetString(sets)
-
-	cmd := fmt.Sprintf("helm upgrade %s %s --set-string %s",
-		info.ClusterName, oa.tidbClusterChartPath(info.OperatorTag), setString)
+	cmd := oa.getHelmUpgradeClusterCmd(info, sets)
 
 	res, err := exec.Command("/bin/sh", "-c", cmd).CombinedOutput()
 	if err != nil {
@@ -2096,10 +2109,7 @@ func (oa *operatorActions) DeployIncrementalBackup(from *TidbClusterConfig, to *
 		"binlog.drainer.ignoreSchemas": "",
 	}
 
-	setString := from.TidbClusterHelmSetString(sets)
-
-	cmd := fmt.Sprintf("helm upgrade %s %s --set-string %s",
-		from.ClusterName, oa.tidbClusterChartPath(from.OperatorTag), setString)
+	cmd := oa.getHelmUpgradeClusterCmd(from, sets)
 	glog.Infof(cmd)
 	res, err := exec.Command("/bin/sh", "-c", cmd).CombinedOutput()
 	if err != nil {
@@ -2410,3 +2420,14 @@ func (oa *operatorActions) EventWorker() {
 		ce.events = retryEvents
 	}
 }
+
+func (oa *operatorActions) getHelmUpgradeClusterCmd(info *TidbClusterConfig, set map[string]string) string {
+	cmd := fmt.Sprintf("helm upgrade %s %s --set-string %s",
+		info.ClusterName, oa.tidbClusterChartPath(info.OperatorTag), info.TidbClusterHelmSetString(set))
+	if strings.TrimSpace(info.SubValues) != "" {
+		subValuesPath := fmt.Sprintf("%s/%s.yaml", oa.tidbClusterChartPath(info.OperatorTag), info.ClusterName)
+		cmd = fmt.Sprintf("%s --values %s", cmd, subValuesPath)
+	}
+
+	return cmd
+}
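For context, a minimal standalone sketch (the chart path, cluster name, and --set-string payload below are made up, not part of this patch) of the command string getHelmUpgradeClusterCmd produces once SubValues is non-empty; DeployTidbCluster is what actually writes the sub-values file to <chart-path>/<cluster-name>.yaml:

package main

import (
	"fmt"
	"strings"
)

// Sketch only: mirrors the getHelmUpgradeClusterCmd logic above with
// hard-coded, hypothetical inputs.
func main() {
	chartPath := "/charts/master/tidb-cluster"
	clusterName := "demo-cluster"
	setString := "pd.replicas=3,tikv.replicas=3,tidb.replicas=2"
	subValues := "pd:\n  affinity: ...\n" // rendered by GetAffinityConfigOrDie in tests/util.go

	cmd := fmt.Sprintf("helm upgrade %s %s --set-string %s", clusterName, chartPath, setString)
	if strings.TrimSpace(subValues) != "" {
		// DeployTidbCluster writes subValues to <chartPath>/<clusterName>.yaml,
		// so the upgrade command only needs to reference that file.
		cmd = fmt.Sprintf("%s --values %s/%s.yaml", cmd, chartPath, clusterName)
	}
	fmt.Println(cmd)
	// helm upgrade demo-cluster /charts/master/tidb-cluster --set-string pd.replicas=3,tikv.replicas=3,tidb.replicas=2 --values /charts/master/tidb-cluster/demo-cluster.yaml
}

Rendering the affinity overrides into a --values file keeps the --set-string payload unchanged, so every helm upgrade path picks up the same overrides that helm install used.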
diff --git a/tests/cmd/e2e/main.go b/tests/cmd/e2e/main.go
index 5b4e52ab795..0d36e855b60 100644
--- a/tests/cmd/e2e/main.go
+++ b/tests/cmd/e2e/main.go
@@ -15,10 +15,11 @@ package main
 
 import (
 	"fmt"
-	"k8s.io/api/core/v1"
 	_ "net/http/pprof"
 	"time"
 
+	"k8s.io/api/core/v1"
+
 	"github.com/golang/glog"
 	"github.com/jinzhu/copier"
 	"github.com/pingcap/tidb-operator/tests"
@@ -32,7 +33,6 @@ func main() {
 	defer logs.FlushLogs()
 
 	conf := tests.ParseConfigOrDie()
-	conf.ChartDir = "/charts"
 	conf.ManifestDir = "/manifests"
 
 	cli, kubeCli := client.NewCliOrDie()
@@ -103,6 +103,7 @@ func main() {
 			BatchSize: 1,
 			RawSize:   1,
 		},
+		SubValues:              tests.GetAffinityConfigOrDie(name1, name1),
 		EnableConfigMapRollout: true,
 		PDMaxReplicas:          3,
 		TiKVGrpcConcurrency:    4,
@@ -145,6 +146,7 @@ func main() {
 			BatchSize: 1,
 			RawSize:   1,
 		},
+		SubValues:              tests.GetAffinityConfigOrDie(name2, name2),
 		EnableConfigMapRollout: false,
 		PDMaxReplicas:          3,
 		TiKVGrpcConcurrency:    4,
@@ -167,6 +169,7 @@ func main() {
 			"pd.replicas":     "1",
 			"discovery.image": conf.OperatorImage,
 		},
+		SubValues: tests.GetAffinityConfigOrDie(name3, name2),
 	},
 }
@@ -174,6 +177,8 @@ func main() {
 		oa.DumpAllLogs(operatorInfo, clusterInfos)
 	}()
 
+	oa.LabelNodesOrDie()
+
 	// deploy operator
 	if err := oa.CleanOperator(operatorInfo); err != nil {
 		oa.DumpAllLogs(operatorInfo, nil)
@@ -200,6 +205,11 @@ func main() {
 		}
 	}
 
+	// check disaster tolerance
+	for _, clusterInfo := range clusterInfos {
+		oa.CheckDisasterToleranceOrDie(clusterInfo)
+	}
+
 	for _, clusterInfo := range clusterInfos {
 		go oa.BeginInsertDataToOrDie(clusterInfo)
 	}
@@ -300,6 +310,11 @@ func main() {
 		}
 	}
 
+	// check data regions disaster tolerance
+	for _, clusterInfo := range clusterInfos {
+		oa.CheckDataRegionDisasterToleranceOrDie(clusterInfo)
+	}
+
 	// backup and restore
 	backupClusterInfo := clusterInfos[0]
 	restoreClusterInfo := &tests.TidbClusterConfig{}
diff --git a/tests/cmd/stability/main.go b/tests/cmd/stability/main.go
index cf17fff145c..25411a87ee1 100644
--- a/tests/cmd/stability/main.go
+++ b/tests/cmd/stability/main.go
@@ -15,12 +15,13 @@ package main
 
 import (
 	"fmt"
-	"k8s.io/api/core/v1"
 	"net/http"
 	_ "net/http/pprof"
 	"strconv"
 	"time"
 
+	"k8s.io/api/core/v1"
+
 	"github.com/golang/glog"
 	"github.com/jinzhu/copier"
 	"github.com/pingcap/tidb-operator/tests"
@@ -104,6 +105,8 @@ func main() {
 		PDLogLevel:             "info",
 		EnableConfigMapRollout: true,
 	}
+	cluster1.SubValues = tests.GetAffinityConfigOrDie(cluster1.ClusterName, cluster1.Namespace)
+
 	cluster2 := &tests.TidbClusterConfig{
 		Namespace:   clusterName2,
 		ClusterName: clusterName2,
@@ -143,6 +146,7 @@
 		PDLogLevel:             "info",
 		EnableConfigMapRollout: false,
 	}
+	cluster2.SubValues = tests.GetAffinityConfigOrDie(cluster2.ClusterName, cluster2.Namespace)
 
 	// cluster backup and restore
 	clusterBackupFrom := cluster1
@@ -173,6 +177,8 @@
 	})
 	go c.Start()
 
+	oa.LabelNodesOrDie()
+
 	fn := func() {
 		run(oa, fta, conf, operatorCfg, allClusters, cluster1, cluster2, onePDCluster,
 			upgardeTiDBVersions, clusterRestoreTo, clusterBackupFrom)
@@ -212,6 +218,10 @@ func run(oa tests.OperatorActions,
 	oa.CleanTidbClusterOrDie(onePDCluster)
 
+	// check disaster tolerance
+	oa.CheckDisasterToleranceOrDie(cluster1)
+	oa.CheckDisasterToleranceOrDie(cluster2)
+
 	go oa.BeginInsertDataToOrDie(cluster1)
 	go oa.BeginInsertDataToOrDie(cluster2)
 	defer oa.StopInsertDataTo(cluster1)
@@ -280,6 +290,13 @@ func run(oa tests.OperatorActions,
 	oa.UpgradeTidbClusterOrDie(cluster2)
 	oa.CheckTidbClusterStatusOrDie(cluster2)
 
+	// after upgrade cluster, clean webhook
+	oa.CleanWebHookAndService(operatorCfg)
+
+	// check data regions disaster tolerance
+	oa.CheckDataRegionDisasterToleranceOrDie(cluster1)
+	oa.CheckDataRegionDisasterToleranceOrDie(cluster2)
+
 	// deploy and check cluster restore
 	oa.DeployTidbClusterOrDie(clusterRestoreTo)
 	oa.CheckTidbClusterStatusOrDie(clusterRestoreTo)
diff --git a/tests/config.go b/tests/config.go
index 50ffbaffb77..83cf9850671 100644
--- a/tests/config.go
+++ b/tests/config.go
@@ -82,6 +82,7 @@ func NewConfig() (*Config, error) {
 	flag.StringVar(&cfg.OperatorTag, "operator-tag", "master", "operator tag used to choose charts")
 	flag.StringVar(&cfg.OperatorImage, "operator-image", "pingcap/tidb-operator:latest", "operator image")
 	flag.StringVar(&cfg.OperatorRepoDir, "operator-repo-dir", "/tidb-operator", "local directory to which tidb-operator cloned")
+	flag.StringVar(&cfg.ChartDir, "chart-dir", "", "chart dir")
 	flag.StringVar(&slack.WebhookURL, "slack-webhook-url", "", "slack webhook url")
 	flag.Parse()
 
@@ -91,11 +92,13 @@ func NewConfig() (*Config, error) {
 	}
 	cfg.OperatorRepoDir = operatorRepo
 
-	chartDir, err := ioutil.TempDir("", "charts")
-	if err != nil {
-		return nil, err
+	if strings.TrimSpace(cfg.ChartDir) == "" {
+		chartDir, err := ioutil.TempDir("", "charts")
+		if err != nil {
+			return nil, err
+		}
+		cfg.ChartDir = chartDir
 	}
-	cfg.ChartDir = chartDir
 
 	manifestDir, err := ioutil.TempDir("", "manifests")
 	if err != nil {
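The new tests/dt.go below implements the disaster-tolerance helpers added to the OperatorActions interface. As a rough usage sketch (the package name and the checkDT helper are hypothetical, not part of the patch; the call order mirrors what the e2e and stability entry points above already do):

package dtexample

import "github.com/pingcap/tidb-operator/tests"

// checkDT condenses the call order used by the e2e and stability mains
// for the new disaster-tolerance checks.
func checkDT(oa tests.OperatorActions, cluster *tests.TidbClusterConfig) {
	// 1. Spread the cluster nodes across RackNum racks via the "rack" label.
	oa.LabelNodesOrDie()

	// 2. Deploy with SubValues so the chart receives the pod anti-affinity overrides.
	cluster.SubValues = tests.GetAffinityConfigOrDie(cluster.ClusterName, cluster.Namespace)
	oa.DeployTidbClusterOrDie(cluster)
	oa.CheckTidbClusterStatusOrDie(cluster)

	// 3. Verify PD/TiKV/TiDB pods are spread evenly across the racks.
	oa.CheckDisasterToleranceOrDie(cluster)

	// 4. Verify that no two peers of any data region share a rack.
	oa.CheckDataRegionDisasterToleranceOrDie(cluster)
}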
diff --git a/tests/dt.go b/tests/dt.go
new file mode 100644
index 00000000000..3c60d6662a0
--- /dev/null
+++ b/tests/dt.go
@@ -0,0 +1,251 @@
+// Copyright 2019 PingCAP, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package tests
+
+import (
+	"encoding/json"
+	"fmt"
+	"io/ioutil"
+	"net/http"
+	"strconv"
+	"time"
+
+	"github.com/golang/glog"
+
+	"github.com/pingcap/kvproto/pkg/metapb"
+	"github.com/pingcap/kvproto/pkg/pdpb"
+	"github.com/pingcap/tidb-operator/pkg/label"
+	"github.com/pingcap/tidb-operator/tests/slack"
+	corev1 "k8s.io/api/core/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/labels"
+)
+
+const (
+	RackLabel = "rack"
+	RackNum   = 3
+)
+
+// RegionInfo records detail region info for api usage.
+type RegionInfo struct {
+	ID          uint64              `json:"id"`
+	StartKey    string              `json:"start_key"`
+	EndKey      string              `json:"end_key"`
+	RegionEpoch *metapb.RegionEpoch `json:"epoch,omitempty"`
+	Peers       []*metapb.Peer      `json:"peers,omitempty"`
+
+	Leader          *metapb.Peer      `json:"leader,omitempty"`
+	DownPeers       []*pdpb.PeerStats `json:"down_peers,omitempty"`
+	PendingPeers    []*metapb.Peer    `json:"pending_peers,omitempty"`
+	WrittenBytes    uint64            `json:"written_bytes,omitempty"`
+	ReadBytes       uint64            `json:"read_bytes,omitempty"`
+	ApproximateSize int64             `json:"approximate_size,omitempty"`
+	ApproximateKeys int64             `json:"approximate_keys,omitempty"`
+}
+
+// RegionsInfo contains some regions with the detailed region info.
+type RegionsInfo struct {
+	Count   int           `json:"count"`
+	Regions []*RegionInfo `json:"regions"`
+}
+
+func (oa *operatorActions) LabelNodes() error {
+	nodes, err := oa.kubeCli.CoreV1().Nodes().List(metav1.ListOptions{})
+	if err != nil {
+		return err
+	}
+
+	for i, node := range nodes.Items {
+		index := i % RackNum
+		node.Labels[RackLabel] = fmt.Sprintf("rack%d", index)
+		_, err = oa.kubeCli.CoreV1().Nodes().Update(&node)
+		if err != nil {
+			glog.Errorf("label node:[%s] failed!", node.Name)
+			return err
+		}
+	}
+	return nil
+}
+
+func (oa *operatorActions) LabelNodesOrDie() {
+	err := oa.LabelNodes()
+	if err != nil {
+		slack.NotifyAndPanic(err)
+	}
+}
+
+func (oa *operatorActions) CheckDisasterTolerance(cluster *TidbClusterConfig) error {
+	nodeMap := map[string]corev1.Node{}
+	nodes, err := oa.kubeCli.CoreV1().Nodes().List(metav1.ListOptions{})
+	if err != nil {
+		return err
+	}
+	for _, node := range nodes.Items {
+		nodeMap[node.Name] = node
+	}
+
+	pds, err := oa.kubeCli.CoreV1().Pods(cluster.Namespace).List(
+		metav1.ListOptions{LabelSelector: labels.SelectorFromSet(
+			label.New().Instance(cluster.ClusterName).PD().Labels(),
+		).String()})
+	if err != nil {
+		return err
+	}
+	err = oa.checkPodsDisasterTolerance(pds.Items, nodeMap)
+	if err != nil {
+		return err
+	}
+
+	tikvs, err := oa.kubeCli.CoreV1().Pods(cluster.Namespace).List(
+		metav1.ListOptions{LabelSelector: labels.SelectorFromSet(
+			label.New().Instance(cluster.ClusterName).TiKV().Labels(),
+		).String()})
+	if err != nil {
+		return err
+	}
+	err = oa.checkPodsDisasterTolerance(tikvs.Items, nodeMap)
+	if err != nil {
+		return err
+	}
+
+	tidbs, err := oa.kubeCli.CoreV1().Pods(cluster.Namespace).List(
+		metav1.ListOptions{LabelSelector: labels.SelectorFromSet(
+			label.New().Instance(cluster.ClusterName).TiDB().Labels(),
+		).String()})
+	if err != nil {
+		return err
+	}
+	return oa.checkPodsDisasterTolerance(tidbs.Items, nodeMap)
+}
+
+func (oa *operatorActions) checkPodsDisasterTolerance(allPods []corev1.Pod, nodeMap map[string]corev1.Node) error {
+	rackPods := map[string][]corev1.Pod{}
+	for _, pod := range allPods {
+		if node, exist := nodeMap[pod.Spec.NodeName]; exist {
+			pods, exist := rackPods[node.Labels[RackLabel]]
+			if !exist {
+				pods = []corev1.Pod{}
+			}
+			pods = append(pods, pod)
+			rackPods[node.Labels[RackLabel]] = pods
+		}
+	}
+
+	podNum := len(allPods)
+	minPodsOneRack := podNum / RackNum
+	maxPodsOneRack := minPodsOneRack
+	mod := podNum % RackNum
+	if mod > 0 {
+		maxPodsOneRack = maxPodsOneRack + 1
+	}
+
+	for rack, pods := range rackPods {
+		podNumOnRack := len(pods)
+		if podNumOnRack > maxPodsOneRack {
+			return fmt.Errorf("the rack:[%s] has more than %d pods", rack, maxPodsOneRack)
+		}
+		if podNumOnRack < minPodsOneRack {
+			return fmt.Errorf("the rack:[%s] has fewer than %d pods", rack, minPodsOneRack)
+		}
+	}
+	return nil
+}
+
+func (oa *operatorActions) CheckDisasterToleranceOrDie(cluster *TidbClusterConfig) {
+	err := oa.CheckDisasterTolerance(cluster)
+	if err != nil {
+		slack.NotifyAndPanic(err)
+	}
+}
+
+func (oa *operatorActions) CheckDataRegionDisasterToleranceOrDie(cluster *TidbClusterConfig) {
+	err := oa.CheckDataRegionDisasterTolerance(cluster)
+	if err != nil {
+		slack.NotifyAndPanic(err)
+	}
+}
+
+func (oa *operatorActions) CheckDataRegionDisasterTolerance(cluster *TidbClusterConfig) error {
+	pdClient := http.Client{
+		Timeout: 10 * time.Second,
+	}
+	url := fmt.Sprintf("http://%s-pd.%s:2379/pd/api/v1/regions", cluster.ClusterName, cluster.Namespace)
+	resp, err := pdClient.Get(url)
+	if err != nil {
+		return err
+	}
+	buf, _ := ioutil.ReadAll(resp.Body)
+	regions := &RegionsInfo{}
+	err = json.Unmarshal(buf, &regions)
+	if err != nil {
+		return err
+	}
+
+	rackNodeMap, err := oa.getNodeRackMap()
+	if err != nil {
+		return err
+	}
+	// check that the peers of each region are located on different racks.
+	// by default the region replica count is 3 and the rack count is also 3,
+	// so each rack should hold only one peer of each data region; if not, return an error
+	for _, region := range regions.Regions {
+		// regionRacks maps a rack name to the peer ID found on that rack
+		regionRacks := map[string]uint64{}
+		for _, peer := range region.Peers {
+			storeID := strconv.FormatUint(peer.StoreId, 10)
+			nodeName, err := oa.getNodeByStoreId(storeID, cluster)
+			if err != nil {
+				return err
+			}
+			rackName := rackNodeMap[nodeName]
+			// if the rack already holds another peer of this region, return an error
+			if otherID, exist := regionRacks[rackName]; exist {
+				return fmt.Errorf("region[%d]'s peers [%d] and [%d] are in the same rack:[%s]", region.ID, otherID, peer.Id, rackName)
+			}
+			// record the new pair of rack and peer
+			regionRacks[rackName] = peer.Id
+		}
+	}
+	return nil
+}
+
+func (oa *operatorActions) getNodeByStoreId(storeID string, cluster *TidbClusterConfig) (string, error) {
+	tc, err := oa.cli.PingcapV1alpha1().TidbClusters(cluster.Namespace).Get(cluster.ClusterName, metav1.GetOptions{})
+	if err != nil {
+		return "", err
+	}
+	if store, exist := tc.Status.TiKV.Stores[storeID]; exist {
+		pod, err := oa.kubeCli.CoreV1().Pods(cluster.Namespace).Get(store.PodName, metav1.GetOptions{})
+		if err != nil {
+			return "", err
+		}
+		return pod.Spec.NodeName, nil
+	}
+
+	return "", fmt.Errorf("the storeID:[%s] does not exist in the status of tidbCluster:[%s]", storeID, cluster.FullName())
+}
+
+// getNodeRackMap returns the map of node name to rack name
+func (oa *operatorActions) getNodeRackMap() (map[string]string, error) {
+	rackNodeMap := map[string]string{}
+	nodes, err := oa.kubeCli.CoreV1().Nodes().List(metav1.ListOptions{})
+	if err != nil {
+		return rackNodeMap, err
+	}
+	for _, node := range nodes.Items {
+		rackNodeMap[node.Name] = node.Labels[RackLabel]
+	}
+
+	return rackNodeMap, nil
+}
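As a worked example of the per-rack bounds that checkPodsDisasterTolerance enforces (standalone sketch; the pod counts are made up): with RackNum = 3, each rack may hold at most ceil(n/3) and at least floor(n/3) pods of a component.

package main

import "fmt"

func main() {
	const rackNum = 3 // mirrors RackNum in tests/dt.go
	for _, podNum := range []int{3, 5, 7} {
		minPerRack := podNum / rackNum
		maxPerRack := minPerRack
		if podNum%rackNum > 0 {
			maxPerRack++
		}
		fmt.Printf("%d pods over %d racks -> between %d and %d pods per rack\n",
			podNum, rackNum, minPerRack, maxPerRack)
	}
	// 3 pods over 3 racks -> between 1 and 1 pods per rack
	// 5 pods over 3 racks -> between 1 and 2 pods per rack
	// 7 pods over 3 racks -> between 2 and 3 pods per rack
}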
diff --git a/tests/manifests/e2e/e2e.yaml b/tests/manifests/e2e/e2e.yaml
index 04b0f4c666d..25b1b91b32b 100644
--- a/tests/manifests/e2e/e2e.yaml
+++ b/tests/manifests/e2e/e2e.yaml
@@ -49,6 +49,7 @@ spec:
         - --operator-tag=e2e
         - --operator-image=pingcap/tidb-operator:latest
         - --tidb-versions=v3.0.0-beta.1,v3.0.0-rc.1
+        - --chart-dir=/charts
       volumeMounts:
       - mountPath: /logDir
         name: logdir
diff --git a/tests/util.go b/tests/util.go
index 34ccab00815..0d430fdf708 100644
--- a/tests/util.go
+++ b/tests/util.go
@@ -14,6 +14,9 @@
 package tests
 
 import (
+	"bytes"
+	"fmt"
+	"html/template"
 	"math/rand"
 	"time"
 
@@ -78,3 +81,49 @@ func GetKubeComponent(kubeCli kubernetes.Interface, node string, componentName s
 		}
 		return nil, nil
 	}
+
+var affinityTemp string = `{{.Kind}}:
+  affinity:
+    podAntiAffinity:
+      preferredDuringSchedulingIgnoredDuringExecution:
+      - weight: {{.Weight}}
+        podAffinityTerm:
+          labelSelector:
+            matchLabels:
+              app.kubernetes.io/instance: {{.ClusterName}}
+              app.kubernetes.io/component: {{.Kind}}
+          topologyKey: "rack"
+          namespaces:
+          - {{.Namespace}}
+`
+
+type AffinityInfo struct {
+	ClusterName string
+	Kind        string
+	Weight      int
+	Namespace   string
+}
+
+func GetAffinityConfigOrDie(clusterName, namespace string) string {
+	temp, err := template.New("dt-affinity").Parse(affinityTemp)
+	if err != nil {
+		slack.NotifyAndPanic(err)
+	}
+
+	pdbuff := new(bytes.Buffer)
+	err = temp.Execute(pdbuff, &AffinityInfo{ClusterName: clusterName, Kind: "pd", Weight: 10, Namespace: namespace})
+	if err != nil {
+		slack.NotifyAndPanic(err)
+	}
+	tikvbuff := new(bytes.Buffer)
+	err = temp.Execute(tikvbuff, &AffinityInfo{ClusterName: clusterName, Kind: "tikv", Weight: 10, Namespace: namespace})
+	if err != nil {
+		slack.NotifyAndPanic(err)
+	}
+	tidbbuff := new(bytes.Buffer)
+	err = temp.Execute(tidbbuff, &AffinityInfo{ClusterName: clusterName, Kind: "tidb", Weight: 10, Namespace: namespace})
+	if err != nil {
+		slack.NotifyAndPanic(err)
+	}
+	return fmt.Sprintf("%s%s%s", pdbuff.String(), tikvbuff.String(), tidbbuff.String())
+}
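For reference, a short program (hypothetical cluster name and namespace, not part of the patch) that prints what GetAffinityConfigOrDie returns; the output is the sub-values YAML handed to helm via --values, with one top-level block per component:

package main

import (
	"fmt"

	"github.com/pingcap/tidb-operator/tests"
)

func main() {
	// Expect three blocks (pd:, tikv:, tidb:), each adding a preferred
	// pod anti-affinity term keyed on the "rack" topology label and scoped
	// to the given namespace, as laid out in the template above.
	fmt.Println(tests.GetAffinityConfigOrDie("demo", "ns1"))
}

Because the template uses preferredDuringSchedulingIgnoredDuringExecution, the scheduler treats the spread as best effort rather than a hard requirement; the checks in tests/dt.go are what verify that an even spread was actually achieved.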