diff --git a/pkg/apis/pingcap.com/v1alpha1/types.go b/pkg/apis/pingcap.com/v1alpha1/types.go
index 51f452ea8ec..c4a32ed6096 100644
--- a/pkg/apis/pingcap.com/v1alpha1/types.go
+++ b/pkg/apis/pingcap.com/v1alpha1/types.go
@@ -22,13 +22,14 @@ import (
 )
 
 const (
-    // AnnotationStorageSize is a storage size annotation key
-    AnnotationStorageSize string = "storage.pingcap.com/size"
-
     // TiKVStateUp represents status of Up of TiKV
     TiKVStateUp string = "Up"
     // TiKVStateDown represents status of Down of TiKV
     TiKVStateDown string = "Down"
+    // TiKVStateOffline represents status of Offline of TiKV
+    TiKVStateOffline string = "Offline"
+    // TiKVStateTombstone represents status of Tombstone of TiKV
+    TiKVStateTombstone string = "Tombstone"
 )
 
 // MemberType represents member type
@@ -37,16 +38,12 @@ type MemberType string
 const (
     // PDMemberType is pd container type
     PDMemberType MemberType = "pd"
-
     // TiDBMemberType is tidb container type
     TiDBMemberType MemberType = "tidb"
-
     // TiKVMemberType is tikv container type
     TiKVMemberType MemberType = "tikv"
-
     //PushGatewayMemberType is pushgateway container type
     PushGatewayMemberType MemberType = "pushgateway"
-
     // UnknownMemberType is unknown container type
     UnknownMemberType MemberType = "unknown"
 )
diff --git a/pkg/controller/pd_control.go b/pkg/controller/pd_control.go
index 4b07c351312..9989adacef5 100644
--- a/pkg/controller/pd_control.go
+++ b/pkg/controller/pd_control.go
@@ -32,7 +32,9 @@ import (
 )
 
 const (
-    timeout           = 5 * time.Second
+    timeout = 5 * time.Second
+
+    // https://github.com/pingcap/pd/blob/master/server/coordinator.go#L42-L45
     schedulerExisted  = "scheduler existed"
     schedulerNotFound = "scheduler not found"
 )
diff --git a/pkg/controller/tidbcluster/tidb_cluster_control.go b/pkg/controller/tidbcluster/tidb_cluster_control.go
index 92777ca0924..267ef19fb45 100644
--- a/pkg/controller/tidbcluster/tidb_cluster_control.go
+++ b/pkg/controller/tidbcluster/tidb_cluster_control.go
@@ -17,7 +17,6 @@ import (
     "github.com/pingcap/tidb-operator/pkg/apis/pingcap.com/v1alpha1"
     "github.com/pingcap/tidb-operator/pkg/controller"
     "github.com/pingcap/tidb-operator/pkg/manager"
-    "github.com/pingcap/tidb-operator/pkg/util"
     apiequality "k8s.io/apimachinery/pkg/api/equality"
     errorutils "k8s.io/apimachinery/pkg/util/errors"
     "k8s.io/client-go/tools/record"
@@ -161,7 +160,7 @@ func (tcc *defaultTidbClusterControl) IsTiKVAvailable(tc *v1alpha1.TidbCluster)
     var availableNum int32
     for _, store := range tc.Status.TiKV.Stores {
-        if store.State == util.StoreUpState {
+        if store.State == v1alpha1.TiKVStateUp {
             availableNum++
         }
     }
diff --git a/pkg/manager/member/tikv_scaler.go b/pkg/manager/member/tikv_scaler.go
index ba8c11d14c2..9ac8fd6fdd7 100644
--- a/pkg/manager/member/tikv_scaler.go
+++ b/pkg/manager/member/tikv_scaler.go
@@ -22,7 +22,6 @@ import (
     "github.com/pingcap/tidb-operator/pkg/apis/pingcap.com/v1alpha1"
     "github.com/pingcap/tidb-operator/pkg/controller"
     "github.com/pingcap/tidb-operator/pkg/label"
-    "github.com/pingcap/tidb-operator/pkg/util"
     apps "k8s.io/api/apps/v1beta1"
     corelisters "k8s.io/client-go/listers/core/v1"
 )
@@ -86,7 +85,7 @@ func (tsd *tikvScaler) ScaleIn(tc *v1alpha1.TidbCluster, oldSet *apps.StatefulSe
             resetReplicas(newSet, oldSet)
             return err
         }
-        if state != util.StoreOfflineState {
+        if state != v1alpha1.TiKVStateOffline {
             if err := tsd.pdControl.GetPDClient(tc).DeleteStore(id); err != nil {
                 resetReplicas(newSet, oldSet)
                 return err
diff --git a/pkg/manager/member/tikv_scaler_test.go b/pkg/manager/member/tikv_scaler_test.go
index e825877f08c..6932c7fb5da 100644
--- a/pkg/manager/member/tikv_scaler_test.go
+++ b/pkg/manager/member/tikv_scaler_test.go
@@ -22,7 +22,6 @@ import (
     "github.com/pingcap/tidb-operator/pkg/apis/pingcap.com/v1alpha1"
     "github.com/pingcap/tidb-operator/pkg/controller"
     "github.com/pingcap/tidb-operator/pkg/label"
-    "github.com/pingcap/tidb-operator/pkg/util"
     corev1 "k8s.io/api/core/v1"
     "k8s.io/apimachinery/pkg/api/errors"
     metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -284,7 +283,7 @@ func TestTiKVScalerScaleIn(t *testing.T) {
             storeFun: func(tc *v1alpha1.TidbCluster) {
                 normalStoreFun(tc)
                 store := tc.Status.TiKV.Stores["1"]
-                store.State = util.StoreOfflineState
+                store.State = v1alpha1.TiKVStateOffline
                 tc.Status.TiKV.Stores["1"] = store
             },
             delStoreErr: false,
@@ -379,7 +378,7 @@ func normalStoreFun(tc *v1alpha1.TidbCluster) {
         "1": {
             ID:      "1",
             PodName: ordinalPodName(v1alpha1.TiKVMemberType, tc.GetName(), 4),
-            State:   util.StoreUpState,
+            State:   v1alpha1.TiKVStateUp,
         },
     }
 }
@@ -389,7 +388,7 @@ func tombstoneStoreFun(tc *v1alpha1.TidbCluster) {
         "1": {
             ID:      "1",
             PodName: ordinalPodName(v1alpha1.TiKVMemberType, tc.GetName(), 4),
-            State:   util.StoreTombstoneState,
+            State:   v1alpha1.TiKVStateTombstone,
         },
     }
 }
diff --git a/pkg/util/util.go b/pkg/util/util.go
index da7f092d77c..acb5a03c0fe 100644
--- a/pkg/util/util.go
+++ b/pkg/util/util.go
@@ -37,17 +37,6 @@ var (
     }
 )
 
-const (
-    // StoreUpState is state when tikv store is normal
-    StoreUpState = "Up"
-    // StoreOfflineState is state when tikv store is offline
-    StoreOfflineState = "Offline"
-    // StoreDownState is state when tikv store is down
-    StoreDownState = "Down"
-    // StoreTombstoneState is state when tikv store is tombstone
-    StoreTombstoneState = "Tombstone"
-)
-
 // AntiAffinityForPod creates a PodAntiAffinity with antiLabels
 func AntiAffinityForPod(namespace string, antiLabels map[string]string) *corev1.PodAntiAffinity {
     keys := []string{}
@@ -114,12 +103,12 @@ func AffinityForNodeSelector(namespace string, required bool, antiLabels, select
     preferredTerms := []corev1.PreferredSchedulingTerm{}
     exps := []corev1.NodeSelectorRequirement{}
     for _, key := range keys {
+        if selector[key] == "" {
+            continue
+        }
+        values := strings.Split(selector[key], ",")
         // region,zone,rack,host are preferred labels, others are must match labels
         if weight, ok := topologySchedulingWeight[key]; ok {
-            if selector[key] == "" {
-                continue
-            }
-            values := strings.Split(selector[key], ",")
             t := corev1.PreferredSchedulingTerm{
                 Weight: weight,
                 Preference: corev1.NodeSelectorTerm{
@@ -137,7 +126,7 @@ func AffinityForNodeSelector(namespace string, required bool, antiLabels, select
             requirement := corev1.NodeSelectorRequirement{
                 Key:      key,
                 Operator: corev1.NodeSelectorOpIn,
-                Values:   []string{selector[key]},
+                Values:   values,
             }
             // NodeSelectorRequirement in the same MatchExpressions are ANDed otherwise ORed
             exps = append(exps, requirement)
diff --git a/pkg/util/utils_test.go b/pkg/util/utils_test.go
new file mode 100644
index 00000000000..2163883401d
--- /dev/null
+++ b/pkg/util/utils_test.go
@@ -0,0 +1,532 @@
+// Copyright 2018 PingCAP, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package util
+
+import (
+    "testing"
+
+    . "github.com/onsi/gomega"
+    "github.com/pingcap/tidb-operator/pkg/apis/pingcap.com/v1alpha1"
+    corev1 "k8s.io/api/core/v1"
+    "k8s.io/apimachinery/pkg/api/resource"
+    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+    "k8s.io/kubernetes/pkg/kubelet/apis"
+)
+
+func TestAffinityForNodeSelector(t *testing.T) {
+    g := NewGomegaWithT(t)
+    type testcase struct {
+        name       string
+        required   bool
+        antiLabels map[string]string
+        selector   map[string]string
+        expectFn   func(*GomegaWithT, *corev1.Affinity)
+    }
+
+    antiLabels := map[string]string{"region": "region1", "zone": "zone1", "rack": "rack1", apis.LabelHostname: "host1"}
+    testFn := func(test *testcase, t *testing.T) {
+        t.Log(test.name)
+        test.expectFn(g, AffinityForNodeSelector(metav1.NamespaceDefault, test.required, test.antiLabels, test.selector))
+    }
+
+    tests := []testcase{
+        {
+            name:       "selector is nil",
+            required:   false,
+            antiLabels: nil,
+            selector:   nil,
+            expectFn: func(g *GomegaWithT, affinity *corev1.Affinity) {
+                g.Expect(affinity).To(BeNil())
+            },
+        },
+        {
+            name:       "required, antiLabels is nil",
+            required:   true,
+            antiLabels: nil,
+            selector:   map[string]string{"a": "a1,a2,a3", "b": "b1"},
+            expectFn: func(g *GomegaWithT, affinity *corev1.Affinity) {
+                affi := &corev1.Affinity{
+                    NodeAffinity: &corev1.NodeAffinity{
+                        RequiredDuringSchedulingIgnoredDuringExecution: &corev1.NodeSelector{
+                            NodeSelectorTerms: []corev1.NodeSelectorTerm{
+                                {
+                                    MatchExpressions: []corev1.NodeSelectorRequirement{
+                                        {
+                                            Key:      "a",
+                                            Operator: corev1.NodeSelectorOpIn,
+                                            Values:   []string{"a1", "a2", "a3"},
+                                        },
+                                        {
+                                            Key:      "b",
+                                            Operator: corev1.NodeSelectorOpIn,
+                                            Values:   []string{"b1"},
+                                        },
+                                    },
+                                },
+                            },
+                        },
+                    },
+                }
+                g.Expect(affinity).To(Equal(affi))
+            },
+        },
+        {
+            name:       "required, antiLabels is not nil",
+            required:   true,
+            antiLabels: antiLabels,
+            selector:   map[string]string{"a": "a1,a2,a3", "b": "b1"},
+            expectFn: func(g *GomegaWithT, affinity *corev1.Affinity) {
+                affi := &corev1.Affinity{
+                    NodeAffinity: &corev1.NodeAffinity{
+                        RequiredDuringSchedulingIgnoredDuringExecution: &corev1.NodeSelector{
+                            NodeSelectorTerms: []corev1.NodeSelectorTerm{
+                                {
+                                    MatchExpressions: []corev1.NodeSelectorRequirement{
+                                        {
+                                            Key:      "a",
+                                            Operator: corev1.NodeSelectorOpIn,
+                                            Values:   []string{"a1", "a2", "a3"},
+                                        },
+                                        {
+                                            Key:      "b",
+                                            Operator: corev1.NodeSelectorOpIn,
+                                            Values:   []string{"b1"},
+                                        },
+                                    },
+                                },
+                            },
+                        },
+                    },
+                    PodAntiAffinity: &corev1.PodAntiAffinity{
+                        PreferredDuringSchedulingIgnoredDuringExecution: []corev1.WeightedPodAffinityTerm{
+                            {
+                                Weight: 80,
+                                PodAffinityTerm: corev1.PodAffinityTerm{
+                                    LabelSelector: &metav1.LabelSelector{MatchLabels: antiLabels},
+                                    TopologyKey:   apis.LabelHostname,
+                                    Namespaces:    []string{metav1.NamespaceDefault},
+                                },
+                            },
+                            {
+                                Weight: 40,
+                                PodAffinityTerm: corev1.PodAffinityTerm{
+                                    LabelSelector: &metav1.LabelSelector{MatchLabels: antiLabels},
+                                    TopologyKey:   "rack",
+                                    Namespaces:    []string{metav1.NamespaceDefault},
+                                },
+                            },
+                            {
+                                Weight: 10,
+                                PodAffinityTerm: corev1.PodAffinityTerm{
+                                    LabelSelector: &metav1.LabelSelector{MatchLabels: antiLabels},
+                                    TopologyKey:   "region",
+                                    Namespaces:    []string{metav1.NamespaceDefault},
+                                },
+                            },
+                            {
+                                Weight: 20,
+                                PodAffinityTerm: corev1.PodAffinityTerm{
+                                    LabelSelector: &metav1.LabelSelector{MatchLabels: antiLabels},
+                                    TopologyKey:   "zone",
+                                    Namespaces:    []string{metav1.NamespaceDefault},
+                                },
+                            },
+                        },
+                    },
+                }
+                g.Expect(affinity).To(Equal(affi))
+            },
+        },
+        {
+            name:       "not required",
+            required:   false,
+            antiLabels: nil,
+            selector: map[string]string{
+                "region": "region1",
+                "zone":   "zone1,zone2",
+                "rack":   "",
+                "a":      "a1,a2,a3",
+                "b":      "b1",
+            },
+            expectFn: func(g *GomegaWithT, affinity *corev1.Affinity) {
+                affi := &corev1.Affinity{
+                    NodeAffinity: &corev1.NodeAffinity{
+                        RequiredDuringSchedulingIgnoredDuringExecution: &corev1.NodeSelector{
+                            NodeSelectorTerms: []corev1.NodeSelectorTerm{
+                                {
+                                    MatchExpressions: []corev1.NodeSelectorRequirement{
+                                        {
+                                            Key:      "a",
+                                            Operator: corev1.NodeSelectorOpIn,
+                                            Values:   []string{"a1", "a2", "a3"},
+                                        },
+                                        {
+                                            Key:      "b",
+                                            Operator: corev1.NodeSelectorOpIn,
+                                            Values:   []string{"b1"},
+                                        },
+                                    },
+                                },
+                            },
+                        },
+                        PreferredDuringSchedulingIgnoredDuringExecution: []corev1.PreferredSchedulingTerm{
+                            {
+                                Weight: 10,
+                                Preference: corev1.NodeSelectorTerm{
+                                    MatchExpressions: []corev1.NodeSelectorRequirement{
+                                        {
+                                            Key:      "region",
+                                            Operator: corev1.NodeSelectorOpIn,
+                                            Values:   []string{"region1"},
+                                        },
+                                    },
+                                },
+                            },
+                            {
+                                Weight: 20,
+                                Preference: corev1.NodeSelectorTerm{
+                                    MatchExpressions: []corev1.NodeSelectorRequirement{
+                                        {
+                                            Key:      "zone",
+                                            Operator: corev1.NodeSelectorOpIn,
+                                            Values:   []string{"zone1", "zone2"},
+                                        },
+                                    },
+                                },
+                            },
+                        },
+                    },
+                }
+                g.Expect(affinity).To(Equal(affi))
+            },
+        },
+    }
+
+    for i := range tests {
+        testFn(&tests[i], t)
+    }
+}
+
+func TestResourceRequirement(t *testing.T) {
+    g := NewGomegaWithT(t)
+    type testcase struct {
+        name            string
+        spec            v1alpha1.ContainerSpec
+        defaultRequests []corev1.ResourceRequirements
+        expectFn        func(*GomegaWithT, corev1.ResourceRequirements)
+    }
+    testFn := func(test *testcase, t *testing.T) {
+        t.Log(test.name)
+        test.expectFn(g, ResourceRequirement(test.spec, test.defaultRequests...))
+    }
+    tests := []testcase{
+        {
+            name: "don't have spec, has one defaultRequests",
+            spec: v1alpha1.ContainerSpec{},
+            defaultRequests: []corev1.ResourceRequirements{
+                {
+                    Requests: corev1.ResourceList{
+                        corev1.ResourceCPU:    resource.MustParse("100m"),
+                        corev1.ResourceMemory: resource.MustParse("100Gi"),
+                    },
+                    Limits: corev1.ResourceList{
+                        corev1.ResourceCPU:    resource.MustParse("100m"),
+                        corev1.ResourceMemory: resource.MustParse("100Gi"),
+                    },
+                },
+            },
+            expectFn: func(g *GomegaWithT, req corev1.ResourceRequirements) {
+                g.Expect(req).To(Equal(corev1.ResourceRequirements{
+                    Requests: corev1.ResourceList{
+                        corev1.ResourceCPU:    resource.MustParse("100m"),
+                        corev1.ResourceMemory: resource.MustParse("100Gi"),
+                    },
+                    Limits: corev1.ResourceList{
+                        corev1.ResourceCPU:    resource.MustParse("100m"),
+                        corev1.ResourceMemory: resource.MustParse("100Gi"),
+                    },
+                }))
+            },
+        },
+        {
+            name: "don't have spec, has two defaultRequests",
+            spec: v1alpha1.ContainerSpec{},
+            defaultRequests: []corev1.ResourceRequirements{
+                {
+                    Requests: corev1.ResourceList{
+                        corev1.ResourceCPU:    resource.MustParse("100m"),
+                        corev1.ResourceMemory: resource.MustParse("100Gi"),
+                    },
+                    Limits: corev1.ResourceList{
+                        corev1.ResourceCPU:    resource.MustParse("100m"),
+                        corev1.ResourceMemory: resource.MustParse("100Gi"),
+                    },
+                },
+                {
+                    Requests: corev1.ResourceList{
+                        corev1.ResourceCPU:    resource.MustParse("200m"),
+                        corev1.ResourceMemory: resource.MustParse("200Gi"),
+                    },
+                    Limits: corev1.ResourceList{
+                        corev1.ResourceCPU:    resource.MustParse("200m"),
+                        corev1.ResourceMemory: resource.MustParse("200Gi"),
+                    },
+                },
+            },
+            expectFn: func(g *GomegaWithT, req corev1.ResourceRequirements) {
+                g.Expect(req).To(Equal(corev1.ResourceRequirements{
+                    Requests: corev1.ResourceList{
+                        corev1.ResourceCPU:    resource.MustParse("100m"),
+                        corev1.ResourceMemory: resource.MustParse("100Gi"),
+                    },
+                    Limits: corev1.ResourceList{
+                        corev1.ResourceCPU:    resource.MustParse("100m"),
+                        corev1.ResourceMemory: resource.MustParse("100Gi"),
+                    },
+                }))
+            },
+        },
+        {
+            name: "spec cover defaultRequests",
+            spec: v1alpha1.ContainerSpec{
+                Requests: &v1alpha1.ResourceRequirement{
+                    Memory: "200Gi",
+                    CPU:    "200m",
+                },
+                Limits: &v1alpha1.ResourceRequirement{
+                    Memory: "200Gi",
+                    CPU:    "200m",
+                },
+            },
+            defaultRequests: []corev1.ResourceRequirements{
+                {
+                    Requests: corev1.ResourceList{
+                        corev1.ResourceCPU:    resource.MustParse("100m"),
+                        corev1.ResourceMemory: resource.MustParse("100Gi"),
+                    },
+                    Limits: corev1.ResourceList{
+                        corev1.ResourceCPU:    resource.MustParse("100m"),
+                        corev1.ResourceMemory: resource.MustParse("100Gi"),
+                    },
+                },
+            },
+            expectFn: func(g *GomegaWithT, req corev1.ResourceRequirements) {
+                g.Expect(req).To(Equal(corev1.ResourceRequirements{
+                    Requests: corev1.ResourceList{
+                        corev1.ResourceCPU:    resource.MustParse("200m"),
+                        corev1.ResourceMemory: resource.MustParse("200Gi"),
+                    },
+                    Limits: corev1.ResourceList{
+                        corev1.ResourceCPU:    resource.MustParse("200m"),
+                        corev1.ResourceMemory: resource.MustParse("200Gi"),
+                    },
+                }))
+            },
+        },
+        {
+            name: "spec is not correct",
+            spec: v1alpha1.ContainerSpec{
+                Requests: &v1alpha1.ResourceRequirement{
+                    Memory: "200xi",
+                    CPU:    "200x",
+                },
+                Limits: &v1alpha1.ResourceRequirement{
+                    Memory: "200xi",
+                    CPU:    "200x",
+                },
+            },
+            defaultRequests: []corev1.ResourceRequirements{
+                {
+                    Requests: corev1.ResourceList{
+                        corev1.ResourceCPU:    resource.MustParse("100m"),
+                        corev1.ResourceMemory: resource.MustParse("100Gi"),
+                    },
+                    Limits: corev1.ResourceList{
+                        corev1.ResourceCPU:    resource.MustParse("100m"),
+                        corev1.ResourceMemory: resource.MustParse("100Gi"),
+                    },
+                },
+            },
+            expectFn: func(g *GomegaWithT, req corev1.ResourceRequirements) {
+                g.Expect(req).To(Equal(corev1.ResourceRequirements{
+                    Requests: corev1.ResourceList{
+                        corev1.ResourceCPU:    resource.MustParse("100m"),
+                        corev1.ResourceMemory: resource.MustParse("100Gi"),
+                    },
+                    Limits: corev1.ResourceList{
+                        corev1.ResourceCPU:    resource.MustParse("100m"),
+                        corev1.ResourceMemory: resource.MustParse("100Gi"),
+                    },
+                }))
+            },
+        },
+        {
+            name: "Request don't have CPU",
+            spec: v1alpha1.ContainerSpec{
+                Requests: &v1alpha1.ResourceRequirement{
+                    Memory: "100Gi",
+                },
+            },
+            expectFn: func(g *GomegaWithT, req corev1.ResourceRequirements) {
+                g.Expect(req).To(Equal(corev1.ResourceRequirements{
+                    Requests: corev1.ResourceList{
+                        corev1.ResourceMemory: resource.MustParse("100Gi"),
+                    },
+                }))
+            },
+        },
+        {
+            name: "Request don't have CPU, default has",
+            spec: v1alpha1.ContainerSpec{
+                Requests: &v1alpha1.ResourceRequirement{
+                    Memory: "100Gi",
+                },
+            },
+            defaultRequests: []corev1.ResourceRequirements{
+                {
+                    Requests: corev1.ResourceList{
+                        corev1.ResourceCPU: resource.MustParse("100m"),
+                    },
+                },
+            },
+            expectFn: func(g *GomegaWithT, req corev1.ResourceRequirements) {
+                g.Expect(req.Requests[corev1.ResourceMemory]).To(Equal(resource.MustParse("100Gi")))
+                g.Expect(req.Requests[corev1.ResourceCPU]).To(Equal(resource.MustParse("100m")))
+            },
+        },
+        {
+            name: "Request don't have memory",
+            spec: v1alpha1.ContainerSpec{
+                Requests: &v1alpha1.ResourceRequirement{
+                    CPU: "100m",
+                },
+            },
+            expectFn: func(g *GomegaWithT, req corev1.ResourceRequirements) {
+                g.Expect(req).To(Equal(corev1.ResourceRequirements{
+                    Requests: corev1.ResourceList{
+                        corev1.ResourceCPU: resource.MustParse("100m"),
+                    },
+                }))
+            },
+        },
+        {
+            name: "Request don't have memory, default has",
+            spec: v1alpha1.ContainerSpec{
+                Requests: &v1alpha1.ResourceRequirement{
+                    CPU: "100m",
+                },
+            },
+            defaultRequests: []corev1.ResourceRequirements{
+                {
+                    Requests: corev1.ResourceList{
+                        corev1.ResourceMemory: resource.MustParse("100Gi"),
+                    },
+                },
+            },
+            expectFn: func(g *GomegaWithT, req corev1.ResourceRequirements) {
+                g.Expect(req.Requests[corev1.ResourceMemory]).To(Equal(resource.MustParse("100Gi")))
+                g.Expect(req.Requests[corev1.ResourceCPU]).To(Equal(resource.MustParse("100m")))
+            },
+        },
+
+        {
+            name: "Limits don't have CPU",
+            spec: v1alpha1.ContainerSpec{
+                Limits: &v1alpha1.ResourceRequirement{
+                    Memory: "100Gi",
+                },
+            },
+            expectFn: func(g *GomegaWithT, req corev1.ResourceRequirements) {
+                g.Expect(req).To(Equal(corev1.ResourceRequirements{
+                    Limits: corev1.ResourceList{
+                        corev1.ResourceMemory: resource.MustParse("100Gi"),
+                    },
+                }))
+            },
+        },
+        {
+            name: "Limits don't have CPU, default has",
+            spec: v1alpha1.ContainerSpec{
+                Limits: &v1alpha1.ResourceRequirement{
+                    Memory: "100Gi",
+                },
+            },
+            defaultRequests: []corev1.ResourceRequirements{
+                {
+                    Limits: corev1.ResourceList{
+                        corev1.ResourceCPU: resource.MustParse("100m"),
+                    },
+                },
+            },
+            expectFn: func(g *GomegaWithT, req corev1.ResourceRequirements) {
+                g.Expect(req.Limits[corev1.ResourceMemory]).To(Equal(resource.MustParse("100Gi")))
+                g.Expect(req.Limits[corev1.ResourceCPU]).To(Equal(resource.MustParse("100m")))
+            },
+        },
+        {
+            name: "Limits don't have memory",
+            spec: v1alpha1.ContainerSpec{
+                Limits: &v1alpha1.ResourceRequirement{
+                    CPU: "100m",
+                },
+            },
+            expectFn: func(g *GomegaWithT, req corev1.ResourceRequirements) {
+                g.Expect(req).To(Equal(corev1.ResourceRequirements{
+                    Limits: corev1.ResourceList{
+                        corev1.ResourceCPU: resource.MustParse("100m"),
+                    },
+                }))
+            },
+        },
+        {
+            name: "Limits don't have memory, default has",
+            spec: v1alpha1.ContainerSpec{
+                Limits: &v1alpha1.ResourceRequirement{
+                    CPU: "100m",
+                },
+            },
+            defaultRequests: []corev1.ResourceRequirements{
+                {
+                    Limits: corev1.ResourceList{
+                        corev1.ResourceMemory: resource.MustParse("100Gi"),
+                    },
+                },
+            },
+            expectFn: func(g *GomegaWithT, req corev1.ResourceRequirements) {
+                g.Expect(req.Limits[corev1.ResourceMemory]).To(Equal(resource.MustParse("100Gi")))
+                g.Expect(req.Limits[corev1.ResourceCPU]).To(Equal(resource.MustParse("100m")))
+            },
+        },
+    }
+    for i := range tests {
+        testFn(&tests[i], t)
+    }
+}
+
+func TestGetOrdinalFromPodName(t *testing.T) {
+    g := NewGomegaWithT(t)
+
+    i, err := GetOrdinalFromPodName("pod-1")
+    g.Expect(err).NotTo(HaveOccurred())
+    g.Expect(i).To(Equal(int32(1)))
+
+    i, err = GetOrdinalFromPodName("pod-notint")
+    g.Expect(err).To(HaveOccurred())
+    g.Expect(i).To(Equal(int32(0)))
+}
+
+func TestGetNextOrdinalPodName(t *testing.T) {
+    g := NewGomegaWithT(t)
+    g.Expect(GetNextOrdinalPodName("pod-1", 1)).To(Equal("pod-2"))
+}
diff --git a/tests/e2e/create.go b/tests/e2e/create.go
index 405b1a5c6f9..d2279b80ed0 100644
--- a/tests/e2e/create.go
+++ b/tests/e2e/create.go
@@ -26,7 +26,6 @@ import (
     "github.com/pingcap/tidb-operator/pkg/apis/pingcap.com/v1alpha1"
     "github.com/pingcap/tidb-operator/pkg/controller"
     "github.com/pingcap/tidb-operator/pkg/label"
-    "github.com/pingcap/tidb-operator/pkg/util"
     metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     "k8s.io/apimachinery/pkg/labels"
     "k8s.io/apimachinery/pkg/util/wait"
@@ -273,8 +272,8 @@ func tikvMemberRunning(tc *v1alpha1.TidbCluster) (bool, error) {
     }
 
     for _, store := range tc.Status.TiKV.Stores {
-        if store.State != util.StoreUpState {
-            logf("store(%s) state != %s", store.ID, util.StoreUpState)
+        if store.State != v1alpha1.TiKVStateUp {
+            logf("store(%s) state != %s", store.ID, v1alpha1.TiKVStateUp)
             return false, nil
         }
     }