Skip to content

Commit

Permalink
Merge branch 'master' into user-guide
Browse files Browse the repository at this point in the history
  • Loading branch information
tennix authored Nov 22, 2018
2 parents 6db57f3 + f0cf8a4 commit 1c59a0a
Show file tree
Hide file tree
Showing 20 changed files with 2,016 additions and 170 deletions.
2 changes: 1 addition & 1 deletion charts/tidb-cluster/templates/config/_pd-config.tpl
Original file line number Diff line number Diff line change
Expand Up @@ -26,7 +26,7 @@ cert-path = ""
key-path = ""

[log]
level = "{{ .Values.pd.logLevel }}"
level = {{ .Values.pd.logLevel | default "info" | quote }}

# log format, one of json, text, console
#format = "text"
Expand Down
2 changes: 1 addition & 1 deletion charts/tidb-cluster/templates/config/_tidb-config.tpl
Original file line number Diff line number Diff line change
Expand Up @@ -43,7 +43,7 @@ lower-case-table-names = 2

[log]
# Log level: debug, info, warn, error, fatal.
level = "{{ .Values.tidb.logLevel }}"
level = {{ .Values.tidb.logLevel | default "info" | quote }}

# Log format, one of json, text, console.
format = "text"
Expand Down
2 changes: 1 addition & 1 deletion charts/tidb-cluster/templates/config/_tikv-config.tpl
Original file line number Diff line number Diff line change
Expand Up @@ -6,7 +6,7 @@
# e.g.: 78_000 = "1.3m"

# log level: trace, debug, info, warn, error, off.
log-level = "{{ .Values.tikv.logLevel }}"
log-level = {{ .Values.tikv.logLevel | default "info" | quote }}
# file to store log, write to stderr if it's empty.
# log-file = ""

Expand Down
4 changes: 2 additions & 2 deletions charts/tidb-operator/templates/scheduler-deployment.yaml
Original file line number Diff line number Diff line change
@@ -1,7 +1,7 @@
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
name: tidb-scheduler
name: {{ .Values.scheduler.schedulerName }}
labels:
app.kubernetes.io/name: {{ template "chart.name" . }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
Expand All @@ -26,7 +26,7 @@ spec:
serviceAccount: {{ .Values.scheduler.serviceAccount }}
{{- end }}
containers:
- name: tidb-scheduler
- name: {{ .Values.scheduler.schedulerName }}
image: {{ .Values.operatorImage }}
resources:
{{ toYaml .Values.scheduler.resources | indent 12 }}
Expand Down
Original file line number Diff line number Diff line change
@@ -1,7 +1,7 @@
apiVersion: v1
kind: ConfigMap
metadata:
name: tidb-scheduler-policy
name: {{ .Values.scheduler.schedulerName }}-policy
labels:
app.kubernetes.io/name: {{ template "chart.name" . }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
Expand Down
12 changes: 6 additions & 6 deletions charts/tidb-operator/templates/scheduler-rbac.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -13,7 +13,7 @@ metadata:
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
name: {{ .Release.Name }}:tidb-scheduler
name: {{ .Release.Name }}:{{ .Values.scheduler.schedulerName }}
labels:
app.kubernetes.io/name: {{ template "chart.name" . }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
Expand Down Expand Up @@ -54,7 +54,7 @@ rules:
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
name: {{ .Release.Name }}:tidb-scheduler
name: {{ .Release.Name }}:{{ .Values.scheduler.schedulerName }}
labels:
app.kubernetes.io/name: {{ template "chart.name" . }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
Expand All @@ -67,14 +67,14 @@ subjects:
namespace: {{ .Release.Namespace }}
roleRef:
kind: ClusterRole
name: {{ .Release.Name }}:tidb-scheduler
name: {{ .Release.Name }}:{{ .Values.scheduler.schedulerName }}
apiGroup: rbac.authorization.k8s.io
{{- if (not .Values.clusterScoped) }}
---
kind: Role
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
name: {{ .Release.Name }}:tidb-scheduler
name: {{ .Release.Name }}:{{ .Values.scheduler.schedulerName }}
labels:
app.kubernetes.io/name: {{ template "chart.name" . }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
Expand Down Expand Up @@ -107,7 +107,7 @@ rules:
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
name: {{ .Release.Name }}:tidb-scheduler
name: {{ .Release.Name }}:{{ .Values.scheduler.schedulerName }}
labels:
app.kubernetes.io/name: {{ template "chart.name" . }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
Expand All @@ -119,7 +119,7 @@ subjects:
name: {{ .Values.scheduler.serviceAccount }}
roleRef:
kind: Role
name: {{ .Release.Name }}:tidb-scheduler
name: {{ .Release.Name }}:{{ .Values.scheduler.schedulerName }}
apiGroup: rbac.authorization.k8s.io
{{- end }}
{{- end }}
2 changes: 1 addition & 1 deletion ci/pingcap_tidb_operator_build_dind.groovy
Original file line number Diff line number Diff line change
Expand Up @@ -51,7 +51,7 @@ def call(BUILD_BRANCH, CREDENTIALS_ID) {
GITHASH = sh(returnStdout: true, script: "git rev-parse HEAD").trim()
sh """
export GOPATH=${WORKSPACE}/go:$GOPATH
export PATH=${WORKSPACE}/go/bin
export PATH=${env.PATH}:${WORKSPACE}/go/bin
make check
make test
make
Expand Down
10 changes: 0 additions & 10 deletions pkg/controller/controller_utils.go
Original file line number Diff line number Diff line change
Expand Up @@ -159,16 +159,6 @@ func TiDBPeerMemberName(clusterName string) string {
return fmt.Sprintf("%s-tidb-peer", clusterName)
}

// PriTiDBMemberName returns the name of the privileged TiDB member
// for the given cluster, following the "<cluster>-privileged-tidb" convention.
func PriTiDBMemberName(clusterName string) string {
	return clusterName + "-privileged-tidb"
}

// MonitorMemberName returns the name of the monitor member for the
// given cluster, following the "<cluster>-monitor" convention.
func MonitorMemberName(clusterName string) string {
	return clusterName + "-monitor"
}

// AnnProm adds annotations for prometheus scraping metrics
func AnnProm(port int32) map[string]string {
return map[string]string{
Expand Down
233 changes: 227 additions & 6 deletions pkg/controller/controller_utils_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -15,21 +15,242 @@ package controller

import (
"fmt"
"testing"

. "github.com/onsi/gomega"
"github.com/pingcap/tidb-operator/pkg/apis/pingcap.com/v1alpha1"
apps "k8s.io/api/apps/v1beta1"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/tools/cache"
"k8s.io/apimachinery/pkg/types"
)

type fakeIndexer struct {
cache.Indexer
getError error
// TestRequeueError checks that RequeueErrorf builds a formatted error
// recognized by IsRequeueError, and that a plain error is not.
func TestRequeueError(t *testing.T) {
	g := NewGomegaWithT(t)

	requeue := RequeueErrorf("i am a requeue %s", "error")
	g.Expect(IsRequeueError(requeue)).To(BeTrue())
	_, isErr := requeue.(error)
	g.Expect(isErr).To(BeTrue())
	g.Expect(requeue.Error()).To(Equal("i am a requeue error"))

	plain := fmt.Errorf("i am not a requeue error")
	g.Expect(IsRequeueError(plain)).To(BeFalse())
}

// TestGetOwnerRef checks the controller owner reference built for a
// TidbCluster: kind/version come from controllerKind, and both the
// Controller and BlockOwnerDeletion flags are set.
func TestGetOwnerRef(t *testing.T) {
	g := NewGomegaWithT(t)

	cluster := newTidbCluster()
	cluster.UID = types.UID("demo-uid")

	ref := GetOwnerRef(cluster)
	g.Expect(ref.APIVersion).To(Equal(controllerKind.GroupVersion().String()))
	g.Expect(ref.Kind).To(Equal(controllerKind.Kind))
	g.Expect(ref.Name).To(Equal(cluster.GetName()))
	g.Expect(ref.UID).To(Equal(types.UID("demo-uid")))
	g.Expect(*ref.Controller).To(BeTrue())
	g.Expect(*ref.BlockOwnerDeletion).To(BeTrue())
}

// TestGetServiceType checks name-based lookup of a service type from a
// list, falling back to ClusterIP for unrecognized types and for names
// that are not present at all.
func TestGetServiceType(t *testing.T) {
	g := NewGomegaWithT(t)

	svcs := []v1alpha1.Service{
		{Name: "a", Type: string(corev1.ServiceTypeNodePort)},
		{Name: "b", Type: string(corev1.ServiceTypeLoadBalancer)},
		{Name: "c", Type: "Other"},
	}

	want := map[string]corev1.ServiceType{
		"a": corev1.ServiceTypeNodePort,
		"b": corev1.ServiceTypeLoadBalancer,
		"c": corev1.ServiceTypeClusterIP, // unrecognized type string falls back
		"d": corev1.ServiceTypeClusterIP, // missing name falls back
	}
	for _, name := range []string{"a", "b", "c", "d"} {
		g.Expect(GetServiceType(svcs, name)).To(Equal(want[name]))
	}
}

// TestTiKVCapacity checks how storage resource limits are converted to
// the capacity string handed to TiKV: nil/empty/unparseable limits all
// collapse to "0", and a parseable quantity is rendered in GB form.
func TestTiKVCapacity(t *testing.T) {
	g := NewGomegaWithT(t)

	cases := []struct {
		name  string
		limit *v1alpha1.ResourceRequirement
		want  string
	}{
		{name: "limit is nil", limit: nil, want: "0"},
		{name: "storage is empty", limit: &v1alpha1.ResourceRequirement{Storage: ""}, want: "0"},
		{name: "failed to parse quantity", limit: &v1alpha1.ResourceRequirement{Storage: "100x"}, want: "0"},
		{name: "100Gi", limit: &v1alpha1.ResourceRequirement{Storage: "100Gi"}, want: "100GB"},
		// NOTE(review): this case uses the same "100Gi" input as the one
		// above despite its "100GiB" name — possibly a different quantity
		// was intended; confirm with the author.
		{name: "100GiB", limit: &v1alpha1.ResourceRequirement{Storage: "100Gi"}, want: "100GB"},
	}

	for i := range cases {
		c := &cases[i]
		t.Log(c.name)
		g.Expect(TiKVCapacity(c.limit)).To(Equal(c.want))
	}
}

// TestDefaultPushGatewayRequest checks the built-in resource requests
// and limits for the pushgateway sidecar (50m/50Mi requested,
// 100m/100Mi limited).
func TestDefaultPushGatewayRequest(t *testing.T) {
	g := NewGomegaWithT(t)

	req := DefaultPushGatewayRequest()
	wantRequests := map[corev1.ResourceName]string{
		corev1.ResourceCPU:    "50m",
		corev1.ResourceMemory: "50Mi",
	}
	wantLimits := map[corev1.ResourceName]string{
		corev1.ResourceCPU:    "100m",
		corev1.ResourceMemory: "100Mi",
	}
	for name, qty := range wantRequests {
		g.Expect(req.Requests[name]).To(Equal(resource.MustParse(qty)))
	}
	for name, qty := range wantLimits {
		g.Expect(req.Limits[name]).To(Equal(resource.MustParse(qty)))
	}
}

// TestGetPushgatewayImage checks that a cluster-specified pushgateway
// image overrides the built-in default.
func TestGetPushgatewayImage(t *testing.T) {
	g := NewGomegaWithT(t)

	cluster := &v1alpha1.TidbCluster{}
	g.Expect(GetPushgatewayImage(cluster)).To(Equal(defaultPushgatewayImage))

	cluster.Spec.TiKVPromGateway.Image = "image-1"
	g.Expect(GetPushgatewayImage(cluster)).To(Equal("image-1"))
}

// TestPDMemberName checks the "<cluster>-pd" naming convention.
func TestPDMemberName(t *testing.T) {
	NewGomegaWithT(t).Expect(PDMemberName("demo")).To(Equal("demo-pd"))
}

// TestPDPeerMemberName checks the "<cluster>-pd-peer" naming convention.
func TestPDPeerMemberName(t *testing.T) {
	NewGomegaWithT(t).Expect(PDPeerMemberName("demo")).To(Equal("demo-pd-peer"))
}

// TestTiKVMemberName checks the "<cluster>-tikv" naming convention.
func TestTiKVMemberName(t *testing.T) {
	NewGomegaWithT(t).Expect(TiKVMemberName("demo")).To(Equal("demo-tikv"))
}

func (f *fakeIndexer) GetByKey(_ string) (interface{}, bool, error) {
return nil, false, f.getError
func TestTiKVPeerMemberName(t *testing.T) {
g := NewGomegaWithT(t)
g.Expect(TiKVPeerMemberName("demo")).To(Equal("demo-tikv-peer"))
}

// TestTiDBMemberName checks the "<cluster>-tidb" naming convention.
func TestTiDBMemberName(t *testing.T) {
	NewGomegaWithT(t).Expect(TiDBMemberName("demo")).To(Equal("demo-tidb"))
}

// TestTiDBPeerMemberName checks the "<cluster>-tidb-peer" naming convention.
func TestTiDBPeerMemberName(t *testing.T) {
	NewGomegaWithT(t).Expect(TiDBPeerMemberName("demo")).To(Equal("demo-tidb-peer"))
}

// TestAnnProm checks the Prometheus scrape annotations: scraping is
// enabled, the metrics path is /metrics, and the port is stringified.
func TestAnnProm(t *testing.T) {
	g := NewGomegaWithT(t)

	got := AnnProm(9090)
	want := map[string]string{
		"prometheus.io/scrape": "true",
		"prometheus.io/path":   "/metrics",
		"prometheus.io/port":   "9090",
	}
	for key, value := range want {
		g.Expect(got[key]).To(Equal(value))
	}
}

// TestSetIfNotEmpty checks that setIfNotEmpty writes a non-empty value
// (whether or not the key already exists) and is a no-op for an empty
// value when the key already has one.
func TestSetIfNotEmpty(t *testing.T) {
	g := NewGomegaWithT(t)

	cases := []struct {
		name  string
		key   string
		value string
		want  string // expected m[key] after the call
	}{
		{name: "has key", key: "a", value: "aa", want: "aa"},
		{name: "don't have key", key: "b", value: "b", want: "b"},
		{name: "new key's value is empty", key: "b", value: "", want: ""},
		{name: "old key's value is empty", key: "a", value: "", want: "a"},
	}

	for i := range cases {
		c := &cases[i]
		t.Log(c.name)

		m := map[string]string{"a": "a"}
		setIfNotEmpty(m, c.key, c.value)
		g.Expect(m[c.key]).To(Equal(c.want))
	}
}

func collectEvents(source <-chan string) []string {
Expand Down
2 changes: 1 addition & 1 deletion pkg/controller/pd_control.go
Original file line number Diff line number Diff line change
Expand Up @@ -32,7 +32,7 @@ import (
)

const (
timeout = 2 * time.Second
timeout = 5 * time.Second
schedulerExisted = "scheduler existed"
schedulerNotFound = "scheduler not found"
)
Expand Down
Loading

0 comments on commit 1c59a0a

Please sign in to comment.